Diffstat (limited to 'net')
48 files changed, 396 insertions(+), 644 deletions(-)
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 121bf6f49148..2e62105d91bd 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -962,7 +962,6 @@ static struct file_operations arp_seq_fops = {
 
 static int __init atm_clip_init(void)
 {
-	struct proc_dir_entry *p;
 	neigh_table_init_no_netlink(&clip_tbl);
 
 	clip_tbl_hook = &clip_tbl;
@@ -972,9 +971,15 @@ static int __init atm_clip_init(void)
 
 	setup_timer(&idle_timer, idle_timer_check, 0);
 
-	p = create_proc_entry("arp", S_IRUGO, atm_proc_root);
-	if (p)
-		p->proc_fops = &arp_seq_fops;
+#ifdef CONFIG_PROC_FS
+	{
+		struct proc_dir_entry *p;
+
+		p = create_proc_entry("arp", S_IRUGO, atm_proc_root);
+		if (p)
+			p->proc_fops = &arp_seq_fops;
+	}
+#endif
 
 	return 0;
 }
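
The clip.c change above is the usual idiom for proc-only initialization: the proc_dir_entry local moves inside the #ifdef block, so builds without CONFIG_PROC_FS neither declare an unused variable nor reference the proc API. A minimal sketch of the pattern (the entry name and fops are illustrative, not from the tree):

#ifdef CONFIG_PROC_FS
	{
		struct proc_dir_entry *p;

		/* "example" and example_seq_fops are hypothetical */
		p = create_proc_entry("example", S_IRUGO, NULL);
		if (p)
			p->proc_fops = &example_seq_fops;
	}
#endif
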
diff --git a/net/atm/ipcommon.c b/net/atm/ipcommon.c
index 4b1faca5013f..1d3de42fada0 100644
--- a/net/atm/ipcommon.c
+++ b/net/atm/ipcommon.c
@@ -25,22 +25,27 @@
 /*
  * skb_migrate appends the list at "from" to "to", emptying "from" in the
  * process. skb_migrate is atomic with respect to all other skb operations on
- * "from" and "to". Note that it locks both lists at the same time, so beware
- * of potential deadlocks.
+ * "from" and "to". Note that it locks both lists at the same time, so to deal
+ * with the lock ordering, the locks are taken in address order.
  *
  * This function should live in skbuff.c or skbuff.h.
  */
 
 
-void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
+void skb_migrate(struct sk_buff_head *from, struct sk_buff_head *to)
 {
 	unsigned long flags;
 	struct sk_buff *skb_from = (struct sk_buff *) from;
 	struct sk_buff *skb_to = (struct sk_buff *) to;
 	struct sk_buff *prev;
 
-	spin_lock_irqsave(&from->lock,flags);
-	spin_lock(&to->lock);
+	if ((unsigned long) from < (unsigned long) to) {
+		spin_lock_irqsave(&from->lock, flags);
+		spin_lock_nested(&to->lock, SINGLE_DEPTH_NESTING);
+	} else {
+		spin_lock_irqsave(&to->lock, flags);
+		spin_lock_nested(&from->lock, SINGLE_DEPTH_NESTING);
+	}
 	prev = from->prev;
 	from->next->prev = to->prev;
 	prev->next = skb_to;
@@ -51,7 +56,7 @@ void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
 	from->prev = skb_from;
 	from->next = skb_from;
 	from->qlen = 0;
-	spin_unlock_irqrestore(&from->lock,flags);
+	spin_unlock_irqrestore(&from->lock, flags);
 }
 
 
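
The new locking in skb_migrate() is the standard address-ordering trick for taking two locks of the same lock class: the lock at the lower address is always taken first, so two concurrent migrations between the same pair of queues can never acquire the locks in opposite orders, and the spin_lock_nested() annotation tells lockdep the double acquisition is intentional. A minimal sketch of the idea outside the skb code (names are illustrative):

/* Sketch: ABBA-safe double lock by comparing lock addresses.
 * SINGLE_DEPTH_NESTING lets lockdep accept a second lock of the
 * same class while the first is held. */
static void lock_pair(spinlock_t *a, spinlock_t *b)
{
	if ((unsigned long) a < (unsigned long) b) {
		spin_lock(a);
		spin_lock_nested(b, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(b);
		spin_lock_nested(a, SINGLE_DEPTH_NESTING);
	}
}
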
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 10a3c0aa8398..000695c48583 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -145,7 +145,7 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi,
 	ax25_cb *s;
 	struct hlist_node *node;
 
-	spin_lock_bh(&ax25_list_lock);
+	spin_lock(&ax25_list_lock);
 	ax25_for_each(s, node, &ax25_list) {
 		if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
 			continue;
@@ -154,12 +154,12 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi,
 		/* If device is null we match any device */
 		if (s->ax25_dev == NULL || s->ax25_dev->dev == dev) {
 			sock_hold(s->sk);
-			spin_unlock_bh(&ax25_list_lock);
+			spin_unlock(&ax25_list_lock);
 			return s->sk;
 		}
 	}
-	spin_unlock_bh(&ax25_list_lock);
+	spin_unlock(&ax25_list_lock);
 
 	return NULL;
 }
@@ -174,7 +174,7 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
 	ax25_cb *s;
 	struct hlist_node *node;
 
-	spin_lock_bh(&ax25_list_lock);
+	spin_lock(&ax25_list_lock);
 	ax25_for_each(s, node, &ax25_list) {
 		if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
 		    !ax25cmp(&s->dest_addr, dest_addr) &&
@@ -185,7 +185,7 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
 		}
 	}
 
-	spin_unlock_bh(&ax25_list_lock);
+	spin_unlock(&ax25_list_lock);
 
 	return sk;
 }
@@ -235,7 +235,7 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
 	struct sk_buff *copy;
 	struct hlist_node *node;
 
-	spin_lock_bh(&ax25_list_lock);
+	spin_lock(&ax25_list_lock);
 	ax25_for_each(s, node, &ax25_list) {
 		if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
 		    s->sk->sk_type == SOCK_RAW &&
@@ -248,7 +248,7 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
 			kfree_skb(copy);
 		}
 	}
-	spin_unlock_bh(&ax25_list_lock);
+	spin_unlock(&ax25_list_lock);
 }
 
 /*
@@ -486,10 +486,9 @@ ax25_cb *ax25_create_cb(void)
 {
 	ax25_cb *ax25;
 
-	if ((ax25 = kmalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
+	if ((ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
 		return NULL;
 
-	memset(ax25, 0x00, sizeof(*ax25));
 	atomic_set(&ax25->refcount, 1);
 
 	skb_queue_head_init(&ax25->write_queue);
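
The kmalloc()+memset() to kzalloc() conversions running through this series are mechanical: kzalloc() returns zeroed memory, so the separate memset() disappears and the failure path is unchanged. A sketch of the equivalence (wrapper names are illustrative):

/* Sketch: the old two-step allocation ... */
static ax25_cb *alloc_cb_old(void)
{
	ax25_cb *ax25 = kmalloc(sizeof(*ax25), GFP_ATOMIC);

	if (ax25)
		memset(ax25, 0x00, sizeof(*ax25));
	return ax25;
}

/* ... behaves exactly like the single zeroing call: */
static ax25_cb *alloc_cb_new(void)
{
	return kzalloc(sizeof(ax25_cb), GFP_ATOMIC);
}
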
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index 47e6e790bd67..b787678220ff 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -55,15 +55,13 @@ void ax25_dev_device_up(struct net_device *dev)
 {
 	ax25_dev *ax25_dev;
 
-	if ((ax25_dev = kmalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) {
+	if ((ax25_dev = kzalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) {
 		printk(KERN_ERR "AX.25: ax25_dev_device_up - out of memory\n");
 		return;
 	}
 
 	ax25_unregister_sysctl();
 
-	memset(ax25_dev, 0x00, sizeof(*ax25_dev));
-
 	dev->ax25_ptr = ax25_dev;
 	ax25_dev->dev = dev;
 	dev_hold(dev);
diff --git a/net/ax25/ax25_ds_subr.c b/net/ax25/ax25_ds_subr.c
index 1d4ab641f82b..4d22d4430ec8 100644
--- a/net/ax25/ax25_ds_subr.c
+++ b/net/ax25/ax25_ds_subr.c
@@ -80,7 +80,7 @@ void ax25_ds_enquiry_response(ax25_cb *ax25)
 	ax25_start_t3timer(ax25);
 	ax25_ds_set_timer(ax25->ax25_dev);
 
-	spin_lock_bh(&ax25_list_lock);
+	spin_lock(&ax25_list_lock);
 	ax25_for_each(ax25o, node, &ax25_list) {
 		if (ax25o == ax25)
 			continue;
@@ -106,7 +106,7 @@ void ax25_ds_enquiry_response(ax25_cb *ax25)
 		if (ax25o->state != AX25_STATE_0)
 			ax25_start_t3timer(ax25o);
 	}
-	spin_unlock_bh(&ax25_list_lock);
+	spin_unlock(&ax25_list_lock);
 }
 
 void ax25_ds_establish_data_link(ax25_cb *ax25)
@@ -162,13 +162,13 @@ static int ax25_check_dama_slave(ax25_dev *ax25_dev)
 	int res = 0;
 	struct hlist_node *node;
 
-	spin_lock_bh(&ax25_list_lock);
+	spin_lock(&ax25_list_lock);
 	ax25_for_each(ax25, node, &ax25_list)
 		if (ax25->ax25_dev == ax25_dev && (ax25->condition & AX25_COND_DAMA_MODE) && ax25->state > AX25_STATE_1) {
 			res = 1;
 			break;
 		}
-	spin_unlock_bh(&ax25_list_lock);
+	spin_unlock(&ax25_list_lock);
 
 	return res;
 }
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 5961459935eb..4f44185955c7 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -85,7 +85,7 @@ static void ax25_ds_timeout(unsigned long arg)
 		return;
 	}
 
-	spin_lock_bh(&ax25_list_lock);
+	spin_lock(&ax25_list_lock);
 	ax25_for_each(ax25, node, &ax25_list) {
 		if (ax25->ax25_dev != ax25_dev || !(ax25->condition & AX25_COND_DAMA_MODE))
 			continue;
@@ -93,7 +93,7 @@ static void ax25_ds_timeout(unsigned long arg)
 		ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
 		ax25_disconnect(ax25, ETIMEDOUT);
 	}
-	spin_unlock_bh(&ax25_list_lock);
+	spin_unlock(&ax25_list_lock);
 
 	ax25_dev_dama_off(ax25_dev);
 }
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index 77ba07c67682..07ac0207eb69 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -66,10 +66,10 @@ int ax25_protocol_register(unsigned int pid,
 	protocol->pid = pid;
 	protocol->func = func;
 
-	write_lock(&protocol_list_lock);
+	write_lock_bh(&protocol_list_lock);
 	protocol->next = protocol_list;
 	protocol_list = protocol;
-	write_unlock(&protocol_list_lock);
+	write_unlock_bh(&protocol_list_lock);
 
 	return 1;
 }
@@ -80,16 +80,16 @@ void ax25_protocol_release(unsigned int pid)
 {
 	struct protocol_struct *s, *protocol;
 
-	write_lock(&protocol_list_lock);
+	write_lock_bh(&protocol_list_lock);
 	protocol = protocol_list;
 	if (protocol == NULL) {
-		write_unlock(&protocol_list_lock);
+		write_unlock_bh(&protocol_list_lock);
 		return;
 	}
 
 	if (protocol->pid == pid) {
 		protocol_list = protocol->next;
-		write_unlock(&protocol_list_lock);
+		write_unlock_bh(&protocol_list_lock);
 		kfree(protocol);
 		return;
 	}
@@ -98,14 +98,14 @@ void ax25_protocol_release(unsigned int pid)
 		if (protocol->next->pid == pid) {
 			s = protocol->next;
 			protocol->next = protocol->next->next;
-			write_unlock(&protocol_list_lock);
+			write_unlock_bh(&protocol_list_lock);
 			kfree(s);
 			return;
 		}
 
 		protocol = protocol->next;
 	}
-	write_unlock(&protocol_list_lock);
+	write_unlock_bh(&protocol_list_lock);
 }
 
 EXPORT_SYMBOL(ax25_protocol_release);
@@ -266,13 +266,13 @@ int ax25_protocol_is_registered(unsigned int pid)
 	struct protocol_struct *protocol;
 	int res = 0;
 
-	read_lock(&protocol_list_lock);
+	read_lock_bh(&protocol_list_lock);
 	for (protocol = protocol_list; protocol != NULL; protocol = protocol->next)
 		if (protocol->pid == pid) {
 			res = 1;
 			break;
 		}
-	read_unlock(&protocol_list_lock);
+	read_unlock_bh(&protocol_list_lock);
 
 	return res;
 }
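
The two directions of locking changes in the ax25 files are consistent with one rule: code that already runs in softirq context (the DAMA timers and receive-path functions above) drops the _bh suffix because bottom halves cannot preempt there, while protocol_list_lock gains the _bh variants, presumably because its process-context users share it with softirq-context code. A sketch of the rule of thumb (illustrative functions, not from the tree):

/* Sketch: process context sharing a lock with softirq users must
 * disable BHs for the critical section, or a softirq arriving on
 * the same CPU could spin forever on the held lock. */
static void process_ctx_writer(struct protocol_struct *p)
{
	write_lock_bh(&protocol_list_lock);
	p->next = protocol_list;
	protocol_list = p;
	write_unlock_bh(&protocol_list_lock);
}

/* Code entered from softirq context may use the plain variant,
 * which is what the ax25_list_lock callers above now do. */
static int softirq_ctx_reader(unsigned int pid)
{
	struct protocol_struct *p;
	int found = 0;

	read_lock(&protocol_list_lock);
	for (p = protocol_list; p; p = p->next)
		if (p->pid == pid)
			found = 1;
	read_unlock(&protocol_list_lock);
	return found;
}
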
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index 4cf87540fb3a..e9d94291581e 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
@@ -102,8 +102,8 @@ static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb)
 int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
 {
 	int (*func)(struct sk_buff *, ax25_cb *);
-	volatile int queued = 0;
 	unsigned char pid;
+	int queued = 0;
 
 	if (skb == NULL) return 0;
 
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 6fb47e00e188..be04e9fb11f6 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -75,15 +75,13 @@
 
 static struct cmtp_application *cmtp_application_add(struct cmtp_session *session, __u16 appl)
 {
-	struct cmtp_application *app = kmalloc(sizeof(*app), GFP_KERNEL);
+	struct cmtp_application *app = kzalloc(sizeof(*app), GFP_KERNEL);
 
 	BT_DBG("session %p application %p appl %d", session, app, appl);
 
 	if (!app)
 		return NULL;
 
-	memset(app, 0, sizeof(*app));
-
 	app->state = BT_OPEN;
 	app->appl = appl;
 
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 182254a580e2..b81a01c64aea 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -335,10 +335,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
 	baswap(&src, &bt_sk(sock->sk)->src);
 	baswap(&dst, &bt_sk(sock->sk)->dst);
 
-	session = kmalloc(sizeof(struct cmtp_session), GFP_KERNEL);
+	session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
 	if (!session)
 		return -ENOMEM;
-	memset(session, 0, sizeof(struct cmtp_session));
 
 	down_write(&cmtp_session_sem);
 
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 54e8e5ea2154..5ed474277903 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -336,9 +336,8 @@ void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
 
 	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
 		/* Entry not in the cache. Add new one. */
-		if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
+		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
 			return;
-		memset(e, 0, sizeof(struct inquiry_entry));
 		e->next = cache->list;
 		cache->list = e;
 	}
@@ -800,12 +799,10 @@ struct hci_dev *hci_alloc_dev(void)
 {
 	struct hci_dev *hdev;
 
-	hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL);
+	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
 	if (!hdev)
 		return NULL;
 
-	memset(hdev, 0, sizeof(struct hci_dev));
-
 	skb_queue_head_init(&hdev->driver_init);
 
 	return hdev;
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig
index edfea772fb67..c6abf2a5a932 100644
--- a/net/bluetooth/hidp/Kconfig
+++ b/net/bluetooth/hidp/Kconfig
@@ -1,7 +1,6 @@
 config BT_HIDP
 	tristate "HIDP protocol support"
-	depends on BT && BT_L2CAP && (BROKEN || !S390)
-	select INPUT
+	depends on BT && BT_L2CAP && INPUT
 	help
 	  HIDP (Human Interface Device Protocol) is a transport layer
 	  for HID reports. HIDP is required for the Bluetooth Human
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index b9c24a55425c..c6e3a2c27c6e 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -582,10 +582,9 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
 	    bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst))
 		return -ENOTUNIQ;
 
-	session = kmalloc(sizeof(struct hidp_session), GFP_KERNEL);
+	session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
 	if (!session)
 		return -ENOMEM;
-	memset(session, 0, sizeof(struct hidp_session));
 
 	session->input = input_allocate_device();
 	if (!session->input) {
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index eaaad658d11d..d56f60b392ac 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -185,7 +185,7 @@ static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
 {
 	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
 
-	write_lock(&l->lock);
+	write_lock_bh(&l->lock);
 	if (sk == l->head)
 		l->head = next;
 
@@ -193,7 +193,7 @@ static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
 		l2cap_pi(next)->prev_c = prev;
 	if (prev)
 		l2cap_pi(prev)->next_c = next;
-	write_unlock(&l->lock);
+	write_unlock_bh(&l->lock);
 
 	__sock_put(sk);
 }
@@ -313,9 +313,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
 {
 	struct l2cap_chan_list *l = &conn->chan_list;
-	write_lock(&l->lock);
+	write_lock_bh(&l->lock);
 	__l2cap_chan_add(conn, sk, parent);
-	write_unlock(&l->lock);
+	write_unlock_bh(&l->lock);
 }
 
 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
@@ -328,14 +328,14 @@ static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
 	 *  200 - 254 are used by utilities like l2ping, etc.
 	 */
 
-	spin_lock(&conn->lock);
+	spin_lock_bh(&conn->lock);
 
 	if (++conn->tx_ident > 128)
 		conn->tx_ident = 1;
 
 	id = conn->tx_ident;
 
-	spin_unlock(&conn->lock);
+	spin_unlock_bh(&conn->lock);
 
 	return id;
 }
@@ -1416,11 +1416,11 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
 	if (!sk)
 		goto response;
 
-	write_lock(&list->lock);
+	write_lock_bh(&list->lock);
 
 	/* Check if we already have channel with that dcid */
 	if (__l2cap_get_chan_by_dcid(list, scid)) {
-		write_unlock(&list->lock);
+		write_unlock_bh(&list->lock);
 		sock_set_flag(sk, SOCK_ZAPPED);
 		l2cap_sock_kill(sk);
 		goto response;
@@ -1458,7 +1458,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
 	result = status = 0;
 
 done:
-	write_unlock(&list->lock);
+	write_unlock_bh(&list->lock);
 
 response:
 	bh_unlock_sock(parent);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 155a2b93760e..77eab8f4c7fd 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -273,10 +273,10 @@ static void rfcomm_dlc_clear_state(struct rfcomm_dlc *d)
 
 struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio)
 {
-	struct rfcomm_dlc *d = kmalloc(sizeof(*d), prio);
+	struct rfcomm_dlc *d = kzalloc(sizeof(*d), prio);
+
 	if (!d)
 		return NULL;
-	memset(d, 0, sizeof(*d));
 
 	init_timer(&d->timer);
 	d->timer.function = rfcomm_dlc_timeout;
@@ -289,6 +289,7 @@ struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio)
 	rfcomm_dlc_clear_state(d);
 
 	BT_DBG("%p", d);
+
 	return d;
 }
 
@@ -522,10 +523,10 @@ int rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig)
 /* ---- RFCOMM sessions ---- */
 static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state)
 {
-	struct rfcomm_session *s = kmalloc(sizeof(*s), GFP_KERNEL);
+	struct rfcomm_session *s = kzalloc(sizeof(*s), GFP_KERNEL);
+
 	if (!s)
 		return NULL;
-	memset(s, 0, sizeof(*s));
 
 	BT_DBG("session %p sock %p", s, sock);
 
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 2ff2d5b87c93..bd8d671a0ba6 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -169,10 +169,9 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
 
 	BT_DBG("id %d channel %d", req->dev_id, req->channel);
 
-	dev = kmalloc(sizeof(struct rfcomm_dev), GFP_KERNEL);
+	dev = kzalloc(sizeof(struct rfcomm_dev), GFP_KERNEL);
 	if (!dev)
 		return -ENOMEM;
-	memset(dev, 0, sizeof(struct rfcomm_dev));
 
 	write_lock_bh(&rfcomm_dev_lock);
 
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 85defccc0287..7714a2ec3854 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -108,17 +108,14 @@ static void sco_sock_init_timer(struct sock *sk)
 static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
 {
 	struct hci_dev *hdev = hcon->hdev;
-	struct sco_conn *conn;
-
-	if ((conn = hcon->sco_data))
-		return conn;
+	struct sco_conn *conn = hcon->sco_data;
 
-	if (status)
+	if (conn || status)
 		return conn;
 
-	if (!(conn = kmalloc(sizeof(struct sco_conn), GFP_ATOMIC)))
+	conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
+	if (!conn)
 		return NULL;
-	memset(conn, 0, sizeof(struct sco_conn));
 
 	spin_lock_init(&conn->lock);
 
@@ -134,6 +131,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
 	conn->mtu = 60;
 
 	BT_DBG("hcon %p conn %p", hcon, conn);
+
 	return conn;
 }
 
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 8be9f2123e54..6ccd32b30809 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -35,7 +35,7 @@ static inline unsigned packet_length(const struct sk_buff *skb)
 int br_dev_queue_push_xmit(struct sk_buff *skb)
 {
 	/* drop mtu oversized packets except gso */
-	if (packet_length(skb) > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
+	if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
 		kfree_skb(skb);
 	else {
 #ifdef CONFIG_BRIDGE_NETFILTER
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 8298a5179aef..cbc8a389a0a8 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -761,7 +761,7 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
 	if (skb->protocol == htons(ETH_P_IP) &&
 	    skb->len > skb->dev->mtu &&
-	    !skb_shinfo(skb)->gso_size)
+	    !skb_is_gso(skb))
 		return ip_fragment(skb, br_dev_queue_push_xmit);
 	else
 		return br_dev_queue_push_xmit(skb);
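
skb_is_gso() replaces the open-coded gso_size tests in the two bridge hunks above and elsewhere in this series; at the time the helper amounted to something like the following (a sketch; the exact definition lives in include/linux/skbuff.h):

/* Sketch: a nonzero gso_size marks a GSO packet, which may
 * legitimately exceed the device MTU and must not be dropped
 * or fragmented here. */
static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}
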
diff --git a/net/core/dev.c b/net/core/dev.c
index 066a60a75280..4d2b5167d7f5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1162,9 +1162,17 @@ int skb_checksum_help(struct sk_buff *skb, int inward)
 	unsigned int csum;
 	int ret = 0, offset = skb->h.raw - skb->data;
 
-	if (inward) {
-		skb->ip_summed = CHECKSUM_NONE;
-		goto out;
+	if (inward)
+		goto out_set_summed;
+
+	if (unlikely(skb_shinfo(skb)->gso_size)) {
+		static int warned;
+
+		WARN_ON(!warned);
+		warned = 1;
+
+		/* Let GSO fix up the checksum. */
+		goto out_set_summed;
 	}
 
 	if (skb_cloned(skb)) {
@@ -1181,6 +1189,8 @@ int skb_checksum_help(struct sk_buff *skb, int inward)
 	BUG_ON(skb->csum + 2 > offset);
 
 	*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
+
+out_set_summed:
 	skb->ip_summed = CHECKSUM_NONE;
 out:
 	return ret;
@@ -1201,17 +1211,35 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
 	struct packet_type *ptype;
 	int type = skb->protocol;
+	int err;
 
 	BUG_ON(skb_shinfo(skb)->frag_list);
-	BUG_ON(skb->ip_summed != CHECKSUM_HW);
 
 	skb->mac.raw = skb->data;
 	skb->mac_len = skb->nh.raw - skb->data;
 	__skb_pull(skb, skb->mac_len);
 
+	if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
+		static int warned;
+
+		WARN_ON(!warned);
+		warned = 1;
+
+		if (skb_header_cloned(skb) &&
+		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+			return ERR_PTR(err);
+	}
+
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
 		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
+			if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
+				err = ptype->gso_send_check(skb);
+				segs = ERR_PTR(err);
+				if (err || skb_gso_ok(skb, features))
+					break;
+				__skb_push(skb, skb->data - skb->nh.raw);
+			}
 			segs = ptype->gso_segment(skb, features);
 			break;
 		}
@@ -1727,7 +1755,7 @@ static int ing_filter(struct sk_buff *skb)
 	if (dev->qdisc_ingress) {
 		__u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
 		if (MAX_RED_LOOP < ttl++) {
-			printk("Redir loop detected Dropping packet (%s->%s)\n",
+			printk(KERN_WARNING "Redir loop detected Dropping packet (%s->%s)\n",
 			       skb->input_dev->name, skb->dev->name);
 			return TC_ACT_SHOT;
 		}
@@ -2922,7 +2950,7 @@ int register_netdevice(struct net_device *dev)
 	/* Fix illegal SG+CSUM combinations. */
 	if ((dev->features & NETIF_F_SG) &&
 	    !(dev->features & NETIF_F_ALL_CSUM)) {
-		printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
+		printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
 		       dev->name);
 		dev->features &= ~NETIF_F_SG;
 	}
@@ -2930,7 +2958,7 @@ int register_netdevice(struct net_device *dev)
 	/* TSO requires that SG is present as well. */
 	if ((dev->features & NETIF_F_TSO) &&
 	    !(dev->features & NETIF_F_SG)) {
-		printk("%s: Dropping NETIF_F_TSO since no SG feature.\n",
+		printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
 		       dev->name);
 		dev->features &= ~NETIF_F_TSO;
 	}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 44f6a181a754..476aa3978504 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -257,11 +257,11 @@ nodata:
 }
 
 
-static void skb_drop_fraglist(struct sk_buff *skb)
+static void skb_drop_list(struct sk_buff **listp)
 {
-	struct sk_buff *list = skb_shinfo(skb)->frag_list;
+	struct sk_buff *list = *listp;
 
-	skb_shinfo(skb)->frag_list = NULL;
+	*listp = NULL;
 
 	do {
 		struct sk_buff *this = list;
@@ -270,6 +270,11 @@ static void skb_drop_fraglist(struct sk_buff *skb)
 	} while (list);
 }
 
+static inline void skb_drop_fraglist(struct sk_buff *skb)
+{
+	skb_drop_list(&skb_shinfo(skb)->frag_list);
+}
+
 static void skb_clone_fraglist(struct sk_buff *skb)
 {
 	struct sk_buff *list;
@@ -830,41 +835,75 @@ free_skb:
 
 int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 {
+	struct sk_buff **fragp;
+	struct sk_buff *frag;
 	int offset = skb_headlen(skb);
 	int nfrags = skb_shinfo(skb)->nr_frags;
 	int i;
+	int err;
+
+	if (skb_cloned(skb) &&
+	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
+		return err;
 
 	for (i = 0; i < nfrags; i++) {
 		int end = offset + skb_shinfo(skb)->frags[i].size;
-		if (end > len) {
-			if (skb_cloned(skb)) {
-				if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
-					return -ENOMEM;
-			}
-			if (len <= offset) {
-				put_page(skb_shinfo(skb)->frags[i].page);
-				skb_shinfo(skb)->nr_frags--;
-			} else {
-				skb_shinfo(skb)->frags[i].size = len - offset;
-			}
+
+		if (end < len) {
+			offset = end;
+			continue;
 		}
-		offset = end;
+
+		if (len > offset)
+			skb_shinfo(skb)->frags[i++].size = len - offset;
+
+		skb_shinfo(skb)->nr_frags = i;
+
+		for (; i < nfrags; i++)
+			put_page(skb_shinfo(skb)->frags[i].page);
+
+		if (skb_shinfo(skb)->frag_list)
+			skb_drop_fraglist(skb);
+		break;
 	}
 
-	if (offset < len) {
+	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
+	     fragp = &frag->next) {
+		int end = offset + frag->len;
+
+		if (skb_shared(frag)) {
+			struct sk_buff *nfrag;
+
+			nfrag = skb_clone(frag, GFP_ATOMIC);
+			if (unlikely(!nfrag))
+				return -ENOMEM;
+
+			nfrag->next = frag->next;
+			frag = nfrag;
+			*fragp = frag;
+		}
+
+		if (end < len) {
+			offset = end;
+			continue;
+		}
+
+		if (end > len &&
+		    unlikely((err = pskb_trim(frag, len - offset))))
+			return err;
+
+		if (frag->next)
+			skb_drop_list(&frag->next);
+		break;
+	}
+
+	if (len > skb_headlen(skb)) {
 		skb->data_len -= skb->len - len;
 		skb->len = len;
 	} else {
-		if (len <= skb_headlen(skb)) {
-			skb->len = len;
-			skb->data_len = 0;
-			skb->tail = skb->data + len;
-			if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
-				skb_drop_fraglist(skb);
-		} else {
-			skb->data_len -= skb->len - len;
-			skb->len = len;
-		}
+		skb->len = len;
+		skb->data_len = 0;
+		skb->tail = skb->data + len;
 	}
 
 	return 0;
diff --git a/net/core/stream.c b/net/core/stream.c
index e9489696f694..d1d7decf70b0 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -196,15 +196,13 @@ EXPORT_SYMBOL(sk_stream_error);
 
 void __sk_stream_mem_reclaim(struct sock *sk)
 {
-	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM) {
-		atomic_sub(sk->sk_forward_alloc / SK_STREAM_MEM_QUANTUM,
-			   sk->sk_prot->memory_allocated);
-		sk->sk_forward_alloc &= SK_STREAM_MEM_QUANTUM - 1;
-		if (*sk->sk_prot->memory_pressure &&
-		    (atomic_read(sk->sk_prot->memory_allocated) <
-		     sk->sk_prot->sysctl_mem[0]))
-			*sk->sk_prot->memory_pressure = 0;
-	}
+	atomic_sub(sk->sk_forward_alloc / SK_STREAM_MEM_QUANTUM,
+		   sk->sk_prot->memory_allocated);
+	sk->sk_forward_alloc &= SK_STREAM_MEM_QUANTUM - 1;
+	if (*sk->sk_prot->memory_pressure &&
+	    (atomic_read(sk->sk_prot->memory_allocated) <
+	     sk->sk_prot->sysctl_mem[0]))
+		*sk->sk_prot->memory_pressure = 0;
 }
 
 EXPORT_SYMBOL(__sk_stream_mem_reclaim);
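
Removing the outer if from __sk_stream_mem_reclaim() is safe because the body already degenerates to a no-op below one quantum: the integer division contributes 0 to atomic_sub(), and masking with quantum - 1 leaves a sub-quantum remainder untouched. The one behavioral difference is that the memory_pressure relief check now runs unconditionally. A standalone arithmetic check (the 4096 quantum is illustrative; SK_STREAM_MEM_QUANTUM is a power of two):

#include <assert.h>

int main(void)
{
	const int quantum = 4096;	/* stand-in for SK_STREAM_MEM_QUANTUM */
	int fwd = 3000;			/* forward_alloc below one quantum */

	assert(fwd / quantum == 0);		/* atomic_sub(0, ...): no-op */
	assert((fwd & (quantum - 1)) == fwd);	/* remainder kept as-is */
	return 0;
}
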
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index f4f0627ea41c..6f14bb5a28d4 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -484,7 +484,7 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
 			err = -EINVAL;
 		else
 			err = dccp_setsockopt_change(sk, DCCPO_CHANGE_L,
-						     (struct dccp_so_feat *)
+						     (struct dccp_so_feat __user *)
 						     optval);
 		break;
 
@@ -493,7 +493,7 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
 			err = -EINVAL;
 		else
 			err = dccp_setsockopt_change(sk, DCCPO_CHANGE_R,
-						     (struct dccp_so_feat *)
+						     (struct dccp_so_feat __user *)
 						     optval);
 		break;
 
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 06e785fe5757..22f321d9bf9d 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -399,9 +399,10 @@ int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
 	rcu_read_lock();
 	hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
 		if (idx < s_idx)
-			continue;
+			goto next;
 		if (dn_fib_fill_rule(skb, r, cb, NLM_F_MULTI) < 0)
 			break;
+next:
 		idx++;
 	}
 	rcu_read_unlock();
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index da33393be45f..8514106761b0 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -572,16 +572,6 @@ config TCP_CONG_VENO
 	  loss packets.
 	  See http://www.ntu.edu.sg/home5/ZHOU0022/papers/CPFu03a.pdf
 
-config TCP_CONG_COMPOUND
-	tristate "TCP Compound"
-	depends on EXPERIMENTAL
-	default n
-	---help---
-	TCP Compound is a sender-side only change to TCP that uses
-	a mixed Reno/Vegas approach to calculate the cwnd.
-	For further details look here:
-	ftp://ftp.research.microsoft.com/pub/tr/TR-2005-86.pdf
-
 endmenu
 
 config TCP_CONG_BIC
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 38b8039bdd55..4878fc5be85f 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -47,7 +47,6 @@ obj-$(CONFIG_TCP_CONG_VEGAS) += tcp_vegas.o
 obj-$(CONFIG_TCP_CONG_VENO) += tcp_veno.o
 obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
 obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
-obj-$(CONFIG_TCP_CONG_COMPOUND) += tcp_compound.o
 
 obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
 		      xfrm4_output.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 318d4674faa1..c84a32070f8d 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1097,6 +1097,40 @@ int inet_sk_rebuild_header(struct sock *sk)
 
 EXPORT_SYMBOL(inet_sk_rebuild_header);
 
+static int inet_gso_send_check(struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	struct net_protocol *ops;
+	int proto;
+	int ihl;
+	int err = -EINVAL;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
+		goto out;
+
+	iph = skb->nh.iph;
+	ihl = iph->ihl * 4;
+	if (ihl < sizeof(*iph))
+		goto out;
+
+	if (unlikely(!pskb_may_pull(skb, ihl)))
+		goto out;
+
+	skb->h.raw = __skb_pull(skb, ihl);
+	iph = skb->nh.iph;
+	proto = iph->protocol & (MAX_INET_PROTOS - 1);
+	err = -EPROTONOSUPPORT;
+
+	rcu_read_lock();
+	ops = rcu_dereference(inet_protos[proto]);
+	if (likely(ops && ops->gso_send_check))
+		err = ops->gso_send_check(skb);
+	rcu_read_unlock();
+
+out:
+	return err;
+}
+
 static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
@@ -1162,6 +1196,7 @@ static struct net_protocol igmp_protocol = {
 static struct net_protocol tcp_protocol = {
 	.handler = tcp_v4_rcv,
 	.err_handler = tcp_v4_err,
+	.gso_send_check = tcp_v4_gso_send_check,
 	.gso_segment = tcp_tso_segment,
 	.no_policy = 1,
 };
@@ -1208,6 +1243,7 @@ static int ipv4_proc_init(void);
 static struct packet_type ip_packet_type = {
 	.type = __constant_htons(ETH_P_IP),
 	.func = ip_rcv,
+	.gso_send_check = inet_gso_send_check,
 	.gso_segment = inet_gso_segment,
 };
 
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 6c642d11d4ca..773b12ba4e3c 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -457,13 +457,13 @@ int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
 
 	rcu_read_lock();
 	hlist_for_each_entry(r, node, &fib_rules, hlist) {
-
 		if (idx < s_idx)
-			continue;
+			goto next;
 		if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid,
 				   cb->nlh->nlmsg_seq,
 				   RTM_NEWRULE, NLM_F_MULTI) < 0)
 			break;
+next:
 		idx++;
 	}
 	rcu_read_unlock();
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 1cb65305e102..23fb9d9768e3 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1252,8 +1252,8 @@ fn_trie_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
 	 */
 
 	if (!fa_head) {
-		fa_head = fib_insert_node(t, &err, key, plen);
 		err = 0;
+		fa_head = fib_insert_node(t, &err, key, plen);
 		if (err)
 			goto out_free_new_fa;
 	}
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 2160874ce7aa..03ff62ebcfeb 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -86,7 +86,7 @@ static struct inet_peer *peer_root = peer_avl_empty;
 static DEFINE_RWLOCK(peer_pool_lock);
 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
 
-static volatile int peer_total;
+static int peer_total;
 /* Exported for sysctl_net_ipv4. */
 int inet_peer_threshold = 65536 + 128;	/* start to throw entries more
 					 * aggressively at this stage */
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index e1a7dba2fa8a..184c78ca79e6 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -428,6 +428,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 		goto drop;
 	}
 
+	/* Remove any debris in the socket control block */
+	memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
+
 	return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL,
 		       ip_rcv_finish);
 
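
The new ip_rcv() hunk zeroes the IP options area of the skb control block before the packet is handed to netfilter and the rest of the IPv4 stack. skb->cb is a scratch area that every layer reuses, so whatever the previous owner stored there would otherwise be misread as parsed IP options. Roughly how the pieces relate (a sketch; the real struct has more fields):

/* Sketch: IPCB() reinterprets the generic skb->cb scratch space
 * as IPv4-private state, whose first member holds the parsed
 * options that the hunk above clears. */
#define IPCB(skb) ((struct inet_skb_parm *)((skb)->cb))

struct inet_skb_parm {
	struct ip_options opt;	/* zeroed by ip_rcv() above */
	/* ... remaining fields abridged ... */
};
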
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ca0e714613fb..7c9f9a6421b8 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -209,7 +209,7 @@ static inline int ip_finish_output(struct sk_buff *skb)
 		return dst_output(skb);
 	}
 #endif
-	if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size)
+	if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
 		return ip_fragment(skb, ip_finish_output2);
 	else
 		return ip_finish_output2(skb);
@@ -1095,7 +1095,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
 	while (size > 0) {
 		int i;
 
-		if (skb_shinfo(skb)->gso_size)
+		if (skb_is_gso(skb))
 			len = size;
 		else {
 
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 8e0374847532..8a8b5cf2f7fe 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -70,7 +70,8 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
 	if (err)
 		goto out;
 
-	skb_put(skb, dlen - plen);
+	skb->truesize += dlen - plen;
+	__skb_put(skb, dlen - plen);
 	memcpy(skb->data, scratch, dlen);
 out:
 	put_cpu();
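
The ipcomp hunk does two related things: it charges the bytes gained by decompression to skb->truesize, which socket receive-buffer accounting is based on, and it switches to __skb_put(), the unchecked variant of skb_put(). Neither helper adjusts truesize itself, hence the explicit fixup. A simplified sketch of what the put helpers do (not the kernel's exact code):

/* Sketch: extend the skb data area by len bytes; skb_put() would
 * additionally panic if the new tail passed skb->end, while
 * __skb_put() trusts the caller. truesize is untouched by both. */
static unsigned char *sketch_skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;

	skb->tail += len;
	skb->len += len;
	return tmp;
}
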
diff --git a/net/ipv4/tcp_compound.c b/net/ipv4/tcp_compound.c deleted file mode 100644 index bc54f7e9aea9..000000000000 --- a/net/ipv4/tcp_compound.c +++ /dev/null | |||
@@ -1,448 +0,0 @@ | |||
1 | /* | ||
2 | * TCP Vegas congestion control | ||
3 | * | ||
4 | * This is based on the congestion detection/avoidance scheme described in | ||
5 | * Lawrence S. Brakmo and Larry L. Peterson. | ||
6 | * "TCP Vegas: End to end congestion avoidance on a global internet." | ||
7 | * IEEE Journal on Selected Areas in Communication, 13(8):1465--1480, | ||
8 | * October 1995. Available from: | ||
9 | * ftp://ftp.cs.arizona.edu/xkernel/Papers/jsac.ps | ||
10 | * | ||
11 | * See http://www.cs.arizona.edu/xkernel/ for their implementation. | ||
12 | * The main aspects that distinguish this implementation from the | ||
13 | * Arizona Vegas implementation are: | ||
14 | * o We do not change the loss detection or recovery mechanisms of | ||
15 | * Linux in any way. Linux already recovers from losses quite well, | ||
16 | * using fine-grained timers, NewReno, and FACK. | ||
17 | * o To avoid the performance penalty imposed by increasing cwnd | ||
18 | * only every-other RTT during slow start, we increase during | ||
19 | * every RTT during slow start, just like Reno. | ||
20 | * o Largely to allow continuous cwnd growth during slow start, | ||
21 | * we use the rate at which ACKs come back as the "actual" | ||
22 | * rate, rather than the rate at which data is sent. | ||
23 | * o To speed convergence to the right rate, we set the cwnd | ||
24 | * to achieve the right ("actual") rate when we exit slow start. | ||
25 | * o To filter out the noise caused by delayed ACKs, we use the | ||
26 | * minimum RTT sample observed during the last RTT to calculate | ||
27 | * the actual rate. | ||
28 | * o When the sender re-starts from idle, it waits until it has | ||
29 | * received ACKs for an entire flight of new data before making | ||
30 | * a cwnd adjustment decision. The original Vegas implementation | ||
31 | * assumed senders never went idle. | ||
32 | * | ||
33 | * | ||
34 | * TCP Compound based on TCP Vegas | ||
35 | * | ||
36 | * further details can be found here: | ||
37 | * ftp://ftp.research.microsoft.com/pub/tr/TR-2005-86.pdf | ||
38 | */ | ||
39 | |||
40 | #include <linux/config.h> | ||
41 | #include <linux/mm.h> | ||
42 | #include <linux/module.h> | ||
43 | #include <linux/skbuff.h> | ||
44 | #include <linux/inet_diag.h> | ||
45 | |||
46 | #include <net/tcp.h> | ||
47 | |||
48 | /* Default values of the Vegas variables, in fixed-point representation | ||
49 | * with V_PARAM_SHIFT bits to the right of the binary point. | ||
50 | */ | ||
51 | #define V_PARAM_SHIFT 1 | ||
52 | |||
53 | #define TCP_COMPOUND_ALPHA 3U | ||
54 | #define TCP_COMPOUND_BETA 1U | ||
55 | #define TCP_COMPOUND_GAMMA 30 | ||
56 | #define TCP_COMPOUND_ZETA 1 | ||
57 | |||
58 | /* TCP compound variables */ | ||
59 | struct compound { | ||
60 | u32 beg_snd_nxt; /* right edge during last RTT */ | ||
61 | u32 beg_snd_una; /* left edge during last RTT */ | ||
62 | u32 beg_snd_cwnd; /* saves the size of the cwnd */ | ||
63 | u8 doing_vegas_now; /* if true, do vegas for this RTT */ | ||
64 | u16 cntRTT; /* # of RTTs measured within last RTT */ | ||
65 | u32 minRTT; /* min of RTTs measured within last RTT (in usec) */ | ||
66 | u32 baseRTT; /* the min of all Vegas RTT measurements seen (in usec) */ | ||
67 | |||
68 | u32 cwnd; | ||
69 | u32 dwnd; | ||
70 | }; | ||
71 | |||
72 | /* There are several situations when we must "re-start" Vegas: | ||
73 | * | ||
74 | * o when a connection is established | ||
75 | * o after an RTO | ||
76 | * o after fast recovery | ||
77 | * o when we send a packet and there is no outstanding | ||
78 | * unacknowledged data (restarting an idle connection) | ||
79 | * | ||
80 | * In these circumstances we cannot do a Vegas calculation at the | ||
81 | * end of the first RTT, because any calculation we do is using | ||
82 | * stale info -- both the saved cwnd and congestion feedback are | ||
83 | * stale. | ||
84 | * | ||
85 | * Instead we must wait until the completion of an RTT during | ||
86 | * which we actually receive ACKs. | ||
87 | */ | ||
88 | static inline void vegas_enable(struct sock *sk) | ||
89 | { | ||
90 | const struct tcp_sock *tp = tcp_sk(sk); | ||
91 | struct compound *vegas = inet_csk_ca(sk); | ||
92 | |||
93 | /* Begin taking Vegas samples next time we send something. */ | ||
94 | vegas->doing_vegas_now = 1; | ||
95 | |||
96 | /* Set the beginning of the next send window. */ | ||
97 | vegas->beg_snd_nxt = tp->snd_nxt; | ||
98 | |||
99 | vegas->cntRTT = 0; | ||
100 | vegas->minRTT = 0x7fffffff; | ||
101 | } | ||
102 | |||
103 | /* Stop taking Vegas samples for now. */ | ||
104 | static inline void vegas_disable(struct sock *sk) | ||
105 | { | ||
106 | struct compound *vegas = inet_csk_ca(sk); | ||
107 | |||
108 | vegas->doing_vegas_now = 0; | ||
109 | } | ||
110 | |||
111 | static void tcp_compound_init(struct sock *sk) | ||
112 | { | ||
113 | struct compound *vegas = inet_csk_ca(sk); | ||
114 | const struct tcp_sock *tp = tcp_sk(sk); | ||
115 | |||
116 | vegas->baseRTT = 0x7fffffff; | ||
117 | vegas_enable(sk); | ||
118 | |||
119 | vegas->dwnd = 0; | ||
120 | vegas->cwnd = tp->snd_cwnd; | ||
121 | } | ||
122 | |||
123 | /* Do RTT sampling needed for Vegas. | ||
124 | * Basically we: | ||
125 | * o min-filter RTT samples from within an RTT to get the current | ||
126 | * propagation delay + queuing delay (we are min-filtering to try to | ||
127 | * avoid the effects of delayed ACKs) | ||
128 | * o min-filter RTT samples from a much longer window (forever for now) | ||
129 | * to find the propagation delay (baseRTT) | ||
130 | */ | ||
131 | static void tcp_compound_rtt_calc(struct sock *sk, u32 usrtt) | ||
132 | { | ||
133 | struct compound *vegas = inet_csk_ca(sk); | ||
134 | u32 vrtt = usrtt + 1; /* Never allow zero rtt or baseRTT */ | ||
135 | |||
136 | /* Filter to find propagation delay: */ | ||
137 | if (vrtt < vegas->baseRTT) | ||
138 | vegas->baseRTT = vrtt; | ||
139 | |||
140 | /* Find the min RTT during the last RTT to find | ||
141 | * the current prop. delay + queuing delay: | ||
142 | */ | ||
143 | |||
144 | vegas->minRTT = min(vegas->minRTT, vrtt); | ||
145 | vegas->cntRTT++; | ||
146 | } | ||
147 | |||
148 | static void tcp_compound_state(struct sock *sk, u8 ca_state) | ||
149 | { | ||
150 | |||
151 | if (ca_state == TCP_CA_Open) | ||
152 | vegas_enable(sk); | ||
153 | else | ||
154 | vegas_disable(sk); | ||
155 | } | ||
156 | |||
157 | |||
158 | /* 64bit divisor, dividend and result. dynamic precision */ | ||
159 | static inline u64 div64_64(u64 dividend, u64 divisor) | ||
160 | { | ||
161 | u32 d = divisor; | ||
162 | |||
163 | if (divisor > 0xffffffffULL) { | ||
164 | unsigned int shift = fls(divisor >> 32); | ||
165 | |||
166 | d = divisor >> shift; | ||
167 | dividend >>= shift; | ||
168 | } | ||
169 | |||
170 | /* avoid 64 bit division if possible */ | ||
171 | if (dividend >> 32) | ||
172 | do_div(dividend, d); | ||
173 | else | ||
174 | dividend = (u32) dividend / d; | ||
175 | |||
176 | return dividend; | ||
177 | } | ||
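/*
 * (Illustrative worked example, not part of the patch.)  For a divisor
 * wider than 32 bits, both operands are pre-shifted so a 32-bit divide
 * suffices, at the cost of a little precision: with divisor = 2^33 + 9,
 * divisor >> 32 = 2, so shift = fls(2) = 2 and the quotient is
 * approximated by (dividend >> 2) / (divisor >> 2).
 */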
178 | |||
179 | /* calculate the quartic root of "a" using Newton-Raphson */ | ||
180 | static u32 qroot(u64 a) | ||
181 | { | ||
182 | u32 x, x1; | ||
183 | |||
184 | /* Initial estimate is based on: | ||
185 | * qrt(x) = exp(log(x) / 4) | ||
186 | */ | ||
187 | x = 1u << (fls64(a) >> 2); | ||
188 | |||
189 | /* | ||
190 | * Iteration based on: | ||
191 | * x_{k+1} = ( 3 * x_k + a / x_k^3 ) / 4 | ||
192 | * | ||
193 | * (one Newton-Raphson step toward the quartic root of "a") | ||
194 | */ | ||
195 | do { | ||
196 | u64 x3 = x; | ||
197 | |||
198 | x1 = x; | ||
199 | x3 *= x; | ||
200 | x3 *= x; | ||
201 | |||
202 | x = (3 * x + (u32) div64_64(a, x3)) / 4; | ||
203 | } while (abs(x1 - x) > 1); | ||
204 | |||
205 | return x; | ||
206 | } | ||
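/*
 * (Illustrative worked example, not part of the patch.)  For a = 10000:
 * fls64(10000) = 14, so the initial estimate is x = 1 << (14 >> 2) = 8.
 * The first step gives x = (3*8 + 10000/512) / 4 = (24 + 19) / 4 = 10;
 * the second step leaves x at 10, the loop exits with |x1 - x| <= 1,
 * and qroot() returns 10, the exact quartic root of 10000.
 */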
207 | |||
208 | |||
209 | /* | ||
210 | * If the connection is idle and we are restarting, | ||
211 | * then we don't want to do any Vegas calculations | ||
212 | * until we get fresh RTT samples. So when we | ||
213 | * restart, we reset our Vegas state to a clean | ||
214 | * slate. After we get acks for this flight of | ||
215 | * packets, _then_ we can make Vegas calculations | ||
216 | * again. | ||
217 | */ | ||
218 | static void tcp_compound_cwnd_event(struct sock *sk, enum tcp_ca_event event) | ||
219 | { | ||
220 | if (event == CA_EVENT_CWND_RESTART || event == CA_EVENT_TX_START) | ||
221 | tcp_compound_init(sk); | ||
222 | } | ||
223 | |||
224 | static void tcp_compound_cong_avoid(struct sock *sk, u32 ack, | ||
225 | u32 seq_rtt, u32 in_flight, int flag) | ||
226 | { | ||
227 | struct tcp_sock *tp = tcp_sk(sk); | ||
228 | struct compound *vegas = inet_csk_ca(sk); | ||
229 | u8 inc = 0; | ||
230 | |||
231 | if (vegas->cwnd + vegas->dwnd > tp->snd_cwnd) { | ||
232 | if (vegas->cwnd > tp->snd_cwnd || vegas->dwnd > tp->snd_cwnd) { | ||
233 | vegas->cwnd = tp->snd_cwnd; | ||
234 | vegas->dwnd = 0; | ||
235 | } else | ||
236 | vegas->cwnd = tp->snd_cwnd - vegas->dwnd; | ||
237 | |||
238 | } | ||
239 | |||
240 | if (!tcp_is_cwnd_limited(sk, in_flight)) | ||
241 | return; | ||
242 | |||
243 | if (vegas->cwnd <= tp->snd_ssthresh) | ||
244 | inc = 1; | ||
245 | else if (tp->snd_cwnd_cnt < tp->snd_cwnd) | ||
246 | tp->snd_cwnd_cnt++; | ||
247 | |||
248 | if (tp->snd_cwnd_cnt >= tp->snd_cwnd) { | ||
249 | inc = 1; | ||
250 | tp->snd_cwnd_cnt = 0; | ||
251 | } | ||
252 | |||
253 | if (inc && tp->snd_cwnd < tp->snd_cwnd_clamp) | ||
254 | vegas->cwnd++; | ||
255 | |||
256 | /* The key players are v_beg_snd_una and v_beg_snd_nxt. | ||
257 | * | ||
258 | * These are so named because they represent the approximate values | ||
259 | * of snd_una and snd_nxt at the beginning of the current RTT. More | ||
260 | * precisely, they represent the amount of data sent during the RTT. | ||
261 | * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt, | ||
262 | * we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding | ||
263 | * bytes of data have been ACKed during the course of the RTT, giving | ||
264 | * an "actual" rate of: | ||
265 | * | ||
266 | * (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration) | ||
267 | * | ||
268 | * Unfortunately, v_beg_snd_una is not exactly equal to snd_una, | ||
269 | * because delayed ACKs can cover more than one segment, so they | ||
270 | * don't line up nicely with the boundaries of RTTs. | ||
271 | * | ||
272 | * Another unfortunate fact of life is that delayed ACKs delay the | ||
273 | * advance of the left edge of our send window, so that the number | ||
274 | * of bytes we send in an RTT is often less than our cwnd will allow. | ||
275 | * So we keep track of our cwnd separately, in v_beg_snd_cwnd. | ||
276 | */ | ||
277 | |||
278 | if (after(ack, vegas->beg_snd_nxt)) { | ||
279 | /* Do the Vegas once-per-RTT cwnd adjustment. */ | ||
280 | u32 old_wnd, old_snd_cwnd; | ||
281 | |||
282 | /* Here old_wnd is essentially the window of data that was | ||
283 | * sent during the previous RTT, and has all | ||
284 | * been acknowledged in the course of the RTT that ended | ||
285 | * with the ACK we just received. Likewise, old_snd_cwnd | ||
286 | * is the cwnd during the previous RTT. | ||
287 | */ | ||
288 | if (!tp->mss_cache) | ||
289 | return; | ||
290 | |||
291 | old_wnd = (vegas->beg_snd_nxt - vegas->beg_snd_una) / | ||
292 | tp->mss_cache; | ||
293 | old_snd_cwnd = vegas->beg_snd_cwnd; | ||
294 | |||
295 | /* Save the extent of the current window so we can use this | ||
296 | * at the end of the next RTT. | ||
297 | */ | ||
298 | vegas->beg_snd_una = vegas->beg_snd_nxt; | ||
299 | vegas->beg_snd_nxt = tp->snd_nxt; | ||
300 | vegas->beg_snd_cwnd = tp->snd_cwnd; | ||
301 | |||
302 | /* We do the Vegas calculations only if we got enough RTT | ||
303 | * samples that we can be reasonably sure that we got | ||
304 | * at least one RTT sample that wasn't from a delayed ACK. | ||
305 | * If we only had 2 samples total, | ||
306 | * then that means we're getting only 1 ACK per RTT, which | ||
307 | * means they're almost certainly delayed ACKs. | ||
308 | * If we have 3 samples, we should be OK. | ||
309 | */ | ||
310 | |||
311 | if (vegas->cntRTT > 2) { | ||
312 | u32 rtt, target_cwnd, diff; | ||
313 | u32 brtt, dwnd; | ||
314 | |||
315 | /* We have enough RTT samples, so, using the Vegas | ||
316 | * algorithm, we determine if we should increase or | ||
317 | * decrease cwnd, and by how much. | ||
318 | */ | ||
319 | |||
320 | /* Pluck out the RTT we are using for the Vegas | ||
321 | * calculations. This is the min RTT seen during the | ||
322 | * last RTT. Taking the min filters out the effects | ||
323 | * of delayed ACKs, at the cost of noticing congestion | ||
324 | * a bit later. | ||
325 | */ | ||
326 | rtt = vegas->minRTT; | ||
327 | |||
328 | /* Calculate the cwnd we should have, if we weren't | ||
329 | * going too fast. | ||
330 | * | ||
331 | * This is: | ||
332 | * (actual rate in segments) * baseRTT | ||
333 | * We keep it as a fixed point number with | ||
334 | * V_PARAM_SHIFT bits to the right of the binary point. | ||
335 | */ | ||
336 | if (!rtt) | ||
337 | return; | ||
338 | |||
339 | brtt = vegas->baseRTT; | ||
340 | target_cwnd = ((old_wnd * brtt) | ||
341 | << V_PARAM_SHIFT) / rtt; | ||
342 | |||
343 | /* Calculate the difference between the window we had, | ||
344 | * and the window we would like to have. This quantity | ||
345 | * is the "Diff" from the Arizona Vegas papers. | ||
346 | * | ||
347 | * Again, this is a fixed point number with | ||
348 | * V_PARAM_SHIFT bits to the right of the binary | ||
349 | * point. | ||
350 | */ | ||
351 | |||
352 | diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd; | ||
353 | |||
354 | dwnd = vegas->dwnd; | ||
355 | |||
356 | if (diff < (TCP_COMPOUND_GAMMA << V_PARAM_SHIFT)) { | ||
357 | u64 v; | ||
358 | u32 x; | ||
359 | |||
360 | /* | ||
361 | * The TCP Compound paper describes how the choice | ||
362 | * of "k" determines the aggressiveness, | ||
363 | * i.e. the slope of the response function. | ||
364 | * | ||
365 | * For the same response as HSTCP, k would be 0.8, | ||
366 | * but for computational reasons, both the | ||
367 | * original authors and this implementation | ||
368 | * use 0.75. | ||
369 | */ | ||
370 | v = old_wnd; | ||
371 | x = qroot(v * v * v) >> TCP_COMPOUND_ALPHA; | ||
372 | if (x > 1) | ||
373 | dwnd = x - 1; | ||
374 | else | ||
375 | dwnd = 0; | ||
376 | |||
377 | dwnd += vegas->dwnd; | ||
378 | |||
379 | } else if ((dwnd << V_PARAM_SHIFT) < | ||
380 | (diff * TCP_COMPOUND_BETA)) | ||
381 | dwnd = 0; | ||
382 | else | ||
383 | dwnd = | ||
384 | ((dwnd << V_PARAM_SHIFT) - | ||
385 | (diff * | ||
386 | TCP_COMPOUND_BETA)) >> V_PARAM_SHIFT; | ||
387 | |||
388 | vegas->dwnd = dwnd; | ||
389 | |||
390 | } | ||
391 | |||
392 | /* Wipe the slate clean for the next RTT. */ | ||
393 | vegas->cntRTT = 0; | ||
394 | vegas->minRTT = 0x7fffffff; | ||
395 | } | ||
396 | |||
397 | tp->snd_cwnd = vegas->cwnd + vegas->dwnd; | ||
398 | } | ||
399 | |||
400 | /* Extract Vegas state for the TCP socket info provided via netlink. */ | ||
401 | static void tcp_compound_get_info(struct sock *sk, u32 ext, struct sk_buff *skb) | ||
402 | { | ||
403 | const struct compound *ca = inet_csk_ca(sk); | ||
404 | if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) { | ||
405 | struct tcpvegas_info *info; | ||
406 | |||
407 | info = RTA_DATA(__RTA_PUT(skb, INET_DIAG_VEGASINFO, | ||
408 | sizeof(*info))); | ||
409 | |||
410 | info->tcpv_enabled = ca->doing_vegas_now; | ||
411 | info->tcpv_rttcnt = ca->cntRTT; | ||
412 | info->tcpv_rtt = ca->baseRTT; | ||
413 | info->tcpv_minrtt = ca->minRTT; | ||
414 | rtattr_failure:; | ||
415 | } | ||
416 | } | ||
417 | |||
418 | static struct tcp_congestion_ops tcp_compound = { | ||
419 | .init = tcp_compound_init, | ||
420 | .ssthresh = tcp_reno_ssthresh, | ||
421 | .cong_avoid = tcp_compound_cong_avoid, | ||
422 | .rtt_sample = tcp_compound_rtt_calc, | ||
423 | .set_state = tcp_compound_state, | ||
424 | .cwnd_event = tcp_compound_cwnd_event, | ||
425 | .get_info = tcp_compound_get_info, | ||
426 | |||
427 | .owner = THIS_MODULE, | ||
428 | .name = "compound", | ||
429 | }; | ||
430 | |||
431 | static int __init tcp_compound_register(void) | ||
432 | { | ||
433 | BUG_ON(sizeof(struct compound) > ICSK_CA_PRIV_SIZE); | ||
434 | tcp_register_congestion_control(&tcp_compound); | ||
435 | return 0; | ||
436 | } | ||
437 | |||
438 | static void __exit tcp_compound_unregister(void) | ||
439 | { | ||
440 | tcp_unregister_congestion_control(&tcp_compound); | ||
441 | } | ||
442 | |||
443 | module_init(tcp_compound_register); | ||
444 | module_exit(tcp_compound_unregister); | ||
445 | |||
446 | MODULE_AUTHOR("Angelo P. Castellani, Stephen Hemminger"); | ||
447 | MODULE_LICENSE("GPL"); | ||
448 | MODULE_DESCRIPTION("TCP Compound"); | ||
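As an illustration of the window law above (not part of the patch), the once-per-RTT dwnd update can be compressed into ordinary user-space C. In the sketch below all names are invented, libm's pow() stands in for the integer qroot(), and truncation may therefore differ from the kernel path by a segment:

	#include <math.h>
	#include <stdint.h>
	#include <stdio.h>

	#define V_PARAM_SHIFT	1
	#define GAMMA		30	/* as TCP_COMPOUND_GAMMA, in segments */
	#define BETA		1U	/* as TCP_COMPOUND_BETA */

	/*
	 * One per-RTT update of the delay window (dwnd), in segments.
	 * old_wnd is the window ACKed over the last RTT; rtt and base_rtt
	 * are in microseconds, with base_rtt <= rtt by construction (the
	 * min filter).  pow() approximates the module's integer qroot():
	 * x = w^(3/4) >> TCP_COMPOUND_ALPHA = w^0.75 / 8.
	 */
	static uint32_t compound_dwnd_update(uint32_t dwnd, uint32_t old_wnd,
					     uint32_t rtt, uint32_t base_rtt)
	{
		uint32_t target = (((uint64_t)old_wnd * base_rtt)
				   << V_PARAM_SHIFT) / rtt;
		uint32_t diff = (old_wnd << V_PARAM_SHIFT) - target;

		if (diff < (GAMMA << V_PARAM_SHIFT)) {
			/* No queueing seen: grow along the w^0.75 curve. */
			uint32_t x = (uint32_t)(pow((double)old_wnd, 0.75) / 8.0);

			return dwnd + (x > 1 ? x - 1 : 0);
		}
		if ((dwnd << V_PARAM_SHIFT) < diff * BETA)
			return 0;	/* queue built up: give back all of dwnd */
		return ((dwnd << V_PARAM_SHIFT) - diff * BETA) >> V_PARAM_SHIFT;
	}

	int main(void)
	{
		uint32_t dwnd = 0;
		int i;

		/* Loss-free path: base RTT 40 ms, measured 42 ms, loss
		 * window pinned at 50 segments for illustration. */
		for (i = 0; i < 5; i++) {
			dwnd = compound_dwnd_update(dwnd, 50 + dwnd,
						    42000, 40000);
			printf("RTT %d: dwnd = %u\n", i, dwnd);
		}
		return 0;
	}

Compiled with -lm, this prints dwnd = 1, 2, 3, 4, 5: with almost no queueing delay the delay window grows each RTT, and it would be cut back by the BETA branch as soon as "diff" crosses GAMMA.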
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c index aaa1538c0692..fa3e1aad660c 100644 --- a/net/ipv4/tcp_highspeed.c +++ b/net/ipv4/tcp_highspeed.c | |||
@@ -139,14 +139,19 @@ static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt, | |||
139 | tp->snd_cwnd++; | 139 | tp->snd_cwnd++; |
140 | } | 140 | } |
141 | } else { | 141 | } else { |
142 | /* Update AIMD parameters */ | 142 | /* Update AIMD parameters. |
143 | * | ||
144 | * We want to guarantee that: | ||
145 | * hstcp_aimd_vals[ca->ai-1].cwnd < | ||
146 | * snd_cwnd <= | ||
147 | * hstcp_aimd_vals[ca->ai].cwnd | ||
148 | */ | ||
143 | if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) { | 149 | if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) { |
144 | while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd && | 150 | while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd && |
145 | ca->ai < HSTCP_AIMD_MAX - 1) | 151 | ca->ai < HSTCP_AIMD_MAX - 1) |
146 | ca->ai++; | 152 | ca->ai++; |
147 | } else if (tp->snd_cwnd < hstcp_aimd_vals[ca->ai].cwnd) { | 153 | } else if (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) { |
148 | while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd && | 154 | while (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) |
149 | ca->ai > 0) | ||
150 | ca->ai--; | 155 | ca->ai--; |
151 | } | 156 | } |
152 | 157 | ||
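As a quick illustration of the invariant stated in the new comment (again, not part of the patch), here is a self-contained sketch using the first three cwnd thresholds of the real table; find_ai() and aimd_vals are invented names:

	#include <assert.h>
	#include <stdio.h>

	/* Toy stand-in for hstcp_aimd_vals[]: cwnd thresholds in segments. */
	static const unsigned int aimd_vals[] = { 38, 118, 221 };
	#define AIMD_MAX (sizeof(aimd_vals) / sizeof(aimd_vals[0]))

	/* Move ai so that aimd_vals[ai-1] < cwnd <= aimd_vals[ai],
	 * mirroring the fixed walk in hstcp_cong_avoid().  (At the table
	 * ends the invariant is necessarily clamped.) */
	static unsigned int find_ai(unsigned int ai, unsigned int cwnd)
	{
		if (cwnd > aimd_vals[ai]) {
			while (cwnd > aimd_vals[ai] && ai < AIMD_MAX - 1)
				ai++;
		} else if (ai && cwnd <= aimd_vals[ai - 1]) {
			while (ai && cwnd <= aimd_vals[ai - 1])
				ai--;
		}
		return ai;
	}

	int main(void)
	{
		unsigned int ai = 0;

		ai = find_ai(ai, 150);	/* grows past two thresholds */
		assert(ai == 2);	/* 118 < 150 <= 221 */
		ai = find_ai(ai, 100);	/* falls back into (38, 118] */
		assert(ai == 1);
		printf("ai = %u\n", ai);
		return 0;
	}

The pre-patch downward walk compared against aimd_vals[ai].cwnd with the wrong sense, so ai could fail to move down at all; the fixed condition restores the bracketing above.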
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 5a886e6efbbe..a891133f00e4 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -496,6 +496,24 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb) | |||
496 | } | 496 | } |
497 | } | 497 | } |
498 | 498 | ||
499 | int tcp_v4_gso_send_check(struct sk_buff *skb) | ||
500 | { | ||
501 | struct iphdr *iph; | ||
502 | struct tcphdr *th; | ||
503 | |||
504 | if (!pskb_may_pull(skb, sizeof(*th))) | ||
505 | return -EINVAL; | ||
506 | |||
507 | iph = skb->nh.iph; | ||
508 | th = skb->h.th; | ||
509 | |||
510 | th->check = 0; | ||
511 | th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0); | ||
512 | skb->csum = offsetof(struct tcphdr, check); | ||
513 | skb->ip_summed = CHECKSUM_HW; | ||
514 | return 0; | ||
515 | } | ||
516 | |||
499 | /* | 517 | /* |
500 | * This routine will send an RST to the other tcp. | 518 | * This routine will send an RST to the other tcp. |
501 | * | 519 | * |
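The seeding trick used here (and by the IPv6 variant later in this series) can be demonstrated in plain user space. A toy sketch, not part of the patch: the "~" applied to tcp_v4_check(..., 0) cancels the final complement inside csum_tcpudp_magic(), so what is left in th->check is, in effect, the folded but uncomplemented pseudo-header sum; whoever finishes the checksum later (hardware, or the software fallback) adds the segment words on top and stores the complement at the recorded offset.

	#include <stdint.h>
	#include <stdio.h>

	/* Fold a 32-bit one's-complement accumulator into 16 bits.
	 * (Unlike the kernel's csum_fold(), this does not complement.) */
	static uint16_t fold16(uint32_t sum)
	{
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}

	int main(void)
	{
		/* Invented 16-bit words standing in for the pseudo-header
		 * (addresses, protocol, length) and a tiny TCP segment. */
		uint16_t pseudo[] = { 0xc0a8, 0x0001, 0xc0a8, 0x0002,
				      0x0006, 0x0014 };
		uint16_t seg[]    = { 0x1234, 0x0050, 0xdead, 0xbeef };
		uint32_t sum = 0;
		uint16_t seed;
		unsigned int i;

		for (i = 0; i < sizeof(pseudo) / sizeof(pseudo[0]); i++)
			sum += pseudo[i];
		seed = fold16(sum);	/* what ends up in th->check */

		/* Device or fallback path: sum the segment with the seed
		 * in place, then store the complement. */
		sum = seed;
		for (i = 0; i < sizeof(seg) / sizeof(seg[0]); i++)
			sum += seg[i];
		printf("final checksum: 0x%04x\n", (uint16_t)~fold16(sum));
		return 0;
	}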
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 193363e22932..d16f863cf687 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c | |||
@@ -134,7 +134,7 @@ static int xfrm4_output_finish(struct sk_buff *skb) | |||
134 | } | 134 | } |
135 | #endif | 135 | #endif |
136 | 136 | ||
137 | if (!skb_shinfo(skb)->gso_size) | 137 | if (!skb_is_gso(skb)) |
138 | return xfrm4_output_finish2(skb); | 138 | return xfrm4_output_finish2(skb); |
139 | 139 | ||
140 | skb->protocol = htons(ETH_P_IP); | 140 | skb->protocol = htons(ETH_P_IP); |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index c250d0af10d7..2316a4315a18 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -508,6 +508,26 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) | |||
508 | kfree(ifp); | 508 | kfree(ifp); |
509 | } | 509 | } |
510 | 510 | ||
511 | static void | ||
512 | ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp) | ||
513 | { | ||
514 | struct inet6_ifaddr *ifa, **ifap; | ||
515 | int ifp_scope = ipv6_addr_src_scope(&ifp->addr); | ||
516 | |||
517 | /* | ||
518 | * Each device address list is sorted in order of scope - | ||
519 | * global before linklocal. | ||
520 | */ | ||
521 | for (ifap = &idev->addr_list; (ifa = *ifap) != NULL; | ||
522 | ifap = &ifa->if_next) { | ||
523 | if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr)) | ||
524 | break; | ||
525 | } | ||
526 | |||
527 | ifp->if_next = *ifap; | ||
528 | *ifap = ifp; | ||
529 | } | ||
530 | |||
511 | /* On success it returns ifp with increased reference count */ | 531 | /* On success it returns ifp with increased reference count */ |
512 | 532 | ||
513 | static struct inet6_ifaddr * | 533 | static struct inet6_ifaddr * |
@@ -573,8 +593,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
573 | 593 | ||
574 | write_lock(&idev->lock); | 594 | write_lock(&idev->lock); |
575 | /* Add to inet6_dev unicast addr list. */ | 595 | /* Add to inet6_dev unicast addr list. */ |
576 | ifa->if_next = idev->addr_list; | 596 | ipv6_link_dev_addr(idev, ifa); |
577 | idev->addr_list = ifa; | ||
578 | 597 | ||
579 | #ifdef CONFIG_IPV6_PRIVACY | 598 | #ifdef CONFIG_IPV6_PRIVACY |
580 | if (ifa->flags&IFA_F_TEMPORARY) { | 599 | if (ifa->flags&IFA_F_TEMPORARY) { |
@@ -987,7 +1006,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev, | |||
987 | continue; | 1006 | continue; |
988 | } else if (score.scope < hiscore.scope) { | 1007 | } else if (score.scope < hiscore.scope) { |
989 | if (score.scope < daddr_scope) | 1008 | if (score.scope < daddr_scope) |
990 | continue; | 1009 | break; /* addresses sorted by scope */ |
991 | else { | 1010 | else { |
992 | score.rule = 2; | 1011 | score.rule = 2; |
993 | goto record_it; | 1012 | goto record_it; |
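The new helper inserts with the classic pointer-to-pointer walk, so no head-of-list special case is needed, and the sorted order is what lets the saddr loop above "break" instead of "continue". A generic user-space sketch of the same idiom (node type and scope constants invented for illustration):

	#include <stdio.h>

	struct node {
		int scope;	/* e.g. 0x0e global, 0x05 site, 0x02 link */
		struct node *next;
	};

	/* Keep the list sorted by descending scope, as
	 * ipv6_link_dev_addr() keeps globals ahead of link-locals. */
	static void link_sorted(struct node **head, struct node *n)
	{
		struct node **p;

		for (p = head; *p != NULL; p = &(*p)->next)
			if (n->scope >= (*p)->scope)
				break;
		n->next = *p;
		*p = n;
	}

	int main(void)
	{
		struct node a = { 0x02, NULL }, b = { 0x0e, NULL },
			    c = { 0x05, NULL };
		struct node *head = NULL, *p;

		link_sorted(&head, &a);
		link_sorted(&head, &b);
		link_sorted(&head, &c);
		for (p = head; p != NULL; p = p->next)
			printf("scope 0x%02x\n", p->scope);	/* 0e 05 02 */
		return 0;
	}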
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 2c5b44575af0..3bc74ce78800 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *skb) | |||
147 | 147 | ||
148 | int ip6_output(struct sk_buff *skb) | 148 | int ip6_output(struct sk_buff *skb) |
149 | { | 149 | { |
150 | if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) || | 150 | if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) || |
151 | dst_allfrag(skb->dst)) | 151 | dst_allfrag(skb->dst)) |
152 | return ip6_fragment(skb, ip6_output2); | 152 | return ip6_fragment(skb, ip6_output2); |
153 | else | 153 | else |
@@ -229,7 +229,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | |||
229 | skb->priority = sk->sk_priority; | 229 | skb->priority = sk->sk_priority; |
230 | 230 | ||
231 | mtu = dst_mtu(dst); | 231 | mtu = dst_mtu(dst); |
232 | if ((skb->len <= mtu) || ipfragok || skb_shinfo(skb)->gso_size) { | 232 | if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) { |
233 | IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); | 233 | IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); |
234 | return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, | 234 | return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, |
235 | dst_output); | 235 | dst_output); |
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index b285b0357084..7e4d1c17bfbc 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c | |||
@@ -109,7 +109,8 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb) | |||
109 | goto out_put_cpu; | 109 | goto out_put_cpu; |
110 | } | 110 | } |
111 | 111 | ||
112 | skb_put(skb, dlen - plen); | 112 | skb->truesize += dlen - plen; |
113 | __skb_put(skb, dlen - plen); | ||
113 | memcpy(skb->data, scratch, dlen); | 114 | memcpy(skb->data, scratch, dlen); |
114 | err = ipch->nexthdr; | 115 | err = ipch->nexthdr; |
115 | 116 | ||
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 0c17dec11c8d..43327264e69c 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -57,29 +57,11 @@ | |||
57 | 57 | ||
58 | DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly; | 58 | DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly; |
59 | 59 | ||
60 | static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) | 60 | static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb, |
61 | int proto) | ||
61 | { | 62 | { |
62 | struct sk_buff *segs = ERR_PTR(-EINVAL); | 63 | struct inet6_protocol *ops = NULL; |
63 | struct ipv6hdr *ipv6h; | ||
64 | struct inet6_protocol *ops; | ||
65 | int proto; | ||
66 | 64 | ||
67 | if (unlikely(skb_shinfo(skb)->gso_type & | ||
68 | ~(SKB_GSO_UDP | | ||
69 | SKB_GSO_DODGY | | ||
70 | SKB_GSO_TCP_ECN | | ||
71 | SKB_GSO_TCPV6 | | ||
72 | 0))) | ||
73 | goto out; | ||
74 | |||
75 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | ||
76 | goto out; | ||
77 | |||
78 | ipv6h = skb->nh.ipv6h; | ||
79 | proto = ipv6h->nexthdr; | ||
80 | __skb_pull(skb, sizeof(*ipv6h)); | ||
81 | |||
82 | rcu_read_lock(); | ||
83 | for (;;) { | 65 | for (;;) { |
84 | struct ipv6_opt_hdr *opth; | 66 | struct ipv6_opt_hdr *opth; |
85 | int len; | 67 | int len; |
@@ -88,30 +70,80 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) | |||
88 | ops = rcu_dereference(inet6_protos[proto]); | 70 | ops = rcu_dereference(inet6_protos[proto]); |
89 | 71 | ||
90 | if (unlikely(!ops)) | 72 | if (unlikely(!ops)) |
91 | goto unlock; | 73 | break; |
92 | 74 | ||
93 | if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) | 75 | if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) |
94 | break; | 76 | break; |
95 | } | 77 | } |
96 | 78 | ||
97 | if (unlikely(!pskb_may_pull(skb, 8))) | 79 | if (unlikely(!pskb_may_pull(skb, 8))) |
98 | goto unlock; | 80 | break; |
99 | 81 | ||
100 | opth = (void *)skb->data; | 82 | opth = (void *)skb->data; |
101 | len = opth->hdrlen * 8 + 8; | 83 | len = opth->hdrlen * 8 + 8; |
102 | 84 | ||
103 | if (unlikely(!pskb_may_pull(skb, len))) | 85 | if (unlikely(!pskb_may_pull(skb, len))) |
104 | goto unlock; | 86 | break; |
105 | 87 | ||
106 | proto = opth->nexthdr; | 88 | proto = opth->nexthdr; |
107 | __skb_pull(skb, len); | 89 | __skb_pull(skb, len); |
108 | } | 90 | } |
109 | 91 | ||
110 | skb->h.raw = skb->data; | 92 | return ops; |
111 | if (likely(ops->gso_segment)) | 93 | } |
112 | segs = ops->gso_segment(skb, features); | 94 | |
95 | static int ipv6_gso_send_check(struct sk_buff *skb) | ||
96 | { | ||
97 | struct ipv6hdr *ipv6h; | ||
98 | struct inet6_protocol *ops; | ||
99 | int err = -EINVAL; | ||
100 | |||
101 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | ||
102 | goto out; | ||
113 | 103 | ||
114 | unlock: | 104 | ipv6h = skb->nh.ipv6h; |
105 | __skb_pull(skb, sizeof(*ipv6h)); | ||
106 | err = -EPROTONOSUPPORT; | ||
107 | |||
108 | rcu_read_lock(); | ||
109 | ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); | ||
110 | if (likely(ops && ops->gso_send_check)) { | ||
111 | skb->h.raw = skb->data; | ||
112 | err = ops->gso_send_check(skb); | ||
113 | } | ||
114 | rcu_read_unlock(); | ||
115 | |||
116 | out: | ||
117 | return err; | ||
118 | } | ||
119 | |||
120 | static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) | ||
121 | { | ||
122 | struct sk_buff *segs = ERR_PTR(-EINVAL); | ||
123 | struct ipv6hdr *ipv6h; | ||
124 | struct inet6_protocol *ops; | ||
125 | |||
126 | if (unlikely(skb_shinfo(skb)->gso_type & | ||
127 | ~(SKB_GSO_UDP | | ||
128 | SKB_GSO_DODGY | | ||
129 | SKB_GSO_TCP_ECN | | ||
130 | SKB_GSO_TCPV6 | | ||
131 | 0))) | ||
132 | goto out; | ||
133 | |||
134 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | ||
135 | goto out; | ||
136 | |||
137 | ipv6h = skb->nh.ipv6h; | ||
138 | __skb_pull(skb, sizeof(*ipv6h)); | ||
139 | segs = ERR_PTR(-EPROTONOSUPPORT); | ||
140 | |||
141 | rcu_read_lock(); | ||
142 | ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); | ||
143 | if (likely(ops && ops->gso_segment)) { | ||
144 | skb->h.raw = skb->data; | ||
145 | segs = ops->gso_segment(skb, features); | ||
146 | } | ||
115 | rcu_read_unlock(); | 147 | rcu_read_unlock(); |
116 | 148 | ||
117 | if (unlikely(IS_ERR(segs))) | 149 | if (unlikely(IS_ERR(segs))) |
@@ -130,6 +162,7 @@ out: | |||
130 | static struct packet_type ipv6_packet_type = { | 162 | static struct packet_type ipv6_packet_type = { |
131 | .type = __constant_htons(ETH_P_IPV6), | 163 | .type = __constant_htons(ETH_P_IPV6), |
132 | .func = ipv6_rcv, | 164 | .func = ipv6_rcv, |
165 | .gso_send_check = ipv6_gso_send_check, | ||
133 | .gso_segment = ipv6_gso_segment, | 166 | .gso_segment = ipv6_gso_segment, |
134 | }; | 167 | }; |
135 | 168 | ||
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 5bdcb9002cf7..923989d0520d 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -552,6 +552,24 @@ static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb) | |||
552 | } | 552 | } |
553 | } | 553 | } |
554 | 554 | ||
555 | static int tcp_v6_gso_send_check(struct sk_buff *skb) | ||
556 | { | ||
557 | struct ipv6hdr *ipv6h; | ||
558 | struct tcphdr *th; | ||
559 | |||
560 | if (!pskb_may_pull(skb, sizeof(*th))) | ||
561 | return -EINVAL; | ||
562 | |||
563 | ipv6h = skb->nh.ipv6h; | ||
564 | th = skb->h.th; | ||
565 | |||
566 | th->check = 0; | ||
567 | th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, | ||
568 | IPPROTO_TCP, 0); | ||
569 | skb->csum = offsetof(struct tcphdr, check); | ||
570 | skb->ip_summed = CHECKSUM_HW; | ||
571 | return 0; | ||
572 | } | ||
555 | 573 | ||
556 | static void tcp_v6_send_reset(struct sk_buff *skb) | 574 | static void tcp_v6_send_reset(struct sk_buff *skb) |
557 | { | 575 | { |
@@ -1603,6 +1621,7 @@ struct proto tcpv6_prot = { | |||
1603 | static struct inet6_protocol tcpv6_protocol = { | 1621 | static struct inet6_protocol tcpv6_protocol = { |
1604 | .handler = tcp_v6_rcv, | 1622 | .handler = tcp_v6_rcv, |
1605 | .err_handler = tcp_v6_err, | 1623 | .err_handler = tcp_v6_err, |
1624 | .gso_send_check = tcp_v6_gso_send_check, | ||
1606 | .gso_segment = tcp_tso_segment, | 1625 | .gso_segment = tcp_tso_segment, |
1607 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, | 1626 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, |
1608 | }; | 1627 | }; |
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 48fccb1eca08..0eea60ea9ebc 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c | |||
@@ -122,7 +122,7 @@ static int xfrm6_output_finish(struct sk_buff *skb) | |||
122 | { | 122 | { |
123 | struct sk_buff *segs; | 123 | struct sk_buff *segs; |
124 | 124 | ||
125 | if (!skb_shinfo(skb)->gso_size) | 125 | if (!skb_is_gso(skb)) |
126 | return xfrm6_output_finish2(skb); | 126 | return xfrm6_output_finish2(skb); |
127 | 127 | ||
128 | skb->protocol = htons(ETH_P_IP); | 128 | skb->protocol = htons(ETH_P_IP); |
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 389a4119e1b4..1d50f801f181 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
@@ -66,6 +66,14 @@ static DEFINE_SPINLOCK(nr_list_lock); | |||
66 | static const struct proto_ops nr_proto_ops; | 66 | static const struct proto_ops nr_proto_ops; |
67 | 67 | ||
68 | /* | 68 | /* |
69 | * NETROM network devices are virtual network devices encapsulating NETROM | ||
70 | * frames into AX.25 which will be sent through an AX.25 device, so form a | ||
71 | * special "super class" of normal net devices; split their locks off into a | ||
72 | * separate class since they always nest. | ||
73 | */ | ||
74 | static struct lock_class_key nr_netdev_xmit_lock_key; | ||
75 | |||
76 | /* | ||
69 | * Socket removal during an interrupt is now safe. | 77 | * Socket removal during an interrupt is now safe. |
70 | */ | 78 | */ |
71 | static void nr_remove_socket(struct sock *sk) | 79 | static void nr_remove_socket(struct sock *sk) |
@@ -986,18 +994,18 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev) | |||
986 | nr_make->vl = 0; | 994 | nr_make->vl = 0; |
987 | nr_make->state = NR_STATE_3; | 995 | nr_make->state = NR_STATE_3; |
988 | sk_acceptq_added(sk); | 996 | sk_acceptq_added(sk); |
989 | |||
990 | nr_insert_socket(make); | ||
991 | |||
992 | skb_queue_head(&sk->sk_receive_queue, skb); | 997 | skb_queue_head(&sk->sk_receive_queue, skb); |
993 | 998 | ||
994 | nr_start_heartbeat(make); | ||
995 | nr_start_idletimer(make); | ||
996 | |||
997 | if (!sock_flag(sk, SOCK_DEAD)) | 999 | if (!sock_flag(sk, SOCK_DEAD)) |
998 | sk->sk_data_ready(sk, skb->len); | 1000 | sk->sk_data_ready(sk, skb->len); |
999 | 1001 | ||
1000 | bh_unlock_sock(sk); | 1002 | bh_unlock_sock(sk); |
1003 | |||
1004 | nr_insert_socket(make); | ||
1005 | |||
1006 | nr_start_heartbeat(make); | ||
1007 | nr_start_idletimer(make); | ||
1008 | |||
1001 | return 1; | 1009 | return 1; |
1002 | } | 1010 | } |
1003 | 1011 | ||
@@ -1382,14 +1390,12 @@ static int __init nr_proto_init(void) | |||
1382 | return -1; | 1390 | return -1; |
1383 | } | 1391 | } |
1384 | 1392 | ||
1385 | dev_nr = kmalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL); | 1393 | dev_nr = kzalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL); |
1386 | if (dev_nr == NULL) { | 1394 | if (dev_nr == NULL) { |
1387 | printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n"); | 1395 | printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n"); |
1388 | return -1; | 1396 | return -1; |
1389 | } | 1397 | } |
1390 | 1398 | ||
1391 | memset(dev_nr, 0x00, nr_ndevs * sizeof(struct net_device *)); | ||
1392 | |||
1393 | for (i = 0; i < nr_ndevs; i++) { | 1399 | for (i = 0; i < nr_ndevs; i++) { |
1394 | char name[IFNAMSIZ]; | 1400 | char name[IFNAMSIZ]; |
1395 | struct net_device *dev; | 1401 | struct net_device *dev; |
@@ -1407,6 +1413,7 @@ static int __init nr_proto_init(void) | |||
1407 | free_netdev(dev); | 1413 | free_netdev(dev); |
1408 | goto fail; | 1414 | goto fail; |
1409 | } | 1415 | } |
1416 | lockdep_set_class(&dev->_xmit_lock, &nr_netdev_xmit_lock_key); | ||
1410 | dev_nr[i] = dev; | 1417 | dev_nr[i] = dev; |
1411 | } | 1418 | } |
1412 | 1419 | ||
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c index 75b72d389ba9..ddba1c144260 100644 --- a/net/netrom/nr_timer.c +++ b/net/netrom/nr_timer.c | |||
@@ -138,8 +138,8 @@ static void nr_heartbeat_expiry(unsigned long param) | |||
138 | if (sock_flag(sk, SOCK_DESTROY) || | 138 | if (sock_flag(sk, SOCK_DESTROY) || |
139 | (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { | 139 | (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { |
140 | sock_hold(sk); | 140 | sock_hold(sk); |
141 | nr_destroy_socket(sk); | ||
142 | bh_unlock_sock(sk); | 141 | bh_unlock_sock(sk); |
142 | nr_destroy_socket(sk); | ||
143 | sock_put(sk); | 143 | sock_put(sk); |
144 | return; | 144 | return; |
145 | } | 145 | } |
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index d0a67bb31363..08a542855654 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
@@ -67,6 +67,14 @@ static struct proto_ops rose_proto_ops; | |||
67 | ax25_address rose_callsign; | 67 | ax25_address rose_callsign; |
68 | 68 | ||
69 | /* | 69 | /* |
70 | * ROSE network devices are virtual network devices encapsulating ROSE | ||
71 | * frames into AX.25 which will be sent through an AX.25 device, so form a | ||
72 | * special "super class" of normal net devices; split their locks off into a | ||
73 | * separate class since they always nest. | ||
74 | */ | ||
75 | static struct lock_class_key rose_netdev_xmit_lock_key; | ||
76 | |||
77 | /* | ||
70 | * Convert a ROSE address into text. | 78 | * Convert a ROSE address into text. |
71 | */ | 79 | */ |
72 | const char *rose2asc(const rose_address *addr) | 80 | const char *rose2asc(const rose_address *addr) |
@@ -1490,14 +1498,13 @@ static int __init rose_proto_init(void) | |||
1490 | 1498 | ||
1491 | rose_callsign = null_ax25_address; | 1499 | rose_callsign = null_ax25_address; |
1492 | 1500 | ||
1493 | dev_rose = kmalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); | 1501 | dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); |
1494 | if (dev_rose == NULL) { | 1502 | if (dev_rose == NULL) { |
1495 | printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n"); | 1503 | printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n"); |
1496 | rc = -ENOMEM; | 1504 | rc = -ENOMEM; |
1497 | goto out_proto_unregister; | 1505 | goto out_proto_unregister; |
1498 | } | 1506 | } |
1499 | 1507 | ||
1500 | memset(dev_rose, 0x00, rose_ndevs * sizeof(struct net_device*)); | ||
1501 | for (i = 0; i < rose_ndevs; i++) { | 1508 | for (i = 0; i < rose_ndevs; i++) { |
1502 | struct net_device *dev; | 1509 | struct net_device *dev; |
1503 | char name[IFNAMSIZ]; | 1510 | char name[IFNAMSIZ]; |
@@ -1516,6 +1523,7 @@ static int __init rose_proto_init(void) | |||
1516 | free_netdev(dev); | 1523 | free_netdev(dev); |
1517 | goto fail; | 1524 | goto fail; |
1518 | } | 1525 | } |
1526 | lockdep_set_class(&dev->_xmit_lock, &rose_netdev_xmit_lock_key); | ||
1519 | dev_rose[i] = dev; | 1527 | dev_rose[i] = dev; |
1520 | } | 1528 | } |
1521 | 1529 | ||
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 599423cc9d0d..9affeeedf107 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
@@ -602,8 +602,8 @@ static int tca_action_flush(struct rtattr *rta, struct nlmsghdr *n, u32 pid) | |||
602 | return err; | 602 | return err; |
603 | 603 | ||
604 | rtattr_failure: | 604 | rtattr_failure: |
605 | module_put(a->ops->owner); | ||
606 | nlmsg_failure: | 605 | nlmsg_failure: |
606 | module_put(a->ops->owner); | ||
607 | err_out: | 607 | err_out: |
608 | kfree_skb(skb); | 608 | kfree_skb(skb); |
609 | kfree(a); | 609 | kfree(a); |
@@ -884,8 +884,6 @@ static int __init tc_action_init(void) | |||
884 | link_p[RTM_GETACTION-RTM_BASE].dumpit = tc_dump_action; | 884 | link_p[RTM_GETACTION-RTM_BASE].dumpit = tc_dump_action; |
885 | } | 885 | } |
886 | 886 | ||
887 | printk("TC classifier action (bugs to netdev@vger.kernel.org cc " | ||
888 | "hadi@cyberus.ca)\n"); | ||
889 | return 0; | 887 | return 0; |
890 | } | 888 | } |
891 | 889 | ||
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 34afe41fa2f3..cc5f339e6f91 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -196,7 +196,7 @@ struct htb_class | |||
196 | struct qdisc_rate_table *rate; /* rate table of the class itself */ | 196 | struct qdisc_rate_table *rate; /* rate table of the class itself */ |
197 | struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */ | 197 | struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */ |
198 | long buffer,cbuffer; /* token bucket depth/rate */ | 198 | long buffer,cbuffer; /* token bucket depth/rate */ |
199 | long mbuffer; /* max wait time */ | 199 | psched_tdiff_t mbuffer; /* max wait time */ |
200 | long tokens,ctokens; /* current number of tokens */ | 200 | long tokens,ctokens; /* current number of tokens */ |
201 | psched_time_t t_c; /* checkpoint time */ | 201 | psched_time_t t_c; /* checkpoint time */ |
202 | }; | 202 | }; |
@@ -1601,7 +1601,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1601 | /* set class to be in HTB_CAN_SEND state */ | 1601 | /* set class to be in HTB_CAN_SEND state */ |
1602 | cl->tokens = hopt->buffer; | 1602 | cl->tokens = hopt->buffer; |
1603 | cl->ctokens = hopt->cbuffer; | 1603 | cl->ctokens = hopt->cbuffer; |
1604 | cl->mbuffer = 60000000; /* 1min */ | 1604 | cl->mbuffer = PSCHED_JIFFIE2US(HZ*60); /* 1min */ |
1605 | PSCHED_GET_TIME(cl->t_c); | 1605 | PSCHED_GET_TIME(cl->t_c); |
1606 | cl->cmode = HTB_CAN_SEND; | 1606 | cl->cmode = HTB_CAN_SEND; |
1607 | 1607 | ||
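One arithmetic note on the mbuffer change (illustrative, not part of the patch): the old literal 60000000 hard-coded one minute in microseconds, which is only correct when the packet-scheduler clock has microsecond resolution. PSCHED_JIFFIE2US(HZ*60) expresses the same one-minute bound in whatever units the configured PSCHED clock actually uses; with the usual microsecond-resolution clock it still evaluates to HZ*60 jiffies * (1000000/HZ) us per jiffy = 60000000, while on other clock sources it now yields the correct value automatically, matching the psched_tdiff_t type the field was given above.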