Diffstat (limited to 'net/core')
-rw-r--r--  net/core/datagram.c  81
-rw-r--r--  net/core/dev.c        2
-rw-r--r--  net/core/sock.c      20
3 files changed, 40 insertions, 63 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c
index da9bf71421a7..81987df536eb 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -211,74 +211,45 @@ void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
 int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 			    struct iovec *to, int len)
 {
-	int start = skb_headlen(skb);
-	int i, copy = start - offset;
-
-	/* Copy header. */
-	if (copy > 0) {
-		if (copy > len)
-			copy = len;
-		if (memcpy_toiovec(to, skb->data + offset, copy))
-			goto fault;
-		if ((len -= copy) == 0)
-			return 0;
-		offset += copy;
-	}
-
-	/* Copy paged appendix. Hmm... why does this look so complicated? */
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		int end;
-
-		BUG_TRAP(start <= offset + len);
-
-		end = start + skb_shinfo(skb)->frags[i].size;
-		if ((copy = end - offset) > 0) {
-			int err;
-			u8 *vaddr;
-			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-			struct page *page = frag->page;
-
-			if (copy > len)
-				copy = len;
-			vaddr = kmap(page);
-			err = memcpy_toiovec(to, vaddr + frag->page_offset +
-					     offset - start, copy);
-			kunmap(page);
-			if (err)
-				goto fault;
-			if (!(len -= copy))
-				return 0;
-			offset += copy;
-		}
-		start = end;
-	}
-
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
-
-			BUG_TRAP(start <= offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				if (skb_copy_datagram_iovec(list,
-							    offset - start,
-							    to, copy))
-					goto fault;
-				if ((len -= copy) == 0)
-					return 0;
-				offset += copy;
-			}
-			start = end;
-		}
-	}
-	if (!len)
-		return 0;
-
+	int i, err, fraglen, end = 0;
+	struct sk_buff *next = skb_shinfo(skb)->frag_list;
+next_skb:
+	fraglen = skb_headlen(skb);
+	i = -1;
+
+	while (1) {
+		int start = end;
+
+		if ((end += fraglen) > offset) {
+			int copy = end - offset, o = offset - start;
+
+			if (copy > len)
+				copy = len;
+			if (i == -1)
+				err = memcpy_toiovec(to, skb->data + o, copy);
+			else {
+				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+				struct page *page = frag->page;
+				void *p = kmap(page) + frag->page_offset + o;
+				err = memcpy_toiovec(to, p, copy);
+				kunmap(page);
+			}
+			if (err)
+				goto fault;
+			if (!(len -= copy))
+				return 0;
+			offset += copy;
+		}
+		if (++i >= skb_shinfo(skb)->nr_frags)
+			break;
+		fraglen = skb_shinfo(skb)->frags[i].size;
+	}
+	if (next) {
+		skb = next;
+		BUG_ON(skb_shinfo(skb)->frag_list);
+		next = skb->next;
+		goto next_skb;
+	}
 fault:
 	return -EFAULT;
 }
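
For reference, a minimal userspace sketch of the traversal the rewritten skb_copy_datagram_iovec() uses: a single loop covers the linear head (i == -1) and each paged fragment, and chained buffers are then followed iteratively via a goto instead of the old recursive frag_list walk. The struct buf / copy_range() names and types below are simplified stand-ins for illustration, not kernel code.

/*
 * Sketch of the new traversal order: linear head first, then each
 * fragment, then any chained buffers, all handled by one loop.
 */
#include <stdio.h>
#include <string.h>

struct frag { const char *data; int size; };

struct buf {
	const char *head;		/* linear part */
	int headlen;
	struct frag frags[4];		/* paged fragments */
	int nr_frags;
	struct buf *next;		/* stand-in for frag_list */
};

static int copy_range(const struct buf *b, int offset, char *to, int len)
{
	int i, end = 0, fraglen;

next_buf:
	fraglen = b->headlen;
	i = -1;

	while (1) {
		int start = end;

		if ((end += fraglen) > offset) {
			int copy = end - offset, o = offset - start;
			const char *src = (i == -1) ? b->head + o
						    : b->frags[i].data + o;

			if (copy > len)
				copy = len;
			memcpy(to, src, copy);
			to += copy;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		if (++i >= b->nr_frags)
			break;
		fraglen = b->frags[i].size;
	}
	if (b->next) {
		b = b->next;
		goto next_buf;
	}
	return -1;	/* ran out of data; the kernel returns -EFAULT */
}

int main(void)
{
	struct buf tail = { "!", 1, { { NULL, 0 } }, 0, NULL };
	struct buf b = { "hel", 3, { { "lo ", 3 }, { "world", 5 } }, 2, &tail };
	char out[16] = { 0 };

	copy_range(&b, 2, out, 8);	/* copies "llo worl" */
	printf("%s\n", out);
	return 0;
}
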
diff --git a/net/core/dev.c b/net/core/dev.c
index 37c881070963..9066c874e273 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1259,6 +1259,8 @@ int dev_queue_xmit(struct sk_buff *skb)
 	if (skb_checksum_help(skb, 0))
 		goto out_kfree_skb;
 
+	spin_lock_prefetch(&dev->queue_lock);
+
 	/* Disable soft irqs for various locks below. Also
 	 * stops preemption for RCU.
 	 */
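
The dev.c hunk only adds a spin_lock_prefetch() on dev->queue_lock, presumably so the lock's cacheline is already on its way to the CPU by the time the queue lock is actually taken further down in dev_queue_xmit(). A rough userspace sketch of the same idea, using the GCC/Clang __builtin_prefetch hint and a pthread mutex as stand-ins (queue_lock, queue_len and enqueue() are illustrative names, not kernel symbols):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue_len;

static void enqueue(void)
{
	/* Hint the CPU to start fetching the lock's cacheline ... */
	__builtin_prefetch(&queue_lock, 1 /* for write */);

	/* ... while other per-packet work (checksumming etc.) runs here. */

	pthread_mutex_lock(&queue_lock);
	queue_len++;
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	enqueue();
	printf("queue_len = %d\n", queue_len);
	return 0;
}
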
diff --git a/net/core/sock.c b/net/core/sock.c
index ac63b56e23b2..928d2a1d6d8e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -660,16 +660,20 @@ struct sock *sk_alloc(int family, unsigned int __nocast priority,
 			sock_lock_init(sk);
 		}
 
-		if (security_sk_alloc(sk, family, priority)) {
-			if (slab != NULL)
-				kmem_cache_free(slab, sk);
-			else
-				kfree(sk);
-			sk = NULL;
-		} else
-			__module_get(prot->owner);
+		if (security_sk_alloc(sk, family, priority))
+			goto out_free;
+
+		if (!try_module_get(prot->owner))
+			goto out_free;
 	}
 	return sk;
+
+out_free:
+	if (slab != NULL)
+		kmem_cache_free(slab, sk);
+	else
+		kfree(sk);
+	return NULL;
 }
 
 void sk_free(struct sock *sk)
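
The sock.c hunk restructures the error handling in sk_alloc(): the nested free-and-NULL branches are replaced by a single out_free label, and the unconditional __module_get() becomes try_module_get(), with a failed attempt taking the same cleanup path. A compact userspace sketch of that goto-unwind shape follows; make_obj(), check_a() and check_b() are hypothetical stand-ins, not kernel functions.

#include <stdlib.h>
#include <stdio.h>

struct obj { int id; };

/* Stand-ins for security_sk_alloc() (nonzero = fail) and
 * try_module_get() (zero = fail). */
static int check_a(const struct obj *o) { return o->id < 0; }
static int check_b(const struct obj *o) { return o != NULL; }

static struct obj *make_obj(int id)
{
	struct obj *o = malloc(sizeof(*o));

	if (o == NULL)
		return NULL;
	o->id = id;

	if (check_a(o))			/* like security_sk_alloc() failing */
		goto out_free;

	if (!check_b(o))		/* like try_module_get() failing */
		goto out_free;

	return o;

out_free:				/* one cleanup path for every failure */
	free(o);			/* kernel: kmem_cache_free() or kfree() */
	return NULL;
}

int main(void)
{
	struct obj *ok = make_obj(1);
	struct obj *bad = make_obj(-1);

	printf("ok=%s bad=%s\n", ok ? "allocated" : "NULL",
	       bad ? "allocated" : "NULL");
	free(ok);
	return 0;
}
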