author    Takashi Iwai <tiwai@suse.de>  2014-07-14 12:13:41 -0400
committer Takashi Iwai <tiwai@suse.de>  2014-07-14 12:13:41 -0400
commit    03fe805a098422d27cbbbad29a54ff1349da3622
tree      0db9cc9a701203ddcee58e5b907d4fb8ad31b421
parent    50dd9050e45ce11c575eabcaee8a914caf78fb74
parent    5646eda5851e6cfdfa22d41895e3f5daffa643d3
Merge branch 'topic/monotonic' into for-next
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dst.c    | 16
-rw-r--r--  net/core/filter.c | 10
-rw-r--r--  net/core/iovec.c  | 55
-rw-r--r--  net/core/skbuff.c |  2
4 files changed, 17 insertions(+), 66 deletions(-)
diff --git a/net/core/dst.c b/net/core/dst.c
index 80d6286c8b62..a028409ee438 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -269,6 +269,15 @@ again:
 }
 EXPORT_SYMBOL(dst_destroy);
 
+static void dst_destroy_rcu(struct rcu_head *head)
+{
+        struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
+
+        dst = dst_destroy(dst);
+        if (dst)
+                __dst_free(dst);
+}
+
 void dst_release(struct dst_entry *dst)
 {
         if (dst) {
@@ -276,11 +285,8 @@ void dst_release(struct dst_entry *dst)
 
                 newrefcnt = atomic_dec_return(&dst->__refcnt);
                 WARN_ON(newrefcnt < 0);
-                if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
-                        dst = dst_destroy(dst);
-                        if (dst)
-                                __dst_free(dst);
-                }
+                if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
+                        call_rcu(&dst->rcu_head, dst_destroy_rcu);
         }
 }
 EXPORT_SYMBOL(dst_release);
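The dst.c change stops freeing the entry synchronously and instead queues an RCU callback that recovers the dst_entry from its embedded rcu_head. Below is a minimal user-space sketch of that embed-and-recover pattern; the struct and callback names are illustrative, only the container_of macro mirrors the kernel definition, and the callback is invoked directly here rather than deferred via call_rcu().

/* Sketch of the container_of pattern dst_destroy_rcu() relies on: a small
 * callback head embedded in a larger object, with the callback recovering
 * the outer object from a pointer to the member.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct cb_head {                        /* stand-in for struct rcu_head */
        void (*func)(struct cb_head *head);
};

struct entry {                          /* stand-in for struct dst_entry */
        int refcnt;
        struct cb_head cb;
};

static void entry_destroy_cb(struct cb_head *head)
{
        struct entry *e = container_of(head, struct entry, cb);

        printf("destroying entry with refcnt=%d\n", e->refcnt);
        free(e);
}

int main(void)
{
        struct entry *e = malloc(sizeof(*e));

        if (!e)
                return 1;
        e->refcnt = 0;
        e->cb.func = entry_destroy_cb;
        /* The kernel would queue this with call_rcu(&dst->rcu_head, dst_destroy_rcu);
         * here the callback simply runs immediately. */
        e->cb.func(&e->cb);
        return 0;
}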
diff --git a/net/core/filter.c b/net/core/filter.c
index 735fad897496..1dbf6462f766 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -840,11 +840,11 @@ int sk_convert_filter(struct sock_filter *prog, int len,
         BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
         BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
 
-        if (len <= 0 || len >= BPF_MAXINSNS)
+        if (len <= 0 || len > BPF_MAXINSNS)
                 return -EINVAL;
 
         if (new_prog) {
-                addrs = kzalloc(len * sizeof(*addrs), GFP_KERNEL);
+                addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL);
                 if (!addrs)
                         return -ENOMEM;
         }
@@ -1101,7 +1101,7 @@ static int check_load_and_stores(struct sock_filter *filter, int flen)
 
         BUILD_BUG_ON(BPF_MEMWORDS > 16);
 
-        masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
+        masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
         if (!masks)
                 return -ENOMEM;
 
@@ -1382,7 +1382,7 @@ static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
         fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
         if (fp_new) {
                 *fp_new = *fp;
-                /* As we're kepping orig_prog in fp_new along,
+                /* As we're keeping orig_prog in fp_new along,
                  * we need to make sure we're not evicting it
                  * from the old fp.
                  */
@@ -1524,8 +1524,8 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
 
 /**
  * sk_unattached_filter_create - create an unattached filter
- * @fprog: the filter program
  * @pfp: the unattached filter that is created
+ * @fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
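The filter.c allocation changes replace open-coded "count * element size" products with kcalloc()/kmalloc_array(), which refuse a multiplication that would overflow rather than silently allocating a too-small buffer (kcalloc additionally zeroes the memory, preserving the kzalloc semantics). Here is a user-space analogue of that check; alloc_array() is a hypothetical helper, not a kernel or libc function, and the overflow test shown is equivalent in spirit to what the kernel helpers do.

/* Overflow-checked array allocation, analogous to kmalloc_array(). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_array(size_t n, size_t size)
{
        if (size && n > SIZE_MAX / size)        /* product would overflow size_t */
                return NULL;
        return malloc(n * size);
}

int main(void)
{
        int *addrs = alloc_array(16, sizeof(*addrs));

        if (!addrs)
                return 1;
        printf("allocated %zu bytes\n", 16 * sizeof(*addrs));

        /* An overflowing request is refused rather than truncated. */
        if (!alloc_array(SIZE_MAX, sizeof(*addrs)))
                printf("oversized request rejected\n");

        free(addrs);
        return 0;
}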
diff --git a/net/core/iovec.c b/net/core/iovec.c
index b61869429f4c..827dd6beb49c 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -75,61 +75,6 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
 }
 
 /*
- * Copy kernel to iovec. Returns -EFAULT on error.
- */
-
-int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
-                      int offset, int len)
-{
-        int copy;
-        for (; len > 0; ++iov) {
-                /* Skip over the finished iovecs */
-                if (unlikely(offset >= iov->iov_len)) {
-                        offset -= iov->iov_len;
-                        continue;
-                }
-                copy = min_t(unsigned int, iov->iov_len - offset, len);
-                if (copy_to_user(iov->iov_base + offset, kdata, copy))
-                        return -EFAULT;
-                offset = 0;
-                kdata += copy;
-                len -= copy;
-        }
-
-        return 0;
-}
-EXPORT_SYMBOL(memcpy_toiovecend);
-
-/*
- * Copy iovec to kernel. Returns -EFAULT on error.
- */
-
-int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
-                        int offset, int len)
-{
-        /* Skip over the finished iovecs */
-        while (offset >= iov->iov_len) {
-                offset -= iov->iov_len;
-                iov++;
-        }
-
-        while (len > 0) {
-                u8 __user *base = iov->iov_base + offset;
-                int copy = min_t(unsigned int, len, iov->iov_len - offset);
-
-                offset = 0;
-                if (copy_from_user(kdata, base, copy))
-                        return -EFAULT;
-                len -= copy;
-                kdata += copy;
-                iov++;
-        }
-
-        return 0;
-}
-EXPORT_SYMBOL(memcpy_fromiovecend);
-
-/*
  * And now for the all-in-one: copy and checksum from a user iovec
  * directly to a datagram
  * Calls to csum_partial but the last must be in 32 bit chunks
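For reference, the removed memcpy_fromiovecend() walks the iovec array, skipping whole entries covered by the starting offset before copying len bytes into a flat buffer. Below is a standalone user-space sketch of that same walk; the helper name copy_from_iovec_end() is hypothetical, and memcpy stands in for copy_from_user, so this illustrates only the iteration logic, not the kernel's user-access handling.

/* Walk an iovec array from a byte offset and gather into a flat buffer. */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

static int copy_from_iovec_end(unsigned char *kdata, const struct iovec *iov,
                               size_t offset, size_t len)
{
        /* Skip over the finished iovecs */
        while (offset >= iov->iov_len) {
                offset -= iov->iov_len;
                iov++;
        }

        while (len > 0) {
                unsigned char *base = (unsigned char *)iov->iov_base + offset;
                size_t avail = iov->iov_len - offset;
                size_t copy = len < avail ? len : avail;

                offset = 0;
                memcpy(kdata, base, copy);
                len -= copy;
                kdata += copy;
                iov++;
        }

        return 0;
}

int main(void)
{
        char a[] = "hello ", b[] = "world";
        struct iovec iov[2] = {
                { .iov_base = a, .iov_len = 6 },
                { .iov_base = b, .iov_len = 5 },
        };
        unsigned char out[16] = { 0 };

        copy_from_iovec_end(out, iov, 2, 9);    /* gathers "llo world" */
        printf("%s\n", (char *)out);
        return 0;
}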
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9cd5344fad73..c1a33033cbe2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2993,7 +2993,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
                                                             skb_put(nskb, len),
                                                             len, 0);
                         SKB_GSO_CB(nskb)->csum_start =
-                                skb_headroom(nskb) + offset;
+                                skb_headroom(nskb) + doffset;
                         continue;
                 }
 