From 1a028e50729b85d0a038fad13daf0ee201a37454 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Fri, 27 Apr 2007 15:21:23 -0700
Subject: [NET]: Revert sk_buff walker cleanups.

This reverts eefa3906283a2b60a6d02a2cda593a7d7d7946c5

The simplification made in that change works with the assumption that
the 'offset' parameter to these functions is always positive or zero,
which is not true.  It can be and often is negative in order to access
SKB header values in front of skb->data.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 net/appletalk/ddp.c  |  25 +++++++----
 net/core/datagram.c  |  50 ++++++++++++++-------
 net/core/skbuff.c    | 122 +++++++++++++++++++++++++++++++++------------------
 net/core/user_dma.c  |  25 +++++++----
 net/xfrm/xfrm_algo.c |  22 ++++++----
 5 files changed, 158 insertions(+), 86 deletions(-)

diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 16eda21fb38c..f6a92a0b7aa6 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -937,11 +937,11 @@ static unsigned long atalk_sum_partial(const unsigned char *data,
 static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 				   int len, unsigned long sum)
 {
-	int end = skb_headlen(skb);
+	int start = skb_headlen(skb);
 	int i, copy;
 
 	/* checksum stuff in header space */
-	if ((copy = end - offset) > 0) {
+	if ( (copy = start - offset) > 0) {
 		if (copy > len)
 			copy = len;
 		sum = atalk_sum_partial(skb->data + offset, copy, sum);
@@ -953,9 +953,11 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 
 	/* checksum stuff in frags */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -963,31 +965,36 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 			if (copy > len)
 				copy = len;
 			vaddr = kmap_skb_frag(frag);
-			sum = atalk_sum_partial(vaddr + frag->page_offset,
-						copy, sum);
+			sum = atalk_sum_partial(vaddr + frag->page_offset +
+						offset - start, copy, sum);
 			kunmap_skb_frag(vaddr);
 			if (!(len -= copy))
 				return sum;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
+
+			BUG_TRAP(start <= offset + len);
 
-			end = offset + list->len;
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				sum = atalk_sum_skb(list, 0, copy, sum);
+				sum = atalk_sum_skb(list, offset - start,
+						    copy, sum);
 				if ((len -= copy) == 0)
 					return sum;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e1afa7679445..cb056f476126 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -247,8 +247,8 @@ EXPORT_SYMBOL(skb_kill_datagram);
 int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 			    struct iovec *to, int len)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 
 	/* Copy header. */
 	if (copy > 0) {
@@ -263,9 +263,11 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 
 	/* Copy paged appendix. Hmm... why does this look so complicated? */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			int err;
 			u8 *vaddr;
@@ -275,8 +277,8 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 			if (copy > len)
 				copy = len;
 			vaddr = kmap(page);
-			err = memcpy_toiovec(to, vaddr + frag->page_offset,
-					     copy);
+			err = memcpy_toiovec(to, vaddr + frag->page_offset +
+					     offset - start, copy);
 			kunmap(page);
 			if (err)
 				goto fault;
@@ -284,24 +286,30 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 				return 0;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
+
+			BUG_TRAP(start <= offset + len);
 
-			end = offset + list->len;
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				if (skb_copy_datagram_iovec(list, 0, to, copy))
+				if (skb_copy_datagram_iovec(list,
+							    offset - start,
+							    to, copy))
 					goto fault;
 				if ((len -= copy) == 0)
 					return 0;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
 	if (!len)
@@ -315,9 +323,9 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				      u8 __user *to, int len,
 				      __wsum *csump)
 {
-	int end = skb_headlen(skb);
+	int start = skb_headlen(skb);
 	int pos = 0;
-	int i, copy = end - offset;
+	int i, copy = start - offset;
 
 	/* Copy header. */
 	if (copy > 0) {
@@ -336,9 +344,11 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			int err = 0;
@@ -350,7 +360,8 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				copy = len;
 			vaddr = kmap(page);
 			csum2 = csum_and_copy_to_user(vaddr +
-							frag->page_offset,
+							frag->page_offset +
+							offset - start,
 							to, copy, 0, &err);
 			kunmap(page);
 			if (err)
@@ -362,20 +373,24 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 			to += copy;
 			pos += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list=list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
+
+			BUG_TRAP(start <= offset + len);
 
-			end = offset + list->len;
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				__wsum csum2 = 0;
 				if (copy > len)
 					copy = len;
-				if (skb_copy_and_csum_datagram(list, 0,
+				if (skb_copy_and_csum_datagram(list,
+							       offset - start,
 							       to, copy,
 							       &csum2))
 					goto fault;
@@ -386,6 +401,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 			to += copy;
 			pos += copy;
 		}
+		start = end;
 	}
 	if (!len)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 32f087b5233e..142257307fa2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1045,13 +1045,13 @@ pull_pages:
 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 {
 	int i, copy;
-	int end = skb_headlen(skb);
+	int start = skb_headlen(skb);
 
 	if (offset > (int)skb->len - len)
 		goto fault;
 
 	/* Copy header. */
-	if ((copy = end - offset) > 0) {
+	if ((copy = start - offset) > 0) {
 		if (copy > len)
 			copy = len;
 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
@@ -1062,9 +1062,11 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
@@ -1073,8 +1075,8 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
 			memcpy(to,
-			       vaddr + skb_shinfo(skb)->frags[i].page_offset,
-			       copy);
+			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
+			       offset - start, copy);
 			kunmap_skb_frag(vaddr);
 
 			if ((len -= copy) == 0)
@@ -1082,25 +1084,30 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 			offset += copy;
 			to     += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
+
+			BUG_TRAP(start <= offset + len);
 
-			end = offset + list->len;
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				if (skb_copy_bits(list, 0, to, copy))
+				if (skb_copy_bits(list, offset - start,
+						  to, copy))
 					goto fault;
 				if ((len -= copy) == 0)
 					return 0;
 				offset += copy;
 				to     += copy;
 			}
+			start = end;
 		}
 	}
 	if (!len)
@@ -1125,12 +1132,12 @@ fault:
 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 {
 	int i, copy;
-	int end = skb_headlen(skb);
+	int start = skb_headlen(skb);
 
 	if (offset > (int)skb->len - len)
 		goto fault;
 
-	if ((copy = end - offset) > 0) {
+	if ((copy = start - offset) > 0) {
 		if (copy > len)
 			copy = len;
 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
@@ -1142,9 +1149,11 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		BUG_TRAP(len >= 0);
+		int end;
+
+		BUG_TRAP(start <= offset + len);
 
-		end = offset + frag->size;
+		end = start + frag->size;
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
@@ -1152,7 +1161,8 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 				copy = len;
 
 			vaddr = kmap_skb_frag(frag);
-			memcpy(vaddr + frag->page_offset, from, copy);
+			memcpy(vaddr + frag->page_offset + offset - start,
+			       from, copy);
 			kunmap_skb_frag(vaddr);
 
 			if ((len -= copy) == 0)
@@ -1160,25 +1170,30 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 			offset += copy;
 			from += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				if (skb_store_bits(list, 0, from, copy))
+				if (skb_store_bits(list, offset - start,
+						   from, copy))
 					goto fault;
 				if ((len -= copy) == 0)
 					return 0;
 				offset += copy;
 				from += copy;
 			}
+			start = end;
 		}
 	}
 	if (!len)
@@ -1195,8 +1210,8 @@ EXPORT_SYMBOL(skb_store_bits);
 __wsum skb_checksum(const struct sk_buff *skb, int offset,
 		    int len, __wsum csum)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	int pos = 0;
 
 	/* Checksum header. */
@@ -1211,9 +1226,11 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
+
+		BUG_TRAP(start <= offset + len);
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
@@ -1222,8 +1239,8 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 			if (copy > len)
 				copy = len;
 			vaddr = kmap_skb_frag(frag);
-			csum2 = csum_partial(vaddr + frag->page_offset,
-					     copy, 0);
+			csum2 = csum_partial(vaddr + frag->page_offset +
+					     offset - start, copy, 0);
 			kunmap_skb_frag(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
@@ -1231,26 +1248,31 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 			offset += copy;
 			pos    += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				__wsum csum2;
 				if (copy > len)
 					copy = len;
-				csum2 = skb_checksum(list, 0, copy, 0);
+				csum2 = skb_checksum(list, offset - start,
+						     copy, 0);
 				csum = csum_block_add(csum, csum2, pos);
 				if ((len -= copy) == 0)
 					return csum;
 				offset += copy;
 				pos    += copy;
 			}
+			start = end;
 		}
 	}
 	BUG_ON(len);
@@ -1263,8 +1285,8 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 			      u8 *to, int len, __wsum csum)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	int pos = 0;
 
 	/* Copy header. */
@@ -1281,9 +1303,11 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
+
+		BUG_TRAP(start <= offset + len);
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
@@ -1293,8 +1317,9 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 				copy = len;
 			vaddr = kmap_skb_frag(frag);
 			csum2 = csum_partial_copy_nocheck(vaddr +
-							  frag->page_offset,
-							  to, copy, 0);
+							  frag->page_offset +
+							  offset - start, to,
+							  copy, 0);
 			kunmap_skb_frag(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
@@ -1303,6 +1328,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 			to     += copy;
 			pos    += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
@@ -1310,13 +1336,16 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 		for (; list; list = list->next) {
 			__wsum csum2;
-			BUG_TRAP(len >= 0);
+			int end;
+
+			BUG_TRAP(start <= offset + len);
 
-			end = offset + list->len;
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				csum2 = skb_copy_and_csum_bits(list, 0,
+				csum2 = skb_copy_and_csum_bits(list,
+							       offset - start,
 							       to, copy, 0);
 				csum = csum_block_add(csum, csum2, pos);
 				if ((len -= copy) == 0)
@@ -1325,6 +1354,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 				to     += copy;
 				pos    += copy;
 			}
+			start = end;
 		}
 	}
 	BUG_ON(len);
@@ -1996,8 +2026,8 @@ void __init skb_init(void)
 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
 		 int len)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	int elt = 0;
 
 	if (copy > 0) {
@@ -2013,39 +2043,45 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			if (copy > len)
 				copy = len;
 			sg[elt].page = frag->page;
-			sg[elt].offset = frag->page_offset;
+			sg[elt].offset = frag->page_offset+offset-start;
 			sg[elt].length = copy;
 			elt++;
 			if (!(len -= copy))
 				return elt;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
+
+			BUG_TRAP(start <= offset + len);
 
-			end = offset + list->len;
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				elt += skb_to_sgvec(list, sg+elt, 0, copy);
+				elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
 				if ((len -= copy) == 0)
 					return elt;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
 	BUG_ON(len);
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index 89241cdeea3f..0ad1cd57bc39 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -49,8 +49,8 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 			struct sk_buff *skb, int offset, struct iovec *to,
 			size_t len, struct dma_pinned_list *pinned_list)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	dma_cookie_t cookie = 0;
 
 	/* Copy header. */
@@ -69,9 +69,11 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 
 	/* Copy paged appendix. Hmm... why does this look so complicated? */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		copy = end - offset;
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -80,8 +82,8 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 			if (copy > len)
 				copy = len;
 
-			cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list,
-					page, frag->page_offset, copy);
+			cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list, page,
+					frag->page_offset + offset - start, copy);
 			if (cookie < 0)
 				goto fault;
 			len -= copy;
@@ -89,21 +91,25 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 				goto end;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
+
+			BUG_TRAP(start <= offset + len);
 
-			end = offset + list->len;
+			end = start + list->len;
 			copy = end - offset;
 			if (copy > 0) {
 				if (copy > len)
 					copy = len;
 				cookie = dma_skb_copy_datagram_iovec(chan, list,
-						0, to, copy, pinned_list);
+						offset - start, to, copy,
+						pinned_list);
 				if (cookie < 0)
 					goto fault;
 				len -= copy;
@@ -111,6 +117,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 					goto end;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index be529c4241a6..6249a9405bb8 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -532,8 +532,8 @@ EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
 int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 		 int offset, int len, icv_update_fn_t icv_update)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	int err;
 	struct scatterlist sg;
 
@@ -556,9 +556,11 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
@@ -566,7 +568,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 				copy = len;
 
 			sg.page = frag->page;
-			sg.offset = frag->page_offset;
+			sg.offset = frag->page_offset + offset-start;
 			sg.length = copy;
 
 			err = icv_update(desc, &sg, copy);
@@ -577,19 +579,22 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 				return 0;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
+
+			BUG_TRAP(start <= offset + len);
 
-			end = offset + list->len;
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				err = skb_icv_walk(list, desc, 0,
+				err = skb_icv_walk(list, desc, offset-start,
 						   copy, icv_update);
 				if (unlikely(err))
 					return err;
@@ -597,6 +602,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 					return 0;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
 	BUG_ON(len);
-- 
cgit v1.2.2
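The walker pattern restored by the patch above is easiest to see in isolation. The fragment below is an illustrative sketch only, using simplified stand-in types (struct pkt, struct frag) rather than the real sk_buff structures: 'start' and 'end' track where the linear head and each fragment begin and end in the packet's logical byte space, so a negative 'offset' (data sitting in the headroom in front of the linear head) simply enlarges the first copy, and each fragment is read at 'offset - start' within its own page.

/* Illustrative sketch only -- simplified stand-ins, not the kernel types. */
#include <string.h>

struct frag {
	const unsigned char *page;	/* base of the fragment's backing page */
	int page_offset;		/* where the fragment's data starts */
	int size;			/* bytes in this fragment */
};

struct pkt {
	const unsigned char *data;	/* linear head; headroom lies in front */
	int headlen;			/* bytes in the linear head */
	struct frag *frags;
	int nr_frags;
};

/*
 * Copy 'len' bytes starting at 'offset' into 'to'.  'offset' may be
 * negative, meaning "start in the headroom in front of data"; the caller
 * must guarantee those bytes exist, just as an skb's headroom does.
 */
static int pkt_copy_bits(const struct pkt *p, int offset,
			 unsigned char *to, int len)
{
	int start = p->headlen;
	int i, copy = start - offset;	/* negative offset => copy > headlen */

	if (copy > 0) {
		if (copy > len)
			copy = len;
		memcpy(to, p->data + offset, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < p->nr_frags; i++) {
		int end = start + p->frags[i].size;

		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			memcpy(to, p->frags[i].page + p->frags[i].page_offset +
			       offset - start, copy);
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;	/* next fragment begins where this one ends */
	}
	return len ? -1 : 0;	/* -1: request ran past the paged data */
}

With offset = -4, for example, the first step copies headlen + 4 bytes beginning four bytes in front of data, which is exactly the negative-offset case the reverted cleanup could not handle.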

From 47051a2152f8b2355ee70249a0faaf7b682e8ce5 Mon Sep 17 00:00:00 2001
From: David Howells <dhowells@redhat.com>
Date: Fri, 27 Apr 2007 15:26:30 -0700
Subject: [AFS]: Fix VLocation record update wakeup

Fix the wakeup transitions after a VLocation record update completes one
way or another.  This builds on Dave Miller's partial fix.

Also move wakeups outside the spinlocked sections.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 fs/afs/vlocation.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index 74cce174882a..6c8e95a7c2c9 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -416,8 +416,8 @@ fill_in_record:
 		goto error_abandon;
 	spin_lock(&vl->lock);
 	vl->state = AFS_VL_VALID;
-	wake_up(&vl->waitq);
 	spin_unlock(&vl->lock);
+	wake_up(&vl->waitq);
 
 	/* schedule for regular updates */
 	afs_vlocation_queue_for_updates(vl);
@@ -442,7 +442,7 @@ found_in_memory:
 
 	_debug("invalid [state %d]", state);
 
-	if ((state == AFS_VL_NEW || state == AFS_VL_NO_VOLUME)) {
+	if (state == AFS_VL_NEW || state == AFS_VL_NO_VOLUME) {
 		vl->state = AFS_VL_CREATING;
 		spin_unlock(&vl->lock);
 		goto fill_in_record;
@@ -453,11 +453,10 @@ found_in_memory:
 	_debug("wait");
 
 	spin_unlock(&vl->lock);
-	ret = wait_event_interruptible(
-		vl->waitq,
-		vl->state == AFS_VL_NEW ||
-		vl->state == AFS_VL_VALID ||
-		vl->state == AFS_VL_NO_VOLUME);
+	ret = wait_event_interruptible(vl->waitq,
+				       vl->state == AFS_VL_NEW ||
+				       vl->state == AFS_VL_VALID ||
+				       vl->state == AFS_VL_NO_VOLUME);
 	if (ret < 0)
 		goto error;
 	spin_lock(&vl->lock);
@@ -471,8 +470,8 @@ success:
 error_abandon:
 	spin_lock(&vl->lock);
 	vl->state = AFS_VL_NEW;
-	wake_up(&vl->waitq);
 	spin_unlock(&vl->lock);
+	wake_up(&vl->waitq);
 error:
 	ASSERT(vl != NULL);
 	afs_put_vlocation(vl);
@@ -675,7 +674,6 @@ static void afs_vlocation_updater(struct work_struct *work)
 	case 0:
 		afs_vlocation_apply_update(vl, &vldb);
 		vl->state = AFS_VL_VALID;
-		wake_up(&vl->waitq);
 		break;
 	case -ENOMEDIUM:
 		vl->state = AFS_VL_VOLUME_DELETED;
@@ -685,6 +683,7 @@ static void afs_vlocation_updater(struct work_struct *work)
 		break;
 	}
 	spin_unlock(&vl->lock);
+	wake_up(&vl->waitq);
 
 	/* and then reschedule */
 	_debug("reschedule");
-- 
cgit v1.2.2
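The locking change above follows a common pattern: publish the new state while holding the spinlock, but issue the wakeup only after the lock has been dropped, so a woken waiter does not immediately contend for the same lock. A minimal sketch, using a hypothetical structure rather than the AFS vlocation record:

#include <linux/spinlock.h>
#include <linux/wait.h>

struct record {				/* hypothetical stand-in for afs_vlocation */
	spinlock_t		lock;
	wait_queue_head_t	waitq;
	int			state;
};

static void record_set_state(struct record *r, int state)
{
	spin_lock(&r->lock);
	r->state = state;		/* publish the state under the lock */
	spin_unlock(&r->lock);
	wake_up(&r->waitq);		/* wake waiters after dropping the lock */
}

Waiters pair with this through wait_event_interruptible(r->waitq, r->state == ...), as the patch does; waking after the unlock is safe because the waiter re-checks its condition once it runs.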

From b1bdb691c3c38b4fbaf99fa8474f5cfa99b2d774 Mon Sep 17 00:00:00 2001
From: David Howells <dhowells@redhat.com>
Date: Fri, 27 Apr 2007 15:28:45 -0700
Subject: [AF_RXRPC/AFS]: Arch-specific fixes.

Fixes for various arch compilation problems:

 (*) Missing module exports.

 (*) Variable name collision when rxkad and af_rxrpc both built in
     (rxrpc_debug).

 (*) Large constant representation problem (AFS_UUID_TO_UNIX_TIME).

 (*) Configuration dependencies.

 (*) printk() format warnings.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/ia64/lib/csum_partial_copy.c | 2 ++
 fs/Kconfig                        | 1 +
 fs/afs/internal.h                 | 2 +-
 fs/afs/rxrpc.c                    | 2 +-
 fs/afs/use-rtnetlink.c            | 2 +-
 net/rxrpc/Kconfig                 | 5 +++++
 net/rxrpc/rxkad.c                 | 1 +
 7 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/arch/ia64/lib/csum_partial_copy.c b/arch/ia64/lib/csum_partial_copy.c
index 503dfe6d1450..118daf5a0632 100644
--- a/arch/ia64/lib/csum_partial_copy.c
+++ b/arch/ia64/lib/csum_partial_copy.c
@@ -128,6 +128,8 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
 	return (__force __wsum)result;
 }
 
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+
 __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
diff --git a/fs/Kconfig b/fs/Kconfig
index e33c08924572..a42f767dcdd5 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -2020,6 +2020,7 @@ config AFS_FS
 	tristate "Andrew File System support (AFS) (EXPERIMENTAL)"
 	depends on INET && EXPERIMENTAL
 	select AF_RXRPC
+	select KEYS
 	help
 	  If you say Y here, you will get an experimental Andrew File System
 	  driver. It currently only supports unsecured read-only AFS access.
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 6dd3197d1d8d..34665f7d7a19 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -367,7 +367,7 @@ struct afs_uuid {
 	u32		time_low;		/* low part of timestamp */
 	u16		time_mid;		/* mid part of timestamp */
 	u16		time_hi_and_version;	/* high part of timestamp and version */
-#define AFS_UUID_TO_UNIX_TIME	0x01b21dd213814000
+#define AFS_UUID_TO_UNIX_TIME	0x01b21dd213814000ULL
 #define AFS_UUID_TIMEHI_MASK	0x0fff
 #define AFS_UUID_VERSION_TIME	0x1000	/* time-based UUID */
 #define AFS_UUID_VERSION_NAME	0x3000	/* name-based UUID */
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index e7b047328a39..222c1a3abbb8 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -772,7 +772,7 @@ int afs_extract_data(struct afs_call *call, struct sk_buff *skb,
 
 	if (call->offset < count) {
 		if (last) {
-			_leave(" = -EBADMSG [%d < %lu]", call->offset, count);
+			_leave(" = -EBADMSG [%d < %zu]", call->offset, count);
 			return -EBADMSG;
 		}
 		_leave(" = -EAGAIN");
diff --git a/fs/afs/use-rtnetlink.c b/fs/afs/use-rtnetlink.c
index 82f0daa28970..f8991c700e02 100644
--- a/fs/afs/use-rtnetlink.c
+++ b/fs/afs/use-rtnetlink.c
@@ -243,7 +243,7 @@ static int afs_read_rtm(struct afs_rtm_desc *desc)
 	desc->datalen = kernel_recvmsg(desc->nlsock, &msg, iov, 1,
 				       desc->datamax, 0);
 	if (desc->datalen < 0) {
-		_leave(" = %ld [recv]", desc->datalen);
+		_leave(" = %zd [recv]", desc->datalen);
 		return desc->datalen;
 	}
 
diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig
index d72380e304ae..8750f6da6bc7 100644
--- a/net/rxrpc/Kconfig
+++ b/net/rxrpc/Kconfig
@@ -30,6 +30,11 @@ config AF_RXRPC_DEBUG
 config RXKAD
 	tristate "RxRPC Kerberos security"
 	depends on AF_RXRPC && KEYS
+	select CRYPTO
+	select CRYPTO_MANAGER
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_PCBC
+	select CRYPTO_FCRYPT
 	help
 	  Provide kerberos 4 and AFS kaserver security handling for AF_RXRPC
 	  through the use of the key retention service.
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 1eaf529efac1..5ec705144e10 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#define rxrpc_debug rxkad_debug
 #include "ar-internal.h"
 
 #define RXKAD_VERSION	2
-- 
cgit v1.2.2

From b8b8fd2dc23725fba77f66b3fef11b11f983fc08 Mon Sep 17 00:00:00 2001
From: David Howells <dhowells@redhat.com>
Date: Fri, 27 Apr 2007 15:31:24 -0700
Subject: [NET]: Fix networking compilation errors

Fix miscellaneous networking compilation errors.

 (*) Export ktime_add_ns() for modules.

 (*) wext_proc_init() should have an ANSI declaration.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/net/wext.h | 2 +-
 kernel/hrtimer.c   | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/include/net/wext.h b/include/net/wext.h
index 55741836a675..c02b8decf3af 100644
--- a/include/net/wext.h
+++ b/include/net/wext.h
@@ -10,7 +10,7 @@ extern int wext_proc_init(void);
 extern int wext_handle_ioctl(struct ifreq *ifr, unsigned int cmd,
 			     void __user *arg);
 #else
-static inline int wext_proc_init()
+static inline int wext_proc_init(void)
 {
 	return 0;
 }
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f5cfde8c9025..1b3033105b40 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -279,6 +279,8 @@ ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
 
 	return ktime_add(kt, tmp);
 }
+
+EXPORT_SYMBOL_GPL(ktime_add_ns);
 # endif /* !CONFIG_KTIME_SCALAR */
 
 /*
-- 
cgit v1.2.2
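Two of the portability fixes in the patches above recur often enough to be worth a standalone illustration: a constant wider than 32 bits needs an explicit ULL suffix to be representable on 32-bit targets, and size_t/ssize_t values should be printed with %zu/%zd rather than %lu/%ld. The snippet below is a small userspace illustration; UUID_TO_UNIX_TIME is a hypothetical stand-in for AFS_UUID_TO_UNIX_TIME, not the kernel definition.

#include <stdio.h>
#include <stddef.h>

/*
 * 100ns ticks between the UUID epoch (15 Oct 1582) and the Unix epoch.
 * The value does not fit in a 32-bit long, so older compilers warn or
 * reject it unless the ULL suffix forces an unsigned long long constant.
 */
#define UUID_TO_UNIX_TIME	0x01b21dd213814000ULL

int main(void)
{
	size_t width = sizeof(UUID_TO_UNIX_TIME);

	/* %zu matches size_t on every architecture; %lu only works where
	 * size_t happens to be unsigned long, which is what the %zu/%zd
	 * fixes in fs/afs above address. */
	printf("constant is %zu bytes wide: %#llx\n",
	       width, (unsigned long long)UUID_TO_UNIX_TIME);
	return 0;
}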