Diffstat (limited to 'net')
-rw-r--r--  net/appletalk/ddp.c  |  6
-rw-r--r--  net/core/kmap_skb.h  | 19
-rw-r--r--  net/core/skbuff.c    | 42
3 files changed, 24 insertions(+), 43 deletions(-)
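
This patch removes the private kmap_skb_frag()/kunmap_skb_frag() wrappers and
converts every caller to the bare highmem API, deleting the header that
defined them. Condensed from the hunks below, the call-site pattern changes
as follows (a sketch only; vaddr, frag, to, offset, start and copy stand for
the locals each caller already has):

	/* before: wrapper from the now-deleted net/core/kmap_skb.h */
	vaddr = kmap_skb_frag(frag);
	memcpy(to, vaddr + frag->page_offset + offset - start, copy);
	kunmap_skb_frag(vaddr);

	/* after: map the fragment's page directly */
	vaddr = kmap_atomic(skb_frag_page(frag));
	memcpy(to, vaddr + frag->page_offset + offset - start, copy);
	kunmap_atomic(vaddr);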
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index bfa9ab93eda5..0301b328cf0f 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -63,7 +63,7 @@
 #include <net/tcp_states.h>
 #include <net/route.h>
 #include <linux/atalk.h>
-#include "../core/kmap_skb.h"
+#include <linux/highmem.h>
 
 struct datalink_proto *ddp_dl, *aarp_dl;
 static const struct proto_ops atalk_dgram_ops;
@@ -960,10 +960,10 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 
 		if (copy > len)
 			copy = len;
-		vaddr = kmap_skb_frag(frag);
+		vaddr = kmap_atomic(skb_frag_page(frag));
 		sum = atalk_sum_partial(vaddr + frag->page_offset +
 					offset - start, copy, sum);
-		kunmap_skb_frag(vaddr);
+		kunmap_atomic(vaddr);
 
 		if (!(len -= copy))
 			return sum;
diff --git a/net/core/kmap_skb.h b/net/core/kmap_skb.h
deleted file mode 100644
index 52d0a4459041..000000000000
--- a/net/core/kmap_skb.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#include <linux/highmem.h>
-
-static inline void *kmap_skb_frag(const skb_frag_t *frag)
-{
-#ifdef CONFIG_HIGHMEM
-	BUG_ON(in_irq());
-
-	local_bh_disable();
-#endif
-	return kmap_atomic(skb_frag_page(frag));
-}
-
-static inline void kunmap_skb_frag(void *vaddr)
-{
-	kunmap_atomic(vaddr);
-#ifdef CONFIG_HIGHMEM
-	local_bh_enable();
-#endif
-}
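
On CONFIG_HIGHMEM kernels the deleted helpers bracketed the atomic kmap with
local_bh_disable()/local_bh_enable() (and asserted !in_irq()), shielding the
mapping from reentry out of softirq context. Dropping them with no
replacement suggests kmap_atomic() no longer needs that guard, presumably
since atomic kmaps became stack-based rather than fixed per-type slots, so
the call sites in skbuff.c below switch to the bare calls.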
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a690cae91cdd..b2595adb605f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -68,8 +68,7 @@
 #include <asm/uaccess.h>
 #include <asm/system.h>
 #include <trace/events/skb.h>
-
-#include "kmap_skb.h"
+#include <linux/highmem.h>
 
 static struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
@@ -708,10 +707,10 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 			}
 			return -ENOMEM;
 		}
-		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+		vaddr = kmap_atomic(skb_frag_page(f));
 		memcpy(page_address(page),
 		       vaddr + f->page_offset, skb_frag_size(f));
-		kunmap_skb_frag(vaddr);
+		kunmap_atomic(vaddr);
 		page->private = (unsigned long)head;
 		head = page;
 	}
@@ -1486,21 +1485,22 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
+		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		end = start + skb_frag_size(f);
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
 			if (copy > len)
 				copy = len;
 
-			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+			vaddr = kmap_atomic(skb_frag_page(f));
 			memcpy(to,
-			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
-			       offset - start, copy);
-			kunmap_skb_frag(vaddr);
+			       vaddr + f->page_offset + offset - start,
+			       copy);
+			kunmap_atomic(vaddr);
 
 			if ((len -= copy) == 0)
 				return 0;
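
Beyond the mechanical kmap swap, this hunk also hoists
&skb_shinfo(skb)->frags[i] into the local f at the top of the loop, so the
size lookup, the page mapping and the offset arithmetic all go through one
short pointer; the skb_checksum() hunk below does the same with its existing
frag local, moving its declaration up a scope.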
@@ -1805,10 +1805,10 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 			if (copy > len)
 				copy = len;
 
-			vaddr = kmap_skb_frag(frag);
+			vaddr = kmap_atomic(skb_frag_page(frag));
 			memcpy(vaddr + frag->page_offset + offset - start,
 			       from, copy);
-			kunmap_skb_frag(vaddr);
+			kunmap_atomic(vaddr);
 
 			if ((len -= copy) == 0)
 				return 0;
@@ -1868,21 +1868,21 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		end = start + skb_frag_size(frag);
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
-			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			if (copy > len)
 				copy = len;
-			vaddr = kmap_skb_frag(frag);
+			vaddr = kmap_atomic(skb_frag_page(frag));
 			csum2 = csum_partial(vaddr + frag->page_offset +
 					     offset - start, copy, 0);
-			kunmap_skb_frag(vaddr);
+			kunmap_atomic(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
 				return csum;
@@ -1954,12 +1954,12 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 
 			if (copy > len)
 				copy = len;
-			vaddr = kmap_skb_frag(frag);
+			vaddr = kmap_atomic(skb_frag_page(frag));
 			csum2 = csum_partial_copy_nocheck(vaddr +
 							  frag->page_offset +
 							  offset - start, to,
 							  copy, 0);
-			kunmap_skb_frag(vaddr);
+			kunmap_atomic(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
 				return csum;
@@ -2479,7 +2479,7 @@ next_skb:
 
 	if (abs_offset < block_limit) {
 		if (!st->frag_data)
-			st->frag_data = kmap_skb_frag(frag);
+			st->frag_data = kmap_atomic(skb_frag_page(frag));
 
 		*data = (u8 *) st->frag_data + frag->page_offset +
 			(abs_offset - st->stepped_offset);
@@ -2488,7 +2488,7 @@ next_skb:
 	}
 
 	if (st->frag_data) {
-		kunmap_skb_frag(st->frag_data);
+		kunmap_atomic(st->frag_data);
 		st->frag_data = NULL;
 	}
 
@@ -2497,7 +2497,7 @@ next_skb:
 	}
 
 	if (st->frag_data) {
-		kunmap_skb_frag(st->frag_data);
+		kunmap_atomic(st->frag_data);
 		st->frag_data = NULL;
 	}
 
@@ -2525,7 +2525,7 @@ EXPORT_SYMBOL(skb_seq_read);
 void skb_abort_seq_read(struct skb_seq_state *st)
 {
 	if (st->frag_data)
-		kunmap_skb_frag(st->frag_data);
+		kunmap_atomic(st->frag_data);
 }
 EXPORT_SYMBOL(skb_abort_seq_read);
 
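
All of the converted call sites share the same walk-the-fragments shape. A
self-contained sketch of the post-patch idiom, with an invented function name
and a trivial byte-XOR standing in for the real memcpy/checksum work:

	#include <linux/skbuff.h>
	#include <linux/highmem.h>

	/* Hypothetical example, not part of the patch: visit each page
	 * fragment of an skb, mapping it atomically around the access.
	 */
	static u8 skb_frag_xor_all(const struct sk_buff *skb)
	{
		u8 acc = 0;
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			u8 *vaddr = kmap_atomic(skb_frag_page(frag));
			unsigned int j;

			/* vaddr is valid only until kunmap_atomic(); no sleeping */
			for (j = 0; j < skb_frag_size(frag); j++)
				acc ^= vaddr[frag->page_offset + j];
			kunmap_atomic(vaddr);
		}
		return acc;
	}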