path: root/net/core/skbuff.c
author		Eric Dumazet <eric.dumazet@gmail.com>	2012-04-05 05:35:15 -0400
committer	David S. Miller <davem@davemloft.net>	2012-04-05 05:36:43 -0400
commit		51c56b004e2c9a46207bb8a116589c2f84b92e5d (patch)
tree		a11714f3749262d1ab084960ab441ff0a1379331 /net/core/skbuff.c
parent		109d2446052a484c58f07f71f9457bf7b71017f8 (diff)
net: remove k{un}map_skb_frag()
Since commit 3e4d3af501 ("mm: stack based kmap_atomic()") we don't have to disable BH anymore while mapping skb frags. We can remove the kmap_skb_frag() / kunmap_skb_frag() helpers and use kmap_atomic() / kunmap_atomic() directly.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
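For context, the removed helpers lived in net/core/kmap_skb.h. On CONFIG_HIGHMEM builds they had to bracket each mapping with local_bh_disable()/local_bh_enable(), because the old fixed-slot kmap_atomic() used a per-CPU slot that a softirq on the same CPU could clobber. A sketch of the pre-patch helpers, reconstructed from memory rather than quoted verbatim from the tree:

/* net/core/kmap_skb.h -- removed by this patch (approximate) */
static inline void *kmap_skb_frag(const skb_frag_t *frag)
{
#ifdef CONFIG_HIGHMEM
	BUG_ON(in_irq());

	/* The slot-based kmap_atomic() could not nest safely against
	 * BH context, so softirqs had to be excluded for the whole
	 * lifetime of the mapping.
	 */
	local_bh_disable();
#endif
	return kmap_atomic(skb_frag_page(frag));
}

static inline void kunmap_skb_frag(void *vaddr)
{
	kunmap_atomic(vaddr);
#ifdef CONFIG_HIGHMEM
	local_bh_enable();
#endif
}

With the stack-based kmap_atomic() of commit 3e4d3af501, mappings nest in LIFO order on a small per-CPU stack of slots, so a softirq that takes a mapping while interrupting another one is harmless, and both the BH exclusion and the wrapper file can go away.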
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--	net/core/skbuff.c	42
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a690cae91cd..b2595adb605 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -68,8 +68,7 @@
 #include <asm/uaccess.h>
 #include <asm/system.h>
 #include <trace/events/skb.h>
-
-#include "kmap_skb.h"
+#include <linux/highmem.h>
 
 static struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
@@ -708,10 +707,10 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 			}
 			return -ENOMEM;
 		}
-		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+		vaddr = kmap_atomic(skb_frag_page(f));
 		memcpy(page_address(page),
 		       vaddr + f->page_offset, skb_frag_size(f));
-		kunmap_skb_frag(vaddr);
+		kunmap_atomic(vaddr);
 		page->private = (unsigned long)head;
 		head = page;
 	}
@@ -1486,21 +1485,22 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
+		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		end = start + skb_frag_size(f);
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
 			if (copy > len)
 				copy = len;
 
-			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+			vaddr = kmap_atomic(skb_frag_page(f));
 			memcpy(to,
-			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
-			       offset - start, copy);
-			kunmap_skb_frag(vaddr);
+			       vaddr + f->page_offset + offset - start,
+			       copy);
+			kunmap_atomic(vaddr);
 
 			if ((len -= copy) == 0)
 				return 0;
@@ -1805,10 +1805,10 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 			if (copy > len)
 				copy = len;
 
-			vaddr = kmap_skb_frag(frag);
+			vaddr = kmap_atomic(skb_frag_page(frag));
 			memcpy(vaddr + frag->page_offset + offset - start,
 			       from, copy);
-			kunmap_skb_frag(vaddr);
+			kunmap_atomic(vaddr);
 
 			if ((len -= copy) == 0)
 				return 0;
@@ -1868,21 +1868,21 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		end = start + skb_frag_size(frag);
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
-			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			if (copy > len)
 				copy = len;
-			vaddr = kmap_skb_frag(frag);
+			vaddr = kmap_atomic(skb_frag_page(frag));
 			csum2 = csum_partial(vaddr + frag->page_offset +
 					     offset - start, copy, 0);
-			kunmap_skb_frag(vaddr);
+			kunmap_atomic(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
 				return csum;
@@ -1954,12 +1954,12 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 
 			if (copy > len)
 				copy = len;
-			vaddr = kmap_skb_frag(frag);
+			vaddr = kmap_atomic(skb_frag_page(frag));
 			csum2 = csum_partial_copy_nocheck(vaddr +
 							  frag->page_offset +
 							  offset - start, to,
 							  copy, 0);
-			kunmap_skb_frag(vaddr);
+			kunmap_atomic(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
 				return csum;
@@ -2479,7 +2479,7 @@ next_skb:
 
 	if (abs_offset < block_limit) {
 		if (!st->frag_data)
-			st->frag_data = kmap_skb_frag(frag);
+			st->frag_data = kmap_atomic(skb_frag_page(frag));
 
 		*data = (u8 *) st->frag_data + frag->page_offset +
 			(abs_offset - st->stepped_offset);
@@ -2488,7 +2488,7 @@ next_skb:
 	}
 
 	if (st->frag_data) {
-		kunmap_skb_frag(st->frag_data);
+		kunmap_atomic(st->frag_data);
 		st->frag_data = NULL;
 	}
 
@@ -2497,7 +2497,7 @@ next_skb:
 	}
 
 	if (st->frag_data) {
-		kunmap_skb_frag(st->frag_data);
+		kunmap_atomic(st->frag_data);
 		st->frag_data = NULL;
 	}
 
@@ -2525,7 +2525,7 @@ EXPORT_SYMBOL(skb_seq_read);
 void skb_abort_seq_read(struct skb_seq_state *st)
 {
 	if (st->frag_data)
-		kunmap_skb_frag(st->frag_data);
+		kunmap_atomic(st->frag_data);
 }
 EXPORT_SYMBOL(skb_abort_seq_read);
 
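After the conversion, every frag access in skbuff.c follows the same three-step idiom; a minimal sketch of the resulting pattern, using the same identifiers (frag, to, offset, start, copy) that appear in the hunks above:

	u8 *vaddr;

	/* Map the page backing the frag: a plain address computation on
	 * !CONFIG_HIGHMEM, a stack-based atomic mapping on highmem.
	 */
	vaddr = kmap_atomic(skb_frag_page(frag));
	memcpy(to, vaddr + frag->page_offset + offset - start, copy);
	kunmap_atomic(vaddr);

Note that kmap_atomic() still disables preemption and page faults, so the code between map and unmap must not sleep; the patch drops only the extra BH exclusion, not the atomicity requirement.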