aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorChangli Gao <xiaosuo@gmail.com>2010-11-30 21:52:20 -0500
committerDavid S. Miller <davem@davemloft.net>2010-12-06 15:59:06 -0500
commit0af55bb58f8fa7865004ac48d16affe125ac1b7f (patch)
treeb91fb024121975b3897d2132da744abe288be378 /net
parentf7fce74e387e0563e5a165704664aa5ee8b2f48b (diff)
af_packet: use vmalloc_to_page() instead of virt_to_page() for the address returned by vmalloc()
The following commit means that pgv->buffer may point to memory returned by vmalloc(), and we can't use virt_to_page() on a vmalloc address. This patch introduces a new inline function pgv_to_page(), which calls vmalloc_to_page() for a vmalloc address and virt_to_page() for a __get_free_pages address. We used to increment the page pointer to get the next page at the next page address; after Neil's patch this is wrong, as the physical addresses may not be contiguous. This patch also fixes that issue. commit 0e3125c755445664f00ad036e4fc2cd32fd52877 Author: Neil Horman <nhorman@tuxdriver.com> Date: Tue Nov 16 10:26:47 2010 -0800 packet: Enhance AF_PACKET implementation to not require high order contiguous memory allocation (v4) Signed-off-by: Changli Gao <xiaosuo@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/packet/af_packet.c36
1 files changed, 19 insertions, 17 deletions
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 422705d62b5b..26fbeb140a6a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -224,6 +224,13 @@ struct packet_skb_cb {
224 224
225#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) 225#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
226 226
227static inline struct page *pgv_to_page(void *addr)
228{
229 if (is_vmalloc_addr(addr))
230 return vmalloc_to_page(addr);
231 return virt_to_page(addr);
232}
233
227static void __packet_set_status(struct packet_sock *po, void *frame, int status) 234static void __packet_set_status(struct packet_sock *po, void *frame, int status)
228{ 235{
229 union { 236 union {
@@ -236,11 +243,11 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status)
236 switch (po->tp_version) { 243 switch (po->tp_version) {
237 case TPACKET_V1: 244 case TPACKET_V1:
238 h.h1->tp_status = status; 245 h.h1->tp_status = status;
239 flush_dcache_page(virt_to_page(&h.h1->tp_status)); 246 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
240 break; 247 break;
241 case TPACKET_V2: 248 case TPACKET_V2:
242 h.h2->tp_status = status; 249 h.h2->tp_status = status;
243 flush_dcache_page(virt_to_page(&h.h2->tp_status)); 250 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
244 break; 251 break;
245 default: 252 default:
246 pr_err("TPACKET version not supported\n"); 253 pr_err("TPACKET version not supported\n");
@@ -263,10 +270,10 @@ static int __packet_get_status(struct packet_sock *po, void *frame)
263 h.raw = frame; 270 h.raw = frame;
264 switch (po->tp_version) { 271 switch (po->tp_version) {
265 case TPACKET_V1: 272 case TPACKET_V1:
266 flush_dcache_page(virt_to_page(&h.h1->tp_status)); 273 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
267 return h.h1->tp_status; 274 return h.h1->tp_status;
268 case TPACKET_V2: 275 case TPACKET_V2:
269 flush_dcache_page(virt_to_page(&h.h2->tp_status)); 276 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
270 return h.h2->tp_status; 277 return h.h2->tp_status;
271 default: 278 default:
272 pr_err("TPACKET version not supported\n"); 279 pr_err("TPACKET version not supported\n");
@@ -800,15 +807,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
800 __packet_set_status(po, h.raw, status); 807 __packet_set_status(po, h.raw, status);
801 smp_mb(); 808 smp_mb();
802 { 809 {
803 struct page *p_start, *p_end; 810 u8 *start, *end;
804 u8 *h_end = h.raw + macoff + snaplen - 1; 811
805 812 end = (u8 *)PAGE_ALIGN((unsigned long)h.raw + macoff + snaplen);
806 p_start = virt_to_page(h.raw); 813 for (start = h.raw; start < end; start += PAGE_SIZE)
807 p_end = virt_to_page(h_end); 814 flush_dcache_page(pgv_to_page(start));
808 while (p_start <= p_end) {
809 flush_dcache_page(p_start);
810 p_start++;
811 }
812 } 815 }
813 816
814 sk->sk_data_ready(sk, 0); 817 sk->sk_data_ready(sk, 0);
@@ -915,7 +918,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
915 } 918 }
916 919
917 err = -EFAULT; 920 err = -EFAULT;
918 page = virt_to_page(data);
919 offset = offset_in_page(data); 921 offset = offset_in_page(data);
920 len_max = PAGE_SIZE - offset; 922 len_max = PAGE_SIZE - offset;
921 len = ((to_write > len_max) ? len_max : to_write); 923 len = ((to_write > len_max) ? len_max : to_write);
@@ -934,11 +936,11 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
934 return -EFAULT; 936 return -EFAULT;
935 } 937 }
936 938
939 page = pgv_to_page(data);
940 data += len;
937 flush_dcache_page(page); 941 flush_dcache_page(page);
938 get_page(page); 942 get_page(page);
939 skb_fill_page_desc(skb, 943 skb_fill_page_desc(skb, nr_frags, page, offset, len);
940 nr_frags,
941 page++, offset, len);
942 to_write -= len; 944 to_write -= len;
943 offset = 0; 945 offset = 0;
944 len_max = PAGE_SIZE; 946 len_max = PAGE_SIZE;