Diffstat (limited to 'net/packet')
-rw-r--r--	net/packet/af_packet.c | 8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 246a04a13234..e79efaf06389 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2501,22 +2501,20 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
 	mutex_lock(&po->pg_vec_lock);
 	if (closing || atomic_read(&po->mapped) == 0) {
 		err = 0;
-#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
 		spin_lock_bh(&rb_queue->lock);
-		pg_vec = XC(rb->pg_vec, pg_vec);
+		swap(rb->pg_vec, pg_vec);
 		rb->frame_max = (req->tp_frame_nr - 1);
 		rb->head = 0;
 		rb->frame_size = req->tp_frame_size;
 		spin_unlock_bh(&rb_queue->lock);
 
-		order = XC(rb->pg_vec_order, order);
-		req->tp_block_nr = XC(rb->pg_vec_len, req->tp_block_nr);
+		swap(rb->pg_vec_order, order);
+		swap(rb->pg_vec_len, req->tp_block_nr);
 
 		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
 		po->prot_hook.func = (po->rx_ring.pg_vec) ?
 						tpacket_rcv : packet_rcv;
 		skb_queue_purge(rb_queue);
-#undef XC
 		if (atomic_read(&po->mapped))
 			pr_err("packet_mmap: vma is busy: %d\n",
 			       atomic_read(&po->mapped));
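Note on the change above: the open-coded XC() macro assigns its second argument to its first and returns the first argument's old value, so a statement such as "pg_vec = XC(rb->pg_vec, pg_vec)" is simply an exchange of the two variables; the kernel's generic swap() helper states that intent directly. The standalone userspace sketch below illustrates the equivalence. The swap() definition here is an illustrative stand-in rather than a copy of the kernel header, and both macros rely on GCC's statement-expression and __typeof__ extensions.

/*
 * Userspace sketch only (not kernel code): shows that "x = XC(y, x)"
 * and "swap(x, y)" have the same effect.
 */
#include <stdio.h>

/* hand-rolled exchange, as the removed XC() macro did:
 * assign b to a and return the old value of a */
#define XC(a, b) ({ __typeof__((a)) __t; __t = (a); (a) = (b); __t; })

/* swap-by-temporary, analogous in spirit to the kernel's swap() helper */
#define swap(a, b) \
	do { __typeof__(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

int main(void)
{
	int x = 1, y = 2;

	x = XC(y, x);	/* exchanges x and y via the returned old value */
	printf("after XC:   x=%d y=%d\n", x, y);	/* prints x=2 y=1 */

	swap(x, y);	/* same exchange, stated directly */
	printf("after swap: x=%d y=%d\n", x, y);	/* prints x=1 y=2 */

	return 0;
}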