author		Eric Dumazet <eric.dumazet@gmail.com>	2010-06-09 10:25:08 -0400
committer	Patrick McHardy <kaber@trash.net>	2010-06-09 10:25:08 -0400
commit		144ad2a6c56b6109ff0f64074863ae5cf1c1698a (patch)
tree		58b3934b3b2ef3c739cd4a77b08a809d0290be7e
parent		5756d346c7cdefcd84a8ac4901167cdfb5447b69 (diff)
netfilter: ip6_queue: rwlock to spinlock conversion
Convert the queue_lock rwlock to a spinlock.

The sections that took the read lock only read integer values
(copy_mode and copy_range), so they can be replaced by lockless
ACCESS_ONCE() reads. The result is one atomic operation instead of
four per ipq_enqueue_packet() call.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
-rw-r--r--	net/ipv6/netfilter/ip6_queue.c	| 57
1 file changed, 25 insertions(+), 32 deletions(-)
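
For context, here is a minimal, self-contained userspace sketch of the
locking pattern this patch moves to: writers still serialize on a
spinlock, while readers take no lock at all and sample the integer
state with ACCESS_ONCE(). Everything below (the pthread harness,
set_mode(), packet_copy_len(), skb_len) is illustrative scaffolding,
not kernel code; ACCESS_ONCE() is re-created as a volatile access,
which is what the kernel macro of this era amounted to.

#include <pthread.h>
#include <stdio.h>

/* Userspace stand-in for the kernel macro: a volatile access the
 * compiler cannot tear, cache, or re-fetch. */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

/* Illustrative stand-ins for the ip6_queue globals. */
static pthread_spinlock_t queue_lock;	/* now taken by writers only */
static unsigned char copy_mode;		/* read locklessly */
static unsigned int copy_range;		/* read locklessly */

/* Writer side, mirroring the patched __ipq_set_mode(): clamp the
 * range first, publish the mode last, all under the spinlock. */
static void set_mode(unsigned char mode, unsigned int range)
{
	pthread_spin_lock(&queue_lock);
	if (range > 0xFFFF)
		range = 0xFFFF;
	copy_range = range;
	copy_mode = mode;
	pthread_spin_unlock(&queue_lock);
}

/* Reader side, mirroring ipq_build_packet_message() after the patch:
 * no lock, a single snapshot of the value, then a re-clamp against
 * the actual packet length. */
static unsigned int packet_copy_len(unsigned int skb_len)
{
	unsigned int data_len = ACCESS_ONCE(copy_range);

	if (data_len == 0 || data_len > skb_len)
		data_len = skb_len;
	return data_len;
}

int main(void)
{
	pthread_spin_init(&queue_lock, PTHREAD_PROCESS_PRIVATE);
	set_mode(2 /* stand-in for IPQ_COPY_PACKET */, 0x20000);
	printf("mode %u: copy %u of 1500 bytes\n",
	       ACCESS_ONCE(copy_mode), packet_copy_len(1500));
	return 0;
}

Note how set_mode() clamps range before storing it and publishes
copy_mode last, mirroring the reordering in __ipq_set_mode() below: a
lockless reader that observes IPQ_COPY_PACKET should never pair it
with an unclamped copy_range.
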
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 8c201743d96d..413ab0754e1f 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -43,7 +43,7 @@ typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
 
 static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
 static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
-static DEFINE_RWLOCK(queue_lock);
+static DEFINE_SPINLOCK(queue_lock);
 static int peer_pid __read_mostly;
 static unsigned int copy_range __read_mostly;
 static unsigned int queue_total;
@@ -73,10 +73,10 @@ __ipq_set_mode(unsigned char mode, unsigned int range)
 		break;
 
 	case IPQ_COPY_PACKET:
-		copy_mode = mode;
+		if (range > 0xFFFF)
+			range = 0xFFFF;
 		copy_range = range;
-		if (copy_range > 0xFFFF)
-			copy_range = 0xFFFF;
+		copy_mode = mode;
 		break;
 
 	default:
@@ -102,7 +102,7 @@ ipq_find_dequeue_entry(unsigned long id)
 {
 	struct nf_queue_entry *entry = NULL, *i;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	list_for_each_entry(i, &queue_list, list) {
 		if ((unsigned long)i == id) {
@@ -116,7 +116,7 @@ ipq_find_dequeue_entry(unsigned long id)
 		queue_total--;
 	}
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return entry;
 }
 
@@ -137,9 +137,9 @@ __ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
 static void
 ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
 {
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 	__ipq_flush(cmpfn, data);
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 }
 
 static struct sk_buff *
@@ -153,9 +153,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	struct nlmsghdr *nlh;
 	struct timeval tv;
 
-	read_lock_bh(&queue_lock);
-
-	switch (copy_mode) {
+	switch (ACCESS_ONCE(copy_mode)) {
 	case IPQ_COPY_META:
 	case IPQ_COPY_NONE:
 		size = NLMSG_SPACE(sizeof(*pmsg));
@@ -163,26 +161,21 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 
 	case IPQ_COPY_PACKET:
 		if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
-		    (*errp = skb_checksum_help(entry->skb))) {
-			read_unlock_bh(&queue_lock);
+		    (*errp = skb_checksum_help(entry->skb)))
 			return NULL;
-		}
-		if (copy_range == 0 || copy_range > entry->skb->len)
+
+		data_len = ACCESS_ONCE(copy_range);
+		if (data_len == 0 || data_len > entry->skb->len)
 			data_len = entry->skb->len;
-		else
-			data_len = copy_range;
 
 		size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
 		break;
 
 	default:
 		*errp = -EINVAL;
-		read_unlock_bh(&queue_lock);
 		return NULL;
 	}
 
-	read_unlock_bh(&queue_lock);
-
 	skb = alloc_skb(size, GFP_ATOMIC);
 	if (!skb)
 		goto nlmsg_failure;
@@ -242,7 +235,7 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 	if (nskb == NULL)
 		return status;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	if (!peer_pid)
 		goto err_out_free_nskb;
@@ -266,14 +259,14 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 
 	__ipq_enqueue_entry(entry);
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 
 err_out_free_nskb:
 	kfree_skb(nskb);
 
 err_out_unlock:
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 }
 
@@ -342,9 +335,9 @@ ipq_set_mode(unsigned char mode, unsigned int range)
 {
 	int status;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 	status = __ipq_set_mode(mode, range);
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 }
 
@@ -441,11 +434,11 @@ __ipq_rcv_skb(struct sk_buff *skb)
 	if (security_netlink_recv(skb, CAP_NET_ADMIN))
 		RCV_SKB_FAIL(-EPERM);
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	if (peer_pid) {
 		if (peer_pid != pid) {
-			write_unlock_bh(&queue_lock);
+			spin_unlock_bh(&queue_lock);
 			RCV_SKB_FAIL(-EBUSY);
 		}
 	} else {
@@ -453,7 +446,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
 		peer_pid = pid;
 	}
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 
 	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
 				  nlmsglen - NLMSG_LENGTH(0));
@@ -498,10 +491,10 @@ ipq_rcv_nl_event(struct notifier_block *this,
 	struct netlink_notify *n = ptr;
 
 	if (event == NETLINK_URELEASE && n->protocol == NETLINK_IP6_FW) {
-		write_lock_bh(&queue_lock);
+		spin_lock_bh(&queue_lock);
 		if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
 			__ipq_reset();
-		write_unlock_bh(&queue_lock);
+		spin_unlock_bh(&queue_lock);
 	}
 	return NOTIFY_DONE;
 }
@@ -528,7 +521,7 @@ static ctl_table ipq_table[] = {
 #ifdef CONFIG_PROC_FS
 static int ip6_queue_show(struct seq_file *m, void *v)
 {
-	read_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	seq_printf(m,
 		   "Peer PID : %d\n"
@@ -546,7 +539,7 @@ static int ip6_queue_show(struct seq_file *m, void *v)
 		   queue_dropped,
 		   queue_user_dropped);
 
-	read_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return 0;
 }
 
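A closing note on why dropping the read lock is safe here: the reader
takes independent ACCESS_ONCE() snapshots of copy_mode and copy_range,
and even a stale copy_range is re-clamped against the actual packet
length before it sizes the netlink allocation. The toy stress test
below (a userspace sketch with assumed names, not kernel code; needs
POSIX threads) hammers that invariant with a concurrent writer.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static unsigned int copy_range;	/* updated by writer, read locklessly */
static volatile int stop;

/* Writer: clamp before publishing, as in the patched __ipq_set_mode().
 * The sweep deliberately passes values above the 0xFFFF limit. */
static void *writer(void *arg)
{
	unsigned int r = 0;

	(void)arg;
	while (!stop) {
		unsigned int range = r;

		if (range > 0xFFFF)
			range = 0xFFFF;
		copy_range = range;
		r = (r + 7919) & 0x3FFFF;
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	unsigned int i;
	const unsigned int skb_len = 1500;	/* pretend packet length */

	pthread_create(&t, NULL, writer, NULL);
	for (i = 0; i < 10000000; i++) {
		/* Reader: one snapshot, then re-clamp against the packet,
		 * as ipq_build_packet_message() does after the patch. */
		unsigned int data_len = ACCESS_ONCE(copy_range);

		if (data_len == 0 || data_len > skb_len)
			data_len = skb_len;
		assert(data_len <= skb_len);	/* holds despite the race */
	}
	stop = 1;
	pthread_join(t, NULL);
	puts("reader never saw an oversized data_len");
	return 0;
}

The assertion never fires regardless of how the reads interleave with
the writer, which is the property that lets the patch delete the
read-side locking from ipq_build_packet_message() entirely.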