path: root/net/ipv4
author	Eric Dumazet <eric.dumazet@gmail.com>	2010-06-09 09:47:41 -0400
committer	Patrick McHardy <kaber@trash.net>	2010-06-09 09:47:41 -0400
commit	5756d346c7cdefcd84a8ac4901167cdfb5447b69 (patch)
tree	4ac2c15adef1bd1e62292815a04ce7c8aefb765a /net/ipv4
parent	2bf074825403e0e0d623bac7573580773b78abef (diff)
netfilter: ip_queue: rwlock to spinlock conversion
Convert the queue_lock rwlock to a spinlock. The read-locked sections only
read integer values, so they can be replaced by lockless reads
(ACCESS_ONCE()). This means one atomic operation instead of four per
ipq_enqueue_packet() call.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
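The gist of the conversion, as a minimal standalone sketch rather than the
ip_queue code itself (all demo_* names are hypothetical): writers keep
serializing on the lock, now a plain spinlock, while read paths that only
load a couple of integer values drop the read lock entirely and use
ACCESS_ONCE() loads, which prevent the compiler from refetching or tearing
the accesses.

#include <linux/spinlock.h>
#include <linux/compiler.h>

static DEFINE_SPINLOCK(demo_lock);	/* was: DEFINE_RWLOCK(demo_lock) */
static unsigned char demo_mode;
static unsigned int demo_range;

/* Write side: still serialized, now by the spinlock. */
static void demo_set_mode(unsigned char mode, unsigned int range)
{
	spin_lock_bh(&demo_lock);	/* was: write_lock_bh() */
	demo_range = range;
	demo_mode = mode;
	spin_unlock_bh(&demo_lock);	/* was: write_unlock_bh() */
}

/*
 * Read side: was read_lock_bh()/read_unlock_bh() around the loads.
 * Since only integer values are read, a single ACCESS_ONCE() load of
 * each is enough; no lock is taken at all.
 */
static unsigned int demo_copy_len(unsigned int skb_len)
{
	unsigned int range = ACCESS_ONCE(demo_range);

	if (range == 0 || range > skb_len)
		range = skb_len;
	return range;
}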
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/netfilter/ip_queue.c | 57
1 file changed, 25 insertions(+), 32 deletions(-)
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index a4e5fc5df4b..d2c1311cb28 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -42,7 +42,7 @@ typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
 
 static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
 static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
-static DEFINE_RWLOCK(queue_lock);
+static DEFINE_SPINLOCK(queue_lock);
 static int peer_pid __read_mostly;
 static unsigned int copy_range __read_mostly;
 static unsigned int queue_total;
@@ -72,10 +72,10 @@ __ipq_set_mode(unsigned char mode, unsigned int range)
 		break;
 
 	case IPQ_COPY_PACKET:
-		copy_mode = mode;
+		if (range > 0xFFFF)
+			range = 0xFFFF;
 		copy_range = range;
-		if (copy_range > 0xFFFF)
-			copy_range = 0xFFFF;
+		copy_mode = mode;
 		break;
 
 	default:
@@ -101,7 +101,7 @@ ipq_find_dequeue_entry(unsigned long id)
 {
 	struct nf_queue_entry *entry = NULL, *i;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	list_for_each_entry(i, &queue_list, list) {
 		if ((unsigned long)i == id) {
@@ -115,7 +115,7 @@ ipq_find_dequeue_entry(unsigned long id)
 		queue_total--;
 	}
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return entry;
 }
 
@@ -136,9 +136,9 @@ __ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
 static void
 ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
 {
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 	__ipq_flush(cmpfn, data);
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 }
 
 static struct sk_buff *
@@ -152,9 +152,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	struct nlmsghdr *nlh;
 	struct timeval tv;
 
-	read_lock_bh(&queue_lock);
-
-	switch (copy_mode) {
+	switch (ACCESS_ONCE(copy_mode)) {
 	case IPQ_COPY_META:
 	case IPQ_COPY_NONE:
 		size = NLMSG_SPACE(sizeof(*pmsg));
@@ -162,26 +160,21 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 
 	case IPQ_COPY_PACKET:
 		if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
-		    (*errp = skb_checksum_help(entry->skb))) {
-			read_unlock_bh(&queue_lock);
+		    (*errp = skb_checksum_help(entry->skb)))
 			return NULL;
-		}
-		if (copy_range == 0 || copy_range > entry->skb->len)
+
+		data_len = ACCESS_ONCE(copy_range);
+		if (data_len == 0 || data_len > entry->skb->len)
 			data_len = entry->skb->len;
-		else
-			data_len = copy_range;
 
 		size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
 		break;
 
 	default:
 		*errp = -EINVAL;
-		read_unlock_bh(&queue_lock);
 		return NULL;
 	}
 
-	read_unlock_bh(&queue_lock);
-
 	skb = alloc_skb(size, GFP_ATOMIC);
 	if (!skb)
 		goto nlmsg_failure;
@@ -242,7 +235,7 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 	if (nskb == NULL)
 		return status;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	if (!peer_pid)
 		goto err_out_free_nskb;
@@ -266,14 +259,14 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 
 	__ipq_enqueue_entry(entry);
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 
 err_out_free_nskb:
 	kfree_skb(nskb);
 
 err_out_unlock:
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 }
 
@@ -342,9 +335,9 @@ ipq_set_mode(unsigned char mode, unsigned int range)
 {
 	int status;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 	status = __ipq_set_mode(mode, range);
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 }
 
@@ -440,11 +433,11 @@ __ipq_rcv_skb(struct sk_buff *skb)
 	if (security_netlink_recv(skb, CAP_NET_ADMIN))
 		RCV_SKB_FAIL(-EPERM);
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	if (peer_pid) {
 		if (peer_pid != pid) {
-			write_unlock_bh(&queue_lock);
+			spin_unlock_bh(&queue_lock);
 			RCV_SKB_FAIL(-EBUSY);
 		}
 	} else {
@@ -452,7 +445,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
 		peer_pid = pid;
 	}
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 
 	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
 				  nlmsglen - NLMSG_LENGTH(0));
@@ -497,10 +490,10 @@ ipq_rcv_nl_event(struct notifier_block *this,
 	struct netlink_notify *n = ptr;
 
 	if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) {
-		write_lock_bh(&queue_lock);
+		spin_lock_bh(&queue_lock);
 		if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
 			__ipq_reset();
-		write_unlock_bh(&queue_lock);
+		spin_unlock_bh(&queue_lock);
 	}
 	return NOTIFY_DONE;
 }
@@ -527,7 +520,7 @@ static ctl_table ipq_table[] = {
 #ifdef CONFIG_PROC_FS
 static int ip_queue_show(struct seq_file *m, void *v)
 {
-	read_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	seq_printf(m,
 		   "Peer PID : %d\n"
@@ -545,7 +538,7 @@ static int ip_queue_show(struct seq_file *m, void *v)
 		   queue_dropped,
 		   queue_user_dropped);
 
-	read_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return 0;
 }
 