author	Eric Dumazet <eric.dumazet@gmail.com>	2010-12-15 14:45:28 -0500
committer	David S. Miller <davem@davemloft.net>	2010-12-21 15:30:12 -0500
commit	12b16dadbc2406144d408754f96d0f44aa016239 (patch)
tree	1503124f18c7e181963889f254a49a597bf59baa /net/core
parent	cb8f404893bab40431f7eeb2511454031b07e7df (diff)
filter: optimize accesses to ancillary data
We can translate pseudo load instructions at filter check time to dedicated
instructions to speed up filtering and avoid one switch(). libpcap currently
uses SKF_AD_PROTOCOL, but custom filters probably use other ancillary accesses.

Note: I made the assertion that ancillary data is always accessed with
BPF_LD|BPF_?|BPF_ABS instructions, not with BPF_LD|BPF_?|BPF_IND ones
(offset given by the K constant, not by K + the X register).

On x86_64, this saves a few bytes of text:

# size net/core/filter.o.*
   text	   data	    bss	    dec	    hex	filename
   4864	      0	      0	   4864	   1300	net/core/filter.o.new
   4944	      0	      0	   4944	   1350	net/core/filter.o.old

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
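For context, here is a minimal userspace sketch (not part of the patch) of the kind of filter the translation speeds up: the first instruction is an absolute load from the ancillary range, which sk_chk_filter() now rewrites into a dedicated BPF_S_ANC_PROTOCOL opcode at attach time instead of re-deriving it for every packet. Constants come from <linux/filter.h> and <linux/if_ether.h>; attaching to an AF_PACKET socket requires CAP_NET_RAW.

#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/filter.h>

int main(void)
{
	/* Accept a frame only if skb->protocol == ETH_P_ARP, else drop it.
	 * The ancillary load below is what the patch translates into a
	 * dedicated opcode when the filter is validated. */
	struct sock_filter insns[] = {
		BPF_STMT(BPF_LD | BPF_H | BPF_ABS, SKF_AD_OFF + SKF_AD_PROTOCOL),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_ARP, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept, up to 64KB */
		BPF_STMT(BPF_RET | BPF_K, 0),		/* drop */
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) < 0) {
		perror("SO_ATTACH_FILTER");
		close(fd);
		return 1;
	}
	/* recvfrom(fd, ...) would now only ever return ARP frames. */
	close(fd);
	return 0;
}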
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/filter.c	72
1 file changed, 44 insertions, 28 deletions
diff --git a/net/core/filter.c b/net/core/filter.c
index e8a6ac411ffb..2b27d4efdd48 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -85,6 +85,17 @@ enum {
 	BPF_S_JMP_JGT_X,
 	BPF_S_JMP_JSET_K,
 	BPF_S_JMP_JSET_X,
+	/* Ancillary data */
+	BPF_S_ANC_PROTOCOL,
+	BPF_S_ANC_PKTTYPE,
+	BPF_S_ANC_IFINDEX,
+	BPF_S_ANC_NLATTR,
+	BPF_S_ANC_NLATTR_NEST,
+	BPF_S_ANC_MARK,
+	BPF_S_ANC_QUEUE,
+	BPF_S_ANC_HATYPE,
+	BPF_S_ANC_RXHASH,
+	BPF_S_ANC_CPU,
 };
 
 /* No hurry in this branch */
@@ -107,11 +118,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
 {
 	if (k >= 0)
 		return skb_header_pointer(skb, k, size, buffer);
-	else {
-		if (k >= SKF_AD_OFF)
-			return NULL;
-		return __load_pointer(skb, k, size);
-	}
+	return __load_pointer(skb, k, size);
 }
 
 /**
@@ -269,7 +276,7 @@ load_w:
 			A = get_unaligned_be32(ptr);
 			continue;
 		}
-		break;
+		return 0;
 	case BPF_S_LD_H_ABS:
 		k = K;
 load_h:
@@ -278,7 +285,7 @@ load_h:
 			A = get_unaligned_be16(ptr);
 			continue;
 		}
-		break;
+		return 0;
 	case BPF_S_LD_B_ABS:
 		k = K;
 load_b:
@@ -287,7 +294,7 @@ load_b:
 			A = *(u8 *)ptr;
 			continue;
 		}
-		break;
+		return 0;
 	case BPF_S_LD_W_LEN:
 		A = skb->len;
 		continue;
@@ -338,45 +345,35 @@ load_b:
 		case BPF_S_STX:
 			mem[K] = X;
 			continue;
-		default:
-			WARN_ON(1);
-			return 0;
-		}
-
-		/*
-		 * Handle ancillary data, which are impossible
-		 * (or very difficult) to get parsing packet contents.
-		 */
-		switch (k-SKF_AD_OFF) {
-		case SKF_AD_PROTOCOL:
+		case BPF_S_ANC_PROTOCOL:
 			A = ntohs(skb->protocol);
 			continue;
-		case SKF_AD_PKTTYPE:
+		case BPF_S_ANC_PKTTYPE:
 			A = skb->pkt_type;
 			continue;
-		case SKF_AD_IFINDEX:
+		case BPF_S_ANC_IFINDEX:
 			if (!skb->dev)
 				return 0;
 			A = skb->dev->ifindex;
 			continue;
-		case SKF_AD_MARK:
+		case BPF_S_ANC_MARK:
 			A = skb->mark;
 			continue;
-		case SKF_AD_QUEUE:
+		case BPF_S_ANC_QUEUE:
 			A = skb->queue_mapping;
 			continue;
-		case SKF_AD_HATYPE:
+		case BPF_S_ANC_HATYPE:
 			if (!skb->dev)
 				return 0;
 			A = skb->dev->type;
 			continue;
-		case SKF_AD_RXHASH:
+		case BPF_S_ANC_RXHASH:
 			A = skb->rxhash;
 			continue;
-		case SKF_AD_CPU:
+		case BPF_S_ANC_CPU:
 			A = raw_smp_processor_id();
 			continue;
-		case SKF_AD_NLATTR: {
+		case BPF_S_ANC_NLATTR: {
 			struct nlattr *nla;
 
 			if (skb_is_nonlinear(skb))
@@ -392,7 +389,7 @@ load_b:
 			A = 0;
 			continue;
 		}
-		case SKF_AD_NLATTR_NEST: {
+		case BPF_S_ANC_NLATTR_NEST: {
 			struct nlattr *nla;
 
 			if (skb_is_nonlinear(skb))
@@ -412,6 +409,7 @@ load_b:
 			continue;
 		}
 		default:
+			WARN_ON(1);
 			return 0;
 		}
 	}
@@ -600,6 +598,24 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
 			    pc + ftest->jf + 1 >= flen)
 				return -EINVAL;
 			break;
+		case BPF_S_LD_W_ABS:
+		case BPF_S_LD_H_ABS:
+		case BPF_S_LD_B_ABS:
+#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:	\
+				code = BPF_S_ANC_##CODE;	\
+				break
+			switch (ftest->k) {
+			ANCILLARY(PROTOCOL);
+			ANCILLARY(PKTTYPE);
+			ANCILLARY(IFINDEX);
+			ANCILLARY(NLATTR);
+			ANCILLARY(NLATTR_NEST);
+			ANCILLARY(MARK);
+			ANCILLARY(QUEUE);
+			ANCILLARY(HATYPE);
+			ANCILLARY(RXHASH);
+			ANCILLARY(CPU);
+			}
 		}
 		ftest->code = code;
 	}
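As a reading aid (again, not part of the patch), each ANCILLARY(CODE) use above expands, via ## token pasting, into one case label that rewrites the instruction's opcode once, at check time. Below is a hypothetical standalone rendering of a single expansion; SKF_AD_OFF and SKF_AD_PROTOCOL come from <linux/filter.h>, while the BPF_S_* values and the translate_abs_load() helper are stand-ins invented for illustration, since the real enum lives inside net/core/filter.c.

#include <stdio.h>
#include <linux/filter.h>	/* SKF_AD_OFF, SKF_AD_PROTOCOL */

/* Arbitrary stand-in values for the kernel-internal BPF_S_* enum. */
enum { BPF_S_LD_H_ABS = 1, BPF_S_ANC_PROTOCOL = 2 };

/* Hypothetical helper mirroring what ANCILLARY(PROTOCOL) generates inside
 * the switch (ftest->k) of sk_chk_filter(). */
static unsigned short translate_abs_load(unsigned short code, unsigned int k)
{
	switch (k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:	/* ancillary range starts at the negative SKF_AD_OFF */
		code = BPF_S_ANC_PROTOCOL;
		break;
	}
	return code;	/* unchanged for ordinary packet offsets */
}

int main(void)
{
	unsigned short code = BPF_S_LD_H_ABS;

	code = translate_abs_load(code, SKF_AD_OFF + SKF_AD_PROTOCOL);
	printf("rewritten opcode: %d\n", code);	/* prints the BPF_S_ANC_PROTOCOL stand-in */
	return 0;
}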