author	Zi Shen Lim <zlim.lnx@gmail.com>	2014-07-03 10:56:54 -0400
committer	David S. Miller <davem@davemloft.net>	2014-07-08 17:20:00 -0400
commit	9f12fbe603f7ae346b2b46008e325f0c9a68e55d
tree	440fb57e08880b9a7fe5f9180dd7da9dae50a19c /net/core
parent	3d5baba0ecfdd5de35bb7ce41ef9218f2b17b006
net: filter: move load_pointer() into filter.h
load_pointer() is already a static inline function. Let's move it into filter.h so BPF JIT implementations can reuse this function.

Since we're exporting this function, let's also rename it to bpf_load_pointer() for clarity.

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
Reviewed-by: Daniel Borkmann <dborkman@redhat.com>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
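For reference, the relocated helper would read roughly as follows once moved into include/linux/filter.h. This is a sketch reconstructed from the lines deleted below, assuming the body is carried over verbatim and only renamed; the header side of the change is not shown here because the diffstat is limited to net/core.

/* Sketch: load_pointer() moved to include/linux/filter.h and renamed,
 * so BPF JIT implementations can reuse it. Reconstructed from the
 * removal in net/core/filter.c below; exact placement in the header
 * is an assumption.
 */
static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
				      unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);

	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}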
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/filter.c	15
1 file changed, 3 insertions, 12 deletions
diff --git a/net/core/filter.c b/net/core/filter.c
index 1dbf6462f766..87af1e3e56c0 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -84,15 +84,6 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
 	return NULL;
 }
 
-static inline void *load_pointer(const struct sk_buff *skb, int k,
-				 unsigned int size, void *buffer)
-{
-	if (k >= 0)
-		return skb_header_pointer(skb, k, size, buffer);
-
-	return bpf_internal_load_pointer_neg_helper(skb, k, size);
-}
-
 /**
  *	sk_filter - run a packet through a socket filter
  *	@sk: sock associated with &sk_buff
@@ -537,7 +528,7 @@ load_word:
 		 * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
 		 */
 
-		ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
+		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
 		if (likely(ptr != NULL)) {
 			BPF_R0 = get_unaligned_be32(ptr);
 			CONT;
@@ -547,7 +538,7 @@ load_word:
 	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
 		off = IMM;
 load_half:
-		ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
+		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
 		if (likely(ptr != NULL)) {
 			BPF_R0 = get_unaligned_be16(ptr);
 			CONT;
@@ -557,7 +548,7 @@ load_half:
 	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
 		off = IMM;
 load_byte:
-		ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
+		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
 		if (likely(ptr != NULL)) {
 			BPF_R0 = *(u8 *)ptr;
 			CONT;