path: root/net
author	Eric Dumazet <eric.dumazet@gmail.com>	2010-12-06 15:50:09 -0500
committer	David S. Miller <davem@davemloft.net>	2010-12-08 13:30:34 -0500
commit	62ab0812137ec4f9884dd7de346238841ac03283 (patch)
tree	da0807aee4597522b0ecabc51d2d9fc300895d98 /net
parent	38f49e8801565674c424896c3dcb4228410b43a8 (diff)
filter: constify sk_run_filter()
sk_run_filter() doesn't write to the skb, so change its prototype to reflect this.

Fix two af_packet comments.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
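For context, a minimal caller sketch (not part of this patch; the wrapper name check_skb_filter() and the includes are illustrative assumptions) showing what the constified prototype permits: a socket filter can now be run against an skb that the caller only holds through a read-only pointer.

#include <linux/skbuff.h>
#include <linux/filter.h>

/* Hypothetical helper, not from this commit: with the constified
 * prototype, sk_run_filter() can be called on a const skb reference,
 * since it only inspects the packet and never modifies it.
 */
static unsigned int check_skb_filter(const struct sk_buff *skb,
				     const struct sock_filter *prog)
{
	return sk_run_filter(skb, prog);
}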
Diffstat (limited to 'net')
-rw-r--r--	net/core/filter.c	7
-rw-r--r--	net/core/timestamping.c	2
-rw-r--r--	net/packet/af_packet.c	31
3 files changed, 21 insertions(+), 19 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index ac4920a87be5..25500f16a18a 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -88,7 +88,7 @@ enum {
 };
 
 /* No hurry in this branch */
-static void *__load_pointer(struct sk_buff *skb, int k)
+static void *__load_pointer(const struct sk_buff *skb, int k)
 {
 	u8 *ptr = NULL;
 
@@ -102,7 +102,7 @@ static void *__load_pointer(struct sk_buff *skb, int k)
 	return NULL;
 }
 
-static inline void *load_pointer(struct sk_buff *skb, int k,
+static inline void *load_pointer(const struct sk_buff *skb, int k,
 				 unsigned int size, void *buffer)
 {
 	if (k >= 0)
@@ -160,7 +160,8 @@ EXPORT_SYMBOL(sk_filter);
  * and last instruction guaranteed to be a RET, we dont need to check
  * flen. (We used to pass to this function the length of filter)
  */
-unsigned int sk_run_filter(struct sk_buff *skb, const struct sock_filter *fentry)
+unsigned int sk_run_filter(const struct sk_buff *skb,
+			   const struct sock_filter *fentry)
 {
 	void *ptr;
 	u32 A = 0;			/* Accumulator */
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index dac7ed687f60..b124d28ff1c8 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -26,7 +26,7 @@ static struct sock_filter ptp_filter[] = {
 	PTP_FILTER
 };
 
-static unsigned int classify(struct sk_buff *skb)
+static unsigned int classify(const struct sk_buff *skb)
 {
 	if (likely(skb->dev &&
 		   skb->dev->phydev &&
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index a11c731d2ee4..17eafe5b48c6 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -517,7 +517,8 @@ out_free:
 	return err;
 }
 
-static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
+static inline unsigned int run_filter(const struct sk_buff *skb,
+				      const struct sock *sk,
 				      unsigned int res)
 {
 	struct sk_filter *filter;
@@ -532,15 +533,15 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
 }
 
 /*
-   This function makes lazy skb cloning in hope that most of packets
-   are discarded by BPF.
-
-   Note tricky part: we DO mangle shared skb! skb->data, skb->len
-   and skb->cb are mangled. It works because (and until) packets
-   falling here are owned by current CPU. Output packets are cloned
-   by dev_queue_xmit_nit(), input packets are processed by net_bh
-   sequencially, so that if we return skb to original state on exit,
-   we will not harm anyone.
+ * This function makes lazy skb cloning in hope that most of packets
+ * are discarded by BPF.
+ *
+ * Note tricky part: we DO mangle shared skb! skb->data, skb->len
+ * and skb->cb are mangled. It works because (and until) packets
+ * falling here are owned by current CPU. Output packets are cloned
+ * by dev_queue_xmit_nit(), input packets are processed by net_bh
+ * sequencially, so that if we return skb to original state on exit,
+ * we will not harm anyone.
  */
 
 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -566,11 +567,11 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 
 	if (dev->header_ops) {
 		/* The device has an explicit notion of ll header,
-		   exported to higher levels.
-
-		   Otherwise, the device hides datails of it frame
-		   structure, so that corresponding packet head
-		   never delivered to user.
+		 * exported to higher levels.
+		 *
+		 * Otherwise, the device hides details of its frame
+		 * structure, so that corresponding packet head is
+		 * never delivered to user.
 		 */
 		if (sk->sk_type != SOCK_DGRAM)
 			skb_push(skb, skb->data - skb_mac_header(skb));