author    Arnaldo Carvalho de Melo <acme@redhat.com>  2007-03-19 18:30:44 -0400
committer David S. Miller <davem@sunset.davemloft.net>  2007-04-26 01:24:32 -0400
commit    459a98ed881802dee55897441bc7f77af614368e (patch)
tree      b81f76632d8f2e21eb91ec3d885091a98398d93e /include/linux
parent    4c13eb6657fe9ef7b4dc8f1a405c902e9e5234e0 (diff)
[SK_BUFF]: Introduce skb_reset_mac_header(skb)
For the common, open coded 'skb->mac.raw = skb->data' operation, so that we can
later turn skb->mac.raw into an offset, reducing the size of struct sk_buff in
64bit land while possibly keeping it as a pointer on 32bit.

This one touches just the most simple case, next will handle the slightly more
"complex" cases.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
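For illustration, a minimal sketch of the conversion pattern this patch begins,
in a hypothetical driver receive path. The example_rx_old/example_rx_new names
are invented for the example and do not appear in the patch:

	#include <linux/skbuff.h>
	#include <linux/netdevice.h>

	/* Hypothetical receive path before the conversion: the mac
	 * header setup is open coded in each driver.
	 */
	static void example_rx_old(struct sk_buff *skb, struct net_device *dev)
	{
		skb->dev = dev;
		skb->mac.raw = skb->data;
	}

	/* The same path after the conversion: the helper hides how the
	 * mac header is represented, so the representation can change
	 * later without touching every caller.
	 */
	static void example_rx_new(struct sk_buff *skb, struct net_device *dev)
	{
		skb->dev = dev;
		skb_reset_mac_header(skb);
	}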
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/hdlc.h    4
-rw-r--r--  include/linux/skbuff.h  5
2 files changed, 7 insertions, 2 deletions
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index d4b333938f73..0fe562af9c8c 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -132,8 +132,8 @@ static __inline__ __be16 hdlc_type_trans(struct sk_buff *skb,
 {
 	hdlc_device *hdlc = dev_to_hdlc(dev);
 
-	skb->mac.raw = skb->data;
-	skb->dev = dev;
+	skb->dev = dev;
+	skb_reset_mac_header(skb);
 
 	if (hdlc->proto->type_trans)
 		return hdlc->proto->type_trans(skb, dev);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index df229bd5f1a9..748f254b50cc 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -960,6 +960,11 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
 	skb->tail += len;
 }
 
+static inline void skb_reset_mac_header(struct sk_buff *skb)
+{
+	skb->mac.raw = skb->data;
+}
+
 /*
  * CPUs often take a performance hit when accessing unaligned memory
  * locations. The actual performance hit varies, it can be small if the
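The commit message anticipates the follow-on step: once every open coded
assignment goes through this helper, only the helper itself has to change to
store an offset on 64bit. A sketch of how that might look; the mac_header
field name and the NET_SKBUFF_DATA_USES_OFFSET config symbol are assumptions
about the later change, not part of this patch:

	/* Sketch only: assumes a later struct sk_buff where the mac
	 * header is kept as an offset from skb->head on 64bit builds
	 * and as a plain pointer on 32bit builds.
	 */
	static inline void skb_reset_mac_header(struct sk_buff *skb)
	{
	#ifdef NET_SKBUFF_DATA_USES_OFFSET
		skb->mac_header = skb->data - skb->head;
	#else
		skb->mac_header = skb->data;
	#endif
	}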