about summary refs log tree commit diff stats
path: root/include/linux
diff options
context:
space:
mode:
authorArnaldo Carvalho de Melo <acme@redhat.com>2007-04-11 00:22:35 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2007-04-26 01:26:21 -0400
commit2e07fa9cd3bac1e28cfe3131ed86b053afb02fc9 (patch)
tree177ad0c2cbbd60c25e54b35802219634c047aa08 /include/linux
parentb0e380b1d8a8e0aca215df97702f99815f05c094 (diff)
[SK_BUFF]: Use offsets for skb->{mac,network,transport}_header on 64bit architectures
With this we save 8 bytes per network packet, leaving a 4 bytes hole to be used
in further shrinking work, likely with the offsetization of other pointers,
such as ->{data,tail,end}, at the cost of adds, that were minimized by the
usual practice of setting skb->{mac,nh,h}.raw to a local variable that is then
accessed multiple times in each function, it also is not more expensive than
before with regards to most of the handling of such headers, like setting one
of these headers to another (transport to network, etc), or subtracting, adding
to/from it, comparing them, etc.

Now we have this layout for sk_buff on a x86_64 machine:

[acme@mica net-2.6.22]$ pahole vmlinux sk_buff
struct sk_buff {
	struct sk_buff *              next;              /*   0     8 */
	struct sk_buff *              prev;              /*   8     8 */
	struct rb_node                rb;                /*  16    24 */
	struct sock *                 sk;                /*  40     8 */
	ktime_t                       tstamp;            /*  48     8 */
	struct net_device *           dev;               /*  56     8 */
	/* --- cacheline 1 boundary (64 bytes) --- */
	struct net_device *           input_dev;         /*  64     8 */
	sk_buff_data_t                transport_header;  /*  72     4 */
	sk_buff_data_t                network_header;    /*  76     4 */
	sk_buff_data_t                mac_header;        /*  80     4 */

	/* XXX 4 bytes hole, try to pack */

	struct dst_entry *            dst;               /*  88     8 */
	struct sec_path *             sp;                /*  96     8 */
	char                          cb[48];            /* 104    48 */
	/* cacheline 2 boundary (128 bytes) was 24 bytes ago */
	unsigned int                  len;               /* 152     4 */
	unsigned int                  data_len;          /* 156     4 */
	unsigned int                  mac_len;           /* 160     4 */
	union {
		__wsum                csum;              /*         4 */
		__u32                 csum_offset;       /*         4 */
	};                                               /* 164     4 */
	__u32                         priority;          /* 168     4 */
	__u8                          local_df:1;        /* 172     1 */
	__u8                          cloned:1;          /* 172     1 */
	__u8                          ip_summed:2;       /* 172     1 */
	__u8                          nohdr:1;           /* 172     1 */
	__u8                          nfctinfo:3;        /* 172     1 */
	__u8                          pkt_type:3;        /* 173     1 */
	__u8                          fclone:2;          /* 173     1 */
	__u8                          ipvs_property:1;   /* 173     1 */

	/* XXX 2 bits hole, try to pack */

	__be16                        protocol;          /* 174     2 */
	void (*destructor)(struct sk_buff *);            /* 176     8 */
	struct nf_conntrack *         nfct;              /* 184     8 */
	/* --- cacheline 3 boundary (192 bytes) --- */
	struct sk_buff *              nfct_reasm;        /* 192     8 */
	struct nf_bridge_info *       nf_bridge;         /* 200     8 */
	__u16                         tc_index;          /* 208     2 */
	__u16                         tc_verd;           /* 210     2 */
	dma_cookie_t                  dma_cookie;        /* 212     4 */
	__u32                         secmark;           /* 216     4 */
	__u32                         mark;              /* 220     4 */
	unsigned int                  truesize;          /* 224     4 */
	atomic_t                      users;             /* 228     4 */
	unsigned char *               head;              /* 232     8 */
	unsigned char *               data;              /* 240     8 */
	unsigned char *               tail;              /* 248     8 */
	/* --- cacheline 4 boundary (256 bytes) --- */
	unsigned char *               end;               /* 256     8 */
}; /* size: 264, cachelines: 5 */
   /* sum members: 260, holes: 1, sum holes: 4 */
   /* bit holes: 1, sum bit holes: 2 bits */
   /* last cacheline: 8 bytes */

On 32 bits nothing changes, and pointers continue to be used with the compiler
turning all this abstraction layer into dust. But there are some sk_buff
validation tricks that are now possible, humm... :-)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/skbuff.h  104
1 file changed, 86 insertions(+), 18 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c45ad1263271..2e7405500626 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -179,6 +179,16 @@ enum {
179 SKB_GSO_TCPV6 = 1 << 4, 179 SKB_GSO_TCPV6 = 1 << 4,
180}; 180};
181 181
182#if BITS_PER_LONG > 32
183#define NET_SKBUFF_DATA_USES_OFFSET 1
184#endif
185
186#ifdef NET_SKBUFF_DATA_USES_OFFSET
187typedef unsigned int sk_buff_data_t;
188#else
189typedef unsigned char *sk_buff_data_t;
190#endif
191
182/** 192/**
183 * struct sk_buff - socket buffer 193 * struct sk_buff - socket buffer
184 * @next: Next buffer in list 194 * @next: Next buffer in list
@@ -236,9 +246,9 @@ struct sk_buff {
236 int iif; 246 int iif;
237 /* 4 byte hole on 64 bit*/ 247 /* 4 byte hole on 64 bit*/
238 248
239 unsigned char *transport_header; 249 sk_buff_data_t transport_header;
240 unsigned char *network_header; 250 sk_buff_data_t network_header;
241 unsigned char *mac_header; 251 sk_buff_data_t mac_header;
242 struct dst_entry *dst; 252 struct dst_entry *dst;
243 struct sec_path *sp; 253 struct sec_path *sp;
244 254
@@ -942,50 +952,92 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
942 skb->tail += len; 952 skb->tail += len;
943} 953}
944 954
955#ifdef NET_SKBUFF_DATA_USES_OFFSET
945static inline unsigned char *skb_transport_header(const struct sk_buff *skb) 956static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
946{ 957{
947 return skb->transport_header; 958 return skb->head + skb->transport_header;
948} 959}
949 960
950static inline void skb_reset_transport_header(struct sk_buff *skb) 961static inline void skb_reset_transport_header(struct sk_buff *skb)
951{ 962{
952 skb->transport_header = skb->data; 963 skb->transport_header = skb->data - skb->head;
953} 964}
954 965
955static inline void skb_set_transport_header(struct sk_buff *skb, 966static inline void skb_set_transport_header(struct sk_buff *skb,
956 const int offset) 967 const int offset)
957{ 968{
958 skb->transport_header = skb->data + offset; 969 skb_reset_transport_header(skb);
959} 970 skb->transport_header += offset;
960
961static inline int skb_transport_offset(const struct sk_buff *skb)
962{
963 return skb->transport_header - skb->data;
964} 971}
965 972
966static inline unsigned char *skb_network_header(const struct sk_buff *skb) 973static inline unsigned char *skb_network_header(const struct sk_buff *skb)
967{ 974{
968 return skb->network_header; 975 return skb->head + skb->network_header;
969} 976}
970 977
971static inline void skb_reset_network_header(struct sk_buff *skb) 978static inline void skb_reset_network_header(struct sk_buff *skb)
972{ 979{
973 skb->network_header = skb->data; 980 skb->network_header = skb->data - skb->head;
974} 981}
975 982
976static inline void skb_set_network_header(struct sk_buff *skb, const int offset) 983static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
977{ 984{
978 skb->network_header = skb->data + offset; 985 skb_reset_network_header(skb);
986 skb->network_header += offset;
979} 987}
980 988
981static inline int skb_network_offset(const struct sk_buff *skb) 989static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
982{ 990{
983 return skb->network_header - skb->data; 991 return skb->head + skb->mac_header;
984} 992}
985 993
986static inline u32 skb_network_header_len(const struct sk_buff *skb) 994static inline int skb_mac_header_was_set(const struct sk_buff *skb)
987{ 995{
988 return skb->transport_header - skb->network_header; 996 return skb->mac_header != ~0U;
997}
998
999static inline void skb_reset_mac_header(struct sk_buff *skb)
1000{
1001 skb->mac_header = skb->data - skb->head;
1002}
1003
1004static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1005{
1006 skb_reset_mac_header(skb);
1007 skb->mac_header += offset;
1008}
1009
1010#else /* NET_SKBUFF_DATA_USES_OFFSET */
1011
1012static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
1013{
1014 return skb->transport_header;
1015}
1016
1017static inline void skb_reset_transport_header(struct sk_buff *skb)
1018{
1019 skb->transport_header = skb->data;
1020}
1021
1022static inline void skb_set_transport_header(struct sk_buff *skb,
1023 const int offset)
1024{
1025 skb->transport_header = skb->data + offset;
1026}
1027
1028static inline unsigned char *skb_network_header(const struct sk_buff *skb)
1029{
1030 return skb->network_header;
1031}
1032
1033static inline void skb_reset_network_header(struct sk_buff *skb)
1034{
1035 skb->network_header = skb->data;
1036}
1037
1038static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
1039{
1040 skb->network_header = skb->data + offset;
989} 1041}
990 1042
991static inline unsigned char *skb_mac_header(const struct sk_buff *skb) 1043static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
@@ -1007,6 +1059,22 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1007{ 1059{
1008 skb->mac_header = skb->data + offset; 1060 skb->mac_header = skb->data + offset;
1009} 1061}
1062#endif /* NET_SKBUFF_DATA_USES_OFFSET */
1063
1064static inline int skb_transport_offset(const struct sk_buff *skb)
1065{
1066 return skb_transport_header(skb) - skb->data;
1067}
1068
1069static inline u32 skb_network_header_len(const struct sk_buff *skb)
1070{
1071 return skb->transport_header - skb->network_header;
1072}
1073
1074static inline int skb_network_offset(const struct sk_buff *skb)
1075{
1076 return skb_network_header(skb) - skb->data;
1077}
1010 1078
1011/* 1079/*
1012 * CPUs often take a performance hit when accessing unaligned memory 1080 * CPUs often take a performance hit when accessing unaligned memory