about summary refs log tree commit diff stats
path: root/net/ipv6/ip6_offload.c
diff options
context:
space:
mode:
author	Vlad Yasevich <vyasevic@redhat.com>	2012-11-15 03:49:23 -0500
committer	David S. Miller <davem@davemloft.net>	2012-11-15 17:39:51 -0500
commitf191a1d17f227032c159e5499809f545402b6dc6 (patch)
tree48fc87a0b34bd2da06fedcd7e5e3ed6b08f7a3ac /net/ipv6/ip6_offload.c
parentc6b641a4c6b32f39db678c2441cb1ef824110d74 (diff)
net: Remove code duplication between offload structures
Move the offload callbacks into its own structure.

Signed-off-by: Vlad Yasevich <vyasevic@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6/ip6_offload.c')
-rw-r--r--	net/ipv6/ip6_offload.c	28
1 file changed, 15 insertions, 13 deletions
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 63d79d9005bd..f26f0da7f095 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -70,9 +70,9 @@ static int ipv6_gso_send_check(struct sk_buff *skb)
 	ops = rcu_dereference(inet6_offloads[
 		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
 
-	if (likely(ops && ops->gso_send_check)) {
+	if (likely(ops && ops->callbacks.gso_send_check)) {
 		skb_reset_transport_header(skb);
-		err = ops->gso_send_check(skb);
+		err = ops->callbacks.gso_send_check(skb);
 	}
 	rcu_read_unlock();
 
@@ -113,9 +113,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
 	rcu_read_lock();
 	ops = rcu_dereference(inet6_offloads[proto]);
-	if (likely(ops && ops->gso_segment)) {
+	if (likely(ops && ops->callbacks.gso_segment)) {
 		skb_reset_transport_header(skb);
-		segs = ops->gso_segment(skb, features);
+		segs = ops->callbacks.gso_segment(skb, features);
 	}
 	rcu_read_unlock();
 
@@ -173,7 +173,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 	rcu_read_lock();
 	proto = iph->nexthdr;
 	ops = rcu_dereference(inet6_offloads[proto]);
-	if (!ops || !ops->gro_receive) {
+	if (!ops || !ops->callbacks.gro_receive) {
 		__pskb_pull(skb, skb_gro_offset(skb));
 		proto = ipv6_gso_pull_exthdrs(skb, proto);
 		skb_gro_pull(skb, -skb_transport_offset(skb));
@@ -181,7 +181,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 		__skb_push(skb, skb_gro_offset(skb));
 
 		ops = rcu_dereference(inet6_offloads[proto]);
-		if (!ops || !ops->gro_receive)
+		if (!ops || !ops->callbacks.gro_receive)
 			goto out_unlock;
 
 		iph = ipv6_hdr(skb);
@@ -220,7 +220,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 	csum = skb->csum;
 	skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
 
-	pp = ops->gro_receive(head, skb);
+	pp = ops->callbacks.gro_receive(head, skb);
 
 	skb->csum = csum;
 
@@ -244,10 +244,10 @@ static int ipv6_gro_complete(struct sk_buff *skb)
 
 	rcu_read_lock();
 	ops = rcu_dereference(inet6_offloads[NAPI_GRO_CB(skb)->proto]);
-	if (WARN_ON(!ops || !ops->gro_complete))
+	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
 		goto out_unlock;
 
-	err = ops->gro_complete(skb);
+	err = ops->callbacks.gro_complete(skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -257,10 +257,12 @@ out_unlock:
 
 static struct packet_offload ipv6_packet_offload __read_mostly = {
 	.type = cpu_to_be16(ETH_P_IPV6),
-	.gso_send_check = ipv6_gso_send_check,
-	.gso_segment = ipv6_gso_segment,
-	.gro_receive = ipv6_gro_receive,
-	.gro_complete = ipv6_gro_complete,
+	.callbacks = {
+		.gso_send_check = ipv6_gso_send_check,
+		.gso_segment = ipv6_gso_segment,
+		.gro_receive = ipv6_gro_receive,
+		.gro_complete = ipv6_gro_complete,
+	},
 };
 
 static int __init ipv6_offload_init(void)