author     Vlad Yasevich <vyasevic@redhat.com>    2012-11-15 03:49:14 -0500
committer  David S. Miller <davem@davemloft.net>  2012-11-15 17:36:17 -0500
commit     bca49f843eac59cbb1ddd1f4a5d65fccc23b62efd (patch)
tree       b3b0e256222b968854154c90ab32bf42b640d1c5 /net/ipv4
parent     8ca896cfdd17f32f5aa2747644733ebf3725360d (diff)
ipv4: Switch to using the new offload infrastructure.
Switch IPv4 code base to using the new GRO/GSO calls and data.

Signed-off-by: Vlad Yasevich <vyasevic@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
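For context, the calls in the diff below imply roughly the following shape for the new offload hooks; the actual struct net_offload and inet_add_offload() definitions come from the offload-infrastructure patches this change builds on, so treat this as a sketch, with member order and prototypes inferred rather than quoted:

/* Sketch only: per-protocol GSO/GRO callbacks, looked up under RCU via
 * inet_offloads[] instead of carrying these pointers in net_protocol. */
struct net_offload {
        int              (*gso_send_check)(struct sk_buff *skb);
        struct sk_buff  *(*gso_segment)(struct sk_buff *skb,
                                        netdev_features_t features);
        struct sk_buff **(*gro_receive)(struct sk_buff **head,
                                        struct sk_buff *skb);
        int              (*gro_complete)(struct sk_buff *skb);
};

/* Registration helper used from inet_init() below (prototype assumed). */
int inet_add_offload(const struct net_offload *prot, unsigned char protocol);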
Diffstat (limited to 'net/ipv4')
-rw-r--r--    net/ipv4/af_inet.c    30
1 file changed, 16 insertions, 14 deletions
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 3918d869d6d4..66f63ce07ac8 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1251,7 +1251,7 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
 
 static int inet_gso_send_check(struct sk_buff *skb)
 {
-        const struct net_protocol *ops;
+        const struct net_offload *ops;
         const struct iphdr *iph;
         int proto;
         int ihl;
@@ -1275,7 +1275,7 @@ static int inet_gso_send_check(struct sk_buff *skb)
         err = -EPROTONOSUPPORT;
 
         rcu_read_lock();
-        ops = rcu_dereference(inet_protos[proto]);
+        ops = rcu_dereference(inet_offloads[proto]);
         if (likely(ops && ops->gso_send_check))
                 err = ops->gso_send_check(skb);
         rcu_read_unlock();
@@ -1288,7 +1288,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                                    netdev_features_t features)
 {
         struct sk_buff *segs = ERR_PTR(-EINVAL);
-        const struct net_protocol *ops;
+        const struct net_offload *ops;
         struct iphdr *iph;
         int proto;
         int ihl;
@@ -1325,7 +1325,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
         segs = ERR_PTR(-EPROTONOSUPPORT);
 
         rcu_read_lock();
-        ops = rcu_dereference(inet_protos[proto]);
+        ops = rcu_dereference(inet_offloads[proto]);
         if (likely(ops && ops->gso_segment))
                 segs = ops->gso_segment(skb, features);
         rcu_read_unlock();
@@ -1356,7 +1356,7 @@ out:
 static struct sk_buff **inet_gro_receive(struct sk_buff **head,
                                          struct sk_buff *skb)
 {
-        const struct net_protocol *ops;
+        const struct net_offload *ops;
         struct sk_buff **pp = NULL;
         struct sk_buff *p;
         const struct iphdr *iph;
@@ -1378,7 +1378,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
         proto = iph->protocol;
 
         rcu_read_lock();
-        ops = rcu_dereference(inet_protos[proto]);
+        ops = rcu_dereference(inet_offloads[proto]);
         if (!ops || !ops->gro_receive)
                 goto out_unlock;
 
@@ -1435,7 +1435,7 @@ static int inet_gro_complete(struct sk_buff *skb)
 {
         __be16 newlen = htons(skb->len - skb_network_offset(skb));
         struct iphdr *iph = ip_hdr(skb);
-        const struct net_protocol *ops;
+        const struct net_offload *ops;
         int proto = iph->protocol;
         int err = -ENOSYS;
 
@@ -1443,7 +1443,7 @@ static int inet_gro_complete(struct sk_buff *skb)
         iph->tot_len = newlen;
 
         rcu_read_lock();
-        ops = rcu_dereference(inet_protos[proto]);
+        ops = rcu_dereference(inet_offloads[proto]);
         if (WARN_ON(!ops || !ops->gro_complete))
                 goto out_unlock;
 
@@ -1558,10 +1558,6 @@ static const struct net_protocol tcp_protocol = {
         .early_demux    =       tcp_v4_early_demux,
         .handler        =       tcp_v4_rcv,
         .err_handler    =       tcp_v4_err,
-        .gso_send_check =       tcp_v4_gso_send_check,
-        .gso_segment    =       tcp_tso_segment,
-        .gro_receive    =       tcp4_gro_receive,
-        .gro_complete   =       tcp4_gro_complete,
         .no_policy      =       1,
         .netns_ok       =       1,
 };
@@ -1576,8 +1572,6 @@ static const struct net_offload tcp_offload = {
 static const struct net_protocol udp_protocol = {
         .handler        =       udp_rcv,
         .err_handler    =       udp_err,
-        .gso_send_check =       udp4_ufo_send_check,
-        .gso_segment    =       udp4_ufo_fragment,
         .no_policy      =       1,
         .netns_ok       =       1,
 };
@@ -1726,6 +1720,14 @@ static int __init inet_init(void)
         tcp_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
 
         /*
+         * Add offloads
+         */
+        if (inet_add_offload(&udp_offload, IPPROTO_UDP) < 0)
+                pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
+        if (inet_add_offload(&tcp_offload, IPPROTO_TCP) < 0)
+                pr_crit("%s: Cannot add TCP protocol offlaod\n", __func__);
+
+        /*
          * Add all the base protocols.
          */
 
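The GSO/GRO handlers dropped from tcp_protocol and udp_protocol above migrate into net_offload instances that inet_init() now registers; the hunk context at line 1576 confirms tcp_offload is already defined in this file, but the definitions themselves fall outside the hunks shown. A sketch of what they presumably look like, reusing the handler names removed in this patch:

/* Assumed companion definitions in net/ipv4/af_inet.c (not shown in the
 * hunks above); handler names match the fields removed from net_protocol. */
static const struct net_offload tcp_offload = {
        .gso_send_check = tcp_v4_gso_send_check,
        .gso_segment    = tcp_tso_segment,
        .gro_receive    = tcp4_gro_receive,
        .gro_complete   = tcp4_gro_complete,
};

static const struct net_offload udp_offload = {
        .gso_send_check = udp4_ufo_send_check,
        .gso_segment    = udp4_ufo_fragment,
};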