-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c       |  10
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h       |   2
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c     |  36
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h          |  23
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c  |  17
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c     | 309
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h      |   2
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c          |   4
8 files changed, 378 insertions, 25 deletions
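
At a glance, this patch plumbs MAPv4 checksum offload through rmnet: downlink frames gain a hardware-filled checksum trailer that the driver validates before trimming MAP padding, uplink frames gain a header telling the hardware where to insert the transport checksum, and the old ingress_data_format field becomes a combined data_format bitmask so the new egress flag fits alongside the ingress ones.

The trailer validation below leans on the kernel's 16-bit one's-complement helpers csum16_add()/csum16_sub() from include/net/checksum.h. Here is a minimal userspace sketch of that arithmetic with illustrative values (not code from the patch):

#include <stdint.h>
#include <stdio.h>

/* end-around-carry add, mirroring the kernel's csum16_add() */
static uint16_t csum16_add(uint16_t csum, uint16_t addend)
{
	uint32_t res = csum + (uint32_t)addend;

	return (uint16_t)((res & 0xffff) + (res >> 16));
}

/* one's-complement subtract == add the complement, as csum16_sub() */
static uint16_t csum16_sub(uint16_t csum, uint16_t addend)
{
	return csum16_add(csum, (uint16_t)~addend);
}

int main(void)
{
	/* if whole = hdr (+') payload, then payload = whole (-') hdr */
	uint16_t hdr = 0x1234, payload = 0xabcd;
	uint16_t whole = csum16_add(hdr, payload);

	printf("recovered payload sum: 0x%04x\n", csum16_sub(whole, hdr));
	return 0;
}

This is exactly how rmnet_map_ipv4_dl_csum_trailer() peels the IP header sum out of the hardware-reported value and folds the pseudo-header back in.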
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index cedacddf50fb..7e7704daf5f1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -143,7 +143,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
 			 struct nlattr *tb[], struct nlattr *data[],
 			 struct netlink_ext_ack *extack)
 {
-	int ingress_format = RMNET_INGRESS_FORMAT_DEAGGREGATION;
+	u32 data_format = RMNET_INGRESS_FORMAT_DEAGGREGATION;
 	struct net_device *real_dev;
 	int mode = RMNET_EPMODE_VND;
 	struct rmnet_endpoint *ep;
@@ -185,11 +185,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
 		struct ifla_vlan_flags *flags;
 
 		flags = nla_data(data[IFLA_VLAN_FLAGS]);
-		ingress_format = flags->flags & flags->mask;
+		data_format = flags->flags & flags->mask;
 	}
 
-	netdev_dbg(dev, "data format [ingress 0x%08X]\n", ingress_format);
-	port->ingress_data_format = ingress_format;
+	netdev_dbg(dev, "data format [0x%08X]\n", data_format);
+	port->data_format = data_format;
 
 	return 0;
 
@@ -353,7 +353,7 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
 		struct ifla_vlan_flags *flags;
 
 		flags = nla_data(data[IFLA_VLAN_FLAGS]);
-		port->ingress_data_format = flags->flags & flags->mask;
+		port->data_format = flags->flags & flags->mask;
 	}
 
 	return 0;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index 2ea9fe326571..00e4634100d3 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -32,7 +32,7 @@ struct rmnet_endpoint {
  */
 struct rmnet_port {
 	struct net_device *dev;
-	u32 ingress_data_format;
+	u32 data_format;
 	u8 nr_rmnet_devs;
 	u8 rmnet_mode;
 	struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 05539321ba3a..601edec28c5f 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -16,6 +16,7 @@
 #include <linux/netdevice.h>
 #include <linux/netdev_features.h>
 #include <linux/if_arp.h>
+#include <net/sock.h>
 #include "rmnet_private.h"
 #include "rmnet_config.h"
 #include "rmnet_vnd.h"
@@ -65,19 +66,19 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
 			    struct rmnet_port *port)
 {
 	struct rmnet_endpoint *ep;
+	u16 len, pad;
 	u8 mux_id;
-	u16 len;
 
 	if (RMNET_MAP_GET_CD_BIT(skb)) {
-		if (port->ingress_data_format
-		    & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
+		if (port->data_format & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
 			return rmnet_map_command(skb, port);
 
 		goto free_skb;
 	}
 
 	mux_id = RMNET_MAP_GET_MUX_ID(skb);
-	len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb);
+	pad = RMNET_MAP_GET_PAD(skb);
+	len = RMNET_MAP_GET_LENGTH(skb) - pad;
 
 	if (mux_id >= RMNET_MAX_LOGICAL_EP)
 		goto free_skb;
@@ -90,8 +91,14 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
 
 	/* Subtract MAP header */
 	skb_pull(skb, sizeof(struct rmnet_map_header));
-	skb_trim(skb, len);
 	rmnet_set_skb_proto(skb);
+
+	if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) {
+		if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
+
+	skb_trim(skb, len);
 	rmnet_deliver_skb(skb);
 	return;
 
@@ -114,8 +121,8 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
 		skb_push(skb, ETH_HLEN);
 	}
 
-	if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
-		while ((skbn = rmnet_map_deaggregate(skb)) != NULL)
+	if (port->data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
+		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
 			__rmnet_map_ingress_handler(skbn, port);
 
 		consume_skb(skb);
@@ -134,19 +141,24 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 	additional_header_len = 0;
 	required_headroom = sizeof(struct rmnet_map_header);
 
+	if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4) {
+		additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
+		required_headroom += additional_header_len;
+	}
+
 	if (skb_headroom(skb) < required_headroom) {
 		if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
 			goto fail;
 	}
 
+	if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
+		rmnet_map_checksum_uplink_packet(skb, orig_dev);
+
 	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
 	if (!map_header)
 		goto fail;
 
-	if (mux_id == 0xff)
-		map_header->mux_id = 0;
-	else
-		map_header->mux_id = mux_id;
+	map_header->mux_id = mux_id;
 
 	skb->protocol = htons(ETH_P_MAP);
 
@@ -208,6 +220,8 @@ void rmnet_egress_handler(struct sk_buff *skb)
 	struct rmnet_priv *priv;
 	u8 mux_id;
 
+	sk_pacing_shift_update(skb->sk, 8);
+
 	orig_dev = skb->dev;
 	priv = netdev_priv(orig_dev);
 	skb->dev = priv->real_dev;
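
The sk_pacing_shift_update(skb->sk, 8) call on egress deserves a note: TCP Small Queues caps per-socket tx-queued data at roughly sk_pacing_rate >> sk_pacing_shift, and the default shift of 10 can starve a MAP aggregation engine of packets to coalesce. A back-of-the-envelope sketch of what lowering the shift to 8 buys (illustrative rate, not from the patch):

#include <stdio.h>

int main(void)
{
	/* 50 Mbit/s expressed in bytes per second */
	unsigned long long pacing_rate = 50ULL * 1000 * 1000 / 8;

	for (int shift = 10; shift >= 8; shift--)
		printf("sk_pacing_shift %d -> ~%llu KB queueing budget\n",
		       shift, (pacing_rate >> shift) / 1024);
	return 0;
}

Roughly a 4x larger in-flight budget per socket, which keeps the aggregation pipe busy without removing the bufferbloat cap entirely.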
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 4df359de28c5..6ce31e29136d 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -47,6 +47,22 @@ struct rmnet_map_header {
 	u16  pkt_len;
 }  __aligned(1);
 
+struct rmnet_map_dl_csum_trailer {
+	u8  reserved1;
+	u8  valid:1;
+	u8  reserved2:7;
+	u16 csum_start_offset;
+	u16 csum_length;
+	__be16 csum_value;
+} __aligned(1);
+
+struct rmnet_map_ul_csum_header {
+	__be16 csum_start_offset;
+	u16 csum_insert_offset:14;
+	u16 udp_ip4_ind:1;
+	u16 csum_enabled:1;
+} __aligned(1);
+
 #define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
 				 (Y)->data)->mux_id)
 #define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
@@ -67,10 +83,13 @@ struct rmnet_map_header {
 #define RMNET_MAP_NO_PAD_BYTES        0
 #define RMNET_MAP_ADD_PAD_BYTES       1
 
-u8 rmnet_map_demultiplex(struct sk_buff *skb);
-struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb);
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+				      struct rmnet_port *port);
 struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
 						  int hdrlen, int pad);
 void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+				      struct net_device *orig_dev);
 
 #endif /* _RMNET_MAP_H_ */
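
One subtlety in struct rmnet_map_ul_csum_header: csum_insert_offset, udp_ip4_ind and csum_enabled share a single 16-bit storage unit that the driver later byte-swaps wholesale (the "hdr++; *hdr = htons(*hdr);" step in rmnet_map_data.c). A sketch of what that second word holds, packing the bits by hand under the assumption of a little-endian host where C bitfields are allocated LSB-first (field values illustrative):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	uint16_t csum_insert_offset = 6;	/* e.g. offsetof(struct udphdr, check) */
	uint16_t udp_ip4_ind = 1;
	uint16_t csum_enabled = 1;

	/* first declared bitfield -> lowest bits: insert offset in
	 * bits 0-13, udp_ip4_ind in bit 14, csum_enabled in bit 15
	 */
	uint16_t host_word = csum_insert_offset |
			     (udp_ip4_ind << 14) |
			     (csum_enabled << 15);

	printf("host 0x%04x -> on the wire 0x%04x\n",
	       host_word, htons(host_word));
	return 0;
}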
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index 51e604923ac1..6bc328fb88e1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -58,11 +58,24 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
 }
 
 static void rmnet_map_send_ack(struct sk_buff *skb,
-			       unsigned char type)
+			       unsigned char type,
+			       struct rmnet_port *port)
 {
 	struct rmnet_map_control_command *cmd;
 	int xmit_status;
 
+	if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) {
+		if (skb->len < sizeof(struct rmnet_map_header) +
+		    RMNET_MAP_GET_LENGTH(skb) +
+		    sizeof(struct rmnet_map_dl_csum_trailer)) {
+			kfree_skb(skb);
+			return;
+		}
+
+		skb_trim(skb, skb->len -
+			 sizeof(struct rmnet_map_dl_csum_trailer));
+	}
+
 	skb->protocol = htons(ETH_P_MAP);
 
 	cmd = RMNET_MAP_GET_CMD_START(skb);
@@ -100,5 +113,5 @@ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
 		break;
 	}
 	if (rc == RMNET_MAP_COMMAND_ACK)
-		rmnet_map_send_ack(skb, rc);
+		rmnet_map_send_ack(skb, rc, port);
 }
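
The new guard in rmnet_map_send_ack() exists because, with CKSUMV4 enabled, even command frames arrive with a downlink checksum trailer appended; echoing it back inside the ack would corrupt the command. A minimal userspace sketch of the length check and trim (sizes match the structs above: 4-byte MAP header, 8-byte trailer; names are illustrative):

#include <stddef.h>
#include <stdio.h>

#define MAP_HDR_LEN	4	/* sizeof(struct rmnet_map_header)          */
#define DL_TRAILER_LEN	8	/* sizeof(struct rmnet_map_dl_csum_trailer) */

static int trim_ack(size_t *frame_len, size_t map_payload_len)
{
	/* runt frame: cannot hold header + command payload + trailer */
	if (*frame_len < MAP_HDR_LEN + map_payload_len + DL_TRAILER_LEN)
		return -1;			/* driver kfree_skb()s here  */

	*frame_len -= DL_TRAILER_LEN;		/* the skb_trim() equivalent */
	return 0;
}

int main(void)
{
	size_t len = MAP_HDR_LEN + 12 + DL_TRAILER_LEN;

	if (!trim_ack(&len, 12))
		printf("ack goes out as %zu bytes, trailer stripped\n", len);
	return 0;
}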
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 86b8c758f94e..c74a6c56d315 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -14,6 +14,9 @@
  */
 
 #include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
 #include "rmnet_config.h"
 #include "rmnet_map.h"
 #include "rmnet_private.h"
@@ -21,6 +24,233 @@
 #define RMNET_MAP_DEAGGR_SPACING  64
 #define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
 
+static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
+					 const void *txporthdr)
+{
+	__sum16 *check = NULL;
+
+	switch (protocol) {
+	case IPPROTO_TCP:
+		check = &(((struct tcphdr *)txporthdr)->check);
+		break;
+
+	case IPPROTO_UDP:
+		check = &(((struct udphdr *)txporthdr)->check);
+		break;
+
+	default:
+		check = NULL;
+		break;
+	}
+
+	return check;
+}
+
+static int
+rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
+			       struct rmnet_map_dl_csum_trailer *csum_trailer)
+{
+	__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
+	u16 csum_value, csum_value_final;
+	struct iphdr *ip4h;
+	void *txporthdr;
+	__be16 addend;
+
+	ip4h = (struct iphdr *)(skb->data);
+	if ((ntohs(ip4h->frag_off) & IP_MF) ||
+	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
+		return -EOPNOTSUPP;
+
+	txporthdr = skb->data + ip4h->ihl * 4;
+
+	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
+
+	if (!csum_field)
+		return -EPROTONOSUPPORT;
+
+	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
+	if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP)
+		return 0;
+
+	csum_value = ~ntohs(csum_trailer->csum_value);
+	hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
+	ip_payload_csum = csum16_sub((__force __sum16)csum_value,
+				     (__force __be16)hdr_csum);
+
+	pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
+					 ntohs(ip4h->tot_len) - ip4h->ihl * 4,
+					 ip4h->protocol, 0);
+	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+	pseudo_csum = csum16_add(ip_payload_csum, addend);
+
+	addend = (__force __be16)ntohs((__force __be16)*csum_field);
+	csum_temp = ~csum16_sub(pseudo_csum, addend);
+	csum_value_final = (__force u16)csum_temp;
+
+	if (unlikely(csum_value_final == 0)) {
+		switch (ip4h->protocol) {
+		case IPPROTO_UDP:
+			/* RFC 768 - DL4 1's complement rule for UDP csum 0 */
+			csum_value_final = ~csum_value_final;
+			break;
+
+		case IPPROTO_TCP:
+			/* DL4 Non-RFC compliant TCP checksum found */
+			if (*csum_field == (__force __sum16)0xFFFF)
+				csum_value_final = ~csum_value_final;
+			break;
+		}
+	}
+
+	if (csum_value_final == ntohs((__force __be16)*csum_field))
+		return 0;
+	else
+		return -EINVAL;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int
+rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
+			       struct rmnet_map_dl_csum_trailer *csum_trailer)
+{
+	__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
+	u16 csum_value, csum_value_final;
+	__be16 ip6_hdr_csum, addend;
+	struct ipv6hdr *ip6h;
+	void *txporthdr;
+	u32 length;
+
+	ip6h = (struct ipv6hdr *)(skb->data);
+
+	txporthdr = skb->data + sizeof(struct ipv6hdr);
+	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
+
+	if (!csum_field)
+		return -EPROTONOSUPPORT;
+
+	csum_value = ~ntohs(csum_trailer->csum_value);
+	ip6_hdr_csum = (__force __be16)
+			~ntohs((__force __be16)ip_compute_csum(ip6h,
+			       (int)(txporthdr - (void *)(skb->data))));
+	ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
+				      ip6_hdr_csum);
+
+	length = (ip6h->nexthdr == IPPROTO_UDP) ?
+		 ntohs(((struct udphdr *)txporthdr)->len) :
+		 ntohs(ip6h->payload_len);
+	pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+			length, ip6h->nexthdr, 0));
+	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+	pseudo_csum = csum16_add(ip6_payload_csum, addend);
+
+	addend = (__force __be16)ntohs((__force __be16)*csum_field);
+	csum_temp = ~csum16_sub(pseudo_csum, addend);
+	csum_value_final = (__force u16)csum_temp;
+
+	if (unlikely(csum_value_final == 0)) {
+		switch (ip6h->nexthdr) {
+		case IPPROTO_UDP:
+			/* RFC 2460 section 8.1
+			 * DL6 One's complement rule for UDP checksum 0
+			 */
+			csum_value_final = ~csum_value_final;
+			break;
+
+		case IPPROTO_TCP:
+			/* DL6 Non-RFC compliant TCP checksum found */
+			if (*csum_field == (__force __sum16)0xFFFF)
+				csum_value_final = ~csum_value_final;
+			break;
+		}
+	}
+
+	if (csum_value_final == ntohs((__force __be16)*csum_field))
+		return 0;
+	else
+		return -EINVAL;
+}
+#endif
+
+static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
+{
+	struct iphdr *ip4h = (struct iphdr *)iphdr;
+	void *txphdr;
+	u16 *csum;
+
+	txphdr = iphdr + ip4h->ihl * 4;
+
+	if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
+		csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
+		*csum = ~(*csum);
+	}
+}
+
+static void
+rmnet_map_ipv4_ul_csum_header(void *iphdr,
+			      struct rmnet_map_ul_csum_header *ul_header,
+			      struct sk_buff *skb)
+{
+	struct iphdr *ip4h = (struct iphdr *)iphdr;
+	__be16 *hdr = (__be16 *)ul_header, offset;
+
+	offset = htons((__force u16)(skb_transport_header(skb) -
+				     (unsigned char *)iphdr));
+	ul_header->csum_start_offset = offset;
+	ul_header->csum_insert_offset = skb->csum_offset;
+	ul_header->csum_enabled = 1;
+	if (ip4h->protocol == IPPROTO_UDP)
+		ul_header->udp_ip4_ind = 1;
+	else
+		ul_header->udp_ip4_ind = 0;
+
+	/* Changing remaining fields to network order */
+	hdr++;
+	*hdr = htons((__force u16)*hdr);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
+{
+	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
+	void *txphdr;
+	u16 *csum;
+
+	txphdr = ip6hdr + sizeof(struct ipv6hdr);
+
+	if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
+		csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
+		*csum = ~(*csum);
+	}
+}
+
+static void
+rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+			      struct rmnet_map_ul_csum_header *ul_header,
+			      struct sk_buff *skb)
+{
+	__be16 *hdr = (__be16 *)ul_header, offset;
+
+	offset = htons((__force u16)(skb_transport_header(skb) -
+				     (unsigned char *)ip6hdr));
+	ul_header->csum_start_offset = offset;
+	ul_header->csum_insert_offset = skb->csum_offset;
+	ul_header->csum_enabled = 1;
+	ul_header->udp_ip4_ind = 0;
+
+	/* Changing remaining fields to network order */
+	hdr++;
+	*hdr = htons((__force u16)*hdr);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
+}
+#endif
+
 /* Adds MAP header to front of skb->data
  * Padding is calculated and set appropriately in MAP header. Mux ID is
  * initialized to 0.
@@ -32,9 +262,6 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
 	u32 padding, map_datalen;
 	u8 *padbytes;
 
-	if (skb_headroom(skb) < sizeof(struct rmnet_map_header))
-		return NULL;
-
 	map_datalen = skb->len - hdrlen;
 	map_header = (struct rmnet_map_header *)
 			skb_push(skb, sizeof(struct rmnet_map_header));
@@ -69,7 +296,8 @@ done:
  * returned, indicating that there are no more packets to deaggregate. Caller
  * is responsible for freeing the original skb.
  */
-struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+				      struct rmnet_port *port)
 {
 	struct rmnet_map_header *maph;
 	struct sk_buff *skbn;
@@ -81,6 +309,9 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
 	maph = (struct rmnet_map_header *)skb->data;
 	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
 
+	if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)
+		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
+
 	if (((int)skb->len - (int)packet_len) < 0)
 		return NULL;
 
@@ -100,3 +331,73 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
 
 	return skbn;
 }
+
+/* Validates packet checksums. Function takes a pointer to
+ * the beginning of a buffer which contains the IP payload +
+ * padding + checksum trailer.
+ * Only IPv4 and IPv6 are supported along with TCP & UDP.
+ * Fragmented or tunneled packets are not supported.
+ */
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
+{
+	struct rmnet_map_dl_csum_trailer *csum_trailer;
+
+	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+		return -EOPNOTSUPP;
+
+	csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
+
+	if (!csum_trailer->valid)
+		return -EINVAL;
+
+	if (skb->protocol == htons(ETH_P_IP))
+		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer);
+	else if (skb->protocol == htons(ETH_P_IPV6))
+#if IS_ENABLED(CONFIG_IPV6)
+		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer);
+#else
+		return -EPROTONOSUPPORT;
+#endif
+
+	return 0;
+}
+
+/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ */
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+				      struct net_device *orig_dev)
+{
+	struct rmnet_map_ul_csum_header *ul_header;
+	void *iphdr;
+
+	ul_header = (struct rmnet_map_ul_csum_header *)
+		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
+
+	if (unlikely(!(orig_dev->features &
+		       (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
+		goto sw_csum;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		iphdr = (char *)ul_header +
+			sizeof(struct rmnet_map_ul_csum_header);
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
+			return;
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+#if IS_ENABLED(CONFIG_IPV6)
+			rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
+			return;
+#else
+			goto sw_csum;
+#endif
+		}
+	}
+
+sw_csum:
+	ul_header->csum_start_offset = 0;
+	ul_header->csum_insert_offset = 0;
+	ul_header->csum_enabled = 0;
+	ul_header->udp_ip4_ind = 0;
+}
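
To make the trailer validation above concrete, here is a self-contained worked example of the same arithmetic on a fabricated UDP/IPv4 packet: the hardware-style sum covers the transport segment including its checksum field, the driver folds the pseudo-header in, backs the checksum field itself out, and the complement must match that field. (Userspace sketch; addresses, ports and payload are made up, and the RFC 768 zero-checksum special cases handled by the driver are skipped.)

#include <stdint.h>
#include <stdio.h>

/* big-endian one's-complement sum with end-around carry */
static uint16_t ones_sum(const uint8_t *p, size_t len, uint32_t acc)
{
	for (; len > 1; p += 2, len -= 2)
		acc += (uint32_t)p[0] << 8 | p[1];
	if (len)
		acc += (uint32_t)p[0] << 8;
	while (acc >> 16)
		acc = (acc & 0xffff) + (acc >> 16);
	return (uint16_t)acc;
}

int main(void)
{
	/* UDP header (src 12345, dst 53, len 12, csum 0) + "ping" */
	uint8_t seg[12] = { 0x30, 0x39, 0x00, 0x35, 0x00, 0x0c, 0x00, 0x00,
			    'p', 'i', 'n', 'g' };
	/* pseudo header: 10.0.0.1 -> 10.0.0.2, proto 17, UDP length 12 */
	uint8_t pseudo[12] = { 10, 0, 0, 1, 10, 0, 0, 2, 0, 17, 0, 12 };

	/* sender computes the real checksum over pseudo + segment */
	uint16_t csum = ~ones_sum(seg, sizeof(seg),
				  ones_sum(pseudo, sizeof(pseudo), 0));
	seg[6] = csum >> 8;
	seg[7] = csum & 0xff;

	/* "hardware" trailer value: raw sum over the received payload */
	uint16_t hw = ones_sum(seg, sizeof(seg), 0);

	/* driver: fold pseudo-header in, subtract the field (add ~field) */
	uint32_t v = hw;
	v += ones_sum(pseudo, sizeof(pseudo), 0);
	v += (uint16_t)~csum;
	while (v >> 16)
		v = (v & 0xffff) + (v >> 16);

	printf("field 0x%04x, recomputed 0x%04x -> %s\n",
	       csum, (uint16_t)~v,
	       csum == (uint16_t)~v ? "CHECKSUM_UNNECESSARY" : "-EINVAL");
	return 0;
}

The IPv4 path additionally subtracts the IP header sum first, because the trailer value covers the whole IP packet; that is the hdr_csum/ip_payload_csum step in rmnet_map_ipv4_dl_csum_trailer().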
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
index d21428078504..de0143eaa05a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
@@ -21,6 +21,8 @@
 /* Constants */
 #define RMNET_INGRESS_FORMAT_DEAGGREGATION      BIT(0)
 #define RMNET_INGRESS_FORMAT_MAP_COMMANDS       BIT(1)
+#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4        BIT(2)
+#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4         BIT(3)
 
 /* Replace skb->dev to a virtual rmnet device and pass up the stack */
 #define RMNET_EPMODE_VND (1)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 5bb29f44d114..570a227acdd8 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -188,6 +188,10 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
 	if (rmnet_get_endpoint(port, id))
 		return -EBUSY;
 
+	rmnet_dev->hw_features = NETIF_F_RXCSUM;
+	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	rmnet_dev->hw_features |= NETIF_F_SG;
+
 	rc = register_netdevice(rmnet_dev);
 	if (!rc) {
 		ep->egress_dev = rmnet_dev;
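
A closing note on the rmnet_vnd.c hunk: setting hw_features (rather than features directly) advertises the checksum and scatter-gather offloads as user-toggleable, so once the device is registered they should be visible and switchable through the standard ethtool interface, e.g. "ethtool -k rmnet0" to inspect and "ethtool -K rmnet0 rx off" to force software RX checksum validation (device name illustrative). With rx-checksumming disabled, rmnet_map_checksum_downlink_packet() bails out early with -EOPNOTSUPP via its NETIF_F_RXCSUM check and the stack verifies checksums itself.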