Diffstat (limited to 'drivers/net/ethernet/aquantia/atlantic/aq_ring.c')
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 71
1 file changed, 43 insertions(+), 28 deletions(-)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index b5f1f62e8e25..74550ccc7a20 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -29,8 +29,8 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
 		goto err_exit;
 	}
 	self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
-					self->size * self->dx_size,
-					&self->dx_ring_pa, GFP_KERNEL);
+					   self->size * self->dx_size,
+					   &self->dx_ring_pa, GFP_KERNEL);
 	if (!self->dx_ring) {
 		err = -ENOMEM;
 		goto err_exit;
@@ -172,6 +172,27 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
 	return !!budget;
 }
 
+static void aq_rx_checksum(struct aq_ring_s *self,
+			   struct aq_ring_buff_s *buff,
+			   struct sk_buff *skb)
+{
+	if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
+		return;
+
+	if (unlikely(buff->is_cso_err)) {
+		++self->stats.rx.errors;
+		skb->ip_summed = CHECKSUM_NONE;
+		return;
+	}
+	if (buff->is_ip_cso) {
+		__skb_incr_checksum_unnecessary(skb);
+		if (buff->is_udp_cso || buff->is_tcp_cso)
+			__skb_incr_checksum_unnecessary(skb);
+	} else {
+		skb->ip_summed = CHECKSUM_NONE;
+	}
+}
+
 #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 int aq_ring_rx_clean(struct aq_ring_s *self,
 		     struct napi_struct *napi,
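The new aq_rx_checksum() helper above reports hardware checksum results by calling __skb_incr_checksum_unnecessary() once for the IP header checksum and a second time when the TCP/UDP checksum was also verified by the NIC. A minimal user-space sketch of that counting idea, using stand-in names and fields rather than the real sk_buff API:

#include <stdio.h>

/* Stand-ins for CHECKSUM_NONE / CHECKSUM_UNNECESSARY; illustration only. */
enum demo_csum_state { DEMO_CSUM_NONE, DEMO_CSUM_UNNECESSARY };

struct demo_skb {
	enum demo_csum_state ip_summed;
	unsigned int csum_level;	/* validated checksums beyond the first */
};

/* Mirrors the spirit of __skb_incr_checksum_unnecessary(): the first call
 * marks one checksum as verified, each further call bumps csum_level.
 */
static void demo_incr_checksum_unnecessary(struct demo_skb *skb)
{
	if (skb->ip_summed == DEMO_CSUM_UNNECESSARY)
		skb->csum_level++;
	else
		skb->ip_summed = DEMO_CSUM_UNNECESSARY;
}

int main(void)
{
	struct demo_skb skb = { DEMO_CSUM_NONE, 0 };

	demo_incr_checksum_unnecessary(&skb);	/* IP header checksum OK */
	demo_incr_checksum_unnecessary(&skb);	/* TCP/UDP checksum OK too */
	printf("ip_summed=%d csum_level=%u\n", skb.ip_summed, skb.csum_level);
	return 0;
}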
@@ -225,9 +246,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 		}
 
 		/* for single fragment packets use build_skb() */
-		if (buff->is_eop) {
+		if (buff->is_eop &&
+		    buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
 			skb = build_skb(page_address(buff->page),
-					buff->len + AQ_SKB_ALIGN);
+					AQ_CFG_RX_FRAME_MAX);
 			if (unlikely(!skb)) {
 				err = -ENOMEM;
 				goto err_exit;
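The hunk above only takes the build_skb() fast path when the received frame still leaves room in the fixed-size RX fragment for the skb_shared_info area that build_skb() keeps at the end of the buffer, and it then passes the full fragment size instead of buff->len + AQ_SKB_ALIGN. A small stand-alone sketch of that size check, with assumed illustrative values (a 2 KiB fragment and a 320-byte shared-info reservation, not taken from the driver headers):

#include <stdio.h>

#define DEMO_RX_FRAME_MAX	(2U * 1024U)	/* stand-in for AQ_CFG_RX_FRAME_MAX */
#define DEMO_SKB_ALIGN		320U		/* stand-in for AQ_SKB_ALIGN */

int main(void)
{
	unsigned int len = 1500U;	/* example received frame length */

	/* build_skb() needs DEMO_SKB_ALIGN bytes at the end of the fragment
	 * for skb_shared_info, so only frames that fit in front of that
	 * reservation can reuse the page directly.
	 */
	if (len <= DEMO_RX_FRAME_MAX - DEMO_SKB_ALIGN)
		printf("fast path: build_skb() over the %u-byte fragment\n",
		       DEMO_RX_FRAME_MAX);
	else
		printf("slow path: netdev_alloc_skb() + skb_add_rx_frag()\n");
	return 0;
}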
@@ -247,34 +269,27 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 					buff->len - ETH_HLEN,
 					SKB_TRUESIZE(buff->len - ETH_HLEN));
 
-			for (i = 1U, next_ = buff->next,
-			     buff_ = &self->buff_ring[next_]; true;
-			     next_ = buff_->next,
-			     buff_ = &self->buff_ring[next_], ++i) {
-				skb_add_rx_frag(skb, i, buff_->page, 0,
-						buff_->len,
-						SKB_TRUESIZE(buff->len -
-							     ETH_HLEN));
-				buff_->is_cleaned = 1;
-
-				if (buff_->is_eop)
-					break;
+			if (!buff->is_eop) {
+				for (i = 1U, next_ = buff->next,
+				     buff_ = &self->buff_ring[next_];
+				     true; next_ = buff_->next,
+				     buff_ = &self->buff_ring[next_], ++i) {
+					skb_add_rx_frag(skb, i,
+							buff_->page, 0,
+							buff_->len,
+							SKB_TRUESIZE(buff->len -
+								     ETH_HLEN));
+					buff_->is_cleaned = 1;
+
+					if (buff_->is_eop)
+						break;
+				}
 			}
 		}
 
 		skb->protocol = eth_type_trans(skb, ndev);
-		if (unlikely(buff->is_cso_err)) {
-			++self->stats.rx.errors;
-			skb->ip_summed = CHECKSUM_NONE;
-		} else {
-			if (buff->is_ip_cso) {
-				__skb_incr_checksum_unnecessary(skb);
-				if (buff->is_udp_cso || buff->is_tcp_cso)
-					__skb_incr_checksum_unnecessary(skb);
-			} else {
-				skb->ip_summed = CHECKSUM_NONE;
-			}
-		}
+
+		aq_rx_checksum(self, buff, skb);
 
 		skb_set_hash(skb, buff->rss_hash,
 			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
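In the last hunk, the descriptor-chain walk that attaches additional page fragments is now skipped entirely for single-fragment packets by the new if (!buff->is_eop) guard; only multi-fragment packets follow buff->next until a descriptor with is_eop set. A simplified, self-contained sketch of that chain walk, using demo types rather than the driver's aq_ring_buff_s:

#include <stdio.h>

/* Demo descriptor: a cut-down stand-in for struct aq_ring_buff_s. */
struct demo_buff {
	unsigned int next;	/* ring index of the next fragment */
	unsigned int len;	/* bytes carried by this fragment */
	int is_eop;		/* last fragment of the packet */
};

/* Sum the packet length starting at @first, walking the chain only when
 * the first descriptor is not already end-of-packet.
 */
static unsigned int demo_packet_len(const struct demo_buff *ring,
				    unsigned int first)
{
	unsigned int total = ring[first].len;
	unsigned int i = first;

	if (!ring[first].is_eop) {
		do {
			i = ring[i].next;
			total += ring[i].len;
		} while (!ring[i].is_eop);
	}
	return total;
}

int main(void)
{
	/* A three-fragment packet occupying ring slots 0, 1 and 2. */
	struct demo_buff ring[] = {
		{ .next = 1, .len = 1024, .is_eop = 0 },
		{ .next = 2, .len = 1024, .is_eop = 0 },
		{ .next = 0, .len =  200, .is_eop = 1 },
	};

	printf("total packet length: %u\n", demo_packet_len(ring, 0));
	return 0;
}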