aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorBen Hutchings <bhutchings@solarflare.com>2011-08-26 13:05:11 -0400
committerBen Hutchings <bhutchings@solarflare.com>2012-01-26 19:11:00 -0500
commitdb3395697cad6e9dff8d21249e0b59dc9bb83b48 (patch)
treef3cfd937eda7e0a88733c4c49e1512fa382253e5 /drivers/net
parent1ddceb4c69463e09b6929c750046c59589d45d82 (diff)
sfc: Replace efx_rx_buffer::is_page and other booleans with a flags field
Replace checksummed and discard booleans from efx_handle_rx_event() with a bitmask, added to the flags field.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/ethernet/sfc/efx.c3
-rw-r--r--drivers/net/ethernet/sfc/efx.h4
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h16
-rw-r--r--drivers/net/ethernet/sfc/nic.c32
-rw-r--r--drivers/net/ethernet/sfc/rx.c65
5 files changed, 59 insertions, 61 deletions
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index a352c55cb54a..952d0bf7695a 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -229,8 +229,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
229 229
230 /* Deliver last RX packet. */ 230 /* Deliver last RX packet. */
231 if (channel->rx_pkt) { 231 if (channel->rx_pkt) {
232 __efx_rx_packet(channel, channel->rx_pkt, 232 __efx_rx_packet(channel, channel->rx_pkt);
233 channel->rx_pkt_csummed);
234 channel->rx_pkt = NULL; 233 channel->rx_pkt = NULL;
235 } 234 }
236 235
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index e0b66d158d79..7f546e2c39e2 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -40,9 +40,9 @@ extern void efx_rx_strategy(struct efx_channel *channel);
40extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); 40extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
41extern void efx_rx_slow_fill(unsigned long context); 41extern void efx_rx_slow_fill(unsigned long context);
42extern void __efx_rx_packet(struct efx_channel *channel, 42extern void __efx_rx_packet(struct efx_channel *channel,
43 struct efx_rx_buffer *rx_buf, bool checksummed); 43 struct efx_rx_buffer *rx_buf);
44extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 44extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
45 unsigned int len, bool checksummed, bool discard); 45 unsigned int len, u16 flags);
46extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); 46extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
47 47
48#define EFX_MAX_DMAQ_SIZE 4096UL 48#define EFX_MAX_DMAQ_SIZE 4096UL
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index c20483efc22c..53864014c2b4 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -205,12 +205,12 @@ struct efx_tx_queue {
205/** 205/**
206 * struct efx_rx_buffer - An Efx RX data buffer 206 * struct efx_rx_buffer - An Efx RX data buffer
207 * @dma_addr: DMA base address of the buffer 207 * @dma_addr: DMA base address of the buffer
208 * @skb: The associated socket buffer, if any. 208 * @skb: The associated socket buffer. Valid iff !(@flags & %EFX_RX_BUF_PAGE).
209 * If both this and page are %NULL, the buffer slot is currently free. 209 * Will be %NULL if the buffer slot is currently free.
210 * @page: The associated page buffer, if any. 210 * @page: The associated page buffer. Valid iff @flags & %EFX_RX_BUF_PAGE.
211 * If both this and skb are %NULL, the buffer slot is currently free. 211 * Will be %NULL if the buffer slot is currently free.
212 * @len: Buffer length, in bytes. 212 * @len: Buffer length, in bytes.
213 * @is_page: Indicates if @page is valid. If false, @skb is valid. 213 * @flags: Flags for buffer and packet state.
214 */ 214 */
215struct efx_rx_buffer { 215struct efx_rx_buffer {
216 dma_addr_t dma_addr; 216 dma_addr_t dma_addr;
@@ -219,8 +219,11 @@ struct efx_rx_buffer {
219 struct page *page; 219 struct page *page;
220 } u; 220 } u;
221 unsigned int len; 221 unsigned int len;
222 bool is_page; 222 u16 flags;
223}; 223};
224#define EFX_RX_BUF_PAGE 0x0001
225#define EFX_RX_PKT_CSUMMED 0x0002
226#define EFX_RX_PKT_DISCARD 0x0004
224 227
225/** 228/**
226 * struct efx_rx_page_state - Page-based rx buffer state 229 * struct efx_rx_page_state - Page-based rx buffer state
@@ -378,7 +381,6 @@ struct efx_channel {
378 * access with prefetches. 381 * access with prefetches.
379 */ 382 */
380 struct efx_rx_buffer *rx_pkt; 383 struct efx_rx_buffer *rx_pkt;
381 bool rx_pkt_csummed;
382 384
383 struct efx_rx_queue rx_queue; 385 struct efx_rx_queue rx_queue;
384 struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; 386 struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index cd250f197b38..a43d1ca270c0 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -743,10 +743,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
743} 743}
744 744
745/* Detect errors included in the rx_evt_pkt_ok bit. */ 745/* Detect errors included in the rx_evt_pkt_ok bit. */
746static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, 746static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
747 const efx_qword_t *event, 747 const efx_qword_t *event)
748 bool *rx_ev_pkt_ok,
749 bool *discard)
750{ 748{
751 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 749 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
752 struct efx_nic *efx = rx_queue->efx; 750 struct efx_nic *efx = rx_queue->efx;
@@ -791,10 +789,6 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
791 ++channel->n_rx_tcp_udp_chksum_err; 789 ++channel->n_rx_tcp_udp_chksum_err;
792 } 790 }
793 791
794 /* The frame must be discarded if any of these are true. */
795 *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
796 rx_ev_tobe_disc | rx_ev_pause_frm);
797
798 /* TOBE_DISC is expected on unicast mismatches; don't print out an 792 /* TOBE_DISC is expected on unicast mismatches; don't print out an
799 * error message. FRM_TRUNC indicates RXDP dropped the packet due 793 * error message. FRM_TRUNC indicates RXDP dropped the packet due
800 * to a FIFO overflow. 794 * to a FIFO overflow.
@@ -817,6 +811,11 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
817 rx_ev_pause_frm ? " [PAUSE]" : ""); 811 rx_ev_pause_frm ? " [PAUSE]" : "");
818 } 812 }
819#endif 813#endif
814
815 /* The frame must be discarded if any of these are true. */
816 return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
817 rx_ev_tobe_disc | rx_ev_pause_frm) ?
818 EFX_RX_PKT_DISCARD : 0;
820} 819}
821 820
822/* Handle receive events that are not in-order. */ 821/* Handle receive events that are not in-order. */
@@ -849,7 +848,8 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
849 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; 848 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
850 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; 849 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
851 unsigned expected_ptr; 850 unsigned expected_ptr;
852 bool rx_ev_pkt_ok, discard = false, checksummed; 851 bool rx_ev_pkt_ok;
852 u16 flags;
853 struct efx_rx_queue *rx_queue; 853 struct efx_rx_queue *rx_queue;
854 854
855 /* Basic packet information */ 855 /* Basic packet information */
@@ -872,12 +872,11 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
872 /* If packet is marked as OK and packet type is TCP/IP or 872 /* If packet is marked as OK and packet type is TCP/IP or
873 * UDP/IP, then we can rely on the hardware checksum. 873 * UDP/IP, then we can rely on the hardware checksum.
874 */ 874 */
875 checksummed = 875 flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
876 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP || 876 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
877 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP; 877 EFX_RX_PKT_CSUMMED : 0;
878 } else { 878 } else {
879 efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard); 879 flags = efx_handle_rx_not_ok(rx_queue, event);
880 checksummed = false;
881 } 880 }
882 881
883 /* Detect multicast packets that didn't match the filter */ 882 /* Detect multicast packets that didn't match the filter */
@@ -888,15 +887,14 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
888 887
889 if (unlikely(!rx_ev_mcast_hash_match)) { 888 if (unlikely(!rx_ev_mcast_hash_match)) {
890 ++channel->n_rx_mcast_mismatch; 889 ++channel->n_rx_mcast_mismatch;
891 discard = true; 890 flags |= EFX_RX_PKT_DISCARD;
892 } 891 }
893 } 892 }
894 893
895 channel->irq_mod_score += 2; 894 channel->irq_mod_score += 2;
896 895
897 /* Handle received packet */ 896 /* Handle received packet */
898 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, 897 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
899 checksummed, discard);
900} 898}
901 899
902static void 900static void
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index d97c6ebcf06d..a33aef25ead9 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -108,7 +108,7 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
108 108
109static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf) 109static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
110{ 110{
111 if (buf->is_page) 111 if (buf->flags & EFX_RX_BUF_PAGE)
112 return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf); 112 return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
113 else 113 else
114 return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size; 114 return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
@@ -158,7 +158,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
158 /* Adjust the SKB for padding and checksum */ 158 /* Adjust the SKB for padding and checksum */
159 skb_reserve(skb, NET_IP_ALIGN); 159 skb_reserve(skb, NET_IP_ALIGN);
160 rx_buf->len = skb_len - NET_IP_ALIGN; 160 rx_buf->len = skb_len - NET_IP_ALIGN;
161 rx_buf->is_page = false; 161 rx_buf->flags = 0;
162 skb->ip_summed = CHECKSUM_UNNECESSARY; 162 skb->ip_summed = CHECKSUM_UNNECESSARY;
163 163
164 rx_buf->dma_addr = pci_map_single(efx->pci_dev, 164 rx_buf->dma_addr = pci_map_single(efx->pci_dev,
@@ -227,7 +227,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
227 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; 227 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
228 rx_buf->u.page = page; 228 rx_buf->u.page = page;
229 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; 229 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
230 rx_buf->is_page = true; 230 rx_buf->flags = EFX_RX_BUF_PAGE;
231 ++rx_queue->added_count; 231 ++rx_queue->added_count;
232 ++rx_queue->alloc_page_count; 232 ++rx_queue->alloc_page_count;
233 ++state->refcnt; 233 ++state->refcnt;
@@ -248,7 +248,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
248static void efx_unmap_rx_buffer(struct efx_nic *efx, 248static void efx_unmap_rx_buffer(struct efx_nic *efx,
249 struct efx_rx_buffer *rx_buf) 249 struct efx_rx_buffer *rx_buf)
250{ 250{
251 if (rx_buf->is_page && rx_buf->u.page) { 251 if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
252 struct efx_rx_page_state *state; 252 struct efx_rx_page_state *state;
253 253
254 state = page_address(rx_buf->u.page); 254 state = page_address(rx_buf->u.page);
@@ -258,7 +258,7 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
258 efx_rx_buf_size(efx), 258 efx_rx_buf_size(efx),
259 PCI_DMA_FROMDEVICE); 259 PCI_DMA_FROMDEVICE);
260 } 260 }
261 } else if (!rx_buf->is_page && rx_buf->u.skb) { 261 } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
262 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, 262 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
263 rx_buf->len, PCI_DMA_FROMDEVICE); 263 rx_buf->len, PCI_DMA_FROMDEVICE);
264 } 264 }
@@ -267,10 +267,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
267static void efx_free_rx_buffer(struct efx_nic *efx, 267static void efx_free_rx_buffer(struct efx_nic *efx,
268 struct efx_rx_buffer *rx_buf) 268 struct efx_rx_buffer *rx_buf)
269{ 269{
270 if (rx_buf->is_page && rx_buf->u.page) { 270 if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
271 __free_pages(rx_buf->u.page, efx->rx_buffer_order); 271 __free_pages(rx_buf->u.page, efx->rx_buffer_order);
272 rx_buf->u.page = NULL; 272 rx_buf->u.page = NULL;
273 } else if (!rx_buf->is_page && rx_buf->u.skb) { 273 } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
274 dev_kfree_skb_any(rx_buf->u.skb); 274 dev_kfree_skb_any(rx_buf->u.skb);
275 rx_buf->u.skb = NULL; 275 rx_buf->u.skb = NULL;
276 } 276 }
@@ -310,7 +310,7 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
310 new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1); 310 new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
311 new_buf->u.page = rx_buf->u.page; 311 new_buf->u.page = rx_buf->u.page;
312 new_buf->len = rx_buf->len; 312 new_buf->len = rx_buf->len;
313 new_buf->is_page = true; 313 new_buf->flags = EFX_RX_BUF_PAGE;
314 ++rx_queue->added_count; 314 ++rx_queue->added_count;
315} 315}
316 316
@@ -324,7 +324,10 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
324 struct efx_rx_buffer *new_buf; 324 struct efx_rx_buffer *new_buf;
325 unsigned index; 325 unsigned index;
326 326
327 if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE && 327 rx_buf->flags &= EFX_RX_BUF_PAGE;
328
329 if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
330 efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
328 page_count(rx_buf->u.page) == 1) 331 page_count(rx_buf->u.page) == 1)
329 efx_resurrect_rx_buffer(rx_queue, rx_buf); 332 efx_resurrect_rx_buffer(rx_queue, rx_buf);
330 333
@@ -411,8 +414,7 @@ void efx_rx_slow_fill(unsigned long context)
411 414
412static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, 415static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
413 struct efx_rx_buffer *rx_buf, 416 struct efx_rx_buffer *rx_buf,
414 int len, bool *discard, 417 int len, bool *leak_packet)
415 bool *leak_packet)
416{ 418{
417 struct efx_nic *efx = rx_queue->efx; 419 struct efx_nic *efx = rx_queue->efx;
418 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding; 420 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
@@ -423,7 +425,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
423 /* The packet must be discarded, but this is only a fatal error 425 /* The packet must be discarded, but this is only a fatal error
424 * if the caller indicated it was 426 * if the caller indicated it was
425 */ 427 */
426 *discard = true; 428 rx_buf->flags |= EFX_RX_PKT_DISCARD;
427 429
428 if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) { 430 if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
429 if (net_ratelimit()) 431 if (net_ratelimit())
@@ -436,7 +438,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
436 * data at the end of the skb will be trashed. So 438 * data at the end of the skb will be trashed. So
437 * we have no choice but to leak the fragment. 439 * we have no choice but to leak the fragment.
438 */ 440 */
439 *leak_packet = !rx_buf->is_page; 441 *leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
440 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); 442 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
441 } else { 443 } else {
442 if (net_ratelimit()) 444 if (net_ratelimit())
@@ -456,13 +458,13 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
456 */ 458 */
457static void efx_rx_packet_gro(struct efx_channel *channel, 459static void efx_rx_packet_gro(struct efx_channel *channel,
458 struct efx_rx_buffer *rx_buf, 460 struct efx_rx_buffer *rx_buf,
459 const u8 *eh, bool checksummed) 461 const u8 *eh)
460{ 462{
461 struct napi_struct *napi = &channel->napi_str; 463 struct napi_struct *napi = &channel->napi_str;
462 gro_result_t gro_result; 464 gro_result_t gro_result;
463 465
464 /* Pass the skb/page into the GRO engine */ 466 /* Pass the skb/page into the GRO engine */
465 if (rx_buf->is_page) { 467 if (rx_buf->flags & EFX_RX_BUF_PAGE) {
466 struct efx_nic *efx = channel->efx; 468 struct efx_nic *efx = channel->efx;
467 struct page *page = rx_buf->u.page; 469 struct page *page = rx_buf->u.page;
468 struct sk_buff *skb; 470 struct sk_buff *skb;
@@ -484,8 +486,8 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
484 skb->len = rx_buf->len; 486 skb->len = rx_buf->len;
485 skb->data_len = rx_buf->len; 487 skb->data_len = rx_buf->len;
486 skb->truesize += rx_buf->len; 488 skb->truesize += rx_buf->len;
487 skb->ip_summed = 489 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
488 checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE; 490 CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
489 491
490 skb_record_rx_queue(skb, channel->channel); 492 skb_record_rx_queue(skb, channel->channel);
491 493
@@ -493,7 +495,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
493 } else { 495 } else {
494 struct sk_buff *skb = rx_buf->u.skb; 496 struct sk_buff *skb = rx_buf->u.skb;
495 497
496 EFX_BUG_ON_PARANOID(!checksummed); 498 EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
497 rx_buf->u.skb = NULL; 499 rx_buf->u.skb = NULL;
498 500
499 gro_result = napi_gro_receive(napi, skb); 501 gro_result = napi_gro_receive(napi, skb);
@@ -508,7 +510,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
508} 510}
509 511
510void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 512void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
511 unsigned int len, bool checksummed, bool discard) 513 unsigned int len, u16 flags)
512{ 514{
513 struct efx_nic *efx = rx_queue->efx; 515 struct efx_nic *efx = rx_queue->efx;
514 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 516 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
@@ -516,6 +518,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
516 bool leak_packet = false; 518 bool leak_packet = false;
517 519
518 rx_buf = efx_rx_buffer(rx_queue, index); 520 rx_buf = efx_rx_buffer(rx_queue, index);
521 rx_buf->flags |= flags;
519 522
520 /* This allows the refill path to post another buffer. 523 /* This allows the refill path to post another buffer.
521 * EFX_RXD_HEAD_ROOM ensures that the slot we are using 524 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
@@ -524,18 +527,17 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
524 rx_queue->removed_count++; 527 rx_queue->removed_count++;
525 528
526 /* Validate the length encoded in the event vs the descriptor pushed */ 529 /* Validate the length encoded in the event vs the descriptor pushed */
527 efx_rx_packet__check_len(rx_queue, rx_buf, len, 530 efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);
528 &discard, &leak_packet);
529 531
530 netif_vdbg(efx, rx_status, efx->net_dev, 532 netif_vdbg(efx, rx_status, efx->net_dev,
531 "RX queue %d received id %x at %llx+%x %s%s\n", 533 "RX queue %d received id %x at %llx+%x %s%s\n",
532 efx_rx_queue_index(rx_queue), index, 534 efx_rx_queue_index(rx_queue), index,
533 (unsigned long long)rx_buf->dma_addr, len, 535 (unsigned long long)rx_buf->dma_addr, len,
534 (checksummed ? " [SUMMED]" : ""), 536 (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
535 (discard ? " [DISCARD]" : "")); 537 (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
536 538
537 /* Discard packet, if instructed to do so */ 539 /* Discard packet, if instructed to do so */
538 if (unlikely(discard)) { 540 if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
539 if (unlikely(leak_packet)) 541 if (unlikely(leak_packet))
540 channel->n_skbuff_leaks++; 542 channel->n_skbuff_leaks++;
541 else 543 else
@@ -562,10 +564,8 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
562 rx_buf->len = len - efx->type->rx_buffer_hash_size; 564 rx_buf->len = len - efx->type->rx_buffer_hash_size;
563out: 565out:
564 if (channel->rx_pkt) 566 if (channel->rx_pkt)
565 __efx_rx_packet(channel, 567 __efx_rx_packet(channel, channel->rx_pkt);
566 channel->rx_pkt, channel->rx_pkt_csummed);
567 channel->rx_pkt = rx_buf; 568 channel->rx_pkt = rx_buf;
568 channel->rx_pkt_csummed = checksummed;
569} 569}
570 570
571static void efx_rx_deliver(struct efx_channel *channel, 571static void efx_rx_deliver(struct efx_channel *channel,
@@ -588,8 +588,7 @@ static void efx_rx_deliver(struct efx_channel *channel,
588} 588}
589 589
590/* Handle a received packet. Second half: Touches packet payload. */ 590/* Handle a received packet. Second half: Touches packet payload. */
591void __efx_rx_packet(struct efx_channel *channel, 591void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
592 struct efx_rx_buffer *rx_buf, bool checksummed)
593{ 592{
594 struct efx_nic *efx = channel->efx; 593 struct efx_nic *efx = channel->efx;
595 u8 *eh = efx_rx_buf_eh(efx, rx_buf); 594 u8 *eh = efx_rx_buf_eh(efx, rx_buf);
@@ -603,7 +602,7 @@ void __efx_rx_packet(struct efx_channel *channel,
603 return; 602 return;
604 } 603 }
605 604
606 if (!rx_buf->is_page) { 605 if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
607 struct sk_buff *skb = rx_buf->u.skb; 606 struct sk_buff *skb = rx_buf->u.skb;
608 607
609 prefetch(skb_shinfo(skb)); 608 prefetch(skb_shinfo(skb));
@@ -622,10 +621,10 @@ void __efx_rx_packet(struct efx_channel *channel,
622 } 621 }
623 622
624 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) 623 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
625 checksummed = false; 624 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
626 625
627 if (likely(checksummed || rx_buf->is_page)) 626 if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)))
628 efx_rx_packet_gro(channel, rx_buf, eh, checksummed); 627 efx_rx_packet_gro(channel, rx_buf, eh);
629 else 628 else
630 efx_rx_deliver(channel, rx_buf); 629 efx_rx_deliver(channel, rx_buf);
631} 630}