-rw-r--r--	drivers/net/ethernet/sfc/efx.c		8
-rw-r--r--	drivers/net/ethernet/sfc/mcdi.c		39
-rw-r--r--	drivers/net/ethernet/sfc/net_driver.h	3
-rw-r--r--	drivers/net/ethernet/sfc/nic.h		2
-rw-r--r--	drivers/net/ethernet/sfc/ptp.c		66
-rw-r--r--	drivers/net/ethernet/sfc/rx.c		6
6 files changed, 101 insertions, 23 deletions
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 958d8c7fa02f..a35c63d43ae5 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -585,7 +585,7 @@ static void efx_start_datapath(struct efx_nic *efx)
 		 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
 		 efx->type->rx_buffer_padding);
 	rx_buf_len = (sizeof(struct efx_rx_page_state) +
-		      NET_IP_ALIGN + efx->rx_dma_len);
+		      efx->rx_ip_align + efx->rx_dma_len);
 	if (rx_buf_len <= PAGE_SIZE) {
 		efx->rx_scatter = efx->type->always_rx_scatter;
 		efx->rx_buffer_order = 0;
@@ -645,6 +645,8 @@ static void efx_start_datapath(struct efx_nic *efx)
 		WARN_ON(channel->rx_pkt_n_frags);
 	}
 
+	efx_ptp_start_datapath(efx);
+
 	if (netif_device_present(efx->net_dev))
 		netif_tx_wake_all_queues(efx->net_dev);
 }
@@ -659,6 +661,8 @@ static void efx_stop_datapath(struct efx_nic *efx)
 	EFX_ASSERT_RESET_SERIALISED(efx);
 	BUG_ON(efx->port_enabled);
 
+	efx_ptp_stop_datapath(efx);
+
 	/* Stop RX refill */
 	efx_for_each_channel(channel, efx) {
 		efx_for_each_channel_rx_queue(rx_queue, channel)
@@ -2542,6 +2546,8 @@ static int efx_init_struct(struct efx_nic *efx,
 
 	efx->net_dev = net_dev;
 	efx->rx_prefix_size = efx->type->rx_prefix_size;
+	efx->rx_ip_align =
+		NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
 	efx->rx_packet_hash_offset =
 		efx->type->rx_hash_offset - efx->type->rx_prefix_size;
 	spin_lock_init(&efx->stats_lock);
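
Note on the rx_ip_align computation above: for NICs that prepend a prefix to each received packet, offsetting DMA by NET_IP_ALIGN alone would mis-align the IP header once the prefix is stripped, so the offset is derived from the prefix length instead. A standalone sketch of the arithmetic follows; the prefix sizes are illustrative, not taken from any particular sfc NIC:

#include <stdio.h>

/* Sketch of the rx_ip_align computation from efx_init_struct().
 * NET_IP_ALIGN (normally 2) pads the 14-byte Ethernet header so the
 * IP header lands on a 4-byte boundary.  When the hardware writes a
 * prefix before the packet, the DMA offset must compensate for the
 * prefix length; the example prefix sizes below are assumptions.
 */
int main(void)
{
	const unsigned int net_ip_align = 2;
	const unsigned int prefix_sizes[] = { 0, 14, 16 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int rx_ip_align =
			net_ip_align ?
			(prefix_sizes[i] + net_ip_align) % 4 : 0;
		printf("prefix %2u -> rx_ip_align %u\n",
		       prefix_sizes[i], rx_ip_align);
	}
	return 0;
}

With a 14-byte prefix this prints rx_ip_align 0: the packet then starts 14 bytes into an aligned slot, which already leaves the IP header 4-byte aligned, so no extra offset is wanted.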
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 366c8e3e3784..4b0bd8a1514d 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -50,6 +50,7 @@ struct efx_mcdi_async_param {
 static void efx_mcdi_timeout_async(unsigned long context);
 static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
 			       bool *was_attached_out);
+static bool efx_mcdi_poll_once(struct efx_nic *efx);
 
 static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
 {
@@ -237,6 +238,21 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
 	}
 }
 
+static bool efx_mcdi_poll_once(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+	rmb();
+	if (!efx->type->mcdi_poll_response(efx))
+		return false;
+
+	spin_lock_bh(&mcdi->iface_lock);
+	efx_mcdi_read_response_header(efx);
+	spin_unlock_bh(&mcdi->iface_lock);
+
+	return true;
+}
+
 static int efx_mcdi_poll(struct efx_nic *efx)
 {
 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
@@ -272,18 +288,13 @@ static int efx_mcdi_poll(struct efx_nic *efx)
 
 		time = jiffies;
 
-		rmb();
-		if (efx->type->mcdi_poll_response(efx))
+		if (efx_mcdi_poll_once(efx))
 			break;
 
 		if (time_after(time, finish))
 			return -ETIMEDOUT;
 	}
 
-	spin_lock_bh(&mcdi->iface_lock);
-	efx_mcdi_read_response_header(efx);
-	spin_unlock_bh(&mcdi->iface_lock);
-
 	/* Return rc=0 like wait_event_timeout() */
 	return 0;
 }
@@ -619,6 +630,16 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
 	rc = efx_mcdi_await_completion(efx);
 
 	if (rc != 0) {
+		netif_err(efx, hw, efx->net_dev,
+			  "MC command 0x%x inlen %d mode %d timed out\n",
+			  cmd, (int)inlen, mcdi->mode);
+
+		if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
+			netif_err(efx, hw, efx->net_dev,
+				  "MCDI request was completed without an event\n");
+			rc = 0;
+		}
+
 		/* Close the race with efx_mcdi_ev_cpl() executing just too late
 		 * and completing a request we've just cancelled, by ensuring
 		 * that the seqno check therein fails.
@@ -627,11 +648,9 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
 		++mcdi->seqno;
 		++mcdi->credits;
 		spin_unlock_bh(&mcdi->iface_lock);
+	}
 
-		netif_err(efx, hw, efx->net_dev,
-			  "MC command 0x%x inlen %d mode %d timed out\n",
-			  cmd, (int)inlen, mcdi->mode);
-	} else {
+	if (rc == 0) {
 		size_t hdr_len, data_len;
 
 		/* At the very least we need a memory barrier here to ensure
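
The refactoring above lets efx_mcdi_rpc_finish() poll the MCDI completion doorbell one last time after an event-mode timeout, so a request whose completion event was lost is recovered instead of being reported as failed. A minimal toy model of that recovery pattern, with all names invented for the sketch:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the "poll once after an event timeout" recovery used in
 * efx_mcdi_rpc_finish().  done_flag stands in for the MCDI response
 * doorbell in shared memory; all names here are invented.
 */
static bool poll_once(const volatile bool *done_flag)
{
	/* In the driver, rmb() orders the doorbell read before the
	 * response payload is read.  Plain read suffices in this model. */
	return *done_flag;
}

static int finish_request(volatile bool *done_flag, bool event_arrived)
{
	if (event_arrived)
		return 0;		/* normal completion via event */

	fprintf(stderr, "request timed out waiting for event\n");
	if (poll_once(done_flag)) {
		/* Completed, but the event was lost: treat as success. */
		fprintf(stderr, "completed without an event\n");
		return 0;
	}
	return -1;			/* genuinely timed out */
}

int main(void)
{
	volatile bool done = true;	/* firmware finished, event lost */

	return finish_request(&done, false);
}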
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b14a717ac3e8..542a0d252ae0 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -683,6 +683,8 @@ struct vfdi_status;
  * @n_channels: Number of channels in use
  * @n_rx_channels: Number of channels used for RX (= number of RX queues)
  * @n_tx_channels: Number of channels used for TX
+ * @rx_ip_align: RX DMA address offset to have IP header aligned
+ *	in accordance with NET_IP_ALIGN
  * @rx_dma_len: Current maximum RX DMA length
  * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
  * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
@@ -816,6 +818,7 @@ struct efx_nic {
 	unsigned rss_spread;
 	unsigned tx_channel_offset;
 	unsigned n_tx_channels;
+	unsigned int rx_ip_align;
 	unsigned int rx_dma_len;
 	unsigned int rx_buffer_order;
 	unsigned int rx_buffer_truesize;
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 33852e824f12..fe71e81ee8bf 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -561,6 +561,8 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
 bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
 int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
 void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_ptp_start_datapath(struct efx_nic *efx);
+void efx_ptp_stop_datapath(struct efx_nic *efx);
 
 extern const struct efx_nic_type falcon_a1_nic_type;
 extern const struct efx_nic_type falcon_b0_nic_type;
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index afd4d3a50460..34b25864b121 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -220,6 +220,7 @@ struct efx_ptp_timeset {
  * @evt_list: List of MC receive events awaiting packets
  * @evt_free_list: List of free events
  * @evt_lock: Lock for manipulating evt_list and evt_free_list
+ * @evt_overflow: Boolean indicating that event list has overflowed
  * @rx_evts: Instantiated events (on evt_list and evt_free_list)
  * @workwq: Work queue for processing pending PTP operations
  * @work: Work task
@@ -270,6 +271,7 @@ struct efx_ptp_data {
 	struct list_head evt_list;
 	struct list_head evt_free_list;
 	spinlock_t evt_lock;
+	bool evt_overflow;
 	struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
 	struct workqueue_struct *workwq;
 	struct work_struct work;
@@ -635,6 +637,11 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
 			}
 		}
 	}
+	/* If the event overflow flag is set and the event list is now empty
+	 * clear the flag to re-enable the overflow warning message.
+	 */
+	if (ptp->evt_overflow && list_empty(&ptp->evt_list))
+		ptp->evt_overflow = false;
 	spin_unlock_bh(&ptp->evt_lock);
 }
 
@@ -676,6 +683,11 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
 			break;
 		}
 	}
+	/* If the event overflow flag is set and the event list is now empty
+	 * clear the flag to re-enable the overflow warning message.
+	 */
+	if (ptp->evt_overflow && list_empty(&ptp->evt_list))
+		ptp->evt_overflow = false;
 	spin_unlock_bh(&ptp->evt_lock);
 
 	return rc;
@@ -705,8 +717,9 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
 			__skb_queue_tail(q, skb);
 		} else if (time_after(jiffies, match->expiry)) {
 			match->state = PTP_PACKET_STATE_TIMED_OUT;
-			netif_warn(efx, rx_err, efx->net_dev,
-				   "PTP packet - no timestamp seen\n");
+			if (net_ratelimit())
+				netif_warn(efx, rx_err, efx->net_dev,
+					   "PTP packet - no timestamp seen\n");
 			__skb_queue_tail(q, skb);
 		} else {
 			/* Replace unprocessed entry and stop */
@@ -788,9 +801,14 @@ fail:
 static int efx_ptp_stop(struct efx_nic *efx)
 {
 	struct efx_ptp_data *ptp = efx->ptp_data;
-	int rc = efx_ptp_disable(efx);
 	struct list_head *cursor;
 	struct list_head *next;
+	int rc;
+
+	if (ptp == NULL)
+		return 0;
+
+	rc = efx_ptp_disable(efx);
 
 	if (ptp->rxfilter_installed) {
 		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
@@ -809,11 +827,19 @@ static int efx_ptp_stop(struct efx_nic *efx)
 	list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
 		list_move(cursor, &efx->ptp_data->evt_free_list);
 	}
+	ptp->evt_overflow = false;
 	spin_unlock_bh(&efx->ptp_data->evt_lock);
 
 	return rc;
 }
 
+static int efx_ptp_restart(struct efx_nic *efx)
+{
+	if (efx->ptp_data && efx->ptp_data->enabled)
+		return efx_ptp_start(efx);
+	return 0;
+}
+
 static void efx_ptp_pps_worker(struct work_struct *work)
 {
 	struct efx_ptp_data *ptp =
@@ -901,6 +927,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
 	spin_lock_init(&ptp->evt_lock);
 	for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
 		list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
+	ptp->evt_overflow = false;
 
 	ptp->phc_clock_info.owner = THIS_MODULE;
 	snprintf(ptp->phc_clock_info.name,
@@ -989,7 +1016,11 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
 	       skb->len >= PTP_MIN_LENGTH &&
 	       skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM &&
 	       likely(skb->protocol == htons(ETH_P_IP)) &&
+	       skb_transport_header_was_set(skb) &&
+	       skb_network_header_len(skb) >= sizeof(struct iphdr) &&
 	       ip_hdr(skb)->protocol == IPPROTO_UDP &&
+	       skb_headlen(skb) >=
+	       skb_transport_offset(skb) + sizeof(struct udphdr) &&
 	       udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
 }
 
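The extra checks above validate header offsets and lengths before ip_hdr() and udp_hdr() are dereferenced, so a malformed skb (e.g. one injected through a packet socket) cannot cause an out-of-bounds read. A rough user-space analogue of the same bounds discipline on a flat Ethernet frame, assuming IPv4 with no options:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define PTP_EVENT_PORT 319	/* IEEE 1588 event messages */

/* Check that the frame can hold Ethernet + IPv4 + UDP headers before
 * reading the UDP destination port, mirroring the "validate offsets
 * before ip_hdr()/udp_hdr()" rule the hunk enforces.  Assumes IPv4
 * with no options (IHL == 5); 14/20/8 are the minimum header sizes.
 */
static bool looks_like_ptp_event(const unsigned char *frame, size_t len)
{
	uint16_t dport;

	if (len < 14 + 20 + 8)
		return false;
	if (frame[14 + 9] != 17)	/* IPv4 protocol field != UDP */
		return false;
	memcpy(&dport, frame + 14 + 20 + 2, sizeof(dport));
	return dport == htons(PTP_EVENT_PORT);
}

int main(void)
{
	unsigned char frame[64] = { 0 };

	frame[14 + 9] = 17;		/* UDP */
	frame[14 + 20 + 2] = 319 >> 8;	/* dest port 319, big-endian */
	frame[14 + 20 + 3] = 319 & 0xff;
	return looks_like_ptp_event(frame, sizeof(frame)) ? 0 : 1;
}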
@@ -1106,7 +1137,7 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
 {
 	if ((enable_wanted != efx->ptp_data->enabled) ||
 	    (enable_wanted && (efx->ptp_data->mode != new_mode))) {
-		int rc;
+		int rc = 0;
 
 		if (enable_wanted) {
 			/* Change of mode requires disable */
@@ -1123,7 +1154,8 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
 			 * succeed.
 			 */
 			efx->ptp_data->mode = new_mode;
-			rc = efx_ptp_start(efx);
+			if (netif_running(efx->net_dev))
+				rc = efx_ptp_start(efx);
 			if (rc == 0) {
 				rc = efx_ptp_synchronize(efx,
 							 PTP_SYNC_ATTEMPTS * 2);
@@ -1304,8 +1336,13 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
 		list_add_tail(&evt->link, &ptp->evt_list);
 
 		queue_work(ptp->workwq, &ptp->work);
-	} else {
-		netif_err(efx, rx_err, efx->net_dev, "No free PTP event");
+	} else if (!ptp->evt_overflow) {
+		/* Log a warning message and set the event overflow flag.
+		 * The message won't be logged again until the event queue
+		 * becomes empty.
+		 */
+		netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
+		ptp->evt_overflow = true;
 	}
 	spin_unlock_bh(&ptp->evt_lock);
 }
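
The evt_overflow flag turns the old per-event error into a one-shot latch: warn on the first overflow, stay silent until the list drains, then re-arm. A self-contained sketch of that latch pattern, with names invented for the sketch:

#include <stdbool.h>
#include <stdio.h>

struct evt_queue {
	unsigned int used, capacity;
	bool overflowed;
};

static bool evt_enqueue(struct evt_queue *q)
{
	if (q->used < q->capacity) {
		q->used++;
		return true;
	}
	if (!q->overflowed) {
		fprintf(stderr, "event queue overflow\n");	/* logged once */
		q->overflowed = true;
	}
	return false;
}

static void evt_dequeue(struct evt_queue *q)
{
	if (q->used && --q->used == 0)
		q->overflowed = false;	/* queue empty: re-arm the warning */
}

int main(void)
{
	struct evt_queue q = { .used = 0, .capacity = 2, .overflowed = false };

	evt_enqueue(&q);
	evt_enqueue(&q);
	evt_enqueue(&q);	/* warns once */
	evt_enqueue(&q);	/* silent */
	while (q.used)
		evt_dequeue(&q);	/* drains; warning re-armed */
	return 0;
}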
@@ -1398,7 +1435,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
 	if (rc != 0)
 		return rc;
 
-	ptp_data->current_adjfreq = delta;
+	ptp_data->current_adjfreq = adjustment_ns;
 	return 0;
 }
 
@@ -1413,7 +1450,7 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
 
 	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
 	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
-	MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0);
+	MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq);
 	MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
 	MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
 	return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
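
These two hunks work together: efx_phc_adjfreq() now remembers the last frequency adjustment it applied, and efx_phc_adjtime() replays it in the ADJUST operation instead of sending 0, which previously cancelled any slewing whenever the clock was stepped. A toy model of the interaction, with names invented for the sketch:

#include <stdio.h>

/* Toy model of the adjtime/adjfreq interaction fixed above: the MC's
 * ADJUST operation carries both a frequency trim and a time step, so a
 * pure time step must replay the remembered trim instead of sending 0.
 */
struct phc_state {
	long long current_adjfreq;	/* last trim sent to the MC */
};

static void phc_adjfreq(struct phc_state *s, long long trim)
{
	s->current_adjfreq = trim;	/* trim applied, remembered */
}

static void phc_adjtime(const struct phc_state *s, long long step_ns)
{
	/* Before the fix this sent freq=0, silently cancelling the trim. */
	printf("ADJUST: freq=%lld step=%lldns\n",
	       s->current_adjfreq, step_ns);
}

int main(void)
{
	struct phc_state s = { 0 };

	phc_adjfreq(&s, 12345);
	phc_adjtime(&s, -500);	/* trim survives the step */
	return 0;
}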
@@ -1500,3 +1537,14 @@ void efx_ptp_probe(struct efx_nic *efx)
 	efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] =
 		&efx_ptp_channel_type;
 }
+
+void efx_ptp_start_datapath(struct efx_nic *efx)
+{
+	if (efx_ptp_restart(efx))
+		netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n");
+}
+
+void efx_ptp_stop_datapath(struct efx_nic *efx)
+{
+	efx_ptp_stop(efx);
+}
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 8f09e686fc23..42488df1f4ec 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -94,7 +94,7 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
 
 void efx_rx_config_page_split(struct efx_nic *efx)
 {
-	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
+	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
 				      EFX_RX_BUF_ALIGNMENT);
 	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
 		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
@@ -189,9 +189,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
 	do {
 		index = rx_queue->added_count & rx_queue->ptr_mask;
 		rx_buf = efx_rx_buffer(rx_queue, index);
-		rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
+		rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
 		rx_buf->page = page;
-		rx_buf->page_offset = page_offset + NET_IP_ALIGN;
+		rx_buf->page_offset = page_offset + efx->rx_ip_align;
 		rx_buf->len = efx->rx_dma_len;
 		rx_buf->flags = 0;
 		++rx_queue->added_count;
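
With rx_ip_align folded into the buffer step, each RX slot in a page is sized to the DMA length plus the alignment offset, rounded up to the buffer alignment. A sketch of the efx_rx_config_page_split() arithmetic with assumed sizes; the real EFX_RX_BUF_ALIGNMENT and sizeof(struct efx_rx_page_state) values differ by NIC and build:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* How many RX buffers fit per page once each slot is padded to the
 * buffer alignment and offset by rx_ip_align.  All sizes below are
 * illustrative assumptions, not the driver's real values.
 */
int main(void)
{
	const unsigned int page_size = 4096;
	const unsigned int page_state = 64;	/* struct efx_rx_page_state, assumed */
	const unsigned int buf_align = 256;	/* EFX_RX_BUF_ALIGNMENT, assumed */
	const unsigned int rx_dma_len = 1536, rx_ip_align = 2;

	unsigned int step = ALIGN_UP(rx_dma_len + rx_ip_align, buf_align);
	unsigned int bufs = (page_size - page_state) / step;

	printf("buf step %u, %u buffers per page\n", step, bufs);
	return 0;
}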