 drivers/net/ethernet/sfc/efx.c        | 16 ++++++++++++----
 drivers/net/ethernet/sfc/net_driver.h |  4 +++-
 drivers/net/ethernet/sfc/rx.c         | 25 +++++++++++++++----------
 3 files changed, 30 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index bf57b3cb16ab..0bc00991d310 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -779,6 +779,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 			   tx_queue->txd.entries);
 	}
 
+	efx_device_detach_sync(efx);
 	efx_stop_all(efx);
 	efx_stop_interrupts(efx, true);
 
@@ -832,6 +833,7 @@ out:
 
 	efx_start_interrupts(efx, true);
 	efx_start_all(efx);
+	netif_device_attach(efx->net_dev);
 	return rc;
 
 rollback:
@@ -1641,8 +1643,12 @@ static void efx_stop_all(struct efx_nic *efx)
 	/* Flush efx_mac_work(), refill_workqueue, monitor_work */
 	efx_flush_all(efx);
 
-	/* Stop the kernel transmit interface late, so the watchdog
-	 * timer isn't ticking over the flush */
+	/* Stop the kernel transmit interface.  This is only valid if
+	 * the device is stopped or detached; otherwise the watchdog
+	 * may fire immediately.
+	 */
+	WARN_ON(netif_running(efx->net_dev) &&
+		netif_device_present(efx->net_dev));
 	netif_tx_disable(efx->net_dev);
 
 	efx_stop_datapath(efx);
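
The WARN_ON() added above makes the new precondition of efx_stop_all() explicit: quiescing TX is only valid once the interface is down or the device has been detached, because otherwise the stack could still arm the TX watchdog. A minimal sketch of that predicate as a hypothetical helper (efx_stop_is_safe() is not part of the driver):

#include <linux/netdevice.h>

/* Hypothetical helper restating the invariant asserted by the WARN_ON():
 * stopping TX is safe iff the interface is down or the device is detached.
 */
static bool efx_stop_is_safe(struct net_device *net_dev)
{
	return !netif_running(net_dev) || !netif_device_present(net_dev);
}
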
@@ -1963,16 +1969,18 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 	if (new_mtu > EFX_MAX_MTU)
 		return -EINVAL;
 
-	efx_stop_all(efx);
-
 	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
 
+	efx_device_detach_sync(efx);
+	efx_stop_all(efx);
+
 	mutex_lock(&efx->mac_lock);
 	net_dev->mtu = new_mtu;
 	efx->type->reconfigure_mac(efx);
 	mutex_unlock(&efx->mac_lock);
 
 	efx_start_all(efx);
+	netif_device_attach(efx->net_dev);
 	return 0;
 }
 
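
Taken together, the efx.c hunks enforce a single ordering rule: detach the net device before stopping the datapath, and re-attach it only after the datapath is running again, so the TX watchdog cannot fire across the reconfiguration window. A condensed sketch of the bracket, where efx_do_reconfigure() is a hypothetical placeholder for the work performed while traffic is stopped (ring reallocation, MTU update, and so on):

static int efx_reconfigure_bracket(struct efx_nic *efx)
{
	int rc;

	efx_device_detach_sync(efx);	/* stop TX queues, mark device absent */
	efx_stop_all(efx);		/* safe: watchdog no longer watching */

	rc = efx_do_reconfigure(efx);	/* hypothetical placeholder */

	efx_start_all(efx);
	netif_device_attach(efx->net_dev);	/* resume TX queues */
	return rc;
}
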
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 2d756c1d7142..0a90abd2421b 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -210,6 +210,7 @@ struct efx_tx_queue {
  *	Will be %NULL if the buffer slot is currently free.
  * @page: The associated page buffer. Valid iff @flags & %EFX_RX_BUF_PAGE.
  *	Will be %NULL if the buffer slot is currently free.
+ * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
  * @len: Buffer length, in bytes.
  * @flags: Flags for buffer and packet state.
  */
@@ -219,7 +220,8 @@ struct efx_rx_buffer {
 		struct sk_buff *skb;
 		struct page *page;
 	} u;
-	unsigned int len;
+	u16 page_offset;
+	u16 len;
 	u16 flags;
 };
 #define EFX_RX_BUF_PAGE		0x0001
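
The new @page_offset field exists because a DMA address is no longer guaranteed to share its page offset with the CPU address: with swiotlb, rx_buf->dma_addr can point into a bounce buffer. Recording the offset explicitly when the buffer is carved out of the page removes that assumption. An illustrative contrast, assuming a simplified stand-in struct rather than the full driver definition:

#include <linux/types.h>
#include <asm/page.h>

/* Simplified stand-in for struct efx_rx_buffer (sketch only) */
struct rx_buf_sketch {
	dma_addr_t dma_addr;	/* may target a swiotlb bounce buffer */
	u16 page_offset;	/* recorded when the buffer was set up */
};

/* Old derivation: only correct when dma_addr and the CPU page share
 * their low PAGE_SIZE bits, which bounce buffering breaks.
 */
static unsigned int buf_offset_old(const struct rx_buf_sketch *buf)
{
	return (unsigned int)buf->dma_addr & (PAGE_SIZE - 1);
}

/* New derivation: use the offset tracked at setup time. */
static unsigned int buf_offset_new(const struct rx_buf_sketch *buf)
{
	return buf->page_offset;
}

A u16 is wide enough here since a within-page offset fits in 16 bits for page sizes up to 64K, and narrowing @len to u16 alongside it keeps the structure compact.
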
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index d780a0d096b4..879ff5849bbd 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -90,11 +90,7 @@ static unsigned int rx_refill_threshold;
 static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
 					     struct efx_rx_buffer *buf)
 {
-	/* Offset is always within one page, so we don't need to consider
-	 * the page order.
-	 */
-	return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
-		efx->type->rx_buffer_hash_size;
+	return buf->page_offset + efx->type->rx_buffer_hash_size;
 }
 static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
 {
@@ -187,6 +183,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 	struct efx_nic *efx = rx_queue->efx;
 	struct efx_rx_buffer *rx_buf;
 	struct page *page;
+	unsigned int page_offset;
 	struct efx_rx_page_state *state;
 	dma_addr_t dma_addr;
 	unsigned index, count;
@@ -211,12 +208,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 		state->dma_addr = dma_addr;
 
 		dma_addr += sizeof(struct efx_rx_page_state);
+		page_offset = sizeof(struct efx_rx_page_state);
 
 	split:
 		index = rx_queue->added_count & rx_queue->ptr_mask;
 		rx_buf = efx_rx_buffer(rx_queue, index);
 		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
 		rx_buf->u.page = page;
+		rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
 		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
 		rx_buf->flags = EFX_RX_BUF_PAGE;
 		++rx_queue->added_count;
@@ -227,6 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 			/* Use the second half of the page */
 			get_page(page);
 			dma_addr += (PAGE_SIZE >> 1);
+			page_offset += (PAGE_SIZE >> 1);
 			++count;
 			goto split;
 		}
@@ -236,7 +236,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 }
 
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
-				struct efx_rx_buffer *rx_buf)
+				struct efx_rx_buffer *rx_buf,
+				unsigned int used_len)
 {
 	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
 		struct efx_rx_page_state *state;
@@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 				       state->dma_addr,
 				       efx_rx_buf_size(efx),
 				       DMA_FROM_DEVICE);
+		} else if (used_len) {
+			dma_sync_single_for_cpu(&efx->pci_dev->dev,
+						rx_buf->dma_addr, used_len,
+						DMA_FROM_DEVICE);
 		}
 	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
 		dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
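
The new else-if branch handles a buffer whose page mapping must stay live because the other half of the page is still owned by another descriptor: the mapping cannot be torn down yet, but the bytes the NIC wrote still have to be made visible to the CPU, which is exactly what dma_sync_single_for_cpu() provides (and what copies a swiotlb bounce buffer back). A sketch of the sync-versus-unmap idiom, with rx_buf_done() as a hypothetical helper, not driver code:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* For a DMA_FROM_DEVICE mapping shared by several buffers: the last
 * user unmaps the whole mapping; earlier users only sync the bytes the
 * device actually wrote before the CPU reads them.
 */
static void rx_buf_done(struct device *dev, dma_addr_t map_addr,
			size_t map_len, dma_addr_t buf_addr,
			unsigned int used_len, bool last_user)
{
	if (last_user)
		dma_unmap_page(dev, map_addr, map_len, DMA_FROM_DEVICE);
	else if (used_len)
		dma_sync_single_for_cpu(dev, buf_addr, used_len,
					DMA_FROM_DEVICE);
}

Without the sync, a packet that was not the last buffer in its page would be read from a stale copy whenever swiotlb (or any non-coherent DMA implementation) is in use.
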
@@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
 			       struct efx_rx_buffer *rx_buf)
 {
-	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+	efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
 	efx_free_rx_buffer(rx_queue->efx, rx_buf);
 }
 
@@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 		goto out;
 	}
 
-	/* Release card resources - assumes all RX buffers consumed in-order
-	 * per RX queue
+	/* Release and/or sync DMA mapping - assumes all RX buffers
+	 * consumed in-order per RX queue
 	 */
-	efx_unmap_rx_buffer(efx, rx_buf);
+	efx_unmap_rx_buffer(efx, rx_buf, len);
 
 	/* Prefetch nice and early so data will (hopefully) be in cache by
 	 * the time we look at it.