author		David S. Miller <davem@davemloft.net>	2014-11-05 21:50:43 -0500
committer	David S. Miller <davem@davemloft.net>	2014-11-05 21:50:43 -0500
commit		2c99cd914d4fed9160d98849c9dd38034616768e (patch)
tree		fd6c145593ff1c901a9980765952700586d6fe20
parent		25de4668d094f00e44a8f2428dd3c1a4ecfa0053 (diff)
parent		5cdec679671741bcca1f8280699a64b42c9fa2b4 (diff)
Merge branch 'amd-xgbe-next'
Tom Lendacky says:

====================
amd-xgbe: AMD XGBE driver updates 2014-11-04

The following series of patches includes functional updates to the driver
as well as some trivial changes for function renaming and spelling fixes.

- Move channel and ring structure allocation into the device open path
- Rename the pre_xmit function to dev_xmit
- Explicitly use the u32 data type for the device descriptors
- Use page allocation for the receive buffers
- Add support for split header/payload receive
- Add support for per DMA channel interrupts
- Add support for receive side scaling (RSS)
- Add support for ethtool receive side scaling commands
- Fix the spelling of descriptors
- After a PCS reset, sync the PCS and PHY modes
- Add dependency on HAS_IOMEM to both the amd-xgbe and amd-xgbe-phy drivers

This patch series is based on net-next.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
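
Illustration (not from the series itself): the RSS patches program a secret
hash key plus a lookup table whose entries each carry a DMA channel number in
a 4-bit DMCH field; the hardware hashes each incoming flow and indexes the
table with the hash to pick the receiving channel. A standalone sketch of that
mapping, with an assumed 256-entry table and names that are hypothetical:

	#include <stdint.h>
	#include <stdio.h>

	#define RSS_TABLE_ENTRIES 256	/* assumed table size for illustration */

	/* Each entry holds a DMA channel number (the 4-bit DMCH field). */
	static unsigned int rss_pick_channel(const uint8_t *table, uint32_t hash)
	{
		return table[hash % RSS_TABLE_ENTRIES];
	}

	int main(void)
	{
		uint8_t table[RSS_TABLE_ENTRIES];
		unsigned int i;

		/* Default spread: entries rotate over 4 channels */
		for (i = 0; i < RSS_TABLE_ENTRIES; i++)
			table[i] = i % 4;

		/* 0xdeadbeef % 256 = 239, and table[239] = 3 */
		printf("flow -> channel %u\n",
		       rss_pick_channel(table, 0xdeadbeef));
		return 0;
	}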
-rw-r--r--  Documentation/devicetree/bindings/net/amd-xgbe.txt |  12
-rw-r--r--  drivers/net/ethernet/amd/Kconfig                   |   2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-common.h        |  42
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-desc.c          | 193
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c           | 288
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c           | 445
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c       |  82
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-main.c          |  86
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h               |  77
-rw-r--r--  drivers/net/phy/Kconfig                            |   2
-rw-r--r--  drivers/net/phy/amd-xgbe-phy.c                     |   3
11 files changed, 955 insertions(+), 277 deletions(-)
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe.txt b/Documentation/devicetree/bindings/net/amd-xgbe.txt
index 41354f730beb..26efd526d16c 100644
--- a/Documentation/devicetree/bindings/net/amd-xgbe.txt
+++ b/Documentation/devicetree/bindings/net/amd-xgbe.txt
@@ -7,7 +7,10 @@ Required properties:
    - PCS registers
 - interrupt-parent: Should be the phandle for the interrupt controller
   that services interrupts for this device
-- interrupts: Should contain the amd-xgbe interrupt
+- interrupts: Should contain the amd-xgbe interrupt(s). The first interrupt
+  listed is required and is the general device interrupt. If the optional
+  amd,per-channel-interrupt property is specified, then one additional
+  interrupt for each DMA channel supported by the device should be specified
 - clocks:
    - DMA clock for the amd-xgbe device (used for calculating the
      correct Rx interrupt watchdog timer value on a DMA channel
@@ -23,6 +26,9 @@ Optional properties:
 - mac-address: mac address to be assigned to the device. Can be overridden
   by UEFI.
 - dma-coherent: Present if dma operations are coherent
+- amd,per-channel-interrupt: Indicates that Rx and Tx complete will generate
+  a unique interrupt for each DMA channel - this requires an additional
+  interrupt be configured for each DMA channel
 
 Example:
 	xgbe@e0700000 {
@@ -30,7 +36,9 @@ Example:
 		reg = <0 0xe0700000 0 0x80000>,
 		      <0 0xe0780000 0 0x80000>;
 		interrupt-parent = <&gic>;
-		interrupts = <0 325 4>;
+		interrupts = <0 325 4>,
+			     <0 326 1>, <0 327 1>, <0 328 1>, <0 329 1>;
+		amd,per-channel-interrupt;
 		clocks = <&xgbe_dma_clk>, <&xgbe_ptp_clk>;
 		clock-names = "dma_clk", "ptp_clk";
 		phy-handle = <&phy>;
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 8319c99331b0..7a5e4aa5415e 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -179,7 +179,7 @@ config SUNLANCE
 
 config AMD_XGBE
 	tristate "AMD 10GbE Ethernet driver"
-	depends on OF_NET
+	depends on OF_NET && HAS_IOMEM
 	select PHYLIB
 	select AMD_XGBE_PHY
 	select BITREVERSE
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index caade30820d5..2fe8fc71fe01 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -207,6 +207,8 @@
 /* DMA channel register entry bit positions and sizes */
 #define DMA_CH_CR_PBLX8_INDEX		16
 #define DMA_CH_CR_PBLX8_WIDTH		1
+#define DMA_CH_CR_SPH_INDEX		24
+#define DMA_CH_CR_SPH_WIDTH		1
 #define DMA_CH_IER_AIE_INDEX		15
 #define DMA_CH_IER_AIE_WIDTH		1
 #define DMA_CH_IER_FBEE_INDEX		12
@@ -306,6 +308,9 @@
 #define MAC_MACA0LR			0x0304
 #define MAC_MACA1HR			0x0308
 #define MAC_MACA1LR			0x030c
+#define MAC_RSSCR			0x0c80
+#define MAC_RSSAR			0x0c88
+#define MAC_RSSDR			0x0c8c
 #define MAC_TSCR			0x0d00
 #define MAC_SSIR			0x0d04
 #define MAC_STSR			0x0d08
@@ -429,6 +434,8 @@
 #define MAC_RCR_CST_WIDTH		1
 #define MAC_RCR_DCRCC_INDEX		3
 #define MAC_RCR_DCRCC_WIDTH		1
+#define MAC_RCR_HDSMS_INDEX		12
+#define MAC_RCR_HDSMS_WIDTH		3
 #define MAC_RCR_IPC_INDEX		9
 #define MAC_RCR_IPC_WIDTH		1
 #define MAC_RCR_JE_INDEX		8
@@ -445,6 +452,24 @@
 #define MAC_RFCR_UP_WIDTH		1
 #define MAC_RQC0R_RXQ0EN_INDEX		0
 #define MAC_RQC0R_RXQ0EN_WIDTH		2
+#define MAC_RSSAR_ADDRT_INDEX		2
+#define MAC_RSSAR_ADDRT_WIDTH		1
+#define MAC_RSSAR_CT_INDEX		1
+#define MAC_RSSAR_CT_WIDTH		1
+#define MAC_RSSAR_OB_INDEX		0
+#define MAC_RSSAR_OB_WIDTH		1
+#define MAC_RSSAR_RSSIA_INDEX		8
+#define MAC_RSSAR_RSSIA_WIDTH		8
+#define MAC_RSSCR_IP2TE_INDEX		1
+#define MAC_RSSCR_IP2TE_WIDTH		1
+#define MAC_RSSCR_RSSE_INDEX		0
+#define MAC_RSSCR_RSSE_WIDTH		1
+#define MAC_RSSCR_TCP4TE_INDEX		2
+#define MAC_RSSCR_TCP4TE_WIDTH		1
+#define MAC_RSSCR_UDP4TE_INDEX		3
+#define MAC_RSSCR_UDP4TE_WIDTH		1
+#define MAC_RSSDR_DMCH_INDEX		0
+#define MAC_RSSDR_DMCH_WIDTH		4
 #define MAC_SSIR_SNSINC_INDEX		8
 #define MAC_SSIR_SNSINC_WIDTH		8
 #define MAC_SSIR_SSINC_INDEX		16
@@ -844,9 +869,13 @@
 #define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH	1
 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX	5
 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH	1
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX	6
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH	1
 
 #define RX_NORMAL_DESC0_OVT_INDEX		0
 #define RX_NORMAL_DESC0_OVT_WIDTH		16
+#define RX_NORMAL_DESC2_HL_INDEX		0
+#define RX_NORMAL_DESC2_HL_WIDTH		10
 #define RX_NORMAL_DESC3_CDA_INDEX		27
 #define RX_NORMAL_DESC3_CDA_WIDTH		1
 #define RX_NORMAL_DESC3_CTXT_INDEX		30
@@ -855,14 +884,27 @@
 #define RX_NORMAL_DESC3_ES_WIDTH		1
 #define RX_NORMAL_DESC3_ETLT_INDEX		16
 #define RX_NORMAL_DESC3_ETLT_WIDTH		4
+#define RX_NORMAL_DESC3_FD_INDEX		29
+#define RX_NORMAL_DESC3_FD_WIDTH		1
 #define RX_NORMAL_DESC3_INTE_INDEX		30
 #define RX_NORMAL_DESC3_INTE_WIDTH		1
+#define RX_NORMAL_DESC3_L34T_INDEX		20
+#define RX_NORMAL_DESC3_L34T_WIDTH		4
 #define RX_NORMAL_DESC3_LD_INDEX		28
 #define RX_NORMAL_DESC3_LD_WIDTH		1
 #define RX_NORMAL_DESC3_OWN_INDEX		31
 #define RX_NORMAL_DESC3_OWN_WIDTH		1
 #define RX_NORMAL_DESC3_PL_INDEX		0
 #define RX_NORMAL_DESC3_PL_WIDTH		14
+#define RX_NORMAL_DESC3_RSV_INDEX		26
+#define RX_NORMAL_DESC3_RSV_WIDTH		1
+
+#define RX_DESC3_L34T_IPV4_TCP			1
+#define RX_DESC3_L34T_IPV4_UDP			2
+#define RX_DESC3_L34T_IPV4_ICMP			3
+#define RX_DESC3_L34T_IPV6_TCP			9
+#define RX_DESC3_L34T_IPV6_UDP			10
+#define RX_DESC3_L34T_IPV6_ICMP			11
 
 #define RX_CONTEXT_DESC3_TSA_INDEX		4
 #define RX_CONTEXT_DESC3_TSA_WIDTH		1
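
Illustration (not part of the patch): each register field above is described
by an _INDEX (bit position) and _WIDTH (bit count) pair, which the driver's
XGMAC_GET_BITS/XGMAC_SET_BITS-style accessors turn into shift-and-mask
operations. A simplified standalone model of that pattern (macro names here
are hypothetical, and widths below 32 are assumed):

	#include <stdint.h>
	#include <stdio.h>

	#define GET_BITS(var, index, width) \
		(((var) >> (index)) & ((1U << (width)) - 1))

	#define SET_BITS(var, index, width, val) do {			\
		(var) &= ~(((1U << (width)) - 1) << (index));		\
		(var) |= ((uint32_t)(val) &				\
			  ((1U << (width)) - 1)) << (index);		\
	} while (0)

	int main(void)
	{
		uint32_t desc3 = 0;

		/* Hand a descriptor to hardware: OWN is bit 31, width 1
		 * (RX_NORMAL_DESC3_OWN_INDEX/_WIDTH above).
		 */
		SET_BITS(desc3, 31, 1, 1);
		printf("OWN = %u, desc3 = 0x%08x\n",
		       (unsigned int)GET_BITS(desc3, 31, 1),
		       (unsigned int)desc3);	/* OWN = 1, 0x80000000 */
		return 0;
	}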
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 6fc5da01437d..e6b9f54b9697 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -117,7 +117,7 @@
 #include "xgbe.h"
 #include "xgbe-common.h"
 
-static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *);
+static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);
 
 static void xgbe_free_ring(struct xgbe_prv_data *pdata,
 			   struct xgbe_ring *ring)
@@ -131,13 +131,35 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata,
 	if (ring->rdata) {
 		for (i = 0; i < ring->rdesc_count; i++) {
 			rdata = XGBE_GET_DESC_DATA(ring, i);
-			xgbe_unmap_skb(pdata, rdata);
+			xgbe_unmap_rdata(pdata, rdata);
 		}
 
 		kfree(ring->rdata);
 		ring->rdata = NULL;
 	}
 
+	if (ring->rx_hdr_pa.pages) {
+		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
+			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
+		put_page(ring->rx_hdr_pa.pages);
+
+		ring->rx_hdr_pa.pages = NULL;
+		ring->rx_hdr_pa.pages_len = 0;
+		ring->rx_hdr_pa.pages_offset = 0;
+		ring->rx_hdr_pa.pages_dma = 0;
+	}
+
+	if (ring->rx_buf_pa.pages) {
+		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
+			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
+		put_page(ring->rx_buf_pa.pages);
+
+		ring->rx_buf_pa.pages = NULL;
+		ring->rx_buf_pa.pages_len = 0;
+		ring->rx_buf_pa.pages_offset = 0;
+		ring->rx_buf_pa.pages_dma = 0;
+	}
+
 	if (ring->rdesc) {
 		dma_free_coherent(pdata->dev,
 				  (sizeof(struct xgbe_ring_desc) *
@@ -233,6 +255,96 @@ err_ring:
 	return ret;
 }
 
+static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+			    struct xgbe_page_alloc *pa, gfp_t gfp, int order)
+{
+	struct page *pages = NULL;
+	dma_addr_t pages_dma;
+	int ret;
+
+	/* Try to obtain pages, decreasing order if necessary */
+	gfp |= __GFP_COLD | __GFP_COMP;
+	while (order >= 0) {
+		pages = alloc_pages(gfp, order);
+		if (pages)
+			break;
+
+		order--;
+	}
+	if (!pages)
+		return -ENOMEM;
+
+	/* Map the pages */
+	pages_dma = dma_map_page(pdata->dev, pages, 0,
+				 PAGE_SIZE << order, DMA_FROM_DEVICE);
+	ret = dma_mapping_error(pdata->dev, pages_dma);
+	if (ret) {
+		put_page(pages);
+		return ret;
+	}
+
+	pa->pages = pages;
+	pa->pages_len = PAGE_SIZE << order;
+	pa->pages_offset = 0;
+	pa->pages_dma = pages_dma;
+
+	return 0;
+}
+
+static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
+				 struct xgbe_page_alloc *pa,
+				 unsigned int len)
+{
+	get_page(pa->pages);
+	bd->pa = *pa;
+
+	bd->dma = pa->pages_dma + pa->pages_offset;
+	bd->dma_len = len;
+
+	pa->pages_offset += len;
+	if ((pa->pages_offset + len) > pa->pages_len) {
+		/* This data descriptor is responsible for unmapping page(s) */
+		bd->pa_unmap = *pa;
+
+		/* Get a new allocation next time */
+		pa->pages = NULL;
+		pa->pages_len = 0;
+		pa->pages_offset = 0;
+		pa->pages_dma = 0;
+	}
+}
+
+static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
+			      struct xgbe_ring *ring,
+			      struct xgbe_ring_data *rdata)
+{
+	int order, ret;
+
+	if (!ring->rx_hdr_pa.pages) {
+		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
+		if (ret)
+			return ret;
+	}
+
+	if (!ring->rx_buf_pa.pages) {
+		order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
+		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
+				       order);
+		if (ret)
+			return ret;
+	}
+
+	/* Set up the header page info */
+	xgbe_set_buffer_data(&rdata->rx_hdr, &ring->rx_hdr_pa,
+			     XGBE_SKB_ALLOC_SIZE);
+
+	/* Set up the buffer page info */
+	xgbe_set_buffer_data(&rdata->rx_buf, &ring->rx_buf_pa,
+			     pdata->rx_buf_size);
+
+	return 0;
+}
+
 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -281,8 +393,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
 	struct xgbe_ring *ring;
 	struct xgbe_ring_desc *rdesc;
 	struct xgbe_ring_data *rdata;
-	dma_addr_t rdesc_dma, skb_dma;
-	struct sk_buff *skb = NULL;
+	dma_addr_t rdesc_dma;
 	unsigned int i, j;
 
 	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
@@ -302,22 +413,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
 			rdata->rdesc = rdesc;
 			rdata->rdesc_dma = rdesc_dma;
 
-			/* Allocate skb & assign to each rdesc */
-			skb = dev_alloc_skb(pdata->rx_buf_size);
-			if (skb == NULL)
-				break;
-			skb_dma = dma_map_single(pdata->dev, skb->data,
-						 pdata->rx_buf_size,
-						 DMA_FROM_DEVICE);
-			if (dma_mapping_error(pdata->dev, skb_dma)) {
-				netdev_alert(pdata->netdev,
-					     "failed to do the dma map\n");
-				dev_kfree_skb_any(skb);
+			if (xgbe_map_rx_buffer(pdata, ring, rdata))
 				break;
-			}
-			rdata->skb = skb;
-			rdata->skb_dma = skb_dma;
-			rdata->skb_dma_len = pdata->rx_buf_size;
 
 			rdesc++;
 			rdesc_dma += sizeof(struct xgbe_ring_desc);
@@ -334,8 +431,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
 	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
 }
 
-static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
-			   struct xgbe_ring_data *rdata)
+static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
+			     struct xgbe_ring_data *rdata)
 {
 	if (rdata->skb_dma) {
 		if (rdata->mapped_as_page) {
@@ -354,6 +451,29 @@ static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
 		rdata->skb = NULL;
 	}
 
+	if (rdata->rx_hdr.pa.pages)
+		put_page(rdata->rx_hdr.pa.pages);
+
+	if (rdata->rx_hdr.pa_unmap.pages) {
+		dma_unmap_page(pdata->dev, rdata->rx_hdr.pa_unmap.pages_dma,
+			       rdata->rx_hdr.pa_unmap.pages_len,
+			       DMA_FROM_DEVICE);
+		put_page(rdata->rx_hdr.pa_unmap.pages);
+	}
+
+	if (rdata->rx_buf.pa.pages)
+		put_page(rdata->rx_buf.pa.pages);
+
+	if (rdata->rx_buf.pa_unmap.pages) {
+		dma_unmap_page(pdata->dev, rdata->rx_buf.pa_unmap.pages_dma,
+			       rdata->rx_buf.pa_unmap.pages_len,
+			       DMA_FROM_DEVICE);
+		put_page(rdata->rx_buf.pa_unmap.pages);
+	}
+
+	memset(&rdata->rx_hdr, 0, sizeof(rdata->rx_hdr));
+	memset(&rdata->rx_buf, 0, sizeof(rdata->rx_buf));
+
 	rdata->tso_header = 0;
 	rdata->len = 0;
 	rdata->interrupt = 0;
@@ -494,7 +614,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 err_out:
 	while (start_index < cur_index) {
 		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
-		xgbe_unmap_skb(pdata, rdata);
+		xgbe_unmap_rdata(pdata, rdata);
 	}
 
 	DBGPR("<--xgbe_map_tx_skb: count=0\n");
@@ -502,40 +622,25 @@ err_out:
 	return 0;
 }
 
-static void xgbe_realloc_skb(struct xgbe_channel *channel)
+static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
 {
 	struct xgbe_prv_data *pdata = channel->pdata;
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_ring *ring = channel->rx_ring;
 	struct xgbe_ring_data *rdata;
-	struct sk_buff *skb = NULL;
-	dma_addr_t skb_dma;
 	int i;
 
-	DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n",
+	DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
 	      ring->rx.realloc_index);
 
 	for (i = 0; i < ring->dirty; i++) {
 		rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
 
 		/* Reset rdata values */
-		xgbe_unmap_skb(pdata, rdata);
+		xgbe_unmap_rdata(pdata, rdata);
 
-		/* Allocate skb & assign to each rdesc */
-		skb = dev_alloc_skb(pdata->rx_buf_size);
-		if (skb == NULL)
+		if (xgbe_map_rx_buffer(pdata, ring, rdata))
 			break;
-		skb_dma = dma_map_single(pdata->dev, skb->data,
-					 pdata->rx_buf_size, DMA_FROM_DEVICE);
-		if (dma_mapping_error(pdata->dev, skb_dma)) {
-			netdev_alert(pdata->netdev,
-				     "failed to do the dma map\n");
-			dev_kfree_skb_any(skb);
-			break;
-		}
-		rdata->skb = skb;
-		rdata->skb_dma = skb_dma;
-		rdata->skb_dma_len = pdata->rx_buf_size;
 
 		hw_if->rx_desc_reset(rdata);
 
@@ -543,7 +648,7 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
 	}
 	ring->dirty = 0;
 
-	DBGPR("<--xgbe_realloc_skb\n");
+	DBGPR("<--xgbe_realloc_rx_buffer\n");
 }
 
 void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
@@ -553,8 +658,8 @@ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
 	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
 	desc_if->free_ring_resources = xgbe_free_ring_resources;
 	desc_if->map_tx_skb = xgbe_map_tx_skb;
-	desc_if->realloc_skb = xgbe_realloc_skb;
-	desc_if->unmap_skb = xgbe_unmap_skb;
+	desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
+	desc_if->unmap_rdata = xgbe_unmap_rdata;
 	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
 	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
 
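
Illustration (not part of the patch): xgbe_set_buffer_data() above carves one
multi-page DMA mapping into fixed-size Rx slices, taking get_page() for each
slice handed out; the consumer whose slice exhausts the allocation inherits
the pa_unmap copy and later performs the single dma_unmap_page()/put_page()
for the whole run. A reduced userspace model of the slicing bookkeeping
(names hypothetical):

	#include <stdio.h>
	#include <stdlib.h>

	/* A slab of mapped pages carved into fixed-size Rx slices. */
	struct page_slab {
		char *base;	/* start of the allocation */
		size_t len;	/* total bytes (PAGE_SIZE << order) */
		size_t offset;	/* next free byte */
		int refs;	/* stand-in for the page refcount */
	};

	/* Hand out the next slice; report when the slab can serve no more. */
	static char *take_slice(struct page_slab *slab, size_t len, int *last)
	{
		char *buf = slab->base + slab->offset;

		slab->refs++;			/* get_page() per consumer */
		slab->offset += len;
		*last = (slab->offset + len > slab->len);
		return buf;
	}

	int main(void)
	{
		struct page_slab slab = { malloc(16384), 16384, 0, 0 };
		int last = 0;
		unsigned int n = 0;

		while (!last) {
			take_slice(&slab, 2048, &last);
			n++;
		}
		/* prints: served 8 slices of 2048 from 16384 bytes */
		printf("served %u slices of 2048 from 16384 bytes\n", n);
		free(slab.base);
		return 0;
	}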
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 9da3a03e8c07..7daa2cd9af5f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -335,6 +335,161 @@ static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
 	}
 }
 
+static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_channel *channel;
+	unsigned int i;
+
+	channel = pdata->channel;
+	for (i = 0; i < pdata->channel_count; i++, channel++) {
+		if (!channel->rx_ring)
+			break;
+
+		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
+	}
+
+	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
+}
+
+static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
+			      unsigned int index, unsigned int val)
+{
+	unsigned int wait;
+	int ret = 0;
+
+	mutex_lock(&pdata->rss_mutex);
+
+	if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	XGMAC_IOWRITE(pdata, MAC_RSSDR, val);
+
+	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
+	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
+	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
+	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
+
+	wait = 1000;
+	while (wait--) {
+		if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+			goto unlock;
+
+		usleep_range(1000, 1500);
+	}
+
+	ret = -EBUSY;
+
+unlock:
+	mutex_unlock(&pdata->rss_mutex);
+
+	return ret;
+}
+
+static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
+{
+	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+	unsigned int *key = (unsigned int *)&pdata->rss_key;
+	int ret;
+
+	while (key_regs--) {
+		ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
+					 key_regs, *key++);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+		ret = xgbe_write_rss_reg(pdata,
+					 XGBE_RSS_LOOKUP_TABLE_TYPE, i,
+					 pdata->rss_table[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
+{
+	memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
+
+	return xgbe_write_rss_hash_key(pdata);
+}
+
+static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
+				     const u32 *table)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
+		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);
+
+	return xgbe_write_rss_lookup_table(pdata);
+}
+
+static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
+{
+	int ret;
+
+	if (!pdata->hw_feat.rss)
+		return -EOPNOTSUPP;
+
+	/* Program the hash key */
+	ret = xgbe_write_rss_hash_key(pdata);
+	if (ret)
+		return ret;
+
+	/* Program the lookup table */
+	ret = xgbe_write_rss_lookup_table(pdata);
+	if (ret)
+		return ret;
+
+	/* Set the RSS options */
+	XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
+
+	/* Enable RSS */
+	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
+
+	return 0;
+}
+
+static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
+{
+	if (!pdata->hw_feat.rss)
+		return -EOPNOTSUPP;
+
+	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
+
+	return 0;
+}
+
+static void xgbe_config_rss(struct xgbe_prv_data *pdata)
+{
+	int ret;
+
+	if (!pdata->hw_feat.rss)
+		return;
+
+	if (pdata->netdev->features & NETIF_F_RXHASH)
+		ret = xgbe_enable_rss(pdata);
+	else
+		ret = xgbe_disable_rss(pdata);
+
+	if (ret)
+		netdev_err(pdata->netdev,
+			   "error configuring RSS, RSS disabled\n");
+}
+
 static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
 {
 	unsigned int max_q_count, q_count;
@@ -465,17 +620,21 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
 
 		if (channel->tx_ring) {
 			/* Enable the following Tx interrupts
-			 *   TIE  - Transmit Interrupt Enable (unless polling)
+			 *   TIE  - Transmit Interrupt Enable (unless using
+			 *          per channel interrupts)
 			 */
-			XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+			if (!pdata->per_channel_irq)
+				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
 		}
 		if (channel->rx_ring) {
 			/* Enable following Rx interrupts
 			 *   RBUE - Receive Buffer Unavailable Enable
-			 *   RIE  - Receive Interrupt Enable
+			 *   RIE  - Receive Interrupt Enable (unless using
+			 *          per channel interrupts)
 			 */
 			XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
-			XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+			if (!pdata->per_channel_irq)
+				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
 		}
 
 		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
@@ -880,13 +1039,15 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
 	rdesc->desc1 = 0;
 	rdesc->desc2 = 0;
 	rdesc->desc3 = 0;
+
+	/* Make sure ownership is written to the descriptor */
+	wmb();
 }
 
 static void xgbe_tx_desc_init(struct xgbe_channel *channel)
 {
 	struct xgbe_ring *ring = channel->tx_ring;
 	struct xgbe_ring_data *rdata;
-	struct xgbe_ring_desc *rdesc;
 	int i;
 	int start_index = ring->cur;
 
@@ -895,26 +1056,11 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
 	/* Initialze all descriptors */
 	for (i = 0; i < ring->rdesc_count; i++) {
 		rdata = XGBE_GET_DESC_DATA(ring, i);
-		rdesc = rdata->rdesc;
 
-		/* Initialize Tx descriptor
-		 *   Set buffer 1 (lo) address to zero
-		 *   Set buffer 1 (hi) address to zero
-		 *   Reset all other control bits (IC, TTSE, B2L & B1L)
-		 *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC,
-		 *     etc)
-		 */
-		rdesc->desc0 = 0;
-		rdesc->desc1 = 0;
-		rdesc->desc2 = 0;
-		rdesc->desc3 = 0;
+		/* Initialize Tx descriptor */
+		xgbe_tx_desc_reset(rdata);
 	}
 
-	/* Make sure everything is written to the descriptor(s) before
-	 * telling the device about them
-	 */
-	wmb();
-
 	/* Update the total number of Tx descriptors */
 	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
 
@@ -933,19 +1079,19 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
 	struct xgbe_ring_desc *rdesc = rdata->rdesc;
 
 	/* Reset the Rx descriptor
-	 *   Set buffer 1 (lo) address to dma address (lo)
-	 *   Set buffer 1 (hi) address to dma address (hi)
-	 *   Set buffer 2 (lo) address to zero
-	 *   Set buffer 2 (hi) address to zero and set control bits
-	 *     OWN and INTE
+	 *   Set buffer 1 (lo) address to header dma address (lo)
+	 *   Set buffer 1 (hi) address to header dma address (hi)
+	 *   Set buffer 2 (lo) address to buffer dma address (lo)
+	 *   Set buffer 2 (hi) address to buffer dma address (hi) and
+	 *     set control bits OWN and INTE
 	 */
-	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
-	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
-	rdesc->desc2 = 0;
+	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx_hdr.dma));
+	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx_hdr.dma));
+	rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx_buf.dma));
+	rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx_buf.dma));
 
-	rdesc->desc3 = 0;
-	if (rdata->interrupt)
-		XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
+	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
+			  rdata->interrupt ? 1 : 0);
 
 	/* Since the Rx DMA engine is likely running, make sure everything
 	 * is written to the descriptor(s) before setting the OWN bit
@@ -964,7 +1110,6 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
 	struct xgbe_prv_data *pdata = channel->pdata;
 	struct xgbe_ring *ring = channel->rx_ring;
 	struct xgbe_ring_data *rdata;
-	struct xgbe_ring_desc *rdesc;
 	unsigned int start_index = ring->cur;
 	unsigned int rx_coalesce, rx_frames;
 	unsigned int i;
@@ -977,34 +1122,16 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
 	/* Initialize all descriptors */
 	for (i = 0; i < ring->rdesc_count; i++) {
 		rdata = XGBE_GET_DESC_DATA(ring, i);
-		rdesc = rdata->rdesc;
 
-		/* Initialize Rx descriptor
-		 *   Set buffer 1 (lo) address to dma address (lo)
-		 *   Set buffer 1 (hi) address to dma address (hi)
-		 *   Set buffer 2 (lo) address to zero
-		 *   Set buffer 2 (hi) address to zero and set control
-		 *     bits OWN and INTE appropriateley
-		 */
-		rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
-		rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
-		rdesc->desc2 = 0;
-		rdesc->desc3 = 0;
-		XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
-		XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
-		rdata->interrupt = 1;
-		if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) {
-			/* Clear interrupt on completion bit */
-			XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
-					  0);
+		/* Set interrupt on completion bit as appropriate */
+		if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames)))
 			rdata->interrupt = 0;
-		}
-	}
+		else
+			rdata->interrupt = 1;
 
-	/* Make sure everything is written to the descriptors before
-	 * telling the device about them
-	 */
-	wmb();
+		/* Initialize Rx descriptor */
+		xgbe_rx_desc_reset(rdata);
+	}
 
 	/* Update the total number of Rx descriptors */
 	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
@@ -1198,7 +1325,7 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
 	xgbe_config_flow_control(pdata);
 }
 
-static void xgbe_pre_xmit(struct xgbe_channel *channel)
+static void xgbe_dev_xmit(struct xgbe_channel *channel)
 {
 	struct xgbe_prv_data *pdata = channel->pdata;
 	struct xgbe_ring *ring = channel->tx_ring;
@@ -1211,7 +1338,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
 	int start_index = ring->cur;
 	int i;
 
-	DBGPR("-->xgbe_pre_xmit\n");
+	DBGPR("-->xgbe_dev_xmit\n");
 
 	csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
 			      CSUM_ENABLE);
@@ -1410,7 +1537,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
 	      channel->name, start_index & (ring->rdesc_count - 1),
 	      (ring->cur - 1) & (ring->rdesc_count - 1));
 
-	DBGPR("<--xgbe_pre_xmit\n");
+	DBGPR("<--xgbe_dev_xmit\n");
 }
 
 static int xgbe_dev_read(struct xgbe_channel *channel)
@@ -1420,7 +1547,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 	struct xgbe_ring_desc *rdesc;
 	struct xgbe_packet_data *packet = &ring->packet_data;
 	struct net_device *netdev = channel->pdata->netdev;
-	unsigned int err, etlt;
+	unsigned int err, etlt, l34t;
 
 	DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
 
@@ -1454,6 +1581,31 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
 			       CONTEXT_NEXT, 1);
 
+	/* Get the header length */
+	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
+		rdata->hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
+						   RX_NORMAL_DESC2, HL);
+
+	/* Get the RSS hash */
+	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
+		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+			       RSS_HASH, 1);
+
+		packet->rss_hash = le32_to_cpu(rdesc->desc1);
+
+		l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
+		switch (l34t) {
+		case RX_DESC3_L34T_IPV4_TCP:
+		case RX_DESC3_L34T_IPV4_UDP:
+		case RX_DESC3_L34T_IPV6_TCP:
+		case RX_DESC3_L34T_IPV6_UDP:
+			packet->rss_hash_type = PKT_HASH_TYPE_L4;
+			break;
+		default:
+			packet->rss_hash_type = PKT_HASH_TYPE_L3;
+		}
+	}
+
 	/* Get the packet length */
 	rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
 
@@ -2485,6 +2637,8 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
 	xgbe_config_tx_coalesce(pdata);
 	xgbe_config_rx_buffer_size(pdata);
 	xgbe_config_tso_mode(pdata);
+	xgbe_config_sph_mode(pdata);
+	xgbe_config_rss(pdata);
 	desc_if->wrapper_tx_desc_init(pdata);
 	desc_if->wrapper_rx_desc_init(pdata);
 	xgbe_enable_dma_interrupts(pdata);
@@ -2561,7 +2715,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
 	hw_if->powerup_rx = xgbe_powerup_rx;
 	hw_if->powerdown_rx = xgbe_powerdown_rx;
 
-	hw_if->pre_xmit = xgbe_pre_xmit;
+	hw_if->dev_xmit = xgbe_dev_xmit;
 	hw_if->dev_read = xgbe_dev_read;
 	hw_if->enable_int = xgbe_enable_int;
 	hw_if->disable_int = xgbe_disable_int;
@@ -2620,5 +2774,11 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
 	hw_if->config_dcb_tc = xgbe_config_dcb_tc;
 	hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
 
+	/* For Receive Side Scaling */
+	hw_if->enable_rss = xgbe_enable_rss;
+	hw_if->disable_rss = xgbe_disable_rss;
+	hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
+	hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
+
 	DBGPR("<--xgbe_init_function_ptrs\n");
 }
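
Illustration (not part of the patch): xgbe_write_rss_reg() above uses a
common indirect-register handshake -- stage the value (MAC_RSSDR), select the
entry and type (MAC_RSSAR), set the "operation busy" OB bit, then poll up to
1000 times for hardware to clear it, sleeping about a millisecond per try via
usleep_range(). A simulated standalone model of that handshake, where the
fake hardware completes on the first poll:

	#include <stdio.h>

	struct regs {
		unsigned int data;	/* plays the role of MAC_RSSDR */
		unsigned int addr;	/* plays the role of MAC_RSSAR */
		int ob;			/* "operation busy" flag */
	};

	/* Simulated hardware: consume a kicked operation, clear OB. */
	static void hw_tick(struct regs *r)
	{
		if (r->ob)
			r->ob = 0;
	}

	static int write_indirect(struct regs *r, unsigned int index,
				  unsigned int val)
	{
		int budget = 1000;	/* same retry budget as the driver */

		if (r->ob)
			return -1;	/* previous op still in flight */

		r->data = val;
		r->addr = index;
		r->ob = 1;		/* kick the operation */

		while (budget--) {
			hw_tick(r);	/* stands in for sleep + re-read */
			if (!r->ob)
				return 0;
		}
		return -1;		/* hardware never completed it */
	}

	int main(void)
	{
		struct regs r = { 0, 0, 0 };

		printf("write %s\n",
		       write_indirect(&r, 5, 0xabcd) ? "failed" : "ok");
		return 0;
	}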
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 2349ea970255..ced9f52eb45b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -114,6 +114,7 @@
  *     THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <linux/platform_device.h>
 #include <linux/spinlock.h>
 #include <linux/tcp.h>
 #include <linux/if_vlan.h>
@@ -126,9 +127,99 @@
 #include "xgbe.h"
 #include "xgbe-common.h"
 
-static int xgbe_poll(struct napi_struct *, int);
+static int xgbe_one_poll(struct napi_struct *, int);
+static int xgbe_all_poll(struct napi_struct *, int);
 static void xgbe_set_rx_mode(struct net_device *);
 
+static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_channel *channel_mem, *channel;
+	struct xgbe_ring *tx_ring, *rx_ring;
+	unsigned int count, i;
+	int ret = -ENOMEM;
+
+	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+
+	channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
+	if (!channel_mem)
+		goto err_channel;
+
+	tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
+			  GFP_KERNEL);
+	if (!tx_ring)
+		goto err_tx_ring;
+
+	rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
+			  GFP_KERNEL);
+	if (!rx_ring)
+		goto err_rx_ring;
+
+	for (i = 0, channel = channel_mem; i < count; i++, channel++) {
+		snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
+		channel->pdata = pdata;
+		channel->queue_index = i;
+		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
+				    (DMA_CH_INC * i);
+
+		if (pdata->per_channel_irq) {
+			/* Get the DMA interrupt (offset 1) */
+			ret = platform_get_irq(pdata->pdev, i + 1);
+			if (ret < 0) {
+				netdev_err(pdata->netdev,
+					   "platform_get_irq %u failed\n",
+					   i + 1);
+				goto err_irq;
+			}
+
+			channel->dma_irq = ret;
+		}
+
+		if (i < pdata->tx_ring_count) {
+			spin_lock_init(&tx_ring->lock);
+			channel->tx_ring = tx_ring++;
+		}
+
+		if (i < pdata->rx_ring_count) {
+			spin_lock_init(&rx_ring->lock);
+			channel->rx_ring = rx_ring++;
+		}
+
+		DBGPR("  %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
+		      channel->name, channel->queue_index, channel->dma_regs,
+		      channel->dma_irq, channel->tx_ring, channel->rx_ring);
+	}
+
+	pdata->channel = channel_mem;
+	pdata->channel_count = count;
+
+	return 0;
+
+err_irq:
+	kfree(rx_ring);
+
+err_rx_ring:
+	kfree(tx_ring);
+
+err_tx_ring:
+	kfree(channel_mem);
+
+err_channel:
+	return ret;
+}
+
+static void xgbe_free_channels(struct xgbe_prv_data *pdata)
+{
+	if (!pdata->channel)
+		return;
+
+	kfree(pdata->channel->rx_ring);
+	kfree(pdata->channel->tx_ring);
+	kfree(pdata->channel);
+
+	pdata->channel = NULL;
+	pdata->channel_count = 0;
+}
+
 static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
 {
 	return (ring->rdesc_count - (ring->cur - ring->dirty));
@@ -144,8 +235,8 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
 	}
 
 	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-	if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE)
-		rx_buf_size = XGBE_RX_MIN_BUF_SIZE;
+	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
+
 	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
 		      ~(XGBE_RX_BUF_ALIGN - 1);
 
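
Illustration (not part of the patch): clamp_val() above now also bounds the
buffer at PAGE_SIZE, since the page-based Rx path serves at most page-sized
slices; the masking that follows rounds up to the next multiple of
XGBE_RX_BUF_ALIGN, which must be a power of two (assumed to be 64 here). The
arithmetic worked standalone:

	#include <stdio.h>

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* a = 2^n */

	int main(void)
	{
		/* MTU 1500: 1500 + 14 (ETH_HLEN) + 4 (FCS) + 4 (VLAN) */
		unsigned int rx_buf_size = 1522;

		/* (1522 + 63) & ~63 = 1585 & ~63 = 1536 */
		printf("%u\n", ALIGN_UP(rx_buf_size, 64));
		return 0;
	}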
@@ -213,11 +304,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
 	if (!dma_isr)
 		goto isr_done;
 
-	DBGPR("-->xgbe_isr\n");
-
 	DBGPR("  DMA_ISR = %08x\n", dma_isr);
-	DBGPR("  DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0));
-	DBGPR("  DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1));
 
 	for (i = 0; i < pdata->channel_count; i++) {
 		if (!(dma_isr & (1 << i)))
@@ -228,6 +315,10 @@ static irqreturn_t xgbe_isr(int irq, void *data)
 		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
 		DBGPR("  DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
 
+		/* If we get a TI or RI interrupt that means per channel DMA
+		 * interrupts are not enabled, so we use the private data napi
+		 * structure, not the per channel napi structure
+		 */
 		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
 		    XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
 			if (napi_schedule_prep(&pdata->napi)) {
@@ -270,12 +361,28 @@ static irqreturn_t xgbe_isr(int irq, void *data)
 
 	DBGPR("  DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
 
-	DBGPR("<--xgbe_isr\n");
-
 isr_done:
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t xgbe_dma_isr(int irq, void *data)
+{
+	struct xgbe_channel *channel = data;
+
+	/* Per channel DMA interrupts are enabled, so we use the per
+	 * channel napi structure and not the private data napi structure
+	 */
+	if (napi_schedule_prep(&channel->napi)) {
+		/* Disable Tx and Rx interrupts */
+		disable_irq(channel->dma_irq);
+
+		/* Turn on polling */
+		__napi_schedule(&channel->napi);
+	}
+
+	return IRQ_HANDLED;
+}
+
 static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
 {
 	struct xgbe_channel *channel = container_of(timer,
@@ -283,18 +390,24 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
 						    tx_timer);
 	struct xgbe_ring *ring = channel->tx_ring;
 	struct xgbe_prv_data *pdata = channel->pdata;
+	struct napi_struct *napi;
 	unsigned long flags;
 
 	DBGPR("-->xgbe_tx_timer\n");
 
+	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
 	spin_lock_irqsave(&ring->lock, flags);
 
-	if (napi_schedule_prep(&pdata->napi)) {
+	if (napi_schedule_prep(napi)) {
 		/* Disable Tx and Rx interrupts */
-		xgbe_disable_rx_tx_ints(pdata);
+		if (pdata->per_channel_irq)
+			disable_irq(channel->dma_irq);
+		else
+			xgbe_disable_rx_tx_ints(pdata);
 
 		/* Turn on polling */
-		__napi_schedule(&pdata->napi);
+		__napi_schedule(napi);
 	}
 
 	channel->tx_timer_active = 0;
@@ -430,18 +543,46 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
 
 static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
 {
-	if (add)
-		netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll,
-			       NAPI_POLL_WEIGHT);
-	napi_enable(&pdata->napi);
+	struct xgbe_channel *channel;
+	unsigned int i;
+
+	if (pdata->per_channel_irq) {
+		channel = pdata->channel;
+		for (i = 0; i < pdata->channel_count; i++, channel++) {
+			if (add)
+				netif_napi_add(pdata->netdev, &channel->napi,
+					       xgbe_one_poll, NAPI_POLL_WEIGHT);
+
+			napi_enable(&channel->napi);
+		}
+	} else {
+		if (add)
+			netif_napi_add(pdata->netdev, &pdata->napi,
+				       xgbe_all_poll, NAPI_POLL_WEIGHT);
+
+		napi_enable(&pdata->napi);
+	}
 }
 
 static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
 {
-	napi_disable(&pdata->napi);
+	struct xgbe_channel *channel;
+	unsigned int i;
+
+	if (pdata->per_channel_irq) {
+		channel = pdata->channel;
+		for (i = 0; i < pdata->channel_count; i++, channel++) {
+			napi_disable(&channel->napi);
+
+			if (del)
+				netif_napi_del(&channel->napi);
+		}
+	} else {
+		napi_disable(&pdata->napi);
 
-	if (del)
-		netif_napi_del(&pdata->napi);
+		if (del)
+			netif_napi_del(&pdata->napi);
+	}
 }
 
 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
@@ -472,7 +613,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
 	DBGPR("<--xgbe_init_rx_coalesce\n");
 }
 
-static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
+static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
 	struct xgbe_channel *channel;
@@ -480,7 +621,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
 	struct xgbe_ring_data *rdata;
 	unsigned int i, j;
 
-	DBGPR("-->xgbe_free_tx_skbuff\n");
+	DBGPR("-->xgbe_free_tx_data\n");
 
 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
@@ -490,14 +631,14 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
 
 		for (j = 0; j < ring->rdesc_count; j++) {
 			rdata = XGBE_GET_DESC_DATA(ring, j);
-			desc_if->unmap_skb(pdata, rdata);
+			desc_if->unmap_rdata(pdata, rdata);
 		}
 	}
 
-	DBGPR("<--xgbe_free_tx_skbuff\n");
+	DBGPR("<--xgbe_free_tx_data\n");
 }
 
-static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
+static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
 	struct xgbe_channel *channel;
@@ -505,7 +646,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
 	struct xgbe_ring_data *rdata;
 	unsigned int i, j;
 
-	DBGPR("-->xgbe_free_rx_skbuff\n");
+	DBGPR("-->xgbe_free_rx_data\n");
 
 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
@@ -515,11 +656,11 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
 
 		for (j = 0; j < ring->rdesc_count; j++) {
 			rdata = XGBE_GET_DESC_DATA(ring, j);
-			desc_if->unmap_skb(pdata, rdata);
+			desc_if->unmap_rdata(pdata, rdata);
 		}
 	}
 
-	DBGPR("<--xgbe_free_rx_skbuff\n");
+	DBGPR("<--xgbe_free_rx_data\n");
 }
 
 static void xgbe_adjust_link(struct net_device *netdev)
@@ -754,7 +895,9 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 
 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
 {
+	struct xgbe_channel *channel;
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
+	unsigned int i;
 
 	DBGPR("-->xgbe_restart_dev\n");
 
@@ -763,10 +906,15 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
 		return;
 
 	xgbe_stop(pdata);
-	synchronize_irq(pdata->irq_number);
+	synchronize_irq(pdata->dev_irq);
+	if (pdata->per_channel_irq) {
+		channel = pdata->channel;
+		for (i = 0; i < pdata->channel_count; i++, channel++)
+			synchronize_irq(channel->dma_irq);
+	}
 
-	xgbe_free_tx_skbuff(pdata);
-	xgbe_free_rx_skbuff(pdata);
+	xgbe_free_tx_data(pdata);
+	xgbe_free_rx_data(pdata);
 
 	/* Issue software reset to device if requested */
 	if (reset)
@@ -1037,13 +1185,13 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
 	packet->rdesc_count = 0;
 
 	if (xgbe_is_tso(skb)) {
-		/* TSO requires an extra desriptor if mss is different */
+		/* TSO requires an extra descriptor if mss is different */
 		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
 			context_desc = 1;
 			packet->rdesc_count++;
 		}
 
-		/* TSO requires an extra desriptor for TSO header */
+		/* TSO requires an extra descriptor for TSO header */
 		packet->rdesc_count++;
 
 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
@@ -1091,6 +1239,9 @@ static int xgbe_open(struct net_device *netdev)
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
+	struct xgbe_channel *channel = NULL;
+	char dma_irq_name[IFNAMSIZ + 32];
+	unsigned int i = 0;
 	int ret;
 
 	DBGPR("-->xgbe_open\n");
@@ -1119,24 +1270,47 @@ static int xgbe_open(struct net_device *netdev)
 		goto err_ptpclk;
 	pdata->rx_buf_size = ret;
 
+	/* Allocate the channel and ring structures */
+	ret = xgbe_alloc_channels(pdata);
+	if (ret)
+		goto err_ptpclk;
+
 	/* Allocate the ring descriptors and buffers */
 	ret = desc_if->alloc_ring_resources(pdata);
 	if (ret)
-		goto err_ptpclk;
+		goto err_channels;
 
 	/* Initialize the device restart and Tx timestamp work struct */
 	INIT_WORK(&pdata->restart_work, xgbe_restart);
 	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
 
 	/* Request interrupts */
-	ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
+	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
 			       netdev->name, pdata);
 	if (ret) {
 		netdev_alert(netdev, "error requesting irq %d\n",
-			     pdata->irq_number);
-		goto err_irq;
+			     pdata->dev_irq);
+		goto err_rings;
+	}
+
+	if (pdata->per_channel_irq) {
+		channel = pdata->channel;
+		for (i = 0; i < pdata->channel_count; i++, channel++) {
+			snprintf(dma_irq_name, sizeof(dma_irq_name) - 1,
+				 "%s-TxRx-%u", netdev_name(netdev),
+				 channel->queue_index);
+
+			ret = devm_request_irq(pdata->dev, channel->dma_irq,
+					       xgbe_dma_isr, 0, dma_irq_name,
+					       channel);
+			if (ret) {
+				netdev_alert(netdev,
+					     "error requesting irq %d\n",
+					     channel->dma_irq);
+				goto err_irq;
+			}
+		}
 	}
-	pdata->irq_number = netdev->irq;
 
 	ret = xgbe_start(pdata);
 	if (ret)
@@ -1149,12 +1323,21 @@ static int xgbe_open(struct net_device *netdev)
 err_start:
 	hw_if->exit(pdata);
 
-	devm_free_irq(pdata->dev, pdata->irq_number, pdata);
-	pdata->irq_number = 0;
-
 err_irq:
+	if (pdata->per_channel_irq) {
+		/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+		for (i--, channel--; i < pdata->channel_count; i--, channel--)
+			devm_free_irq(pdata->dev, channel->dma_irq, channel);
+	}
+
+	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+err_rings:
 	desc_if->free_ring_resources(pdata);
 
+err_channels:
+	xgbe_free_channels(pdata);
+
 err_ptpclk:
 	clk_disable_unprepare(pdata->ptpclk);
 
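
Illustration (not part of the patch): the err_irq unwind above frees only the
per-channel IRQs that were actually requested by walking i backwards; because
i is unsigned, the final i-- wraps past zero to UINT_MAX and the
i < pdata->channel_count test terminates the loop, exactly as the in-line
comment notes. The idiom in isolation:

	#include <stdio.h>

	int main(void)
	{
		unsigned int channel_count = 4;
		unsigned int i = 2;	/* IRQs 0 and 1 were requested,
					 * then request #2 failed */

		for (i--; i < channel_count; i--)
			printf("freeing per-channel irq %u\n", i); /* 1, 0 */
		return 0;
	}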
@@ -1172,6 +1355,8 @@ static int xgbe_close(struct net_device *netdev)
1172 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1355 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1173 struct xgbe_hw_if *hw_if = &pdata->hw_if; 1356 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1174 struct xgbe_desc_if *desc_if = &pdata->desc_if; 1357 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1358 struct xgbe_channel *channel;
1359 unsigned int i;
1175 1360
1176 DBGPR("-->xgbe_close\n"); 1361 DBGPR("-->xgbe_close\n");
1177 1362
@@ -1181,13 +1366,18 @@ static int xgbe_close(struct net_device *netdev)
1181 /* Issue software reset to device */ 1366 /* Issue software reset to device */
1182 hw_if->exit(pdata); 1367 hw_if->exit(pdata);
1183 1368
1184 /* Free all the ring data */ 1369 /* Free the ring descriptors and buffers */
1185 desc_if->free_ring_resources(pdata); 1370 desc_if->free_ring_resources(pdata);
1186 1371
1187 /* Release the interrupt */ 1372 /* Free the channel and ring structures */
1188 if (pdata->irq_number != 0) { 1373 xgbe_free_channels(pdata);
1189 devm_free_irq(pdata->dev, pdata->irq_number, pdata); 1374
1190 pdata->irq_number = 0; 1375 /* Release the interrupts */
1376 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
1377 if (pdata->per_channel_irq) {
1378 channel = pdata->channel;
1379 for (i = 0; i < pdata->channel_count; i++, channel++)
1380 devm_free_irq(pdata->dev, channel->dma_irq, channel);
1191 } 1381 }
1192 1382
1193 /* Disable the clocks */ 1383 /* Disable the clocks */
@@ -1258,7 +1448,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
1258 xgbe_prep_tx_tstamp(pdata, skb, packet); 1448 xgbe_prep_tx_tstamp(pdata, skb, packet);
1259 1449
1260 /* Configure required descriptor fields for transmission */ 1450 /* Configure required descriptor fields for transmission */
1261 hw_if->pre_xmit(channel); 1451 hw_if->dev_xmit(channel);
1262 1452
1263#ifdef XGMAC_ENABLE_TX_PKT_DUMP 1453#ifdef XGMAC_ENABLE_TX_PKT_DUMP
1264 xgbe_print_pkt(netdev, skb, true); 1454 xgbe_print_pkt(netdev, skb, true);
@@ -1420,14 +1610,20 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1420static void xgbe_poll_controller(struct net_device *netdev) 1610static void xgbe_poll_controller(struct net_device *netdev)
1421{ 1611{
1422 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1612 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1613 struct xgbe_channel *channel;
1614 unsigned int i;
1423 1615
1424 DBGPR("-->xgbe_poll_controller\n"); 1616 DBGPR("-->xgbe_poll_controller\n");
1425 1617
1426 disable_irq(pdata->irq_number); 1618 if (pdata->per_channel_irq) {
1427 1619 channel = pdata->channel;
1428 xgbe_isr(pdata->irq_number, pdata); 1620 for (i = 0; i < pdata->channel_count; i++, channel++)
1429 1621 xgbe_dma_isr(channel->dma_irq, channel);
1430 enable_irq(pdata->irq_number); 1622 } else {
1623 disable_irq(pdata->dev_irq);
1624 xgbe_isr(pdata->dev_irq, pdata);
1625 enable_irq(pdata->dev_irq);
1626 }
1431 1627
1432 DBGPR("<--xgbe_poll_controller\n"); 1628 DBGPR("<--xgbe_poll_controller\n");
1433} 1629}
@@ -1465,12 +1661,21 @@ static int xgbe_set_features(struct net_device *netdev,
1465{ 1661{
1466 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1662 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1467 struct xgbe_hw_if *hw_if = &pdata->hw_if; 1663 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1468 netdev_features_t rxcsum, rxvlan, rxvlan_filter; 1664 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
1665 int ret = 0;
1469 1666
1667 rxhash = pdata->netdev_features & NETIF_F_RXHASH;
1470 rxcsum = pdata->netdev_features & NETIF_F_RXCSUM; 1668 rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
1471 rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX; 1669 rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
1472 rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER; 1670 rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
1473 1671
1672 if ((features & NETIF_F_RXHASH) && !rxhash)
1673 ret = hw_if->enable_rss(pdata);
1674 else if (!(features & NETIF_F_RXHASH) && rxhash)
1675 ret = hw_if->disable_rss(pdata);
1676 if (ret)
1677 return ret;
1678
1474 if ((features & NETIF_F_RXCSUM) && !rxcsum) 1679 if ((features & NETIF_F_RXCSUM) && !rxcsum)
1475 hw_if->enable_rx_csum(pdata); 1680 hw_if->enable_rx_csum(pdata);
1476 else if (!(features & NETIF_F_RXCSUM) && rxcsum) 1681 else if (!(features & NETIF_F_RXCSUM) && rxcsum)
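
xgbe_set_features only touches the hardware on a real transition: it compares the requested feature bits against a cached copy in pdata->netdev_features, so re-setting an already-active feature is a no-op. The new RSS toggle also differs from the older bits in that it can fail, which is why its result is checked before the remaining features are processed. A reduced, runnable sketch of the compare-then-toggle pattern, with a made-up feature flag and print statements standing in for the hw_if calls:

    #include <stdbool.h>
    #include <stdio.h>

    #define F_RXHASH (1u << 0)              /* stand-in for NETIF_F_RXHASH */

    static unsigned int cached_features;    /* stand-in for pdata->netdev_features */

    static void toggle_rss(unsigned int wanted)
    {
            bool have = cached_features & F_RXHASH;
            bool want = wanted & F_RXHASH;

            if (want && !have)
                    printf("enable_rss()\n");       /* hw_if->enable_rss(pdata) */
            else if (!want && have)
                    printf("disable_rss()\n");      /* hw_if->disable_rss(pdata) */
            /* no transition: leave the hardware alone */
            cached_features = wanted;
    }

    int main(void)
    {
            toggle_rss(F_RXHASH);   /* off -> on: enables */
            toggle_rss(F_RXHASH);   /* on -> on: no-op */
            toggle_rss(0);          /* on -> off: disables */
            return 0;
    }
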
@@ -1524,7 +1729,7 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
1524 struct xgbe_ring *ring = channel->rx_ring; 1729 struct xgbe_ring *ring = channel->rx_ring;
1525 struct xgbe_ring_data *rdata; 1730 struct xgbe_ring_data *rdata;
1526 1731
1527 desc_if->realloc_skb(channel); 1732 desc_if->realloc_rx_buffer(channel);
1528 1733
1529 /* Update the Rx Tail Pointer Register with address of 1734 /* Update the Rx Tail Pointer Register with address of
1530 * the last cleaned entry */ 1735 * the last cleaned entry */
@@ -1533,6 +1738,31 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
1533 lower_32_bits(rdata->rdesc_dma)); 1738 lower_32_bits(rdata->rdesc_dma));
1534} 1739}
1535 1740
1741static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
1742 struct xgbe_ring_data *rdata,
1743 unsigned int *len)
1744{
1745 struct net_device *netdev = pdata->netdev;
1746 struct sk_buff *skb;
1747 u8 *packet;
1748 unsigned int copy_len;
1749
1750 skb = netdev_alloc_skb_ip_align(netdev, rdata->rx_hdr.dma_len);
1751 if (!skb)
1752 return NULL;
1753
1754 packet = page_address(rdata->rx_hdr.pa.pages) +
1755 rdata->rx_hdr.pa.pages_offset;
1756 copy_len = (rdata->hdr_len) ? rdata->hdr_len : *len;
1757 copy_len = min(rdata->rx_hdr.dma_len, copy_len);
1758 skb_copy_to_linear_data(skb, packet, copy_len);
1759 skb_put(skb, copy_len);
1760
1761 *len -= copy_len;
1762
1763 return skb;
1764}
1765
1536static int xgbe_tx_poll(struct xgbe_channel *channel) 1766static int xgbe_tx_poll(struct xgbe_channel *channel)
1537{ 1767{
1538 struct xgbe_prv_data *pdata = channel->pdata; 1768 struct xgbe_prv_data *pdata = channel->pdata;
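
xgbe_create_skb above is the header half of the new split header/payload receive: it allocates a small skb and copies into its linear area either the header the hardware split out (rdata->hdr_len) or, if no split happened, as much of the packet as the header buffer holds; whatever remains is later attached as a page fragment. The length arithmetic reduces to two steps, shown here as a runnable sketch with assumed buffer sizes:

    #include <stdio.h>

    #define min(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            unsigned int hdr_dma_len = 256; /* assumed header buffer size */
            unsigned int hdr_len = 54;      /* header length reported by hardware */
            unsigned int len = 1400;        /* bytes described by this descriptor */
            unsigned int copy_len;

            /* Copy exactly the split-out header if there is one; otherwise
             * fall back to the whole packet, capped at the header buffer. */
            copy_len = hdr_len ? hdr_len : len;
            copy_len = min(hdr_dma_len, copy_len);

            len -= copy_len;        /* remainder arrives as a page fragment */
            printf("linear: %u bytes, fragment: %u bytes\n", copy_len, len);
            return 0;
    }
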
@@ -1566,7 +1796,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
1566#endif 1796#endif
1567 1797
1568 /* Free the SKB and reset the descriptor for re-use */ 1798 /* Free the SKB and reset the descriptor for re-use */
1569 desc_if->unmap_skb(pdata, rdata); 1799 desc_if->unmap_rdata(pdata, rdata);
1570 hw_if->tx_desc_reset(rdata); 1800 hw_if->tx_desc_reset(rdata);
1571 1801
1572 processed++; 1802 processed++;
@@ -1594,6 +1824,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
1594 struct xgbe_ring_data *rdata; 1824 struct xgbe_ring_data *rdata;
1595 struct xgbe_packet_data *packet; 1825 struct xgbe_packet_data *packet;
1596 struct net_device *netdev = pdata->netdev; 1826 struct net_device *netdev = pdata->netdev;
1827 struct napi_struct *napi;
1597 struct sk_buff *skb; 1828 struct sk_buff *skb;
1598 struct skb_shared_hwtstamps *hwtstamps; 1829 struct skb_shared_hwtstamps *hwtstamps;
1599 unsigned int incomplete, error, context_next, context; 1830 unsigned int incomplete, error, context_next, context;
@@ -1607,6 +1838,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
1607 if (!ring) 1838 if (!ring)
1608 return 0; 1839 return 0;
1609 1840
1841 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
1842
1610 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); 1843 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1611 packet = &ring->packet_data; 1844 packet = &ring->packet_data;
1612 while (packet_count < budget) { 1845 while (packet_count < budget) {
@@ -1641,10 +1874,6 @@ read_again:
1641 ring->cur++; 1874 ring->cur++;
1642 ring->dirty++; 1875 ring->dirty++;
1643 1876
1644 dma_unmap_single(pdata->dev, rdata->skb_dma,
1645 rdata->skb_dma_len, DMA_FROM_DEVICE);
1646 rdata->skb_dma = 0;
1647
1648 incomplete = XGMAC_GET_BITS(packet->attributes, 1877 incomplete = XGMAC_GET_BITS(packet->attributes,
1649 RX_PACKET_ATTRIBUTES, 1878 RX_PACKET_ATTRIBUTES,
1650 INCOMPLETE); 1879 INCOMPLETE);
@@ -1668,26 +1897,33 @@ read_again:
1668 1897
1669 if (!context) { 1898 if (!context) {
1670 put_len = rdata->len - len; 1899 put_len = rdata->len - len;
1671 if (skb) { 1900 len += put_len;
1672 if (pskb_expand_head(skb, 0, put_len, 1901
1673 GFP_ATOMIC)) { 1902 if (!skb) {
1674 DBGPR("pskb_expand_head error\n"); 1903 dma_sync_single_for_cpu(pdata->dev,
1675 if (incomplete) { 1904 rdata->rx_hdr.dma,
1676 error = 1; 1905 rdata->rx_hdr.dma_len,
1677 goto read_again; 1906 DMA_FROM_DEVICE);
1678 } 1907
1679 1908 skb = xgbe_create_skb(pdata, rdata, &put_len);
1680 dev_kfree_skb(skb); 1909 if (!skb) {
1681 goto next_packet; 1910 error = 1;
1911 goto read_again;
1682 } 1912 }
1683 memcpy(skb_tail_pointer(skb), rdata->skb->data,
1684 put_len);
1685 } else {
1686 skb = rdata->skb;
1687 rdata->skb = NULL;
1688 } 1913 }
1689 skb_put(skb, put_len); 1914
1690 len += put_len; 1915 if (put_len) {
1916 dma_sync_single_for_cpu(pdata->dev,
1917 rdata->rx_buf.dma,
1918 rdata->rx_buf.dma_len,
1919 DMA_FROM_DEVICE);
1920
1921 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1922 rdata->rx_buf.pa.pages,
1923 rdata->rx_buf.pa.pages_offset,
1924 put_len, rdata->rx_buf.dma_len);
1925 rdata->rx_buf.pa.pages = NULL;
1926 }
1691 } 1927 }
1692 1928
1693 if (incomplete || context_next) 1929 if (incomplete || context_next)
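
The payload path above is copy-free: after dma_sync_single_for_cpu() makes the buffer visible to the CPU, skb_add_rx_frag() attaches the receive page at its current offset to the skb as a fragment, and the driver drops its reference by clearing rx_buf.pa.pages, handing the page's lifetime to the skb. A toy userspace model of that ownership transfer (the structures are simplified stand-ins, not kernel types):

    #include <stdio.h>
    #include <stdlib.h>

    struct frag { void *page; unsigned int off, len; };

    struct toy_skb {                        /* crude sk_buff stand-in */
            unsigned int nr_frags;
            struct frag frags[4];
    };

    /* Counterpart of skb_add_rx_frag(): record the page, take ownership. */
    static void toy_add_rx_frag(struct toy_skb *skb, void **page_ref,
                                unsigned int off, unsigned int len)
    {
            struct frag *f = &skb->frags[skb->nr_frags++];

            f->page = *page_ref;
            f->off = off;
            f->len = len;
            *page_ref = NULL;       /* like rdata->rx_buf.pa.pages = NULL */
    }

    int main(void)
    {
            struct toy_skb skb = { 0 };
            void *rx_page = malloc(4096);   /* stands in for the DMA page */

            toy_add_rx_frag(&skb, &rx_page, 0, 1400);
            printf("driver ref: %p, skb frags: %u\n", rx_page, skb.nr_frags);
            free(skb.frags[0].page);        /* the skb now owns the page */
            return 0;
    }
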
@@ -1733,13 +1969,18 @@ read_again:
1733 hwtstamps->hwtstamp = ns_to_ktime(nsec); 1969 hwtstamps->hwtstamp = ns_to_ktime(nsec);
1734 } 1970 }
1735 1971
1972 if (XGMAC_GET_BITS(packet->attributes,
1973 RX_PACKET_ATTRIBUTES, RSS_HASH))
1974 skb_set_hash(skb, packet->rss_hash,
1975 packet->rss_hash_type);
1976
1736 skb->dev = netdev; 1977 skb->dev = netdev;
1737 skb->protocol = eth_type_trans(skb, netdev); 1978 skb->protocol = eth_type_trans(skb, netdev);
1738 skb_record_rx_queue(skb, channel->queue_index); 1979 skb_record_rx_queue(skb, channel->queue_index);
1739 skb_mark_napi_id(skb, &pdata->napi); 1980 skb_mark_napi_id(skb, napi);
1740 1981
1741 netdev->last_rx = jiffies; 1982 netdev->last_rx = jiffies;
1742 napi_gro_receive(&pdata->napi, skb); 1983 napi_gro_receive(napi, skb);
1743 1984
1744next_packet: 1985next_packet:
1745 packet_count++; 1986 packet_count++;
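
The RSS_HASH test above, like the register accesses throughout the series, uses the driver's XGMAC_GET_BITS/XGMAC_SET_BITS macros, which paste a register and field name together to look up a fixed (index, width) pair and then mask-and-shift. A simplified, runnable analogue with the index and width passed explicitly (the field position is invented for the demo):

    #include <stdio.h>

    #define GET_BITS(var, index, width) \
            (((var) >> (index)) & ((1u << (width)) - 1))

    #define SET_BITS(var, index, width, val) do { \
            (var) &= ~(((1u << (width)) - 1) << (index)); \
            (var) |= ((unsigned int)(val) << (index)); \
    } while (0)

    int main(void)
    {
            unsigned int attributes = 0;

            SET_BITS(attributes, 5, 1, 1);  /* e.g. an RSS_HASH-style flag */
            printf("flag = %u\n", GET_BITS(attributes, 5, 1));
            return 0;
    }
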
@@ -1761,7 +2002,35 @@ next_packet:
1761 return packet_count; 2002 return packet_count;
1762} 2003}
1763 2004
1764static int xgbe_poll(struct napi_struct *napi, int budget) 2005static int xgbe_one_poll(struct napi_struct *napi, int budget)
2006{
2007 struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
2008 napi);
2009 int processed = 0;
2010
2011 DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
2012
2013 /* Cleanup Tx ring first */
2014 xgbe_tx_poll(channel);
2015
2016 /* Process Rx ring next */
2017 processed = xgbe_rx_poll(channel, budget);
2018
2019 /* If we processed everything, we are done */
2020 if (processed < budget) {
2021 /* Turn off polling */
2022 napi_complete(napi);
2023
2024 /* Enable Tx and Rx interrupts */
2025 enable_irq(channel->dma_irq);
2026 }
2027
2028 DBGPR("<--xgbe_one_poll: received = %d\n", processed);
2029
2030 return processed;
2031}
2032
2033static int xgbe_all_poll(struct napi_struct *napi, int budget)
1765{ 2034{
1766 struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data, 2035 struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
1767 napi); 2036 napi);
@@ -1770,7 +2039,7 @@ static int xgbe_poll(struct napi_struct *napi, int budget)
1770 int processed, last_processed; 2039 int processed, last_processed;
1771 unsigned int i; 2040 unsigned int i;
1772 2041
1773 DBGPR("-->xgbe_poll: budget=%d\n", budget); 2042 DBGPR("-->xgbe_all_poll: budget=%d\n", budget);
1774 2043
1775 processed = 0; 2044 processed = 0;
1776 ring_budget = budget / pdata->rx_ring_count; 2045 ring_budget = budget / pdata->rx_ring_count;
@@ -1798,7 +2067,7 @@ static int xgbe_poll(struct napi_struct *napi, int budget)
1798 xgbe_enable_rx_tx_ints(pdata); 2067 xgbe_enable_rx_tx_ints(pdata);
1799 } 2068 }
1800 2069
1801 DBGPR("<--xgbe_poll: received = %d\n", processed); 2070 DBGPR("<--xgbe_all_poll: received = %d\n", processed);
1802 2071
1803 return processed; 2072 return processed;
1804} 2073}
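
With a single device interrupt, one NAPI instance has to service every ring, so xgbe_all_poll divides its budget evenly (ring_budget = budget / pdata->rx_ring_count) and, in the function's surrounding do/while loop that these hunks elide, keeps cycling the rings until the budget is spent or a full pass makes no progress. A small runnable simulation of that scheduling shape (backlogs and counts are invented):

    #include <stdio.h>

    #define RINGS 4

    static int backlog[RINGS] = { 5, 2, 0, 3 };     /* pretend pending packets */

    static int rx_poll(int ring, int budget)        /* drain up to 'budget' */
    {
            int n = backlog[ring] < budget ? backlog[ring] : budget;

            backlog[ring] -= n;
            return n;
    }

    int main(void)
    {
            int budget = 16, processed = 0, last;
            int ring_budget = budget / RINGS;       /* equal slice per ring */
            int i;

            do {            /* mirrors the driver's outer re-poll loop */
                    last = processed;
                    for (i = 0; i < RINGS; i++)
                            processed += rx_poll(i, ring_budget);
            } while (processed < budget && processed != last);

            printf("processed %d of %d\n", processed, budget);
            return 0;
    }
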
@@ -1812,10 +2081,10 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
1812 while (count--) { 2081 while (count--) {
1813 rdata = XGBE_GET_DESC_DATA(ring, idx); 2082 rdata = XGBE_GET_DESC_DATA(ring, idx);
1814 rdesc = rdata->rdesc; 2083 rdesc = rdata->rdesc;
1815 DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx, 2084 pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
1816 (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE", 2085 (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
1817 le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1), 2086 le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
1818 le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3)); 2087 le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
1819 idx++; 2088 idx++;
1820 } 2089 }
1821} 2090}
@@ -1823,9 +2092,9 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
1823void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc, 2092void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
1824 unsigned int idx) 2093 unsigned int idx)
1825{ 2094{
1826 DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx, 2095 pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
1827 le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1), 2096 le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
1828 le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3)); 2097 le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
1829} 2098}
1830 2099
1831void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) 2100void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 49508ec98b72..95d44538357f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -452,9 +452,9 @@ static int xgbe_set_coalesce(struct net_device *netdev,
452 rx_usecs); 452 rx_usecs);
453 return -EINVAL; 453 return -EINVAL;
454 } 454 }
455 if (rx_frames > pdata->channel->rx_ring->rdesc_count) { 455 if (rx_frames > pdata->rx_desc_count) {
456 netdev_alert(netdev, "rx-frames is limited to %d frames\n", 456 netdev_alert(netdev, "rx-frames is limited to %d frames\n",
457 pdata->channel->rx_ring->rdesc_count); 457 pdata->rx_desc_count);
458 return -EINVAL; 458 return -EINVAL;
459 } 459 }
460 460
@@ -462,9 +462,9 @@ static int xgbe_set_coalesce(struct net_device *netdev,
462 tx_frames = ec->tx_max_coalesced_frames; 462 tx_frames = ec->tx_max_coalesced_frames;
463 463
464 /* Check the bounds of values for Tx */ 464 /* Check the bounds of values for Tx */
465 if (tx_frames > pdata->channel->tx_ring->rdesc_count) { 465 if (tx_frames > pdata->tx_desc_count) {
466 netdev_alert(netdev, "tx-frames is limited to %d frames\n", 466 netdev_alert(netdev, "tx-frames is limited to %d frames\n",
467 pdata->channel->tx_ring->rdesc_count); 467 pdata->tx_desc_count);
468 return -EINVAL; 468 return -EINVAL;
469 } 469 }
470 470
@@ -481,6 +481,75 @@ static int xgbe_set_coalesce(struct net_device *netdev,
481 return 0; 481 return 0;
482} 482}
483 483
484static int xgbe_get_rxnfc(struct net_device *netdev,
485 struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
486{
487 struct xgbe_prv_data *pdata = netdev_priv(netdev);
488
489 switch (rxnfc->cmd) {
490 case ETHTOOL_GRXRINGS:
491 rxnfc->data = pdata->rx_ring_count;
492 break;
493 default:
494 return -EOPNOTSUPP;
495 }
496
497 return 0;
498}
499
500static u32 xgbe_get_rxfh_key_size(struct net_device *netdev)
501{
502 struct xgbe_prv_data *pdata = netdev_priv(netdev);
503
504 return sizeof(pdata->rss_key);
505}
506
507static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev)
508{
509 struct xgbe_prv_data *pdata = netdev_priv(netdev);
510
511 return ARRAY_SIZE(pdata->rss_table);
512}
513
514static int xgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
515{
516 struct xgbe_prv_data *pdata = netdev_priv(netdev);
517 unsigned int i;
518
519 if (indir) {
520 for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
521 indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
522 MAC_RSSDR, DMCH);
523 }
524
525 if (key)
526 memcpy(key, pdata->rss_key, sizeof(pdata->rss_key));
527
528 return 0;
529}
530
531static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
532 const u8 *key)
533{
534 struct xgbe_prv_data *pdata = netdev_priv(netdev);
535 struct xgbe_hw_if *hw_if = &pdata->hw_if;
536 unsigned int ret;
537
538 if (indir) {
539 ret = hw_if->set_rss_lookup_table(pdata, indir);
540 if (ret)
541 return ret;
542 }
543
544 if (key) {
545 ret = hw_if->set_rss_hash_key(pdata, key);
546 if (ret)
547 return ret;
548 }
549
550 return 0;
551}
552
484static int xgbe_get_ts_info(struct net_device *netdev, 553static int xgbe_get_ts_info(struct net_device *netdev,
485 struct ethtool_ts_info *ts_info) 554 struct ethtool_ts_info *ts_info)
486{ 555{
@@ -526,6 +595,11 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
526 .get_strings = xgbe_get_strings, 595 .get_strings = xgbe_get_strings,
527 .get_ethtool_stats = xgbe_get_ethtool_stats, 596 .get_ethtool_stats = xgbe_get_ethtool_stats,
528 .get_sset_count = xgbe_get_sset_count, 597 .get_sset_count = xgbe_get_sset_count,
598 .get_rxnfc = xgbe_get_rxnfc,
599 .get_rxfh_key_size = xgbe_get_rxfh_key_size,
600 .get_rxfh_indir_size = xgbe_get_rxfh_indir_size,
601 .get_rxfh = xgbe_get_rxfh,
602 .set_rxfh = xgbe_set_rxfh,
529 .get_ts_info = xgbe_get_ts_info, 603 .get_ts_info = xgbe_get_ts_info,
530}; 604};
531 605
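
The five new hooks are all plumbing: once registered, the generic ethtool core drives them, so `ethtool -x <dev>` reads the hash key and indirection table and `ethtool -X <dev>` rewrites them with no driver-specific tooling. Since each rss_table entry carries only the destination channel in its DMCH field, get_rxfh reduces to one field extraction per entry; a runnable sketch with an assumed field position and width:

    #include <stdio.h>

    #define DMCH_INDEX 0    /* assumed bit position of the DMCH field */
    #define DMCH_WIDTH 4    /* assumed field width */

    int main(void)
    {
            unsigned int rss_table[8] = { 0, 1, 2, 3, 0, 1, 2, 3 };
            unsigned int indir[8];
            unsigned int i;

            for (i = 0; i < 8; i++) {       /* what get_rxfh does per entry */
                    indir[i] = (rss_table[i] >> DMCH_INDEX) &
                               ((1u << DMCH_WIDTH) - 1);
                    printf("indir[%u] = %u\n", i, indir[i]);
            }
            return 0;
    }
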
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index f5a8fa03921a..05fbdf96e77e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -133,60 +133,6 @@ MODULE_LICENSE("Dual BSD/GPL");
133MODULE_VERSION(XGBE_DRV_VERSION); 133MODULE_VERSION(XGBE_DRV_VERSION);
134MODULE_DESCRIPTION(XGBE_DRV_DESC); 134MODULE_DESCRIPTION(XGBE_DRV_DESC);
135 135
136static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
137{
138 struct xgbe_channel *channel_mem, *channel;
139 struct xgbe_ring *tx_ring, *rx_ring;
140 unsigned int count, i;
141
142 DBGPR("-->xgbe_alloc_rings\n");
143
144 count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
145
146 channel_mem = devm_kcalloc(pdata->dev, count,
147 sizeof(struct xgbe_channel), GFP_KERNEL);
148 if (!channel_mem)
149 return NULL;
150
151 tx_ring = devm_kcalloc(pdata->dev, pdata->tx_ring_count,
152 sizeof(struct xgbe_ring), GFP_KERNEL);
153 if (!tx_ring)
154 return NULL;
155
156 rx_ring = devm_kcalloc(pdata->dev, pdata->rx_ring_count,
157 sizeof(struct xgbe_ring), GFP_KERNEL);
158 if (!rx_ring)
159 return NULL;
160
161 for (i = 0, channel = channel_mem; i < count; i++, channel++) {
162 snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
163 channel->pdata = pdata;
164 channel->queue_index = i;
165 channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
166 (DMA_CH_INC * i);
167
168 if (i < pdata->tx_ring_count) {
169 spin_lock_init(&tx_ring->lock);
170 channel->tx_ring = tx_ring++;
171 }
172
173 if (i < pdata->rx_ring_count) {
174 spin_lock_init(&rx_ring->lock);
175 channel->rx_ring = rx_ring++;
176 }
177
178 DBGPR(" %s - queue_index=%u, dma_regs=%p, tx=%p, rx=%p\n",
179 channel->name, channel->queue_index, channel->dma_regs,
180 channel->tx_ring, channel->rx_ring);
181 }
182
183 pdata->channel_count = count;
184
185 DBGPR("<--xgbe_alloc_rings\n");
186
187 return channel_mem;
188}
189
190static void xgbe_default_config(struct xgbe_prv_data *pdata) 136static void xgbe_default_config(struct xgbe_prv_data *pdata)
191{ 137{
192 DBGPR("-->xgbe_default_config\n"); 138 DBGPR("-->xgbe_default_config\n");
@@ -224,6 +170,7 @@ static int xgbe_probe(struct platform_device *pdev)
224 struct device *dev = &pdev->dev; 170 struct device *dev = &pdev->dev;
225 struct resource *res; 171 struct resource *res;
226 const u8 *mac_addr; 172 const u8 *mac_addr;
173 unsigned int i;
227 int ret; 174 int ret;
228 175
229 DBGPR("--> xgbe_probe\n"); 176 DBGPR("--> xgbe_probe\n");
@@ -244,6 +191,7 @@ static int xgbe_probe(struct platform_device *pdev)
244 191
245 spin_lock_init(&pdata->lock); 192 spin_lock_init(&pdata->lock);
246 mutex_init(&pdata->xpcs_mutex); 193 mutex_init(&pdata->xpcs_mutex);
194 mutex_init(&pdata->rss_mutex);
247 spin_lock_init(&pdata->tstamp_lock); 195 spin_lock_init(&pdata->tstamp_lock);
248 196
249 /* Set and validate the number of descriptors for a ring */ 197 /* Set and validate the number of descriptors for a ring */
@@ -318,12 +266,18 @@ static int xgbe_probe(struct platform_device *pdev)
318 pdata->awcache = XGBE_DMA_SYS_AWCACHE; 266 pdata->awcache = XGBE_DMA_SYS_AWCACHE;
319 } 267 }
320 268
269 /* Check for per channel interrupt support */
270 if (of_property_read_bool(dev->of_node, XGBE_DMA_IRQS))
271 pdata->per_channel_irq = 1;
272
321 ret = platform_get_irq(pdev, 0); 273 ret = platform_get_irq(pdev, 0);
322 if (ret < 0) { 274 if (ret < 0) {
323 dev_err(dev, "platform_get_irq failed\n"); 275 dev_err(dev, "platform_get_irq 0 failed\n");
324 goto err_io; 276 goto err_io;
325 } 277 }
326 netdev->irq = ret; 278 pdata->dev_irq = ret;
279
280 netdev->irq = pdata->dev_irq;
327 netdev->base_addr = (unsigned long)pdata->xgmac_regs; 281 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
328 282
329 /* Set all the function pointers */ 283 /* Set all the function pointers */
@@ -383,13 +337,16 @@ static int xgbe_probe(struct platform_device *pdev)
383 goto err_io; 337 goto err_io;
384 } 338 }
385 339
386 /* Allocate the rings for the DMA channels */ 340 /* Initialize RSS hash key and lookup table */
387 pdata->channel = xgbe_alloc_rings(pdata); 341 get_random_bytes(pdata->rss_key, sizeof(pdata->rss_key));
388 if (!pdata->channel) { 342
389 dev_err(dev, "ring allocation failed\n"); 343 for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
390 ret = -ENOMEM; 344 XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
391 goto err_io; 345 i % pdata->rx_ring_count);
392 } 346
347 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
348 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
349 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
393 350
394 /* Prepare to register with MDIO */ 351 /* Prepare to register with MDIO */
395 pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name); 352 pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
@@ -421,6 +378,9 @@ static int xgbe_probe(struct platform_device *pdev)
421 NETIF_F_HW_VLAN_CTAG_TX | 378 NETIF_F_HW_VLAN_CTAG_TX |
422 NETIF_F_HW_VLAN_CTAG_FILTER; 379 NETIF_F_HW_VLAN_CTAG_FILTER;
423 380
381 if (pdata->hw_feat.rss)
382 netdev->hw_features |= NETIF_F_RXHASH;
383
424 netdev->vlan_features |= NETIF_F_SG | 384 netdev->vlan_features |= NETIF_F_SG |
425 NETIF_F_IP_CSUM | 385 NETIF_F_IP_CSUM |
426 NETIF_F_IPV6_CSUM | 386 NETIF_F_IPV6_CSUM |
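
Probe now seeds RSS so the feature works out of the box: a random 40-byte hash key, plus a 256-entry indirection table striped across the Rx rings with a simple modulo so every ring gets an equal share of hash buckets. The striping itself is one line; a runnable sketch with an assumed ring count:

    #include <stdio.h>

    #define RSS_MAX_TABLE_SIZE 256

    int main(void)
    {
            unsigned int rx_ring_count = 4; /* assumed */
            unsigned int table[RSS_MAX_TABLE_SIZE];
            unsigned int i;

            for (i = 0; i < RSS_MAX_TABLE_SIZE; i++)
                    table[i] = i % rx_ring_count;   /* value for the DMCH field */

            /* Entries cycle 0,1,2,3,0,1,... so each ring owns 64 buckets. */
            printf("table[0..5] = %u %u %u %u %u %u\n",
                   table[0], table[1], table[2], table[3], table[4], table[5]);
            return 0;
    }
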
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 789957d43a13..aa8da9f4f1f9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -142,6 +142,8 @@
142 142
143#define XGBE_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) 143#define XGBE_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
144#define XGBE_RX_BUF_ALIGN 64 144#define XGBE_RX_BUF_ALIGN 64
145#define XGBE_SKB_ALLOC_SIZE 256
146#define XGBE_SPH_HDSMS_SIZE 2 /* Keep in sync with SKB_ALLOC_SIZE */
145 147
146#define XGBE_MAX_DMA_CHANNELS 16 148#define XGBE_MAX_DMA_CHANNELS 16
147#define XGBE_MAX_QUEUES 16 149#define XGBE_MAX_QUEUES 16
@@ -171,6 +173,7 @@
171/* Device-tree clock names */ 173/* Device-tree clock names */
172#define XGBE_DMA_CLOCK "dma_clk" 174#define XGBE_DMA_CLOCK "dma_clk"
173#define XGBE_PTP_CLOCK "ptp_clk" 175#define XGBE_PTP_CLOCK "ptp_clk"
176#define XGBE_DMA_IRQS "amd,per-channel-interrupt"
174 177
175/* Timestamp support - values based on 50MHz PTP clock 178/* Timestamp support - values based on 50MHz PTP clock
176 * 50MHz => 20 nsec 179 * 50MHz => 20 nsec
@@ -212,6 +215,12 @@
212/* Maximum MAC address hash table size (256 bits = 8 bytes) */ 215/* Maximum MAC address hash table size (256 bits = 8 bytes) */
213#define XGBE_MAC_HASH_TABLE_SIZE 8 216#define XGBE_MAC_HASH_TABLE_SIZE 8
214 217
218/* Receive Side Scaling */
219#define XGBE_RSS_HASH_KEY_SIZE 40
220#define XGBE_RSS_MAX_TABLE_SIZE 256
221#define XGBE_RSS_LOOKUP_TABLE_TYPE 0
222#define XGBE_RSS_HASH_KEY_TYPE 1
223
215struct xgbe_prv_data; 224struct xgbe_prv_data;
216 225
217struct xgbe_packet_data { 226struct xgbe_packet_data {
@@ -230,14 +239,35 @@ struct xgbe_packet_data {
230 unsigned short vlan_ctag; 239 unsigned short vlan_ctag;
231 240
232 u64 rx_tstamp; 241 u64 rx_tstamp;
242
243 u32 rss_hash;
244 enum pkt_hash_types rss_hash_type;
233}; 245};
234 246
235/* Common Rx and Tx descriptor mapping */ 247/* Common Rx and Tx descriptor mapping */
236struct xgbe_ring_desc { 248struct xgbe_ring_desc {
237 unsigned int desc0; 249 u32 desc0;
238 unsigned int desc1; 250 u32 desc1;
239 unsigned int desc2; 251 u32 desc2;
240 unsigned int desc3; 252 u32 desc3;
253};
254
255/* Page allocation related values */
256struct xgbe_page_alloc {
257 struct page *pages;
258 unsigned int pages_len;
259 unsigned int pages_offset;
260
261 dma_addr_t pages_dma;
262};
263
264/* Ring entry buffer data */
265struct xgbe_buffer_data {
266 struct xgbe_page_alloc pa;
267 struct xgbe_page_alloc pa_unmap;
268
269 dma_addr_t dma;
270 unsigned int dma_len;
241}; 271};
242 272
243/* Structure used to hold information related to the descriptor 273/* Structure used to hold information related to the descriptor
@@ -253,6 +283,10 @@ struct xgbe_ring_data {
253 unsigned int skb_dma_len; /* Length of SKB DMA area */ 283 unsigned int skb_dma_len; /* Length of SKB DMA area */
254 unsigned int tso_header; /* TSO header indicator */ 284 unsigned int tso_header; /* TSO header indicator */
255 285
286 struct xgbe_buffer_data rx_hdr; /* Header locations */
287 struct xgbe_buffer_data rx_buf; /* Payload locations */
288
289 unsigned short hdr_len; /* Length of received header */
256 unsigned short len; /* Length of received Rx packet */ 290 unsigned short len; /* Length of received Rx packet */
257 291
258 unsigned int interrupt; /* Interrupt indicator */ 292 unsigned int interrupt; /* Interrupt indicator */
@@ -291,6 +325,10 @@ struct xgbe_ring {
291 */ 325 */
292 struct xgbe_ring_data *rdata; 326 struct xgbe_ring_data *rdata;
293 327
328 /* Page allocation for RX buffers */
329 struct xgbe_page_alloc rx_hdr_pa;
330 struct xgbe_page_alloc rx_buf_pa;
331
294 /* Ring index values 332 /* Ring index values
295 * cur - Tx: index of descriptor to be used for current transfer 333 * cur - Tx: index of descriptor to be used for current transfer
296 * Rx: index of descriptor to check for packet availability 334 * Rx: index of descriptor to check for packet availability
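
The xgbe_page_alloc bookkeeping above lets one larger page allocation back many receive buffers: each buffer takes the slice starting at pages_offset, the offset then advances, and only when the allocation is exhausted does the ring allocate a fresh set of pages. A toy model of that carve-out (sizes are assumptions):

    #include <stdio.h>

    struct page_alloc {
            unsigned int pages_len;         /* total bytes in the allocation */
            unsigned int pages_offset;      /* next unused byte */
    };

    /* Hand out 'len' bytes: return the offset the buffer starts at,
     * or -1 when the allocation is used up and must be replaced. */
    static int carve(struct page_alloc *pa, unsigned int len)
    {
            unsigned int off;

            if (pa->pages_offset + len > pa->pages_len)
                    return -1;
            off = pa->pages_offset;
            pa->pages_offset += len;
            return (int)off;
    }

    int main(void)
    {
            struct page_alloc pa = { 4096, 0 };     /* one page, assumed size */
            int off;

            while ((off = carve(&pa, 2048)) >= 0)   /* assumed Rx buffer size */
                    printf("buffer at offset %d\n", off);
            return 0;
    }
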
@@ -331,6 +369,12 @@ struct xgbe_channel {
331 unsigned int queue_index; 369 unsigned int queue_index;
332 void __iomem *dma_regs; 370 void __iomem *dma_regs;
333 371
372 /* Per channel interrupt irq number */
373 int dma_irq;
374
375 /* Netdev related settings */
376 struct napi_struct napi;
377
334 unsigned int saved_ier; 378 unsigned int saved_ier;
335 379
336 unsigned int tx_timer_active; 380 unsigned int tx_timer_active;
@@ -456,7 +500,7 @@ struct xgbe_hw_if {
456 500
457 int (*enable_int)(struct xgbe_channel *, enum xgbe_int); 501 int (*enable_int)(struct xgbe_channel *, enum xgbe_int);
458 int (*disable_int)(struct xgbe_channel *, enum xgbe_int); 502 int (*disable_int)(struct xgbe_channel *, enum xgbe_int);
459 void (*pre_xmit)(struct xgbe_channel *); 503 void (*dev_xmit)(struct xgbe_channel *);
460 int (*dev_read)(struct xgbe_channel *); 504 int (*dev_read)(struct xgbe_channel *);
461 void (*tx_desc_init)(struct xgbe_channel *); 505 void (*tx_desc_init)(struct xgbe_channel *);
462 void (*rx_desc_init)(struct xgbe_channel *); 506 void (*rx_desc_init)(struct xgbe_channel *);
@@ -509,14 +553,20 @@ struct xgbe_hw_if {
509 /* For Data Center Bridging config */ 553 /* For Data Center Bridging config */
510 void (*config_dcb_tc)(struct xgbe_prv_data *); 554 void (*config_dcb_tc)(struct xgbe_prv_data *);
511 void (*config_dcb_pfc)(struct xgbe_prv_data *); 555 void (*config_dcb_pfc)(struct xgbe_prv_data *);
556
557 /* For Receive Side Scaling */
558 int (*enable_rss)(struct xgbe_prv_data *);
559 int (*disable_rss)(struct xgbe_prv_data *);
560 int (*set_rss_hash_key)(struct xgbe_prv_data *, const u8 *);
561 int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
512}; 562};
513 563
514struct xgbe_desc_if { 564struct xgbe_desc_if {
515 int (*alloc_ring_resources)(struct xgbe_prv_data *); 565 int (*alloc_ring_resources)(struct xgbe_prv_data *);
516 void (*free_ring_resources)(struct xgbe_prv_data *); 566 void (*free_ring_resources)(struct xgbe_prv_data *);
517 int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *); 567 int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
518 void (*realloc_skb)(struct xgbe_channel *); 568 void (*realloc_rx_buffer)(struct xgbe_channel *);
519 void (*unmap_skb)(struct xgbe_prv_data *, struct xgbe_ring_data *); 569 void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
520 void (*wrapper_tx_desc_init)(struct xgbe_prv_data *); 570 void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
521 void (*wrapper_rx_desc_init)(struct xgbe_prv_data *); 571 void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
522}; 572};
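
Both xgbe_hw_if and xgbe_desc_if are plain function-pointer tables: the hardware- and descriptor-specific implementations sit behind one level of indirection, so new capabilities such as the four RSS entry points are added by growing the table and dispatching through it. The pattern in miniature:

    #include <stdio.h>

    struct hw_if {                          /* reduced xgbe_hw_if-style table */
            void (*dev_xmit)(int channel);
            int (*enable_rss)(void);
    };

    static void xgmac_dev_xmit(int channel)
    {
            printf("xmit on channel %d\n", channel);
    }

    static int xgmac_enable_rss(void)
    {
            printf("rss on\n");
            return 0;
    }

    static const struct hw_if hw_if = {
            .dev_xmit = xgmac_dev_xmit,
            .enable_rss = xgmac_enable_rss, /* new op slots in alongside */
    };

    int main(void)
    {
            hw_if.dev_xmit(0);              /* callers go through the table */
            return hw_if.enable_rss();
    }
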
@@ -581,7 +631,11 @@ struct xgbe_prv_data {
581 /* XPCS indirect addressing mutex */ 631 /* XPCS indirect addressing mutex */
582 struct mutex xpcs_mutex; 632 struct mutex xpcs_mutex;
583 633
584 int irq_number; 634 /* RSS addressing mutex */
635 struct mutex rss_mutex;
636
637 int dev_irq;
638 unsigned int per_channel_irq;
585 639
586 struct xgbe_hw_if hw_if; 640 struct xgbe_hw_if hw_if;
587 struct xgbe_desc_if desc_if; 641 struct xgbe_desc_if desc_if;
@@ -624,7 +678,7 @@ struct xgbe_prv_data {
624 unsigned int rx_riwt; 678 unsigned int rx_riwt;
625 unsigned int rx_frames; 679 unsigned int rx_frames;
626 680
627 /* Current MTU */ 681 /* Current Rx buffer size */
628 unsigned int rx_buf_size; 682 unsigned int rx_buf_size;
629 683
630 /* Flow control settings */ 684 /* Flow control settings */
@@ -632,6 +686,11 @@ struct xgbe_prv_data {
632 unsigned int tx_pause; 686 unsigned int tx_pause;
633 unsigned int rx_pause; 687 unsigned int rx_pause;
634 688
689 /* Receive Side Scaling settings */
690 u8 rss_key[XGBE_RSS_HASH_KEY_SIZE];
691 u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE];
692 u32 rss_options;
693
635 /* MDIO settings */ 694 /* MDIO settings */
636 struct module *phy_module; 695 struct module *phy_module;
637 char *mii_bus_id; 696 char *mii_bus_id;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 75472cf734de..b4b0f804e84c 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -26,7 +26,7 @@ config AMD_PHY
26 26
27config AMD_XGBE_PHY 27config AMD_XGBE_PHY
28 tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs" 28 tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
29 depends on OF 29 depends on OF && HAS_IOMEM
30 ---help--- 30 ---help---
31 Currently supports the AMD 10GbE PHY 31 Currently supports the AMD 10GbE PHY
32 32
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index c456559f6e7f..37b9f3fff1ce 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -992,7 +992,8 @@ static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
992 if (ret & MDIO_CTRL1_RESET) 992 if (ret & MDIO_CTRL1_RESET)
993 return -ETIMEDOUT; 993 return -ETIMEDOUT;
994 994
995 return 0; 995 /* Make sure the XPCS and SerDes are in compatible states */
996 return amd_xgbe_phy_xgmii_mode(phydev);
996} 997}
997 998
998static int amd_xgbe_phy_config_init(struct phy_device *phydev) 999static int amd_xgbe_phy_config_init(struct phy_device *phydev)