aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/intel/ixgbe
diff options
context:
space:
mode:
authorVasu Dev <vasu.dev@intel.com>2015-04-10 01:03:23 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2015-04-10 03:15:55 -0400
commitea412015a2e72faf066e90765bdd1dc4e4ecea41 (patch)
tree1373de1f1bb7af176e41cda87dffd95fab3dd65b /drivers/net/ethernet/intel/ixgbe
parent6d4c96ad4afd7265d2d214e02d28e66cfdaf9bb4 (diff)
ixgbe: adds x550 specific FCoE offloads
Adds x550 specific FCoE offloads for DDP context programming and increased DDP exchanges.

Signed-off-by: Vasu Dev <vasu.dev@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe')
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c122
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h9
3 files changed, 107 insertions, 27 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 2ad91cb04dab..631c603fc966 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -71,6 +71,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
71 struct ixgbe_fcoe *fcoe; 71 struct ixgbe_fcoe *fcoe;
72 struct ixgbe_adapter *adapter; 72 struct ixgbe_adapter *adapter;
73 struct ixgbe_fcoe_ddp *ddp; 73 struct ixgbe_fcoe_ddp *ddp;
74 struct ixgbe_hw *hw;
74 u32 fcbuff; 75 u32 fcbuff;
75 76
76 if (!netdev) 77 if (!netdev)
@@ -85,25 +86,51 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
85 if (!ddp->udl) 86 if (!ddp->udl)
86 return 0; 87 return 0;
87 88
89 hw = &adapter->hw;
88 len = ddp->len; 90 len = ddp->len;
89 /* if there an error, force to invalidate ddp context */ 91 /* if no error then skip ddp context invalidation */
90 if (ddp->err) { 92 if (!ddp->err)
93 goto skip_ddpinv;
94
95 if (hw->mac.type == ixgbe_mac_X550) {
96 /* X550 does not require DDP FCoE lock */
97
98 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0);
99 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid),
100 (xid | IXGBE_FCFLTRW_WE));
101
102 /* program FCBUFF */
103 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0);
104
105 /* program FCDMARW */
106 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
107 (xid | IXGBE_FCDMARW_WE));
108
109 /* read FCBUFF to check context invalidated */
110 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
111 (xid | IXGBE_FCDMARW_RE));
112 fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid));
113 } else {
114 /* other hardware requires DDP FCoE lock */
91 spin_lock_bh(&fcoe->lock); 115 spin_lock_bh(&fcoe->lock);
92 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0); 116 IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0);
93 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW, 117 IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW,
94 (xid | IXGBE_FCFLTRW_WE)); 118 (xid | IXGBE_FCFLTRW_WE));
95 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0); 119 IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0);
96 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, 120 IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
97 (xid | IXGBE_FCDMARW_WE)); 121 (xid | IXGBE_FCDMARW_WE));
98 122
99 /* guaranteed to be invalidated after 100us */ 123 /* guaranteed to be invalidated after 100us */
100 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, 124 IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
101 (xid | IXGBE_FCDMARW_RE)); 125 (xid | IXGBE_FCDMARW_RE));
102 fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF); 126 fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF);
103 spin_unlock_bh(&fcoe->lock); 127 spin_unlock_bh(&fcoe->lock);
104 if (fcbuff & IXGBE_FCBUFF_VALID) 128 }
105 udelay(100); 129
106 } 130 if (fcbuff & IXGBE_FCBUFF_VALID)
131 usleep_range(100, 150);
132
133skip_ddpinv:
107 if (ddp->sgl) 134 if (ddp->sgl)
108 dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, 135 dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc,
109 DMA_FROM_DEVICE); 136 DMA_FROM_DEVICE);
@@ -272,7 +299,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
272 299
273 /* program DMA context */ 300 /* program DMA context */
274 hw = &adapter->hw; 301 hw = &adapter->hw;
275 spin_lock_bh(&fcoe->lock);
276 302
277 /* turn on last frame indication for target mode as FCP_RSPtarget is 303 /* turn on last frame indication for target mode as FCP_RSPtarget is
278 * supposed to send FCP_RSP when it is done. */ 304 * supposed to send FCP_RSP when it is done. */
@@ -283,16 +309,33 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
283 IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl); 309 IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
284 } 310 }
285 311
286 IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); 312 if (hw->mac.type == ixgbe_mac_X550) {
287 IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); 313 /* X550 does not require DDP lock */
288 IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); 314
289 IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); 315 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid),
290 /* program filter context */ 316 ddp->udp & DMA_BIT_MASK(32));
291 IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); 317 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32);
292 IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); 318 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff);
293 IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); 319 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw);
320 /* program filter context */
321 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID);
322 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0);
323 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw);
324 } else {
325 /* DDP lock for indirect DDP context access */
326 spin_lock_bh(&fcoe->lock);
327
328 IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
329 IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
330 IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
331 IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
332 /* program filter context */
333 IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
334 IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
335 IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
294 336
295 spin_unlock_bh(&fcoe->lock); 337 spin_unlock_bh(&fcoe->lock);
338 }
296 339
297 return 1; 340 return 1;
298 341
@@ -371,6 +414,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
371 struct fcoe_crc_eof *crc; 414 struct fcoe_crc_eof *crc;
372 __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR); 415 __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR);
373 __le32 ddp_err; 416 __le32 ddp_err;
417 int ddp_max;
374 u32 fctl; 418 u32 fctl;
375 u16 xid; 419 u16 xid;
376 420
@@ -392,7 +436,11 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
392 else 436 else
393 xid = be16_to_cpu(fh->fh_rx_id); 437 xid = be16_to_cpu(fh->fh_rx_id);
394 438
395 if (xid >= IXGBE_FCOE_DDP_MAX) 439 ddp_max = IXGBE_FCOE_DDP_MAX;
440 /* X550 has different DDP Max limit */
441 if (adapter->hw.mac.type == ixgbe_mac_X550)
442 ddp_max = IXGBE_FCOE_DDP_MAX_X550;
443 if (xid >= ddp_max)
396 return -EINVAL; 444 return -EINVAL;
397 445
398 fcoe = &adapter->fcoe; 446 fcoe = &adapter->fcoe;
@@ -612,7 +660,8 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
612{ 660{
613 struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; 661 struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
614 struct ixgbe_hw *hw = &adapter->hw; 662 struct ixgbe_hw *hw = &adapter->hw;
615 int i, fcoe_q, fcoe_i; 663 int i, fcoe_q, fcoe_i, fcoe_q_h = 0;
664 int fcreta_size;
616 u32 etqf; 665 u32 etqf;
617 666
618 /* Minimal functionality for FCoE requires at least CRC offloads */ 667 /* Minimal functionality for FCoE requires at least CRC offloads */
@@ -633,10 +682,23 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
633 return; 682 return;
634 683
635 /* Use one or more Rx queues for FCoE by redirection table */ 684 /* Use one or more Rx queues for FCoE by redirection table */
636 for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { 685 fcreta_size = IXGBE_FCRETA_SIZE;
686 if (adapter->hw.mac.type == ixgbe_mac_X550)
687 fcreta_size = IXGBE_FCRETA_SIZE_X550;
688
689 for (i = 0; i < fcreta_size; i++) {
690 if (adapter->hw.mac.type == ixgbe_mac_X550) {
691 int fcoe_i_h = fcoe->offset + ((i + fcreta_size) %
692 fcoe->indices);
693 fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx;
694 fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) &
695 IXGBE_FCRETA_ENTRY_HIGH_MASK;
696 }
697
637 fcoe_i = fcoe->offset + (i % fcoe->indices); 698 fcoe_i = fcoe->offset + (i % fcoe->indices);
638 fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; 699 fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
639 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; 700 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
701 fcoe_q |= fcoe_q_h;
640 IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); 702 IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
641 } 703 }
642 IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); 704 IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
@@ -672,13 +734,18 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
672void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter) 734void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
673{ 735{
674 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 736 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
675 int cpu, i; 737 int cpu, i, ddp_max;
676 738
677 /* do nothing if no DDP pools were allocated */ 739 /* do nothing if no DDP pools were allocated */
678 if (!fcoe->ddp_pool) 740 if (!fcoe->ddp_pool)
679 return; 741 return;
680 742
681 for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) 743 ddp_max = IXGBE_FCOE_DDP_MAX;
744 /* X550 has different DDP Max limit */
745 if (adapter->hw.mac.type == ixgbe_mac_X550)
746 ddp_max = IXGBE_FCOE_DDP_MAX_X550;
747
748 for (i = 0; i < ddp_max; i++)
682 ixgbe_fcoe_ddp_put(adapter->netdev, i); 749 ixgbe_fcoe_ddp_put(adapter->netdev, i);
683 750
684 for_each_possible_cpu(cpu) 751 for_each_possible_cpu(cpu)
@@ -758,6 +825,9 @@ static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
758 } 825 }
759 826
760 adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; 827 adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
828 /* X550 has different DDP Max limit */
829 if (adapter->hw.mac.type == ixgbe_mac_X550)
830 adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1;
761 831
762 return 0; 832 return 0;
763} 833}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index 0772b7730fce..38385876effb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -46,6 +46,7 @@
46#define IXGBE_FCBUFF_MAX 65536 /* 64KB max */ 46#define IXGBE_FCBUFF_MAX 65536 /* 64KB max */
47#define IXGBE_FCBUFF_MIN 4096 /* 4KB min */ 47#define IXGBE_FCBUFF_MIN 4096 /* 4KB min */
48#define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */ 48#define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */
49#define IXGBE_FCOE_DDP_MAX_X550 2048 /* 11 bits xid */
49 50
50/* Default traffic class to use for FCoE */ 51/* Default traffic class to use for FCoE */
51#define IXGBE_FCOE_DEFTC 3 52#define IXGBE_FCOE_DEFTC 3
@@ -77,7 +78,7 @@ struct ixgbe_fcoe {
77 struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool; 78 struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool;
78 atomic_t refcnt; 79 atomic_t refcnt;
79 spinlock_t lock; 80 spinlock_t lock;
80 struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; 81 struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX_X550];
81 void *extra_ddp_buffer; 82 void *extra_ddp_buffer;
82 dma_addr_t extra_ddp_buffer_dma; 83 dma_addr_t extra_ddp_buffer_dma;
83 unsigned long mode; 84 unsigned long mode;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 8e393098638a..dd6ba5916dfe 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -610,6 +610,8 @@ struct ixgbe_thermal_sensor_data {
610#define IXGBE_RTTBCNRM 0x04980 610#define IXGBE_RTTBCNRM 0x04980
611#define IXGBE_RTTQCNRM 0x04980 611#define IXGBE_RTTQCNRM 0x04980
612 612
613/* FCoE Direct DMA Context */
614#define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) + ((_j) * 0x10))
613/* FCoE DMA Context Registers */ 615/* FCoE DMA Context Registers */
614#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ 616#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
615#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */ 617#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */
@@ -636,6 +638,9 @@ struct ixgbe_thermal_sensor_data {
636#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */ 638#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */
637#define IXGBE_REOFF 0x05158 /* Rx FC EOF */ 639#define IXGBE_REOFF 0x05158 /* Rx FC EOF */
638#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */ 640#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */
641/* FCoE Direct Filter Context */
642#define IXGBE_FCDFC(_i, _j) (0x28000 + ((_i) * 0x4) + ((_j) * 0x10))
643#define IXGBE_FCDFCD(_i) (0x30000 + ((_i) * 0x4))
639/* FCoE Filter Context Registers */ 644/* FCoE Filter Context Registers */
640#define IXGBE_FCFLT 0x05108 /* FC FLT Context */ 645#define IXGBE_FCFLT 0x05108 /* FC FLT Context */
641#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ 646#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
@@ -666,6 +671,10 @@ struct ixgbe_thermal_sensor_data {
666#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */ 671#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */
667#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ 672#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */
668#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ 673#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
674#define IXGBE_FCRETA_SIZE_X550 32 /* Max entries in FCRETA */
675/* Higher 7 bits for the queue index */
676#define IXGBE_FCRETA_ENTRY_HIGH_MASK 0x007F0000
677#define IXGBE_FCRETA_ENTRY_HIGH_SHIFT 16
669 678
670/* Stats registers */ 679/* Stats registers */
671#define IXGBE_CRCERRS 0x04000 680#define IXGBE_CRCERRS 0x04000