author		Alexander Duyck <alexander.h.duyck@intel.com>	2012-05-05 13:14:28 -0400
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2012-07-19 21:18:01 -0400
commit		5a1ee2704bff078bd58abde38266caa10fbcd714 (patch)
tree		d61088d6578df3d61e85c96cfc60174ea66b2814
parent		81faddefc7da7410059c036d8a5cea442c929d0a (diff)
ixgbe: Merge all FCoE percpu values into a single structure
This change merges the two statistics values for noddp and noddp_ext_buff
and the dma_pool into a single structure that can be allocated per CPU.

The advantages of this are severalfold. First, we only need one
alloc_percpu call now instead of three, which means less overhead for
handling memory allocation failures. Second, in the case of
ixgbe_fcoe_ddp_setup we only need to call get_cpu once, which is a bit
cleaner since we can drop a put_cpu() from the exception path.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c	| 138
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h	|  11
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_main.c	|  23
3 files changed, 86 insertions(+), 86 deletions(-)
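
A condensed sketch of the consolidation this patch performs (illustrative
only; the struct mirrors the one added to ixgbe_fcoe.h below, while
example_alloc() is a hypothetical helper, not code from the patch):

	#include <linux/percpu.h>
	#include <linux/dmapool.h>

	/* state that previously lived in three separate percpu variables
	 * (fcoe->pool, fcoe->pcpu_noddp, fcoe->pcpu_noddp_ext_buff) */
	struct ixgbe_fcoe_ddp_pool {
		struct dma_pool *pool;
		u64 noddp;
		u64 noddp_ext_buff;
	};

	/* one alloc_percpu() call, one failure point, one matching free */
	static struct ixgbe_fcoe_ddp_pool __percpu *example_alloc(void)
	{
		return alloc_percpu(struct ixgbe_fcoe_ddp_pool);
	}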
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index a994570eb906..e7c463c7b6a1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -134,6 +134,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	struct ixgbe_hw *hw;
 	struct ixgbe_fcoe *fcoe;
 	struct ixgbe_fcoe_ddp *ddp;
+	struct ixgbe_fcoe_ddp_pool *ddp_pool;
 	struct scatterlist *sg;
 	unsigned int i, j, dmacount;
 	unsigned int len;
@@ -144,8 +145,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	unsigned int thislen = 0;
 	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
 	dma_addr_t addr = 0;
-	struct dma_pool *pool;
-	unsigned int cpu;
 
 	if (!netdev || !sgl)
 		return 0;
@@ -162,11 +161,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 		return 0;
 
 	fcoe = &adapter->fcoe;
-	if (!fcoe->pool) {
-		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
-		return 0;
-	}
-
 	ddp = &fcoe->ddp[xid];
 	if (ddp->sgl) {
 		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
@@ -175,22 +169,32 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	}
 	ixgbe_fcoe_clear_ddp(ddp);
 
+
+	if (!fcoe->ddp_pool) {
+		e_warn(drv, "No ddp_pool resources allocated\n");
+		return 0;
+	}
+
+	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
+	if (!ddp_pool->pool) {
+		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
+		goto out_noddp;
+	}
+
 	/* setup dma from scsi command sgl */
 	dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
 	if (dmacount == 0) {
 		e_err(drv, "xid 0x%x DMA map error\n", xid);
-		return 0;
+		goto out_noddp;
 	}
 
 	/* alloc the udl from per cpu ddp pool */
-	cpu = get_cpu();
-	pool = *per_cpu_ptr(fcoe->pool, cpu);
-	ddp->udl = dma_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
+	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
 	if (!ddp->udl) {
 		e_err(drv, "failed allocated ddp context\n");
 		goto out_noddp_unmap;
 	}
-	ddp->pool = pool;
+	ddp->pool = ddp_pool->pool;
 	ddp->sgl = sgl;
 	ddp->sgc = sgc;
 
@@ -201,7 +205,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	while (len) {
 		/* max number of buffers allowed in one DDP context */
 		if (j >= IXGBE_BUFFCNT_MAX) {
-			*per_cpu_ptr(fcoe->pcpu_noddp, cpu) += 1;
+			ddp_pool->noddp++;
 			goto out_noddp_free;
 		}
 
@@ -241,7 +245,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	 */
 	if (lastsize == bufflen) {
 		if (j >= IXGBE_BUFFCNT_MAX) {
-			*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) += 1;
+			ddp_pool->noddp_ext_buff++;
 			goto out_noddp_free;
 		}
 
@@ -293,11 +297,12 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	return 1;
 
 out_noddp_free:
-	dma_pool_free(pool, ddp->udl, ddp->udp);
+	dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
 	ixgbe_fcoe_clear_ddp(ddp);
 
 out_noddp_unmap:
 	dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
+out_noddp:
 	put_cpu();
 	return 0;
 }
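
Note on the hunk above: every failure after get_cpu() now funnels through
the new out_noddp label, which is what keeps the single put_cpu() balanced
and lets the individual error paths stop calling it. A minimal sketch of
that control-flow shape (ddp_setup_skeleton() and its percpu argument are
illustrative, not code from the patch):

	#include <linux/percpu.h>

	static int ddp_setup_skeleton(u64 __percpu *pcpu_state)
	{
		/* get_cpu() disables preemption; every path out of the
		 * function must then reach exactly one put_cpu() */
		u64 *state = per_cpu_ptr(pcpu_state, get_cpu());

		if (!*state)		/* e.g. no DMA pool on this CPU */
			goto out_noddp;

		/* ... dma_map_sg()/dma_pool_alloc() work goes here;
		 * failures jump to out_noddp instead of returning ... */
		(*state)++;
		put_cpu();
		return 1;

	out_noddp:
		put_cpu();		/* the single rebalancing point */
		return 0;
	}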
@@ -563,44 +568,63 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
 	return 0;
 }
 
+static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
+{
+	struct ixgbe_fcoe_ddp_pool *ddp_pool;
+
+	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+	if (ddp_pool->pool)
+		dma_pool_destroy(ddp_pool->pool);
+	ddp_pool->pool = NULL;
+}
+
 static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
 {
 	unsigned int cpu;
-	struct dma_pool **pool;
 
-	for_each_possible_cpu(cpu) {
-		pool = per_cpu_ptr(fcoe->pool, cpu);
-		if (*pool)
-			dma_pool_destroy(*pool);
-	}
-	free_percpu(fcoe->pool);
-	fcoe->pool = NULL;
+	for_each_possible_cpu(cpu)
+		ixgbe_fcoe_dma_pool_free(fcoe, cpu);
+
+	free_percpu(fcoe->ddp_pool);
+	fcoe->ddp_pool = NULL;
+}
+
+static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
+				     struct device *dev,
+				     unsigned int cpu)
+{
+	struct ixgbe_fcoe_ddp_pool *ddp_pool;
+	struct dma_pool *pool;
+	char pool_name[32];
+
+	snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
+
+	pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
+			       IXGBE_FCPTR_ALIGN, PAGE_SIZE);
+	if (!pool)
+		return -ENOMEM;
+
+	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+	ddp_pool->pool = pool;
+	ddp_pool->noddp = 0;
+	ddp_pool->noddp_ext_buff = 0;
+
+	return 0;
 }
 
 static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+	struct device *dev = &adapter->pdev->dev;
 	unsigned int cpu;
-	struct dma_pool **pool;
-	char pool_name[32];
 
-	fcoe->pool = alloc_percpu(struct dma_pool *);
-	if (!fcoe->pool)
+	fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);
+	if (!fcoe->ddp_pool)
 		return;
 
 	/* allocate pci pool for each cpu */
-	for_each_possible_cpu(cpu) {
-		snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
-		pool = per_cpu_ptr(fcoe->pool, cpu);
-		*pool = dma_pool_create(pool_name, &adapter->pdev->dev,
-					IXGBE_FCPTR_MAX, IXGBE_FCPTR_ALIGN,
-					PAGE_SIZE);
-		if (!*pool) {
-			e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
-			ixgbe_fcoe_ddp_pools_free(fcoe);
-			return;
-		}
-	}
+	for_each_possible_cpu(cpu)
+		ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
 }
 
 /**
@@ -617,14 +641,13 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
-	unsigned int cpu;
 	u32 etqf;
 
-	if (!fcoe->pool) {
+	if (!fcoe->ddp_pool) {
 		spin_lock_init(&fcoe->lock);
 
 		ixgbe_fcoe_ddp_pools_alloc(adapter);
-		if (!fcoe->pool) {
+		if (!fcoe->ddp_pool) {
 			e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
 			return;
 		}
@@ -646,24 +669,6 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 			e_err(drv, "failed to map extra DDP buffer\n");
 			goto out_extra_ddp_buffer;
 		}
-
-		/* Alloc per cpu mem to count the ddp alloc failure number */
-		fcoe->pcpu_noddp = alloc_percpu(u64);
-		if (!fcoe->pcpu_noddp) {
-			e_err(drv, "failed to alloc noddp counter\n");
-			goto out_pcpu_noddp_alloc_fail;
-		}
-
-		fcoe->pcpu_noddp_ext_buff = alloc_percpu(u64);
-		if (!fcoe->pcpu_noddp_ext_buff) {
-			e_err(drv, "failed to alloc noddp extra buff cnt\n");
-			goto out_pcpu_noddp_extra_buff_alloc_fail;
-		}
-
-		for_each_possible_cpu(cpu) {
-			*per_cpu_ptr(fcoe->pcpu_noddp, cpu) = 0;
-			*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) = 0;
-		}
 	}
 
 	/* Enable L2 EtherType filter for FCoE, necessary for FCoE Rx CRC */
@@ -704,13 +709,6 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 		 (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
 
 	return;
-out_pcpu_noddp_extra_buff_alloc_fail:
-	free_percpu(fcoe->pcpu_noddp);
-out_pcpu_noddp_alloc_fail:
-	dma_unmap_single(&adapter->pdev->dev,
-			 fcoe->extra_ddp_buffer_dma,
-			 IXGBE_FCBUFF_MIN,
-			 DMA_FROM_DEVICE);
 out_extra_ddp_buffer:
 	kfree(fcoe->extra_ddp_buffer);
 out_ddp_pools:
@@ -730,18 +728,18 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
 	int i;
 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 
-	if (!fcoe->pool)
+	if (!fcoe->ddp_pool)
 		return;
 
 	for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
 		ixgbe_fcoe_ddp_put(adapter->netdev, i);
+
 	dma_unmap_single(&adapter->pdev->dev,
 			 fcoe->extra_ddp_buffer_dma,
 			 IXGBE_FCBUFF_MIN,
 			 DMA_FROM_DEVICE);
-	free_percpu(fcoe->pcpu_noddp);
-	free_percpu(fcoe->pcpu_noddp_ext_buff);
 	kfree(fcoe->extra_ddp_buffer);
+
 	ixgbe_fcoe_ddp_pools_free(fcoe);
 }
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index 0ef231a4579f..5d028739fe3f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -65,16 +65,21 @@ struct ixgbe_fcoe_ddp {
 	struct dma_pool *pool;
 };
 
+/* per cpu variables */
+struct ixgbe_fcoe_ddp_pool {
+	struct dma_pool *pool;
+	u64 noddp;
+	u64 noddp_ext_buff;
+};
+
 struct ixgbe_fcoe {
-	struct dma_pool **pool;
+	struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool;
 	atomic_t refcnt;
 	spinlock_t lock;
 	struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
 	unsigned char *extra_ddp_buffer;
 	dma_addr_t extra_ddp_buffer_dma;
 	unsigned long mode;
-	u64 __percpu *pcpu_noddp;
-	u64 __percpu *pcpu_noddp_ext_buff;
 #ifdef CONFIG_IXGBE_DCB
 	u8 up;
 #endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index f110e8868bc6..c66625945534 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5052,11 +5052,6 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
 	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
 	u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
-#ifdef IXGBE_FCOE
-	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
-	unsigned int cpu;
-	u64 fcoe_noddp_counts_sum = 0, fcoe_noddp_ext_buff_counts_sum = 0;
-#endif /* IXGBE_FCOE */
 
 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
 	    test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5187,17 +5182,19 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 		hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
 		hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
 		/* Add up per cpu counters for total ddp aloc fail */
-		if (fcoe->pcpu_noddp && fcoe->pcpu_noddp_ext_buff) {
+		if (adapter->fcoe.ddp_pool) {
+			struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+			struct ixgbe_fcoe_ddp_pool *ddp_pool;
+			unsigned int cpu;
+			u64 noddp = 0, noddp_ext_buff = 0;
 			for_each_possible_cpu(cpu) {
-				fcoe_noddp_counts_sum +=
-					*per_cpu_ptr(fcoe->pcpu_noddp, cpu);
-				fcoe_noddp_ext_buff_counts_sum +=
-					*per_cpu_ptr(fcoe->
-						     pcpu_noddp_ext_buff, cpu);
+				ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+				noddp += ddp_pool->noddp;
+				noddp_ext_buff += ddp_pool->noddp_ext_buff;
 			}
+			hwstats->fcoe_noddp = noddp;
+			hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
 		}
-		hwstats->fcoe_noddp = fcoe_noddp_counts_sum;
-		hwstats->fcoe_noddp_ext_buff = fcoe_noddp_ext_buff_counts_sum;
 #endif /* IXGBE_FCOE */
 		break;
 	default:
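
The counters themselves are only ever incremented by the CPU that owns
them, between get_cpu() and put_cpu(), so publishing the totals is a plain
sum over all possible CPUs as in the hunk above. A standalone sketch of
that pattern (sum_noddp() is illustrative, not code from the patch; the
struct mirrors the one added to ixgbe_fcoe.h):

	#include <linux/percpu.h>
	#include <linux/dmapool.h>

	struct ixgbe_fcoe_ddp_pool {
		struct dma_pool *pool;
		u64 noddp;
		u64 noddp_ext_buff;
	};

	/* each CPU touches only its own slot, so an unsynchronized
	 * read-and-add per possible CPU is fine for statistics */
	static u64 sum_noddp(struct ixgbe_fcoe_ddp_pool __percpu *pools)
	{
		unsigned int cpu;
		u64 total = 0;

		for_each_possible_cpu(cpu)
			total += per_cpu_ptr(pools, cpu)->noddp;

		return total;
	}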