Diffstat (limited to 'drivers/net/ixgbe/ixgbe_fcoe.c')
-rw-r--r-- | drivers/net/ixgbe/ixgbe_fcoe.c | 188
1 files changed, 152 insertions, 36 deletions

diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 072327c5e41a..05920726e824 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -68,7 +68,7 @@ static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
 static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
 {
 	ddp->len = 0;
-	ddp->err = 0;
+	ddp->err = 1;
 	ddp->udl = NULL;
 	ddp->udp = 0UL;
 	ddp->sgl = NULL;
@@ -92,6 +92,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
 	struct ixgbe_fcoe *fcoe;
 	struct ixgbe_adapter *adapter;
 	struct ixgbe_fcoe_ddp *ddp;
+	u32 fcbuff;
 
 	if (!netdev)
 		goto out_ddp_put;
@@ -115,7 +116,14 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
 				(xid | IXGBE_FCDMARW_WE));
+
+		/* guaranteed to be invalidated after 100us */
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
+				(xid | IXGBE_FCDMARW_RE));
+		fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
 		spin_unlock_bh(&fcoe->lock);
+		if (fcbuff & IXGBE_FCBUFF_VALID)
+			udelay(100);
 	}
 	if (ddp->sgl)
 		pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
@@ -127,22 +135,19 @@ out_ddp_put:
 	return len;
 }
 
+
 /**
- * ixgbe_fcoe_ddp_get - called to set up ddp context
+ * ixgbe_fcoe_ddp_setup - called to set up ddp context
  * @netdev: the corresponding net_device
  * @xid: the exchange id requesting ddp
  * @sgl: the scatter-gather list for this request
  * @sgc: the number of scatter-gather items
  *
- * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
- * and is expected to be called from ULD, e.g., FCP layer of libfc
- * to set up ddp for the corresponding xid of the given sglist for
- * the corresponding I/O.
- *
  * Returns : 1 for success and 0 for no ddp
  */
-int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
-		       struct scatterlist *sgl, unsigned int sgc)
+static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
+				struct scatterlist *sgl, unsigned int sgc,
+				int target_mode)
 {
 	struct ixgbe_adapter *adapter;
 	struct ixgbe_hw *hw;
@@ -151,13 +156,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 	struct scatterlist *sg;
 	unsigned int i, j, dmacount;
 	unsigned int len;
-	static const unsigned int bufflen = 4096;
+	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
 	unsigned int firstoff = 0;
 	unsigned int lastsize;
 	unsigned int thisoff = 0;
 	unsigned int thislen = 0;
-	u32 fcbuff, fcdmarw, fcfltrw;
-	dma_addr_t addr;
+	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
+	dma_addr_t addr = 0;
 
 	if (!netdev || !sgl)
 		return 0;
@@ -168,6 +173,11 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 		return 0;
 	}
 
+	/* no DDP if we are already down or resetting */
+	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+	    test_bit(__IXGBE_RESETTING, &adapter->state))
+		return 0;
+
 	fcoe = &adapter->fcoe;
 	if (!fcoe->pool) {
 		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
@@ -241,9 +251,30 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 	/* only the last buffer may have non-full bufflen */
 	lastsize = thisoff + thislen;
 
+	/*
+	 * lastsize cannot be bufflen.
+	 * If it is, then add another buffer with lastsize = 1.
+	 */
+	if (lastsize == bufflen) {
+		if (j >= IXGBE_BUFFCNT_MAX) {
+			e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
+			      "not enough user buffers. We need an extra "
+			      "buffer because lastsize is bufflen.\n",
+			      xid, i, j, dmacount, (u64)addr);
+			goto out_noddp_free;
+		}
+
+		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
+		j++;
+		lastsize = 1;
+	}
+
 	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
 	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
 	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
+	/* Set WRCONTX bit to allow DDP for target */
+	if (target_mode)
+		fcbuff |= (IXGBE_FCBUFF_WRCONTX);
 	fcbuff |= (IXGBE_FCBUFF_VALID);
 
 	fcdmarw = xid;
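/*
 * [Editorial sketch; not part of the patch.]  The lastsize == bufflen branch
 * added above implements the "HW work around" referenced later in
 * ixgbe_configure_fcoe(): the last DDP buffer may not be completely full, so
 * the shared extra_ddp_buffer is appended and lastsize is forced to 1.  The
 * hypothetical helper below is a simplified model of that size calculation
 * (it ignores the firstoff/thisoff offset handling of the real code).
 */
static unsigned int ddp_example_last_size(unsigned int io_len)
{
	const unsigned int bufflen = 4096;	/* IXGBE_FCBUFF_MIN */
	unsigned int last = io_len % bufflen;

	/* e.g. a 16384-byte I/O fills four 4KB buffers exactly (last == 0),
	 * so an extra buffer is appended and a 1-byte last size is used. */
	return last ? last : 1;
}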
@@ -256,6 +287,16 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 	/* program DMA context */
 	hw = &adapter->hw;
 	spin_lock_bh(&fcoe->lock);
+
+	/* turn on last frame indication for target mode as the FCP target
+	 * is supposed to send FCP_RSP when it is done. */
+	if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
+		set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
+		fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
+		fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
+		IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
+	}
+
 	IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
 	IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
 	IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
@@ -264,6 +305,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 	IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
 	IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
 	IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
+
 	spin_unlock_bh(&fcoe->lock);
 
 	return 1;
@@ -278,6 +320,47 @@ out_noddp_unmap:
 }
 
 /**
+ * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
+ * and is expected to be called from ULD, e.g., FCP layer of libfc
+ * to set up ddp for the corresponding xid of the given sglist for
+ * the corresponding I/O.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ */
+int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+		       struct scatterlist *sgl, unsigned int sgc)
+{
+	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
+}
+
+/**
+ * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
+ * and is expected to be called from ULD, e.g., FCP layer of libfc
+ * to set up ddp for the corresponding xid of the given sglist for
+ * the corresponding I/O. The DDP in target mode is a write I/O request
+ * from the initiator.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ */
+int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+			  struct scatterlist *sgl, unsigned int sgc)
+{
+	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
+}
+
+/**
  * ixgbe_fcoe_ddp - check ddp status and mark it done
  * @adapter: ixgbe adapter
  * @rx_desc: advanced rx descriptor
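/*
 * [Editorial sketch; not part of the patch.]  The two wrappers above are
 * exposed through net_device_ops; the corresponding ixgbe_main.c wiring is
 * not shown in this diff, but it would be expected to look roughly like the
 * hypothetical initializer below, with ixgbe_fcoe_ddp_put presumably serving
 * as the ndo_fcoe_ddp_done callback that tears the context down again.
 */
static const struct net_device_ops example_fcoe_ndo_wiring = {
	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,		/* initiator: read I/O */
	.ndo_fcoe_ddp_target	= ixgbe_fcoe_ddp_target,	/* target: write I/O from initiator */
	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
	/* ... remaining ndo callbacks omitted ... */
};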
@@ -300,16 +383,18 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 	struct ixgbe_fcoe *fcoe;
 	struct ixgbe_fcoe_ddp *ddp;
 	struct fc_frame_header *fh;
+	struct fcoe_crc_eof *crc;
 
 	if (!ixgbe_rx_is_fcoe(rx_desc))
 		goto ddp_out;
 
-	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	sterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR);
 	fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE);
 	if (fcerr == IXGBE_FCERR_BADCRC)
-		skb->ip_summed = CHECKSUM_NONE;
+		skb_checksum_none_assert(skb);
+	else
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
 		fh = (struct fc_frame_header *)(skb->data +
@@ -331,8 +416,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 	if (!ddp->udl)
 		goto ddp_out;
 
-	ddp->err = (fcerr | fceofe);
-	if (ddp->err)
+	if (fcerr | fceofe)
 		goto ddp_out;
 
 	fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT);
@@ -343,6 +427,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 		if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) {
 			pci_unmap_sg(adapter->pdev, ddp->sgl,
 				     ddp->sgc, DMA_FROM_DEVICE);
+			ddp->err = (fcerr | fceofe);
 			ddp->sgl = NULL;
 			ddp->sgc = 0;
 		}
@@ -352,7 +437,18 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 		else if (ddp->len)
 			rc = ddp->len;
 	}
-
+	/* In target mode, check the last data frame of the sequence.
+	 * For DDP in target mode, data is already DDPed but the header
+	 * indication of the last data frame could allow us to tell if we
+	 * got all the data and the ULP can send FCP_RSP back. As this is
+	 * not a full fcoe frame, we fill the trailer here so it won't be
+	 * dropped by the ULP stack.
+	 */
+	if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
+	    (fctl & FC_FC_END_SEQ)) {
+		crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
+		crc->fcoe_eof = FC_EOF_T;
+	}
 ddp_out:
 	return rc;
 }
@@ -471,7 +567,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 
 	/* write context desc */
 	i = tx_ring->next_to_use;
-	context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+	context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
 	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
 	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
 	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
@@ -518,6 +614,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 			e_err(drv, "failed to allocated FCoE DDP pool\n");
 
 		spin_lock_init(&fcoe->lock);
+
+		/* Extra buffer to be shared by all DDPs for HW work around */
+		fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+		if (fcoe->extra_ddp_buffer == NULL) {
+			e_err(drv, "failed to allocate extra DDP buffer\n");
+			goto out_extra_ddp_buffer_alloc;
+		}
+
+		fcoe->extra_ddp_buffer_dma =
+			dma_map_single(&adapter->pdev->dev,
+				       fcoe->extra_ddp_buffer,
+				       IXGBE_FCBUFF_MIN,
+				       DMA_FROM_DEVICE);
+		if (dma_mapping_error(&adapter->pdev->dev,
+				      fcoe->extra_ddp_buffer_dma)) {
+			e_err(drv, "failed to map extra DDP buffer\n");
+			goto out_extra_ddp_buffer_dma;
+		}
 	}
 
 	/* Enable L2 eth type filter for FCoE */
@@ -567,6 +681,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 		}
 	}
 #endif
+
+	return;
+
+out_extra_ddp_buffer_dma:
+	kfree(fcoe->extra_ddp_buffer);
+out_extra_ddp_buffer_alloc:
+	pci_pool_destroy(fcoe->pool);
+	fcoe->pool = NULL;
 }
 
 /**
@@ -586,6 +708,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
 	if (fcoe->pool) {
 		for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
 			ixgbe_fcoe_ddp_put(adapter->netdev, i);
+		dma_unmap_single(&adapter->pdev->dev,
+				 fcoe->extra_ddp_buffer_dma,
+				 IXGBE_FCBUFF_MIN,
+				 DMA_FROM_DEVICE);
+		kfree(fcoe->extra_ddp_buffer);
 		pci_pool_destroy(fcoe->pool);
 		fcoe->pool = NULL;
 	}
@@ -603,11 +730,13 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
 {
 	int rc = -EINVAL;
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 
 
 	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
 		goto out_enable;
 
+	atomic_inc(&fcoe->refcnt);
 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
 		goto out_enable;
 
@@ -647,6 +776,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
 {
 	int rc = -EINVAL;
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 
 	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
 		goto out_disable;
@@ -654,6 +784,9 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
 	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
 		goto out_disable;
 
+	if (!atomic_dec_and_test(&fcoe->refcnt))
+		goto out_disable;
+
 	e_info(drv, "Disabling FCoE offload features.\n");
 	netdev->features &= ~NETIF_F_FCOE_CRC;
 	netdev->features &= ~NETIF_F_FSO;
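/*
 * [Editorial sketch; not part of the patch.]  The refcnt, mode and
 * extra_ddp_buffer* members used throughout this diff come from a companion
 * change to struct ixgbe_fcoe in ixgbe.h that is not shown here.  The new
 * fields are assumed to look roughly like this (existing members elided):
 */
struct ixgbe_fcoe {
	/* ... existing members: pool, lock, ddp[IXGBE_FCOE_DDP_MAX], ... */
	atomic_t refcnt;			/* nested enable/disable counting */
	unsigned long mode;			/* carries the __IXGBE_FCOE_TARGET bit */
	unsigned char *extra_ddp_buffer;	/* shared buffer for the lastsize workaround */
	dma_addr_t extra_ddp_buffer_dma;	/* its DMA mapping */
};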
@@ -680,21 +813,6 @@ out_disable:
 
 #ifdef CONFIG_IXGBE_DCB
 /**
- * ixgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE
- * @adapter : ixgbe adapter
- *
- * Finds out the corresponding user priority bitmap from the current
- * traffic class that FCoE belongs to. Returns 0 as the invalid user
- * priority bitmap to indicate an error.
- *
- * Returns : 802.1p user priority bitmap for FCoE
- */
-u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter)
-{
-	return 1 << adapter->fcoe.up;
-}
-
-/**
  * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE
  * @adapter : ixgbe adapter
  * @up : 802.1p user priority bitmap
@@ -771,5 +889,3 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
 	}
 	return rc;
 }
-
-