aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/fcoe/fcoe.c
diff options
context:
space:
mode:
authorJoe Eykholt <jeykholt@cisco.com>2009-08-25 17:04:08 -0400
committerJames Bottomley <James.Bottomley@suse.de>2009-09-10 13:08:04 -0400
commite7a51997dad4e17395be1209970e18d2e9305b24 (patch)
tree153db22788bdd7302c6bd58f91a5d5c0da0a4ab6 /drivers/scsi/fcoe/fcoe.c
parent1d490ce33ee8b93638d09e471a3bc66ae33b6606 (diff)
[SCSI] fcoe: flush per-cpu thread work when destroying interface
This fixes one cause of an occasional problem when unloading libfc where the exchange manager pool doesn't have all items freed. The existing WARN_ON(mp->total_exches <= 0) isn't hit. However, note that total_exches is decremented when the exchange is completed, and it can be held with a refcnt for a while after that. I'm not sure what the offending exchange is, but I suspect it is an incoming request, because outgoing state machines should be all stopped at this point. Note that although receive is stopped before the exchange manager is freed, there could still be active threads handling received frames. This patch flushes the queues by allocating a new skb and sending it through, and having the thread handle this new skb specially. This is similar to the way the work queues are flushed now by putting work items in them and waiting until they make it through the queue. An skb->destructor function is used to inform us of the completion of the flush, and the fr_dev() is left NULL to indicate to fcoe_percpu_receive_thread() that the skb should be just freed. There's already a check for the lp being NULL which prints a message. We skip printing the message if the destructor is for flushing. Signed-off-by: Joe Eykholt <jeykholt@cisco.com> Signed-off-by: Robert Love <robert.w.love@intel.com> Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/fcoe/fcoe.c')
-rw-r--r--drivers/scsi/fcoe/fcoe.c42
1 files changed, 40 insertions, 2 deletions
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index ac481ad112ad..704b8e034946 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -57,6 +57,9 @@ MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
57 57
58DEFINE_MUTEX(fcoe_config_mutex); 58DEFINE_MUTEX(fcoe_config_mutex);
59 59
60/* fcoe_percpu_clean completion. Waiter protected by fcoe_create_mutex */
61static DECLARE_COMPLETION(fcoe_flush_completion);
62
60/* fcoe host list */ 63/* fcoe host list */
61/* must only by accessed under the RTNL mutex */ 64/* must only by accessed under the RTNL mutex */
62LIST_HEAD(fcoe_hostlist); 65LIST_HEAD(fcoe_hostlist);
@@ -827,7 +830,7 @@ static void fcoe_percpu_thread_create(unsigned int cpu)
827 thread = kthread_create(fcoe_percpu_receive_thread, 830 thread = kthread_create(fcoe_percpu_receive_thread,
828 (void *)p, "fcoethread/%d", cpu); 831 (void *)p, "fcoethread/%d", cpu);
829 832
830 if (likely(!IS_ERR(p->thread))) { 833 if (likely(!IS_ERR(thread))) {
831 kthread_bind(thread, cpu); 834 kthread_bind(thread, cpu);
832 wake_up_process(thread); 835 wake_up_process(thread);
833 836
@@ -1300,6 +1303,15 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1300} 1303}
1301 1304
1302/** 1305/**
1306 * fcoe_percpu_flush_done() - Indicate percpu queue flush completion.
1307 * @skb: the skb being completed.
1308 */
1309static void fcoe_percpu_flush_done(struct sk_buff *skb)
1310{
1311 complete(&fcoe_flush_completion);
1312}
1313
1314/**
1303 * fcoe_percpu_receive_thread() - recv thread per cpu 1315 * fcoe_percpu_receive_thread() - recv thread per cpu
1304 * @arg: ptr to the fcoe per cpu struct 1316 * @arg: ptr to the fcoe per cpu struct
1305 * 1317 *
@@ -1338,7 +1350,8 @@ int fcoe_percpu_receive_thread(void *arg)
1338 fr = fcoe_dev_from_skb(skb); 1350 fr = fcoe_dev_from_skb(skb);
1339 lp = fr->fr_dev; 1351 lp = fr->fr_dev;
1340 if (unlikely(lp == NULL)) { 1352 if (unlikely(lp == NULL)) {
1341 FCOE_NETDEV_DBG(skb->dev, "Invalid HBA Structure"); 1353 if (skb->destructor != fcoe_percpu_flush_done)
1354 FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
1342 kfree_skb(skb); 1355 kfree_skb(skb);
1343 continue; 1356 continue;
1344 } 1357 }
@@ -1799,6 +1812,13 @@ int fcoe_link_ok(struct fc_lport *lp)
1799/** 1812/**
1800 * fcoe_percpu_clean() - Clear the pending skbs for an lport 1813 * fcoe_percpu_clean() - Clear the pending skbs for an lport
1801 * @lp: the fc_lport 1814 * @lp: the fc_lport
1815 *
1816 * Must be called with fcoe_create_mutex held to single-thread completion.
1817 *
1818 * This flushes the pending skbs by adding a new skb to each queue and
1819 * waiting until they are all freed. This assures us that not only are
1820 * there no packets that will be handled by the lport, but also that any
1821 * threads already handling packet have returned.
1802 */ 1822 */
1803void fcoe_percpu_clean(struct fc_lport *lp) 1823void fcoe_percpu_clean(struct fc_lport *lp)
1804{ 1824{
@@ -1823,7 +1843,25 @@ void fcoe_percpu_clean(struct fc_lport *lp)
1823 kfree_skb(skb); 1843 kfree_skb(skb);
1824 } 1844 }
1825 } 1845 }
1846
1847 if (!pp->thread || !cpu_online(cpu)) {
1848 spin_unlock_bh(&pp->fcoe_rx_list.lock);
1849 continue;
1850 }
1851
1852 skb = dev_alloc_skb(0);
1853 if (!skb) {
1854 spin_unlock_bh(&pp->fcoe_rx_list.lock);
1855 continue;
1856 }
1857 skb->destructor = fcoe_percpu_flush_done;
1858
1859 __skb_queue_tail(&pp->fcoe_rx_list, skb);
1860 if (pp->fcoe_rx_list.qlen == 1)
1861 wake_up_process(pp->thread);
1826 spin_unlock_bh(&pp->fcoe_rx_list.lock); 1862 spin_unlock_bh(&pp->fcoe_rx_list.lock);
1863
1864 wait_for_completion(&fcoe_flush_completion);
1827 } 1865 }
1828} 1866}
1829 1867