author		Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2016-04-12 11:16:54 -0400
committer	Martin K. Petersen <martin.petersen@oracle.com>	2016-07-12 23:16:31 -0400
commit		4b9bc86d5a999e344098303882d6395d39e36c13 (patch)
tree		64a1ac75653772c9240cf840be8de26bcb69fdda
parent		49a75815e996a8719463090d9666bd120d9bae91 (diff)
fcoe: convert to kworker

The driver creates its own per-CPU threads which are updated based on
CPU hotplug events. It is also possible to use kworkers instead and
remove some of the kthread infrastructure.
The code used to check ->thread to decide whether there is an active
per-CPU thread. With the kworker infrastructure this is no longer
possible (nor required). The thread pointer is saved in `kthread'
instead of `thread' so anything still trying to use `thread' is caught
by the compiler. Currently only the bnx2fc driver uses struct
fcoe_percpu_s and the kthread member.
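
For reference, a minimal sketch of the per-CPU kworker pattern this
conversion relies on; the names (rx_ctx, rx_work_fn, rx_enqueue,
process_frame) are illustrative placeholders, not symbols from this
patch:

	#include <linux/percpu.h>
	#include <linux/skbuff.h>
	#include <linux/workqueue.h>

	/* Stand-in for struct fcoe_percpu_s. */
	struct rx_ctx {
		struct work_struct work;
		struct sk_buff_head rx_list;
	};

	static DEFINE_PER_CPU(struct rx_ctx, rx_ctx);

	static void process_frame(struct sk_buff *skb); /* hypothetical consumer */

	/* Runs on the kworker of whichever CPU the work was scheduled on. */
	static void rx_work_fn(struct work_struct *work)
	{
		struct rx_ctx *ctx = container_of(work, struct rx_ctx, work);
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&ctx->rx_list)))
			process_frame(skb);
	}

	/* Producer path: queue the skb and kick the per-CPU worker. No
	 * ->thread liveness check is needed, since schedule_work_on()
	 * always has a kworker available to run the item. */
	static void rx_enqueue(unsigned int cpu, struct sk_buff *skb)
	{
		struct rx_ctx *ctx = per_cpu_ptr(&rx_ctx, cpu);

		skb_queue_tail(&ctx->rx_list, skb);
		schedule_work_on(cpu, &ctx->work);
	}

Each context would be set up once with INIT_WORK() and
skb_queue_head_init() before anything is scheduled, as the patch does
for fcoe_percpu in fcoe_init().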
After a CPU goes offline, we may still enqueue items on the "offline"
CPU. This isn't much of a problem: the work will simply be done on a
random CPU. The allocated crc_eof_page won't be cleaned up immediately,
but since the CPU is expected to come back online at some point, this
should not be a problem. The crc_eof_page memory is, of course,
released once the module is removed.
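
A sketch of the teardown side of this reasoning, reusing the
hypothetical rx_ctx above: iterate over every possible CPU rather than
only the online ones, because items may sit on a CPU that has since
gone offline:

	/* Module exit: flush each possible CPU's worker, then drop
	 * anything still queued. */
	static void rx_cleanup_all(void)
	{
		unsigned int cpu;

		for_each_possible_cpu(cpu) {
			struct rx_ctx *ctx = per_cpu_ptr(&rx_ctx, cpu);

			flush_work(&ctx->work);
			skb_queue_purge(&ctx->rx_list);
		}
	}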
This patch was only compile-tested due to -ENODEV.
Cc: Vasu Dev <vasu.dev@intel.com>
Cc: "James E.J. Bottomley" <jejb@linux.vnet.ibm.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: fcoe-devel@open-fcoe.org
Cc: linux-scsi@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Tested-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
-rw-r--r--	drivers/scsi/bnx2fc/bnx2fc_fcoe.c	|   8
-rw-r--r--	drivers/scsi/fcoe/fcoe.c		| 276
-rw-r--r--	include/scsi/libfcoe.h			|   6
3 files changed, 34 insertions(+), 256 deletions(-)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index a1881993982c..d6800afd0232 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -486,7 +486,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
 
 	__skb_queue_tail(&bg->fcoe_rx_list, skb);
 	if (bg->fcoe_rx_list.qlen == 1)
-		wake_up_process(bg->thread);
+		wake_up_process(bg->kthread);
 
 	spin_unlock(&bg->fcoe_rx_list.lock);
 
@@ -2715,7 +2715,7 @@ static int __init bnx2fc_mod_init(void)
 	}
 	wake_up_process(l2_thread);
 	spin_lock_bh(&bg->fcoe_rx_list.lock);
-	bg->thread = l2_thread;
+	bg->kthread = l2_thread;
 	spin_unlock_bh(&bg->fcoe_rx_list.lock);
 
 	for_each_possible_cpu(cpu) {
@@ -2788,8 +2788,8 @@ static void __exit bnx2fc_mod_exit(void)
 	/* Destroy global thread */
 	bg = &bnx2fc_global;
 	spin_lock_bh(&bg->fcoe_rx_list.lock);
-	l2_thread = bg->thread;
-	bg->thread = NULL;
+	l2_thread = bg->kthread;
+	bg->kthread = NULL;
 	while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL)
 		kfree_skb(skb);
 
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 0efe7112fc1f..f7c7ccc156da 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -67,9 +67,6 @@ static DEFINE_MUTEX(fcoe_config_mutex);
 
 static struct workqueue_struct *fcoe_wq;
 
-/* fcoe_percpu_clean completion. Waiter protected by fcoe_create_mutex */
-static DECLARE_COMPLETION(fcoe_flush_completion);
-
 /* fcoe host list */
 /* must only by accessed under the RTNL mutex */
 static LIST_HEAD(fcoe_hostlist);
@@ -80,7 +77,6 @@ static int fcoe_reset(struct Scsi_Host *);
 static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
 static int fcoe_rcv(struct sk_buff *, struct net_device *,
 		    struct packet_type *, struct net_device *);
-static int fcoe_percpu_receive_thread(void *);
 static void fcoe_percpu_clean(struct fc_lport *);
 static int fcoe_link_ok(struct fc_lport *);
 
@@ -107,7 +103,6 @@ static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *,
 static int fcoe_ddp_done(struct fc_lport *, u16);
 static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
 			   unsigned int);
-static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
 static int fcoe_dcb_app_notification(struct notifier_block *notifier,
 				     ulong event, void *ptr);
 
@@ -136,11 +131,6 @@ static struct notifier_block fcoe_notifier = {
 	.notifier_call = fcoe_device_notification,
 };
 
-/* notification function for CPU hotplug events */
-static struct notifier_block fcoe_cpu_notifier = {
-	.notifier_call = fcoe_cpu_callback,
-};
-
 /* notification function for DCB events */
 static struct notifier_block dcb_notifier = {
 	.notifier_call = fcoe_dcb_app_notification,
@@ -1245,152 +1235,21 @@ static int __exit fcoe_if_exit(void)
 	return 0;
 }
 
-/**
- * fcoe_percpu_thread_create() - Create a receive thread for an online CPU
- * @cpu: The CPU index of the CPU to create a receive thread for
- */
-static void fcoe_percpu_thread_create(unsigned int cpu)
+static void fcoe_thread_cleanup_local(unsigned int cpu)
 {
-	struct fcoe_percpu_s *p;
-	struct task_struct *thread;
-
-	p = &per_cpu(fcoe_percpu, cpu);
-
-	thread = kthread_create_on_node(fcoe_percpu_receive_thread,
-					(void *)p, cpu_to_node(cpu),
-					"fcoethread/%d", cpu);
-
-	if (likely(!IS_ERR(thread))) {
-		kthread_bind(thread, cpu);
-		wake_up_process(thread);
-
-		spin_lock_bh(&p->fcoe_rx_list.lock);
-		p->thread = thread;
-		spin_unlock_bh(&p->fcoe_rx_list.lock);
-	}
-}
-
-/**
- * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU
- * @cpu: The CPU index of the CPU whose receive thread is to be destroyed
- *
- * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
- * current CPU's Rx thread. If the thread being destroyed is bound to
- * the CPU processing this context the skbs will be freed.
- */
-static void fcoe_percpu_thread_destroy(unsigned int cpu)
-{
-	struct fcoe_percpu_s *p;
-	struct task_struct *thread;
 	struct page *crc_eof;
-	struct sk_buff *skb;
-#ifdef CONFIG_SMP
-	struct fcoe_percpu_s *p0;
-	unsigned targ_cpu = get_cpu();
-#endif /* CONFIG_SMP */
-
-	FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
+	struct fcoe_percpu_s *p;
 
-	/* Prevent any new skbs from being queued for this CPU. */
-	p = &per_cpu(fcoe_percpu, cpu);
+	p = per_cpu_ptr(&fcoe_percpu, cpu);
 	spin_lock_bh(&p->fcoe_rx_list.lock);
-	thread = p->thread;
-	p->thread = NULL;
 	crc_eof = p->crc_eof_page;
 	p->crc_eof_page = NULL;
 	p->crc_eof_offset = 0;
 	spin_unlock_bh(&p->fcoe_rx_list.lock);
 
-#ifdef CONFIG_SMP
-	/*
-	 * Don't bother moving the skb's if this context is running
-	 * on the same CPU that is having its thread destroyed. This
-	 * can easily happen when the module is removed.
-	 */
-	if (cpu != targ_cpu) {
-		p0 = &per_cpu(fcoe_percpu, targ_cpu);
-		spin_lock_bh(&p0->fcoe_rx_list.lock);
-		if (p0->thread) {
-			FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
-				 cpu, targ_cpu);
-
-			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
-				__skb_queue_tail(&p0->fcoe_rx_list, skb);
-			spin_unlock_bh(&p0->fcoe_rx_list.lock);
-		} else {
-			/*
-			 * The targeted CPU is not initialized and cannot accept
-			 * new skbs. Unlock the targeted CPU and drop the skbs
-			 * on the CPU that is going offline.
-			 */
-			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
-				kfree_skb(skb);
-			spin_unlock_bh(&p0->fcoe_rx_list.lock);
-		}
-	} else {
-		/*
-		 * This scenario occurs when the module is being removed
-		 * and all threads are being destroyed. skbs will continue
-		 * to be shifted from the CPU thread that is being removed
-		 * to the CPU thread associated with the CPU that is processing
-		 * the module removal. Once there is only one CPU Rx thread it
-		 * will reach this case and we will drop all skbs and later
-		 * stop the thread.
-		 */
-		spin_lock_bh(&p->fcoe_rx_list.lock);
-		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
-			kfree_skb(skb);
-		spin_unlock_bh(&p->fcoe_rx_list.lock);
-	}
-	put_cpu();
-#else
-	/*
-	 * This a non-SMP scenario where the singular Rx thread is
-	 * being removed. Free all skbs and stop the thread.
-	 */
-	spin_lock_bh(&p->fcoe_rx_list.lock);
-	while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
-		kfree_skb(skb);
-	spin_unlock_bh(&p->fcoe_rx_list.lock);
-#endif
-
-	if (thread)
-		kthread_stop(thread);
-
 	if (crc_eof)
 		put_page(crc_eof);
-}
-
-/**
- * fcoe_cpu_callback() - Handler for CPU hotplug events
- * @nfb: The callback data block
- * @action: The event triggering the callback
- * @hcpu: The index of the CPU that the event is for
- *
- * This creates or destroys per-CPU data for fcoe
- *
- * Returns NOTIFY_OK always.
- */
-static int fcoe_cpu_callback(struct notifier_block *nfb,
-			     unsigned long action, void *hcpu)
-{
-	unsigned cpu = (unsigned long)hcpu;
-
-	switch (action) {
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
-		fcoe_percpu_thread_create(cpu);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
-		fcoe_percpu_thread_destroy(cpu);
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_OK;
+	flush_work(&p->work);
 }
 
 /**
@@ -1509,26 +1368,6 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 
 	fps = &per_cpu(fcoe_percpu, cpu);
 	spin_lock(&fps->fcoe_rx_list.lock);
-	if (unlikely(!fps->thread)) {
-		/*
-		 * The targeted CPU is not ready, let's target
-		 * the first CPU now. For non-SMP systems this
-		 * will check the same CPU twice.
-		 */
-		FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread "
-				"ready for incoming skb- using first online "
-				"CPU.\n");
-
-		spin_unlock(&fps->fcoe_rx_list.lock);
-		cpu = cpumask_first(cpu_online_mask);
-		fps = &per_cpu(fcoe_percpu, cpu);
-		spin_lock(&fps->fcoe_rx_list.lock);
-		if (!fps->thread) {
-			spin_unlock(&fps->fcoe_rx_list.lock);
-			goto err;
-		}
-	}
-
 	/*
 	 * We now have a valid CPU that we're targeting for
 	 * this skb. We also have this receive thread locked,
@@ -1543,8 +1382,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 	 * in softirq context.
 	 */
 	__skb_queue_tail(&fps->fcoe_rx_list, skb);
-	if (fps->thread->state == TASK_INTERRUPTIBLE)
-		wake_up_process(fps->thread);
+	schedule_work_on(cpu, &fps->work);
 	spin_unlock(&fps->fcoe_rx_list.lock);
 
 	return NET_RX_SUCCESS;
@@ -1713,15 +1551,6 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 }
 
 /**
- * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion
- * @skb: The completed skb (argument required by destructor)
- */
-static void fcoe_percpu_flush_done(struct sk_buff *skb)
-{
-	complete(&fcoe_flush_completion);
-}
-
-/**
  * fcoe_filter_frames() - filter out bad fcoe frames, i.e. bad CRC
  * @lport: The local port the frame was received on
  * @fp: The received frame
@@ -1792,8 +1621,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
 	fr = fcoe_dev_from_skb(skb);
 	lport = fr->fr_dev;
 	if (unlikely(!lport)) {
-		if (skb->destructor != fcoe_percpu_flush_done)
-			FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n");
+		FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n");
 		kfree_skb(skb);
 		return;
 	}
@@ -1857,40 +1685,28 @@ drop:
 }
 
 /**
- * fcoe_percpu_receive_thread() - The per-CPU packet receive thread
- * @arg: The per-CPU context
+ * fcoe_receive_work() - The per-CPU worker
+ * @work: The work struct
  *
- * Return: 0 for success
  */
-static int fcoe_percpu_receive_thread(void *arg)
+static void fcoe_receive_work(struct work_struct *work)
 {
-	struct fcoe_percpu_s *p = arg;
+	struct fcoe_percpu_s *p;
 	struct sk_buff *skb;
 	struct sk_buff_head tmp;
 
+	p = container_of(work, struct fcoe_percpu_s, work);
 	skb_queue_head_init(&tmp);
 
-	set_user_nice(current, MIN_NICE);
-
-	while (!kthread_should_stop()) {
-
-		spin_lock_bh(&p->fcoe_rx_list.lock);
-		skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
-
-		if (!skb_queue_len(&tmp)) {
-			set_current_state(TASK_INTERRUPTIBLE);
-			spin_unlock_bh(&p->fcoe_rx_list.lock);
-			schedule();
-			continue;
-		}
-
-		spin_unlock_bh(&p->fcoe_rx_list.lock);
+	spin_lock_bh(&p->fcoe_rx_list.lock);
+	skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
+	spin_unlock_bh(&p->fcoe_rx_list.lock);
 
-		while ((skb = __skb_dequeue(&tmp)) != NULL)
-			fcoe_recv_frame(skb);
+	if (!skb_queue_len(&tmp))
+		return;
 
-	}
-	return 0;
+	while ((skb = __skb_dequeue(&tmp)))
+		fcoe_recv_frame(skb);
 }
 
 /**
@@ -2450,36 +2266,19 @@ static int fcoe_link_ok(struct fc_lport *lport)
  *
  * Must be called with fcoe_create_mutex held to single-thread completion.
  *
- * This flushes the pending skbs by adding a new skb to each queue and
- * waiting until they are all freed.  This assures us that not only are
- * there no packets that will be handled by the lport, but also that any
- * threads already handling packet have returned.
+ * This flushes the pending skbs by flushing the work item for each CPU. The
+ * work item on each possible CPU is flushed because we may have used the
+ * per-CPU struct of an offline CPU.
  */
 static void fcoe_percpu_clean(struct fc_lport *lport)
 {
 	struct fcoe_percpu_s *pp;
-	struct sk_buff *skb;
 	unsigned int cpu;
 
 	for_each_possible_cpu(cpu) {
 		pp = &per_cpu(fcoe_percpu, cpu);
 
-		if (!pp->thread || !cpu_online(cpu))
-			continue;
-
-		skb = dev_alloc_skb(0);
-		if (!skb)
-			continue;
-
-		skb->destructor = fcoe_percpu_flush_done;
-
-		spin_lock_bh(&pp->fcoe_rx_list.lock);
-		__skb_queue_tail(&pp->fcoe_rx_list, skb);
-		if (pp->fcoe_rx_list.qlen == 1)
-			wake_up_process(pp->thread);
-		spin_unlock_bh(&pp->fcoe_rx_list.lock);
-
-		wait_for_completion(&fcoe_flush_completion);
+		flush_work(&pp->work);
 	}
 }
 
@@ -2625,22 +2424,11 @@ static int __init fcoe_init(void)
 	mutex_lock(&fcoe_config_mutex);
 
 	for_each_possible_cpu(cpu) {
-		p = &per_cpu(fcoe_percpu, cpu);
+		p = per_cpu_ptr(&fcoe_percpu, cpu);
+		INIT_WORK(&p->work, fcoe_receive_work);
 		skb_queue_head_init(&p->fcoe_rx_list);
 	}
 
-	cpu_notifier_register_begin();
-
-	for_each_online_cpu(cpu)
-		fcoe_percpu_thread_create(cpu);
-
-	/* Initialize per CPU interrupt thread */
-	rc = __register_hotcpu_notifier(&fcoe_cpu_notifier);
-	if (rc)
-		goto out_free;
-
-	cpu_notifier_register_done();
-
 	/* Setup link change notification */
 	fcoe_dev_setup();
 
@@ -2652,12 +2440,6 @@ static int __init fcoe_init(void)
 	return 0;
 
 out_free:
-	for_each_online_cpu(cpu) {
-		fcoe_percpu_thread_destroy(cpu);
-	}
-
-	cpu_notifier_register_done();
-
 	mutex_unlock(&fcoe_config_mutex);
 	destroy_workqueue(fcoe_wq);
 	return rc;
@@ -2690,14 +2472,8 @@ static void __exit fcoe_exit(void)
 	}
 	rtnl_unlock();
 
-	cpu_notifier_register_begin();
-
-	for_each_online_cpu(cpu)
-		fcoe_percpu_thread_destroy(cpu);
-
-	__unregister_hotcpu_notifier(&fcoe_cpu_notifier);
-
-	cpu_notifier_register_done();
+	for_each_possible_cpu(cpu)
+		fcoe_thread_cleanup_local(cpu);
 
 	mutex_unlock(&fcoe_config_mutex);
 
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index de7e3ee60f0c..c6fbbb6581d3 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -319,14 +319,16 @@ struct fcoe_transport {
 
 /**
  * struct fcoe_percpu_s - The context for FCoE receive thread(s)
- * @thread:	    The thread context
+ * @kthread:	    The thread context (used by bnx2fc)
+ * @work:	    The work item (used by fcoe)
  * @fcoe_rx_list:   The queue of pending packets to process
  * @page:	    The memory page for calculating frame trailer CRCs
  * @crc_eof_offset: The offset into the CRC page pointing to available
  *		    memory for a new trailer
  */
 struct fcoe_percpu_s {
-	struct task_struct *thread;
+	struct task_struct *kthread;
+	struct work_struct work;
 	struct sk_buff_head fcoe_rx_list;
 	struct page *crc_eof_page;
 	int crc_eof_offset;