aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi
diff options
context:
space:
mode:
authorRobert Love <robert.w.love@intel.com>2009-03-17 14:41:46 -0400
committerJames Bottomley <James.Bottomley@HansenPartnership.com>2009-04-03 10:22:59 -0400
commit8976f424d43c80ea32b6e847226e1a8ccdb6e748 (patch)
tree361d16be95cac84197e52411ed63ccfebfb88aff /drivers/scsi
parent582b45bc577f78b5bfff3db874594ce2d962b846 (diff)
[SCSI] fcoe: create/destroy fcoe Rx threads on CPU hotplug events
This patch adds support for dynamically created Rx threads upon CPU hotplug events. There were existing synchronization problems that this patch attempts to resolve. The main problem had to do with fcoe_rcv() running in a different context than the hotplug notifications. This opened the possibility that fcoe_rcv() would target a Rx thread for a skb. However, that thread could become NULL if the CPU was made offline. This patch uses the Rx queue's (a skb_queue) lock to protect the thread it's associated with and we use the 'thread' member of the fcoe_percpu_s to determine if the thread is ready to accept new skbs. The patch also attempts to do a better job of cleaning up, both if hotplug registration fails as well as when the module is removed. Contribution provided by Joe Eykholt <jeykholt@cisco.com> to fix incorrect use of __cpuinitdata. Signed-off-by: Yi Zou <yi.zou@intel.com> Signed-off-by: Robert Love <robert.w.love@intel.com> Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/scsi')
-rw-r--r--drivers/scsi/fcoe/libfcoe.c246
1 files changed, 199 insertions, 47 deletions
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 648a2fc04271..951d2448ad61 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -81,6 +81,156 @@ static struct notifier_block fcoe_notifier = {
81}; 81};
82 82
83/** 83/**
84 * fcoe_percpu_thread_create() - Create a receive thread for an online cpu
85 * @cpu: cpu index for the online cpu
86 */
87static void fcoe_percpu_thread_create(unsigned int cpu)
88{
89 struct fcoe_percpu_s *p;
90 struct task_struct *thread;
91
92 p = &per_cpu(fcoe_percpu, cpu);
93
94 thread = kthread_create(fcoe_percpu_receive_thread,
95 (void *)p, "fcoethread/%d", cpu);
96
97 if (likely(!IS_ERR(p->thread))) {
98 kthread_bind(thread, cpu);
99 wake_up_process(thread);
100
101 spin_lock_bh(&p->fcoe_rx_list.lock);
102 p->thread = thread;
103 spin_unlock_bh(&p->fcoe_rx_list.lock);
104 }
105}
106
107/**
108 * fcoe_percpu_thread_destroy() - removes the rx thread for the given cpu
109 * @cpu: cpu index the rx thread is to be removed
110 *
111 * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
112 * current CPU's Rx thread. If the thread being destroyed is bound to
113 * the CPU processing this context the skbs will be freed.
114 */
115static void fcoe_percpu_thread_destroy(unsigned int cpu)
116{
117 struct fcoe_percpu_s *p;
118 struct task_struct *thread;
119 struct page *crc_eof;
120 struct sk_buff *skb;
121#ifdef CONFIG_SMP
122 struct fcoe_percpu_s *p0;
123 unsigned targ_cpu = smp_processor_id();
124#endif /* CONFIG_SMP */
125
126 printk(KERN_DEBUG "fcoe: Destroying receive thread for CPU %d\n", cpu);
127
128 /* Prevent any new skbs from being queued for this CPU. */
129 p = &per_cpu(fcoe_percpu, cpu);
130 spin_lock_bh(&p->fcoe_rx_list.lock);
131 thread = p->thread;
132 p->thread = NULL;
133 crc_eof = p->crc_eof_page;
134 p->crc_eof_page = NULL;
135 p->crc_eof_offset = 0;
136 spin_unlock_bh(&p->fcoe_rx_list.lock);
137
138#ifdef CONFIG_SMP
139 /*
140 * Don't bother moving the skb's if this context is running
141 * on the same CPU that is having its thread destroyed. This
142 * can easily happen when the module is removed.
143 */
144 if (cpu != targ_cpu) {
145 p0 = &per_cpu(fcoe_percpu, targ_cpu);
146 spin_lock_bh(&p0->fcoe_rx_list.lock);
147 if (p0->thread) {
148 FC_DBG("Moving frames from CPU %d to CPU %d\n",
149 cpu, targ_cpu);
150
151 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
152 __skb_queue_tail(&p0->fcoe_rx_list, skb);
153 spin_unlock_bh(&p0->fcoe_rx_list.lock);
154 } else {
155 /*
156 * The targeted CPU is not initialized and cannot accept
157 * new skbs. Unlock the targeted CPU and drop the skbs
158 * on the CPU that is going offline.
159 */
160 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
161 kfree_skb(skb);
162 spin_unlock_bh(&p0->fcoe_rx_list.lock);
163 }
164 } else {
165 /*
166 * This scenario occurs when the module is being removed
167 * and all threads are being destroyed. skbs will continue
168 * to be shifted from the CPU thread that is being removed
169 * to the CPU thread associated with the CPU that is processing
170 * the module removal. Once there is only one CPU Rx thread it
171 * will reach this case and we will drop all skbs and later
172 * stop the thread.
173 */
174 spin_lock_bh(&p->fcoe_rx_list.lock);
175 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
176 kfree_skb(skb);
177 spin_unlock_bh(&p->fcoe_rx_list.lock);
178 }
179#else
180 /*
 181	 * This is a non-SMP scenario where the singular Rx thread is
182 * being removed. Free all skbs and stop the thread.
183 */
184 spin_lock_bh(&p->fcoe_rx_list.lock);
185 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
186 kfree_skb(skb);
187 spin_unlock_bh(&p->fcoe_rx_list.lock);
188#endif
189
190 if (thread)
191 kthread_stop(thread);
192
193 if (crc_eof)
194 put_page(crc_eof);
195}
196
197/**
198 * fcoe_cpu_callback() - fcoe cpu hotplug event callback
199 * @nfb: callback data block
200 * @action: event triggering the callback
201 * @hcpu: index for the cpu of this event
202 *
203 * This creates or destroys per cpu data for fcoe
204 *
205 * Returns NOTIFY_OK always.
206 */
207static int fcoe_cpu_callback(struct notifier_block *nfb,
208 unsigned long action, void *hcpu)
209{
210 unsigned cpu = (unsigned long)hcpu;
211
212 switch (action) {
213 case CPU_ONLINE:
214 case CPU_ONLINE_FROZEN:
215 FC_DBG("CPU %x online: Create Rx thread\n", cpu);
216 fcoe_percpu_thread_create(cpu);
217 break;
218 case CPU_DEAD:
219 case CPU_DEAD_FROZEN:
220 FC_DBG("CPU %x offline: Remove Rx thread\n", cpu);
221 fcoe_percpu_thread_destroy(cpu);
222 break;
223 default:
224 break;
225 }
226 return NOTIFY_OK;
227}
228
229static struct notifier_block fcoe_cpu_notifier = {
230 .notifier_call = fcoe_cpu_callback,
231};
232
233/**
84 * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ 234 * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ
85 * @skb: the receive skb 235 * @skb: the receive skb
86 * @dev: associated net device 236 * @dev: associated net device
@@ -100,7 +250,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
100 struct fc_frame_header *fh; 250 struct fc_frame_header *fh;
101 struct fcoe_percpu_s *fps; 251 struct fcoe_percpu_s *fps;
102 unsigned short oxid; 252 unsigned short oxid;
103 unsigned int cpu_idx; 253 unsigned int cpu = 0;
104 254
105 fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type); 255 fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
106 lp = fc->lp; 256 lp = fc->lp;
@@ -140,25 +290,42 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
140 fr = fcoe_dev_from_skb(skb); 290 fr = fcoe_dev_from_skb(skb);
141 fr->fr_dev = lp; 291 fr->fr_dev = lp;
142 fr->ptype = ptype; 292 fr->ptype = ptype;
143 cpu_idx = 0;
144 293
145#ifdef CONFIG_SMP 294#ifdef CONFIG_SMP
146 /* 295 /*
147 * The incoming frame exchange id(oxid) is ANDed with num of online 296 * The incoming frame exchange id(oxid) is ANDed with num of online
148 * cpu bits to get cpu_idx and then this cpu_idx is used for selecting 297 * cpu bits to get cpu and then this cpu is used for selecting
149 * a per cpu kernel thread from fcoe_percpu. In case the cpu is 298 * a per cpu kernel thread from fcoe_percpu.
150 * offline or no kernel thread for derived cpu_idx then cpu_idx is
151 * initialize to first online cpu index.
152 */ 299 */
153 cpu_idx = oxid & (num_online_cpus() - 1); 300 cpu = oxid & (num_online_cpus() - 1);
154 if (!cpu_online(cpu_idx))
155 cpu_idx = first_cpu(cpu_online_map);
156
157#endif 301#endif
158 302
159 fps = &per_cpu(fcoe_percpu, cpu_idx); 303 fps = &per_cpu(fcoe_percpu, cpu);
160
161 spin_lock_bh(&fps->fcoe_rx_list.lock); 304 spin_lock_bh(&fps->fcoe_rx_list.lock);
305 if (unlikely(!fps->thread)) {
306 /*
307 * The targeted CPU is not ready, let's target
308 * the first CPU now. For non-SMP systems this
309 * will check the same CPU twice.
310 */
311 FC_DBG("CPU is online, but no receive thread ready "
312 "for incoming skb- using first online CPU.\n");
313
314 spin_unlock_bh(&fps->fcoe_rx_list.lock);
315 cpu = first_cpu(cpu_online_map);
316 fps = &per_cpu(fcoe_percpu, cpu);
317 spin_lock_bh(&fps->fcoe_rx_list.lock);
318 if (!fps->thread) {
319 spin_unlock_bh(&fps->fcoe_rx_list.lock);
320 goto err;
321 }
322 }
323
324 /*
325 * We now have a valid CPU that we're targeting for
326 * this skb. We also have this receive thread locked,
 327	 * so we're free to queue skbs into its queue.
328 */
162 __skb_queue_tail(&fps->fcoe_rx_list, skb); 329 __skb_queue_tail(&fps->fcoe_rx_list, skb);
163 if (fps->fcoe_rx_list.qlen == 1) 330 if (fps->fcoe_rx_list.qlen == 1)
164 wake_up_process(fps->thread); 331 wake_up_process(fps->thread);
@@ -214,7 +381,7 @@ static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
214 return -ENOMEM; 381 return -ENOMEM;
215 } 382 }
216 fps->crc_eof_page = page; 383 fps->crc_eof_page = page;
217 WARN_ON(fps->crc_eof_offset != 0); 384 fps->crc_eof_offset = 0;
218 } 385 }
219 386
220 get_page(page); 387 get_page(page);
@@ -1271,6 +1438,7 @@ EXPORT_SYMBOL_GPL(fcoe_libfc_config);
1271static int __init fcoe_init(void) 1438static int __init fcoe_init(void)
1272{ 1439{
1273 unsigned int cpu; 1440 unsigned int cpu;
1441 int rc = 0;
1274 struct fcoe_percpu_s *p; 1442 struct fcoe_percpu_s *p;
1275 1443
1276 INIT_LIST_HEAD(&fcoe_hostlist); 1444 INIT_LIST_HEAD(&fcoe_hostlist);
@@ -1281,29 +1449,15 @@ static int __init fcoe_init(void)
1281 skb_queue_head_init(&p->fcoe_rx_list); 1449 skb_queue_head_init(&p->fcoe_rx_list);
1282 } 1450 }
1283 1451
1284 /* 1452 for_each_online_cpu(cpu)
1285 * initialize per CPU interrupt thread 1453 fcoe_percpu_thread_create(cpu);
1286 */
1287 for_each_online_cpu(cpu) {
1288 p = &per_cpu(fcoe_percpu, cpu);
1289 p->thread = kthread_create(fcoe_percpu_receive_thread,
1290 (void *)p, "fcoethread/%d", cpu);
1291 1454
1292 /* 1455 /* Initialize per CPU interrupt thread */
1293 * If there is no error then bind the thread to the CPU 1456 rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
1294 * and wake it up. 1457 if (rc)
1295 */ 1458 goto out_free;
1296 if (!IS_ERR(p->thread)) {
1297 kthread_bind(p->thread, cpu);
1298 wake_up_process(p->thread);
1299 } else {
1300 p->thread = NULL;
1301 }
1302 }
1303 1459
1304 /* 1460 /* Setup link change notification */
1305 * setup link change notification
1306 */
1307 fcoe_dev_setup(); 1461 fcoe_dev_setup();
1308 1462
1309 setup_timer(&fcoe_timer, fcoe_watchdog, 0); 1463 setup_timer(&fcoe_timer, fcoe_watchdog, 0);
@@ -1316,6 +1470,13 @@ static int __init fcoe_init(void)
1316 fcoe_sw_init(); 1470 fcoe_sw_init();
1317 1471
1318 return 0; 1472 return 0;
1473
1474out_free:
1475 for_each_online_cpu(cpu) {
1476 fcoe_percpu_thread_destroy(cpu);
1477 }
1478
1479 return rc;
1319} 1480}
1320module_init(fcoe_init); 1481module_init(fcoe_init);
1321 1482
@@ -1328,8 +1489,6 @@ static void __exit fcoe_exit(void)
1328{ 1489{
1329 unsigned int cpu; 1490 unsigned int cpu;
1330 struct fcoe_softc *fc, *tmp; 1491 struct fcoe_softc *fc, *tmp;
1331 struct fcoe_percpu_s *p;
1332 struct sk_buff *skb;
1333 1492
1334 fcoe_dev_cleanup(); 1493 fcoe_dev_cleanup();
1335 1494
@@ -1340,17 +1499,10 @@ static void __exit fcoe_exit(void)
1340 list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list) 1499 list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
1341 fcoe_transport_release(fc->real_dev); 1500 fcoe_transport_release(fc->real_dev);
1342 1501
1343 for_each_possible_cpu(cpu) { 1502 unregister_hotcpu_notifier(&fcoe_cpu_notifier);
1344 p = &per_cpu(fcoe_percpu, cpu); 1503
1345 if (p->thread) { 1504 for_each_online_cpu(cpu) {
1346 kthread_stop(p->thread); 1505 fcoe_percpu_thread_destroy(cpu);
1347 spin_lock_bh(&p->fcoe_rx_list.lock);
1348 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1349 kfree_skb(skb);
1350 spin_unlock_bh(&p->fcoe_rx_list.lock);
1351 if (p->crc_eof_page)
1352 put_page(p->crc_eof_page);
1353 }
1354 } 1506 }
1355 1507
 1356	/* remove sw transport */ 1508