Diffstat (limited to 'drivers/scsi/fcoe/fcoe.c')

 drivers/scsi/fcoe/fcoe.c | 83 +++++++++++++++++++++++++++++++++++------------------------------------------------
 1 file changed, 35 insertions(+), 48 deletions(-)

diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index ae7d15c44e2a..335e85192807 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1436,7 +1436,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 		goto err;

 	fps = &per_cpu(fcoe_percpu, cpu);
-	spin_lock_bh(&fps->fcoe_rx_list.lock);
+	spin_lock(&fps->fcoe_rx_list.lock);
 	if (unlikely(!fps->thread)) {
 		/*
 		 * The targeted CPU is not ready, let's target
@@ -1447,12 +1447,12 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 			   "ready for incoming skb- using first online "
 			   "CPU.\n");

-		spin_unlock_bh(&fps->fcoe_rx_list.lock);
+		spin_unlock(&fps->fcoe_rx_list.lock);
 		cpu = cpumask_first(cpu_online_mask);
 		fps = &per_cpu(fcoe_percpu, cpu);
-		spin_lock_bh(&fps->fcoe_rx_list.lock);
+		spin_lock(&fps->fcoe_rx_list.lock);
 		if (!fps->thread) {
-			spin_unlock_bh(&fps->fcoe_rx_list.lock);
+			spin_unlock(&fps->fcoe_rx_list.lock);
 			goto err;
 		}
 	}
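Review note: fcoe_rcv() is a packet_type receive handler, so it runs in NET_RX softirq context where bottom halves are already disabled; the _bh lock variants were redundant there and are dropped. Process-context users of the same per-CPU list (the receive thread, fcoe_percpu_clean()) keep spin_lock_bh() so the softirq producer cannot interrupt them on the same CPU while they hold the lock. A minimal sketch of that pairing, with hypothetical names, not code from the patch:

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(rx_lock);	/* hypothetical */

/* Softirq-context producer: BHs are already off, the plain lock suffices. */
static void enqueue_from_softirq(void)
{
	spin_lock(&rx_lock);
	/* ... add an skb to the list ... */
	spin_unlock(&rx_lock);
}

/* Process-context consumer: must disable BHs while holding the lock, or
 * the softirq producer could fire on this CPU and spin forever on a lock
 * its own CPU already holds. */
static void dequeue_from_process_context(void)
{
	spin_lock_bh(&rx_lock);
	/* ... remove an skb from the list ... */
	spin_unlock_bh(&rx_lock);
}
```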
@@ -1463,24 +1463,17 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 	 * so we're free to queue skbs into it's queue.
 	 */

-	/* If this is a SCSI-FCP frame, and this is already executing on the
-	 * correct CPU, and the queue for this CPU is empty, then go ahead
-	 * and process the frame directly in the softirq context.
-	 * This lets us process completions without context switching from the
-	 * NET_RX softirq, to our receive processing thread, and then back to
-	 * BLOCK softirq context.
-	 */
-	if (fh->fh_type == FC_TYPE_FCP &&
-	    cpu == smp_processor_id() &&
-	    skb_queue_empty(&fps->fcoe_rx_list)) {
-		spin_unlock_bh(&fps->fcoe_rx_list.lock);
-		fcoe_recv_frame(skb);
-	} else {
-		__skb_queue_tail(&fps->fcoe_rx_list, skb);
-		if (fps->fcoe_rx_list.qlen == 1)
-			wake_up_process(fps->thread);
-		spin_unlock_bh(&fps->fcoe_rx_list.lock);
-	}
+	/*
+	 * Note: We used to have a set of conditions under which we would
+	 * call fcoe_recv_frame directly, rather than queuing to the rx list
+	 * as it could save a few cycles, but doing so is prohibited, as
+	 * fcoe_recv_frame has several paths that may sleep, which is forbidden
+	 * in softirq context.
+	 */
+	__skb_queue_tail(&fps->fcoe_rx_list, skb);
+	if (fps->thread->state == TASK_INTERRUPTIBLE)
+		wake_up_process(fps->thread);
+	spin_unlock(&fps->fcoe_rx_list.lock);

 	return 0;
 err:
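Review note: every frame is now queued unconditionally, and the wakeup condition changes from "queue was empty" (qlen == 1) to "thread is actually asleep" (state == TASK_INTERRUPTIBLE). Because the consumer publishes TASK_INTERRUPTIBLE and re-checks the queue under the same lock before it sleeps (see the next hunk), no wakeup can be lost, and a thread that is already running is not woken redundantly. A condensed sketch of the handshake, with hypothetical names, not code from the patch:

```c
#include <linux/sched.h>
#include <linux/skbuff.h>

static struct sk_buff_head rx_list;	/* hypothetical shared state */
static struct task_struct *rx_thread;

/* Producer (softirq): queue unconditionally, wake only a sleeping thread. */
static void rx_enqueue(struct sk_buff *skb)
{
	spin_lock(&rx_list.lock);
	__skb_queue_tail(&rx_list, skb);
	if (rx_thread->state == TASK_INTERRUPTIBLE)
		wake_up_process(rx_thread);
	spin_unlock(&rx_list.lock);
}

/* Consumer: publish TASK_INTERRUPTIBLE under the same lock the producer
 * takes, so a frame queued after the emptiness check cannot miss us. */
static void rx_wait_for_work(void)
{
	spin_lock_bh(&rx_list.lock);
	if (!skb_queue_len(&rx_list)) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&rx_list.lock);
		schedule();
		__set_current_state(TASK_RUNNING);
	} else
		spin_unlock_bh(&rx_list.lock);
}
```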
@@ -1797,23 +1790,29 @@ static int fcoe_percpu_receive_thread(void *arg)
 {
 	struct fcoe_percpu_s *p = arg;
 	struct sk_buff *skb;
+	struct sk_buff_head tmp;
+
+	skb_queue_head_init(&tmp);

 	set_user_nice(current, -20);

 	while (!kthread_should_stop()) {

 		spin_lock_bh(&p->fcoe_rx_list.lock);
-		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
+		skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
+		spin_unlock_bh(&p->fcoe_rx_list.lock);
+
+		while ((skb = __skb_dequeue(&tmp)) != NULL)
+			fcoe_recv_frame(skb);
+
+		spin_lock_bh(&p->fcoe_rx_list.lock);
+		if (!skb_queue_len(&p->fcoe_rx_list)) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock_bh(&p->fcoe_rx_list.lock);
 			schedule();
 			set_current_state(TASK_RUNNING);
-			if (kthread_should_stop())
-				return 0;
-			spin_lock_bh(&p->fcoe_rx_list.lock);
-		}
-		spin_unlock_bh(&p->fcoe_rx_list.lock);
-		fcoe_recv_frame(skb);
+		} else
+			spin_unlock_bh(&p->fcoe_rx_list.lock);
 	}
 	return 0;
 }
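Review note: the thread now drains in batches. It splices the whole shared list onto a private queue in O(1) under the lock, then calls fcoe_recv_frame() with no spinlock held, which is what makes the sleeping paths in fcoe_recv_frame() safe, and it only goes back to sleep if nothing new arrived while it was processing. The same pattern in isolation, as a sketch with hypothetical names:

```c
#include <linux/kthread.h>
#include <linux/skbuff.h>

/* Hypothetical stand-in for fcoe_recv_frame(); may sleep. */
static void process_frame(struct sk_buff *skb)
{
	kfree_skb(skb);
}

static int rx_thread_fn(void *arg)
{
	struct sk_buff_head *shared = arg;
	struct sk_buff_head tmp;
	struct sk_buff *skb;

	skb_queue_head_init(&tmp);

	while (!kthread_should_stop()) {
		/* O(1) bulk transfer: take everything queued so far. */
		spin_lock_bh(&shared->lock);
		skb_queue_splice_init(shared, &tmp);
		spin_unlock_bh(&shared->lock);

		/* Drain the private batch with no lock held. */
		while ((skb = __skb_dequeue(&tmp)) != NULL)
			process_frame(skb);

		/* Sleep only if nothing arrived while we were processing. */
		spin_lock_bh(&shared->lock);
		if (!skb_queue_len(shared)) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_bh(&shared->lock);
			schedule();
			__set_current_state(TASK_RUNNING);
		} else
			spin_unlock_bh(&shared->lock);
	}
	return 0;
}
```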
@@ -2187,8 +2186,12 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
 	/* start FIP Discovery and FLOGI */
 	lport->boot_time = jiffies;
 	fc_fabric_login(lport);
-	if (!fcoe_link_ok(lport))
+	if (!fcoe_link_ok(lport)) {
+		rtnl_unlock();
 		fcoe_ctlr_link_up(&fcoe->ctlr);
+		mutex_unlock(&fcoe_config_mutex);
+		return rc;
+	}

 out_nodev:
 	rtnl_unlock();
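Review note: on the immediate-link-up path, fcoe_create() now unwinds explicitly: it drops the RTNL lock before calling fcoe_ctlr_link_up(), releases fcoe_config_mutex, and returns success instead of falling through to the shared out_nodev exit. Presumably this is to avoid holding RTNL across fcoe_ctlr_link_up(), which can kick off FLOGI work and block. A hypothetical sketch of the reshaped control flow (all names invented for illustration):

```c
#include <linux/mutex.h>
#include <linux/rtnetlink.h>

static DEFINE_MUTEX(cfg_mutex);				/* stand-in */
static bool port_setup_ok(void) { return true; }	/* stand-in */
static bool link_is_up(void) { return true; }		/* stand-in */
static void report_link_up(void) { }			/* stand-in; may block */

static int create_port(void)
{
	int rc = 0;

	mutex_lock(&cfg_mutex);
	rtnl_lock();

	if (!port_setup_ok()) {
		rc = -EIO;
		goto out;		/* error: shared unwind below */
	}

	if (link_is_up()) {
		rtnl_unlock();		/* drop RTNL before link-up work */
		report_link_up();
		mutex_unlock(&cfg_mutex);
		return rc;		/* success: bypass the exit label */
	}

out:
	rtnl_unlock();
	mutex_unlock(&cfg_mutex);
	return rc;
}
```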
@@ -2261,31 +2264,14 @@ static int fcoe_link_ok(struct fc_lport *lport)
 static void fcoe_percpu_clean(struct fc_lport *lport)
 {
 	struct fcoe_percpu_s *pp;
-	struct fcoe_rcv_info *fr;
-	struct sk_buff_head *list;
-	struct sk_buff *skb, *next;
-	struct sk_buff *head;
+	struct sk_buff *skb;
 	unsigned int cpu;

 	for_each_possible_cpu(cpu) {
 		pp = &per_cpu(fcoe_percpu, cpu);
-		spin_lock_bh(&pp->fcoe_rx_list.lock);
-		list = &pp->fcoe_rx_list;
-		head = list->next;
-		for (skb = head; skb != (struct sk_buff *)list;
-		     skb = next) {
-			next = skb->next;
-			fr = fcoe_dev_from_skb(skb);
-			if (fr->fr_dev == lport) {
-				__skb_unlink(skb, list);
-				kfree_skb(skb);
-			}
-		}

-		if (!pp->thread || !cpu_online(cpu)) {
-			spin_unlock_bh(&pp->fcoe_rx_list.lock);
+		if (!pp->thread || !cpu_online(cpu))
 			continue;
-		}

 		skb = dev_alloc_skb(0);
 		if (!skb) {
@@ -2294,6 +2280,7 @@ static void fcoe_percpu_clean(struct fc_lport *lport)
 		}
 		skb->destructor = fcoe_percpu_flush_done;

+		spin_lock_bh(&pp->fcoe_rx_list.lock);
 		__skb_queue_tail(&pp->fcoe_rx_list, skb);
 		if (pp->fcoe_rx_list.qlen == 1)
 			wake_up_process(pp->thread);
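Review note: fcoe_percpu_clean() no longer walks the rx list unlinking this lport's frames; once the thread splices frames onto its private queue they are invisible to the shared list, so a purge there could never be complete. The function instead relies entirely on the flush marker: a zero-length skb whose destructor, fcoe_percpu_flush_done(), fires only when the thread frees it, i.e. after everything queued ahead of it has been processed. A condensed sketch of that barrier pattern, assuming a completion-based flush handler (the wait side is outside this hunk):

```c
#include <linux/completion.h>
#include <linux/skbuff.h>

static DECLARE_COMPLETION(flush_done);		/* hypothetical */

/* Runs when the rx thread frees the marker skb, i.e. only after every
 * skb queued ahead of it has already been dequeued and processed. */
static void flush_marker_destructor(struct sk_buff *skb)
{
	complete(&flush_done);
}

/* Queue a zero-length marker and wait until the thread consumes it. */
static void flush_rx_queue(struct sk_buff_head *list, struct task_struct *thread)
{
	struct sk_buff *skb = dev_alloc_skb(0);

	if (!skb)
		return;
	skb->destructor = flush_marker_destructor;

	spin_lock_bh(&list->lock);
	__skb_queue_tail(list, skb);
	if (skb_queue_len(list) == 1)	/* thread sleeps only when empty */
		wake_up_process(thread);
	spin_unlock_bh(&list->lock);

	wait_for_completion(&flush_done);	/* barrier: queue drained */
}
```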