diff options
author | Neil Horman <nhorman@tuxdriver.com> | 2012-03-09 17:50:24 -0500 |
---|---|---|
committer | James Bottomley <JBottomley@Parallels.com> | 2012-03-28 04:31:44 -0400 |
commit | 20dc3811a2adfac65d5974e3b022a85fdbb9e205 (patch) | |
tree | cfabd1f2ebe09711cb45f550d754cca7e125bd89 /drivers | |
parent | dd060e74fb4c2513420d8af7371cda2f3eea4fa9 (diff) |
[SCSI] fcoe: reduce contention for fcoe_rx_list lock [v2]
There is potentially lots of contention for the rx_list_lock. On a cpu that is
receiving lots of fcoe traffic, the softirq context has to add and release the
lock for every frame it receives, as does the receiving per-cpu thread. We can
reduce this contention somewhat by altering the per-cpu threads loop such that
when traffic is detected on the fcoe_rx_list, we splice it to a temporary list.
In this way, we can process multiple skbs while only having to acquire and
release the fcoe_rx_list lock once.
[ Braces around single statement while loop removed by Robert Love
to satisfy checkpatch.pl. ]
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Vasu Dev <vasu.dev@intel.com>
Signed-off-by: Robert Love <robert.w.love@intel.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/scsi/fcoe/fcoe.c | 22 |
1 file changed, 14 insertions, 8 deletions
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index d86ca37b3787..58c88b0e8792 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c | |||
@@ -1471,7 +1471,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, | |||
1471 | * in softirq context. | 1471 | * in softirq context. |
1472 | */ | 1472 | */ |
1473 | __skb_queue_tail(&fps->fcoe_rx_list, skb); | 1473 | __skb_queue_tail(&fps->fcoe_rx_list, skb); |
1474 | if (fps->fcoe_rx_list.qlen == 1) | 1474 | if (fps->thread->state == TASK_INTERRUPTIBLE) |
1475 | wake_up_process(fps->thread); | 1475 | wake_up_process(fps->thread); |
1476 | spin_unlock(&fps->fcoe_rx_list.lock); | 1476 | spin_unlock(&fps->fcoe_rx_list.lock); |
1477 | 1477 | ||
@@ -1790,23 +1790,29 @@ static int fcoe_percpu_receive_thread(void *arg) | |||
1790 | { | 1790 | { |
1791 | struct fcoe_percpu_s *p = arg; | 1791 | struct fcoe_percpu_s *p = arg; |
1792 | struct sk_buff *skb; | 1792 | struct sk_buff *skb; |
1793 | struct sk_buff_head tmp; | ||
1794 | |||
1795 | skb_queue_head_init(&tmp); | ||
1793 | 1796 | ||
1794 | set_user_nice(current, -20); | 1797 | set_user_nice(current, -20); |
1795 | 1798 | ||
1796 | while (!kthread_should_stop()) { | 1799 | while (!kthread_should_stop()) { |
1797 | 1800 | ||
1798 | spin_lock_bh(&p->fcoe_rx_list.lock); | 1801 | spin_lock_bh(&p->fcoe_rx_list.lock); |
1799 | while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) { | 1802 | skb_queue_splice_init(&p->fcoe_rx_list, &tmp); |
1803 | spin_unlock_bh(&p->fcoe_rx_list.lock); | ||
1804 | |||
1805 | while ((skb = __skb_dequeue(&tmp)) != NULL) | ||
1806 | fcoe_recv_frame(skb); | ||
1807 | |||
1808 | spin_lock_bh(&p->fcoe_rx_list.lock); | ||
1809 | if (!skb_queue_len(&p->fcoe_rx_list)) { | ||
1800 | set_current_state(TASK_INTERRUPTIBLE); | 1810 | set_current_state(TASK_INTERRUPTIBLE); |
1801 | spin_unlock_bh(&p->fcoe_rx_list.lock); | 1811 | spin_unlock_bh(&p->fcoe_rx_list.lock); |
1802 | schedule(); | 1812 | schedule(); |
1803 | set_current_state(TASK_RUNNING); | 1813 | set_current_state(TASK_RUNNING); |
1804 | if (kthread_should_stop()) | 1814 | } else |
1805 | return 0; | 1815 | spin_unlock_bh(&p->fcoe_rx_list.lock); |
1806 | spin_lock_bh(&p->fcoe_rx_list.lock); | ||
1807 | } | ||
1808 | spin_unlock_bh(&p->fcoe_rx_list.lock); | ||
1809 | fcoe_recv_frame(skb); | ||
1810 | } | 1816 | } |
1811 | return 0; | 1817 | return 0; |
1812 | } | 1818 | } |