author		Alexander Duyck <alexander.h.duyck@intel.com>	2012-02-06 21:29:06 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2012-03-12 23:23:57 -0400
commit		b37c0fbe3f6dfba1f8ad2aed47fb40578a254635 (patch)
tree		8059fe5b80795afc4931492fe4fa1991c4805561 /include/linux/netdevice.h
parent		5c4903549c05bbb373479e0ce2992573c120654a (diff)
net: Add memory barriers to prevent possible race in byte queue limits
This change adds memory barriers to the byte queue limit code to address
a possible race of the kind seen in the past with the
netif_stop_queue/netif_wake_queue logic.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
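The race and its fix can be illustrated outside the kernel. Below is a
minimal userspace sketch using C11 atomics, where atomic_thread_fence()
stands in for smp_mb(); the names queued, completed, LIMIT and xoff are
hypothetical stand-ins for the dql fields and __QUEUE_STATE_STACK_XOFF,
not kernel API.

/*
 * Userspace analogue of the BQL stop/wake race (a sketch, not kernel code).
 */
#include <stdatomic.h>
#include <stdbool.h>

#define LIMIT 4096L

static atomic_long queued;	/* bytes handed to the device */
static atomic_long completed;	/* bytes the device has finished */
static atomic_bool xoff;	/* queue-stopped flag */

long avail(void)
{
	return LIMIT - (atomic_load(&queued) - atomic_load(&completed));
}

void sent_queue(long bytes)	/* CPU A: transmit path */
{
	atomic_fetch_add(&queued, bytes);
	if (avail() >= 0)
		return;

	atomic_store(&xoff, true);

	/* Fence here: otherwise the re-check below can read a stale
	 * 'completed' while CPU B reads a stale 'xoff', so neither side
	 * wakes the queue and it stays stopped forever. */
	atomic_thread_fence(memory_order_seq_cst);

	if (avail() >= 0)
		atomic_store(&xoff, false);	/* room appeared, undo the stop */
}

void completed_queue(long bytes)	/* CPU B: completion path */
{
	atomic_fetch_add(&completed, bytes);

	atomic_thread_fence(memory_order_seq_cst);	/* pairs with CPU A's fence */

	if (avail() >= 0 && atomic_exchange(&xoff, false))
		;	/* wake the queue, i.e. netif_schedule_queue() */
}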
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--	include/linux/netdevice.h	49
1 file changed, 33 insertions(+), 16 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 4bf314fe2145..4535a4ea9760 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1899,12 +1899,22 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
 {
 #ifdef CONFIG_BQL
 	dql_queued(&dev_queue->dql, bytes);
-	if (unlikely(dql_avail(&dev_queue->dql) < 0)) {
-		set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
-		if (unlikely(dql_avail(&dev_queue->dql) >= 0))
-			clear_bit(__QUEUE_STATE_STACK_XOFF,
-				  &dev_queue->state);
-	}
+
+	if (likely(dql_avail(&dev_queue->dql) >= 0))
+		return;
+
+	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
+
+	/*
+	 * The XOFF flag must be set before checking the dql_avail below,
+	 * because in netdev_tx_completed_queue we update the dql_completed
+	 * before checking the XOFF flag.
+	 */
+	smp_mb();
+
+	/* check again in case another CPU has just made room avail */
+	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
+		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
 #endif
 }
 
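For context, this hook runs in a driver's transmit path. A hedged sketch of
where netdev_tx_sent_queue() sits, using a hypothetical driver function
(my_start_xmit is not part of this patch; only the netdevice.h API is real):

#include <linux/netdevice.h>

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* ... post skb to the hardware descriptor ring ... */

	/* Account the bytes with BQL; this may set __QUEUE_STATE_STACK_XOFF
	 * and, thanks to the new smp_mb(), re-check dql_avail in case a
	 * completion on another CPU just freed space. */
	netdev_tx_sent_queue(txq, skb->len);

	return NETDEV_TX_OK;
}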
@@ -1917,16 +1927,23 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
 					     unsigned pkts, unsigned bytes)
 {
 #ifdef CONFIG_BQL
-	if (likely(bytes)) {
-		dql_completed(&dev_queue->dql, bytes);
-		if (unlikely(test_bit(__QUEUE_STATE_STACK_XOFF,
-				      &dev_queue->state) &&
-			     dql_avail(&dev_queue->dql) >= 0)) {
-			if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF,
-					       &dev_queue->state))
-				netif_schedule_queue(dev_queue);
-		}
-	}
+	if (unlikely(!bytes))
+		return;
+
+	dql_completed(&dev_queue->dql, bytes);
+
+	/*
+	 * Without the memory barrier there is a small possibility that
+	 * netdev_tx_sent_queue will miss the update and cause the queue to
+	 * be stopped forever
+	 */
+	smp_mb();
+
+	if (dql_avail(&dev_queue->dql) < 0)
+		return;
+
+	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
+		netif_schedule_queue(dev_queue);
 #endif
 }
 
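The completion side pairs with the transmit-path sketch above. A matching
sketch, again with a hypothetical driver cleanup routine (my_clean_tx is
illustrative, e.g. something run from a NAPI poll handler):

#include <linux/netdevice.h>

static void my_clean_tx(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
	unsigned pkts = 0, bytes = 0;

	/* ... reclaim finished descriptors, accumulating pkts and bytes ... */

	/* Update dql_completed; the new smp_mb() inside guarantees the
	 * XOFF check after it sees any flag set by a concurrent
	 * netdev_tx_sent_queue(), so a stopped queue is always rescheduled. */
	netdev_tx_completed_queue(txq, pkts, bytes);
}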