author     Patrick McHardy <kaber@trash.net>        2009-11-10 01:14:14 -0500
committer  David S. Miller <davem@davemloft.net>    2009-11-13 17:07:32 -0500
commit     572a9d7b6fc7f20f573664063324c086be310c42 (patch)
tree       0ab3655fdfa923b0b9c6c1ee51a2e31e97e9549f /net
parent     9ea2bdab11da97b2ac6f87d79976d25fa6d27295 (diff)
net: allow to propagate errors through ->ndo_hard_start_xmit()
Currently the ->ndo_hard_start_xmit() callbacks are only permitted to return
one of the NETDEV_TX codes. This prevents any kind of error propagation for
virtual devices, like queue congestion of the underlying device in case of
layered devices, or unreachability in case of tunnels.

This patch changes the NET_XMIT codes to avoid clashes with the NETDEV_TX
codes and changes the two callers of dev_hard_start_xmit() to expect either
errno codes, NET_XMIT codes or NETDEV_TX codes as return value.

In case of qdisc_restart(), all non-NETDEV_TX codes are mapped to NETDEV_TX_OK
since no error propagation is possible when using qdiscs. In case of
dev_queue_xmit(), the error is propagated upwards.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
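The constant layout that makes the masking in the hunks below work is defined
in the headers, which sit outside net/ and are therefore not covered by this
diffstat. As a rough sketch of that assumed companion change (values shown for
illustration only): the NET_XMIT codes keep the low nibble, the NETDEV_TX
codes move to the high nibble, and negative values stay free for errno codes.

/* Sketch of the assumed post-patch return-code layout (not part of this diff) */
#define NET_XMIT_SUCCESS        0x00
#define NET_XMIT_DROP           0x01    /* skb dropped */
#define NET_XMIT_CN             0x02    /* congestion notification */
#define NET_XMIT_POLICED        0x03    /* skb is shot by police */
#define NET_XMIT_MASK           0x0f    /* qdisc return codes */

#define NETDEV_TX_MASK          0xf0    /* driver return codes */
enum netdev_tx {
        __NETDEV_TX_MIN  = INT_MIN,     /* keep the enum signed for errnos */
        NETDEV_TX_OK     = 0x00,        /* driver took care of the packet */
        NETDEV_TX_BUSY   = 0x10,        /* driver tx path was busy */
        NETDEV_TX_LOCKED = 0x20,        /* driver tx lock was already taken */
};

With this split, rc & ~NETDEV_TX_MASK in dev_hard_start_xmit() is non-zero
exactly for NET_XMIT and errno codes, rc & NET_XMIT_MASK in dev_xmit_complete()
catches the qdisc codes, and ret &= ~NET_XMIT_MASK in sch_direct_xmit()
collapses them to NETDEV_TX_OK.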
Diffstat (limited to 'net')
-rw-r--r--   net/core/dev.c            32
-rw-r--r--   net/sched/sch_generic.c    9
2 files changed, 34 insertions, 7 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index ad8e320ceba7..548340b57296 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1757,7 +1757,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			struct netdev_queue *txq)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
-	int rc;
+	int rc = NETDEV_TX_OK;
 
 	if (likely(!skb->next)) {
 		if (!list_empty(&ptype_all))
@@ -1805,6 +1805,8 @@ gso:
 		nskb->next = NULL;
 		rc = ops->ndo_start_xmit(nskb, dev);
 		if (unlikely(rc != NETDEV_TX_OK)) {
+			if (rc & ~NETDEV_TX_MASK)
+				goto out_kfree_gso_skb;
 			nskb->next = skb->next;
 			skb->next = nskb;
 			return rc;
@@ -1814,11 +1816,12 @@ gso:
 			return NETDEV_TX_BUSY;
 	} while (skb->next);
 
-	skb->destructor = DEV_GSO_CB(skb)->destructor;
-
+out_kfree_gso_skb:
+	if (likely(skb->next == NULL))
+		skb->destructor = DEV_GSO_CB(skb)->destructor;
 out_kfree_skb:
 	kfree_skb(skb);
-	return NETDEV_TX_OK;
+	return rc;
 }
 
 static u32 skb_tx_hashrnd;
@@ -1906,6 +1909,23 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	return rc;
 }
 
+static inline bool dev_xmit_complete(int rc)
+{
+	/* successful transmission */
+	if (rc == NETDEV_TX_OK)
+		return true;
+
+	/* error while transmitting, driver consumed skb */
+	if (rc < 0)
+		return true;
+
+	/* error while queueing to a different device, driver consumed skb */
+	if (rc & NET_XMIT_MASK)
+		return true;
+
+	return false;
+}
+
 /**
  *	dev_queue_xmit - transmit a buffer
  *	@skb: buffer to transmit
@@ -2003,8 +2023,8 @@ gso:
 			HARD_TX_LOCK(dev, txq, cpu);
 
 			if (!netif_tx_queue_stopped(txq)) {
-				rc = NET_XMIT_SUCCESS;
-				if (!dev_hard_start_xmit(skb, dev, txq)) {
+				rc = dev_hard_start_xmit(skb, dev, txq);
+				if (dev_xmit_complete(rc)) {
 					HARD_TX_UNLOCK(dev, txq);
 					goto out;
 				}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 4ae6aa562f2b..b13821ad2fb6 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -120,8 +120,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
 	if (!netif_tx_queue_stopped(txq) &&
-	    !netif_tx_queue_frozen(txq))
+	    !netif_tx_queue_frozen(txq)) {
 		ret = dev_hard_start_xmit(skb, dev, txq);
+
+		/* an error implies that the skb was consumed */
+		if (ret < 0)
+			ret = NETDEV_TX_OK;
+		/* all NET_XMIT codes map to NETDEV_TX_OK */
+		ret &= ~NET_XMIT_MASK;
+	}
 	HARD_TX_UNLOCK(dev, txq);
 
 	spin_lock(root_lock);
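Since dev_queue_xmit() now passes the code through, a stacked or tunnel driver
can report the lower device's congestion or an errno to the layer above instead
of always claiming NETDEV_TX_OK. A minimal, hypothetical sketch of such a
transmit path (example_stacked_xmit, example_priv and lower_dev are
illustrative names, not part of this patch):

/* Hypothetical layered device using the new semantics: transmit on the
 * lower device and propagate its status upwards. dev_queue_xmit()
 * consumes the skb in all cases, so the code is informational only and
 * the skb must not be retried here. */
static netdev_tx_t example_stacked_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);	/* illustrative */
	int rc;

	skb->dev = priv->lower_dev;
	rc = dev_queue_xmit(skb);	/* NET_XMIT_* or negative errno */

	if (rc != NET_XMIT_SUCCESS && rc != NET_XMIT_CN)
		dev->stats.tx_dropped++;

	return rc;	/* congestion/errors now visible to the layer above */
}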