path: root/include/linux/netdevice.h
author     David S. Miller <davem@davemloft.net>  2012-03-07 21:02:35 -0500
committer  David S. Miller <davem@davemloft.net>  2012-03-09 17:34:50 -0500
commit     4d29515f5a2e062c3fe82bfb574da70ed544ffad (patch)
tree       276220b5921c296d1f185b2b6ff7440ade105821 /include/linux/netdevice.h
parent     bdcc0924c8c6e6362bb93b6120631f257aac6f87 (diff)
net: Use bool in netdevice.h helpers.
Specifically use it in napi_disable_pending(), napi_schedule_prep(), napi_reschedule(), netif_tx_queue_stopped(), netif_queue_stopped(), netif_xmit_stopped(), netif_xmit_frozen_or_stopped(), netif_running(), __netif_subqueue_stopped(), netif_subqueue_stopped(), netif_is_multiqueue(), netif_carrier_ok(), netif_dormant(), netif_oper_up(), netif_device_present(), __netif_tx_trylock(), net_gso_ok(), skb_gso_ok(), netif_needs_gso(), and netif_is_bond_slave().

Signed-off-by: David S. Miller <davem@davemloft.net>
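For context, a minimal caller-side sketch (not part of this patch; the foo_* names are hypothetical) of how a driver interrupt handler typically consumes the NAPI helpers converted here:

/* Hypothetical driver fragment, not from this commit: illustrates a
 * caller of napi_schedule_prep(), which this patch converts to return
 * bool. foo_* identifiers are illustrative only. */
static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;	/* hypothetical per-device state */

	/* napi_schedule_prep() now returns bool: true only when we won
	 * the right to schedule polling and no NAPI disable is pending. */
	if (napi_schedule_prep(&priv->napi)) {
		foo_mask_irqs(priv);	/* hypothetical helper */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}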
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--   include/linux/netdevice.h   52
1 file changed, 26 insertions(+), 26 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a89933bc4f2f..b195a34440bb 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -417,7 +417,7 @@ typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
 
 extern void __napi_schedule(struct napi_struct *n);
 
-static inline int napi_disable_pending(struct napi_struct *n)
+static inline bool napi_disable_pending(struct napi_struct *n)
 {
 	return test_bit(NAPI_STATE_DISABLE, &n->state);
 }
@@ -431,7 +431,7 @@ static inline int napi_disable_pending(struct napi_struct *n)
  * insure only one NAPI poll instance runs.  We also make
  * sure there is no pending NAPI disable.
  */
-static inline int napi_schedule_prep(struct napi_struct *n)
+static inline bool napi_schedule_prep(struct napi_struct *n)
 {
 	return !napi_disable_pending(n) &&
 		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
@@ -451,13 +451,13 @@ static inline void napi_schedule(struct napi_struct *n)
 }
 
 /* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
-static inline int napi_reschedule(struct napi_struct *napi)
+static inline bool napi_reschedule(struct napi_struct *napi)
 {
 	if (napi_schedule_prep(napi)) {
 		__napi_schedule(napi);
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 /**
@@ -1868,7 +1868,7 @@ static inline void netif_tx_stop_all_queues(struct net_device *dev)
 	}
 }
 
-static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 {
 	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
@@ -1879,17 +1879,17 @@ static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
  *
  *	Test if transmit queue on device is currently unable to send.
  */
-static inline int netif_queue_stopped(const struct net_device *dev)
+static inline bool netif_queue_stopped(const struct net_device *dev)
 {
 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
-static inline int netif_xmit_stopped(const struct netdev_queue *dev_queue)
+static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
 {
 	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
 }
 
-static inline int netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
+static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
 {
 	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
 }
@@ -1954,7 +1954,7 @@ static inline void netdev_reset_queue(struct net_device *dev_queue)
  *
  *	Test if the device has been brought up.
  */
-static inline int netif_running(const struct net_device *dev)
+static inline bool netif_running(const struct net_device *dev)
 {
 	return test_bit(__LINK_STATE_START, &dev->state);
 }
@@ -2004,16 +2004,16 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
  *
  * Check individual transmit queue of a device with multiple transmit queues.
  */
-static inline int __netif_subqueue_stopped(const struct net_device *dev,
-					   u16 queue_index)
+static inline bool __netif_subqueue_stopped(const struct net_device *dev,
+					    u16 queue_index)
 {
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 
 	return netif_tx_queue_stopped(txq);
 }
 
-static inline int netif_subqueue_stopped(const struct net_device *dev,
-					 struct sk_buff *skb)
+static inline bool netif_subqueue_stopped(const struct net_device *dev,
+					  struct sk_buff *skb)
 {
 	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
 }
@@ -2052,7 +2052,7 @@ static inline u16 skb_tx_hash(const struct net_device *dev,
  *
  * Check if device has multiple transmit queues
  */
-static inline int netif_is_multiqueue(const struct net_device *dev)
+static inline bool netif_is_multiqueue(const struct net_device *dev)
 {
 	return dev->num_tx_queues > 1;
 }
@@ -2188,7 +2188,7 @@ extern void linkwatch_forget_dev(struct net_device *dev);
  *
  * Check if carrier is present on device
  */
-static inline int netif_carrier_ok(const struct net_device *dev)
+static inline bool netif_carrier_ok(const struct net_device *dev)
 {
 	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
 }
@@ -2240,7 +2240,7 @@ static inline void netif_dormant_off(struct net_device *dev)
  *
  * Check if carrier is present on device
  */
-static inline int netif_dormant(const struct net_device *dev)
+static inline bool netif_dormant(const struct net_device *dev)
 {
 	return test_bit(__LINK_STATE_DORMANT, &dev->state);
 }
@@ -2252,7 +2252,7 @@ static inline int netif_dormant(const struct net_device *dev)
  *
  * Check if carrier is operational
  */
-static inline int netif_oper_up(const struct net_device *dev)
+static inline bool netif_oper_up(const struct net_device *dev)
 {
 	return (dev->operstate == IF_OPER_UP ||
 		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
@@ -2264,7 +2264,7 @@ static inline int netif_oper_up(const struct net_device *dev)
  *
  * Check if device has not been removed from system.
  */
-static inline int netif_device_present(struct net_device *dev)
+static inline bool netif_device_present(struct net_device *dev)
 {
 	return test_bit(__LINK_STATE_PRESENT, &dev->state);
 }
@@ -2334,9 +2334,9 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 	txq->xmit_lock_owner = smp_processor_id();
 }
 
-static inline int __netif_tx_trylock(struct netdev_queue *txq)
+static inline bool __netif_tx_trylock(struct netdev_queue *txq)
 {
-	int ok = spin_trylock(&txq->_xmit_lock);
+	bool ok = spin_trylock(&txq->_xmit_lock);
 	if (likely(ok))
 		txq->xmit_lock_owner = smp_processor_id();
 	return ok;
@@ -2614,7 +2614,7 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 
 netdev_features_t netif_skb_features(struct sk_buff *skb);
 
-static inline int net_gso_ok(netdev_features_t features, int gso_type)
+static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
 	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
 
@@ -2629,14 +2629,14 @@ static inline int net_gso_ok(netdev_features_t features, int gso_type)
 	return (features & feature) == feature;
 }
 
-static inline int skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
+static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
 {
 	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
 	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
 }
 
-static inline int netif_needs_gso(struct sk_buff *skb,
-				  netdev_features_t features)
+static inline bool netif_needs_gso(struct sk_buff *skb,
+				   netdev_features_t features)
 {
 	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
 		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
@@ -2648,7 +2648,7 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
 	dev->gso_max_size = size;
 }
 
-static inline int netif_is_bond_slave(struct net_device *dev)
+static inline bool netif_is_bond_slave(struct net_device *dev)
 {
 	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
 }
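As a usage note, a sketch under the assumption of a hypothetical foo driver (not code from this patch): the queue-state helpers converted above read naturally in boolean context on the transmit path.

/* Hypothetical TX-path fragment, not from this commit:
 * netif_xmit_frozen_or_stopped(), now returning bool, guards a
 * transmit attempt. foo_start_xmit() is illustrative only. */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	if (netif_xmit_frozen_or_stopped(txq))
		return NETDEV_TX_BUSY;	/* queue stopped or frozen; try again later */

	/* ... map buffers and hand the frame to the hardware ... */
	return NETDEV_TX_OK;
}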