author		Ingo Molnar <mingo@elte.hu>	2008-08-14 06:19:59 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-08-14 06:19:59 -0400
commit		8d7ccaa545490cdffdfaff0842436a8dd85cf47b (patch)
tree		8129b5907161bc6ae26deb3645ce1e280c5e1f51 /include/linux/netdevice.h
parent		b2139aa0eec330c711c5a279db361e5ef1178e78 (diff)
parent		30a2f3c60a84092c8084dfe788b710f8d0768cd4 (diff)
Merge commit 'v2.6.27-rc3' into x86/prototypes

Conflicts:
	include/asm-x86/dma-mapping.h

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--	include/linux/netdevice.h	138
1 file changed, 81 insertions(+), 57 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 812bcd8b4363..488c56e649b5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -61,9 +61,7 @@ struct wireless_dev;
 #define NET_XMIT_DROP		1	/* skb dropped			*/
 #define NET_XMIT_CN		2	/* congestion notification	*/
 #define NET_XMIT_POLICED	3	/* skb is shot by police	*/
-#define NET_XMIT_BYPASS		4	/* packet does not leave via dequeue;
-					   (TC use only - dev_queue_xmit
-					   returns this as NET_XMIT_SUCCESS) */
+#define NET_XMIT_MASK		0xFFFF	/* qdisc flags in net/sch_generic.h */
 
 /* Backlog congestion levels */
 #define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
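The NET_XMIT_BYPASS removal pairs with qdisc changes merged through v2.6.27-rc: TC-internal return flags now live in net/sch_generic.h above bit 16, and NET_XMIT_MASK strips them before a qdisc return value escapes to dev_queue_xmit() callers. A minimal sketch of the masking idea, assuming the 2.6.27 flag values from net/sch_generic.h (the wrapper function itself is hypothetical):

	/* qdisc-internal flags sit above NET_XMIT_MASK (net/sch_generic.h) */
	#define __NET_XMIT_STOLEN	0x00010000
	#define __NET_XMIT_BYPASS	0x00020000

	static inline int example_enqueue_masked(struct sk_buff *skb, struct Qdisc *q)
	{
		/* strip internal flags so callers only ever see the public
		 * NET_XMIT_SUCCESS/DROP/CN/POLICED codes */
		return q->enqueue(skb, q) & NET_XMIT_MASK;
	}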
@@ -440,6 +438,7 @@ static inline void napi_synchronize(const struct napi_struct *n)
 enum netdev_queue_state_t
 {
 	__QUEUE_STATE_XOFF,
+	__QUEUE_STATE_FROZEN,
 };
 
 struct netdev_queue {
@@ -636,7 +635,7 @@ struct net_device
 	unsigned int		real_num_tx_queues;
 
 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
-
+	spinlock_t		tx_global_lock;
 /*
  * One part is mostly used on xmit path (device)
  */
@@ -996,17 +995,17 @@ static inline void netif_tx_schedule_all(struct net_device *dev)
 		netif_schedule_queue(netdev_get_tx_queue(dev, i));
 }
 
+static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+{
+	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 /**
  *	netif_start_queue - allow transmit
  *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
-static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
-{
-	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
-}
-
 static inline void netif_start_queue(struct net_device *dev)
 {
 	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
@@ -1022,13 +1021,6 @@ static inline void netif_tx_start_all_queues(struct net_device *dev)
 	}
 }
 
-/**
- *	netif_wake_queue - restart transmit
- *	@dev: network device
- *
- *	Allow upper layers to call the device hard_start_xmit routine.
- *	Used for flow control when transmit resources are available.
- */
 static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 {
 #ifdef CONFIG_NETPOLL_TRAP
@@ -1041,6 +1033,13 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 	__netif_schedule(dev_queue->qdisc);
 }
 
+/**
+ *	netif_wake_queue - restart transmit
+ *	@dev: network device
+ *
+ *	Allow upper layers to call the device hard_start_xmit routine.
+ *	Used for flow control when transmit resources are available.
+ */
 static inline void netif_wake_queue(struct net_device *dev)
 {
 	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
@@ -1056,6 +1055,11 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
 	}
 }
 
+static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+{
+	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 /**
  *	netif_stop_queue - stop transmitted packets
  *	@dev: network device
@@ -1063,11 +1067,6 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
-static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
-{
-	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
-}
-
 static inline void netif_stop_queue(struct net_device *dev)
 {
 	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
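The stop/start/wake helpers being shuffled in these hunks are the driver flow-control API: stop a queue when the TX ring fills, wake it from the completion path. A hedged sketch of the usual pattern (the example_* names and ring-accounting helpers are hypothetical):

	static int example_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);

		/* ... post skb to the hardware TX ring ... */
		if (example_ring_full(priv))
			netif_stop_queue(dev);	/* sets __QUEUE_STATE_XOFF */
		return NETDEV_TX_OK;
	}

	static void example_tx_complete(struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);

		/* ... reclaim finished descriptors ... */
		if (netif_queue_stopped(dev) && example_ring_has_room(priv))
			netif_wake_queue(dev);	/* clears XOFF, reschedules */
	}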
@@ -1083,20 +1082,25 @@ static inline void netif_tx_stop_all_queues(struct net_device *dev)
 	}
 }
 
+static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+{
+	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 /**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
-static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+static inline int netif_queue_stopped(const struct net_device *dev)
 {
-	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
-static inline int netif_queue_stopped(const struct net_device *dev)
+static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
 {
-	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
+	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
 }
 
 /**
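netif_tx_queue_frozen() is the read side of the new __QUEUE_STATE_FROZEN bit. The companion change to net/sched/sch_generic.c gates transmission on both bits while holding the per-queue lock, roughly:

	__netif_tx_lock(txq, smp_processor_id());
	if (!netif_tx_queue_stopped(txq) &&
	    !netif_tx_queue_frozen(txq))
		ret = dev_hard_start_xmit(skb, dev, txq);
	__netif_tx_unlock(txq);

Because the frozen bit is tested under _xmit_lock, a freezer that sets it while briefly holding each queue's _xmit_lock (see the rewritten netif_tx_lock() below) knows that no transmit is still in flight once it has cycled through every queue.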
@@ -1463,13 +1467,6 @@ static inline void netif_rx_complete(struct net_device *dev,
 	local_irq_restore(flags);
 }
 
-/**
- *	netif_tx_lock - grab network device transmit lock
- *	@dev: network device
- *	@cpu: cpu number of lock owner
- *
- *	Get network device transmit lock
- */
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
 	spin_lock(&txq->_xmit_lock);
@@ -1482,23 +1479,6 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 	txq->xmit_lock_owner = smp_processor_id();
 }
 
-static inline void netif_tx_lock(struct net_device *dev)
-{
-	int cpu = smp_processor_id();
-	unsigned int i;
-
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		__netif_tx_lock(txq, cpu);
-	}
-}
-
-static inline void netif_tx_lock_bh(struct net_device *dev)
-{
-	local_bh_disable();
-	netif_tx_lock(dev);
-}
-
 static inline int __netif_tx_trylock(struct netdev_queue *txq)
 {
 	int ok = spin_trylock(&txq->_xmit_lock);
@@ -1507,11 +1487,6 @@ static inline int __netif_tx_trylock(struct netdev_queue *txq)
 	return ok;
 }
 
-static inline int netif_tx_trylock(struct net_device *dev)
-{
-	return __netif_tx_trylock(netdev_get_tx_queue(dev, 0));
-}
-
 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 {
 	txq->xmit_lock_owner = -1;
@@ -1524,15 +1499,57 @@ static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 	spin_unlock_bh(&txq->_xmit_lock);
 }
 
+/**
+ *	netif_tx_lock - grab network device transmit lock
+ *	@dev: network device
+ *	@cpu: cpu number of lock owner
+ *
+ *	Get network device transmit lock
+ */
+static inline void netif_tx_lock(struct net_device *dev)
+{
+	unsigned int i;
+	int cpu;
+
+	spin_lock(&dev->tx_global_lock);
+	cpu = smp_processor_id();
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		/* We are the only thread of execution doing a
+		 * freeze, but we have to grab the _xmit_lock in
+		 * order to synchronize with threads which are in
+		 * the ->hard_start_xmit() handler and already
+		 * checked the frozen bit.
+		 */
+		__netif_tx_lock(txq, cpu);
+		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
+		__netif_tx_unlock(txq);
+	}
+}
+
+static inline void netif_tx_lock_bh(struct net_device *dev)
+{
+	local_bh_disable();
+	netif_tx_lock(dev);
+}
+
 static inline void netif_tx_unlock(struct net_device *dev)
 {
 	unsigned int i;
 
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		__netif_tx_unlock(txq);
-	}
 
+		/* No need to grab the _xmit_lock here.  If the
+		 * queue is not stopped for another reason, we
+		 * force a schedule.
+		 */
+		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
+		if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+			__netif_schedule(txq->qdisc);
+	}
+	spin_unlock(&dev->tx_global_lock);
 }
 
 static inline void netif_tx_unlock_bh(struct net_device *dev)
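The rewritten netif_tx_lock() is now a freeze protocol rather than a held lock: it takes the new dev->tx_global_lock (serializing freezers against each other) and then briefly cycles each queue's _xmit_lock to set __QUEUE_STATE_FROZEN, so any ->hard_start_xmit() thread that already checked the bit has drained by the time it returns. That avoids holding num_tx_queues spinlocks simultaneously, which overflowed lockdep's held-lock tracking on multiqueue devices. Its canonical caller is the netdev watchdog; a hedged sketch of that usage (the per-queue trans_start scan is compressed into a hypothetical helper):

	netif_tx_lock(dev);		/* freeze every TX queue */
	if (netif_device_present(dev) && netif_running(dev) &&
	    netif_carrier_ok(dev) && example_some_queue_timed_out(dev))
		dev->tx_timeout(dev);	/* driver recovery, e.g. reset the ring */
	netif_tx_unlock(dev);		/* unfreeze; reschedule queues not XOFF */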
@@ -1556,13 +1573,18 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 static inline void netif_tx_disable(struct net_device *dev)
 {
 	unsigned int i;
+	int cpu;
 
-	netif_tx_lock_bh(dev);
+	local_bh_disable();
+	cpu = smp_processor_id();
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		__netif_tx_lock(txq, cpu);
 		netif_tx_stop_queue(txq);
+		__netif_tx_unlock(txq);
 	}
-	netif_tx_unlock_bh(dev);
+	local_bh_enable();
 }
 
 static inline void netif_addr_lock(struct net_device *dev)
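netif_tx_disable() can no longer be built on netif_tx_lock_bh(), since the freeze protocol above returns without the per-queue locks held; it now takes each _xmit_lock itself around netif_tx_stop_queue(). Its typical call site is a driver's close/down path, sketched here with a hypothetical driver:

	static int example_close(struct net_device *dev)
	{
		netif_tx_disable(dev);	/* stop all TX queues under their locks */
		/* ... quiesce interrupts and DMA, then free the rings ... */
		return 0;
	}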
@@ -1645,6 +1667,8 @@ extern void dev_seq_stop(struct seq_file *seq, void *v);
 extern int netdev_class_create_file(struct class_attribute *class_attr);
 extern void netdev_class_remove_file(struct class_attribute *class_attr);
 
+extern char *netdev_drivername(struct net_device *dev, char *buffer, int len);
+
 extern void linkwatch_run_queue(void);
 
 extern int netdev_compute_features(unsigned long all, unsigned long one);
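netdev_drivername() is exported so the watchdog's timeout message can name the offending driver alongside the device. Its use in the 2.6.27 watchdog looks roughly like:

	char drivername[64];

	printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
	       dev->name, netdev_drivername(dev, drivername, 64));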