author     Linus Torvalds <torvalds@linux-foundation.org>  2010-03-29 17:41:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-03-29 17:41:18 -0400
commit     6631424fd2efd75e7394b318ad2a8597327857a9 (patch)
tree       09cc31036c9c53c5faec578ad68d443fd53e1b3d
parent     c45140a996b511bccdcbbdbea7e36f001826bdf2 (diff)
parent     c0cd884af045338476b8e69a61fceb3f34ff22f1 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (33 commits)
r8169: official fix for CVE-2009-4537 (overlength frame DMAs)
ipv6: Don't drop cache route entry unless timer actually expired.
tulip: Add missing parens.
r8169: fix broken register writes
pcnet_cs: add new id
bonding: fix broken multicast with round-robin mode
drivers/net: Fix continuation lines
e1000: do not modify tx_queue_len on link speed change
net: ipmr/ip6mr: prevent out-of-bounds vif_table access
ixgbe: Do not run all Diagnostic offline tests when VFs are active
igb: use correct bits to identify if managability is enabled
benet: Fix compile warnings in drivers/net/benet/be_ethtool.c
net: Add MSG_WAITFORONE flag to recvmmsg
e1000e: do not modify tx_queue_len on link speed change
igbvf: do not modify tx_queue_len on link speed change
ipv4: Restart rt_intern_hash after emergency rebuild (v2)
ipv4: Cleanup struct net dereference in rt_intern_hash
net: fix netlink address dumping in IPv4/IPv6
tulip: Fix null dereference in uli526x_rx_packet()
gianfar: fix undo of reserve()
...
35 files changed, 256 insertions(+), 144 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 28d4bf0e483e..1a203f9626f6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3083,6 +3083,7 @@ F:	include/scsi/*iscsi*
 ISDN SUBSYSTEM
 M:	Karsten Keil <isdn@linux-pingi.de>
 L:	isdn4linux@listserv.isdn4linux.de (subscribers-only)
+L:	netdev@vger.kernel.org
 W:	http://www.isdn4linux.de
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kkeil/isdn-2.6.git
 S:	Maintained
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 9ba547069db3..0ebd8208f606 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -84,7 +84,7 @@
 
 #define ATLX_DRIVER_VERSION "2.1.3"
 MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
-	Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
+Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(ATLX_DRIVER_VERSION);
 
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 9560d48944ab..51e1065e7897 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -490,7 +490,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
 {
	int ret, i;
	struct be_dma_mem ddrdma_cmd;
-	u64 pattern[2] = {0x5a5a5a5a5a5a5a5a, 0xa5a5a5a5a5a5a5a5};
+	u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL};
 
	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
	ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 430c02267d7e..5b92fbff431d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1235,6 +1235,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
			write_lock_bh(&bond->curr_slave_lock);
		}
	}
+
+	/* resend IGMP joins since all were sent on curr_active_slave */
+	if (bond->params.mode == BOND_MODE_ROUNDROBIN) {
+		bond_resend_igmp_join_requests(bond);
+	}
 }
 
 /**
@@ -4138,22 +4143,41 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *start_at;
	int i, slave_no, res = 1;
+	struct iphdr *iph = ip_hdr(skb);
 
	read_lock(&bond->lock);
 
	if (!BOND_IS_OK(bond))
		goto out;
-
	/*
-	 * Concurrent TX may collide on rr_tx_counter; we accept that
-	 * as being rare enough not to justify using an atomic op here
+	 * Start with the curr_active_slave that joined the bond as the
+	 * default for sending IGMP traffic.  For failover purposes one
+	 * needs to maintain some consistency for the interface that will
+	 * send the join/membership reports.  The curr_active_slave found
+	 * will send all of this type of traffic.
	 */
-	slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
+	if ((iph->protocol == htons(IPPROTO_IGMP)) &&
+	    (skb->protocol == htons(ETH_P_IP))) {
 
-	bond_for_each_slave(bond, slave, i) {
-		slave_no--;
-		if (slave_no < 0)
-			break;
+		read_lock(&bond->curr_slave_lock);
+		slave = bond->curr_active_slave;
+		read_unlock(&bond->curr_slave_lock);
+
+		if (!slave)
+			goto out;
+	} else {
+		/*
+		 * Concurrent TX may collide on rr_tx_counter; we accept
+		 * that as being rare enough not to justify using an
+		 * atomic op here.
+		 */
+		slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
+
+		bond_for_each_slave(bond, slave, i) {
+			slave_no--;
+			if (slave_no < 0)
+				break;
+		}
	}
 
	start_at = slave;
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 9902b33b7160..2f29c2131851 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -261,7 +261,6 @@ struct e1000_adapter {
	/* TX */
	struct e1000_tx_ring *tx_ring;      /* One per active queue */
	unsigned int restart_queue;
-	unsigned long tx_queue_len;
	u32 txd_cmd;
	u32 tx_int_delay;
	u32 tx_abs_int_delay;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 8be6faee43e6..b15ece26ed84 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -383,8 +383,6 @@ static void e1000_configure(struct e1000_adapter *adapter)
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
-
-	adapter->tx_queue_len = netdev->tx_queue_len;
 }
 
 int e1000_up(struct e1000_adapter *adapter)
@@ -503,7 +501,6 @@ void e1000_down(struct e1000_adapter *adapter)
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);
 
-	netdev->tx_queue_len = adapter->tx_queue_len;
	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);
@@ -2316,19 +2313,15 @@ static void e1000_watchdog(unsigned long data)
			       E1000_CTRL_RFCE) ? "RX" : ((ctrl &
			       E1000_CTRL_TFCE) ? "TX" : "None" )));
 
-			/* tweak tx_queue_len according to speed/duplex
-			 * and adjust the timeout factor */
-			netdev->tx_queue_len = adapter->tx_queue_len;
+			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				txb2b = false;
-				netdev->tx_queue_len = 10;
				adapter->tx_timeout_factor = 16;
				break;
			case SPEED_100:
				txb2b = false;
-				netdev->tx_queue_len = 100;
				/* maybe add some timeout factor ? */
				break;
			}
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index c2ec095d2163..118bdf483593 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -279,7 +279,6 @@ struct e1000_adapter {
 
	struct napi_struct napi;
 
-	unsigned long tx_queue_len;
	unsigned int restart_queue;
	u32 txd_cmd;
 
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 88d54d3efcef..e1cceb606576 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2289,8 +2289,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
	ew32(TCTL, tctl);
 
	e1000e_config_collision_dist(hw);
-
-	adapter->tx_queue_len = adapter->netdev->tx_queue_len;
 }
 
 /**
@@ -2877,7 +2875,6 @@ void e1000e_down(struct e1000_adapter *adapter)
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);
 
-	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);
	adapter->link_speed = 0;
	adapter->link_duplex = 0;
@@ -3588,21 +3585,15 @@ static void e1000_watchdog_task(struct work_struct *work)
				       "link gets many collisions.\n");
			}
 
-			/*
-			 * tweak tx_queue_len according to speed/duplex
-			 * and adjust the timeout factor
-			 */
-			netdev->tx_queue_len = adapter->tx_queue_len;
+			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				txb2b = 0;
-				netdev->tx_queue_len = 10;
				adapter->tx_timeout_factor = 16;
				break;
			case SPEED_100:
				txb2b = 0;
-				netdev->tx_queue_len = 100;
				adapter->tx_timeout_factor = 10;
				break;
			}
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index b6715553cf17..669de028d44f 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -2393,6 +2393,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, alignamount);
+	GFAR_CB(skb)->alignamount = alignamount;
 
	return skb;
 }
@@ -2533,13 +2534,13 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
				newskb = skb;
			else if (skb) {
				/*
-				 * We need to reset ->data to what it
+				 * We need to un-reserve() the skb to what it
				 * was before gfar_new_skb() re-aligned
				 * it to an RXBUF_ALIGNMENT boundary
				 * before we put the skb back on the
				 * recycle list.
				 */
-				skb->data = skb->head + NET_SKB_PAD;
+				skb_reserve(skb, -GFAR_CB(skb)->alignamount);
				__skb_queue_head(&priv->rx_recycle, skb);
			}
		} else {
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 3d72dc43dca5..17d25e714236 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -566,6 +566,12 @@ struct rxfcb {
	u16	vlctl;		/* VLAN control word */
 };
 
+struct gianfar_skb_cb {
+	int	alignamount;
+};
+
+#define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb))
+
 struct rmon_mib
 {
	u32	tr64;	/* 0x.680 - Transmit and Receive 64-byte Frame Counter */
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 2a8a886b37eb..be8d010e4021 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -1367,7 +1367,8 @@ out:
  *  igb_enable_mng_pass_thru - Enable processing of ARP's
  *  @hw: pointer to the HW structure
  *
- *  Verifies the hardware needs to allow ARPs to be processed by the host.
+ *  Verifies the hardware needs to leave interface enabled so that frames can
+ *  be directed to and from the management interface.
  **/
 bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
 {
@@ -1380,8 +1381,7 @@ bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
 
	manc = rd32(E1000_MANC);
 
-	if (!(manc & E1000_MANC_RCV_TCO_EN) ||
-	    !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+	if (!(manc & E1000_MANC_RCV_TCO_EN))
		goto out;
 
	if (hw->mac.arc_subsystem_valid) {
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index a1774b29d222..debeee2dc717 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -198,7 +198,6 @@ struct igbvf_adapter {
	struct igbvf_ring *tx_ring /* One per active queue */
	____cacheline_aligned_in_smp;
 
-	unsigned long tx_queue_len;
	unsigned int restart_queue;
	u32 txd_cmd;
 
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index a77afd8a14bb..b41037ed8083 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1304,8 +1304,6 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter)
 
	/* enable Report Status bit */
	adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
-
-	adapter->tx_queue_len = adapter->netdev->tx_queue_len;
 }
 
 /**
@@ -1524,7 +1522,6 @@ void igbvf_down(struct igbvf_adapter *adapter)
 
	del_timer_sync(&adapter->watchdog_timer);
 
-	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);
 
	/* record the stats before reset*/
@@ -1857,21 +1854,15 @@ static void igbvf_watchdog_task(struct work_struct *work)
		                           &adapter->link_duplex);
		igbvf_print_link_info(adapter);
 
-		/*
-		 * tweak tx_queue_len according to speed/duplex
-		 * and adjust the timeout factor
-		 */
-		netdev->tx_queue_len = adapter->tx_queue_len;
+		/* adjust timeout factor according to speed/duplex */
		adapter->tx_timeout_factor = 1;
		switch (adapter->link_speed) {
		case SPEED_10:
			txb2b = 0;
-			netdev->tx_queue_len = 10;
			adapter->tx_timeout_factor = 16;
			break;
		case SPEED_100:
			txb2b = 0;
-			netdev->tx_queue_len = 100;
			/* maybe add some timeout factor ? */
			break;
		}
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 19e94ee155a2..79c35ae3718c 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -204,14 +204,17 @@ enum ixgbe_ring_f_enum {
 #define IXGBE_MAX_FDIR_INDICES 64
 #ifdef IXGBE_FCOE
 #define IXGBE_MAX_FCOE_INDICES 8
+#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
+#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
+#else
+#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
+#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
 #endif /* IXGBE_FCOE */
 struct ixgbe_ring_feature {
	int indices;
	int mask;
 } ____cacheline_internodealigned_in_smp;
 
-#define MAX_RX_QUEUES 128
-#define MAX_TX_QUEUES 128
 
 #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
                               ? 8 : 1)
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 7949a446e4c7..1959ef76c962 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1853,6 +1853,26 @@ static void ixgbe_diag_test(struct net_device *netdev,
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;
 
+		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+			int i;
+			for (i = 0; i < adapter->num_vfs; i++) {
+				if (adapter->vfinfo[i].clear_to_send) {
+					netdev_warn(netdev, "%s",
+						    "offline diagnostic is not "
+						    "supported when VFs are "
+						    "present\n");
+					data[0] = 1;
+					data[1] = 1;
+					data[2] = 1;
+					data[3] = 1;
+					eth_test->flags |= ETH_TEST_FL_FAILED;
+					clear_bit(__IXGBE_TESTING,
+						  &adapter->state);
+					goto skip_ol_tests;
+				}
+			}
+		}
+
		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
@@ -1908,6 +1928,7 @@ skip_loopback:
 
		clear_bit(__IXGBE_TESTING, &adapter->state);
	}
+skip_ol_tests:
	msleep_interruptible(4 * 1000);
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 700cfc0aa1b9..9276d5965b0d 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -202,6 +202,15 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
+			/* max number of buffers allowed in one DDP context */
+			if (j >= IXGBE_BUFFCNT_MAX) {
+				netif_err(adapter, drv, adapter->netdev,
+					  "xid=%x:%d,%d,%d:addr=%llx "
+					  "not enough descriptors\n",
+					  xid, i, j, dmacount, (u64)addr);
+				goto out_noddp_free;
+			}
+
			/* get the offset of length of current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
@@ -227,20 +236,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
			len -= thislen;
			addr += thislen;
			j++;
-			/* max number of buffers allowed in one DDP context */
-			if (j > IXGBE_BUFFCNT_MAX) {
-				DPRINTK(DRV, ERR, "xid=%x:%d,%d,%d:addr=%llx "
-					"not enough descriptors\n",
-					xid, i, j, dmacount, (u64)addr);
-				goto out_noddp_free;
-			}
		}
	}
	/* only the last buffer may have non-full bufflen */
	lastsize = thisoff + thislen;
 
	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
-	fcbuff |= (j << IXGBE_FCBUFF_BUFFCNT_SHIFT);
+	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
	fcbuff |= (IXGBE_FCBUFF_VALID);
 
@@ -520,6 +522,9 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
	/* Enable L2 eth type filter for FCoE */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
			(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
+	/* Enable L2 eth type filter for FIP */
+	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
+			(ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
	if (adapter->ring_feature[RING_F_FCOE].indices) {
		/* Use multiple rx queues for FCoE by redirection table */
		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
@@ -530,6 +535,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
		}
		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
+		fcoe_i = f->mask;
+		fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
+		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
+				IXGBE_ETQS_QUEUE_EN |
+				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
	} else {
		/* Use single rx queue for FCoE */
		fcoe_i = f->mask;
@@ -539,6 +550,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
				IXGBE_ETQS_QUEUE_EN |
				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
	}
+	/* send FIP frames to the first FCoE queue */
+	fcoe_i = f->mask;
+	fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
+			IXGBE_ETQS_QUEUE_EN |
+			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
 
	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
			IXGBE_FCRXCTRL_FCOELLI |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index d75c46ff31f6..0c553f6cb534 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3056,6 +3056,14 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		msleep(1);
	ixgbe_down(adapter);
+	/*
+	 * If SR-IOV enabled then wait a bit before bringing the adapter
+	 * back up to give the VFs time to respond to the reset.  The
+	 * two second wait is based upon the watchdog timer cycle in
+	 * the VF driver.
+	 */
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+		msleep(2000);
	ixgbe_up(adapter);
	clear_bit(__IXGBE_RESETTING, &adapter->state);
 }
@@ -3236,13 +3244,15 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 
	/* disable receive for all VFs and wait one second */
	if (adapter->num_vfs) {
-		for (i = 0 ; i < adapter->num_vfs; i++)
-			adapter->vfinfo[i].clear_to_send = 0;
-
		/* ping all the active vfs to let them know we are going down */
		ixgbe_ping_all_vfs(adapter);
+
		/* Disable all VFTE/VFRE TX/RX */
		ixgbe_disable_tx_rx(adapter);
+
+		/* Mark all the VFs as inactive */
+		for (i = 0 ; i < adapter->num_vfs; i++)
+			adapter->vfinfo[i].clear_to_send = 0;
	}
 
	/* disable receives */
@@ -5638,7 +5648,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 
 #ifdef IXGBE_FCOE
	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
-	    (skb->protocol == htons(ETH_P_FCOE))) {
+	    ((skb->protocol == htons(ETH_P_FCOE)) ||
+	     (skb->protocol == htons(ETH_P_FIP)))) {
		txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
		txq += adapter->ring_feature[RING_F_FCOE].mask;
		return txq;
@@ -5685,18 +5696,25 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
 
	tx_ring = adapter->tx_ring[skb->queue_mapping];
 
-	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
-	    (skb->protocol == htons(ETH_P_FCOE))) {
-		tx_flags |= IXGBE_TX_FLAGS_FCOE;
 #ifdef IXGBE_FCOE
+	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 #ifdef CONFIG_IXGBE_DCB
-		tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
-			      << IXGBE_TX_FLAGS_VLAN_SHIFT);
-		tx_flags |= ((adapter->fcoe.up << 13)
-			     << IXGBE_TX_FLAGS_VLAN_SHIFT);
-#endif
+		/* for FCoE with DCB, we force the priority to what
+		 * was specified by the switch */
+		if ((skb->protocol == htons(ETH_P_FCOE)) ||
+		    (skb->protocol == htons(ETH_P_FIP))) {
+			tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
+				      << IXGBE_TX_FLAGS_VLAN_SHIFT);
+			tx_flags |= ((adapter->fcoe.up << 13)
+				     << IXGBE_TX_FLAGS_VLAN_SHIFT);
+		}
 #endif
+		/* flag for FCoE offloads */
+		if (skb->protocol == htons(ETH_P_FCOE))
+			tx_flags |= IXGBE_TX_FLAGS_FCOE;
	}
+#endif
+
	/* four things can cause us to need a context descriptor */
	if (skb_is_gso(skb) ||
	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -6051,7 +6069,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
		indices += min_t(unsigned int, num_possible_cpus(),
				 IXGBE_MAX_FCOE_INDICES);
 #endif
-	indices = min_t(unsigned int, indices, MAX_TX_QUEUES);
	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
	if (!netdev) {
		err = -ENOMEM;
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 0ed5ab37cc53..4ec6dc1a5b75 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1298,6 +1298,7 @@
 #define IXGBE_ETQF_FILTER_BCN            1
 #define IXGBE_ETQF_FILTER_FCOE           2
 #define IXGBE_ETQF_FILTER_1588           3
+#define IXGBE_ETQF_FILTER_FIP            4
 /* VLAN Control Bit Masks */
 #define IXGBE_VLNCTRL_VET       0x0000FFFF  /* bits 0-15 */
 #define IXGBE_VLNCTRL_CFI       0x10000000  /* bit 28 */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index d6cbd943a6f0..1bbbef3ee3f4 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -2943,9 +2943,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
-	unsigned int offset = 0, size, count = 0, i;
+	unsigned int offset = 0, size, count = 0;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
+	int i;
 
	i = tx_ring->next_to_use;
 
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 144d2e880422..0f703838e21a 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
 
 #define _NETXEN_NIC_LINUX_MAJOR 4
 #define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 72
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.72"
+#define _NETXEN_NIC_LINUX_SUBVERSION 73
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.73"
 
 #define NETXEN_VERSION_CODE(a, b, c)	(((a) << 24) + ((b) << 16) + (c))
 #define _major(v)	(((v) >> 24) & 0xff)
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 2a8ef5fc9663..f26e54716c88 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -669,13 +669,15 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
		}
		sds_ring->desc_head = (struct status_desc *)addr;
 
-		sds_ring->crb_sts_consumer =
-			netxen_get_ioaddr(adapter,
-			recv_crb_registers[port].crb_sts_consumer[ring]);
+		if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+			sds_ring->crb_sts_consumer =
+				netxen_get_ioaddr(adapter,
+				recv_crb_registers[port].crb_sts_consumer[ring]);
 
			sds_ring->crb_intr_mask =
				netxen_get_ioaddr(adapter,
				recv_crb_registers[port].sw_int_mask[ring]);
+		}
	}
 
 
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 1c63610ead42..7eb925a9f36e 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -761,7 +761,7 @@ nx_get_bios_version(struct netxen_adapter *adapter)
	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
		bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
						+ NX_UNI_BIOS_VERSION_OFF));
-		return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) +
+		return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
			(bios_ver >> 24);
	} else
		return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 9a7a0f3c36c4..01808b28d1b6 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -604,16 +604,14 @@ netxen_cleanup_pci_map(struct netxen_adapter *adapter)
 static int
 netxen_setup_pci_map(struct netxen_adapter *adapter)
 {
-	void __iomem *mem_ptr0 = NULL;
-	void __iomem *mem_ptr1 = NULL;
-	void __iomem *mem_ptr2 = NULL;
	void __iomem *db_ptr = NULL;
 
	resource_size_t mem_base, db_base;
-	unsigned long mem_len, db_len = 0, pci_len0 = 0;
+	unsigned long mem_len, db_len = 0;
 
	struct pci_dev *pdev = adapter->pdev;
	int pci_func = adapter->ahw.pci_func;
+	struct netxen_hardware_context *ahw = &adapter->ahw;
 
	int err = 0;
 
@@ -630,24 +628,40 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
 
	/* 128 Meg of memory */
	if (mem_len == NETXEN_PCI_128MB_SIZE) {
-		mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
-		mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
+
+		ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
+		ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
			SECOND_PAGE_GROUP_SIZE);
-		mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
+		ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
			THIRD_PAGE_GROUP_SIZE);
-		pci_len0 = FIRST_PAGE_GROUP_SIZE;
+		if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL ||
+				ahw->pci_base2 == NULL) {
+			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+			err = -EIO;
+			goto err_out;
+		}
+
+		ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE;
+
	} else if (mem_len == NETXEN_PCI_32MB_SIZE) {
-		mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
-		mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
+
+		ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
+		ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
			SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
+		if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) {
+			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+			err = -EIO;
+			goto err_out;
+		}
+
	} else if (mem_len == NETXEN_PCI_2MB_SIZE) {
 
-		mem_ptr0 = pci_ioremap_bar(pdev, 0);
-		if (mem_ptr0 == NULL) {
+		ahw->pci_base0 = pci_ioremap_bar(pdev, 0);
+		if (ahw->pci_base0 == NULL) {
			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
			return -EIO;
		}
-		pci_len0 = mem_len;
+		ahw->pci_len0 = mem_len;
	} else {
		return -EIO;
	}
@@ -656,11 +670,6 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
 
	dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
 
-	adapter->ahw.pci_base0 = mem_ptr0;
-	adapter->ahw.pci_len0 = pci_len0;
-	adapter->ahw.pci_base1 = mem_ptr1;
-	adapter->ahw.pci_base2 = mem_ptr2;
-
	if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
		adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
			NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 776cad2f5715..1028fcb91a28 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1549,6 +1549,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x021b, 0x0101),
	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x08a1, 0xc0ab),
	PCMCIA_PFC_DEVICE_PROD_ID12(0, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4),
+	PCMCIA_PFC_DEVICE_PROD_ID12(0, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e),
	PCMCIA_PFC_DEVICE_PROD_ID12(0, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff),
	PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae),
	PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033),
@@ -1740,7 +1741,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
	PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
	PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
	PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"),
-	PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
+	PCMCIA_DEVICE_CIS_PROD_ID12("Allied Telesis,K.K", "Ethernet LAN Card", 0x2ad62f3c, 0x9fd2f0a2, "cis/LA-PCM.cis"),
	PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"),
	PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
	PCMCIA_DEVICE_CIS_PROD_ID12("PMX   ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"),
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 9d3ebf3e975e..96740051cdcc 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -186,8 +186,13 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
 
 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
 
-static int rx_copybreak = 200;
-static int use_dac = -1;
+/*
+ * we set our copybreak very high so that we don't have
+ * to allocate 16k frames all the time (see note in
+ * rtl8169_open()
+ */
+static int rx_copybreak = 16383;
+static int use_dac;
 static struct {
	u32 msg_enable;
 } debug = { -1 };
@@ -511,8 +516,7 @@ MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
 module_param(rx_copybreak, int, 0);
 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
 module_param(use_dac, int, 0);
-MODULE_PARM_DESC(use_dac, "Enable PCI DAC. -1 defaults on for PCI Express only."
-" Unsafe on 32 bit PCI slot.");
+MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
 module_param_named(debug, debug.msg_enable, int, 0);
 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
 MODULE_LICENSE("GPL");
@@ -2821,8 +2825,8 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
	spin_lock_irq(&tp->lock);
 
	RTL_W8(Cfg9346, Cfg9346_Unlock);
-	RTL_W32(MAC0, low);
	RTL_W32(MAC4, high);
+	RTL_W32(MAC0, low);
	RTL_W8(Cfg9346, Cfg9346_Lock);
 
	spin_unlock_irq(&tp->lock);
@@ -2974,7 +2978,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	void __iomem *ioaddr;
	unsigned int i;
	int rc;
-	int this_use_dac = use_dac;
 
	if (netif_msg_drv(&debug)) {
		printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
@@ -3040,17 +3043,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
	tp->cp_cmd = PCIMulRW | RxChkSum;
 
-	tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-	if (!tp->pcie_cap)
-		netif_info(tp, probe, dev, "no PCI Express capability\n");
-
-	if (this_use_dac < 0)
-		this_use_dac = tp->pcie_cap != 0;
-
	if ((sizeof(dma_addr_t) > 4) &&
-	    this_use_dac &&
-	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-		netif_info(tp, probe, dev, "using 64-bit DMA\n");
+	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
		tp->cp_cmd |= PCIDAC;
		dev->features |= NETIF_F_HIGHDMA;
	} else {
@@ -3069,6 +3063,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
		goto err_out_free_res_4;
	}
 
+	tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	if (!tp->pcie_cap)
+		netif_info(tp, probe, dev, "no PCI Express capability\n");
+
	RTL_W16(IntrMask, 0x0000);
 
	/* Soft reset the chip. */
@@ -3224,9 +3222,13 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
 }
 
 static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
-				  struct net_device *dev)
+				  unsigned int mtu)
 {
-	unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+	unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+	if (max_frame != 16383)
+		printk(KERN_WARNING "WARNING! Changing of MTU on this NIC"
+			"May lead to frame reception errors!\n");
 
	tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
 }
@@ -3238,7 +3240,17 @@ static int rtl8169_open(struct net_device *dev)
	int retval = -ENOMEM;
 
 
-	rtl8169_set_rxbufsize(tp, dev);
+	/*
+	 * Note that we use a magic value here, its wierd I know
+	 * its done because, some subset of rtl8169 hardware suffers from
+	 * a problem in which frames received that are longer than
+	 * the size set in RxMaxSize register return garbage sizes
+	 * when received. To avoid this we need to turn off filtering,
+	 * which is done by setting a value of 16383 in the RxMaxSize register
+	 * and allocating 16k frames to handle the largest possible rx value
+	 * thats what the magic math below does.
+	 */
+	rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN);
 
	/*
	 * Rx and Tx desscriptors needs 256 bytes alignment.
@@ -3891,7 +3903,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
 
	rtl8169_down(dev);
 
-	rtl8169_set_rxbufsize(tp, dev);
+	rtl8169_set_rxbufsize(tp, dev->mtu);
 
	ret = rtl8169_init_ring(dev);
	if (ret < 0)
@@ -4754,8 +4766,8 @@ static void rtl_set_rx_mode(struct net_device *dev)
		mc_filter[1] = swab32(data);
	}
 
-	RTL_W32(MAR0 + 0, mc_filter[0]);
	RTL_W32(MAR0 + 4, mc_filter[1]);
+	RTL_W32(MAR0 + 0, mc_filter[0]);
 
	RTL_W32(RxConfig, tmp);
 
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 0ab05af237e5..a4f09d490531 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -851,13 +851,15 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
 
		if ( !(rdes0 & 0x8000) ||
			((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
+			struct sk_buff *new_skb = NULL;
+
			skb = rxptr->rx_skb_ptr;
 
			/* Good packet, send to upper layer */
			/* Shorst packet used new SKB */
-			if ( (rxlen < RX_COPY_SIZE) &&
-				( (skb = dev_alloc_skb(rxlen + 2) )
-				!= NULL) ) {
+			if ((rxlen < RX_COPY_SIZE) &&
+				(((new_skb = dev_alloc_skb(rxlen + 2)) != NULL))) {
+				skb = new_skb;
				/* size less than COPY_SIZE, allocate a rxlen SKB */
				skb_reserve(skb, 2); /* 16byte align */
				memcpy(skb_put(skb, rxlen),
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index e91db4b38012..175d202ab37e 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -745,6 +745,7 @@ static struct pcmcia_device_id serial_ids[] = {
	PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "REM10", 0x2e3ee845, 0x76df1d29),
	PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "XEM5600", 0x2e3ee845, 0xf1403719),
	PCMCIA_PFC_DEVICE_PROD_ID12(1, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4),
+	PCMCIA_PFC_DEVICE_PROD_ID12(1, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e),
	PCMCIA_PFC_DEVICE_PROD_ID12(1, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff),
	PCMCIA_PFC_DEVICE_PROD_ID12(1, "Gateway 2000", "XJEM3336", 0xdd9989be, 0x662c394c),
	PCMCIA_PFC_DEVICE_PROD_ID12(1, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae),
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 7b3aae2052a6..354cc5617f8b 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -255,6 +255,7 @@ struct ucred {
 #define MSG_ERRQUEUE	0x2000	/* Fetch message from error queue */
 #define MSG_NOSIGNAL	0x4000	/* Do not generate SIGPIPE */
 #define MSG_MORE	0x8000	/* Sender will send more */
+#define MSG_WAITFORONE	0x10000	/* recvmmsg(): block until 1+ packets avail */
 
 #define MSG_EOF         MSG_FIN
 
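For context on the MSG_WAITFORONE flag added above ("net: Add MSG_WAITFORONE flag to recvmmsg"): it makes recvmmsg() block only until the first datagram arrives, after which the kernel behaves as if MSG_DONTWAIT were set, so the remaining vector slots are filled only from packets already queued. A minimal userspace sketch of that usage follows; it is illustrative only and not part of this patch, and the function name, buffer sizes, and the assumption of an already-bound datagram socket fd are mine.

/* Sketch: batch-receive UDP datagrams, blocking only for the first one. */
#define _GNU_SOURCE
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>

#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000	/* same value as the define added above */
#endif

#define VLEN  8
#define BUFSZ 1500

static int read_batch(int fd)
{
	struct mmsghdr msgs[VLEN];
	struct iovec iovecs[VLEN];
	char bufs[VLEN][BUFSZ];
	int i, n;

	memset(msgs, 0, sizeof(msgs));
	for (i = 0; i < VLEN; i++) {
		iovecs[i].iov_base = bufs[i];
		iovecs[i].iov_len = BUFSZ;
		msgs[i].msg_hdr.msg_iov = &iovecs[i];
		msgs[i].msg_hdr.msg_iovlen = 1;
	}

	/* Blocks for the first datagram; once one message is in, the
	 * remaining slots are filled only from the existing queue. */
	n = recvmmsg(fd, msgs, VLEN, MSG_WAITFORONE, NULL);
	if (n < 0) {
		perror("recvmmsg");
		return -1;
	}
	for (i = 0; i < n; i++)
		printf("msg %d: %u bytes\n", i, msgs[i].msg_len);
	return n;
}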
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 51ca946e3392..3feb2b390308 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1194,7 +1194,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
		hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
			if (idx < s_idx)
				goto cont;
-			if (idx > s_idx)
+			if (h > s_h || idx > s_idx)
				s_ip_idx = 0;
			in_dev = __in_dev_get_rcu(dev);
			if (!in_dev)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 0b9d03c54dc3..d0a6092a67be 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -1616,17 +1616,20 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) | |||
1616 | int ct; | 1616 | int ct; |
1617 | struct rtnexthop *nhp; | 1617 | struct rtnexthop *nhp; |
1618 | struct net *net = mfc_net(c); | 1618 | struct net *net = mfc_net(c); |
1619 | struct net_device *dev = net->ipv4.vif_table[c->mfc_parent].dev; | ||
1620 | u8 *b = skb_tail_pointer(skb); | 1619 | u8 *b = skb_tail_pointer(skb); |
1621 | struct rtattr *mp_head; | 1620 | struct rtattr *mp_head; |
1622 | 1621 | ||
1623 | if (dev) | 1622 | /* If cache is unresolved, don't try to parse IIF and OIF */ |
1624 | RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex); | 1623 | if (c->mfc_parent > MAXVIFS) |
1624 | return -ENOENT; | ||
1625 | |||
1626 | if (VIF_EXISTS(net, c->mfc_parent)) | ||
1627 | RTA_PUT(skb, RTA_IIF, 4, &net->ipv4.vif_table[c->mfc_parent].dev->ifindex); | ||
1625 | 1628 | ||
1626 | mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); | 1629 | mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); |
1627 | 1630 | ||
1628 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { | 1631 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { |
1629 | if (c->mfc_un.res.ttls[ct] < 255) { | 1632 | if (VIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) { |
1630 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) | 1633 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) |
1631 | goto rtattr_failure; | 1634 | goto rtattr_failure; |
1632 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); | 1635 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); |
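The ipmr hunk (and its ip6mr twin further down) closes an out-of-bounds read: an unresolved multicast cache entry carries an out-of-range sentinel parent index, so indexing vif_table[] with it, or with a VIF that has since been deleted, dereferences garbage. A generic sketch of the same "validate the index, then check the slot is occupied" pattern, with invented names (not kernel code):

#include <stdio.h>
#include <stddef.h>

#define MAX_SLOTS 32

struct slot {
    const char *name;           /* NULL while the slot is free */
};

static struct slot table[MAX_SLOTS];

#define SLOT_EXISTS(i) ((i) < MAX_SLOTS && table[i].name != NULL)

/* Returns the slot's name, or NULL for unresolved or deleted entries. */
static const char *slot_name(unsigned int id)
{
    if (id >= MAX_SLOTS)        /* e.g. an out-of-range "unresolved" marker */
        return NULL;
    if (!SLOT_EXISTS(id))       /* slot was deleted in the meantime */
        return NULL;
    return table[id].name;
}

int main(void)
{
    const char *n;

    table[3].name = "eth0";
    n = slot_name(3);
    printf("id 3: %s\n", n ? n : "(none)");
    n = slot_name(0xFFFFu);
    printf("id 0xffff: %s\n", n ? n : "(none)");
    return 0;
}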
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 54fd68c14c87..d413b57be9b3 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1097,7 +1097,7 @@ static int slow_chain_length(const struct rtable *head) | |||
1097 | } | 1097 | } |
1098 | 1098 | ||
1099 | static int rt_intern_hash(unsigned hash, struct rtable *rt, | 1099 | static int rt_intern_hash(unsigned hash, struct rtable *rt, |
1100 | struct rtable **rp, struct sk_buff *skb) | 1100 | struct rtable **rp, struct sk_buff *skb, int ifindex) |
1101 | { | 1101 | { |
1102 | struct rtable *rth, **rthp; | 1102 | struct rtable *rth, **rthp; |
1103 | unsigned long now; | 1103 | unsigned long now; |
@@ -1212,11 +1212,16 @@ restart: | |||
1212 | slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) { | 1212 | slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) { |
1213 | struct net *net = dev_net(rt->u.dst.dev); | 1213 | struct net *net = dev_net(rt->u.dst.dev); |
1214 | int num = ++net->ipv4.current_rt_cache_rebuild_count; | 1214 | int num = ++net->ipv4.current_rt_cache_rebuild_count; |
1215 | if (!rt_caching(dev_net(rt->u.dst.dev))) { | 1215 | if (!rt_caching(net)) { |
1216 | printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n", | 1216 | printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n", |
1217 | rt->u.dst.dev->name, num); | 1217 | rt->u.dst.dev->name, num); |
1218 | } | 1218 | } |
1219 | rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev)); | 1219 | rt_emergency_hash_rebuild(net); |
1220 | spin_unlock_bh(rt_hash_lock_addr(hash)); | ||
1221 | |||
1222 | hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, | ||
1223 | ifindex, rt_genid(net)); | ||
1224 | goto restart; | ||
1220 | } | 1225 | } |
1221 | } | 1226 | } |
1222 | 1227 | ||
@@ -1477,7 +1482,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1477 | &netevent); | 1482 | &netevent); |
1478 | 1483 | ||
1479 | rt_del(hash, rth); | 1484 | rt_del(hash, rth); |
1480 | if (!rt_intern_hash(hash, rt, &rt, NULL)) | 1485 | if (!rt_intern_hash(hash, rt, &rt, NULL, rt->fl.oif)) |
1481 | ip_rt_put(rt); | 1486 | ip_rt_put(rt); |
1482 | goto do_next; | 1487 | goto do_next; |
1483 | } | 1488 | } |
@@ -1931,7 +1936,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1931 | 1936 | ||
1932 | in_dev_put(in_dev); | 1937 | in_dev_put(in_dev); |
1933 | hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); | 1938 | hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); |
1934 | return rt_intern_hash(hash, rth, NULL, skb); | 1939 | return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex); |
1935 | 1940 | ||
1936 | e_nobufs: | 1941 | e_nobufs: |
1937 | in_dev_put(in_dev); | 1942 | in_dev_put(in_dev); |
@@ -2098,7 +2103,7 @@ static int ip_mkroute_input(struct sk_buff *skb, | |||
2098 | /* put it into the cache */ | 2103 | /* put it into the cache */ |
2099 | hash = rt_hash(daddr, saddr, fl->iif, | 2104 | hash = rt_hash(daddr, saddr, fl->iif, |
2100 | rt_genid(dev_net(rth->u.dst.dev))); | 2105 | rt_genid(dev_net(rth->u.dst.dev))); |
2101 | return rt_intern_hash(hash, rth, NULL, skb); | 2106 | return rt_intern_hash(hash, rth, NULL, skb, fl->iif); |
2102 | } | 2107 | } |
2103 | 2108 | ||
2104 | /* | 2109 | /* |
@@ -2255,7 +2260,7 @@ local_input: | |||
2255 | } | 2260 | } |
2256 | rth->rt_type = res.type; | 2261 | rth->rt_type = res.type; |
2257 | hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net)); | 2262 | hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net)); |
2258 | err = rt_intern_hash(hash, rth, NULL, skb); | 2263 | err = rt_intern_hash(hash, rth, NULL, skb, fl.iif); |
2259 | goto done; | 2264 | goto done; |
2260 | 2265 | ||
2261 | no_route: | 2266 | no_route: |
@@ -2502,7 +2507,7 @@ static int ip_mkroute_output(struct rtable **rp, | |||
2502 | if (err == 0) { | 2507 | if (err == 0) { |
2503 | hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif, | 2508 | hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif, |
2504 | rt_genid(dev_net(dev_out))); | 2509 | rt_genid(dev_net(dev_out))); |
2505 | err = rt_intern_hash(hash, rth, rp, NULL); | 2510 | err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif); |
2506 | } | 2511 | } |
2507 | 2512 | ||
2508 | return err; | 2513 | return err; |
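The route.c change threads the interface index into rt_intern_hash() so that, when a chain grows past rt_chain_length_max and the emergency rebuild bumps the generation id (which rt_hash() mixes into the hash), the insertion can recompute the now differently salted hash and restart rather than placing the entry in a bucket no lookup will consult. A standalone sketch of that recompute-and-retry shape, with invented names and a toy hash (not kernel code):

#include <stdio.h>

#define NBUCKETS  8
#define CHAIN_MAX 4

static unsigned int genid = 1;
static int chain_len[NBUCKETS];     /* entry counts, stand-in for hash chains */

static unsigned int hash_key(unsigned int key)
{
    return (key ^ genid * 2654435761u) % NBUCKETS;  /* genid salts the hash */
}

static void emergency_rebuild(void)
{
    unsigned int i;

    genid++;                        /* lookups now use the new salt ... */
    for (i = 0; i < NBUCKETS; i++)
        chain_len[i] = 0;           /* ... and old chains are flushed here */
}

static void intern(unsigned int key)
{
    unsigned int h = hash_key(key);

restart:
    if (chain_len[h] > CHAIN_MAX) {
        emergency_rebuild();
        h = hash_key(key);          /* the fix: re-hash under the new genid */
        goto restart;
    }
    chain_len[h]++;
}

int main(void)
{
    unsigned int k;

    for (k = 0; k < 100; k++)
        intern(k);
    printf("rebuilds triggered: %u\n", genid - 1);
    return 0;
}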
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 3381b4317c27..7e567ae5eaab 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -3610,7 +3610,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, | |||
3610 | hlist_for_each_entry_rcu(dev, node, head, index_hlist) { | 3610 | hlist_for_each_entry_rcu(dev, node, head, index_hlist) { |
3611 | if (idx < s_idx) | 3611 | if (idx < s_idx) |
3612 | goto cont; | 3612 | goto cont; |
3613 | if (idx > s_idx) | 3613 | if (h > s_h || idx > s_idx) |
3614 | s_ip_idx = 0; | 3614 | s_ip_idx = 0; |
3615 | ip_idx = 0; | 3615 | ip_idx = 0; |
3616 | if ((idev = __in6_dev_get(dev)) == NULL) | 3616 | if ((idev = __in6_dev_get(dev)) == NULL) |
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 23e4ac0cc30e..27acfb58650a 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -1695,17 +1695,20 @@ ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm) | |||
1695 | int ct; | 1695 | int ct; |
1696 | struct rtnexthop *nhp; | 1696 | struct rtnexthop *nhp; |
1697 | struct net *net = mfc6_net(c); | 1697 | struct net *net = mfc6_net(c); |
1698 | struct net_device *dev = net->ipv6.vif6_table[c->mf6c_parent].dev; | ||
1699 | u8 *b = skb_tail_pointer(skb); | 1698 | u8 *b = skb_tail_pointer(skb); |
1700 | struct rtattr *mp_head; | 1699 | struct rtattr *mp_head; |
1701 | 1700 | ||
1702 | if (dev) | 1701 | /* If cache is unresolved, don't try to parse IIF and OIF */ |
1703 | RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex); | 1702 | if (c->mf6c_parent > MAXMIFS) |
1703 | return -ENOENT; | ||
1704 | |||
1705 | if (MIF_EXISTS(net, c->mf6c_parent)) | ||
1706 | RTA_PUT(skb, RTA_IIF, 4, &net->ipv6.vif6_table[c->mf6c_parent].dev->ifindex); | ||
1704 | 1707 | ||
1705 | mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); | 1708 | mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); |
1706 | 1709 | ||
1707 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { | 1710 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { |
1708 | if (c->mfc_un.res.ttls[ct] < 255) { | 1711 | if (MIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) { |
1709 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) | 1712 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) |
1710 | goto rtattr_failure; | 1713 | goto rtattr_failure; |
1711 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); | 1714 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 7fcb0e5d1213..0d7713c5c206 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -890,12 +890,17 @@ static struct dst_entry *ip6_negative_advice(struct dst_entry *dst) | |||
890 | struct rt6_info *rt = (struct rt6_info *) dst; | 890 | struct rt6_info *rt = (struct rt6_info *) dst; |
891 | 891 | ||
892 | if (rt) { | 892 | if (rt) { |
893 | if (rt->rt6i_flags & RTF_CACHE) | 893 | if (rt->rt6i_flags & RTF_CACHE) { |
894 | ip6_del_rt(rt); | 894 | if (rt6_check_expired(rt)) { |
895 | else | 895 | ip6_del_rt(rt); |
896 | dst = NULL; | ||
897 | } | ||
898 | } else { | ||
896 | dst_release(dst); | 899 | dst_release(dst); |
900 | dst = NULL; | ||
901 | } | ||
897 | } | 902 | } |
898 | return NULL; | 903 | return dst; |
899 | } | 904 | } |
900 | 905 | ||
901 | static void ip6_link_failure(struct sk_buff *skb) | 906 | static void ip6_link_failure(struct sk_buff *skb) |
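The ip6_negative_advice() hunk stops dst_negative_advice() from unconditionally deleting a cached route clone: only an entry whose timer has actually expired is removed, a still-valid clone is kept and returned, and non-cache routes are released as before. A tiny sketch of that "purge only when expired" shape, with invented names (not kernel code):

#include <stdio.h>
#include <time.h>

struct cache_entry {
    time_t expires;
    int deleted;
};

/* Returns the entry if it is still usable, NULL once it has been purged. */
static struct cache_entry *negative_advice(struct cache_entry *e)
{
    if (e && time(NULL) > e->expires) {
        e->deleted = 1;             /* genuinely stale: drop it */
        return NULL;
    }
    return e;                       /* unexpired: keep using it */
}

int main(void)
{
    struct cache_entry fresh = { time(NULL) + 60, 0 };
    struct cache_entry stale = { time(NULL) - 60, 0 };

    printf("fresh kept: %d\n", negative_advice(&fresh) != NULL);
    printf("stale kept: %d\n", negative_advice(&stale) != NULL);
    return 0;
}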
diff --git a/net/socket.c b/net/socket.c index 769c386bd428..f55ffe9f8c87 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -2135,6 +2135,10 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, | |||
2135 | break; | 2135 | break; |
2136 | ++datagrams; | 2136 | ++datagrams; |
2137 | 2137 | ||
2138 | /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ | ||
2139 | if (flags & MSG_WAITFORONE) | ||
2140 | flags |= MSG_DONTWAIT; | ||
2141 | |||
2138 | if (timeout) { | 2142 | if (timeout) { |
2139 | ktime_get_ts(timeout); | 2143 | ktime_get_ts(timeout); |
2140 | *timeout = timespec_sub(end_time, *timeout); | 2144 | *timeout = timespec_sub(end_time, *timeout); |