Diffstat:
 -rw-r--r--  MAINTAINERS                                    |    8
 -rw-r--r--  arch/sparc/mm/iommu.c                          |   13
 -rw-r--r--  arch/sparc64/kernel/time.c                     |   22
 -rw-r--r--  drivers/net/b44.c                              |    2
 -rw-r--r--  drivers/net/cassini.c                          |    4
 -rw-r--r--  drivers/net/e100.c                             |  142
 -rw-r--r--  drivers/net/e1000/e1000.h                      |   51
 -rw-r--r--  drivers/net/e1000/e1000_ethtool.c              |  167
 -rw-r--r--  drivers/net/e1000/e1000_hw.c                   |   53
 -rw-r--r--  drivers/net/e1000/e1000_hw.h                   |   25
 -rw-r--r--  drivers/net/e1000/e1000_main.c                 |  814
 -rw-r--r--  drivers/net/e1000/e1000_param.c                |   14
 -rw-r--r--  drivers/net/mv643xx_eth.c                      |  680
 -rw-r--r--  drivers/net/skge.c                             |   20
 -rw-r--r--  drivers/net/sky2.c                             |  219
 -rw-r--r--  drivers/net/spider_net.c                       |  512
 -rw-r--r--  drivers/net/spider_net.h                       |   75
 -rw-r--r--  drivers/net/spider_net_ethtool.c               |   19
 -rw-r--r--  drivers/net/tg3.c                              |   82
 -rw-r--r--  drivers/net/tg3.h                              |    1
 -rw-r--r--  drivers/net/wireless/airo.c                    |   21
 -rw-r--r--  drivers/net/wireless/atmel.c                   |    4
 -rw-r--r--  drivers/net/wireless/hostap/Kconfig            |   22
 -rw-r--r--  drivers/net/wireless/hostap/Makefile           |    3
 -rw-r--r--  drivers/net/wireless/hostap/hostap.h           |   37
 -rw-r--r--  drivers/net/wireless/hostap/hostap_80211.h     |    3
 -rw-r--r--  drivers/net/wireless/hostap/hostap_80211_rx.c  |   11
 -rw-r--r--  drivers/net/wireless/hostap/hostap_80211_tx.c  |   15
 -rw-r--r--  drivers/net/wireless/hostap/hostap_ap.c        |   36
 -rw-r--r--  drivers/net/wireless/hostap/hostap_ap.h        |    2
 -rw-r--r--  drivers/net/wireless/hostap/hostap_common.h    |    3
 -rw-r--r--  drivers/net/wireless/hostap/hostap_config.h    |   13
 -rw-r--r--  drivers/net/wireless/hostap/hostap_info.c      |    3
 -rw-r--r--  drivers/net/wireless/hostap/hostap_ioctl.c     |   12
 -rw-r--r--  drivers/net/wireless/hostap/hostap_main.c      |   60
 -rw-r--r--  drivers/net/wireless/hostap/hostap_proc.c      |    7
 -rw-r--r--  drivers/net/wireless/hostap/hostap_wlan.h      |    4
 -rw-r--r--  drivers/net/wireless/ipw2100.c                 |  434
 -rw-r--r--  drivers/net/wireless/ipw2200.c                 |   14
 -rw-r--r--  drivers/net/wireless/prism54/isl_ioctl.c       |    2
 -rw-r--r--  drivers/net/wireless/prism54/islpci_eth.c      |    2
 -rw-r--r--  drivers/net/wireless/ray_cs.c                  |    2
 -rw-r--r--  drivers/net/wireless/wavelan_cs.c              |    2
 -rw-r--r--  drivers/pci/quirks.c                           |    5
 -rw-r--r--  drivers/scsi/ahci.c                            |   10
 -rw-r--r--  drivers/scsi/ata_piix.c                        |    3
 -rw-r--r--  drivers/scsi/libata-core.c                     |   73
 -rw-r--r--  drivers/scsi/sata_promise.c                    |   16
 -rw-r--r--  drivers/scsi/sata_svw.c                        |    1
 -rw-r--r--  drivers/video/sbuslib.c                        |    9
 -rw-r--r--  drivers/video/sbuslib.h                        |    2
 -rw-r--r--  include/asm-powerpc/lppaca.h                   |    4
 -rw-r--r--  include/linux/kernel.h                         |    1
 -rw-r--r--  include/linux/libata.h                         |   11
 -rw-r--r--  include/linux/netfilter_ipv6/ip6t_ah.h         |    9
 -rw-r--r--  include/linux/netfilter_ipv6/ip6t_esp.h        |    9
 -rw-r--r--  include/linux/netfilter_ipv6/ip6t_frag.h       |    9
 -rw-r--r--  include/linux/netfilter_ipv6/ip6t_opts.h       |    9
 -rw-r--r--  include/linux/netfilter_ipv6/ip6t_rt.h         |    9
 -rw-r--r--  include/linux/skbuff.h                         |    2
 -rw-r--r--  include/net/ieee80211_crypt.h                  |    1
 -rw-r--r--  include/net/iw_handler.h                       |    2
 -rw-r--r--  net/bridge/netfilter/ebt_ip.c                  |    4
 -rw-r--r--  net/bridge/netfilter/ebt_log.c                 |    4
 -rw-r--r--  net/core/filter.c                              |   13
 -rw-r--r--  net/core/netpoll.c                             |    2
 -rw-r--r--  net/core/pktgen.c                              |   34
 -rw-r--r--  net/dccp/ackvec.c                              |    2
 -rw-r--r--  net/ipv4/netfilter/Makefile                    |    1
 -rw-r--r--  net/ipv4/netfilter/ip_conntrack_proto_gre.c    |    1
 -rw-r--r--  net/ipv4/netfilter/ipt_policy.c                |    7
 -rw-r--r--  net/ipv4/route.c                               |   14
 -rw-r--r--  net/ipv6/addrconf.c                            |    2
 -rw-r--r--  net/ipv6/anycast.c                             |    2
 -rw-r--r--  net/ipv6/ip6_flowlabel.c                       |    4
 -rw-r--r--  net/ipv6/mcast.c                               |    6
 -rw-r--r--  net/ipv6/netfilter/Makefile                    |    1
 -rw-r--r--  net/ipv6/netfilter/ip6t_dst.c                  |  151
 -rw-r--r--  net/ipv6/netfilter/ip6t_eui64.c                |   68
 -rw-r--r--  net/ipv6/netfilter/ip6t_frag.c                 |  157
 -rw-r--r--  net/ipv6/netfilter/ip6t_hbh.c                  |  151
 -rw-r--r--  net/ipv6/netfilter/ip6t_ipv6header.c           |   79
 -rw-r--r--  net/ipv6/netfilter/ip6t_owner.c                |   28
 -rw-r--r--  net/ipv6/netfilter/ip6t_policy.c               |    2
 -rw-r--r--  net/ipv6/netfilter/ip6t_rt.c                   |  215
 -rw-r--r--  net/rxrpc/krxtimod.c                           |    2
 -rw-r--r--  net/rxrpc/proc.c                               |   12
 -rw-r--r--  net/sched/sch_prio.c                           |    7
 -rw-r--r--  net/sched/sch_sfq.c                            |    4
 -rw-r--r--  sound/sparc/cs4231.c                           |    3
 90 files changed, 2609 insertions(+), 2187 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 6d1b048c62a1..ff16eac8cf5b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1696,11 +1696,13 @@ M: mtk-manpages@gmx.net
 W:	ftp://ftp.kernel.org/pub/linux/docs/manpages
 S:	Maintained
 
-MARVELL MV64340 ETHERNET DRIVER
+MARVELL MV643XX ETHERNET DRIVER
+P:	Dale Farnsworth
+M:	dale@farnsworth.org
 P:	Manish Lachwani
-L:	linux-mips@linux-mips.org
+M:	mlachwani@mvista.com
 L:	netdev@vger.kernel.org
-S:	Supported
+S:	Odd Fixes for 2.4; Maintained for 2.6.
 
 MATROX FRAMEBUFFER DRIVER
 P:	Petr Vandrovec
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 489bf68d5f05..77840c804786 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -295,8 +295,7 @@ static void iommu_release_one(u32 busa, int npages, struct sbus_bus *sbus)
 	int ioptex;
 	int i;
 
-	if (busa < iommu->start)
-		BUG();
+	BUG_ON(busa < iommu->start);
 	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
 	for (i = 0; i < npages; i++) {
 		iopte_val(iommu->page_table[ioptex + i]) = 0;
@@ -340,9 +339,9 @@ static int iommu_map_dma_area(dma_addr_t *pba, unsigned long va,
 	iopte_t *first;
 	int ioptex;
 
-	if ((va & ~PAGE_MASK) != 0) BUG();
-	if ((addr & ~PAGE_MASK) != 0) BUG();
-	if ((len & ~PAGE_MASK) != 0) BUG();
+	BUG_ON((va & ~PAGE_MASK) != 0);
+	BUG_ON((addr & ~PAGE_MASK) != 0);
+	BUG_ON((len & ~PAGE_MASK) != 0);
 
 	/* page color = physical address */
 	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
@@ -405,8 +404,8 @@ static void iommu_unmap_dma_area(unsigned long busa, int len)
 	unsigned long end;
 	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
 
-	if ((busa & ~PAGE_MASK) != 0) BUG();
-	if ((len & ~PAGE_MASK) != 0) BUG();
+	BUG_ON((busa & ~PAGE_MASK) != 0);
+	BUG_ON((len & ~PAGE_MASK) != 0);
 
 	iopte += ioptex;
 	end = busa + len;
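The BUG_ON() conversions above are purely mechanical. The kernel's generic definition (a sketch of the include/asm-generic/bug.h fallback; architectures may override BUG() itself) expands to the same check-and-trap the open-coded form spelled out, with a branch-prediction hint added:

	/* roughly the generic fallback definition */
	#define BUG_ON(condition) do { \
		if (unlikely((condition) != 0)) \
			BUG(); \
	} while (0)

So "if (cond) BUG();" and "BUG_ON(cond);" are equivalent, and the latter reads as an assertion.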
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 459c8fbe02b4..a22930d62adf 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -280,9 +280,9 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
  * Since STICK is constantly updating, we have to access it carefully.
  *
  * The sequence we use to read is:
- * 1) read low
- * 2) read high
- * 3) read low again, if it rolled over increment high by 1
+ * 1) read high
+ * 2) read low
+ * 3) read high again, if it rolled re-read both low and high again.
  *
  * Writing STICK safely is also tricky:
  * 1) write low to zero
@@ -295,18 +295,18 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
 static unsigned long __hbird_read_stick(void)
 {
 	unsigned long ret, tmp1, tmp2, tmp3;
-	unsigned long addr = HBIRD_STICK_ADDR;
+	unsigned long addr = HBIRD_STICK_ADDR+8;
 
-	__asm__ __volatile__("ldxa	[%1] %5, %2\n\t"
-			     "add	%1, 0x8, %1\n\t"
-			     "ldxa	[%1] %5, %3\n\t"
+	__asm__ __volatile__("ldxa	[%1] %5, %2\n"
+			     "1:\n\t"
 			     "sub	%1, 0x8, %1\n\t"
+			     "ldxa	[%1] %5, %3\n\t"
+			     "add	%1, 0x8, %1\n\t"
 			     "ldxa	[%1] %5, %4\n\t"
 			     "cmp	%4, %2\n\t"
-			     "blu,a,pn	%%xcc, 1f\n\t"
-			     " add	%3, 1, %3\n"
-			     "1:\n\t"
-			     "sllx	%3, 32, %3\n\t"
+			     "bne,a,pn	%%xcc, 1b\n\t"
+			     " mov	%4, %2\n\t"
+			     "sllx	%4, 32, %4\n\t"
 			     "or	%3, %4, %0\n\t"
			     : "=&r" (ret), "=&r" (addr),
			       "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 7aa49b974dc5..df9d6e80c4f2 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -2136,7 +2136,7 @@ static int __init b44_init(void)
 
 	/* Setup paramaters for syncing RX/TX DMA descriptors */
 	dma_desc_align_mask = ~(dma_desc_align_size - 1);
-	dma_desc_sync_size = max(dma_desc_align_size, sizeof(struct dma_desc));
+	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
 
 	return pci_module_init(&b44_driver);
 }
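The max_t() change is a type fix, not a behavior change: dma_desc_align_size is an unsigned int while sizeof() yields a size_t, and the kernel's max() deliberately refuses mixed types. A sketch of the two macros from include/linux/kernel.h of that era:

	#define max(x, y) ({				\
		typeof(x) _x = (x);			\
		typeof(y) _y = (y);			\
		(void)(&_x == &_y);	/* warns if the types differ */	\
		_x > _y ? _x : _y; })

	#define max_t(type, x, y) \
		({ type __x = (x); type __y = (y); __x > __y ? __x : __y; })

max_t(unsigned int, ...) casts both operands up front, keeping the comparison well defined without the type-mismatch warning.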
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 1f7ca453bb4a..dde631f8f685 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -1925,8 +1925,8 @@ static void cas_tx(struct net_device *dev, struct cas *cp,
 	u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
 #endif
 	if (netif_msg_intr(cp))
-		printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %lx\n",
-			cp->dev->name, status, compwb);
+		printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n",
+			cp->dev->name, status, (unsigned long long)compwb);
 	/* process all the rings */
 	for (ring = 0; ring < N_TX_RINGS; ring++) {
 #ifdef USE_TX_COMPWB
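The format-string change follows the usual kernel idiom for printing a u64: the type is unsigned long on most 64-bit architectures but unsigned long long on 32-bit ones, so neither %lx nor %llx alone matches everywhere. Casting at the call site pins the type down:

	u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);

	/* portable: always pair %llx with an explicit cast */
	printk(KERN_DEBUG "tx compwb: 0x%llx\n", (unsigned long long)compwb);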
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 4726722a0635..bf1fd2b98bf8 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1,25 +1,25 @@
 /*******************************************************************************
 
 
   Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 2 of the License, or (at your option)
   any later version.
 
   This program is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
   more details.
 
   You should have received a copy of the GNU General Public License along with
   this program; if not, write to the Free Software Foundation, Inc., 59
   Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 
   The full GNU General Public License is included in this distribution in the
   file called LICENSE.
 
   Contact Information:
   Linux NICS <linux.nics@intel.com>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -160,7 +160,7 @@
 
 #define DRV_NAME		"e100"
 #define DRV_EXT		"-NAPI"
-#define DRV_VERSION		"3.4.14-k4"DRV_EXT
+#define DRV_VERSION		"3.5.10-k2"DRV_EXT
 #define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
 #define DRV_COPYRIGHT		"Copyright(c) 1999-2005 Intel Corporation"
 #define PFX			DRV_NAME ": "
@@ -320,7 +320,7 @@ enum cuc_dump {
 	cuc_dump_complete = 0x0000A005,
 	cuc_dump_reset_complete = 0x0000A007,
 };
 
 enum port {
 	software_reset = 0x0000,
 	selftest = 0x0001,
@@ -715,10 +715,10 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
 		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
 		writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
 		e100_write_flush(nic); udelay(4);
 
 		writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
 		e100_write_flush(nic); udelay(4);
 
 		/* Eeprom drives a dummy zero to EEDO after receiving
 		 * complete address. Use this to adjust addr_len. */
 		ctrl = readb(&nic->csr->eeprom_ctrl_lo);
@@ -726,7 +726,7 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
 			*addr_len -= (i - 16);
 			i = 17;
 		}
 
 		data = (data << 1) | (ctrl & eedo ? 1 : 0);
 	}
 
@@ -1170,7 +1170,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
 }
 
-static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 {
 /* *INDENT-OFF* */
 	static struct {
@@ -1213,13 +1213,13 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 * driver can change the algorithm.
 *
 * INTDELAY - This loads the dead-man timer with its inital value.
 *    When this timer expires the interrupt is asserted, and the
 *    timer is reset each time a new packet is received. (see
 *    BUNDLEMAX below to set the limit on number of chained packets)
 *    The current default is 0x600 or 1536. Experiments show that
 *    the value should probably stay within the 0x200 - 0x1000.
 *
 * BUNDLEMAX -
 *    This sets the maximum number of frames that will be bundled. In
 *    some situations, such as the TCP windowing algorithm, it may be
 *    better to limit the growth of the bundle size than let it go as
@@ -1229,7 +1229,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 *    an interrupt for every frame received. If you do not want to put
 *    a limit on the bundle size, set this value to xFFFF.
 *
 * BUNDLESMALL -
 *    This contains a bit-mask describing the minimum size frame that
 *    will be bundled. The default masks the lower 7 bits, which means
 *    that any frame less than 128 bytes in length will not be bundled,
@@ -1244,7 +1244,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 *
 *    The current default is 0xFF80, which masks out the lower 7 bits.
 *    This means that any frame which is x7F (127) bytes or smaller
 *    will cause an immediate interrupt. Because this value must be a
 *    bit mask, there are only a few valid values that can be used. To
 *    turn this feature off, the driver can write the value xFFFF to the
 *    lower word of this instruction (in the same way that the other
@@ -1253,7 +1253,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 *    standard Ethernet frames are <= 2047 bytes in length.
 *************************************************************************/
 
 /* if you wish to disable the ucode functionality, while maintaining the
  * workarounds it provides, set the following defines to:
  * BUNDLESMALL 0
  * BUNDLEMAX 1
@@ -1284,12 +1284,46 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 
 		for (i = 0; i < UCODE_SIZE; i++)
 			cb->u.ucode[i] = cpu_to_le32(ucode[i]);
-		cb->command = cpu_to_le16(cb_ucode);
+		cb->command = cpu_to_le16(cb_ucode | cb_el);
 		return;
 	}
 
 noloaducode:
-	cb->command = cpu_to_le16(cb_nop);
+	cb->command = cpu_to_le16(cb_nop | cb_el);
+}
+
+static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
+	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
+{
+	int err = 0, counter = 50;
+	struct cb *cb = nic->cb_to_clean;
+
+	if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
+		DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
+
+	/* must restart cuc */
+	nic->cuc_cmd = cuc_start;
+
+	/* wait for completion */
+	e100_write_flush(nic);
+	udelay(10);
+
+	/* wait for possibly (ouch) 500ms */
+	while (!(cb->status & cpu_to_le16(cb_complete))) {
+		msleep(10);
+		if (!--counter) break;
+	}
+
+	/* ack any interupts, something could have been set */
+	writeb(~0, &nic->csr->scb.stat_ack);
+
+	/* if the command failed, or is not OK, notify and return */
+	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
+		DPRINTK(PROBE,ERR, "ucode load failed\n");
+		err = -EPERM;
+	}
+
+	return err;
 }
 
 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
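The new e100_exec_cb_wait() is a bounded poll-for-completion: issue the command, sleep in 10ms slices, and give up once the retry budget is spent. Stripped of the driver specifics, the shape is the sketch below, where hw_command_done() is a hypothetical stand-in for testing cb->status & cpu_to_le16(cb_complete):

	static int poll_for_completion(void)
	{
		int counter = 50;	/* 50 * 10ms = 500ms worst case */

		while (!hw_command_done()) {
			msleep(10);	/* sleep rather than busy-wait */
			if (!--counter)
				return -ETIMEDOUT;	/* budget exhausted */
		}
		return 0;	/* hardware signalled completion */
	}

Note the cb_el bit now OR'd into the ucode and nop commands above: it marks the command block as the end of the list, so the microcode load forms a self-contained command-block chain whose completion can be waited on here.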
@@ -1357,13 +1391,13 @@ static int e100_phy_init(struct nic *nic)
 		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
 	}
 
 	if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
 	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000))) {
 		/* enable/disable MDI/MDI-X auto-switching.
 		   MDI/MDI-X auto-switching is disabled for 82551ER/QM chips */
 		if((nic->mac == mac_82551_E) || (nic->mac == mac_82551_F) ||
 		   (nic->mac == mac_82551_10) || (nic->mii.force_media) ||
 		   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))
 			mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 0);
 		else
 			mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, NCONFIG_AUTO_SWITCH);
@@ -1388,7 +1422,7 @@ static int e100_hw_init(struct nic *nic)
 		return err;
 	if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
 		return err;
-	if((err = e100_exec_cb(nic, NULL, e100_load_ucode)))
+	if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
 		return err;
 	if((err = e100_exec_cb(nic, NULL, e100_configure)))
 		return err;
@@ -1493,7 +1527,7 @@ static void e100_update_stats(struct nic *nic)
 		}
 	}
 
 
 	if(e100_exec_cmd(nic, cuc_dump_reset, 0))
 		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
 }
@@ -1542,10 +1576,10 @@ static void e100_watchdog(unsigned long data)
 	mii_check_link(&nic->mii);
 
 	/* Software generated interrupt to recover from (rare) Rx
 	 * allocation failure.
 	 * Unfortunately have to use a spinlock to not re-enable interrupts
 	 * accidentally, due to hardware that shares a register between the
 	 * interrupt mask bit and the SW Interrupt generation bit */
 	spin_lock_irq(&nic->cmd_lock);
 	writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
 	spin_unlock_irq(&nic->cmd_lock);
@@ -1830,7 +1864,7 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
 	struct rx *rx_to_start = NULL;
 
 	/* are we already rnr? then pay attention!!! this ensures that
 	 * the state machine progression never allows a start with a
 	 * partially cleaned list, avoiding a race between hardware
 	 * and rx_to_clean when in NAPI mode */
 	if(RU_SUSPENDED == nic->ru_running)
@@ -2066,7 +2100,7 @@ static void e100_tx_timeout(struct net_device *netdev)
 {
 	struct nic *nic = netdev_priv(netdev);
 
 	/* Reset outside of interrupt context, to avoid request_irq
 	 * in interrupt context */
 	schedule_work(&nic->tx_timeout_task);
 }
@@ -2313,7 +2347,7 @@ static int e100_set_ringparam(struct net_device *netdev,
 	struct param_range *rfds = &nic->params.rfds;
 	struct param_range *cbs = &nic->params.cbs;
 
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
 
 	if(netif_running(netdev))
@@ -2631,7 +2665,9 @@ static int __devinit e100_probe(struct pci_dev *pdev,
 		nic->flags |= wol_magic;
 
 	/* ack any pending wake events, disable PME */
-	pci_enable_wake(pdev, 0, 0);
+	err = pci_enable_wake(pdev, 0, 0);
+	if (err)
+		DPRINTK(PROBE, ERR, "Error clearing wake event\n");
 
 	strcpy(netdev->name, "eth%d");
 	if((err = register_netdev(netdev))) {
@@ -2682,6 +2718,7 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct nic *nic = netdev_priv(netdev);
+	int retval;
 
 	if(netif_running(netdev))
 		e100_down(nic);
@@ -2689,9 +2726,14 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
 	netif_device_detach(netdev);
 
 	pci_save_state(pdev);
-	pci_enable_wake(pdev, pci_choose_state(pdev, state), nic->flags & (wol_magic | e100_asf(nic)));
+	retval = pci_enable_wake(pdev, pci_choose_state(pdev, state),
+	                         nic->flags & (wol_magic | e100_asf(nic)));
+	if (retval)
+		DPRINTK(PROBE,ERR, "Error enabling wake\n");
 	pci_disable_device(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	if (retval)
+		DPRINTK(PROBE,ERR, "Error %d setting power state\n", retval);
 
 	return 0;
 }
@@ -2700,11 +2742,16 @@ static int e100_resume(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct nic *nic = netdev_priv(netdev);
+	int retval;
 
-	pci_set_power_state(pdev, PCI_D0);
+	retval = pci_set_power_state(pdev, PCI_D0);
+	if (retval)
+		DPRINTK(PROBE,ERR, "Error waking adapter\n");
 	pci_restore_state(pdev);
 	/* ack any pending wake events, disable PME */
-	pci_enable_wake(pdev, 0, 0);
+	retval = pci_enable_wake(pdev, 0, 0);
+	if (retval)
+		DPRINTK(PROBE,ERR, "Error clearing wake events\n");
 	if(e100_hw_init(nic))
 		DPRINTK(HW, ERR, "e100_hw_init failed\n");
 
@@ -2721,12 +2768,15 @@ static void e100_shutdown(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct nic *nic = netdev_priv(netdev);
+	int retval;
 
 #ifdef CONFIG_PM
-	pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
+	retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
 #else
-	pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
+	retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
 #endif
+	if (retval)
+		DPRINTK(PROBE,ERR, "Error enabling wake\n");
 }
 
 
@@ -2739,7 +2789,7 @@ static struct pci_driver e100_driver = {
 	.suspend = e100_suspend,
 	.resume = e100_resume,
 #endif
 	.shutdown = e100_shutdown,
 };
 
 static int __init e100_init_module(void)
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index e02e9ba2e18b..27c77306193b 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -72,10 +72,6 @@
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
-#ifdef CONFIG_E1000_MQ
-#include <linux/cpu.h>
-#include <linux/smp.h>
-#endif
 
 #define BAR_0		0
 #define BAR_1		1
@@ -87,6 +83,10 @@
 struct e1000_adapter;
 
 #include "e1000_hw.h"
+#ifdef CONFIG_E1000_MQ
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#endif
 
 #ifdef DBG
 #define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
@@ -169,6 +169,13 @@ struct e1000_buffer {
 	uint16_t next_to_watch;
 };
 
+#ifdef CONFIG_E1000_MQ
+struct e1000_queue_stats {
+	uint64_t packets;
+	uint64_t bytes;
+};
+#endif
+
 struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
 struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
 
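The new e1000_queue_stats pair gives each ring its own packet/byte counters. On the fast paths a multiqueue build would bump them per ring, along the lines of this sketch (not a quote from the driver):

	#ifdef CONFIG_E1000_MQ
		tx_ring->tx_stats.packets++;		/* one frame completed */
		tx_ring->tx_stats.bytes += skb->len;	/* payload accounted */
	#endif

Keeping the counters per ring avoids cross-CPU contention on a single shared pair and lets ethtool report each queue separately (see the e1000_ethtool.c changes below).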
@@ -191,10 +198,12 @@ struct e1000_tx_ring {
 	spinlock_t tx_lock;
 	uint16_t tdh;
 	uint16_t tdt;
-	uint64_t pkt;
 
 	boolean_t last_tx_tso;
 
+#ifdef CONFIG_E1000_MQ
+	struct e1000_queue_stats tx_stats;
+#endif
 };
 
 struct e1000_rx_ring {
@@ -216,9 +225,17 @@ struct e1000_rx_ring {
 	struct e1000_ps_page *ps_page;
 	struct e1000_ps_page_dma *ps_page_dma;
 
+	struct sk_buff *rx_skb_top;
+	struct sk_buff *rx_skb_prev;
+
+	/* cpu for rx queue */
+	int cpu;
+
 	uint16_t rdh;
 	uint16_t rdt;
-	uint64_t pkt;
+#ifdef CONFIG_E1000_MQ
+	struct e1000_queue_stats rx_stats;
+#endif
 };
 
 #define E1000_DESC_UNUSED(R) \
@@ -251,6 +268,9 @@ struct e1000_adapter {
 	uint16_t link_speed;
 	uint16_t link_duplex;
 	spinlock_t stats_lock;
+#ifdef CONFIG_E1000_NAPI
+	spinlock_t tx_queue_lock;
+#endif
 	atomic_t irq_sem;
 	struct work_struct tx_timeout_task;
 	struct work_struct watchdog_task;
@@ -264,6 +284,7 @@ struct e1000_adapter {
 #ifdef CONFIG_E1000_MQ
 	struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */
 #endif
+	unsigned long tx_queue_len;
 	uint32_t txd_cmd;
 	uint32_t tx_int_delay;
 	uint32_t tx_abs_int_delay;
@@ -271,9 +292,11 @@ struct e1000_adapter {
 	uint64_t gotcl_old;
 	uint64_t tpt_old;
 	uint64_t colc_old;
+	uint32_t tx_timeout_count;
 	uint32_t tx_fifo_head;
 	uint32_t tx_head_addr;
 	uint32_t tx_fifo_size;
+	uint8_t tx_timeout_factor;
 	atomic_t tx_fifo_stall;
 	boolean_t pcix_82544;
 	boolean_t detect_tx_hung;
@@ -281,14 +304,15 @@ struct e1000_adapter {
 	/* RX */
 #ifdef CONFIG_E1000_NAPI
 	boolean_t (*clean_rx) (struct e1000_adapter *adapter,
 			       struct e1000_rx_ring *rx_ring,
 			       int *work_done, int work_to_do);
 #else
 	boolean_t (*clean_rx) (struct e1000_adapter *adapter,
 			       struct e1000_rx_ring *rx_ring);
 #endif
 	void (*alloc_rx_buf) (struct e1000_adapter *adapter,
-			      struct e1000_rx_ring *rx_ring);
+			      struct e1000_rx_ring *rx_ring,
+			      int cleaned_count);
 	struct e1000_rx_ring *rx_ring;      /* One per active queue */
 #ifdef CONFIG_E1000_NAPI
 	struct net_device *polling_netdev;  /* One per active queue */
@@ -296,13 +320,15 @@ struct e1000_adapter {
 #ifdef CONFIG_E1000_MQ
 	struct net_device **cpu_netdev;     /* per-cpu */
 	struct call_async_data_struct rx_sched_call_data;
-	int cpu_for_queue[4];
+	cpumask_t cpumask;
 #endif
-	int num_queues;
+	int num_tx_queues;
+	int num_rx_queues;
 
 	uint64_t hw_csum_err;
 	uint64_t hw_csum_good;
 	uint64_t rx_hdr_split;
+	uint32_t alloc_rx_buff_failed;
 	uint32_t rx_int_delay;
 	uint32_t rx_abs_int_delay;
 	boolean_t rx_csum;
@@ -330,6 +356,7 @@ struct e1000_adapter {
 	struct e1000_rx_ring test_rx_ring;
 
 
+	u32 *config_space;
 	int msg_enable;
 #ifdef CONFIG_PCI_MSI
 	boolean_t have_msi;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index c88f1a3c1b1d..d252297e4db0 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -80,6 +80,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
 	{ "tx_deferred_ok", E1000_STAT(stats.dc) },
 	{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
 	{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+	{ "tx_timeout_count", E1000_STAT(tx_timeout_count) },
 	{ "rx_long_length_errors", E1000_STAT(stats.roc) },
 	{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
 	{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
@@ -93,9 +94,20 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
 	{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
 	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
 	{ "rx_header_split", E1000_STAT(rx_hdr_split) },
+	{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
 };
-#define E1000_STATS_LEN	\
+
+#ifdef CONFIG_E1000_MQ
+#define E1000_QUEUE_STATS_LEN \
+	(((struct e1000_adapter *)netdev->priv)->num_tx_queues + \
+	 ((struct e1000_adapter *)netdev->priv)->num_rx_queues) \
+	* (sizeof(struct e1000_queue_stats) / sizeof(uint64_t))
+#else
+#define E1000_QUEUE_STATS_LEN 0
+#endif
+#define E1000_GLOBAL_STATS_LEN	\
 	sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
 static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
 	"Register test  (offline)", "Eeprom test    (offline)",
 	"Interrupt test (offline)", "Loopback test  (offline)",
@@ -183,7 +195,15 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	if(ecmd->autoneg == AUTONEG_ENABLE) {
+	/* When SoL/IDER sessions are active, autoneg/speed/duplex
+	 * cannot be changed */
+	if (e1000_check_phy_reset_block(hw)) {
+		DPRINTK(DRV, ERR, "Cannot change link characteristics "
+		        "when SoL/IDER is active.\n");
+		return -EINVAL;
+	}
+
+	if (ecmd->autoneg == AUTONEG_ENABLE) {
 		hw->autoneg = 1;
 		if(hw->media_type == e1000_media_type_fiber)
 			hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
@@ -567,21 +587,21 @@ e1000_get_drvinfo(struct net_device *netdev,
 
 	strncpy(drvinfo->driver, e1000_driver_name, 32);
 	strncpy(drvinfo->version, e1000_driver_version, 32);
 
-	/* EEPROM image version # is reported as firware version # for
+	/* EEPROM image version # is reported as firmware version # for
 	 * 8257{1|2|3} controllers */
 	e1000_read_eeprom(&adapter->hw, 5, 1, &eeprom_data);
 	switch (adapter->hw.mac_type) {
 	case e1000_82571:
 	case e1000_82572:
 	case e1000_82573:
 		sprintf(firmware_version, "%d.%d-%d",
 			(eeprom_data & 0xF000) >> 12,
 			(eeprom_data & 0x0FF0) >> 4,
 			eeprom_data & 0x000F);
 		break;
 	default:
-		sprintf(firmware_version, "n/a");
+		sprintf(firmware_version, "N/A");
 	}
 
 	strncpy(drvinfo->fw_version, firmware_version, 32);
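As a worked example of the version decode above: an EEPROM word of 0x1234 splits into (0x1234 & 0xF000) >> 12 = 1, (0x1234 & 0x0FF0) >> 4 = 0x23 = 35, and 0x1234 & 0x000F = 4, so drvinfo->fw_version would read "1.35-4".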
@@ -623,8 +643,8 @@ e1000_set_ringparam(struct net_device *netdev,
 	struct e1000_rx_ring *rxdr, *rx_old, *rx_new;
 	int i, err, tx_ring_size, rx_ring_size;
 
-	tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
-	rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
+	tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
+	rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
 
 	if (netif_running(adapter->netdev))
 		e1000_down(adapter);
@@ -663,10 +683,10 @@ e1000_set_ringparam(struct net_device *netdev,
 		E1000_MAX_TXD : E1000_MAX_82544_TXD));
 	E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
 
-	for (i = 0; i < adapter->num_queues; i++) {
+	for (i = 0; i < adapter->num_tx_queues; i++)
 		txdr[i].count = txdr->count;
+	for (i = 0; i < adapter->num_rx_queues; i++)
 		rxdr[i].count = rxdr->count;
-	}
 
 	if(netif_running(adapter->netdev)) {
 		/* Try to get new resources before deleting old */
@@ -979,18 +999,17 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
 		}
 	}
 
-	if(txdr->desc) {
+	if (txdr->desc) {
 		pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
 		txdr->desc = NULL;
 	}
-	if(rxdr->desc) {
+	if (rxdr->desc) {
 		pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
 		rxdr->desc = NULL;
 	}
 
 	kfree(txdr->buffer_info);
 	txdr->buffer_info = NULL;
-
 	kfree(rxdr->buffer_info);
 	rxdr->buffer_info = NULL;
 
@@ -1327,11 +1346,11 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
 static int
 e1000_setup_loopback_test(struct e1000_adapter *adapter)
 {
-	uint32_t rctl;
 	struct e1000_hw *hw = &adapter->hw;
+	uint32_t rctl;
 
 	if (hw->media_type == e1000_media_type_fiber ||
 	    hw->media_type == e1000_media_type_internal_serdes) {
 		switch (hw->mac_type) {
 		case e1000_82545:
 		case e1000_82546:
@@ -1362,25 +1381,25 @@ e1000_setup_loopback_test(struct e1000_adapter *adapter)
 static void
 e1000_loopback_cleanup(struct e1000_adapter *adapter)
 {
+	struct e1000_hw *hw = &adapter->hw;
 	uint32_t rctl;
 	uint16_t phy_reg;
-	struct e1000_hw *hw = &adapter->hw;
 
-	rctl = E1000_READ_REG(&adapter->hw, RCTL);
+	rctl = E1000_READ_REG(hw, RCTL);
 	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
-	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+	E1000_WRITE_REG(hw, RCTL, rctl);
 
 	switch (hw->mac_type) {
 	case e1000_82571:
 	case e1000_82572:
 		if (hw->media_type == e1000_media_type_fiber ||
-		    hw->media_type == e1000_media_type_internal_serdes){
+		    hw->media_type == e1000_media_type_internal_serdes) {
 #define E1000_SERDES_LB_OFF 0x400
 			E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_OFF);
 			msec_delay(10);
 			break;
 		}
-		/* fall thru for Cu adapters */
+		/* Fall Through */
 	case e1000_82545:
 	case e1000_82546:
 	case e1000_82545_rev_3:
@@ -1401,7 +1420,7 @@ static void
 e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 {
 	memset(skb->data, 0xFF, frame_size);
-	frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size;
+	frame_size &= ~1;
 	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
 	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
 	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
@@ -1410,7 +1429,7 @@ e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 static int
 e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 {
-	frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size;
+	frame_size &= ~1;
 	if(*(skb->data + 3) == 0xFF) {
 		if((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
 		   (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
@@ -1488,14 +1507,25 @@ e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 static int
 e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data)
 {
-	if((*data = e1000_setup_desc_rings(adapter))) goto err_loopback;
-	if((*data = e1000_setup_loopback_test(adapter)))
-		goto err_loopback_setup;
+	/* PHY loopback cannot be performed if SoL/IDER
+	 * sessions are active */
+	if (e1000_check_phy_reset_block(&adapter->hw)) {
+		DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
+		        "when SoL/IDER is active.\n");
+		*data = 0;
+		goto out;
+	}
+
+	if ((*data = e1000_setup_desc_rings(adapter)))
+		goto out;
+	if ((*data = e1000_setup_loopback_test(adapter)))
+		goto err_loopback;
 	*data = e1000_run_loopback_test(adapter);
 	e1000_loopback_cleanup(adapter);
-err_loopback_setup:
-	e1000_free_desc_rings(adapter);
+
 err_loopback:
+	e1000_free_desc_rings(adapter);
+out:
 	return *data;
 }
 
@@ -1617,6 +1647,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 
 	case E1000_DEV_ID_82546EB_FIBER:
 	case E1000_DEV_ID_82546GB_FIBER:
+	case E1000_DEV_ID_82571EB_FIBER:
 		/* Wake events only supported on port A for dual fiber */
 		if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
 			wol->supported = 0;
@@ -1660,6 +1691,7 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 
 	case E1000_DEV_ID_82546EB_FIBER:
 	case E1000_DEV_ID_82546GB_FIBER:
+	case E1000_DEV_ID_82571EB_FIBER:
 		/* Wake events only supported on port A for dual fiber */
 		if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
 			return wol->wolopts ? -EOPNOTSUPP : 0;
@@ -1721,21 +1753,21 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
 		mod_timer(&adapter->blink_timer, jiffies);
 		msleep_interruptible(data * 1000);
 		del_timer_sync(&adapter->blink_timer);
-	}
-	else if(adapter->hw.mac_type < e1000_82573) {
-		E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE |
+	} else if (adapter->hw.mac_type < e1000_82573) {
+		E1000_WRITE_REG(&adapter->hw, LEDCTL,
+			(E1000_LEDCTL_LED2_BLINK_RATE |
 			E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK |
 			(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
 			(E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) |
 			(E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT)));
 		msleep_interruptible(data * 1000);
-	}
-	else {
-		E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE |
+	} else {
+		E1000_WRITE_REG(&adapter->hw, LEDCTL,
+			(E1000_LEDCTL_LED2_BLINK_RATE |
 			E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK |
 			(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
 			(E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) |
 			(E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT)));
 		msleep_interruptible(data * 1000);
 	}
 
@@ -1768,19 +1800,43 @@ e1000_get_ethtool_stats(struct net_device *netdev,
 		    struct ethtool_stats *stats, uint64_t *data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_E1000_MQ
+	uint64_t *queue_stat;
+	int stat_count = sizeof(struct e1000_queue_stats) / sizeof(uint64_t);
+	int j, k;
+#endif
 	int i;
 
 	e1000_update_stats(adapter);
-	for(i = 0; i < E1000_STATS_LEN; i++) {
+	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
 		char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
 		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
 			sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
 	}
+#ifdef CONFIG_E1000_MQ
+	for (j = 0; j < adapter->num_tx_queues; j++) {
+		queue_stat = (uint64_t *)&adapter->tx_ring[j].tx_stats;
+		for (k = 0; k < stat_count; k++)
+			data[i + k] = queue_stat[k];
+		i += k;
+	}
+	for (j = 0; j < adapter->num_rx_queues; j++) {
+		queue_stat = (uint64_t *)&adapter->rx_ring[j].rx_stats;
+		for (k = 0; k < stat_count; k++)
+			data[i + k] = queue_stat[k];
+		i += k;
+	}
+#endif
+/*	BUG_ON(i != E1000_STATS_LEN); */
 }
 
 static void
 e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
 {
+#ifdef CONFIG_E1000_MQ
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+#endif
+	uint8_t *p = data;
 	int i;
 
 	switch(stringset) {
@@ -1789,11 +1845,26 @@ e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
 			E1000_TEST_LEN*ETH_GSTRING_LEN);
 		break;
 	case ETH_SS_STATS:
-		for (i=0; i < E1000_STATS_LEN; i++) {
-			memcpy(data + i * ETH_GSTRING_LEN,
-			e1000_gstrings_stats[i].stat_string,
-			ETH_GSTRING_LEN);
+		for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, e1000_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+#ifdef CONFIG_E1000_MQ
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			sprintf(p, "tx_queue_%u_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_bytes", i);
+			p += ETH_GSTRING_LEN;
 		}
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			sprintf(p, "rx_queue_%u_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_bytes", i);
+			p += ETH_GSTRING_LEN;
+		}
+#endif
+/*		BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
 		break;
 	}
 }
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 136fc031e4ad..2437d362ff63 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -318,6 +318,8 @@ e1000_set_mac_type(struct e1000_hw *hw)
318 case E1000_DEV_ID_82546GB_FIBER: 318 case E1000_DEV_ID_82546GB_FIBER:
319 case E1000_DEV_ID_82546GB_SERDES: 319 case E1000_DEV_ID_82546GB_SERDES:
320 case E1000_DEV_ID_82546GB_PCIE: 320 case E1000_DEV_ID_82546GB_PCIE:
321 case E1000_DEV_ID_82546GB_QUAD_COPPER:
322 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
321 hw->mac_type = e1000_82546_rev_3; 323 hw->mac_type = e1000_82546_rev_3;
322 break; 324 break;
323 case E1000_DEV_ID_82541EI: 325 case E1000_DEV_ID_82541EI:
@@ -639,6 +641,7 @@ e1000_init_hw(struct e1000_hw *hw)
639 uint16_t cmd_mmrbc; 641 uint16_t cmd_mmrbc;
640 uint16_t stat_mmrbc; 642 uint16_t stat_mmrbc;
641 uint32_t mta_size; 643 uint32_t mta_size;
644 uint32_t ctrl_ext;
642 645
643 DEBUGFUNC("e1000_init_hw"); 646 DEBUGFUNC("e1000_init_hw");
644 647
@@ -735,7 +738,6 @@ e1000_init_hw(struct e1000_hw *hw)
735 break; 738 break;
736 case e1000_82571: 739 case e1000_82571:
737 case e1000_82572: 740 case e1000_82572:
738 ctrl |= (1 << 22);
739 case e1000_82573: 741 case e1000_82573:
740 ctrl |= E1000_TXDCTL_COUNT_DESC; 742 ctrl |= E1000_TXDCTL_COUNT_DESC;
741 break; 743 break;
@@ -775,6 +777,15 @@ e1000_init_hw(struct e1000_hw *hw)
775 */ 777 */
776 e1000_clear_hw_cntrs(hw); 778 e1000_clear_hw_cntrs(hw);
777 779
780 if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
781 hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
782 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
783 /* Relaxed ordering must be disabled to avoid a parity
784 * error crash in a PCI slot. */
785 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
786 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
787 }
788
778 return ret_val; 789 return ret_val;
779} 790}
780 791
@@ -838,6 +849,11 @@ e1000_setup_link(struct e1000_hw *hw)
838 849
839 DEBUGFUNC("e1000_setup_link"); 850 DEBUGFUNC("e1000_setup_link");
840 851
852 /* In the case of the phy reset being blocked, we already have a link.
853 * We do not have to set it up again. */
854 if (e1000_check_phy_reset_block(hw))
855 return E1000_SUCCESS;
856
841 /* Read and store word 0x0F of the EEPROM. This word contains bits 857 /* Read and store word 0x0F of the EEPROM. This word contains bits
842 * that determine the hardware's default PAUSE (flow control) mode, 858 * that determine the hardware's default PAUSE (flow control) mode,
843 * a bit that determines whether the HW defaults to enabling or 859 * a bit that determines whether the HW defaults to enabling or
@@ -1929,14 +1945,19 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1929void 1945void
1930e1000_config_collision_dist(struct e1000_hw *hw) 1946e1000_config_collision_dist(struct e1000_hw *hw)
1931{ 1947{
1932 uint32_t tctl; 1948 uint32_t tctl, coll_dist;
1933 1949
1934 DEBUGFUNC("e1000_config_collision_dist"); 1950 DEBUGFUNC("e1000_config_collision_dist");
1935 1951
1952 if (hw->mac_type < e1000_82543)
1953 coll_dist = E1000_COLLISION_DISTANCE_82542;
1954 else
1955 coll_dist = E1000_COLLISION_DISTANCE;
1956
1936 tctl = E1000_READ_REG(hw, TCTL); 1957 tctl = E1000_READ_REG(hw, TCTL);
1937 1958
1938 tctl &= ~E1000_TCTL_COLD; 1959 tctl &= ~E1000_TCTL_COLD;
1939 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; 1960 tctl |= coll_dist << E1000_COLD_SHIFT;
1940 1961
1941 E1000_WRITE_REG(hw, TCTL, tctl); 1962 E1000_WRITE_REG(hw, TCTL, tctl);
1942 E1000_WRITE_FLUSH(hw); 1963 E1000_WRITE_FLUSH(hw);
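
COLD is a bit field inside TCTL, so the sequence clears the field before installing the new value rather than ORing over stale bits. A standalone sketch of the update, assuming the TCTL_COLD mask value from e1000_hw.h (the distance constants are the ones this patch defines):

    #include <stdint.h>

    #define E1000_TCTL_COLD                0x003ff000 /* collision distance field (assumed) */
    #define E1000_COLD_SHIFT               12
    #define E1000_COLLISION_DISTANCE       63 /* 0-based, post-82542 */
    #define E1000_COLLISION_DISTANCE_82542 64

    /* Returns the new TCTL value with the collision distance replaced. */
    static uint32_t set_collision_distance(uint32_t tctl, int is_pre_82543)
    {
        uint32_t coll_dist = is_pre_82543 ? E1000_COLLISION_DISTANCE_82542
                                          : E1000_COLLISION_DISTANCE;
        tctl &= ~E1000_TCTL_COLD;               /* clear the old field */
        tctl |= coll_dist << E1000_COLD_SHIFT;  /* install the new one */
        return tctl;
    }
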
@@ -2982,6 +3003,8 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
2982 3003
2983 if (hw->mac_type < e1000_82571) 3004 if (hw->mac_type < e1000_82571)
2984 msec_delay(10); 3005 msec_delay(10);
3006 else
3007 udelay(100);
2985 3008
2986 E1000_WRITE_REG(hw, CTRL, ctrl); 3009 E1000_WRITE_REG(hw, CTRL, ctrl);
2987 E1000_WRITE_FLUSH(hw); 3010 E1000_WRITE_FLUSH(hw);
@@ -3881,14 +3904,16 @@ e1000_read_eeprom(struct e1000_hw *hw,
3881 return -E1000_ERR_EEPROM; 3904 return -E1000_ERR_EEPROM;
3882 } 3905 }
3883 3906
3884 /* FLASH reads without acquiring the semaphore are safe in 82573-based 3907 /* FLASH reads without acquiring the semaphore are safe */
3885 * controllers. 3908 if (e1000_is_onboard_nvm_eeprom(hw) == TRUE &&
3886 */ 3909 hw->eeprom.use_eerd == FALSE) {
3887 if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) || 3910 switch (hw->mac_type) {
3888 (hw->mac_type != e1000_82573)) { 3911 default:
3889 /* Prepare the EEPROM for reading */ 3912 /* Prepare the EEPROM for reading */
3890 if(e1000_acquire_eeprom(hw) != E1000_SUCCESS) 3913 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
3891 return -E1000_ERR_EEPROM; 3914 return -E1000_ERR_EEPROM;
3915 break;
3916 }
3892 } 3917 }
3893 3918
3894 if(eeprom->use_eerd == TRUE) { 3919 if(eeprom->use_eerd == TRUE) {
@@ -6720,6 +6745,12 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw)
6720 break; 6745 break;
6721 } 6746 }
6722 6747
6748 /* PHY configuration from NVM starts only after EECD_AUTO_RD goes high.
6749 * Need to wait for PHY configuration to complete before accessing NVM
6750 * and PHY. */
6751 if (hw->mac_type == e1000_82573)
6752 msec_delay(25);
6753
6723 return E1000_SUCCESS; 6754 return E1000_SUCCESS;
6724} 6755}
6725 6756
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 7caa35748cea..0b8f6f2b774b 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -439,6 +439,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
439#define E1000_DEV_ID_82546GB_FIBER 0x107A 439#define E1000_DEV_ID_82546GB_FIBER 0x107A
440#define E1000_DEV_ID_82546GB_SERDES 0x107B 440#define E1000_DEV_ID_82546GB_SERDES 0x107B
441#define E1000_DEV_ID_82546GB_PCIE 0x108A 441#define E1000_DEV_ID_82546GB_PCIE 0x108A
442#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
442#define E1000_DEV_ID_82547EI 0x1019 443#define E1000_DEV_ID_82547EI 0x1019
443#define E1000_DEV_ID_82571EB_COPPER 0x105E 444#define E1000_DEV_ID_82571EB_COPPER 0x105E
444#define E1000_DEV_ID_82571EB_FIBER 0x105F 445#define E1000_DEV_ID_82571EB_FIBER 0x105F
@@ -449,6 +450,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
449#define E1000_DEV_ID_82573E 0x108B 450#define E1000_DEV_ID_82573E 0x108B
450#define E1000_DEV_ID_82573E_IAMT 0x108C 451#define E1000_DEV_ID_82573E_IAMT 0x108C
451#define E1000_DEV_ID_82573L 0x109A 452#define E1000_DEV_ID_82573L 0x109A
453#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
452 454
453 455
454#define NODE_ADDRESS_SIZE 6 456#define NODE_ADDRESS_SIZE 6
@@ -1497,6 +1499,7 @@ struct e1000_hw {
1497#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ 1499#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
1498#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ 1500#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */
1499#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ 1501#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
1502#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
1500#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 1503#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
1501#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 1504#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
1502#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 1505#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000
@@ -1954,6 +1957,23 @@ struct e1000_host_command_info {
1954 1957
1955#define E1000_MDALIGN 4096 1958#define E1000_MDALIGN 4096
1956 1959
1960/* PCI-Ex registers */
1961
1962/* PCI-Ex Control Register */
1963#define E1000_GCR_RXD_NO_SNOOP 0x00000001
1964#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
1965#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
1966#define E1000_GCR_TXD_NO_SNOOP 0x00000008
1967#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
1968#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
1969
1970#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
1971 E1000_GCR_RXDSCW_NO_SNOOP | \
1972 E1000_GCR_RXDSCR_NO_SNOOP | \
1973 E1000_GCR_TXD_NO_SNOOP | \
1974 E1000_GCR_TXDSCW_NO_SNOOP | \
1975 E1000_GCR_TXDSCR_NO_SNOOP)
1976
1957#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 1977#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
1958/* Function Active and Power State to MNG */ 1978/* Function Active and Power State to MNG */
1959#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 1979#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003
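
The six GCR bits OR together into PCI_EX_NO_SNOOP_ALL, so all no-snoop behavior can be toggled with a single mask. The patch only adds the defines; the helper below is a hypothetical illustration of how the mask would be applied:

    #include <stdint.h>

    #define E1000_GCR_RXD_NO_SNOOP    0x00000001
    #define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
    #define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
    #define E1000_GCR_TXD_NO_SNOOP    0x00000008
    #define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
    #define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020

    #define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP    | \
                                 E1000_GCR_RXDSCW_NO_SNOOP | \
                                 E1000_GCR_RXDSCR_NO_SNOOP | \
                                 E1000_GCR_TXD_NO_SNOOP    | \
                                 E1000_GCR_TXDSCW_NO_SNOOP | \
                                 E1000_GCR_TXDSCR_NO_SNOOP)

    /* Hypothetical helper: force snooping of all descriptor/data traffic
     * by clearing every no-snoop bit in one read-modify-write. */
    static uint32_t gcr_enable_snooping(uint32_t gcr)
    {
        return gcr & ~PCI_EX_NO_SNOOP_ALL;
    }
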
@@ -2077,7 +2097,10 @@ struct e1000_host_command_info {
2077/* Collision related configuration parameters */ 2097/* Collision related configuration parameters */
2078#define E1000_COLLISION_THRESHOLD 15 2098#define E1000_COLLISION_THRESHOLD 15
2079#define E1000_CT_SHIFT 4 2099#define E1000_CT_SHIFT 4
2080#define E1000_COLLISION_DISTANCE 64 2100/* Collision distance is a 0-based value that applies to
2101 * half-duplex-capable hardware only. */
2102#define E1000_COLLISION_DISTANCE 63
2103#define E1000_COLLISION_DISTANCE_82542 64
2081#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE 2104#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE
2082#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE 2105#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE
2083#define E1000_COLD_SHIFT 12 2106#define E1000_COLD_SHIFT 12
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 438a931fd55d..d0a5d1656c5f 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -43,7 +43,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
43#else 43#else
44#define DRIVERNAPI "-NAPI" 44#define DRIVERNAPI "-NAPI"
45#endif 45#endif
46#define DRV_VERSION "6.1.16-k2"DRIVERNAPI 46#define DRV_VERSION "6.3.9-k2"DRIVERNAPI
47char e1000_driver_version[] = DRV_VERSION; 47char e1000_driver_version[] = DRV_VERSION;
48static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; 48static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
49 49
@@ -97,7 +97,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
97 INTEL_E1000_ETHERNET_DEVICE(0x108A), 97 INTEL_E1000_ETHERNET_DEVICE(0x108A),
98 INTEL_E1000_ETHERNET_DEVICE(0x108B), 98 INTEL_E1000_ETHERNET_DEVICE(0x108B),
99 INTEL_E1000_ETHERNET_DEVICE(0x108C), 99 INTEL_E1000_ETHERNET_DEVICE(0x108C),
100 INTEL_E1000_ETHERNET_DEVICE(0x1099),
100 INTEL_E1000_ETHERNET_DEVICE(0x109A), 101 INTEL_E1000_ETHERNET_DEVICE(0x109A),
102 INTEL_E1000_ETHERNET_DEVICE(0x10B5),
101 /* required last entry */ 103 /* required last entry */
102 {0,} 104 {0,}
103}; 105};
@@ -171,9 +173,11 @@ static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
171 struct e1000_rx_ring *rx_ring); 173 struct e1000_rx_ring *rx_ring);
172#endif 174#endif
173static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 175static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
174 struct e1000_rx_ring *rx_ring); 176 struct e1000_rx_ring *rx_ring,
177 int cleaned_count);
175static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, 178static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
176 struct e1000_rx_ring *rx_ring); 179 struct e1000_rx_ring *rx_ring,
180 int cleaned_count);
177static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 181static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
178static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 182static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
179 int cmd); 183 int cmd);
@@ -319,7 +323,75 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
319 } 323 }
320 } 324 }
321} 325}
322 326
327/**
328 * e1000_release_hw_control - release control of the h/w to f/w
329 * @adapter: address of board private structure
330 *
331 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
332 * For ASF and Pass Through versions of f/w this means that the
333 * driver is no longer loaded. For AMT version (only with 82573)
334 * of the f/w this means that the network i/f is closed.
335 *
336 **/
337
338static inline void
339e1000_release_hw_control(struct e1000_adapter *adapter)
340{
341 uint32_t ctrl_ext;
342 uint32_t swsm;
343
344 /* Let firmware take over control of h/w */
345 switch (adapter->hw.mac_type) {
346 case e1000_82571:
347 case e1000_82572:
348 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
349 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
350 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
351 break;
352 case e1000_82573:
353 swsm = E1000_READ_REG(&adapter->hw, SWSM);
354 E1000_WRITE_REG(&adapter->hw, SWSM,
355 swsm & ~E1000_SWSM_DRV_LOAD);
356 default:
357 break;
358 }
359}
360
361/**
362 * e1000_get_hw_control - get control of the h/w from f/w
363 * @adapter: address of board private structure
364 *
365 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
366 * For ASF and Pass Through versions of f/w this means that
367 * the driver is loaded. For AMT version (only with 82573)
368 * of the f/w this means that the network i/f is open.
369 *
370 **/
371
372static inline void
373e1000_get_hw_control(struct e1000_adapter *adapter)
374{
375 uint32_t ctrl_ext;
376 uint32_t swsm;
377 /* Let firmware know the driver has taken over */
378 switch (adapter->hw.mac_type) {
379 case e1000_82571:
380 case e1000_82572:
381 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
382 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
383 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
384 break;
385 case e1000_82573:
386 swsm = E1000_READ_REG(&adapter->hw, SWSM);
387 E1000_WRITE_REG(&adapter->hw, SWSM,
388 swsm | E1000_SWSM_DRV_LOAD);
389 break;
390 default:
391 break;
392 }
393}
394
323int 395int
324e1000_up(struct e1000_adapter *adapter) 396e1000_up(struct e1000_adapter *adapter)
325{ 397{
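
e1000_get_hw_control and e1000_release_hw_control are mirror images: both flip the same DRV_LOAD flag, set to claim the hardware for the driver and cleared to hand it back to firmware, and only the register holding the flag differs by MAC type (CTRL_EXT on 82571/2, SWSM on 82573). A condensed sketch of the shared bit manipulation; the CTRL_EXT bit value is assumed from e1000_hw.h:

    #include <stdint.h>

    #define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* driver-loaded flag, 82571/2 (assumed) */

    /* take != 0 claims the h/w for the driver; take == 0 returns it to f/w. */
    static uint32_t drv_load_bit(uint32_t reg, int take)
    {
        return take ? (reg | E1000_CTRL_EXT_DRV_LOAD)
                    : (reg & ~E1000_CTRL_EXT_DRV_LOAD);
    }
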
@@ -343,8 +415,14 @@ e1000_up(struct e1000_adapter *adapter)
343 e1000_configure_tx(adapter); 415 e1000_configure_tx(adapter);
344 e1000_setup_rctl(adapter); 416 e1000_setup_rctl(adapter);
345 e1000_configure_rx(adapter); 417 e1000_configure_rx(adapter);
346 for (i = 0; i < adapter->num_queues; i++) 418 /* call E1000_DESC_UNUSED which always leaves
347 adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]); 419 * at least 1 descriptor unused to make sure
420 * next_to_use != next_to_clean */
421 for (i = 0; i < adapter->num_rx_queues; i++) {
422 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
423 adapter->alloc_rx_buf(adapter, ring,
424 E1000_DESC_UNUSED(ring));
425 }
348 426
349#ifdef CONFIG_PCI_MSI 427#ifdef CONFIG_PCI_MSI
350 if(adapter->hw.mac_type > e1000_82547_rev_2) { 428 if(adapter->hw.mac_type > e1000_82547_rev_2) {
@@ -364,6 +442,12 @@ e1000_up(struct e1000_adapter *adapter)
364 return err; 442 return err;
365 } 443 }
366 444
445#ifdef CONFIG_E1000_MQ
446 e1000_setup_queue_mapping(adapter);
447#endif
448
449 adapter->tx_queue_len = netdev->tx_queue_len;
450
367 mod_timer(&adapter->watchdog_timer, jiffies); 451 mod_timer(&adapter->watchdog_timer, jiffies);
368 452
369#ifdef CONFIG_E1000_NAPI 453#ifdef CONFIG_E1000_NAPI
@@ -378,6 +462,8 @@ void
378e1000_down(struct e1000_adapter *adapter) 462e1000_down(struct e1000_adapter *adapter)
379{ 463{
380 struct net_device *netdev = adapter->netdev; 464 struct net_device *netdev = adapter->netdev;
465 boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
466 e1000_check_mng_mode(&adapter->hw);
381 467
382 e1000_irq_disable(adapter); 468 e1000_irq_disable(adapter);
383#ifdef CONFIG_E1000_MQ 469#ifdef CONFIG_E1000_MQ
@@ -396,6 +482,7 @@ e1000_down(struct e1000_adapter *adapter)
396#ifdef CONFIG_E1000_NAPI 482#ifdef CONFIG_E1000_NAPI
397 netif_poll_disable(netdev); 483 netif_poll_disable(netdev);
398#endif 484#endif
485 netdev->tx_queue_len = adapter->tx_queue_len;
399 adapter->link_speed = 0; 486 adapter->link_speed = 0;
400 adapter->link_duplex = 0; 487 adapter->link_duplex = 0;
401 netif_carrier_off(netdev); 488 netif_carrier_off(netdev);
@@ -405,12 +492,16 @@ e1000_down(struct e1000_adapter *adapter)
405 e1000_clean_all_tx_rings(adapter); 492 e1000_clean_all_tx_rings(adapter);
406 e1000_clean_all_rx_rings(adapter); 493 e1000_clean_all_rx_rings(adapter);
407 494
408 /* If WoL is not enabled and management mode is not IAMT 495 /* Power down the PHY so no link is implied when interface is down *
409 * Power down the PHY so no link is implied when interface is down */ 496 * The PHY cannot be powered down if any of the following is TRUE *
410 if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 && 497 * (a) WoL is enabled
498 * (b) AMT is active
499 * (c) SoL/IDER session is active */
500 if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
411 adapter->hw.media_type == e1000_media_type_copper && 501 adapter->hw.media_type == e1000_media_type_copper &&
412 !e1000_check_mng_mode(&adapter->hw) && 502 !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
413 !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) { 503 !mng_mode_enabled &&
504 !e1000_check_phy_reset_block(&adapter->hw)) {
414 uint16_t mii_reg; 505 uint16_t mii_reg;
415 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); 506 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
416 mii_reg |= MII_CR_POWER_DOWN; 507 mii_reg |= MII_CR_POWER_DOWN;
@@ -422,10 +513,8 @@ e1000_down(struct e1000_adapter *adapter)
422void 513void
423e1000_reset(struct e1000_adapter *adapter) 514e1000_reset(struct e1000_adapter *adapter)
424{ 515{
425 struct net_device *netdev = adapter->netdev;
426 uint32_t pba, manc; 516 uint32_t pba, manc;
427 uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF; 517 uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
428 uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF;
429 518
430 /* Repartition Pba for greater than 9k mtu 519 /* Repartition Pba for greater than 9k mtu
431 * To take effect CTRL.RST is required. 520 * To take effect CTRL.RST is required.
@@ -449,15 +538,8 @@ e1000_reset(struct e1000_adapter *adapter)
449 } 538 }
450 539
451 if((adapter->hw.mac_type != e1000_82573) && 540 if((adapter->hw.mac_type != e1000_82573) &&
452 (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) { 541 (adapter->netdev->mtu > E1000_RXBUFFER_8192))
453 pba -= 8; /* allocate more FIFO for Tx */ 542 pba -= 8; /* allocate more FIFO for Tx */
454 /* send an XOFF when there is enough space in the
455 * Rx FIFO to hold one extra full size Rx packet
456 */
457 fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE +
458 ETHERNET_FCS_SIZE + 1;
459 fc_low_water_mark = fc_high_water_mark + 8;
460 }
461 543
462 544
463 if(adapter->hw.mac_type == e1000_82547) { 545 if(adapter->hw.mac_type == e1000_82547) {
@@ -471,10 +553,12 @@ e1000_reset(struct e1000_adapter *adapter)
471 E1000_WRITE_REG(&adapter->hw, PBA, pba); 553 E1000_WRITE_REG(&adapter->hw, PBA, pba);
472 554
473 /* flow control settings */ 555 /* flow control settings */
474 adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) - 556 /* Set the FC high water mark to 90% of the FIFO size.
475 fc_high_water_mark; 557 * Required to clear last 3 LSB */
476 adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) - 558 fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
477 fc_low_water_mark; 559
560 adapter->hw.fc_high_water = fc_high_water_mark;
561 adapter->hw.fc_low_water = fc_high_water_mark - 8;
478 adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME; 562 adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
479 adapter->hw.fc_send_xon = 1; 563 adapter->hw.fc_send_xon = 1;
480 adapter->hw.fc = adapter->hw.original_fc; 564 adapter->hw.fc = adapter->hw.original_fc;
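
Since PBA counts the RX FIFO in kilobytes, 90% of the FIFO in bytes is pba * 1024 * 9 / 10, i.e. pba * 9216 / 10, and the & 0xFFF8 clears the three low bits the register requires to be zero. A worked example with an assumed pba of 48 KB:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t pba  = 48;                           /* RX FIFO size in KB (assumed) */
        uint16_t high = ((pba * 9216) / 10) & 0xFFF8; /* 90%, low 3 bits cleared */
        uint16_t low  = high - 8;

        printf("fc_high_water=%u fc_low_water=%u\n", high, low);
        return 0;                                     /* prints 44232 44224 */
    }
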
@@ -517,8 +601,6 @@ e1000_probe(struct pci_dev *pdev,
517 struct net_device *netdev; 601 struct net_device *netdev;
518 struct e1000_adapter *adapter; 602 struct e1000_adapter *adapter;
519 unsigned long mmio_start, mmio_len; 603 unsigned long mmio_start, mmio_len;
520 uint32_t ctrl_ext;
521 uint32_t swsm;
522 604
523 static int cards_found = 0; 605 static int cards_found = 0;
524 int i, err, pci_using_dac; 606 int i, err, pci_using_dac;
@@ -712,8 +794,7 @@ e1000_probe(struct pci_dev *pdev,
712 case e1000_82546: 794 case e1000_82546:
713 case e1000_82546_rev_3: 795 case e1000_82546_rev_3:
714 case e1000_82571: 796 case e1000_82571:
715 if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1) 797 if(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
716 && (adapter->hw.media_type == e1000_media_type_copper)) {
717 e1000_read_eeprom(&adapter->hw, 798 e1000_read_eeprom(&adapter->hw,
718 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 799 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
719 break; 800 break;
@@ -727,25 +808,36 @@ e1000_probe(struct pci_dev *pdev,
727 if(eeprom_data & eeprom_apme_mask) 808 if(eeprom_data & eeprom_apme_mask)
728 adapter->wol |= E1000_WUFC_MAG; 809 adapter->wol |= E1000_WUFC_MAG;
729 810
811 /* print bus type/speed/width info */
812 {
813 struct e1000_hw *hw = &adapter->hw;
814 DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
815 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
816 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
817 ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
818 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
819 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
820 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
821 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
822 ((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
823 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
824 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
825 "32-bit"));
826 }
827
828 for (i = 0; i < 6; i++)
829 printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
830
730 /* reset the hardware with the new settings */ 831 /* reset the hardware with the new settings */
731 e1000_reset(adapter); 832 e1000_reset(adapter);
732 833
733 /* Let firmware know the driver has taken over */ 834 /* If the controller is 82573 and f/w is AMT, do not set
734 switch(adapter->hw.mac_type) { 835 * DRV_LOAD until the interface is up. For all other cases,
735 case e1000_82571: 836 * let the f/w know that the h/w is now under the control
736 case e1000_82572: 837 * of the driver. */
737 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); 838 if (adapter->hw.mac_type != e1000_82573 ||
738 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, 839 !e1000_check_mng_mode(&adapter->hw))
739 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 840 e1000_get_hw_control(adapter);
740 break;
741 case e1000_82573:
742 swsm = E1000_READ_REG(&adapter->hw, SWSM);
743 E1000_WRITE_REG(&adapter->hw, SWSM,
744 swsm | E1000_SWSM_DRV_LOAD);
745 break;
746 default:
747 break;
748 }
749 841
750 strcpy(netdev->name, "eth%d"); 842 strcpy(netdev->name, "eth%d");
751 if((err = register_netdev(netdev))) 843 if((err = register_netdev(netdev)))
@@ -782,8 +874,7 @@ e1000_remove(struct pci_dev *pdev)
782{ 874{
783 struct net_device *netdev = pci_get_drvdata(pdev); 875 struct net_device *netdev = pci_get_drvdata(pdev);
784 struct e1000_adapter *adapter = netdev_priv(netdev); 876 struct e1000_adapter *adapter = netdev_priv(netdev);
785 uint32_t ctrl_ext; 877 uint32_t manc;
786 uint32_t manc, swsm;
787#ifdef CONFIG_E1000_NAPI 878#ifdef CONFIG_E1000_NAPI
788 int i; 879 int i;
789#endif 880#endif
@@ -799,26 +890,13 @@ e1000_remove(struct pci_dev *pdev)
799 } 890 }
800 } 891 }
801 892
802 switch(adapter->hw.mac_type) { 893 /* Release control of h/w to f/w. If f/w is AMT enabled, this
803 case e1000_82571: 894 * would have already happened in close and is redundant. */
804 case e1000_82572: 895 e1000_release_hw_control(adapter);
805 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
806 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
807 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
808 break;
809 case e1000_82573:
810 swsm = E1000_READ_REG(&adapter->hw, SWSM);
811 E1000_WRITE_REG(&adapter->hw, SWSM,
812 swsm & ~E1000_SWSM_DRV_LOAD);
813 break;
814
815 default:
816 break;
817 }
818 896
819 unregister_netdev(netdev); 897 unregister_netdev(netdev);
820#ifdef CONFIG_E1000_NAPI 898#ifdef CONFIG_E1000_NAPI
821 for (i = 0; i < adapter->num_queues; i++) 899 for (i = 0; i < adapter->num_rx_queues; i++)
822 __dev_put(&adapter->polling_netdev[i]); 900 __dev_put(&adapter->polling_netdev[i]);
823#endif 901#endif
824 902
@@ -923,15 +1001,34 @@ e1000_sw_init(struct e1000_adapter *adapter)
923 switch (hw->mac_type) { 1001 switch (hw->mac_type) {
924 case e1000_82571: 1002 case e1000_82571:
925 case e1000_82572: 1003 case e1000_82572:
926 adapter->num_queues = 2; 1004 /* These controllers support 2 tx queues, but with a single
1005 * qdisc implementation, multiple tx queues aren't quite as
1006 * interesting. If we can find a logical way of mapping
1007 * flows to a queue, then perhaps we can up the num_tx_queue
1008 * count back to its default. Until then, we run the risk of
1009 * terrible performance due to SACK overload. */
1010 adapter->num_tx_queues = 1;
1011 adapter->num_rx_queues = 2;
927 break; 1012 break;
928 default: 1013 default:
929 adapter->num_queues = 1; 1014 adapter->num_tx_queues = 1;
1015 adapter->num_rx_queues = 1;
930 break; 1016 break;
931 } 1017 }
932 adapter->num_queues = min(adapter->num_queues, num_online_cpus()); 1018 adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus());
1019 adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus());
1020 DPRINTK(DRV, INFO, "Multiqueue Enabled: Rx Queue count = %u %s\n",
1021 adapter->num_rx_queues,
1022 ((adapter->num_rx_queues == 1)
1023 ? ((num_online_cpus() > 1)
1024 ? "(due to unsupported feature in current adapter)"
1025 : "(due to unsupported system configuration)")
1026 : ""));
1027 DPRINTK(DRV, INFO, "Multiqueue Enabled: Tx Queue count = %u\n",
1028 adapter->num_tx_queues);
933#else 1029#else
934 adapter->num_queues = 1; 1030 adapter->num_tx_queues = 1;
1031 adapter->num_rx_queues = 1;
935#endif 1032#endif
936 1033
937 if (e1000_alloc_queues(adapter)) { 1034 if (e1000_alloc_queues(adapter)) {
@@ -940,17 +1037,14 @@ e1000_sw_init(struct e1000_adapter *adapter)
940 } 1037 }
941 1038
942#ifdef CONFIG_E1000_NAPI 1039#ifdef CONFIG_E1000_NAPI
943 for (i = 0; i < adapter->num_queues; i++) { 1040 for (i = 0; i < adapter->num_rx_queues; i++) {
944 adapter->polling_netdev[i].priv = adapter; 1041 adapter->polling_netdev[i].priv = adapter;
945 adapter->polling_netdev[i].poll = &e1000_clean; 1042 adapter->polling_netdev[i].poll = &e1000_clean;
946 adapter->polling_netdev[i].weight = 64; 1043 adapter->polling_netdev[i].weight = 64;
947 dev_hold(&adapter->polling_netdev[i]); 1044 dev_hold(&adapter->polling_netdev[i]);
948 set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state); 1045 set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
949 } 1046 }
950#endif 1047 spin_lock_init(&adapter->tx_queue_lock);
951
952#ifdef CONFIG_E1000_MQ
953 e1000_setup_queue_mapping(adapter);
954#endif 1048#endif
955 1049
956 atomic_set(&adapter->irq_sem, 1); 1050 atomic_set(&adapter->irq_sem, 1);
@@ -973,13 +1067,13 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
973{ 1067{
974 int size; 1068 int size;
975 1069
976 size = sizeof(struct e1000_tx_ring) * adapter->num_queues; 1070 size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
977 adapter->tx_ring = kmalloc(size, GFP_KERNEL); 1071 adapter->tx_ring = kmalloc(size, GFP_KERNEL);
978 if (!adapter->tx_ring) 1072 if (!adapter->tx_ring)
979 return -ENOMEM; 1073 return -ENOMEM;
980 memset(adapter->tx_ring, 0, size); 1074 memset(adapter->tx_ring, 0, size);
981 1075
982 size = sizeof(struct e1000_rx_ring) * adapter->num_queues; 1076 size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
983 adapter->rx_ring = kmalloc(size, GFP_KERNEL); 1077 adapter->rx_ring = kmalloc(size, GFP_KERNEL);
984 if (!adapter->rx_ring) { 1078 if (!adapter->rx_ring) {
985 kfree(adapter->tx_ring); 1079 kfree(adapter->tx_ring);
@@ -988,7 +1082,7 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
988 memset(adapter->rx_ring, 0, size); 1082 memset(adapter->rx_ring, 0, size);
989 1083
990#ifdef CONFIG_E1000_NAPI 1084#ifdef CONFIG_E1000_NAPI
991 size = sizeof(struct net_device) * adapter->num_queues; 1085 size = sizeof(struct net_device) * adapter->num_rx_queues;
992 adapter->polling_netdev = kmalloc(size, GFP_KERNEL); 1086 adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
993 if (!adapter->polling_netdev) { 1087 if (!adapter->polling_netdev) {
994 kfree(adapter->tx_ring); 1088 kfree(adapter->tx_ring);
@@ -998,6 +1092,14 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
998 memset(adapter->polling_netdev, 0, size); 1092 memset(adapter->polling_netdev, 0, size);
999#endif 1093#endif
1000 1094
1095#ifdef CONFIG_E1000_MQ
1096 adapter->rx_sched_call_data.func = e1000_rx_schedule;
1097 adapter->rx_sched_call_data.info = adapter->netdev;
1098
1099 adapter->cpu_netdev = alloc_percpu(struct net_device *);
1100 adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
1101#endif
1102
1001 return E1000_SUCCESS; 1103 return E1000_SUCCESS;
1002} 1104}
1003 1105
@@ -1017,14 +1119,15 @@ e1000_setup_queue_mapping(struct e1000_adapter *adapter)
1017 lock_cpu_hotplug(); 1119 lock_cpu_hotplug();
1018 i = 0; 1120 i = 0;
1019 for_each_online_cpu(cpu) { 1121 for_each_online_cpu(cpu) {
1020 *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_queues]; 1122 *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_tx_queues];
1021 /* This is incomplete because we'd like to assign separate 1123 /* This is incomplete because we'd like to assign separate
1022 * physical cpus to these netdev polling structures and 1124 * physical cpus to these netdev polling structures and
1023 * avoid saturating a subset of cpus. 1125 * avoid saturating a subset of cpus.
1024 */ 1126 */
1025 if (i < adapter->num_queues) { 1127 if (i < adapter->num_rx_queues) {
1026 *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i]; 1128 *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
1027 adapter->cpu_for_queue[i] = cpu; 1129 adapter->rx_ring[i].cpu = cpu;
1130 cpu_set(cpu, adapter->cpumask);
1028 } else 1131 } else
1029 *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL; 1132 *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
1030 1133
@@ -1071,6 +1174,12 @@ e1000_open(struct net_device *netdev)
1071 e1000_update_mng_vlan(adapter); 1174 e1000_update_mng_vlan(adapter);
1072 } 1175 }
1073 1176
1177 /* If AMT is enabled, let the firmware know that the network
1178 * interface is now open */
1179 if (adapter->hw.mac_type == e1000_82573 &&
1180 e1000_check_mng_mode(&adapter->hw))
1181 e1000_get_hw_control(adapter);
1182
1074 return E1000_SUCCESS; 1183 return E1000_SUCCESS;
1075 1184
1076err_up: 1185err_up:
@@ -1109,6 +1218,13 @@ e1000_close(struct net_device *netdev)
1109 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 1218 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1110 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 1219 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1111 } 1220 }
1221
1222 /* If AMT is enabled, let the firmware know that the network
1223 * interface is now closed */
1224 if (adapter->hw.mac_type == e1000_82573 &&
1225 e1000_check_mng_mode(&adapter->hw))
1226 e1000_release_hw_control(adapter);
1227
1112 return 0; 1228 return 0;
1113} 1229}
1114 1230
@@ -1229,7 +1345,7 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1229{ 1345{
1230 int i, err = 0; 1346 int i, err = 0;
1231 1347
1232 for (i = 0; i < adapter->num_queues; i++) { 1348 for (i = 0; i < adapter->num_tx_queues; i++) {
1233 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); 1349 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1234 if (err) { 1350 if (err) {
1235 DPRINTK(PROBE, ERR, 1351 DPRINTK(PROBE, ERR,
@@ -1254,10 +1370,11 @@ e1000_configure_tx(struct e1000_adapter *adapter)
1254 uint64_t tdba; 1370 uint64_t tdba;
1255 struct e1000_hw *hw = &adapter->hw; 1371 struct e1000_hw *hw = &adapter->hw;
1256 uint32_t tdlen, tctl, tipg, tarc; 1372 uint32_t tdlen, tctl, tipg, tarc;
1373 uint32_t ipgr1, ipgr2;
1257 1374
1258 /* Setup the HW Tx Head and Tail descriptor pointers */ 1375 /* Setup the HW Tx Head and Tail descriptor pointers */
1259 1376
1260 switch (adapter->num_queues) { 1377 switch (adapter->num_tx_queues) {
1261 case 2: 1378 case 2:
1262 tdba = adapter->tx_ring[1].dma; 1379 tdba = adapter->tx_ring[1].dma;
1263 tdlen = adapter->tx_ring[1].count * 1380 tdlen = adapter->tx_ring[1].count *
@@ -1287,22 +1404,26 @@ e1000_configure_tx(struct e1000_adapter *adapter)
1287 1404
1288 /* Set the default values for the Tx Inter Packet Gap timer */ 1405 /* Set the default values for the Tx Inter Packet Gap timer */
1289 1406
1407 if (hw->media_type == e1000_media_type_fiber ||
1408 hw->media_type == e1000_media_type_internal_serdes)
1409 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1410 else
1411 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1412
1290 switch (hw->mac_type) { 1413 switch (hw->mac_type) {
1291 case e1000_82542_rev2_0: 1414 case e1000_82542_rev2_0:
1292 case e1000_82542_rev2_1: 1415 case e1000_82542_rev2_1:
1293 tipg = DEFAULT_82542_TIPG_IPGT; 1416 tipg = DEFAULT_82542_TIPG_IPGT;
1294 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 1417 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1295 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 1418 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1296 break; 1419 break;
1297 default: 1420 default:
1298 if (hw->media_type == e1000_media_type_fiber || 1421 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1299 hw->media_type == e1000_media_type_internal_serdes) 1422 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1300 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 1423 break;
1301 else
1302 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1303 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1304 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1305 } 1424 }
1425 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1426 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1306 E1000_WRITE_REG(hw, TIPG, tipg); 1427 E1000_WRITE_REG(hw, TIPG, tipg);
1307 1428
1308 /* Set the Tx Interrupt Delay register */ 1429 /* Set the Tx Interrupt Delay register */
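
The restructured TIPG setup picks IPGT by media type first, then IPGR1/IPGR2 by MAC type, and only then shifts the three fields into place, instead of duplicating the media check per MAC. A standalone sketch using the 82543 defaults; the shift positions and field values are assumed from e1000_hw.h:

    #include <stdint.h>

    #define E1000_TIPG_IPGR1_SHIFT 10
    #define E1000_TIPG_IPGR2_SHIFT 20

    /* Default field values as in e1000_hw.h (illustrative). */
    #define TIPG_IPGT_COPPER 8
    #define TIPG_IPGT_FIBER  9
    #define TIPG_IPGR1       8
    #define TIPG_IPGR2       6

    static uint32_t build_tipg(int fiber)
    {
        uint32_t tipg = fiber ? TIPG_IPGT_FIBER : TIPG_IPGT_COPPER;

        tipg |= TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
        tipg |= TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
        return tipg;
    }
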
@@ -1454,6 +1575,8 @@ setup_rx_desc_die:
1454 1575
1455 rxdr->next_to_clean = 0; 1576 rxdr->next_to_clean = 0;
1456 rxdr->next_to_use = 0; 1577 rxdr->next_to_use = 0;
1578 rxdr->rx_skb_top = NULL;
1579 rxdr->rx_skb_prev = NULL;
1457 1580
1458 return 0; 1581 return 0;
1459} 1582}
@@ -1475,7 +1598,7 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1475{ 1598{
1476 int i, err = 0; 1599 int i, err = 0;
1477 1600
1478 for (i = 0; i < adapter->num_queues; i++) { 1601 for (i = 0; i < adapter->num_rx_queues; i++) {
1479 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); 1602 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1480 if (err) { 1603 if (err) {
1481 DPRINTK(PROBE, ERR, 1604 DPRINTK(PROBE, ERR,
@@ -1510,7 +1633,10 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1510 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | 1633 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1511 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); 1634 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
1512 1635
1513 if(adapter->hw.tbi_compatibility_on == 1) 1636 if (adapter->hw.mac_type > e1000_82543)
1637 rctl |= E1000_RCTL_SECRC;
1638
1639 if (adapter->hw.tbi_compatibility_on == 1)
1514 rctl |= E1000_RCTL_SBP; 1640 rctl |= E1000_RCTL_SBP;
1515 else 1641 else
1516 rctl &= ~E1000_RCTL_SBP; 1642 rctl &= ~E1000_RCTL_SBP;
@@ -1638,16 +1764,21 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1638 } 1764 }
1639 1765
1640 if (hw->mac_type >= e1000_82571) { 1766 if (hw->mac_type >= e1000_82571) {
1641 /* Reset delay timers after every interrupt */
1642 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); 1767 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
1768 /* Reset delay timers after every interrupt */
1643 ctrl_ext |= E1000_CTRL_EXT_CANC; 1769 ctrl_ext |= E1000_CTRL_EXT_CANC;
1770#ifdef CONFIG_E1000_NAPI
1771 /* Auto-Mask interrupts upon ICR read. */
1772 ctrl_ext |= E1000_CTRL_EXT_IAME;
1773#endif
1644 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); 1774 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
1775 E1000_WRITE_REG(hw, IAM, ~0);
1645 E1000_WRITE_FLUSH(hw); 1776 E1000_WRITE_FLUSH(hw);
1646 } 1777 }
1647 1778
1648 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1779 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1649 * the Base and Length of the Rx Descriptor Ring */ 1780 * the Base and Length of the Rx Descriptor Ring */
1650 switch (adapter->num_queues) { 1781 switch (adapter->num_rx_queues) {
1651#ifdef CONFIG_E1000_MQ 1782#ifdef CONFIG_E1000_MQ
1652 case 2: 1783 case 2:
1653 rdba = adapter->rx_ring[1].dma; 1784 rdba = adapter->rx_ring[1].dma;
@@ -1674,7 +1805,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1674 } 1805 }
1675 1806
1676#ifdef CONFIG_E1000_MQ 1807#ifdef CONFIG_E1000_MQ
1677 if (adapter->num_queues > 1) { 1808 if (adapter->num_rx_queues > 1) {
1678 uint32_t random[10]; 1809 uint32_t random[10];
1679 1810
1680 get_random_bytes(&random[0], 40); 1811 get_random_bytes(&random[0], 40);
@@ -1684,7 +1815,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1684 E1000_WRITE_REG(hw, RSSIM, 0); 1815 E1000_WRITE_REG(hw, RSSIM, 0);
1685 } 1816 }
1686 1817
1687 switch (adapter->num_queues) { 1818 switch (adapter->num_rx_queues) {
1688 case 2: 1819 case 2:
1689 default: 1820 default:
1690 reta = 0x00800080; 1821 reta = 0x00800080;
@@ -1776,7 +1907,7 @@ e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1776{ 1907{
1777 int i; 1908 int i;
1778 1909
1779 for (i = 0; i < adapter->num_queues; i++) 1910 for (i = 0; i < adapter->num_tx_queues; i++)
1780 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); 1911 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1781} 1912}
1782 1913
@@ -1789,12 +1920,10 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1789 buffer_info->dma, 1920 buffer_info->dma,
1790 buffer_info->length, 1921 buffer_info->length,
1791 PCI_DMA_TODEVICE); 1922 PCI_DMA_TODEVICE);
1792 buffer_info->dma = 0;
1793 } 1923 }
1794 if(buffer_info->skb) { 1924 if (buffer_info->skb)
1795 dev_kfree_skb_any(buffer_info->skb); 1925 dev_kfree_skb_any(buffer_info->skb);
1796 buffer_info->skb = NULL; 1926 memset(buffer_info, 0, sizeof(struct e1000_buffer));
1797 }
1798} 1927}
1799 1928
1800/** 1929/**
@@ -1843,7 +1972,7 @@ e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
1843{ 1972{
1844 int i; 1973 int i;
1845 1974
1846 for (i = 0; i < adapter->num_queues; i++) 1975 for (i = 0; i < adapter->num_tx_queues; i++)
1847 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); 1976 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1848} 1977}
1849 1978
@@ -1887,7 +2016,7 @@ e1000_free_all_rx_resources(struct e1000_adapter *adapter)
1887{ 2016{
1888 int i; 2017 int i;
1889 2018
1890 for (i = 0; i < adapter->num_queues; i++) 2019 for (i = 0; i < adapter->num_rx_queues; i++)
1891 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); 2020 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
1892} 2021}
1893 2022
@@ -1913,8 +2042,6 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
1913 for(i = 0; i < rx_ring->count; i++) { 2042 for(i = 0; i < rx_ring->count; i++) {
1914 buffer_info = &rx_ring->buffer_info[i]; 2043 buffer_info = &rx_ring->buffer_info[i];
1915 if(buffer_info->skb) { 2044 if(buffer_info->skb) {
1916 ps_page = &rx_ring->ps_page[i];
1917 ps_page_dma = &rx_ring->ps_page_dma[i];
1918 pci_unmap_single(pdev, 2045 pci_unmap_single(pdev,
1919 buffer_info->dma, 2046 buffer_info->dma,
1920 buffer_info->length, 2047 buffer_info->length,
@@ -1922,19 +2049,30 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
1922 2049
1923 dev_kfree_skb(buffer_info->skb); 2050 dev_kfree_skb(buffer_info->skb);
1924 buffer_info->skb = NULL; 2051 buffer_info->skb = NULL;
1925 2052 }
1926 for(j = 0; j < adapter->rx_ps_pages; j++) { 2053 ps_page = &rx_ring->ps_page[i];
1927 if(!ps_page->ps_page[j]) break; 2054 ps_page_dma = &rx_ring->ps_page_dma[i];
1928 pci_unmap_single(pdev, 2055 for (j = 0; j < adapter->rx_ps_pages; j++) {
1929 ps_page_dma->ps_page_dma[j], 2056 if (!ps_page->ps_page[j]) break;
1930 PAGE_SIZE, PCI_DMA_FROMDEVICE); 2057 pci_unmap_page(pdev,
1931 ps_page_dma->ps_page_dma[j] = 0; 2058 ps_page_dma->ps_page_dma[j],
1932 put_page(ps_page->ps_page[j]); 2059 PAGE_SIZE, PCI_DMA_FROMDEVICE);
1933 ps_page->ps_page[j] = NULL; 2060 ps_page_dma->ps_page_dma[j] = 0;
1934 } 2061 put_page(ps_page->ps_page[j]);
2062 ps_page->ps_page[j] = NULL;
1935 } 2063 }
1936 } 2064 }
1937 2065
2066 /* there also may be some cached data in our adapter */
2067 if (rx_ring->rx_skb_top) {
2068 dev_kfree_skb(rx_ring->rx_skb_top);
2069
2070 /* rx_skb_prev will be wiped out by rx_skb_top */
2071 rx_ring->rx_skb_top = NULL;
2072 rx_ring->rx_skb_prev = NULL;
2073 }
2074
2075
1938 size = sizeof(struct e1000_buffer) * rx_ring->count; 2076 size = sizeof(struct e1000_buffer) * rx_ring->count;
1939 memset(rx_ring->buffer_info, 0, size); 2077 memset(rx_ring->buffer_info, 0, size);
1940 size = sizeof(struct e1000_ps_page) * rx_ring->count; 2078 size = sizeof(struct e1000_ps_page) * rx_ring->count;
@@ -1963,7 +2101,7 @@ e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
1963{ 2101{
1964 int i; 2102 int i;
1965 2103
1966 for (i = 0; i < adapter->num_queues; i++) 2104 for (i = 0; i < adapter->num_rx_queues; i++)
1967 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); 2105 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1968} 2106}
1969 2107
@@ -2005,7 +2143,9 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
2005 2143
2006 if(netif_running(netdev)) { 2144 if(netif_running(netdev)) {
2007 e1000_configure_rx(adapter); 2145 e1000_configure_rx(adapter);
2008 e1000_alloc_rx_buffers(adapter, &adapter->rx_ring[0]); 2146 /* No need to loop, because 82542 supports only 1 queue */
2147 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2148 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2009 } 2149 }
2010} 2150}
2011 2151
@@ -2204,7 +2344,7 @@ static void
2204e1000_watchdog_task(struct e1000_adapter *adapter) 2344e1000_watchdog_task(struct e1000_adapter *adapter)
2205{ 2345{
2206 struct net_device *netdev = adapter->netdev; 2346 struct net_device *netdev = adapter->netdev;
2207 struct e1000_tx_ring *txdr = &adapter->tx_ring[0]; 2347 struct e1000_tx_ring *txdr = adapter->tx_ring;
2208 uint32_t link; 2348 uint32_t link;
2209 2349
2210 e1000_check_for_link(&adapter->hw); 2350 e1000_check_for_link(&adapter->hw);
@@ -2231,6 +2371,21 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2231 adapter->link_duplex == FULL_DUPLEX ? 2371 adapter->link_duplex == FULL_DUPLEX ?
2232 "Full Duplex" : "Half Duplex"); 2372 "Full Duplex" : "Half Duplex");
2233 2373
2374 /* tweak tx_queue_len according to speed/duplex */
2375 netdev->tx_queue_len = adapter->tx_queue_len;
2376 adapter->tx_timeout_factor = 1;
2377 if (adapter->link_duplex == HALF_DUPLEX) {
2378 switch (adapter->link_speed) {
2379 case SPEED_10:
2380 netdev->tx_queue_len = 10;
2381 adapter->tx_timeout_factor = 8;
2382 break;
2383 case SPEED_100:
2384 netdev->tx_queue_len = 100;
2385 break;
2386 }
2387 }
2388
2234 netif_carrier_on(netdev); 2389 netif_carrier_on(netdev);
2235 netif_wake_queue(netdev); 2390 netif_wake_queue(netdev);
2236 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ); 2391 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
@@ -2263,7 +2418,10 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2263 2418
2264 e1000_update_adaptive(&adapter->hw); 2419 e1000_update_adaptive(&adapter->hw);
2265 2420
2266 if (adapter->num_queues == 1 && !netif_carrier_ok(netdev)) { 2421#ifdef CONFIG_E1000_MQ
2422 txdr = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
2423#endif
2424 if (!netif_carrier_ok(netdev)) {
2267 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { 2425 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2268 /* We've lost link, so the controller stops DMA, 2426 /* We've lost link, so the controller stops DMA,
2269 * but we've got queued Tx work that's never going 2427 * but we've got queued Tx work that's never going
@@ -2314,6 +2472,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2314{ 2472{
2315#ifdef NETIF_F_TSO 2473#ifdef NETIF_F_TSO
2316 struct e1000_context_desc *context_desc; 2474 struct e1000_context_desc *context_desc;
2475 struct e1000_buffer *buffer_info;
2317 unsigned int i; 2476 unsigned int i;
2318 uint32_t cmd_length = 0; 2477 uint32_t cmd_length = 0;
2319 uint16_t ipcse = 0, tucse, mss; 2478 uint16_t ipcse = 0, tucse, mss;
@@ -2363,6 +2522,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2363 2522
2364 i = tx_ring->next_to_use; 2523 i = tx_ring->next_to_use;
2365 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2524 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2525 buffer_info = &tx_ring->buffer_info[i];
2366 2526
2367 context_desc->lower_setup.ip_fields.ipcss = ipcss; 2527 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2368 context_desc->lower_setup.ip_fields.ipcso = ipcso; 2528 context_desc->lower_setup.ip_fields.ipcso = ipcso;
@@ -2374,14 +2534,16 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2374 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 2534 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2375 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 2535 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2376 2536
2537 buffer_info->time_stamp = jiffies;
2538
2377 if (++i == tx_ring->count) i = 0; 2539 if (++i == tx_ring->count) i = 0;
2378 tx_ring->next_to_use = i; 2540 tx_ring->next_to_use = i;
2379 2541
2380 return 1; 2542 return TRUE;
2381 } 2543 }
2382#endif 2544#endif
2383 2545
2384 return 0; 2546 return FALSE;
2385} 2547}
2386 2548
2387static inline boolean_t 2549static inline boolean_t
@@ -2389,6 +2551,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2389 struct sk_buff *skb) 2551 struct sk_buff *skb)
2390{ 2552{
2391 struct e1000_context_desc *context_desc; 2553 struct e1000_context_desc *context_desc;
2554 struct e1000_buffer *buffer_info;
2392 unsigned int i; 2555 unsigned int i;
2393 uint8_t css; 2556 uint8_t css;
2394 2557
@@ -2396,6 +2559,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2396 css = skb->h.raw - skb->data; 2559 css = skb->h.raw - skb->data;
2397 2560
2398 i = tx_ring->next_to_use; 2561 i = tx_ring->next_to_use;
2562 buffer_info = &tx_ring->buffer_info[i];
2399 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2563 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2400 2564
2401 context_desc->upper_setup.tcp_fields.tucss = css; 2565 context_desc->upper_setup.tcp_fields.tucss = css;
@@ -2404,6 +2568,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2404 context_desc->tcp_seg_setup.data = 0; 2568 context_desc->tcp_seg_setup.data = 0;
2405 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); 2569 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
2406 2570
2571 buffer_info->time_stamp = jiffies;
2572
2407 if (unlikely(++i == tx_ring->count)) i = 0; 2573 if (unlikely(++i == tx_ring->count)) i = 0;
2408 tx_ring->next_to_use = i; 2574 tx_ring->next_to_use = i;
2409 2575
@@ -2688,11 +2854,30 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2688 * overrun the FIFO, adjust the max buffer len if mss 2854 * overrun the FIFO, adjust the max buffer len if mss
2689 * drops. */ 2855 * drops. */
2690 if(mss) { 2856 if(mss) {
2857 uint8_t hdr_len;
2691 max_per_txd = min(mss << 2, max_per_txd); 2858 max_per_txd = min(mss << 2, max_per_txd);
2692 max_txd_pwr = fls(max_per_txd) - 1; 2859 max_txd_pwr = fls(max_per_txd) - 1;
2860
2861 /* TSO Workaround for 82571/2 Controllers -- if skb->data
2862 * points to just header, pull a few bytes of payload from
2863 * frags into skb->data */
2864 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2865 if (skb->data_len && (hdr_len == (skb->len - skb->data_len)) &&
2866 (adapter->hw.mac_type == e1000_82571 ||
2867 adapter->hw.mac_type == e1000_82572)) {
2868 unsigned int pull_size;
2869 pull_size = min((unsigned int)4, skb->data_len);
2870 if (!__pskb_pull_tail(skb, pull_size)) {
2871 printk(KERN_ERR "__pskb_pull_tail failed.\n");
2872 dev_kfree_skb_any(skb);
2873 return -EFAULT;
2874 }
2875 len = skb->len - skb->data_len;
2876 }
2693 } 2877 }
2694 2878
2695 if((mss) || (skb->ip_summed == CHECKSUM_HW)) 2879 if((mss) || (skb->ip_summed == CHECKSUM_HW))
2880 /* reserve a descriptor for the offload context */
2696 count++; 2881 count++;
2697 count++; 2882 count++;
2698#else 2883#else
@@ -2726,27 +2911,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2726 if(adapter->pcix_82544) 2911 if(adapter->pcix_82544)
2727 count += nr_frags; 2912 count += nr_frags;
2728 2913
2729#ifdef NETIF_F_TSO
2730 /* TSO Workaround for 82571/2 Controllers -- if skb->data
2731 * points to just header, pull a few bytes of payload from
2732 * frags into skb->data */
2733 if (skb_shinfo(skb)->tso_size) {
2734 uint8_t hdr_len;
2735 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2736 if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) &&
2737 (adapter->hw.mac_type == e1000_82571 ||
2738 adapter->hw.mac_type == e1000_82572)) {
2739 unsigned int pull_size;
2740 pull_size = min((unsigned int)4, skb->data_len);
2741 if (!__pskb_pull_tail(skb, pull_size)) {
2742 printk(KERN_ERR "__pskb_pull_tail failed.\n");
2743 dev_kfree_skb_any(skb);
2744 return -EFAULT;
2745 }
2746 }
2747 }
2748#endif
2749
2750 if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) 2914 if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
2751 e1000_transfer_dhcp_info(adapter, skb); 2915 e1000_transfer_dhcp_info(adapter, skb);
2752 2916
@@ -2833,6 +2997,7 @@ e1000_tx_timeout_task(struct net_device *netdev)
2833{ 2997{
2834 struct e1000_adapter *adapter = netdev_priv(netdev); 2998 struct e1000_adapter *adapter = netdev_priv(netdev);
2835 2999
3000 adapter->tx_timeout_count++;
2836 e1000_down(adapter); 3001 e1000_down(adapter);
2837 e1000_up(adapter); 3002 e1000_up(adapter);
2838} 3003}
@@ -2850,7 +3015,7 @@ e1000_get_stats(struct net_device *netdev)
2850{ 3015{
2851 struct e1000_adapter *adapter = netdev_priv(netdev); 3016 struct e1000_adapter *adapter = netdev_priv(netdev);
2852 3017
2853 e1000_update_stats(adapter); 3018 /* only return the current stats */
2854 return &adapter->net_stats; 3019 return &adapter->net_stats;
2855} 3020}
2856 3021
@@ -2871,50 +3036,51 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
2871 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3036 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
2872 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3037 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
2873 DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); 3038 DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
2874 return -EINVAL;
2875 }
2876
2877#define MAX_STD_JUMBO_FRAME_SIZE 9234
2878 /* might want this to be bigger enum check... */
2879 /* 82571 controllers limit jumbo frame size to 10500 bytes */
2880 if ((adapter->hw.mac_type == e1000_82571 ||
2881 adapter->hw.mac_type == e1000_82572) &&
2882 max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
2883 DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported "
2884 "on 82571 and 82572 controllers.\n");
2885 return -EINVAL; 3039 return -EINVAL;
2886 } 3040 }
2887 3041
2888 if(adapter->hw.mac_type == e1000_82573 && 3042 /* Adapter-specific max frame size limits. */
2889 max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { 3043 switch (adapter->hw.mac_type) {
2890 DPRINTK(PROBE, ERR, "Jumbo Frames not supported " 3044 case e1000_82542_rev2_0:
2891 "on 82573\n"); 3045 case e1000_82542_rev2_1:
2892 return -EINVAL; 3046 case e1000_82573:
2893 } 3047 if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
2894 3048 DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
2895 if(adapter->hw.mac_type > e1000_82547_rev_2) { 3049 return -EINVAL;
2896 adapter->rx_buffer_len = max_frame; 3050 }
2897 E1000_ROUNDUP(adapter->rx_buffer_len, 1024); 3051 break;
2898 } else { 3052 case e1000_82571:
2899 if(unlikely((adapter->hw.mac_type < e1000_82543) && 3053 case e1000_82572:
2900 (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) { 3054#define MAX_STD_JUMBO_FRAME_SIZE 9234
2901 DPRINTK(PROBE, ERR, "Jumbo Frames not supported " 3055 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
2902 "on 82542\n"); 3056 DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
2903 return -EINVAL; 3057 return -EINVAL;
2904
2905 } else {
2906 if(max_frame <= E1000_RXBUFFER_2048) {
2907 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
2908 } else if(max_frame <= E1000_RXBUFFER_4096) {
2909 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
2910 } else if(max_frame <= E1000_RXBUFFER_8192) {
2911 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
2912 } else if(max_frame <= E1000_RXBUFFER_16384) {
2913 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
2914 }
2915 } 3058 }
3059 break;
3060 default:
3061 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3062 break;
2916 } 3063 }
2917 3064
3065 /* since the driver code now supports splitting a packet across
3066 * multiple descriptors, most of the fifo related limitations on
3067 * jumbo frame traffic have gone away.
3068 * simply use 2k descriptors for everything.
3069 *
3070 * NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3071 * means we reserve 2 more, this pushes us to allocate from the next
3072 * larger slab size
3073 * i.e. RXBUFFER_2048 --> size-4096 slab */
3074
3075 /* recent hardware supports 1KB granularity */
3076 if (adapter->hw.mac_type > e1000_82547_rev_2) {
3077 adapter->rx_buffer_len =
3078 ((max_frame < E1000_RXBUFFER_2048) ?
3079 max_frame : E1000_RXBUFFER_2048);
3080 E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
3081 } else
3082 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3083
2918 netdev->mtu = new_mtu; 3084 netdev->mtu = new_mtu;
2919 3085
2920 if(netif_running(netdev)) { 3086 if(netif_running(netdev)) {
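
With packet splitting in place, receive buffers are capped at 2 KB and rounded to the hardware's 1 KB granularity via E1000_ROUNDUP. A worked example, assuming the driver's round-up macro: a standard 1522-byte max frame still lands in a 2048-byte buffer:

    #include <stdio.h>
    #include <stdint.h>

    /* Round x up to the next multiple of size (as the driver's macro does). */
    #define E1000_ROUNDUP(x, size) ((x) = (((x) + (size) - 1) & ~((size) - 1)))

    int main(void)
    {
        uint32_t max_frame = 1522;                 /* 1500 MTU + header + FCS */
        uint32_t len = max_frame < 2048 ? max_frame : 2048;

        E1000_ROUNDUP(len, 1024);
        printf("rx_buffer_len=%u\n", len);         /* prints 2048 */
        return 0;
    }
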
@@ -3037,12 +3203,11 @@ e1000_update_stats(struct e1000_adapter *adapter)
3037 3203
3038 adapter->net_stats.rx_errors = adapter->stats.rxerrc + 3204 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
3039 adapter->stats.crcerrs + adapter->stats.algnerrc + 3205 adapter->stats.crcerrs + adapter->stats.algnerrc +
3040 adapter->stats.rlec + adapter->stats.mpc + 3206 adapter->stats.rlec + adapter->stats.cexterr;
3041 adapter->stats.cexterr; 3207 adapter->net_stats.rx_dropped = 0;
3042 adapter->net_stats.rx_length_errors = adapter->stats.rlec; 3208 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
3043 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 3209 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
3044 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; 3210 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
3045 adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
3046 adapter->net_stats.rx_missed_errors = adapter->stats.mpc; 3211 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
3047 3212
3048 /* Tx Errors */ 3213 /* Tx Errors */
@@ -3110,12 +3275,24 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3110 struct e1000_adapter *adapter = netdev_priv(netdev); 3275 struct e1000_adapter *adapter = netdev_priv(netdev);
3111 struct e1000_hw *hw = &adapter->hw; 3276 struct e1000_hw *hw = &adapter->hw;
3112 uint32_t icr = E1000_READ_REG(hw, ICR); 3277 uint32_t icr = E1000_READ_REG(hw, ICR);
3113#if defined(CONFIG_E1000_NAPI) && defined(CONFIG_E1000_MQ) || !defined(CONFIG_E1000_NAPI) 3278#ifndef CONFIG_E1000_NAPI
3114 int i; 3279 int i;
3280#else
3281 /* Interrupt Auto-Mask...upon reading ICR,
3282 * interrupts are masked. No need for the
3283 * IMC write, but it does mean we should
3284 * account for it ASAP. */
3285 if (likely(hw->mac_type >= e1000_82571))
3286 atomic_inc(&adapter->irq_sem);
3115#endif 3287#endif
3116 3288
3117 if(unlikely(!icr)) 3289 if (unlikely(!icr)) {
3290#ifdef CONFIG_E1000_NAPI
3291 if (hw->mac_type >= e1000_82571)
3292 e1000_irq_enable(adapter);
3293#endif
3118 return IRQ_NONE; /* Not our interrupt */ 3294 return IRQ_NONE; /* Not our interrupt */
3295 }
3119 3296
3120 if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3297 if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3121 hw->get_link_status = 1; 3298 hw->get_link_status = 1;
@@ -3123,19 +3300,19 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3123 } 3300 }
3124 3301
3125#ifdef CONFIG_E1000_NAPI 3302#ifdef CONFIG_E1000_NAPI
3126 atomic_inc(&adapter->irq_sem); 3303 if (unlikely(hw->mac_type < e1000_82571)) {
3127 E1000_WRITE_REG(hw, IMC, ~0); 3304 atomic_inc(&adapter->irq_sem);
3128 E1000_WRITE_FLUSH(hw); 3305 E1000_WRITE_REG(hw, IMC, ~0);
3306 E1000_WRITE_FLUSH(hw);
3307 }
3129#ifdef CONFIG_E1000_MQ 3308#ifdef CONFIG_E1000_MQ
3130 if (atomic_read(&adapter->rx_sched_call_data.count) == 0) { 3309 if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
3131 cpu_set(adapter->cpu_for_queue[0], 3310 /* We must setup the cpumask once count == 0 since
3132 adapter->rx_sched_call_data.cpumask); 3311 * each cpu bit is cleared when the work is done. */
3133 for (i = 1; i < adapter->num_queues; i++) { 3312 adapter->rx_sched_call_data.cpumask = adapter->cpumask;
3134 cpu_set(adapter->cpu_for_queue[i], 3313 atomic_add(adapter->num_rx_queues - 1, &adapter->irq_sem);
3135 adapter->rx_sched_call_data.cpumask); 3314 atomic_set(&adapter->rx_sched_call_data.count,
3136 atomic_inc(&adapter->irq_sem); 3315 adapter->num_rx_queues);
3137 }
3138 atomic_set(&adapter->rx_sched_call_data.count, i);
3139 smp_call_async_mask(&adapter->rx_sched_call_data); 3316 smp_call_async_mask(&adapter->rx_sched_call_data);
3140 } else { 3317 } else {
3141 printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count)); 3318 printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count));
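
With IAME set, the act of reading ICR masks further interrupts, so the handler only has to account for the implicit mask by bumping irq_sem; the explicit IMC write remains for pre-82571 parts. A sketch of the counting-semaphore accounting this relies on, with the semantics assumed from the driver's irq_sem usage (positive count means masked, re-enable only when it returns to zero):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int irq_sem = 1;

    static void on_icr_read_with_iame(void)
    {
        /* The ICR read itself masked the interrupts (IAME), so only the
         * bookkeeping is needed -- no IMC write. */
        atomic_fetch_add(&irq_sem, 1);
    }

    static void irq_enable(void)
    {
        if (atomic_fetch_sub(&irq_sem, 1) == 1)
            puts("count hit zero: write IMS here to unmask");
    }
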
@@ -3187,7 +3364,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3187{ 3364{
3188 struct e1000_adapter *adapter; 3365 struct e1000_adapter *adapter;
3189 int work_to_do = min(*budget, poll_dev->quota); 3366 int work_to_do = min(*budget, poll_dev->quota);
3190 int tx_cleaned, i = 0, work_done = 0; 3367 int tx_cleaned = 0, i = 0, work_done = 0;
3191 3368
3192 /* Must NOT use netdev_priv macro here. */ 3369 /* Must NOT use netdev_priv macro here. */
3193 adapter = poll_dev->priv; 3370 adapter = poll_dev->priv;
@@ -3198,11 +3375,23 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3198 3375
3199 while (poll_dev != &adapter->polling_netdev[i]) { 3376 while (poll_dev != &adapter->polling_netdev[i]) {
3200 i++; 3377 i++;
3201 if (unlikely(i == adapter->num_queues)) 3378 if (unlikely(i == adapter->num_rx_queues))
3202 BUG(); 3379 BUG();
3203 } 3380 }
3204 3381
3205 tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]); 3382 if (likely(adapter->num_tx_queues == 1)) {
3383 /* e1000_clean is called per-cpu. This lock protects
3384 * tx_ring[0] from being cleaned by multiple cpus
3385 * simultaneously. A failure obtaining the lock means
3386 * tx_ring[0] is currently being cleaned anyway. */
3387 if (spin_trylock(&adapter->tx_queue_lock)) {
3388 tx_cleaned = e1000_clean_tx_irq(adapter,
3389 &adapter->tx_ring[0]);
3390 spin_unlock(&adapter->tx_queue_lock);
3391 }
3392 } else
3393 tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
3394
3206 adapter->clean_rx(adapter, &adapter->rx_ring[i], 3395 adapter->clean_rx(adapter, &adapter->rx_ring[i],
3207 &work_done, work_to_do); 3396 &work_done, work_to_do);
3208 3397
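
Because e1000_clean can run concurrently on several CPUs while only one TX ring exists, the trylock makes the losing CPU skip cleaning instead of spinning: whoever holds the lock is already doing the work. The same pattern in portable user-space C, with pthreads standing in purely for illustration:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t tx_queue_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Returns true if this caller performed the cleaning. Losing the
     * trylock means another CPU is already cleaning the ring, so there
     * is nothing useful to wait for. */
    static bool try_clean_tx_ring(void (*clean)(void))
    {
        if (pthread_mutex_trylock(&tx_queue_lock) != 0)
            return false;
        clean();
        pthread_mutex_unlock(&tx_queue_lock);
        return true;
    }
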
@@ -3247,17 +3436,19 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3247 buffer_info = &tx_ring->buffer_info[i]; 3436 buffer_info = &tx_ring->buffer_info[i];
3248 cleaned = (i == eop); 3437 cleaned = (i == eop);
3249 3438
3439#ifdef CONFIG_E1000_MQ
3440 tx_ring->tx_stats.bytes += buffer_info->length;
3441#endif
3250 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 3442 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3251 3443 memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
3252 tx_desc->buffer_addr = 0;
3253 tx_desc->lower.data = 0;
3254 tx_desc->upper.data = 0;
3255 3444
3256 if(unlikely(++i == tx_ring->count)) i = 0; 3445 if(unlikely(++i == tx_ring->count)) i = 0;
3257 } 3446 }
3258 3447
3259 tx_ring->pkt++; 3448#ifdef CONFIG_E1000_MQ
3260 3449 tx_ring->tx_stats.packets++;
3450#endif
3451
3261 eop = tx_ring->buffer_info[i].next_to_watch; 3452 eop = tx_ring->buffer_info[i].next_to_watch;
3262 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3453 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3263 } 3454 }
@@ -3276,32 +3467,31 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3276 /* Detect a transmit hang in hardware, this serializes the 3467 /* Detect a transmit hang in hardware, this serializes the
3277 * check with the clearing of time_stamp and movement of i */ 3468 * check with the clearing of time_stamp and movement of i */
3278 adapter->detect_tx_hung = FALSE; 3469 adapter->detect_tx_hung = FALSE;
3279 if (tx_ring->buffer_info[i].dma && 3470 if (tx_ring->buffer_info[eop].dma &&
3280 time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ) 3471 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3472 adapter->tx_timeout_factor * HZ)
3281 && !(E1000_READ_REG(&adapter->hw, STATUS) & 3473 && !(E1000_READ_REG(&adapter->hw, STATUS) &
3282 E1000_STATUS_TXOFF)) { 3474 E1000_STATUS_TXOFF)) {
3283 3475
3284 /* detected Tx unit hang */ 3476 /* detected Tx unit hang */
3285 i = tx_ring->next_to_clean;
3286 eop = tx_ring->buffer_info[i].next_to_watch;
3287 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3288 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" 3477 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
3478 " Tx Queue <%lu>\n"
3289 " TDH <%x>\n" 3479 " TDH <%x>\n"
3290 " TDT <%x>\n" 3480 " TDT <%x>\n"
3291 " next_to_use <%x>\n" 3481 " next_to_use <%x>\n"
3292 " next_to_clean <%x>\n" 3482 " next_to_clean <%x>\n"
3293 "buffer_info[next_to_clean]\n" 3483 "buffer_info[next_to_clean]\n"
3294 " dma <%llx>\n"
3295 " time_stamp <%lx>\n" 3484 " time_stamp <%lx>\n"
3296 " next_to_watch <%x>\n" 3485 " next_to_watch <%x>\n"
3297 " jiffies <%lx>\n" 3486 " jiffies <%lx>\n"
3298 " next_to_watch.status <%x>\n", 3487 " next_to_watch.status <%x>\n",
3488 /* pointer subtraction already yields the queue index */
3489 (unsigned long)(tx_ring - adapter->tx_ring),
3299 readl(adapter->hw.hw_addr + tx_ring->tdh), 3490 readl(adapter->hw.hw_addr + tx_ring->tdh),
3300 readl(adapter->hw.hw_addr + tx_ring->tdt), 3491 readl(adapter->hw.hw_addr + tx_ring->tdt),
3301 tx_ring->next_to_use, 3492 tx_ring->next_to_use,
3302 i, 3493 tx_ring->next_to_clean,
3303 (unsigned long long)tx_ring->buffer_info[i].dma, 3494 tx_ring->buffer_info[eop].time_stamp,
3304 tx_ring->buffer_info[i].time_stamp,
3305 eop, 3495 eop,
3306 jiffies, 3496 jiffies,
3307 eop_desc->upper.fields.status); 3497 eop_desc->upper.fields.status);
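The reworked hang detector keys off the end-of-packet descriptor: a hang is reported only when that buffer is still DMA-mapped, is older than adapter->tx_timeout_factor * HZ, and the transmitter is not legitimately paused by flow control (E1000_STATUS_TXOFF). A standalone sketch of the same age test, with CLOCK_MONOTONIC in place of jiffies (the one-second threshold is illustrative):

	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>

	struct tx_slot {
		bool dma_mapped;		/* buffer still owned by hardware */
		struct timespec time_stamp;	/* when it was handed to hardware */
	};

	/* Hang heuristic: pending longer than timeout_sec while transmission
	 * is not paused (the driver's TXOFF check). */
	static bool tx_hung(const struct tx_slot *slot, bool tx_paused,
			    double timeout_sec)
	{
		struct timespec now;
		double age;

		if (!slot->dma_mapped || tx_paused)
			return false;

		clock_gettime(CLOCK_MONOTONIC, &now);
		age = (now.tv_sec - slot->time_stamp.tv_sec) +
		      (now.tv_nsec - slot->time_stamp.tv_nsec) / 1e9;
		return age > timeout_sec;
	}

	int main(void)
	{
		struct tx_slot slot = { .dma_mapped = true };

		clock_gettime(CLOCK_MONOTONIC, &slot.time_stamp);
		printf("hung: %d\n", tx_hung(&slot, false, 1.0));
		return 0;
	}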
@@ -3386,20 +3576,23 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3386 uint32_t length; 3576 uint32_t length;
3387 uint8_t last_byte; 3577 uint8_t last_byte;
3388 unsigned int i; 3578 unsigned int i;
3389 boolean_t cleaned = FALSE; 3579 int cleaned_count = 0;
3580 boolean_t cleaned = FALSE, multi_descriptor = FALSE;
3390 3581
3391 i = rx_ring->next_to_clean; 3582 i = rx_ring->next_to_clean;
3392 rx_desc = E1000_RX_DESC(*rx_ring, i); 3583 rx_desc = E1000_RX_DESC(*rx_ring, i);
3393 3584
3394 while(rx_desc->status & E1000_RXD_STAT_DD) { 3585 while(rx_desc->status & E1000_RXD_STAT_DD) {
3395 buffer_info = &rx_ring->buffer_info[i]; 3586 buffer_info = &rx_ring->buffer_info[i];
3587 u8 status;
3396#ifdef CONFIG_E1000_NAPI 3588#ifdef CONFIG_E1000_NAPI
3397 if(*work_done >= work_to_do) 3589 if(*work_done >= work_to_do)
3398 break; 3590 break;
3399 (*work_done)++; 3591 (*work_done)++;
3400#endif 3592#endif
3593 status = rx_desc->status;
3401 cleaned = TRUE; 3594 cleaned = TRUE;
3402 3595 cleaned_count++;
3403 pci_unmap_single(pdev, 3596 pci_unmap_single(pdev,
3404 buffer_info->dma, 3597 buffer_info->dma,
3405 buffer_info->length, 3598 buffer_info->length,
@@ -3433,18 +3626,40 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3433 } 3626 }
3434 } 3627 }
3435 3628
3436 /* Good Receive */ 3629 /* code added for copybreak, this should improve
3437 skb_put(skb, length - ETHERNET_FCS_SIZE); 3630 * performance for small packets with large amounts
3631 * of reassembly being done in the stack */
3632#define E1000_CB_LENGTH 256
3633 if ((length < E1000_CB_LENGTH) &&
3634 !rx_ring->rx_skb_top &&
3635 /* or maybe (status & E1000_RXD_STAT_EOP) && */
3636 !multi_descriptor) {
3637 struct sk_buff *new_skb =
3638 dev_alloc_skb(length + NET_IP_ALIGN);
3639 if (new_skb) {
3640 skb_reserve(new_skb, NET_IP_ALIGN);
3641 new_skb->dev = netdev;
3642 memcpy(new_skb->data - NET_IP_ALIGN,
3643 skb->data - NET_IP_ALIGN,
3644 length + NET_IP_ALIGN);
3645 /* save the skb in buffer_info as good */
3646 buffer_info->skb = skb;
3647 skb = new_skb;
3648 skb_put(skb, length);
3649 }
3650 }
3651
3652 /* end copybreak code */
3438 3653
3439 /* Receive Checksum Offload */ 3654 /* Receive Checksum Offload */
3440 e1000_rx_checksum(adapter, 3655 e1000_rx_checksum(adapter,
3441 (uint32_t)(rx_desc->status) | 3656 (uint32_t)(status) |
3442 ((uint32_t)(rx_desc->errors) << 24), 3657 ((uint32_t)(rx_desc->errors) << 24),
3443 rx_desc->csum, skb); 3658 rx_desc->csum, skb);
3444 skb->protocol = eth_type_trans(skb, netdev); 3659 skb->protocol = eth_type_trans(skb, netdev);
3445#ifdef CONFIG_E1000_NAPI 3660#ifdef CONFIG_E1000_NAPI
3446 if(unlikely(adapter->vlgrp && 3661 if(unlikely(adapter->vlgrp &&
3447 (rx_desc->status & E1000_RXD_STAT_VP))) { 3662 (status & E1000_RXD_STAT_VP))) {
3448 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3663 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3449 le16_to_cpu(rx_desc->special) & 3664 le16_to_cpu(rx_desc->special) &
3450 E1000_RXD_SPC_VLAN_MASK); 3665 E1000_RXD_SPC_VLAN_MASK);
@@ -3462,17 +3677,26 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3462 } 3677 }
3463#endif /* CONFIG_E1000_NAPI */ 3678#endif /* CONFIG_E1000_NAPI */
3464 netdev->last_rx = jiffies; 3679 netdev->last_rx = jiffies;
3465 rx_ring->pkt++; 3680#ifdef CONFIG_E1000_MQ
3681 rx_ring->rx_stats.packets++;
3682 rx_ring->rx_stats.bytes += length;
3683#endif
3466 3684
3467next_desc: 3685next_desc:
3468 rx_desc->status = 0; 3686 rx_desc->status = 0;
3469 buffer_info->skb = NULL;
3470 if(unlikely(++i == rx_ring->count)) i = 0;
3471 3687
3472 rx_desc = E1000_RX_DESC(*rx_ring, i); 3688 /* return some buffers to hardware, one at a time is too slow */
3689 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
3690 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3691 cleaned_count = 0;
3692 }
3693
3473 } 3694 }
3474 rx_ring->next_to_clean = i; 3695 rx_ring->next_to_clean = i;
3475 adapter->alloc_rx_buf(adapter, rx_ring); 3696
3697 cleaned_count = E1000_DESC_UNUSED(rx_ring);
3698 if (cleaned_count)
3699 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3476 3700
3477 return cleaned; 3701 return cleaned;
3478} 3702}
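The copybreak path above trades one memcpy for buffer reuse: a packet shorter than E1000_CB_LENGTH (256 bytes, per the hunk) is copied into a right-sized skb, and the original full-sized, still-mapped buffer is parked back in buffer_info for the ring to reuse. A userspace sketch of the idea (buffer sizes and the allocation scheme are illustrative):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define CB_LENGTH	256	/* copybreak threshold, from the hunk above */
	#define RX_BUF_SIZE	2048	/* illustrative full-size receive buffer */

	/* Return the buffer to hand up the stack.  On the copybreak path the
	 * big receive buffer is left in *recycled so the caller can re-post
	 * it to the ring without a fresh allocation or DMA mapping. */
	static unsigned char *receive(unsigned char *rx_buf, size_t len,
				      unsigned char **recycled)
	{
		*recycled = NULL;
		if (len < CB_LENGTH) {
			unsigned char *small = malloc(len);

			if (small) {
				memcpy(small, rx_buf, len);
				*recycled = rx_buf;	/* ring keeps this buffer */
				return small;
			}
		}
		return rx_buf;	/* large packet (or OOM): pass the buffer up */
	}

	int main(void)
	{
		unsigned char *buf = malloc(RX_BUF_SIZE);
		unsigned char *recycled;
		unsigned char *pkt;

		if (!buf)
			return 1;
		pkt = receive(buf, 60, &recycled);
		printf("copied=%d recycled=%d\n", pkt != buf, recycled != NULL);
		free(pkt);
		free(recycled);
		return 0;
	}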
@@ -3501,6 +3725,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3501 struct sk_buff *skb; 3725 struct sk_buff *skb;
3502 unsigned int i, j; 3726 unsigned int i, j;
3503 uint32_t length, staterr; 3727 uint32_t length, staterr;
3728 int cleaned_count = 0;
3504 boolean_t cleaned = FALSE; 3729 boolean_t cleaned = FALSE;
3505 3730
3506 i = rx_ring->next_to_clean; 3731 i = rx_ring->next_to_clean;
@@ -3517,6 +3742,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3517 (*work_done)++; 3742 (*work_done)++;
3518#endif 3743#endif
3519 cleaned = TRUE; 3744 cleaned = TRUE;
3745 cleaned_count++;
3520 pci_unmap_single(pdev, buffer_info->dma, 3746 pci_unmap_single(pdev, buffer_info->dma,
3521 buffer_info->length, 3747 buffer_info->length,
3522 PCI_DMA_FROMDEVICE); 3748 PCI_DMA_FROMDEVICE);
@@ -3593,18 +3819,28 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3593 } 3819 }
3594#endif /* CONFIG_E1000_NAPI */ 3820#endif /* CONFIG_E1000_NAPI */
3595 netdev->last_rx = jiffies; 3821 netdev->last_rx = jiffies;
3596 rx_ring->pkt++; 3822#ifdef CONFIG_E1000_MQ
3823 rx_ring->rx_stats.packets++;
3824 rx_ring->rx_stats.bytes += length;
3825#endif
3597 3826
3598next_desc: 3827next_desc:
3599 rx_desc->wb.middle.status_error &= ~0xFF; 3828 rx_desc->wb.middle.status_error &= ~0xFF;
3600 buffer_info->skb = NULL; 3829 buffer_info->skb = NULL;
3601 if(unlikely(++i == rx_ring->count)) i = 0;
3602 3830
3603 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 3831 /* return some buffers to hardware, one at a time is too slow */
3832 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
3833 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3834 cleaned_count = 0;
3835 }
3836
3604 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); 3837 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
3605 } 3838 }
3606 rx_ring->next_to_clean = i; 3839 rx_ring->next_to_clean = i;
3607 adapter->alloc_rx_buf(adapter, rx_ring); 3840
3841 cleaned_count = E1000_DESC_UNUSED(rx_ring);
3842 if (cleaned_count)
3843 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3608 3844
3609 return cleaned; 3845 return cleaned;
3610} 3846}
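Both receive cleanup routines now refill the ring in batches: buffers go back to hardware once E1000_RX_BUFFER_WRITE descriptors have been cleaned, and whatever is still unused gets topped up after the loop, which amortizes the cost per refill. A sketch of the batching logic; the batch size of 16 is an assumption, since E1000_RX_BUFFER_WRITE's value is not shown in these hunks:

	#include <stdio.h>

	#define RX_BUFFER_WRITE	16	/* assumed batch size */

	static int ring_unused;		/* descriptors waiting for fresh buffers */

	/* Stand-in for adapter->alloc_rx_buf(): post 'count' buffers to hw. */
	static void alloc_rx_buf(int count)
	{
		ring_unused -= count;
		printf("refill %d (unused now %d)\n", count, ring_unused);
	}

	static void clean_rx(int ndesc)
	{
		int cleaned_count = 0;

		for (int i = 0; i < ndesc; i++) {
			ring_unused++;		/* one descriptor consumed */
			cleaned_count++;
			/* return buffers to hardware in batches; one at a
			 * time is too slow */
			if (cleaned_count >= RX_BUFFER_WRITE) {
				alloc_rx_buf(cleaned_count);
				cleaned_count = 0;
			}
		}
		/* final top-up of whatever is still unused */
		if (ring_unused)
			alloc_rx_buf(ring_unused);
	}

	int main(void)
	{
		clean_rx(40);
		return 0;
	}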
@@ -3616,7 +3852,8 @@ next_desc:
3616 3852
3617static void 3853static void
3618e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 3854e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3619 struct e1000_rx_ring *rx_ring) 3855 struct e1000_rx_ring *rx_ring,
3856 int cleaned_count)
3620{ 3857{
3621 struct net_device *netdev = adapter->netdev; 3858 struct net_device *netdev = adapter->netdev;
3622 struct pci_dev *pdev = adapter->pdev; 3859 struct pci_dev *pdev = adapter->pdev;
@@ -3629,11 +3866,18 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3629 i = rx_ring->next_to_use; 3866 i = rx_ring->next_to_use;
3630 buffer_info = &rx_ring->buffer_info[i]; 3867 buffer_info = &rx_ring->buffer_info[i];
3631 3868
3632 while(!buffer_info->skb) { 3869 while (cleaned_count--) {
3633 skb = dev_alloc_skb(bufsz); 3870 if (!(skb = buffer_info->skb))
3871 skb = dev_alloc_skb(bufsz);
3872 else {
3873 skb_trim(skb, 0);
3874 goto map_skb;
3875 }
3876
3634 3877
3635 if(unlikely(!skb)) { 3878 if(unlikely(!skb)) {
3636 /* Better luck next round */ 3879 /* Better luck next round */
3880 adapter->alloc_rx_buff_failed++;
3637 break; 3881 break;
3638 } 3882 }
3639 3883
@@ -3670,6 +3914,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3670 3914
3671 buffer_info->skb = skb; 3915 buffer_info->skb = skb;
3672 buffer_info->length = adapter->rx_buffer_len; 3916 buffer_info->length = adapter->rx_buffer_len;
3917map_skb:
3673 buffer_info->dma = pci_map_single(pdev, 3918 buffer_info->dma = pci_map_single(pdev,
3674 skb->data, 3919 skb->data,
3675 adapter->rx_buffer_len, 3920 adapter->rx_buffer_len,
@@ -3718,7 +3963,8 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3718 3963
3719static void 3964static void
3720e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, 3965e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
3721 struct e1000_rx_ring *rx_ring) 3966 struct e1000_rx_ring *rx_ring,
3967 int cleaned_count)
3722{ 3968{
3723 struct net_device *netdev = adapter->netdev; 3969 struct net_device *netdev = adapter->netdev;
3724 struct pci_dev *pdev = adapter->pdev; 3970 struct pci_dev *pdev = adapter->pdev;
@@ -3734,7 +3980,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
3734 ps_page = &rx_ring->ps_page[i]; 3980 ps_page = &rx_ring->ps_page[i];
3735 ps_page_dma = &rx_ring->ps_page_dma[i]; 3981 ps_page_dma = &rx_ring->ps_page_dma[i];
3736 3982
3737 while(!buffer_info->skb) { 3983 while (cleaned_count--) {
3738 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 3984 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3739 3985
3740 for(j = 0; j < PS_PAGE_BUFFERS; j++) { 3986 for(j = 0; j < PS_PAGE_BUFFERS; j++) {
@@ -4106,8 +4352,12 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
4106 4352
4107 if((adapter->hw.mng_cookie.status & 4353 if((adapter->hw.mng_cookie.status &
4108 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 4354 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4109 (vid == adapter->mng_vlan_id)) 4355 (vid == adapter->mng_vlan_id)) {
4356 /* release control to f/w */
4357 e1000_release_hw_control(adapter);
4110 return; 4358 return;
4359 }
4360
4111 /* remove VID from filter table */ 4361 /* remove VID from filter table */
4112 index = (vid >> 5) & 0x7F; 4362 index = (vid >> 5) & 0x7F;
4113 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); 4363 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
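The VFTA update above packs 4096 possible VLAN IDs into 128 32-bit registers: bits 11..5 of the vid select the register and bits 4..0 select the bit inside it. The arithmetic in isolation:

	#include <stdint.h>
	#include <stdio.h>

	/* Mirror of the driver's index math: which VFTA register and which
	 * bit within it correspond to a given VLAN id. */
	static void vfta_locate(uint16_t vid, uint32_t *index, uint32_t *mask)
	{
		*index = (vid >> 5) & 0x7F;
		*mask = 1u << (vid & 0x1F);
	}

	int main(void)
	{
		uint32_t index, mask;

		vfta_locate(100, &index, &mask);
		printf("vid 100 -> VFTA[%u], bit mask 0x%08x\n",
		       (unsigned)index, (unsigned)mask);
		return 0;
	}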
@@ -4173,8 +4423,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4173{ 4423{
4174 struct net_device *netdev = pci_get_drvdata(pdev); 4424 struct net_device *netdev = pci_get_drvdata(pdev);
4175 struct e1000_adapter *adapter = netdev_priv(netdev); 4425 struct e1000_adapter *adapter = netdev_priv(netdev);
4176 uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm; 4426 uint32_t ctrl, ctrl_ext, rctl, manc, status;
4177 uint32_t wufc = adapter->wol; 4427 uint32_t wufc = adapter->wol;
4428 int retval = 0;
4178 4429
4179 netif_device_detach(netdev); 4430 netif_device_detach(netdev);
4180 4431
@@ -4220,13 +4471,21 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4220 4471
4221 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN); 4472 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
4222 E1000_WRITE_REG(&adapter->hw, WUFC, wufc); 4473 E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
4223 pci_enable_wake(pdev, 3, 1); 4474 retval = pci_enable_wake(pdev, PCI_D3hot, 1);
4224 pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */ 4475 if (retval)
4476 DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
4477 retval = pci_enable_wake(pdev, PCI_D3cold, 1);
4478 if (retval)
4479 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
4225 } else { 4480 } else {
4226 E1000_WRITE_REG(&adapter->hw, WUC, 0); 4481 E1000_WRITE_REG(&adapter->hw, WUC, 0);
4227 E1000_WRITE_REG(&adapter->hw, WUFC, 0); 4482 E1000_WRITE_REG(&adapter->hw, WUFC, 0);
4228 pci_enable_wake(pdev, 3, 0); 4483 retval = pci_enable_wake(pdev, PCI_D3hot, 0);
4229 pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */ 4484 if (retval)
4485 DPRINTK(PROBE, ERR, "Error disabling D3 wake\n");
4486 retval = pci_enable_wake(pdev, PCI_D3cold, 0);
4487 if (retval)
4488 DPRINTK(PROBE, ERR, "Error disabling D3 cold wake\n");
4230 } 4489 }
4231 4490
4232 pci_save_state(pdev); 4491 pci_save_state(pdev);
@@ -4237,29 +4496,24 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4237 if(manc & E1000_MANC_SMBUS_EN) { 4496 if(manc & E1000_MANC_SMBUS_EN) {
4238 manc |= E1000_MANC_ARP_EN; 4497 manc |= E1000_MANC_ARP_EN;
4239 E1000_WRITE_REG(&adapter->hw, MANC, manc); 4498 E1000_WRITE_REG(&adapter->hw, MANC, manc);
4240 pci_enable_wake(pdev, 3, 1); 4499 retval = pci_enable_wake(pdev, PCI_D3hot, 1);
4241 pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */ 4500 if (retval)
4501 DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
4502 retval = pci_enable_wake(pdev, PCI_D3cold, 1);
4503 if (retval)
4504 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
4242 } 4505 }
4243 } 4506 }
4244 4507
4245 switch(adapter->hw.mac_type) { 4508 /* Release control of h/w to f/w. If f/w is AMT enabled, this
4246 case e1000_82571: 4509 * would have already happened in close and is redundant. */
4247 case e1000_82572: 4510 e1000_release_hw_control(adapter);
4248 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
4249 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
4250 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
4251 break;
4252 case e1000_82573:
4253 swsm = E1000_READ_REG(&adapter->hw, SWSM);
4254 E1000_WRITE_REG(&adapter->hw, SWSM,
4255 swsm & ~E1000_SWSM_DRV_LOAD);
4256 break;
4257 default:
4258 break;
4259 }
4260 4511
4261 pci_disable_device(pdev); 4512 pci_disable_device(pdev);
4262 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 4513
4514 retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
4515 if (retval)
4516 DPRINTK(PROBE, ERR, "Error in setting power state\n");
4263 4517
4264 return 0; 4518 return 0;
4265} 4519}
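The suspend path now checks every pci_enable_wake() call but deliberately treats failure as non-fatal: it logs and carries on, so the device still suspends. A compact sketch of that log-and-continue shape (enable_wake here is a hypothetical stand-in, not the PCI core API):

	#include <stdio.h>

	/* Hypothetical stand-in for pci_enable_wake(); returns 0 on success. */
	static int enable_wake(int state, int on)
	{
		(void)state;
		(void)on;
		return 0;
	}

	/* Apply the same wake setting to both low-power states, logging on
	 * failure but never aborting, as in the hunks above. */
	static void set_wake(int on)
	{
		if (enable_wake(3 /* D3hot */, on))
			fprintf(stderr, "Error %sabling D3 wake\n",
				on ? "en" : "dis");
		if (enable_wake(4 /* D3cold */, on))
			fprintf(stderr, "Error %sabling D3 cold wake\n",
				on ? "en" : "dis");
	}

	int main(void)
	{
		set_wake(1);
		return 0;
	}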
@@ -4269,16 +4523,21 @@ e1000_resume(struct pci_dev *pdev)
4269{ 4523{
4270 struct net_device *netdev = pci_get_drvdata(pdev); 4524 struct net_device *netdev = pci_get_drvdata(pdev);
4271 struct e1000_adapter *adapter = netdev_priv(netdev); 4525 struct e1000_adapter *adapter = netdev_priv(netdev);
4272 uint32_t manc, ret_val, swsm; 4526 int retval;
4273 uint32_t ctrl_ext; 4527 uint32_t manc, ret_val;
4274 4528
4275 pci_set_power_state(pdev, PCI_D0); 4529 retval = pci_set_power_state(pdev, PCI_D0);
4276 pci_restore_state(pdev); 4530 if (retval)
4531 DPRINTK(PROBE, ERR, "Error in setting power state\n");
4277 ret_val = pci_enable_device(pdev); 4532 ret_val = pci_enable_device(pdev);
4278 pci_set_master(pdev); 4533 pci_set_master(pdev);
4279 4534
4280 pci_enable_wake(pdev, PCI_D3hot, 0); 4535 retval = pci_enable_wake(pdev, PCI_D3hot, 0);
4281 pci_enable_wake(pdev, PCI_D3cold, 0); 4536 if (retval)
4537 DPRINTK(PROBE, ERR, "Error disabling D3 wake\n");
4538 retval = pci_enable_wake(pdev, PCI_D3cold, 0);
4539 if (retval)
4540 DPRINTK(PROBE, ERR, "Error disabling D3 cold wake\n");
4282 4541
4283 e1000_reset(adapter); 4542 e1000_reset(adapter);
4284 E1000_WRITE_REG(&adapter->hw, WUS, ~0); 4543 E1000_WRITE_REG(&adapter->hw, WUS, ~0);
@@ -4295,21 +4554,13 @@ e1000_resume(struct pci_dev *pdev)
4295 E1000_WRITE_REG(&adapter->hw, MANC, manc); 4554 E1000_WRITE_REG(&adapter->hw, MANC, manc);
4296 } 4555 }
4297 4556
4298 switch(adapter->hw.mac_type) { 4557 /* If the controller is 82573 and f/w is AMT, do not set
4299 case e1000_82571: 4558 * DRV_LOAD until the interface is up. For all other cases,
4300 case e1000_82572: 4559 * let the f/w know that the h/w is now under the control
4301 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); 4560 * of the driver. */
4302 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, 4561 if (adapter->hw.mac_type != e1000_82573 ||
4303 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 4562 !e1000_check_mng_mode(&adapter->hw))
4304 break; 4563 e1000_get_hw_control(adapter);
4305 case e1000_82573:
4306 swsm = E1000_READ_REG(&adapter->hw, SWSM);
4307 E1000_WRITE_REG(&adapter->hw, SWSM,
4308 swsm | E1000_SWSM_DRV_LOAD);
4309 break;
4310 default:
4311 break;
4312 }
4313 4564
4314 return 0; 4565 return 0;
4315} 4566}
@@ -4327,6 +4578,9 @@ e1000_netpoll(struct net_device *netdev)
4327 disable_irq(adapter->pdev->irq); 4578 disable_irq(adapter->pdev->irq);
4328 e1000_intr(adapter->pdev->irq, netdev, NULL); 4579 e1000_intr(adapter->pdev->irq, netdev, NULL);
4329 e1000_clean_tx_irq(adapter, adapter->tx_ring); 4580 e1000_clean_tx_irq(adapter, adapter->tx_ring);
4581#ifndef CONFIG_E1000_NAPI
4582 adapter->clean_rx(adapter, adapter->rx_ring);
4583#endif
4330 enable_irq(adapter->pdev->irq); 4584 enable_irq(adapter->pdev->irq);
4331} 4585}
4332#endif 4586#endif
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index ccbbe5ad8e0f..0a7918c62557 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -177,7 +177,7 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
177 * 177 *
178 * Valid Range: 100-100000 (0=off, 1=dynamic) 178 * Valid Range: 100-100000 (0=off, 1=dynamic)
179 * 179 *
180 * Default Value: 1 180 * Default Value: 8000
181 */ 181 */
182 182
183E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); 183E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
@@ -320,7 +320,7 @@ e1000_check_options(struct e1000_adapter *adapter)
320 } else { 320 } else {
321 tx_ring->count = opt.def; 321 tx_ring->count = opt.def;
322 } 322 }
323 for (i = 0; i < adapter->num_queues; i++) 323 for (i = 0; i < adapter->num_tx_queues; i++)
324 tx_ring[i].count = tx_ring->count; 324 tx_ring[i].count = tx_ring->count;
325 } 325 }
326 { /* Receive Descriptor Count */ 326 { /* Receive Descriptor Count */
@@ -346,7 +346,7 @@ e1000_check_options(struct e1000_adapter *adapter)
346 } else { 346 } else {
347 rx_ring->count = opt.def; 347 rx_ring->count = opt.def;
348 } 348 }
349 for (i = 0; i < adapter->num_queues; i++) 349 for (i = 0; i < adapter->num_rx_queues; i++)
350 rx_ring[i].count = rx_ring->count; 350 rx_ring[i].count = rx_ring->count;
351 } 351 }
352 { /* Checksum Offload Enable/Disable */ 352 { /* Checksum Offload Enable/Disable */
@@ -388,7 +388,7 @@ e1000_check_options(struct e1000_adapter *adapter)
388 e1000_validate_option(&fc, &opt, adapter); 388 e1000_validate_option(&fc, &opt, adapter);
389 adapter->hw.fc = adapter->hw.original_fc = fc; 389 adapter->hw.fc = adapter->hw.original_fc = fc;
390 } else { 390 } else {
391 adapter->hw.fc = opt.def; 391 adapter->hw.fc = adapter->hw.original_fc = opt.def;
392 } 392 }
393 } 393 }
394 { /* Transmit Interrupt Delay */ 394 { /* Transmit Interrupt Delay */
@@ -584,6 +584,12 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
584 .p = dplx_list }} 584 .p = dplx_list }}
585 }; 585 };
586 586
587 if (e1000_check_phy_reset_block(&adapter->hw)) {
588 DPRINTK(PROBE, INFO,
589 "Link active due to SoL/IDER Session. "
590 "Speed/Duplex/AutoNeg parameter ignored.\n");
591 return;
592 }
587 if (num_Duplex > bd) { 593 if (num_Duplex > bd) {
588 dplx = Duplex[bd]; 594 dplx = Duplex[bd];
589 e1000_validate_option(&dplx, &opt, adapter); 595 e1000_validate_option(&dplx, &opt, adapter);
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 22c3a37bba5a..40ae36b20c9d 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -35,6 +35,8 @@
35#include <linux/tcp.h> 35#include <linux/tcp.h>
36#include <linux/udp.h> 36#include <linux/udp.h>
37#include <linux/etherdevice.h> 37#include <linux/etherdevice.h>
38#include <linux/in.h>
39#include <linux/ip.h>
38 40
39#include <linux/bitops.h> 41#include <linux/bitops.h>
40#include <linux/delay.h> 42#include <linux/delay.h>
@@ -55,13 +57,15 @@
55/* Constants */ 57/* Constants */
56#define VLAN_HLEN 4 58#define VLAN_HLEN 4
57#define FCS_LEN 4 59#define FCS_LEN 4
58#define WRAP NET_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN 60#define DMA_ALIGN 8 /* hw requires 8-byte alignment */
61#define HW_IP_ALIGN 2 /* hw aligns IP header */
62#define WRAP HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN
59#define RX_SKB_SIZE ((dev->mtu + WRAP + 7) & ~0x7) 63#define RX_SKB_SIZE ((dev->mtu + WRAP + 7) & ~0x7)
60 64
61#define INT_CAUSE_UNMASK_ALL 0x0007ffff 65#define INT_UNMASK_ALL 0x0007ffff
62#define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff 66#define INT_UNMASK_ALL_EXT 0x0011ffff
63#define INT_CAUSE_MASK_ALL 0x00000000 67#define INT_MASK_ALL 0x00000000
64#define INT_CAUSE_MASK_ALL_EXT 0x00000000 68#define INT_MASK_ALL_EXT 0x00000000
65#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL 69#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
66#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT 70#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
67 71
@@ -78,8 +82,9 @@
78static int eth_port_link_is_up(unsigned int eth_port_num); 82static int eth_port_link_is_up(unsigned int eth_port_num);
79static void eth_port_uc_addr_get(struct net_device *dev, 83static void eth_port_uc_addr_get(struct net_device *dev,
80 unsigned char *MacAddr); 84 unsigned char *MacAddr);
81static int mv643xx_eth_real_open(struct net_device *); 85static void eth_port_set_multicast_list(struct net_device *);
82static int mv643xx_eth_real_stop(struct net_device *); 86static int mv643xx_eth_open(struct net_device *);
87static int mv643xx_eth_stop(struct net_device *);
83static int mv643xx_eth_change_mtu(struct net_device *, int); 88static int mv643xx_eth_change_mtu(struct net_device *, int);
84static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *); 89static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *);
85static void eth_port_init_mac_tables(unsigned int eth_port_num); 90static void eth_port_init_mac_tables(unsigned int eth_port_num);
@@ -124,15 +129,8 @@ static inline void mv_write(int offset, u32 data)
124 */ 129 */
125static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) 130static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
126{ 131{
127 struct mv643xx_private *mp = netdev_priv(dev); 132 if ((new_mtu > 9500) || (new_mtu < 64))
128 unsigned long flags;
129
130 spin_lock_irqsave(&mp->lock, flags);
131
132 if ((new_mtu > 9500) || (new_mtu < 64)) {
133 spin_unlock_irqrestore(&mp->lock, flags);
134 return -EINVAL; 133 return -EINVAL;
135 }
136 134
137 dev->mtu = new_mtu; 135 dev->mtu = new_mtu;
138 /* 136 /*
@@ -142,17 +140,13 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
142 * to memory is full, which might fail the open function. 140 * to memory is full, which might fail the open function.
143 */ 141 */
144 if (netif_running(dev)) { 142 if (netif_running(dev)) {
145 if (mv643xx_eth_real_stop(dev)) 143 mv643xx_eth_stop(dev);
146 printk(KERN_ERR 144 if (mv643xx_eth_open(dev))
147 "%s: Fatal error on stopping device\n",
148 dev->name);
149 if (mv643xx_eth_real_open(dev))
150 printk(KERN_ERR 145 printk(KERN_ERR
151 "%s: Fatal error on opening device\n", 146 "%s: Fatal error on opening device\n",
152 dev->name); 147 dev->name);
153 } 148 }
154 149
155 spin_unlock_irqrestore(&mp->lock, flags);
156 return 0; 150 return 0;
157} 151}
158 152
@@ -170,15 +164,19 @@ static void mv643xx_eth_rx_task(void *data)
170 struct mv643xx_private *mp = netdev_priv(dev); 164 struct mv643xx_private *mp = netdev_priv(dev);
171 struct pkt_info pkt_info; 165 struct pkt_info pkt_info;
172 struct sk_buff *skb; 166 struct sk_buff *skb;
167 int unaligned;
173 168
174 if (test_and_set_bit(0, &mp->rx_task_busy)) 169 if (test_and_set_bit(0, &mp->rx_task_busy))
175 panic("%s: Error in test_set_bit / clear_bit", dev->name); 170 panic("%s: Error in test_set_bit / clear_bit", dev->name);
176 171
177 while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) { 172 while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) {
178 skb = dev_alloc_skb(RX_SKB_SIZE); 173 skb = dev_alloc_skb(RX_SKB_SIZE + DMA_ALIGN);
179 if (!skb) 174 if (!skb)
180 break; 175 break;
181 mp->rx_ring_skbs++; 176 mp->rx_ring_skbs++;
177 unaligned = (u32)skb->data & (DMA_ALIGN - 1);
178 if (unaligned)
179 skb_reserve(skb, DMA_ALIGN - unaligned);
182 pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT; 180 pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
183 pkt_info.byte_cnt = RX_SKB_SIZE; 181 pkt_info.byte_cnt = RX_SKB_SIZE;
184 pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE, 182 pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE,
@@ -189,7 +187,7 @@ static void mv643xx_eth_rx_task(void *data)
189 "%s: Error allocating RX Ring\n", dev->name); 187 "%s: Error allocating RX Ring\n", dev->name);
190 break; 188 break;
191 } 189 }
192 skb_reserve(skb, 2); 190 skb_reserve(skb, HW_IP_ALIGN);
193 } 191 }
194 clear_bit(0, &mp->rx_task_busy); 192 clear_bit(0, &mp->rx_task_busy);
195 /* 193 /*
@@ -207,7 +205,7 @@ static void mv643xx_eth_rx_task(void *data)
207 else { 205 else {
208 /* Return interrupts */ 206 /* Return interrupts */
209 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num), 207 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num),
210 INT_CAUSE_UNMASK_ALL); 208 INT_UNMASK_ALL);
211 } 209 }
212#endif 210#endif
213} 211}
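The receive-buffer fix above over-allocates each skb by DMA_ALIGN and reserves the misaligned remainder so skb->data lands on an 8-byte boundary, which the DMA engine requires; a further HW_IP_ALIGN reserve then leaves the hardware-aligned IP header where the stack expects it. The alignment arithmetic on its own:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define DMA_ALIGN	8	/* hw requires 8-byte alignment */

	/* Allocate size + DMA_ALIGN and advance the data pointer past any
	 * misaligned remainder, mirroring the skb_reserve() dance above. */
	static unsigned char *alloc_aligned(size_t size, unsigned char **raw)
	{
		unsigned char *p = malloc(size + DMA_ALIGN);
		uintptr_t unaligned;

		*raw = p;
		if (!p)
			return NULL;
		unaligned = (uintptr_t)p & (DMA_ALIGN - 1);
		if (unaligned)
			p += DMA_ALIGN - unaligned;	/* skb_reserve() */
		return p;
	}

	int main(void)
	{
		unsigned char *raw;
		unsigned char *data = alloc_aligned(2048, &raw);

		printf("aligned: %d\n", ((uintptr_t)data & (DMA_ALIGN - 1)) == 0);
		free(raw);
		return 0;
	}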
@@ -267,6 +265,8 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
267 mp->port_config &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; 265 mp->port_config &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
268 266
269 mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), mp->port_config); 267 mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), mp->port_config);
268
269 eth_port_set_multicast_list(dev);
270} 270}
271 271
272/* 272/*
@@ -342,8 +342,6 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev,
342 if (!(eth_int_cause_ext & (BIT0 | BIT8))) 342 if (!(eth_int_cause_ext & (BIT0 | BIT8)))
343 return released; 343 return released;
344 344
345 spin_lock(&mp->lock);
346
347 /* Check only queue 0 */ 345 /* Check only queue 0 */
348 while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) { 346 while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
349 if (pkt_info.cmd_sts & BIT0) { 347 if (pkt_info.cmd_sts & BIT0) {
@@ -351,31 +349,21 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev,
351 stats->tx_errors++; 349 stats->tx_errors++;
352 } 350 }
353 351
354 /* 352 if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
355 * If return_info is different than 0, release the skb. 353 dma_unmap_single(NULL, pkt_info.buf_ptr,
356 * The case where return_info is not 0 is only in case 354 pkt_info.byte_cnt,
357 * when transmitted a scatter/gather packet, where only 355 DMA_TO_DEVICE);
358 * last skb releases the whole chain. 356 else
359 */ 357 dma_unmap_page(NULL, pkt_info.buf_ptr,
360 if (pkt_info.return_info) { 358 pkt_info.byte_cnt,
361 if (skb_shinfo(pkt_info.return_info)->nr_frags) 359 DMA_TO_DEVICE);
362 dma_unmap_page(NULL, pkt_info.buf_ptr,
363 pkt_info.byte_cnt,
364 DMA_TO_DEVICE);
365 else
366 dma_unmap_single(NULL, pkt_info.buf_ptr,
367 pkt_info.byte_cnt,
368 DMA_TO_DEVICE);
369 360
361 if (pkt_info.return_info) {
370 dev_kfree_skb_irq(pkt_info.return_info); 362 dev_kfree_skb_irq(pkt_info.return_info);
371 released = 0; 363 released = 0;
372 } else 364 }
373 dma_unmap_page(NULL, pkt_info.buf_ptr,
374 pkt_info.byte_cnt, DMA_TO_DEVICE);
375 } 365 }
376 366
377 spin_unlock(&mp->lock);
378
379 return released; 367 return released;
380} 368}
381 369
@@ -482,12 +470,12 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
482 470
483 /* Read interrupt cause registers */ 471 /* Read interrupt cause registers */
484 eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) & 472 eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) &
485 INT_CAUSE_UNMASK_ALL; 473 INT_UNMASK_ALL;
486 474
487 if (eth_int_cause & BIT1) 475 if (eth_int_cause & BIT1)
488 eth_int_cause_ext = mv_read( 476 eth_int_cause_ext = mv_read(
489 MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) & 477 MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
490 INT_CAUSE_UNMASK_ALL_EXT; 478 INT_UNMASK_ALL_EXT;
491 479
492#ifdef MV643XX_NAPI 480#ifdef MV643XX_NAPI
493 if (!(eth_int_cause & 0x0007fffd)) { 481 if (!(eth_int_cause & 0x0007fffd)) {
@@ -512,9 +500,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
512 } else { 500 } else {
513 if (netif_rx_schedule_prep(dev)) { 501 if (netif_rx_schedule_prep(dev)) {
514 /* Mask all the interrupts */ 502 /* Mask all the interrupts */
515 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0); 503 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
516 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG 504 INT_MASK_ALL);
517 (port_num), 0); 505 /* wait for previous write to complete */
506 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
518 __netif_rx_schedule(dev); 507 __netif_rx_schedule(dev);
519 } 508 }
520#else 509#else
@@ -527,9 +516,12 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
527 * with skb's. 516 * with skb's.
528 */ 517 */
529#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK 518#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
530 /* Unmask all interrupts on ethernet port */ 519 /* Mask all interrupts on ethernet port */
531 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 520 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
532 INT_CAUSE_MASK_ALL); 521 INT_MASK_ALL);
522 /* wait for previous write to take effect */
523 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
524
533 queue_task(&mp->rx_task, &tq_immediate); 525 queue_task(&mp->rx_task, &tq_immediate);
534 mark_bh(IMMEDIATE_BH); 526 mark_bh(IMMEDIATE_BH);
535#else 527#else
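Several hunks here add a mask-register read immediately after the masking write. MMIO writes are posted, so without the read-back the interrupt could still fire after the handler believes it is masked. A simulated-register sketch of the pattern (real hardware would use readl()/writel() accessors on an ioremap()ed region):

	#include <stdint.h>
	#include <stdio.h>

	/* Simulated MMIO register standing in for the interrupt mask. */
	static volatile uint32_t int_mask_reg = 0x0007ffff;

	static void mv_write(volatile uint32_t *reg, uint32_t val)
	{
		*reg = val;	/* posted: may still sit in a write buffer */
	}

	static uint32_t mv_read(volatile uint32_t *reg)
	{
		return *reg;	/* the read forces posted writes to land */
	}

	int main(void)
	{
		mv_write(&int_mask_reg, 0);	/* mask all interrupts */
		(void)mv_read(&int_mask_reg);	/* wait for write to complete */
		printf("mask=0x%08x\n", (unsigned)int_mask_reg);
		return 0;
	}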
@@ -636,56 +628,6 @@ static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
636} 628}
637 629
638/* 630/*
639 * mv643xx_eth_open
640 *
641 * This function is called when opening the network device. The function
642 * should initialize all the hardware, initialize cyclic Rx/Tx
643 * descriptors chain and buffers and allocate an IRQ to the network
644 * device.
645 *
646 * Input : a pointer to the network device structure
647 *
648 * Output : zero on success, nonzero on failure.
649 */
650
651static int mv643xx_eth_open(struct net_device *dev)
652{
653 struct mv643xx_private *mp = netdev_priv(dev);
654 unsigned int port_num = mp->port_num;
655 int err;
656
657 spin_lock_irq(&mp->lock);
658
659 err = request_irq(dev->irq, mv643xx_eth_int_handler,
660 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
661
662 if (err) {
663 printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
664 port_num);
665 err = -EAGAIN;
666 goto out;
667 }
668
669 if (mv643xx_eth_real_open(dev)) {
670 printk("%s: Error opening interface\n", dev->name);
671 err = -EBUSY;
672 goto out_free;
673 }
674
675 spin_unlock_irq(&mp->lock);
676
677 return 0;
678
679out_free:
680 free_irq(dev->irq, dev);
681
682out:
683 spin_unlock_irq(&mp->lock);
684
685 return err;
686}
687
688/*
689 * ether_init_rx_desc_ring - Carve a Rx chain desc list and buffer in memory. 631
690 * 632 *
691 * DESCRIPTION: 633 * DESCRIPTION:
@@ -777,28 +719,37 @@ static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
777 mp->port_tx_queue_command |= 1; 719 mp->port_tx_queue_command |= 1;
778} 720}
779 721
780/* Helper function for mv643xx_eth_open */ 722/*
781static int mv643xx_eth_real_open(struct net_device *dev) 723 * mv643xx_eth_open
724 *
725 * This function is called when opening the network device. The function
726 * should initialize all the hardware, initialize cyclic Rx/Tx
727 * descriptors chain and buffers and allocate an IRQ to the network
728 * device.
729 *
730 * Input : a pointer to the network device structure
731 *
732 * Output : zero on success, nonzero on failure.
733 */
734
735static int mv643xx_eth_open(struct net_device *dev)
782{ 736{
783 struct mv643xx_private *mp = netdev_priv(dev); 737 struct mv643xx_private *mp = netdev_priv(dev);
784 unsigned int port_num = mp->port_num; 738 unsigned int port_num = mp->port_num;
785 unsigned int size; 739 unsigned int size;
740 int err;
741
742 err = request_irq(dev->irq, mv643xx_eth_int_handler,
743 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
744 if (err) {
745 printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
746 port_num);
747 return -EAGAIN;
748 }
786 749
787 /* Stop RX Queues */ 750 /* Stop RX Queues */
788 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00); 751 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
789 752
790 /* Clear the ethernet port interrupts */
791 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
792 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
793
794 /* Unmask RX buffer and TX end interrupt */
795 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
796 INT_CAUSE_UNMASK_ALL);
797
798 /* Unmask phy and link status changes interrupts */
799 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
800 INT_CAUSE_UNMASK_ALL_EXT);
801
802 /* Set the MAC Address */ 753 /* Set the MAC Address */
803 memcpy(mp->port_mac_addr, dev->dev_addr, 6); 754 memcpy(mp->port_mac_addr, dev->dev_addr, 6);
804 755
@@ -818,14 +769,15 @@ static int mv643xx_eth_real_open(struct net_device *dev)
818 GFP_KERNEL); 769 GFP_KERNEL);
819 if (!mp->rx_skb) { 770 if (!mp->rx_skb) {
820 printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name); 771 printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);
821 return -ENOMEM; 772 err = -ENOMEM;
773 goto out_free_irq;
822 } 774 }
823 mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size, 775 mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
824 GFP_KERNEL); 776 GFP_KERNEL);
825 if (!mp->tx_skb) { 777 if (!mp->tx_skb) {
826 printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name); 778 printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name);
827 kfree(mp->rx_skb); 779 err = -ENOMEM;
828 return -ENOMEM; 780 goto out_free_rx_skb;
829 } 781 }
830 782
831 /* Allocate TX ring */ 783 /* Allocate TX ring */
@@ -845,9 +797,8 @@ static int mv643xx_eth_real_open(struct net_device *dev)
845 if (!mp->p_tx_desc_area) { 797 if (!mp->p_tx_desc_area) {
846 printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n", 798 printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
847 dev->name, size); 799 dev->name, size);
848 kfree(mp->rx_skb); 800 err = -ENOMEM;
849 kfree(mp->tx_skb); 801 goto out_free_tx_skb;
850 return -ENOMEM;
851 } 802 }
852 BUG_ON((u32) mp->p_tx_desc_area & 0xf); /* check 16-byte alignment */ 803 BUG_ON((u32) mp->p_tx_desc_area & 0xf); /* check 16-byte alignment */
853 memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size); 804 memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size);
@@ -874,13 +825,12 @@ static int mv643xx_eth_real_open(struct net_device *dev)
874 printk(KERN_ERR "%s: Freeing previously allocated TX queues...", 825 printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
875 dev->name); 826 dev->name);
876 if (mp->rx_sram_size) 827 if (mp->rx_sram_size)
877 iounmap(mp->p_rx_desc_area); 828 iounmap(mp->p_tx_desc_area);
878 else 829 else
879 dma_free_coherent(NULL, mp->tx_desc_area_size, 830 dma_free_coherent(NULL, mp->tx_desc_area_size,
880 mp->p_tx_desc_area, mp->tx_desc_dma); 831 mp->p_tx_desc_area, mp->tx_desc_dma);
881 kfree(mp->rx_skb); 832 err = -ENOMEM;
882 kfree(mp->tx_skb); 833 goto out_free_tx_skb;
883 return -ENOMEM;
884 } 834 }
885 memset((void *)mp->p_rx_desc_area, 0, size); 835 memset((void *)mp->p_rx_desc_area, 0, size);
886 836
@@ -900,9 +850,26 @@ static int mv643xx_eth_real_open(struct net_device *dev)
900 mp->tx_int_coal = 850 mp->tx_int_coal =
901 eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); 851 eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);
902 852
903 netif_start_queue(dev); 853 /* Clear any pending ethernet port interrupts */
854 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
855 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
856
857 /* Unmask phy and link status changes interrupts */
858 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
859 INT_UNMASK_ALL_EXT);
904 860
861 /* Unmask RX buffer and TX end interrupt */
862 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);
905 return 0; 863 return 0;
864
865out_free_tx_skb:
866 kfree(mp->tx_skb);
867out_free_rx_skb:
868 kfree(mp->rx_skb);
869out_free_irq:
870 free_irq(dev->irq, dev);
871
872 return err;
906} 873}
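The consolidated open path replaces the old copy-pasted error cleanup with a single goto ladder that unwinds allocations in reverse order, each failure jumping to the label that frees everything acquired so far. A minimal sketch of the shape (resources reduced to two mallocs):

	#include <stdio.h>
	#include <stdlib.h>

	/* Error unwinding with a goto ladder: each failure jumps to the
	 * label that releases everything acquired before it. */
	static int open_dev(void)
	{
		char *rx_skb, *tx_skb;
		int err;

		rx_skb = malloc(64);
		if (!rx_skb) {
			err = -1;
			goto out;
		}
		tx_skb = malloc(64);
		if (!tx_skb) {
			err = -1;
			goto out_free_rx;
		}
		/* ... more setup; later failures get deeper labels ... */

		free(tx_skb);	/* a real open would keep these; the sketch */
		free(rx_skb);	/* frees on success too, to stay leak-free */
		return 0;

	out_free_rx:
		free(rx_skb);
	out:
		return err;
	}

	int main(void)
	{
		printf("open_dev() = %d\n", open_dev());
		return 0;
	}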
907 874
908static void mv643xx_eth_free_tx_rings(struct net_device *dev) 875static void mv643xx_eth_free_tx_rings(struct net_device *dev)
@@ -910,14 +877,17 @@ static void mv643xx_eth_free_tx_rings(struct net_device *dev)
910 struct mv643xx_private *mp = netdev_priv(dev); 877 struct mv643xx_private *mp = netdev_priv(dev);
911 unsigned int port_num = mp->port_num; 878 unsigned int port_num = mp->port_num;
912 unsigned int curr; 879 unsigned int curr;
880 struct sk_buff *skb;
913 881
914 /* Stop Tx Queues */ 882 /* Stop Tx Queues */
915 mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00); 883 mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
916 884
917 /* Free outstanding skb's on TX rings */ 885 /* Free outstanding skb's on TX rings */
918 for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) { 886 for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) {
919 if (mp->tx_skb[curr]) { 887 skb = mp->tx_skb[curr];
920 dev_kfree_skb(mp->tx_skb[curr]); 888 if (skb) {
889 mp->tx_ring_skbs -= skb_shinfo(skb)->nr_frags;
890 dev_kfree_skb(skb);
921 mp->tx_ring_skbs--; 891 mp->tx_ring_skbs--;
922 } 892 }
923 } 893 }
@@ -973,44 +943,32 @@ static void mv643xx_eth_free_rx_rings(struct net_device *dev)
973 * Output : zero if success , nonzero if fails 943 * Output : zero if success , nonzero if fails
974 */ 944 */
975 945
976/* Helper function for mv643xx_eth_stop */ 946static int mv643xx_eth_stop(struct net_device *dev)
977
978static int mv643xx_eth_real_stop(struct net_device *dev)
979{ 947{
980 struct mv643xx_private *mp = netdev_priv(dev); 948 struct mv643xx_private *mp = netdev_priv(dev);
981 unsigned int port_num = mp->port_num; 949 unsigned int port_num = mp->port_num;
982 950
951 /* Mask all interrupts on ethernet port */
952 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
953 /* wait for previous write to complete */
954 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
955
956#ifdef MV643XX_NAPI
957 netif_poll_disable(dev);
958#endif
983 netif_carrier_off(dev); 959 netif_carrier_off(dev);
984 netif_stop_queue(dev); 960 netif_stop_queue(dev);
985 961
986 mv643xx_eth_free_tx_rings(dev);
987 mv643xx_eth_free_rx_rings(dev);
988
989 eth_port_reset(mp->port_num); 962 eth_port_reset(mp->port_num);
990 963
991 /* Disable ethernet port interrupts */ 964 mv643xx_eth_free_tx_rings(dev);
992 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); 965 mv643xx_eth_free_rx_rings(dev);
993 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
994
995 /* Mask RX buffer and TX end interrupt */
996 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
997
998 /* Mask phy and link status changes interrupts */
999 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
1000
1001 return 0;
1002}
1003
1004static int mv643xx_eth_stop(struct net_device *dev)
1005{
1006 struct mv643xx_private *mp = netdev_priv(dev);
1007
1008 spin_lock_irq(&mp->lock);
1009 966
1010 mv643xx_eth_real_stop(dev); 967#ifdef MV643XX_NAPI
968 netif_poll_enable(dev);
969#endif
1011 970
1012 free_irq(dev->irq, dev); 971 free_irq(dev->irq, dev);
1013 spin_unlock_irq(&mp->lock);
1014 972
1015 return 0; 973 return 0;
1016} 974}
@@ -1022,20 +980,17 @@ static void mv643xx_tx(struct net_device *dev)
1022 struct pkt_info pkt_info; 980 struct pkt_info pkt_info;
1023 981
1024 while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) { 982 while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
1025 if (pkt_info.return_info) { 983 if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
1026 if (skb_shinfo(pkt_info.return_info)->nr_frags) 984 dma_unmap_single(NULL, pkt_info.buf_ptr,
1027 dma_unmap_page(NULL, pkt_info.buf_ptr, 985 pkt_info.byte_cnt,
1028 pkt_info.byte_cnt, 986 DMA_TO_DEVICE);
1029 DMA_TO_DEVICE); 987 else
1030 else 988 dma_unmap_page(NULL, pkt_info.buf_ptr,
1031 dma_unmap_single(NULL, pkt_info.buf_ptr, 989 pkt_info.byte_cnt,
1032 pkt_info.byte_cnt, 990 DMA_TO_DEVICE);
1033 DMA_TO_DEVICE);
1034 991
992 if (pkt_info.return_info)
1035 dev_kfree_skb_irq(pkt_info.return_info); 993 dev_kfree_skb_irq(pkt_info.return_info);
1036 } else
1037 dma_unmap_page(NULL, pkt_info.buf_ptr,
1038 pkt_info.byte_cnt, DMA_TO_DEVICE);
1039 } 994 }
1040 995
1041 if (netif_queue_stopped(dev) && 996 if (netif_queue_stopped(dev) &&
@@ -1053,14 +1008,11 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
1053 struct mv643xx_private *mp = netdev_priv(dev); 1008 struct mv643xx_private *mp = netdev_priv(dev);
1054 int done = 1, orig_budget, work_done; 1009 int done = 1, orig_budget, work_done;
1055 unsigned int port_num = mp->port_num; 1010 unsigned int port_num = mp->port_num;
1056 unsigned long flags;
1057 1011
1058#ifdef MV643XX_TX_FAST_REFILL 1012#ifdef MV643XX_TX_FAST_REFILL
1059 if (++mp->tx_clean_threshold > 5) { 1013 if (++mp->tx_clean_threshold > 5) {
1060 spin_lock_irqsave(&mp->lock, flags);
1061 mv643xx_tx(dev); 1014 mv643xx_tx(dev);
1062 mp->tx_clean_threshold = 0; 1015 mp->tx_clean_threshold = 0;
1063 spin_unlock_irqrestore(&mp->lock, flags);
1064 } 1016 }
1065#endif 1017#endif
1066 1018
@@ -1078,21 +1030,36 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
1078 } 1030 }
1079 1031
1080 if (done) { 1032 if (done) {
1081 spin_lock_irqsave(&mp->lock, flags); 1033 netif_rx_complete(dev);
1082 __netif_rx_complete(dev);
1083 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); 1034 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
1084 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); 1035 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
1085 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 1036 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
1086 INT_CAUSE_UNMASK_ALL); 1037 INT_UNMASK_ALL);
1087 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
1088 INT_CAUSE_UNMASK_ALL_EXT);
1089 spin_unlock_irqrestore(&mp->lock, flags);
1090 } 1038 }
1091 1039
1092 return done ? 0 : 1; 1040 return done ? 0 : 1;
1093} 1041}
1094#endif 1042#endif
1095 1043
1044/* Hardware can't handle unaligned fragments smaller than 9 bytes.
1045 * This helper function detects that case.
1046 */
1047
1048static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
1049{
1050 unsigned int frag;
1051 skb_frag_t *fragp;
1052
1053 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
1054 fragp = &skb_shinfo(skb)->frags[frag];
1055 if (fragp->size <= 8 && fragp->page_offset & 0x7)
1056 return 1;
1057
1058 }
1059 return 0;
1060}
1061
1062
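has_tiny_unaligned_frags() encodes the hardware erratum directly: any page fragment of 8 bytes or fewer that does not start on an 8-byte boundary would trip the DMA engine, so the transmit path linearizes such skbs up front. The same predicate as a standalone program:

	#include <stdio.h>

	struct frag {
		unsigned int size;
		unsigned int page_offset;
	};

	/* Mirror of has_tiny_unaligned_frags(): the hardware mishandles
	 * fragments of 8 bytes or less that are not 8-byte aligned. */
	static int frags_need_linearize(const struct frag *frags, int nr)
	{
		for (int i = 0; i < nr; i++)
			if (frags[i].size <= 8 && (frags[i].page_offset & 0x7))
				return 1;
		return 0;
	}

	int main(void)
	{
		struct frag frags[] = { { 1500, 0 }, { 6, 3 } };

		printf("linearize: %d\n", frags_need_linearize(frags, 2));
		return 0;
	}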
1096/* 1063/*
1097 * mv643xx_eth_start_xmit 1064 * mv643xx_eth_start_xmit
1098 * 1065 *
@@ -1136,12 +1103,19 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1136 return 1; 1103 return 1;
1137 } 1104 }
1138 1105
1106#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
1107 if (has_tiny_unaligned_frags(skb)) {
1108 if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
1109 stats->tx_dropped++;
1110 printk(KERN_DEBUG "%s: failed to linearize tiny "
1111 "unaligned fragment\n", dev->name);
1112 return 1;
1113 }
1114 }
1115
1139 spin_lock_irqsave(&mp->lock, flags); 1116 spin_lock_irqsave(&mp->lock, flags);
1140 1117
1141 /* Update packet info data structure -- DMA owned, first last */
1142#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
1143 if (!skb_shinfo(skb)->nr_frags) { 1118 if (!skb_shinfo(skb)->nr_frags) {
1144linear:
1145 if (skb->ip_summed != CHECKSUM_HW) { 1119 if (skb->ip_summed != CHECKSUM_HW) {
1146 /* Errata BTS #50, IHL must be 5 if no HW checksum */ 1120 /* Errata BTS #50, IHL must be 5 if no HW checksum */
1147 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | 1121 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
@@ -1150,7 +1124,6 @@ linear:
1150 5 << ETH_TX_IHL_SHIFT; 1124 5 << ETH_TX_IHL_SHIFT;
1151 pkt_info.l4i_chk = 0; 1125 pkt_info.l4i_chk = 0;
1152 } else { 1126 } else {
1153
1154 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | 1127 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
1155 ETH_TX_FIRST_DESC | 1128 ETH_TX_FIRST_DESC |
1156 ETH_TX_LAST_DESC | 1129 ETH_TX_LAST_DESC |
@@ -1158,14 +1131,16 @@ linear:
1158 ETH_GEN_IP_V_4_CHECKSUM | 1131 ETH_GEN_IP_V_4_CHECKSUM |
1159 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; 1132 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
1160 /* CPU already calculated pseudo header checksum. */ 1133 /* CPU already calculated pseudo header checksum. */
1161 if (skb->nh.iph->protocol == IPPROTO_UDP) { 1134 if ((skb->protocol == htons(ETH_P_IP)) &&
1135 (skb->nh.iph->protocol == IPPROTO_UDP)) {
1162 pkt_info.cmd_sts |= ETH_UDP_FRAME; 1136 pkt_info.cmd_sts |= ETH_UDP_FRAME;
1163 pkt_info.l4i_chk = skb->h.uh->check; 1137 pkt_info.l4i_chk = skb->h.uh->check;
1164 } else if (skb->nh.iph->protocol == IPPROTO_TCP) 1138 } else if ((skb->protocol == htons(ETH_P_IP)) &&
1139 (skb->nh.iph->protocol == IPPROTO_TCP))
1165 pkt_info.l4i_chk = skb->h.th->check; 1140 pkt_info.l4i_chk = skb->h.th->check;
1166 else { 1141 else {
1167 printk(KERN_ERR 1142 printk(KERN_ERR
1168 "%s: chksum proto != TCP or UDP\n", 1143 "%s: chksum proto != IPv4 TCP or UDP\n",
1169 dev->name); 1144 dev->name);
1170 spin_unlock_irqrestore(&mp->lock, flags); 1145 spin_unlock_irqrestore(&mp->lock, flags);
1171 return 1; 1146 return 1;
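One caveat in the new ethertype guard: skb->protocol holds the Ethernet type in network byte order, so comparing it against the host-order constant ETH_P_IP only matches on big-endian machines; the comparisons above are therefore written with htons(ETH_P_IP). A tiny program that shows the difference on a little-endian host:

	#include <arpa/inet.h>
	#include <stdio.h>

	#define ETH_P_IP 0x0800	/* IPv4 ethertype, host byte order */

	int main(void)
	{
		/* skb->protocol is stored in network byte order */
		unsigned short protocol = htons(ETH_P_IP);

		printf("host-order compare: %d\n", protocol == ETH_P_IP);
		printf("htons compare:      %d\n", protocol == htons(ETH_P_IP));
		return 0;
	}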
@@ -1183,26 +1158,6 @@ linear:
1183 } else { 1158 } else {
1184 unsigned int frag; 1159 unsigned int frag;
1185 1160
1186 /* Since hardware can't handle unaligned fragments smaller
1187 * than 9 bytes, if we find any, we linearize the skb
1188 * and start again. When I've seen it, it's always been
1189 * the first frag (probably near the end of the page),
1190 * but we check all frags to be safe.
1191 */
1192 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
1193 skb_frag_t *fragp;
1194
1195 fragp = &skb_shinfo(skb)->frags[frag];
1196 if (fragp->size <= 8 && fragp->page_offset & 0x7) {
1197 skb_linearize(skb, GFP_ATOMIC);
1198 printk(KERN_DEBUG "%s: unaligned tiny fragment"
1199 "%d of %d, fixed\n",
1200 dev->name, frag,
1201 skb_shinfo(skb)->nr_frags);
1202 goto linear;
1203 }
1204 }
1205
1206 /* first frag which is skb header */ 1161 /* first frag which is skb header */
1207 pkt_info.byte_cnt = skb_headlen(skb); 1162 pkt_info.byte_cnt = skb_headlen(skb);
1208 pkt_info.buf_ptr = dma_map_single(NULL, skb->data, 1163 pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
@@ -1221,14 +1176,16 @@ linear:
1221 ETH_GEN_IP_V_4_CHECKSUM | 1176 ETH_GEN_IP_V_4_CHECKSUM |
1222 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; 1177 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
1223 /* CPU already calculated pseudo header checksum. */ 1178 /* CPU already calculated pseudo header checksum. */
1224 if (skb->nh.iph->protocol == IPPROTO_UDP) { 1179 if ((skb->protocol == htons(ETH_P_IP)) &&
1180 (skb->nh.iph->protocol == IPPROTO_UDP)) {
1225 pkt_info.cmd_sts |= ETH_UDP_FRAME; 1181 pkt_info.cmd_sts |= ETH_UDP_FRAME;
1226 pkt_info.l4i_chk = skb->h.uh->check; 1182 pkt_info.l4i_chk = skb->h.uh->check;
1227 } else if (skb->nh.iph->protocol == IPPROTO_TCP) 1183 } else if ((skb->protocol == htons(ETH_P_IP)) &&
1184 (skb->nh.iph->protocol == IPPROTO_TCP))
1228 pkt_info.l4i_chk = skb->h.th->check; 1185 pkt_info.l4i_chk = skb->h.th->check;
1229 else { 1186 else {
1230 printk(KERN_ERR 1187 printk(KERN_ERR
1231 "%s: chksum proto != TCP or UDP\n", 1188 "%s: chksum proto != IPv4 TCP or UDP\n",
1232 dev->name); 1189 dev->name);
1233 spin_unlock_irqrestore(&mp->lock, flags); 1190 spin_unlock_irqrestore(&mp->lock, flags);
1234 return 1; 1191 return 1;
@@ -1288,6 +1245,8 @@ linear:
1288 } 1245 }
1289 } 1246 }
1290#else 1247#else
1248 spin_lock_irqsave(&mp->lock, flags);
1249
1291 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC | 1250 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC |
1292 ETH_TX_LAST_DESC; 1251 ETH_TX_LAST_DESC;
1293 pkt_info.l4i_chk = 0; 1252 pkt_info.l4i_chk = 0;
@@ -1340,39 +1299,18 @@ static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
1340} 1299}
1341 1300
1342#ifdef CONFIG_NET_POLL_CONTROLLER 1301#ifdef CONFIG_NET_POLL_CONTROLLER
1343static inline void mv643xx_enable_irq(struct mv643xx_private *mp)
1344{
1345 int port_num = mp->port_num;
1346 unsigned long flags;
1347
1348 spin_lock_irqsave(&mp->lock, flags);
1349 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
1350 INT_CAUSE_UNMASK_ALL);
1351 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
1352 INT_CAUSE_UNMASK_ALL_EXT);
1353 spin_unlock_irqrestore(&mp->lock, flags);
1354}
1355
1356static inline void mv643xx_disable_irq(struct mv643xx_private *mp)
1357{
1358 int port_num = mp->port_num;
1359 unsigned long flags;
1360
1361 spin_lock_irqsave(&mp->lock, flags);
1362 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
1363 INT_CAUSE_MASK_ALL);
1364 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
1365 INT_CAUSE_MASK_ALL_EXT);
1366 spin_unlock_irqrestore(&mp->lock, flags);
1367}
1368
1369static void mv643xx_netpoll(struct net_device *netdev) 1302static void mv643xx_netpoll(struct net_device *netdev)
1370{ 1303{
1371 struct mv643xx_private *mp = netdev_priv(netdev); 1304 struct mv643xx_private *mp = netdev_priv(netdev);
1305 int port_num = mp->port_num;
1306
1307 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
1308 /* wait for previous write to complete */
1309 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
1372 1310
1373 mv643xx_disable_irq(mp);
1374 mv643xx_eth_int_handler(netdev->irq, netdev, NULL); 1311 mv643xx_eth_int_handler(netdev->irq, netdev, NULL);
1375 mv643xx_enable_irq(mp); 1312
1313 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);
1376} 1314}
1377#endif 1315#endif
1378 1316
@@ -1441,7 +1379,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1441 * Zero copy can only work if we use Discovery II memory. Else, we will 1379 * Zero copy can only work if we use Discovery II memory. Else, we will
1442 * have to map the buffers to ISA memory which is only 16 MB 1380 * have to map the buffers to ISA memory which is only 16 MB
1443 */ 1381 */
1444 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_CSUM; 1382 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
1445#endif 1383#endif
1446#endif 1384#endif
1447 1385
@@ -2054,6 +1992,196 @@ static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble,
2054} 1992}
2055 1993
2056/* 1994/*
1995 * The entries in each table are indexed by a hash of a packet's MAC
1996 * address. One bit in each entry determines whether the packet is
1997 * accepted. There are 4 entries (each 8 bits wide) in each register
1998 * of the table. The bits in each entry are defined as follows:
1999 * 0 Accept=1, Drop=0
2000 * 3-1 Queue (ETH_Q0=0)
2001 * 7-4 Reserved = 0;
2002 */
2003static void eth_port_set_filter_table_entry(int table, unsigned char entry)
2004{
2005 unsigned int table_reg;
2006 unsigned int tbl_offset;
2007 unsigned int reg_offset;
2008
2009 tbl_offset = (entry / 4) * 4; /* Register offset of DA table entry */
2010 reg_offset = entry % 4; /* Entry offset within the register */
2011
2012 /* Set "accepts frame bit" at specified table entry */
2013 table_reg = mv_read(table + tbl_offset);
2014 table_reg |= 0x01 << (8 * reg_offset);
2015 mv_write(table + tbl_offset, table_reg);
2016}
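Each 32-bit register in the DA filter tables holds four 8-bit entries, so (entry / 4) * 4 is the register's byte offset, entry % 4 picks the byte, and bit 0 of that byte is the accept flag. The same indexing against a plain array:

	#include <stdint.h>
	#include <stdio.h>

	/* Mirror of eth_port_set_filter_table_entry(): set the accept bit
	 * for 'entry' in a table of 32-bit registers holding four entries
	 * each.  The driver computes (entry / 4) * 4 because mv_read()/
	 * mv_write() take byte offsets; with an array index the * 4 drops. */
	static void set_filter_entry(uint32_t *table, unsigned char entry)
	{
		unsigned int reg = entry / 4;	/* which 32-bit register */
		unsigned int byte = entry % 4;	/* which entry in it */

		table[reg] |= 0x01u << (8 * byte);
	}

	int main(void)
	{
		uint32_t table[64] = { 0 };

		set_filter_entry(table, 0x2A);	/* entry 42 */
		printf("table[%d] = 0x%08x\n", 0x2A / 4,
		       (unsigned)table[0x2A / 4]);
		return 0;
	}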
2017
2018/*
2019 * eth_port_mc_addr - Multicast address settings.
2020 *
2021 * The MV device supports multicast using two tables:
2022 * 1) Special Multicast Table for MAC addresses of the form
2023 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2024 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2025 * Table entries in the DA-Filter table.
2026 * 2) Other Multicast Table for multicast of another type. A CRC-8
2027 * value is used as an index to the Other Multicast Table entries in the
2028 * DA-Filter table. This function calculates the CRC-8 value.
2029 * In either case, eth_port_set_filter_table_entry() is then called
2030 * to set the actual table entry.
2031 */
2032static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
2033{
2034 unsigned int mac_h;
2035 unsigned int mac_l;
2036 unsigned char crc_result = 0;
2037 int table;
2038 int mac_array[48];
2039 int crc[8];
2040 int i;
2041
2042 if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
2043 (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
2044 table = MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
2045 (eth_port_num);
2046 eth_port_set_filter_table_entry(table, p_addr[5]);
2047 return;
2048 }
2049
2050 /* Calculate CRC-8 out of the given address */
2051 mac_h = (p_addr[0] << 8) | (p_addr[1]);
2052 mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
2053 (p_addr[4] << 8) | (p_addr[5] << 0);
2054
2055 for (i = 0; i < 32; i++)
2056 mac_array[i] = (mac_l >> i) & 0x1;
2057 for (i = 32; i < 48; i++)
2058 mac_array[i] = (mac_h >> (i - 32)) & 0x1;
2059
2060 crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^ mac_array[39] ^
2061 mac_array[35] ^ mac_array[34] ^ mac_array[31] ^ mac_array[30] ^
2062 mac_array[28] ^ mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
2063 mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ mac_array[12] ^
2064 mac_array[8] ^ mac_array[7] ^ mac_array[6] ^ mac_array[0];
2065
2066 crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
2067 mac_array[41] ^ mac_array[39] ^ mac_array[36] ^ mac_array[34] ^
2068 mac_array[32] ^ mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
2069 mac_array[24] ^ mac_array[23] ^ mac_array[22] ^ mac_array[21] ^
2070 mac_array[20] ^ mac_array[18] ^ mac_array[17] ^ mac_array[16] ^
2071 mac_array[15] ^ mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
2072 mac_array[9] ^ mac_array[6] ^ mac_array[1] ^ mac_array[0];
2073
2074 crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^ mac_array[43] ^
2075 mac_array[42] ^ mac_array[39] ^ mac_array[37] ^ mac_array[34] ^
2076 mac_array[33] ^ mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
2077 mac_array[24] ^ mac_array[22] ^ mac_array[17] ^ mac_array[15] ^
2078 mac_array[13] ^ mac_array[12] ^ mac_array[10] ^ mac_array[8] ^
2079 mac_array[6] ^ mac_array[2] ^ mac_array[1] ^ mac_array[0];
2080
2081 crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
2082 mac_array[40] ^ mac_array[38] ^ mac_array[35] ^ mac_array[34] ^
2083 mac_array[30] ^ mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
2084 mac_array[23] ^ mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
2085 mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[7] ^
2086 mac_array[3] ^ mac_array[2] ^ mac_array[1];
2087
2088 crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[41] ^
2089 mac_array[39] ^ mac_array[36] ^ mac_array[35] ^ mac_array[31] ^
2090 mac_array[30] ^ mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
2091 mac_array[19] ^ mac_array[17] ^ mac_array[15] ^ mac_array[14] ^
2092 mac_array[12] ^ mac_array[10] ^ mac_array[8] ^ mac_array[4] ^
2093 mac_array[3] ^ mac_array[2];
2094
2095 crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^ mac_array[42] ^
2096 mac_array[40] ^ mac_array[37] ^ mac_array[36] ^ mac_array[32] ^
2097 mac_array[31] ^ mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
2098 mac_array[20] ^ mac_array[18] ^ mac_array[16] ^ mac_array[15] ^
2099 mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[5] ^
2100 mac_array[4] ^ mac_array[3];
2101
2102 crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^ mac_array[41] ^
2103 mac_array[38] ^ mac_array[37] ^ mac_array[33] ^ mac_array[32] ^
2104 mac_array[29] ^ mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
2105 mac_array[19] ^ mac_array[17] ^ mac_array[16] ^ mac_array[14] ^
2106 mac_array[12] ^ mac_array[10] ^ mac_array[6] ^ mac_array[5] ^
2107 mac_array[4];
2108
2109 crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^ mac_array[39] ^
2110 mac_array[38] ^ mac_array[34] ^ mac_array[33] ^ mac_array[30] ^
2111 mac_array[29] ^ mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
2112 mac_array[18] ^ mac_array[17] ^ mac_array[15] ^ mac_array[13] ^
2113 mac_array[11] ^ mac_array[7] ^ mac_array[6] ^ mac_array[5];
2114
2115 for (i = 0; i < 8; i++)
2116 crc_result = crc_result | (crc[i] << i);
2117
2118 table = MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num);
2119 eth_port_set_filter_table_entry(table, crc_result);
2120}
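
The crc[0]..crc[7] expressions above are a fully unrolled 8-bit CRC over the 48 address bits. For reference, a bit-serial loop computes a value of the same shape far more compactly; the 0x07 polynomial below is an assumption for illustration only, and the driver's unrolled XOR network remains the authoritative definition:

	/* Bit-serial CRC-8 sketch over mac_array[] (LSB of mac_l first,
	 * as built above). Polynomial 0x07 is assumed; not authoritative. */
	unsigned char crc = 0;
	int i, fb;

	for (i = 0; i < 48; i++) {
		fb = ((crc >> 7) & 1) ^ mac_array[i];
		crc = (crc << 1) ^ (fb ? 0x07 : 0x00);
	}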
2121
2122/*
2123 * Set the entire multicast list based on dev->mc_list.
2124 */
2125static void eth_port_set_multicast_list(struct net_device *dev)
2126{
2127
2128 struct dev_mc_list *mc_list;
2129 int i;
2130 int table_index;
2131 struct mv643xx_private *mp = netdev_priv(dev);
2132 unsigned int eth_port_num = mp->port_num;
2133
2134 /* If the device is in promiscuous mode or in all multicast mode,
2135 * we will fully populate both multicast tables with accept.
2136 * This is guaranteed to yield a match on all multicast addresses...
2137 */
2138 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) {
2139 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2140 /* Set all entries in DA filter special multicast
2141 * table (Ex_dFSMT)
2142 * Set for ETH_Q0 for now
2143 * Bits
2144 * 0 Accept=1, Drop=0
2145 * 3-1 Queue ETH_Q0=0
2146 * 7-4 Reserved = 0;
2147 */
2148 mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
2149
2150 /* Set all entries in DA filter other multicast
2151 * table (Ex_dFOMT)
2152 * Set for ETH_Q0 for now
2153 * Bits
2154 * 0 Accept=1, Drop=0
2155 * 3-1 Queue ETH_Q0=0
2156 * 7-4 Reserved = 0;
2157 */
2158 mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
2159 }
2160 return;
2161 }
2162
2163 /* We will clear out multicast tables every time we get the list.
2164 * Then add the entire new list...
2165 */
2166 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2167 /* Clear DA filter special multicast table (Ex_dFSMT) */
2168 mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
2169 (eth_port_num) + table_index, 0);
2170
2171 /* Clear DA filter other multicast table (Ex_dFOMT) */
2172 mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
2173 (eth_port_num) + table_index, 0);
2174 }
2175
2176 /* Get pointer to net_device multicast list and add each one... */
2177 for (i = 0, mc_list = dev->mc_list;
2178 (i < 256) && (mc_list != NULL) && (i < dev->mc_count);
2179 i++, mc_list = mc_list->next)
2180 if (mc_list->dmi_addrlen == 6)
2181 eth_port_mc_addr(eth_port_num, mc_list->dmi_addr);
2182}
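
The 0x01010101 constant in the promiscuous/allmulti path above follows directly from the register layout: each 32-bit table register covers four entries, one byte lane each, and bit 0 of every lane is the Accept bit, so a single write accepts four entries and the 0x00-0xFC loop covers all 64 registers (256 entries):

	/* Accept bit (bit 0) set in all four byte lanes: */
	u32 accept_all = (0x01 << 0) | (0x01 << 8) | (0x01 << 16) | (0x01 << 24);
	/* == 0x01010101; 64 registers x 4 entries each = all 256 table entries */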
2183
2184/*
2057 * eth_port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables 2185 * eth_port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
2058 * 2186 *
2059 * DESCRIPTION: 2187 * DESCRIPTION:
@@ -2080,11 +2208,11 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num)
2080 2208
2081 for (table_index = 0; table_index <= 0xFC; table_index += 4) { 2209 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2082 /* Clear DA filter special multicast table (Ex_dFSMT) */ 2210 /* Clear DA filter special multicast table (Ex_dFSMT) */
2083 mv_write((MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2211 mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
2084 (eth_port_num) + table_index), 0); 2212 (eth_port_num) + table_index, 0);
2085 /* Clear DA filter other multicast table (Ex_dFOMT) */ 2213 /* Clear DA filter other multicast table (Ex_dFOMT) */
2086 mv_write((MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE 2214 mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
2087 (eth_port_num) + table_index), 0); 2215 (eth_port_num) + table_index, 0);
2088 } 2216 }
2089} 2217}
2090 2218
@@ -2489,6 +2617,7 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2489 struct eth_tx_desc *current_descriptor; 2617 struct eth_tx_desc *current_descriptor;
2490 struct eth_tx_desc *first_descriptor; 2618 struct eth_tx_desc *first_descriptor;
2491 u32 command; 2619 u32 command;
2620 unsigned long flags;
2492 2621
2493 /* Do not process Tx ring in case of Tx ring resource error */ 2622 /* Do not process Tx ring in case of Tx ring resource error */
2494 if (mp->tx_resource_err) 2623 if (mp->tx_resource_err)
@@ -2505,6 +2634,8 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2505 return ETH_ERROR; 2634 return ETH_ERROR;
2506 } 2635 }
2507 2636
2637 spin_lock_irqsave(&mp->lock, flags);
2638
2508 mp->tx_ring_skbs++; 2639 mp->tx_ring_skbs++;
2509 BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size); 2640 BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
2510 2641
@@ -2554,11 +2685,15 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2554 mp->tx_resource_err = 1; 2685 mp->tx_resource_err = 1;
2555 mp->tx_curr_desc_q = tx_first_desc; 2686 mp->tx_curr_desc_q = tx_first_desc;
2556 2687
2688 spin_unlock_irqrestore(&mp->lock, flags);
2689
2557 return ETH_QUEUE_LAST_RESOURCE; 2690 return ETH_QUEUE_LAST_RESOURCE;
2558 } 2691 }
2559 2692
2560 mp->tx_curr_desc_q = tx_next_desc; 2693 mp->tx_curr_desc_q = tx_next_desc;
2561 2694
2695 spin_unlock_irqrestore(&mp->lock, flags);
2696
2562 return ETH_OK; 2697 return ETH_OK;
2563} 2698}
2564#else 2699#else
@@ -2569,11 +2704,14 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2569 int tx_desc_used; 2704 int tx_desc_used;
2570 struct eth_tx_desc *current_descriptor; 2705 struct eth_tx_desc *current_descriptor;
2571 unsigned int command_status; 2706 unsigned int command_status;
2707 unsigned long flags;
2572 2708
2573 /* Do not process Tx ring in case of Tx ring resource error */ 2709 /* Do not process Tx ring in case of Tx ring resource error */
2574 if (mp->tx_resource_err) 2710 if (mp->tx_resource_err)
2575 return ETH_QUEUE_FULL; 2711 return ETH_QUEUE_FULL;
2576 2712
2713 spin_lock_irqsave(&mp->lock, flags);
2714
2577 mp->tx_ring_skbs++; 2715 mp->tx_ring_skbs++;
2578 BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size); 2716 BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
2579 2717
@@ -2604,9 +2742,12 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2604 /* Check for ring index overlap in the Tx desc ring */ 2742 /* Check for ring index overlap in the Tx desc ring */
2605 if (tx_desc_curr == tx_desc_used) { 2743 if (tx_desc_curr == tx_desc_used) {
2606 mp->tx_resource_err = 1; 2744 mp->tx_resource_err = 1;
2745
2746 spin_unlock_irqrestore(&mp->lock, flags);
2607 return ETH_QUEUE_LAST_RESOURCE; 2747 return ETH_QUEUE_LAST_RESOURCE;
2608 } 2748 }
2609 2749
2750 spin_unlock_irqrestore(&mp->lock, flags);
2610 return ETH_OK; 2751 return ETH_OK;
2611} 2752}
2612#endif 2753#endif
@@ -2629,23 +2770,27 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2629 * Tx ring 'first' and 'used' indexes are updated. 2770 * Tx ring 'first' and 'used' indexes are updated.
2630 * 2771 *
2631 * RETURN: 2772 * RETURN:
2632 * ETH_ERROR in case the routine can not access Tx desc ring. 2773 * ETH_OK on success
2633 * ETH_RETRY in case there is transmission in process. 2774 * ETH_ERROR otherwise.
2634 * ETH_END_OF_JOB if the routine has nothing to release.
2635 * ETH_OK otherwise.
2636 * 2775 *
2637 */ 2776 */
2638static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp, 2777static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
2639 struct pkt_info *p_pkt_info) 2778 struct pkt_info *p_pkt_info)
2640{ 2779{
2641 int tx_desc_used; 2780 int tx_desc_used;
2781 int tx_busy_desc;
2782 struct eth_tx_desc *p_tx_desc_used;
2783 unsigned int command_status;
2784 unsigned long flags;
2785 int err = ETH_OK;
2786
2787 spin_lock_irqsave(&mp->lock, flags);
2788
2642#ifdef MV643XX_CHECKSUM_OFFLOAD_TX 2789#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
2643 int tx_busy_desc = mp->tx_first_desc_q; 2790 tx_busy_desc = mp->tx_first_desc_q;
2644#else 2791#else
2645 int tx_busy_desc = mp->tx_curr_desc_q; 2792 tx_busy_desc = mp->tx_curr_desc_q;
2646#endif 2793#endif
2647 struct eth_tx_desc *p_tx_desc_used;
2648 unsigned int command_status;
2649 2794
2650 /* Get the Tx Desc ring indexes */ 2795 /* Get the Tx Desc ring indexes */
2651 tx_desc_used = mp->tx_used_desc_q; 2796 tx_desc_used = mp->tx_used_desc_q;
@@ -2653,22 +2798,30 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
2653 p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used]; 2798 p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used];
2654 2799
2655 /* Sanity check */ 2800 /* Sanity check */
2656 if (p_tx_desc_used == NULL) 2801 if (p_tx_desc_used == NULL) {
2657 return ETH_ERROR; 2802 err = ETH_ERROR;
2803 goto out;
2804 }
2658 2805
2659 /* Stop release. About to overlap the current available Tx descriptor */ 2806 /* Stop release. About to overlap the current available Tx descriptor */
2660 if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) 2807 if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) {
2661 return ETH_END_OF_JOB; 2808 err = ETH_ERROR;
2809 goto out;
2810 }
2662 2811
2663 command_status = p_tx_desc_used->cmd_sts; 2812 command_status = p_tx_desc_used->cmd_sts;
2664 2813
2665 /* Still transmitting... */ 2814 /* Still transmitting... */
2666 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) 2815 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
2667 return ETH_RETRY; 2816 err = ETH_ERROR;
2817 goto out;
2818 }
2668 2819
2669 /* Pass the packet information to the caller */ 2820 /* Pass the packet information to the caller */
2670 p_pkt_info->cmd_sts = command_status; 2821 p_pkt_info->cmd_sts = command_status;
2671 p_pkt_info->return_info = mp->tx_skb[tx_desc_used]; 2822 p_pkt_info->return_info = mp->tx_skb[tx_desc_used];
2823 p_pkt_info->buf_ptr = p_tx_desc_used->buf_ptr;
2824 p_pkt_info->byte_cnt = p_tx_desc_used->byte_cnt;
2672 mp->tx_skb[tx_desc_used] = NULL; 2825 mp->tx_skb[tx_desc_used] = NULL;
2673 2826
2674 /* Update the next descriptor to release. */ 2827 /* Update the next descriptor to release. */
@@ -2680,7 +2833,10 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
2680 BUG_ON(mp->tx_ring_skbs == 0); 2833 BUG_ON(mp->tx_ring_skbs == 0);
2681 mp->tx_ring_skbs--; 2834 mp->tx_ring_skbs--;
2682 2835
2683 return ETH_OK; 2836out:
2837 spin_unlock_irqrestore(&mp->lock, flags);
2838
2839 return err;
2684} 2840}
2685 2841
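
The eth_tx_return_desc rework above replaces several bare early returns with err + goto out so the spinlock taken on entry is dropped on every exit path. A minimal sketch of the pattern (the helper name is hypothetical):

	spin_lock_irqsave(&mp->lock, flags);
	if (!ring_slot_available(mp)) {		/* hypothetical check */
		err = ETH_ERROR;
		goto out;			/* error paths still unlock */
	}
	/* ... touch ring indexes and skb bookkeeping under the lock ... */
out:
	spin_unlock_irqrestore(&mp->lock, flags);
	return err;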
2686/* 2842/*
@@ -2712,11 +2868,14 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
2712 int rx_next_curr_desc, rx_curr_desc, rx_used_desc; 2868 int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
2713 volatile struct eth_rx_desc *p_rx_desc; 2869 volatile struct eth_rx_desc *p_rx_desc;
2714 unsigned int command_status; 2870 unsigned int command_status;
2871 unsigned long flags;
2715 2872
2716 /* Do not process Rx ring in case of Rx ring resource error */ 2873 /* Do not process Rx ring in case of Rx ring resource error */
2717 if (mp->rx_resource_err) 2874 if (mp->rx_resource_err)
2718 return ETH_QUEUE_FULL; 2875 return ETH_QUEUE_FULL;
2719 2876
2877 spin_lock_irqsave(&mp->lock, flags);
2878
2720 /* Get the Rx Desc ring 'curr and 'used' indexes */ 2879 /* Get the Rx Desc ring 'curr and 'used' indexes */
2721 rx_curr_desc = mp->rx_curr_desc_q; 2880 rx_curr_desc = mp->rx_curr_desc_q;
2722 rx_used_desc = mp->rx_used_desc_q; 2881 rx_used_desc = mp->rx_used_desc_q;
@@ -2728,8 +2887,10 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
2728 rmb(); 2887 rmb();
2729 2888
2730 /* Nothing to receive... */ 2889 /* Nothing to receive... */
2731 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) 2890 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
2891 spin_unlock_irqrestore(&mp->lock, flags);
2732 return ETH_END_OF_JOB; 2892 return ETH_END_OF_JOB;
2893 }
2733 2894
2734 p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET; 2895 p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET;
2735 p_pkt_info->cmd_sts = command_status; 2896 p_pkt_info->cmd_sts = command_status;
@@ -2749,6 +2910,8 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
2749 if (rx_next_curr_desc == rx_used_desc) 2910 if (rx_next_curr_desc == rx_used_desc)
2750 mp->rx_resource_err = 1; 2911 mp->rx_resource_err = 1;
2751 2912
2913 spin_unlock_irqrestore(&mp->lock, flags);
2914
2752 return ETH_OK; 2915 return ETH_OK;
2753} 2916}
2754 2917
@@ -2777,6 +2940,9 @@ static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
2777{ 2940{
2778 int used_rx_desc; /* Where to return Rx resource */ 2941 int used_rx_desc; /* Where to return Rx resource */
2779 volatile struct eth_rx_desc *p_used_rx_desc; 2942 volatile struct eth_rx_desc *p_used_rx_desc;
2943 unsigned long flags;
2944
2945 spin_lock_irqsave(&mp->lock, flags);
2780 2946
2781 /* Get 'used' Rx descriptor */ 2947 /* Get 'used' Rx descriptor */
2782 used_rx_desc = mp->rx_used_desc_q; 2948 used_rx_desc = mp->rx_used_desc_q;
@@ -2800,6 +2966,8 @@ static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
2800 /* Any Rx return cancels the Rx resource error status */ 2966 /* Any Rx return cancels the Rx resource error status */
2801 mp->rx_resource_err = 0; 2967 mp->rx_resource_err = 0;
2802 2968
2969 spin_unlock_irqrestore(&mp->lock, flags);
2970
2803 return ETH_OK; 2971 return ETH_OK;
2804} 2972}
2805 2973
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index b538e3038058..bf55a4cfb3d2 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3243,12 +3243,22 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3243 3243
3244 pci_set_master(pdev); 3244 pci_set_master(pdev);
3245 3245
3246 if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) 3246 if (sizeof(dma_addr_t) > sizeof(u32) &&
3247 !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
3247 using_dac = 1; 3248 using_dac = 1;
3248 else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { 3249 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3249 printk(KERN_ERR PFX "%s no usable DMA configuration\n", 3250 if (err < 0) {
3250 pci_name(pdev)); 3251 printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
3251 goto err_out_free_regions; 3252 "for consistent allocations\n", pci_name(pdev));
3253 goto err_out_free_regions;
3254 }
3255 } else {
3256 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3257 if (err) {
3258 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
3259 pci_name(pdev));
3260 goto err_out_free_regions;
3261 }
3252 } 3262 }
3253 3263
3254#ifdef __BIG_ENDIAN 3264#ifdef __BIG_ENDIAN
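
The probe change above is the standard two-step DMA mask negotiation: attempt a 64-bit streaming mask only when dma_addr_t can actually hold one, then also require a 64-bit consistent mask, and otherwise fall back to 32-bit. Condensed from the hunk:

	if (sizeof(dma_addr_t) > sizeof(u32) &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		using_dac = 1;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) < 0)
			goto err_out_free_regions;	/* no 64-bit coherent memory */
	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		goto err_out_free_regions;		/* no usable DMA at all */
	}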
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index f5d697c0c031..f8b973a04b65 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -57,7 +57,7 @@
57#include "sky2.h" 57#include "sky2.h"
58 58
59#define DRV_NAME "sky2" 59#define DRV_NAME "sky2"
60#define DRV_VERSION "0.11" 60#define DRV_VERSION "0.13"
61#define PFX DRV_NAME " " 61#define PFX DRV_NAME " "
62 62
63/* 63/*
@@ -75,6 +75,7 @@
75#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le)) 75#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
76#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2) 76#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2)
77#define RX_DEF_PENDING RX_MAX_PENDING 77#define RX_DEF_PENDING RX_MAX_PENDING
78#define RX_SKB_ALIGN 8
78 79
79#define TX_RING_SIZE 512 80#define TX_RING_SIZE 512
80#define TX_DEF_PENDING (TX_RING_SIZE - 1) 81#define TX_DEF_PENDING (TX_RING_SIZE - 1)
@@ -91,7 +92,7 @@
91static const u32 default_msg = 92static const u32 default_msg =
92 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK 93 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
93 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR 94 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
94 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_INTR; 95 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
95 96
96static int debug = -1; /* defaults above */ 97static int debug = -1; /* defaults above */
97module_param(debug, int, 0); 98module_param(debug, int, 0);
@@ -624,13 +625,16 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
624 625
625} 626}
626 627
627static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len) 628/* Assign Ram Buffer allocation.
629 * start and end are in units of 4k bytes
630 * ram registers are in units of 64bit words
631 */
632static void sky2_ramset(struct sky2_hw *hw, u16 q, u8 startk, u8 endk)
628{ 633{
629 u32 end; 634 u32 start, end;
630 635
631 start /= 8; 636 start = startk * 4096/8;
632 len /= 8; 637 end = (endk * 4096/8) - 1;
633 end = start + len - 1;
634 638
635 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); 639 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
636 sky2_write32(hw, RB_ADDR(q, RB_START), start); 640 sky2_write32(hw, RB_ADDR(q, RB_START), start);
@@ -639,14 +643,19 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
639 sky2_write32(hw, RB_ADDR(q, RB_RP), start); 643 sky2_write32(hw, RB_ADDR(q, RB_RP), start);
640 644
641 if (q == Q_R1 || q == Q_R2) { 645 if (q == Q_R1 || q == Q_R2) {
642 u32 rxup, rxlo; 646 u32 space = (endk - startk) * 4096/8;
647 u32 tp = space - space/4;
643 648
 644 rxlo = len/2; 649 /* On receive queues, set the thresholds:
645 rxup = rxlo + len/4; 650 * give receiver priority when > 3/4 full
651 * send pause when down to 2K
652 */
653 sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
654 sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
646 655
647 /* Set thresholds on receive queue's */ 656 tp = space - 2048/8;
648 sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), rxup); 657 sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
649 sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), rxlo); 658 sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
650 } else { 659 } else {
 651 /* Enable store & forward on Tx queues because 660 /* Enable store & forward on Tx queues because
652 * Tx FIFO is only 1K on Yukon 661 * Tx FIFO is only 1K on Yukon
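
Units in the new sky2_ramset above: startk/endk count 4K blocks while the RAM registers count 64-bit words, hence the * 4096/8 scaling. Worked through for a 10-block receive queue:

	/* endk - startk = 10 blocks:
	 *   space        = 10 * 4096/8   = 5120 words
	 *   RB_RX_UTHP   = 5120 - 5120/4 = 3840  (priority above 3/4 full)
	 *   RB_RX_LTHP   = 5120/2        = 2560
	 *   RB_RX_UTPP   = 5120 - 2048/8 = 4864  (pause within 2K of full)
	 *   RB_RX_LTPP   = 5120/4        = 1280
	 */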
@@ -695,9 +704,10 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
695 * This is a workaround code taken from SysKonnect sk98lin driver 704 * This is a workaround code taken from SysKonnect sk98lin driver
696 * to deal with chip bug on Yukon EC rev 0 in the wraparound case. 705 * to deal with chip bug on Yukon EC rev 0 in the wraparound case.
697 */ 706 */
698static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, 707static void sky2_put_idx(struct sky2_hw *hw, unsigned q,
699 u16 idx, u16 *last, u16 size) 708 u16 idx, u16 *last, u16 size)
700{ 709{
710 wmb();
701 if (is_ec_a1(hw) && idx < *last) { 711 if (is_ec_a1(hw) && idx < *last) {
702 u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX)); 712 u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
703 713
@@ -721,6 +731,7 @@ setnew:
721 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); 731 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
722 } 732 }
723 *last = idx; 733 *last = idx;
734 mmiowb();
724} 735}
725 736
726 737
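
The wmb() added at the top of sky2_put_idx and the mmiowb() at its end bracket the doorbell: the first makes all descriptor stores visible before the hardware can fetch them via the new PUT index, the second keeps the MMIO write ordered ahead of a later unlock on weakly ordered platforms. The producer-side shape:

	/* ... CPU fills list elements in memory ... */
	wmb();						/* descriptors before doorbell */
	sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
	mmiowb();					/* MMIO ordered before unlock */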
@@ -734,11 +745,11 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
734/* Return high part of DMA address (could be 32 or 64 bit) */ 745/* Return high part of DMA address (could be 32 or 64 bit) */
735static inline u32 high32(dma_addr_t a) 746static inline u32 high32(dma_addr_t a)
736{ 747{
737 return (a >> 16) >> 16; 748 return sizeof(a) > sizeof(u32) ? (a >> 16) >> 16 : 0;
738} 749}
739 750
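
The sizeof() guard added to high32() matters for 32-bit dma_addr_t: shifting a value by its full width is undefined behaviour in C, while two 16-bit shifts are always defined, and the constant-folded test lets 32-bit builds compile the helper to a plain 0:

	/* a >> 32         : undefined when dma_addr_t is only 32 bits wide
	 * (a >> 16) >> 16 : always defined; the sizeof() test folds to a
	 *                   compile-time constant on 32-bit configurations */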
740/* Build description to hardware about buffer */ 751/* Build description to hardware about buffer */
741static inline void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map) 752static void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map)
742{ 753{
743 struct sky2_rx_le *le; 754 struct sky2_rx_le *le;
744 u32 hi = high32(map); 755 u32 hi = high32(map);
@@ -878,13 +889,13 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp
878 struct sky2_hw *hw = sky2->hw; 889 struct sky2_hw *hw = sky2->hw;
879 u16 port = sky2->port; 890 u16 port = sky2->port;
880 891
881 spin_lock(&sky2->tx_lock); 892 spin_lock_bh(&sky2->tx_lock);
882 893
883 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON); 894 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON);
884 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON); 895 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON);
885 sky2->vlgrp = grp; 896 sky2->vlgrp = grp;
886 897
887 spin_unlock(&sky2->tx_lock); 898 spin_unlock_bh(&sky2->tx_lock);
888} 899}
889 900
890static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 901static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
@@ -893,27 +904,42 @@ static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
893 struct sky2_hw *hw = sky2->hw; 904 struct sky2_hw *hw = sky2->hw;
894 u16 port = sky2->port; 905 u16 port = sky2->port;
895 906
896 spin_lock(&sky2->tx_lock); 907 spin_lock_bh(&sky2->tx_lock);
897 908
898 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF); 909 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
899 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF); 910 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
900 if (sky2->vlgrp) 911 if (sky2->vlgrp)
901 sky2->vlgrp->vlan_devices[vid] = NULL; 912 sky2->vlgrp->vlan_devices[vid] = NULL;
902 913
903 spin_unlock(&sky2->tx_lock); 914 spin_unlock_bh(&sky2->tx_lock);
904} 915}
905#endif 916#endif
906 917
907/* 918/*
919 * It appears the hardware has a bug in the FIFO logic that
 920 * causes it to hang if the FIFO gets overrun and the receive buffer
 921 * is not aligned. Also, alloc_skb() won't align properly if slab
922 * debugging is enabled.
923 */
924static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask)
925{
926 struct sk_buff *skb;
927
928 skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask);
929 if (likely(skb)) {
930 unsigned long p = (unsigned long) skb->data;
931 skb_reserve(skb,
932 ((p + RX_SKB_ALIGN - 1) & ~(RX_SKB_ALIGN - 1)) - p);
933 }
934
935 return skb;
936}
937
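
The skb_reserve() arithmetic in sky2_alloc_skb above rounds skb->data up to the next RX_SKB_ALIGN boundary, and allocating size + RX_SKB_ALIGN leaves room for the worst case. With RX_SKB_ALIGN = 8 and p ending in ...5:

	/* ((p + 7) & ~7) - p == 3  -> reserve 3 bytes, data now 8-aligned.
	 * Worst case (p ending in ...1) reserves 7 bytes, still within the
	 * extra RX_SKB_ALIGN bytes allocated above. */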
938/*
908 * Allocate and setup receiver buffer pool. 939 * Allocate and setup receiver buffer pool.
909 * In case of 64 bit dma, there are 2X as many list elements 940 * In case of 64 bit dma, there are 2X as many list elements
910 * available as ring entries 941 * available as ring entries
911 * and need to reserve one list element so we don't wrap around. 942 * and need to reserve one list element so we don't wrap around.
912 *
913 * It appears the hardware has a bug in the FIFO logic that
914 * cause it to hang if the FIFO gets overrun and the receive buffer
915 * is not aligned. This means we can't use skb_reserve to align
916 * the IP header.
917 */ 943 */
918static int sky2_rx_start(struct sky2_port *sky2) 944static int sky2_rx_start(struct sky2_port *sky2)
919{ 945{
@@ -929,7 +955,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
929 for (i = 0; i < sky2->rx_pending; i++) { 955 for (i = 0; i < sky2->rx_pending; i++) {
930 struct ring_info *re = sky2->rx_ring + i; 956 struct ring_info *re = sky2->rx_ring + i;
931 957
932 re->skb = dev_alloc_skb(sky2->rx_bufsize); 958 re->skb = sky2_alloc_skb(sky2->rx_bufsize, GFP_KERNEL);
933 if (!re->skb) 959 if (!re->skb)
934 goto nomem; 960 goto nomem;
935 961
@@ -986,19 +1012,19 @@ static int sky2_up(struct net_device *dev)
986 1012
987 sky2_mac_init(hw, port); 1013 sky2_mac_init(hw, port);
988 1014
989 /* Configure RAM buffers */ 1015 /* Determine available ram buffer space (in 4K blocks).
990 if (hw->chip_id == CHIP_ID_YUKON_FE || 1016 * Note: not sure about the FE setting below yet
991 (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == 2)) 1017 */
992 ramsize = 4096; 1018 if (hw->chip_id == CHIP_ID_YUKON_FE)
993 else { 1019 ramsize = 4;
994 u8 e0 = sky2_read8(hw, B2_E_0); 1020 else
995 ramsize = (e0 == 0) ? (128 * 1024) : (e0 * 4096); 1021 ramsize = sky2_read8(hw, B2_E_0);
996 } 1022
1023 /* Give transmitter one third (rounded up) */
1024 rxspace = ramsize - (ramsize + 2) / 3;
997 1025
998 /* 2/3 for Rx */
999 rxspace = (2 * ramsize) / 3;
1000 sky2_ramset(hw, rxqaddr[port], 0, rxspace); 1026 sky2_ramset(hw, rxqaddr[port], 0, rxspace);
1001 sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace); 1027 sky2_ramset(hw, txqaddr[port], rxspace, ramsize);
1002 1028
1003 /* Make sure SyncQ is disabled */ 1029 /* Make sure SyncQ is disabled */
1004 sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL), 1030 sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
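
The hunk above reads the RAM size in 4K blocks from B2_E_0 and gives the transmitter one third, rounded up. Worked through for ramsize = 12 (48K):

	/* tx blocks = (12 + 2) / 3 = 4  (one third, rounded up)
	 * rxspace   = 12 - 4       = 8
	 * sky2_ramset(hw, rxq, 0, 8);    blocks 0..7  for receive
	 * sky2_ramset(hw, txq, 8, 12);   blocks 8..11 for transmit */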
@@ -1054,7 +1080,7 @@ static inline int tx_avail(const struct sky2_port *sky2)
1054} 1080}
1055 1081
1056/* Estimate of number of transmit list elements required */ 1082/* Estimate of number of transmit list elements required */
1057static inline unsigned tx_le_req(const struct sk_buff *skb) 1083static unsigned tx_le_req(const struct sk_buff *skb)
1058{ 1084{
1059 unsigned count; 1085 unsigned count;
1060 1086
@@ -1090,6 +1116,10 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1090 u16 mss; 1116 u16 mss;
1091 u8 ctrl; 1117 u8 ctrl;
1092 1118
1119 /* No BH disabling for tx_lock here. We are running in BH disabled
1120 * context and TX reclaim runs via poll inside of a software
1121 * interrupt, and no related locks in IRQ processing.
1122 */
1093 if (!spin_trylock(&sky2->tx_lock)) 1123 if (!spin_trylock(&sky2->tx_lock))
1094 return NETDEV_TX_LOCKED; 1124 return NETDEV_TX_LOCKED;
1095 1125
@@ -1099,8 +1129,9 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1099 */ 1129 */
1100 if (!netif_queue_stopped(dev)) { 1130 if (!netif_queue_stopped(dev)) {
1101 netif_stop_queue(dev); 1131 netif_stop_queue(dev);
1102 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n", 1132 if (net_ratelimit())
1103 dev->name); 1133 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
1134 dev->name);
1104 } 1135 }
1105 spin_unlock(&sky2->tx_lock); 1136 spin_unlock(&sky2->tx_lock);
1106 1137
@@ -1199,7 +1230,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1199 1230
1200 mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset, 1231 mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
1201 frag->size, PCI_DMA_TODEVICE); 1232 frag->size, PCI_DMA_TODEVICE);
1202 addr64 = (mapping >> 16) >> 16; 1233 addr64 = high32(mapping);
1203 if (addr64 != sky2->tx_addr64) { 1234 if (addr64 != sky2->tx_addr64) {
1204 le = get_tx_le(sky2); 1235 le = get_tx_le(sky2);
1205 le->tx.addr = cpu_to_le32(addr64); 1236 le->tx.addr = cpu_to_le32(addr64);
@@ -1229,7 +1260,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1229 netif_stop_queue(dev); 1260 netif_stop_queue(dev);
1230 1261
1231out_unlock: 1262out_unlock:
1232 mmiowb();
1233 spin_unlock(&sky2->tx_lock); 1263 spin_unlock(&sky2->tx_lock);
1234 1264
1235 dev->trans_start = jiffies; 1265 dev->trans_start = jiffies;
@@ -1282,17 +1312,17 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1282 dev_kfree_skb_any(skb); 1312 dev_kfree_skb_any(skb);
1283 } 1313 }
1284 1314
1285 spin_lock(&sky2->tx_lock);
1286 sky2->tx_cons = put; 1315 sky2->tx_cons = put;
1287 if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE) 1316 if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE)
1288 netif_wake_queue(dev); 1317 netif_wake_queue(dev);
1289 spin_unlock(&sky2->tx_lock);
1290} 1318}
1291 1319
1292/* Cleanup all untransmitted buffers, assume transmitter not running */ 1320/* Cleanup all untransmitted buffers, assume transmitter not running */
1293static void sky2_tx_clean(struct sky2_port *sky2) 1321static void sky2_tx_clean(struct sky2_port *sky2)
1294{ 1322{
1323 spin_lock_bh(&sky2->tx_lock);
1295 sky2_tx_complete(sky2, sky2->tx_prod); 1324 sky2_tx_complete(sky2, sky2->tx_prod);
1325 spin_unlock_bh(&sky2->tx_lock);
1296} 1326}
1297 1327
1298/* Network shutdown */ 1328/* Network shutdown */
@@ -1582,28 +1612,40 @@ out:
1582 local_irq_enable(); 1612 local_irq_enable();
1583} 1613}
1584 1614
1615
1616/* Transmit timeout is only called if we are running, carrier is up
1617 * and tx queue is full (stopped).
1618 */
1585static void sky2_tx_timeout(struct net_device *dev) 1619static void sky2_tx_timeout(struct net_device *dev)
1586{ 1620{
1587 struct sky2_port *sky2 = netdev_priv(dev); 1621 struct sky2_port *sky2 = netdev_priv(dev);
1588 struct sky2_hw *hw = sky2->hw; 1622 struct sky2_hw *hw = sky2->hw;
1589 unsigned txq = txqaddr[sky2->port]; 1623 unsigned txq = txqaddr[sky2->port];
1624 u16 ridx;
1625
1626 /* Maybe we just missed a status interrupt */
1627 spin_lock(&sky2->tx_lock);
1628 ridx = sky2_read16(hw,
1629 sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
1630 sky2_tx_complete(sky2, ridx);
1631 spin_unlock(&sky2->tx_lock);
1632
1633 if (!netif_queue_stopped(dev)) {
1634 if (net_ratelimit())
1635 pr_info(PFX "transmit interrupt missed? recovered\n");
1636 return;
1637 }
1590 1638
1591 if (netif_msg_timer(sky2)) 1639 if (netif_msg_timer(sky2))
1592 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name); 1640 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
1593 1641
1594 netif_stop_queue(dev);
1595
1596 sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP); 1642 sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
1597 sky2_read32(hw, Q_ADDR(txq, Q_CSR));
1598
1599 sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); 1643 sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
1600 1644
1601 sky2_tx_clean(sky2); 1645 sky2_tx_clean(sky2);
1602 1646
1603 sky2_qset(hw, txq); 1647 sky2_qset(hw, txq);
1604 sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1); 1648 sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
1605
1606 netif_wake_queue(dev);
1607} 1649}
1608 1650
1609 1651
@@ -1713,7 +1755,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
1713 } else { 1755 } else {
1714 struct sk_buff *nskb; 1756 struct sk_buff *nskb;
1715 1757
1716 nskb = dev_alloc_skb(sky2->rx_bufsize); 1758 nskb = sky2_alloc_skb(sky2->rx_bufsize, GFP_ATOMIC);
1717 if (!nskb) 1759 if (!nskb)
1718 goto resubmit; 1760 goto resubmit;
1719 1761
@@ -1745,7 +1787,7 @@ oversize:
1745error: 1787error:
1746 ++sky2->net_stats.rx_errors; 1788 ++sky2->net_stats.rx_errors;
1747 1789
1748 if (netif_msg_rx_err(sky2)) 1790 if (netif_msg_rx_err(sky2) && net_ratelimit())
1749 printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n", 1791 printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
1750 sky2->netdev->name, status, length); 1792 sky2->netdev->name, status, length);
1751 1793
@@ -1766,13 +1808,16 @@ error:
1766 */ 1808 */
1767#define TX_NO_STATUS 0xffff 1809#define TX_NO_STATUS 0xffff
1768 1810
1769static inline void sky2_tx_check(struct sky2_hw *hw, int port, u16 last) 1811static void sky2_tx_check(struct sky2_hw *hw, int port, u16 last)
1770{ 1812{
1771 if (last != TX_NO_STATUS) { 1813 if (last != TX_NO_STATUS) {
1772 struct net_device *dev = hw->dev[port]; 1814 struct net_device *dev = hw->dev[port];
1773 if (dev && netif_running(dev)) { 1815 if (dev && netif_running(dev)) {
1774 struct sky2_port *sky2 = netdev_priv(dev); 1816 struct sky2_port *sky2 = netdev_priv(dev);
1817
1818 spin_lock(&sky2->tx_lock);
1775 sky2_tx_complete(sky2, last); 1819 sky2_tx_complete(sky2, last);
1820 spin_unlock(&sky2->tx_lock);
1776 } 1821 }
1777 } 1822 }
1778} 1823}
@@ -1800,7 +1845,6 @@ static int sky2_poll(struct net_device *dev0, int *budget)
1800 struct sk_buff *skb; 1845 struct sk_buff *skb;
1801 u32 status; 1846 u32 status;
1802 u16 length; 1847 u16 length;
1803 u8 op;
1804 1848
1805 le = hw->st_le + hw->st_idx; 1849 le = hw->st_le + hw->st_idx;
1806 hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE; 1850 hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
@@ -1814,10 +1858,8 @@ static int sky2_poll(struct net_device *dev0, int *budget)
1814 sky2 = netdev_priv(dev); 1858 sky2 = netdev_priv(dev);
1815 status = le32_to_cpu(le->status); 1859 status = le32_to_cpu(le->status);
1816 length = le16_to_cpu(le->length); 1860 length = le16_to_cpu(le->length);
1817 op = le->opcode & ~HW_OWNER;
1818 le->opcode = 0;
1819 1861
1820 switch (op) { 1862 switch (le->opcode & ~HW_OWNER) {
1821 case OP_RXSTAT: 1863 case OP_RXSTAT:
1822 skb = sky2_receive(sky2, length, status); 1864 skb = sky2_receive(sky2, length, status);
1823 if (!skb) 1865 if (!skb)
@@ -1865,14 +1907,13 @@ static int sky2_poll(struct net_device *dev0, int *budget)
1865 default: 1907 default:
1866 if (net_ratelimit()) 1908 if (net_ratelimit())
1867 printk(KERN_WARNING PFX 1909 printk(KERN_WARNING PFX
1868 "unknown status opcode 0x%x\n", op); 1910 "unknown status opcode 0x%x\n", le->opcode);
1869 break; 1911 break;
1870 } 1912 }
1871 } 1913 }
1872 1914
1873exit_loop: 1915exit_loop:
1874 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); 1916 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
1875 mmiowb();
1876 1917
1877 sky2_tx_check(hw, 0, tx_done[0]); 1918 sky2_tx_check(hw, 0, tx_done[0]);
1878 sky2_tx_check(hw, 1, tx_done[1]); 1919 sky2_tx_check(hw, 1, tx_done[1]);
@@ -1887,7 +1928,6 @@ exit_loop:
1887 netif_rx_complete(dev0); 1928 netif_rx_complete(dev0);
1888 hw->intr_mask |= Y2_IS_STAT_BMU; 1929 hw->intr_mask |= Y2_IS_STAT_BMU;
1889 sky2_write32(hw, B0_IMSK, hw->intr_mask); 1930 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1890 mmiowb();
1891 return 0; 1931 return 0;
1892 } else { 1932 } else {
1893 *budget -= work_done; 1933 *budget -= work_done;
@@ -1900,35 +1940,42 @@ static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
1900{ 1940{
1901 struct net_device *dev = hw->dev[port]; 1941 struct net_device *dev = hw->dev[port];
1902 1942
1903 printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n", 1943 if (net_ratelimit())
1904 dev->name, status); 1944 printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
1945 dev->name, status);
1905 1946
1906 if (status & Y2_IS_PAR_RD1) { 1947 if (status & Y2_IS_PAR_RD1) {
1907 printk(KERN_ERR PFX "%s: ram data read parity error\n", 1948 if (net_ratelimit())
1908 dev->name); 1949 printk(KERN_ERR PFX "%s: ram data read parity error\n",
1950 dev->name);
1909 /* Clear IRQ */ 1951 /* Clear IRQ */
1910 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR); 1952 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
1911 } 1953 }
1912 1954
1913 if (status & Y2_IS_PAR_WR1) { 1955 if (status & Y2_IS_PAR_WR1) {
1914 printk(KERN_ERR PFX "%s: ram data write parity error\n", 1956 if (net_ratelimit())
1915 dev->name); 1957 printk(KERN_ERR PFX "%s: ram data write parity error\n",
1958 dev->name);
1916 1959
1917 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR); 1960 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
1918 } 1961 }
1919 1962
1920 if (status & Y2_IS_PAR_MAC1) { 1963 if (status & Y2_IS_PAR_MAC1) {
1921 printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name); 1964 if (net_ratelimit())
1965 printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
1922 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE); 1966 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
1923 } 1967 }
1924 1968
1925 if (status & Y2_IS_PAR_RX1) { 1969 if (status & Y2_IS_PAR_RX1) {
1926 printk(KERN_ERR PFX "%s: RX parity error\n", dev->name); 1970 if (net_ratelimit())
1971 printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
1927 sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR); 1972 sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
1928 } 1973 }
1929 1974
1930 if (status & Y2_IS_TCP_TXA1) { 1975 if (status & Y2_IS_TCP_TXA1) {
1931 printk(KERN_ERR PFX "%s: TCP segmentation error\n", dev->name); 1976 if (net_ratelimit())
1977 printk(KERN_ERR PFX "%s: TCP segmentation error\n",
1978 dev->name);
1932 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP); 1979 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
1933 } 1980 }
1934} 1981}
@@ -1944,8 +1991,9 @@ static void sky2_hw_intr(struct sky2_hw *hw)
1944 u16 pci_err; 1991 u16 pci_err;
1945 1992
1946 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err); 1993 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err);
1947 printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n", 1994 if (net_ratelimit())
1948 pci_name(hw->pdev), pci_err); 1995 printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
1996 pci_name(hw->pdev), pci_err);
1949 1997
1950 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1998 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1951 pci_write_config_word(hw->pdev, PCI_STATUS, 1999 pci_write_config_word(hw->pdev, PCI_STATUS,
@@ -1959,8 +2007,9 @@ static void sky2_hw_intr(struct sky2_hw *hw)
1959 2007
1960 pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err); 2008 pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err);
1961 2009
1962 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n", 2010 if (net_ratelimit())
1963 pci_name(hw->pdev), pex_err); 2011 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
2012 pci_name(hw->pdev), pex_err);
1964 2013
1965 /* clear the interrupt */ 2014 /* clear the interrupt */
1966 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2015 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
@@ -2250,7 +2299,7 @@ static int sky2_reset(struct sky2_hw *hw)
2250 return 0; 2299 return 0;
2251} 2300}
2252 2301
2253static inline u32 sky2_supported_modes(const struct sky2_hw *hw) 2302static u32 sky2_supported_modes(const struct sky2_hw *hw)
2254{ 2303{
2255 u32 modes; 2304 u32 modes;
2256 if (hw->copper) { 2305 if (hw->copper) {
@@ -2995,7 +3044,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
2995 return dev; 3044 return dev;
2996} 3045}
2997 3046
2998static inline void sky2_show_addr(struct net_device *dev) 3047static void __devinit sky2_show_addr(struct net_device *dev)
2999{ 3048{
3000 const struct sky2_port *sky2 = netdev_priv(dev); 3049 const struct sky2_port *sky2 = netdev_priv(dev);
3001 3050
@@ -3038,13 +3087,17 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3038 goto err_out_free_regions; 3087 goto err_out_free_regions;
3039 } 3088 }
3040 3089
3041 if (sizeof(dma_addr_t) > sizeof(u32)) { 3090 if (sizeof(dma_addr_t) > sizeof(u32) &&
3042 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); 3091 !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
3043 if (!err) 3092 using_dac = 1;
3044 using_dac = 1; 3093 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3045 } 3094 if (err < 0) {
3095 printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
3096 "for consistent allocations\n", pci_name(pdev));
3097 goto err_out_free_regions;
3098 }
3046 3099
3047 if (!using_dac) { 3100 } else {
3048 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 3101 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3049 if (err) { 3102 if (err) {
3050 printk(KERN_ERR PFX "%s no usable DMA configuration\n", 3103 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
@@ -3052,6 +3105,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3052 goto err_out_free_regions; 3105 goto err_out_free_regions;
3053 } 3106 }
3054 } 3107 }
3108
3055#ifdef __BIG_ENDIAN 3109#ifdef __BIG_ENDIAN
3056 /* byte swap descriptors in hardware */ 3110 /* byte swap descriptors in hardware */
3057 { 3111 {
@@ -3064,14 +3118,13 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3064#endif 3118#endif
3065 3119
3066 err = -ENOMEM; 3120 err = -ENOMEM;
3067 hw = kmalloc(sizeof(*hw), GFP_KERNEL); 3121 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
3068 if (!hw) { 3122 if (!hw) {
3069 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n", 3123 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
3070 pci_name(pdev)); 3124 pci_name(pdev));
3071 goto err_out_free_regions; 3125 goto err_out_free_regions;
3072 } 3126 }
3073 3127
3074 memset(hw, 0, sizeof(*hw));
3075 hw->pdev = pdev; 3128 hw->pdev = pdev;
3076 3129
3077 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 3130 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 0d765f1733b5..1f5975a61e1f 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -22,7 +22,6 @@
22 */ 22 */
23 23
24#include <linux/config.h> 24#include <linux/config.h>
25
26#include <linux/compiler.h> 25#include <linux/compiler.h>
27#include <linux/crc32.h> 26#include <linux/crc32.h>
28#include <linux/delay.h> 27#include <linux/delay.h>
@@ -30,6 +29,7 @@
30#include <linux/ethtool.h> 29#include <linux/ethtool.h>
31#include <linux/firmware.h> 30#include <linux/firmware.h>
32#include <linux/if_vlan.h> 31#include <linux/if_vlan.h>
32#include <linux/in.h>
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/ioport.h> 34#include <linux/ioport.h>
35#include <linux/ip.h> 35#include <linux/ip.h>
@@ -43,6 +43,7 @@
43#include <linux/slab.h> 43#include <linux/slab.h>
44#include <linux/tcp.h> 44#include <linux/tcp.h>
45#include <linux/types.h> 45#include <linux/types.h>
46#include <linux/vmalloc.h>
46#include <linux/wait.h> 47#include <linux/wait.h>
47#include <linux/workqueue.h> 48#include <linux/workqueue.h>
48#include <asm/bitops.h> 49#include <asm/bitops.h>
@@ -108,42 +109,6 @@ spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
108 writel(value, card->regs + reg); 109 writel(value, card->regs + reg);
109} 110}
110 111
111/**
112 * spider_net_write_reg_sync - writes to an SMMIO register of a card
113 * @card: device structure
114 * @reg: register to write to
115 * @value: value to write into the specified SMMIO register
116 *
117 * Unlike spider_net_write_reg, this will also make sure the
118 * data arrives on the card by reading the reg again.
119 */
120static void
121spider_net_write_reg_sync(struct spider_net_card *card, u32 reg, u32 value)
122{
123 value = cpu_to_le32(value);
124 writel(value, card->regs + reg);
125 (void)readl(card->regs + reg);
126}
127
128/**
129 * spider_net_rx_irq_off - switch off rx irq on this spider card
130 * @card: device structure
131 *
132 * switches off rx irq by masking them out in the GHIINTnMSK register
133 */
134static void
135spider_net_rx_irq_off(struct spider_net_card *card)
136{
137 u32 regvalue;
138 unsigned long flags;
139
140 spin_lock_irqsave(&card->intmask_lock, flags);
141 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
142 regvalue &= ~SPIDER_NET_RXINT;
143 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
144 spin_unlock_irqrestore(&card->intmask_lock, flags);
145}
146
147/** spider_net_write_phy - write to phy register 112/** spider_net_write_phy - write to phy register
148 * @netdev: adapter to be written to 113 * @netdev: adapter to be written to
149 * @mii_id: id of MII 114 * @mii_id: id of MII
@@ -199,60 +164,33 @@ spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
199} 164}
200 165
201/** 166/**
202 * spider_net_rx_irq_on - switch on rx irq on this spider card 167 * spider_net_rx_irq_off - switch off rx irq on this spider card
203 * @card: device structure
204 *
205 * switches on rx irq by enabling them in the GHIINTnMSK register
206 */
207static void
208spider_net_rx_irq_on(struct spider_net_card *card)
209{
210 u32 regvalue;
211 unsigned long flags;
212
213 spin_lock_irqsave(&card->intmask_lock, flags);
214 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
215 regvalue |= SPIDER_NET_RXINT;
216 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
217 spin_unlock_irqrestore(&card->intmask_lock, flags);
218}
219
220/**
221 * spider_net_tx_irq_off - switch off tx irq on this spider card
222 * @card: device structure 168 * @card: device structure
223 * 169 *
224 * switches off tx irq by masking them out in the GHIINTnMSK register 170 * switches off rx irq by masking them out in the GHIINTnMSK register
225 */ 171 */
226static void 172static void
227spider_net_tx_irq_off(struct spider_net_card *card) 173spider_net_rx_irq_off(struct spider_net_card *card)
228{ 174{
229 u32 regvalue; 175 u32 regvalue;
230 unsigned long flags;
231 176
232 spin_lock_irqsave(&card->intmask_lock, flags); 177 regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
233 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK); 178 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
234 regvalue &= ~SPIDER_NET_TXINT;
235 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
236 spin_unlock_irqrestore(&card->intmask_lock, flags);
237} 179}
238 180
239/** 181/**
240 * spider_net_tx_irq_on - switch on tx irq on this spider card 182 * spider_net_rx_irq_on - switch on rx irq on this spider card
241 * @card: device structure 183 * @card: device structure
242 * 184 *
243 * switches on tx irq by enabling them in the GHIINTnMSK register 185 * switches on rx irq by enabling them in the GHIINTnMSK register
244 */ 186 */
245static void 187static void
246spider_net_tx_irq_on(struct spider_net_card *card) 188spider_net_rx_irq_on(struct spider_net_card *card)
247{ 189{
248 u32 regvalue; 190 u32 regvalue;
249 unsigned long flags;
250 191
251 spin_lock_irqsave(&card->intmask_lock, flags); 192 regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
252 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK); 193 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
253 regvalue |= SPIDER_NET_TXINT;
254 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
255 spin_unlock_irqrestore(&card->intmask_lock, flags);
256} 194}
257 195
258/** 196/**
@@ -326,9 +264,8 @@ static enum spider_net_descr_status
326spider_net_get_descr_status(struct spider_net_descr *descr) 264spider_net_get_descr_status(struct spider_net_descr *descr)
327{ 265{
328 u32 cmd_status; 266 u32 cmd_status;
329 rmb(); 267
330 cmd_status = descr->dmac_cmd_status; 268 cmd_status = descr->dmac_cmd_status;
331 rmb();
332 cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT; 269 cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT;
333 /* no need to mask out any bits, as cmd_status is 32 bits wide only 270 /* no need to mask out any bits, as cmd_status is 32 bits wide only
334 * (and unsigned) */ 271 * (and unsigned) */
@@ -349,7 +286,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
349{ 286{
350 u32 cmd_status; 287 u32 cmd_status;
351 /* read the status */ 288 /* read the status */
352 mb();
353 cmd_status = descr->dmac_cmd_status; 289 cmd_status = descr->dmac_cmd_status;
354 /* clean the upper 4 bits */ 290 /* clean the upper 4 bits */
355 cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO; 291 cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO;
@@ -357,7 +293,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
357 cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT; 293 cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT;
358 /* and write it back */ 294 /* and write it back */
359 descr->dmac_cmd_status = cmd_status; 295 descr->dmac_cmd_status = cmd_status;
360 wmb();
361} 296}
362 297
363/** 298/**
@@ -398,8 +333,9 @@ spider_net_init_chain(struct spider_net_card *card,
398{ 333{
399 int i; 334 int i;
400 struct spider_net_descr *descr; 335 struct spider_net_descr *descr;
336 dma_addr_t buf;
401 337
402 spin_lock_init(&card->chain_lock); 338 atomic_set(&card->rx_chain_refill,0);
403 339
404 descr = start_descr; 340 descr = start_descr;
405 memset(descr, 0, sizeof(*descr) * no); 341 memset(descr, 0, sizeof(*descr) * no);
@@ -408,14 +344,14 @@ spider_net_init_chain(struct spider_net_card *card,
408 for (i=0; i<no; i++, descr++) { 344 for (i=0; i<no; i++, descr++) {
409 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 345 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
410 346
411 descr->bus_addr = 347 buf = pci_map_single(card->pdev, descr,
412 pci_map_single(card->pdev, descr, 348 SPIDER_NET_DESCR_SIZE,
413 SPIDER_NET_DESCR_SIZE, 349 PCI_DMA_BIDIRECTIONAL);
414 PCI_DMA_BIDIRECTIONAL);
415 350
416 if (descr->bus_addr == DMA_ERROR_CODE) 351 if (buf == DMA_ERROR_CODE)
417 goto iommu_error; 352 goto iommu_error;
418 353
354 descr->bus_addr = buf;
419 descr->next = descr + 1; 355 descr->next = descr + 1;
420 descr->prev = descr - 1; 356 descr->prev = descr - 1;
421 357
@@ -439,7 +375,8 @@ iommu_error:
439 for (i=0; i < no; i++, descr++) 375 for (i=0; i < no; i++, descr++)
440 if (descr->bus_addr) 376 if (descr->bus_addr)
441 pci_unmap_single(card->pdev, descr->bus_addr, 377 pci_unmap_single(card->pdev, descr->bus_addr,
442 SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL); 378 SPIDER_NET_DESCR_SIZE,
379 PCI_DMA_BIDIRECTIONAL);
443 return -ENOMEM; 380 return -ENOMEM;
444} 381}
445 382
@@ -459,7 +396,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
459 if (descr->skb) { 396 if (descr->skb) {
460 dev_kfree_skb(descr->skb); 397 dev_kfree_skb(descr->skb);
461 pci_unmap_single(card->pdev, descr->buf_addr, 398 pci_unmap_single(card->pdev, descr->buf_addr,
462 SPIDER_NET_MAX_MTU, 399 SPIDER_NET_MAX_FRAME,
463 PCI_DMA_BIDIRECTIONAL); 400 PCI_DMA_BIDIRECTIONAL);
464 } 401 }
465 descr = descr->next; 402 descr = descr->next;
@@ -480,12 +417,13 @@ static int
480spider_net_prepare_rx_descr(struct spider_net_card *card, 417spider_net_prepare_rx_descr(struct spider_net_card *card,
481 struct spider_net_descr *descr) 418 struct spider_net_descr *descr)
482{ 419{
420 dma_addr_t buf;
483 int error = 0; 421 int error = 0;
484 int offset; 422 int offset;
485 int bufsize; 423 int bufsize;
486 424
487 /* we need to round up the buffer size to a multiple of 128 */ 425 /* we need to round up the buffer size to a multiple of 128 */
488 bufsize = (SPIDER_NET_MAX_MTU + SPIDER_NET_RXBUF_ALIGN - 1) & 426 bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
489 (~(SPIDER_NET_RXBUF_ALIGN - 1)); 427 (~(SPIDER_NET_RXBUF_ALIGN - 1));
490 428
491 /* and we need to have it 128 byte aligned, therefore we allocate a 429 /* and we need to have it 128 byte aligned, therefore we allocate a
@@ -493,10 +431,8 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
493 /* allocate an skb */ 431 /* allocate an skb */
494 descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1); 432 descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
495 if (!descr->skb) { 433 if (!descr->skb) {
496 if (net_ratelimit()) 434 if (netif_msg_rx_err(card) && net_ratelimit())
497 if (netif_msg_rx_err(card)) 435 pr_err("Not enough memory to allocate rx buffer\n");
498 pr_err("Not enough memory to allocate "
499 "rx buffer\n");
500 return -ENOMEM; 436 return -ENOMEM;
501 } 437 }
502 descr->buf_size = bufsize; 438 descr->buf_size = bufsize;
@@ -510,12 +446,12 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
510 if (offset) 446 if (offset)
511 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); 447 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
512 /* io-mmu-map the skb */ 448 /* io-mmu-map the skb */
513 descr->buf_addr = pci_map_single(card->pdev, descr->skb->data, 449 buf = pci_map_single(card->pdev, descr->skb->data,
514 SPIDER_NET_MAX_MTU, 450 SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
515 PCI_DMA_BIDIRECTIONAL); 451 descr->buf_addr = buf;
516 if (descr->buf_addr == DMA_ERROR_CODE) { 452 if (buf == DMA_ERROR_CODE) {
517 dev_kfree_skb_any(descr->skb); 453 dev_kfree_skb_any(descr->skb);
518 if (netif_msg_rx_err(card)) 454 if (netif_msg_rx_err(card) && net_ratelimit())
519 pr_err("Could not iommu-map rx buffer\n"); 455 pr_err("Could not iommu-map rx buffer\n");
520 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 456 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
521 } else { 457 } else {
@@ -526,10 +462,10 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
526} 462}
527 463
528/** 464/**
529 * spider_net_enable_rxctails - sets RX dmac chain tail addresses 465 * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
530 * @card: card structure 466 * @card: card structure
531 * 467 *
 532 * spider_net_enable_rxctails sets the RX DMAC chain tail addresses in the 468 * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
533 * chip by writing to the appropriate register. DMA is enabled in 469 * chip by writing to the appropriate register. DMA is enabled in
534 * spider_net_enable_rxdmac. 470 * spider_net_enable_rxdmac.
535 */ 471 */
@@ -551,6 +487,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card)
551static void 487static void
552spider_net_enable_rxdmac(struct spider_net_card *card) 488spider_net_enable_rxdmac(struct spider_net_card *card)
553{ 489{
490 wmb();
554 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR, 491 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
555 SPIDER_NET_DMA_RX_VALUE); 492 SPIDER_NET_DMA_RX_VALUE);
556} 493}
@@ -559,32 +496,28 @@ spider_net_enable_rxdmac(struct spider_net_card *card)
559 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains 496 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
560 * @card: card structure 497 * @card: card structure
561 * 498 *
562 * refills descriptors in all chains (last used chain first): allocates skbs 499 * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
563 * and iommu-maps them.
564 */ 500 */
565static void 501static void
566spider_net_refill_rx_chain(struct spider_net_card *card) 502spider_net_refill_rx_chain(struct spider_net_card *card)
567{ 503{
568 struct spider_net_descr_chain *chain; 504 struct spider_net_descr_chain *chain;
569 int count = 0;
570 unsigned long flags;
571 505
572 chain = &card->rx_chain; 506 chain = &card->rx_chain;
573 507
574 spin_lock_irqsave(&card->chain_lock, flags); 508 /* one context doing the refill (and a second context seeing that
 575 while (spider_net_get_descr_status(chain->head) == 509 * and skipping it) is ok. If called by NAPI, we'll be called again
576 SPIDER_NET_DESCR_NOT_IN_USE) { 510 * as spider_net_decode_one_descr is called several times. If some
577 if (spider_net_prepare_rx_descr(card, chain->head)) 511 * interrupt calls us, the NAPI is about to clean up anyway. */
578 break; 512 if (atomic_inc_return(&card->rx_chain_refill) == 1)
579 count++; 513 while (spider_net_get_descr_status(chain->head) ==
580 chain->head = chain->head->next; 514 SPIDER_NET_DESCR_NOT_IN_USE) {
581 } 515 if (spider_net_prepare_rx_descr(card, chain->head))
582 spin_unlock_irqrestore(&card->chain_lock, flags); 516 break;
517 chain->head = chain->head->next;
518 }
583 519
584 /* could be optimized, only do that, if we know the DMA processing 520 atomic_dec(&card->rx_chain_refill);
585 * has terminated */
586 if (count)
587 spider_net_enable_rxdmac(card);
588} 521}
589 522
590/** 523/**
@@ -613,6 +546,7 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card)
613 /* this will allocate the rest of the rx buffers; if not, it's 546 /* this will allocate the rest of the rx buffers; if not, it's
614 * business as usual later on */ 547 * business as usual later on */
615 spider_net_refill_rx_chain(card); 548 spider_net_refill_rx_chain(card);
549 spider_net_enable_rxdmac(card);
616 return 0; 550 return 0;
617 551
618error: 552error:
@@ -649,24 +583,30 @@ spider_net_release_tx_descr(struct spider_net_card *card,
649 * @card: adapter structure 583 * @card: adapter structure
650 * @brutal: if set, don't care about whether descriptor seems to be in use 584 * @brutal: if set, don't care about whether descriptor seems to be in use
651 * 585 *
652 * releases the tx descriptors that spider has finished with (if non-brutal) 586 * returns 0 if the tx ring is empty, otherwise 1.
653 * or simply release tx descriptors (if brutal) 587 *
588 * spider_net_release_tx_chain releases the tx descriptors that spider has
589	 * finished with (if non-brutal) or simply releases tx descriptors (if brutal).
590 * If some other context is calling this function, we return 1 so that we're
591	 * scheduled again (if we were scheduled) and will not lose the initiative.
654 */ 592 */
655static void 593static int
656spider_net_release_tx_chain(struct spider_net_card *card, int brutal) 594spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
657{ 595{
658 struct spider_net_descr_chain *tx_chain = &card->tx_chain; 596 struct spider_net_descr_chain *tx_chain = &card->tx_chain;
659 enum spider_net_descr_status status; 597 enum spider_net_descr_status status;
660 598
661 spider_net_tx_irq_off(card); 599 if (atomic_inc_return(&card->tx_chain_release) != 1) {
600 atomic_dec(&card->tx_chain_release);
601 return 1;
602 }
662 603
663 /* no lock for chain needed, if this is only executed once at a time */
664again:
665 for (;;) { 604 for (;;) {
666 status = spider_net_get_descr_status(tx_chain->tail); 605 status = spider_net_get_descr_status(tx_chain->tail);
667 switch (status) { 606 switch (status) {
668 case SPIDER_NET_DESCR_CARDOWNED: 607 case SPIDER_NET_DESCR_CARDOWNED:
669 if (!brutal) goto out; 608 if (!brutal)
609 goto out;
670 /* fallthrough, if we release the descriptors 610 /* fallthrough, if we release the descriptors
671 * brutally (then we don't care about 611 * brutally (then we don't care about
672 * SPIDER_NET_DESCR_CARDOWNED) */ 612 * SPIDER_NET_DESCR_CARDOWNED) */
@@ -693,25 +633,30 @@ again:
693 tx_chain->tail = tx_chain->tail->next; 633 tx_chain->tail = tx_chain->tail->next;
694 } 634 }
695out: 635out:
636 atomic_dec(&card->tx_chain_release);
637
696 netif_wake_queue(card->netdev); 638 netif_wake_queue(card->netdev);
697 639
698 if (!brutal) { 640 if (status == SPIDER_NET_DESCR_CARDOWNED)
699 /* switch on tx irqs (while we are still in the interrupt 641 return 1;
700 * handler, so we don't get an interrupt), check again 642 return 0;
701 * for done descriptors. This results in fewer interrupts */ 643}
702 spider_net_tx_irq_on(card);
703 status = spider_net_get_descr_status(tx_chain->tail);
704 switch (status) {
705 case SPIDER_NET_DESCR_RESPONSE_ERROR:
706 case SPIDER_NET_DESCR_PROTECTION_ERROR:
707 case SPIDER_NET_DESCR_FORCE_END:
708 case SPIDER_NET_DESCR_COMPLETE:
709 goto again;
710 default:
711 break;
712 }
713 }
714 644
645/**
646 * spider_net_cleanup_tx_ring - cleans up the TX ring
647 * @card: card structure
648 *
649 * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
650	 * interrupts to clean up our TX ring) and returns sent packets to the stack
651 * by freeing them
652 */
653static void
654spider_net_cleanup_tx_ring(struct spider_net_card *card)
655{
656 if ( (spider_net_release_tx_chain(card, 0)) &&
657 (card->netdev->flags & IFF_UP) ) {
658 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
659 }
715} 660}
716 661
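The timer plumbing for this handler is scattered through the rest of the patch; collected in one place, and assuming the pre-timer_setup() timer API of this kernel, it amounts to:

	/* at probe time (spider_net_setup_netdev below) */
	init_timer(&card->tx_timer);
	card->tx_timer.function =
		(void (*)(unsigned long)) spider_net_cleanup_tx_ring;
	card->tx_timer.data = (unsigned long) card;

	/* armed on every transmit (spider_net_xmit) ... */
	mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);

	/* ... re-armed above while descriptors remain, and stopped
	 * synchronously on ifdown (spider_net_stop) */
	del_timer_sync(&card->tx_timer);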
717/** 662/**
@@ -726,16 +671,22 @@ out:
726static u8 671static u8
727spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr) 672spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
728{ 673{
729 /* FIXME: an addr of 01:00:5e:00:00:01 must result in 0xa9,
730 * ff:ff:ff:ff:ff:ff must result in 0xfd */
731 u32 crc; 674 u32 crc;
732 u8 hash; 675 u8 hash;
676 char addr_for_crc[ETH_ALEN] = { 0, };
677 int i, bit;
733 678
734 crc = crc32_be(~0, addr, netdev->addr_len); 679 for (i = 0; i < ETH_ALEN * 8; i++) {
680 bit = (addr[i / 8] >> (i % 8)) & 1;
681 addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
682 }
683
684 crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
735 685
736 hash = (crc >> 27); 686 hash = (crc >> 27);
737 hash <<= 3; 687 hash <<= 3;
738 hash |= crc & 7; 688 hash |= crc & 7;
689 hash &= 0xff;
739 690
740 return hash; 691 return hash;
741} 692}
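The new loop feeds crc32_be() a copy of the address with the byte order and the bit order within each byte both reversed, which is what produces the values the removed FIXME demanded (0xa9 for 01:00:5e:00:00:01, 0xfd for ff:ff:ff:ff:ff:ff). An equivalent sketch with the reversal written per byte (illustrative, not the driver function):

	static u8 hash_of(const u8 *addr)	/* ETH_ALEN bytes */
	{
		u8 reversed[ETH_ALEN] = { 0 };
		u32 crc;
		int i, j;

		for (i = 0; i < ETH_ALEN; i++)
			for (j = 0; j < 8; j++)
				if (addr[i] & (1 << j))
					reversed[ETH_ALEN - 1 - i] |=
						1 << (7 - j);

		crc = crc32_be(~0, reversed, ETH_ALEN);
		/* top 5 CRC bits shifted up, low 3 CRC bits below them */
		return ((crc >> 27) << 3) | (crc & 7);
	}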
@@ -821,9 +772,11 @@ spider_net_stop(struct net_device *netdev)
821{ 772{
822 struct spider_net_card *card = netdev_priv(netdev); 773 struct spider_net_card *card = netdev_priv(netdev);
823 774
775 tasklet_kill(&card->rxram_full_tl);
824 netif_poll_disable(netdev); 776 netif_poll_disable(netdev);
825 netif_carrier_off(netdev); 777 netif_carrier_off(netdev);
826 netif_stop_queue(netdev); 778 netif_stop_queue(netdev);
779 del_timer_sync(&card->tx_timer);
827 780
828 /* disable/mask all interrupts */ 781 /* disable/mask all interrupts */
829 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); 782 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
@@ -872,13 +825,15 @@ spider_net_get_next_tx_descr(struct spider_net_card *card)
872 * @skb: packet to consider 825 * @skb: packet to consider
873 * 826 *
874 * fills out the command and status field of the descriptor structure, 827 * fills out the command and status field of the descriptor structure,
875 * depending on hardware checksum settings. This function assumes a wmb() 828 * depending on hardware checksum settings.
876 * has executed before.
877 */ 829 */
878static void 830static void
879spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr, 831spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
880 struct sk_buff *skb) 832 struct sk_buff *skb)
881{ 833{
834 /* make sure the other fields in the descriptor are written */
835 wmb();
836
882 if (skb->ip_summed != CHECKSUM_HW) { 837 if (skb->ip_summed != CHECKSUM_HW) {
883 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; 838 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
884 return; 839 return;
@@ -887,14 +842,13 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
887 /* is packet ip? 842 /* is packet ip?
888 * if yes: tcp? udp? */ 843 * if yes: tcp? udp? */
889 if (skb->protocol == htons(ETH_P_IP)) { 844 if (skb->protocol == htons(ETH_P_IP)) {
890 if (skb->nh.iph->protocol == IPPROTO_TCP) { 845 if (skb->nh.iph->protocol == IPPROTO_TCP)
891 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS; 846 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS;
892 } else if (skb->nh.iph->protocol == IPPROTO_UDP) { 847 else if (skb->nh.iph->protocol == IPPROTO_UDP)
893 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS; 848 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS;
894 } else { /* the stack should checksum non-tcp and non-udp 849 else /* the stack should checksum non-tcp and non-udp
895	           packets on his own: NETIF_F_IP_CSUM */	850	           packets on its own: NETIF_F_IP_CSUM */
896 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; 851 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
897 }
898 } 852 }
899} 853}
900 854
@@ -914,23 +868,22 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
914 struct spider_net_descr *descr, 868 struct spider_net_descr *descr,
915 struct sk_buff *skb) 869 struct sk_buff *skb)
916{ 870{
917 descr->buf_addr = pci_map_single(card->pdev, skb->data, 871 dma_addr_t buf;
918 skb->len, PCI_DMA_BIDIRECTIONAL); 872
919 if (descr->buf_addr == DMA_ERROR_CODE) { 873 buf = pci_map_single(card->pdev, skb->data,
920 if (netif_msg_tx_err(card)) 874 skb->len, PCI_DMA_BIDIRECTIONAL);
875 if (buf == DMA_ERROR_CODE) {
876 if (netif_msg_tx_err(card) && net_ratelimit())
921 pr_err("could not iommu-map packet (%p, %i). " 877 pr_err("could not iommu-map packet (%p, %i). "
922 "Dropping packet\n", skb->data, skb->len); 878 "Dropping packet\n", skb->data, skb->len);
923 return -ENOMEM; 879 return -ENOMEM;
924 } 880 }
925 881
882 descr->buf_addr = buf;
926 descr->buf_size = skb->len; 883 descr->buf_size = skb->len;
927 descr->skb = skb; 884 descr->skb = skb;
928 descr->data_status = 0; 885 descr->data_status = 0;
929 886
930 /* make sure the above values are in memory before we change the
931 * status */
932 wmb();
933
934 spider_net_set_txdescr_cmdstat(descr,skb); 887 spider_net_set_txdescr_cmdstat(descr,skb);
935 888
936 return 0; 889 return 0;
@@ -972,17 +925,12 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
972 struct spider_net_descr *descr; 925 struct spider_net_descr *descr;
973 int result; 926 int result;
974 927
975 descr = spider_net_get_next_tx_descr(card); 928 spider_net_release_tx_chain(card, 0);
976 929
977 if (!descr) { 930 descr = spider_net_get_next_tx_descr(card);
978 netif_stop_queue(netdev);
979 931
980 descr = spider_net_get_next_tx_descr(card); 932 if (!descr)
981 if (!descr) 933 goto error;
982 goto error;
983 else
984 netif_start_queue(netdev);
985 }
986 934
987 result = spider_net_prepare_tx_descr(card, descr, skb); 935 result = spider_net_prepare_tx_descr(card, descr, skb);
988 if (result) 936 if (result)
@@ -990,19 +938,25 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
990 938
991 card->tx_chain.head = card->tx_chain.head->next; 939 card->tx_chain.head = card->tx_chain.head->next;
992 940
993 /* make sure the status from spider_net_prepare_tx_descr is in
994 * memory before we check out the previous descriptor */
995 wmb();
996
997 if (spider_net_get_descr_status(descr->prev) != 941 if (spider_net_get_descr_status(descr->prev) !=
998 SPIDER_NET_DESCR_CARDOWNED) 942 SPIDER_NET_DESCR_CARDOWNED) {
999		spider_net_kick_tx_dma(card, descr);	943		/* make sure the current descriptor is in memory. Kicking
	944		 * the DMA on again only makes sense if the previous descriptor
	945		 * is no longer card-owned. Check the previous descriptor twice
	946		 * so we can skip the mb() in the common heavy-traffic case */
947 mb();
948 if (spider_net_get_descr_status(descr->prev) !=
949 SPIDER_NET_DESCR_CARDOWNED)
950 spider_net_kick_tx_dma(card, descr);
951 }
952
953 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
1000 954
1001 return NETDEV_TX_OK; 955 return NETDEV_TX_OK;
1002 956
1003error: 957error:
1004 card->netdev_stats.tx_dropped++; 958 card->netdev_stats.tx_dropped++;
1005 return NETDEV_TX_LOCKED; 959 return NETDEV_TX_BUSY;
1006} 960}
1007 961
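The double check above is a cheap-read/barrier/re-read idiom: under heavy traffic the first, unordered read already finds the previous descriptor card-owned and the mb() is never paid; only an apparently idle chain costs the barrier plus the authoritative second read before the DMA engine is kicked. Stripped to its skeleton (illustrative names):

	if (status_of(descr->prev) != CARDOWNED) {	/* cheap hint */
		/* order our descriptor writes against the re-read */
		mb();
		if (status_of(descr->prev) != CARDOWNED)
			kick_tx_dma(card, descr);	/* chain idle */
	}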
1008/** 962/**
@@ -1027,6 +981,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1027 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on 981 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
1028 * @descr: descriptor to process 982 * @descr: descriptor to process
1029 * @card: card structure 983 * @card: card structure
984 * @napi: whether caller is in NAPI context
1030 * 985 *
1031 * returns 1 on success, 0 if no packet was passed to the stack 986 * returns 1 on success, 0 if no packet was passed to the stack
1032 * 987 *
@@ -1035,7 +990,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1035 */ 990 */
1036static int 991static int
1037spider_net_pass_skb_up(struct spider_net_descr *descr, 992spider_net_pass_skb_up(struct spider_net_descr *descr,
1038 struct spider_net_card *card) 993 struct spider_net_card *card, int napi)
1039{ 994{
1040 struct sk_buff *skb; 995 struct sk_buff *skb;
1041 struct net_device *netdev; 996 struct net_device *netdev;
@@ -1046,22 +1001,20 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1046 1001
1047 netdev = card->netdev; 1002 netdev = card->netdev;
1048 1003
1049 /* check for errors in the data_error flag */ 1004 /* unmap descriptor */
1050 if ((data_error & SPIDER_NET_DATA_ERROR_MASK) && 1005 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
1051 netif_msg_rx_err(card))
1052 pr_err("error in received descriptor found, "
1053 "data_status=x%08x, data_error=x%08x\n",
1054 data_status, data_error);
1055
1056 /* prepare skb, unmap descriptor */
1057 skb = descr->skb;
1058 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_MTU,
1059 PCI_DMA_BIDIRECTIONAL); 1006 PCI_DMA_BIDIRECTIONAL);
1060 1007
1061 /* the cases we'll throw away the packet immediately */ 1008 /* the cases we'll throw away the packet immediately */
1062 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) 1009 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
1010 if (netif_msg_rx_err(card))
1011 pr_err("error in received descriptor found, "
1012 "data_status=x%08x, data_error=x%08x\n",
1013 data_status, data_error);
1063 return 0; 1014 return 0;
1015 }
1064 1016
1017 skb = descr->skb;
1065 skb->dev = netdev; 1018 skb->dev = netdev;
1066 skb_put(skb, descr->valid_size); 1019 skb_put(skb, descr->valid_size);
1067 1020
@@ -1073,14 +1026,14 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1073 1026
1074 /* checksum offload */ 1027 /* checksum offload */
1075 if (card->options.rx_csum) { 1028 if (card->options.rx_csum) {
1076 if ( (data_status & SPIDER_NET_DATA_STATUS_CHK_MASK) && 1029 if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
1077 (!(data_error & SPIDER_NET_DATA_ERROR_CHK_MASK)) ) 1030 SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
1031 !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
1078 skb->ip_summed = CHECKSUM_UNNECESSARY; 1032 skb->ip_summed = CHECKSUM_UNNECESSARY;
1079 else 1033 else
1080 skb->ip_summed = CHECKSUM_NONE; 1034 skb->ip_summed = CHECKSUM_NONE;
1081 } else { 1035 } else
1082 skb->ip_summed = CHECKSUM_NONE; 1036 skb->ip_summed = CHECKSUM_NONE;
1083 }
1084 1037
1085 if (data_status & SPIDER_NET_VLAN_PACKET) { 1038 if (data_status & SPIDER_NET_VLAN_PACKET) {
1086 /* further enhancements: HW-accel VLAN 1039 /* further enhancements: HW-accel VLAN
@@ -1089,7 +1042,10 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1089 } 1042 }
1090 1043
1091 /* pass skb up to stack */ 1044 /* pass skb up to stack */
1092 netif_receive_skb(skb); 1045 if (napi)
1046 netif_receive_skb(skb);
1047 else
1048 netif_rx_ni(skb);
1093 1049
1094 /* update netdevice statistics */ 1050 /* update netdevice statistics */
1095 card->netdev_stats.rx_packets++; 1051 card->netdev_stats.rx_packets++;
@@ -1099,16 +1055,18 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1099} 1055}
1100 1056
1101/** 1057/**
1102 * spider_net_decode_descr - processes an rx descriptor 1058 * spider_net_decode_one_descr - processes an rx descriptor
1103 * @card: card structure 1059 * @card: card structure
1060 * @napi: whether caller is in NAPI context
1104 * 1061 *
1105 * returns 1 if a packet has been sent to the stack, otherwise 0 1062 * returns 1 if a packet has been sent to the stack, otherwise 0
1106 * 1063 *
1107 * processes an rx descriptor by iommu-unmapping the data buffer and passing 1064 * processes an rx descriptor by iommu-unmapping the data buffer and passing
1108 * the packet up to the stack 1065 * the packet up to the stack. This function is called in softirq
1066	 * context, i.e. either as a bottom half from an interrupt or from NAPI polling
1109 */ 1067 */
1110static int 1068static int
1111spider_net_decode_one_descr(struct spider_net_card *card) 1069spider_net_decode_one_descr(struct spider_net_card *card, int napi)
1112{ 1070{
1113 enum spider_net_descr_status status; 1071 enum spider_net_descr_status status;
1114 struct spider_net_descr *descr; 1072 struct spider_net_descr *descr;
@@ -1122,17 +1080,19 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1122 1080
1123 if (status == SPIDER_NET_DESCR_CARDOWNED) { 1081 if (status == SPIDER_NET_DESCR_CARDOWNED) {
1124 /* nothing in the descriptor yet */ 1082 /* nothing in the descriptor yet */
1125		return 0;	1083		result = 0;
1084 goto out;
1126 } 1085 }
1127 1086
1128 if (status == SPIDER_NET_DESCR_NOT_IN_USE) { 1087 if (status == SPIDER_NET_DESCR_NOT_IN_USE) {
1129 /* not initialized yet, I bet chain->tail == chain->head 1088 /* not initialized yet, the ring must be empty */
1130 * and the ring is empty */
1131 spider_net_refill_rx_chain(card); 1089 spider_net_refill_rx_chain(card);
1132 return 0; 1090 spider_net_enable_rxdmac(card);
1091		result = 0;
1092 goto out;
1133 } 1093 }
1134 1094
1135	/* descriptor definitively used -- move on head */	1095	/* descriptor definitively used -- advance the tail */
1136 chain->tail = descr->next; 1096 chain->tail = descr->next;
1137 1097
1138 result = 0; 1098 result = 0;
@@ -1143,6 +1103,9 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1143 pr_err("%s: dropping RX descriptor with state %d\n", 1103 pr_err("%s: dropping RX descriptor with state %d\n",
1144 card->netdev->name, status); 1104 card->netdev->name, status);
1145 card->netdev_stats.rx_dropped++; 1105 card->netdev_stats.rx_dropped++;
1106 pci_unmap_single(card->pdev, descr->buf_addr,
1107 SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
1108 dev_kfree_skb_irq(descr->skb);
1146 goto refill; 1109 goto refill;
1147 } 1110 }
1148 1111
@@ -1155,12 +1118,13 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1155 } 1118 }
1156 1119
1157 /* ok, we've got a packet in descr */ 1120 /* ok, we've got a packet in descr */
1158 result = spider_net_pass_skb_up(descr, card); 1121 result = spider_net_pass_skb_up(descr, card, napi);
1159refill: 1122refill:
1160 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 1123 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
1161 /* change the descriptor state: */ 1124 /* change the descriptor state: */
1162 spider_net_refill_rx_chain(card); 1125 if (!napi)
1163 1126 spider_net_refill_rx_chain(card);
1127out:
1164 return result; 1128 return result;
1165} 1129}
1166 1130
@@ -1186,7 +1150,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
1186 packets_to_do = min(*budget, netdev->quota); 1150 packets_to_do = min(*budget, netdev->quota);
1187 1151
1188 while (packets_to_do) { 1152 while (packets_to_do) {
1189 if (spider_net_decode_one_descr(card)) { 1153 if (spider_net_decode_one_descr(card, 1)) {
1190 packets_done++; 1154 packets_done++;
1191 packets_to_do--; 1155 packets_to_do--;
1192 } else { 1156 } else {
@@ -1198,6 +1162,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
1198 1162
1199 netdev->quota -= packets_done; 1163 netdev->quota -= packets_done;
1200 *budget -= packets_done; 1164 *budget -= packets_done;
1165 spider_net_refill_rx_chain(card);
1201 1166
1202 /* if all packets are in the stack, enable interrupts and return 0 */ 1167 /* if all packets are in the stack, enable interrupts and return 0 */
1203 /* if not, return 1 */ 1168 /* if not, return 1 */
@@ -1342,6 +1307,24 @@ spider_net_enable_txdmac(struct spider_net_card *card)
1342} 1307}
1343 1308
1344/** 1309/**
1310 * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
1311 * @card: card structure
1312 *
1313 * spider_net_handle_rxram_full empties the RX ring so that spider can put
1314 * more packets in it and empty its RX RAM. This is called in bottom half
1315 * context
1316 */
1317static void
1318spider_net_handle_rxram_full(struct spider_net_card *card)
1319{
1320 while (spider_net_decode_one_descr(card, 0))
1321 ;
1322 spider_net_enable_rxchtails(card);
1323 spider_net_enable_rxdmac(card);
1324 netif_rx_schedule(card->netdev);
1325}
1326
1327/**
1345 * spider_net_handle_error_irq - handles errors raised by an interrupt 1328 * spider_net_handle_error_irq - handles errors raised by an interrupt
1346 * @card: card structure 1329 * @card: card structure
1347 * @status_reg: interrupt status register 0 (GHIINT0STS) 1330 * @status_reg: interrupt status register 0 (GHIINT0STS)
@@ -1449,17 +1432,21 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1449 switch (i) 1432 switch (i)
1450 { 1433 {
1451 case SPIDER_NET_GTMFLLINT: 1434 case SPIDER_NET_GTMFLLINT:
1452 if (netif_msg_intr(card)) 1435 if (netif_msg_intr(card) && net_ratelimit())
1453 pr_err("Spider TX RAM full\n"); 1436 pr_err("Spider TX RAM full\n");
1454 show_error = 0; 1437 show_error = 0;
1455 break; 1438 break;
1439 case SPIDER_NET_GRFDFLLINT: /* fallthrough */
1440 case SPIDER_NET_GRFCFLLINT: /* fallthrough */
1441 case SPIDER_NET_GRFBFLLINT: /* fallthrough */
1442 case SPIDER_NET_GRFAFLLINT: /* fallthrough */
1456 case SPIDER_NET_GRMFLLINT: 1443 case SPIDER_NET_GRMFLLINT:
1457 if (netif_msg_intr(card)) 1444 if (netif_msg_intr(card) && net_ratelimit())
1458 pr_err("Spider RX RAM full, incoming packets " 1445 pr_err("Spider RX RAM full, incoming packets "
1459 "might be discarded !\n"); 1446 "might be discarded!\n");
1460 netif_rx_schedule(card->netdev); 1447 spider_net_rx_irq_off(card);
1461 spider_net_enable_rxchtails(card); 1448 tasklet_schedule(&card->rxram_full_tl);
1462 spider_net_enable_rxdmac(card); 1449 show_error = 0;
1463 break; 1450 break;
1464 1451
1465 /* case SPIDER_NET_GTMSHTINT: problem, print a message */ 1452 /* case SPIDER_NET_GTMSHTINT: problem, print a message */
@@ -1467,10 +1454,6 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1467 /* allrighty. tx from previous descr ok */ 1454 /* allrighty. tx from previous descr ok */
1468 show_error = 0; 1455 show_error = 0;
1469 break; 1456 break;
1470 /* case SPIDER_NET_GRFDFLLINT: print a message down there */
1471 /* case SPIDER_NET_GRFCFLLINT: print a message down there */
1472 /* case SPIDER_NET_GRFBFLLINT: print a message down there */
1473 /* case SPIDER_NET_GRFAFLLINT: print a message down there */
1474 1457
1475 /* chain end */ 1458 /* chain end */
1476 case SPIDER_NET_GDDDCEINT: /* fallthrough */ 1459 case SPIDER_NET_GDDDCEINT: /* fallthrough */
@@ -1482,6 +1465,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1482 "restarting DMAC %c.\n", 1465 "restarting DMAC %c.\n",
1483 'D'+i-SPIDER_NET_GDDDCEINT); 1466 'D'+i-SPIDER_NET_GDDDCEINT);
1484 spider_net_refill_rx_chain(card); 1467 spider_net_refill_rx_chain(card);
1468 spider_net_enable_rxdmac(card);
1485 show_error = 0; 1469 show_error = 0;
1486 break; 1470 break;
1487 1471
@@ -1492,6 +1476,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1492 case SPIDER_NET_GDAINVDINT: 1476 case SPIDER_NET_GDAINVDINT:
1493 /* could happen when rx chain is full */ 1477 /* could happen when rx chain is full */
1494 spider_net_refill_rx_chain(card); 1478 spider_net_refill_rx_chain(card);
1479 spider_net_enable_rxdmac(card);
1495 show_error = 0; 1480 show_error = 0;
1496 break; 1481 break;
1497 1482
@@ -1580,17 +1565,13 @@ spider_net_interrupt(int irq, void *ptr, struct pt_regs *regs)
1580 if (!status_reg) 1565 if (!status_reg)
1581 return IRQ_NONE; 1566 return IRQ_NONE;
1582 1567
1583 if (status_reg & SPIDER_NET_TXINT)
1584 spider_net_release_tx_chain(card, 0);
1585
1586 if (status_reg & SPIDER_NET_RXINT ) { 1568 if (status_reg & SPIDER_NET_RXINT ) {
1587 spider_net_rx_irq_off(card); 1569 spider_net_rx_irq_off(card);
1588 netif_rx_schedule(netdev); 1570 netif_rx_schedule(netdev);
1589 } 1571 }
1590 1572
1591 /* we do this after rx and tx processing, as we want the tx chain 1573 if (status_reg & SPIDER_NET_ERRINT )
1592 * processed to see, whether we should restart tx dma processing */ 1574 spider_net_handle_error_irq(card, status_reg);
1593 spider_net_handle_error_irq(card, status_reg);
1594 1575
1595 /* clear interrupt sources */ 1576 /* clear interrupt sources */
1596 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg); 1577 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
@@ -1831,34 +1812,40 @@ spider_net_setup_phy(struct spider_net_card *card)
1831/** 1812/**
1832 * spider_net_download_firmware - loads firmware into the adapter 1813 * spider_net_download_firmware - loads firmware into the adapter
1833 * @card: card structure 1814 * @card: card structure
1834 * @firmware: firmware pointer 1815 * @firmware_ptr: pointer to firmware data
1835 * 1816 *
1836 * spider_net_download_firmware loads the firmware opened by 1817 * spider_net_download_firmware loads the firmware data into the
1837	 * spider_net_init_firmware into the adapter.	1818	 * adapter. It assumes the length etc. are correct.
1838 */ 1819 */
1839static void 1820static int
1840spider_net_download_firmware(struct spider_net_card *card, 1821spider_net_download_firmware(struct spider_net_card *card,
1841 const struct firmware *firmware) 1822 u8 *firmware_ptr)
1842{ 1823{
1843 int sequencer, i; 1824 int sequencer, i;
1844 u32 *fw_ptr = (u32 *)firmware->data; 1825 u32 *fw_ptr = (u32 *)firmware_ptr;
1845 1826
1846 /* stop sequencers */ 1827 /* stop sequencers */
1847 spider_net_write_reg(card, SPIDER_NET_GSINIT, 1828 spider_net_write_reg(card, SPIDER_NET_GSINIT,
1848 SPIDER_NET_STOP_SEQ_VALUE); 1829 SPIDER_NET_STOP_SEQ_VALUE);
1849 1830
1850 for (sequencer = 0; sequencer < 6; sequencer++) { 1831 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
1832 sequencer++) {
1851 spider_net_write_reg(card, 1833 spider_net_write_reg(card,
1852 SPIDER_NET_GSnPRGADR + sequencer * 8, 0); 1834 SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
1853 for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) { 1835 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
1854 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1836 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1855 sequencer * 8, *fw_ptr); 1837 sequencer * 8, *fw_ptr);
1856 fw_ptr++; 1838 fw_ptr++;
1857 } 1839 }
1858 } 1840 }
1859 1841
1842 if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
1843 return -EIO;
1844
1860 spider_net_write_reg(card, SPIDER_NET_GSINIT, 1845 spider_net_write_reg(card, SPIDER_NET_GSINIT,
1861 SPIDER_NET_RUN_SEQ_VALUE); 1846 SPIDER_NET_RUN_SEQ_VALUE);
1847
1848 return 0;
1862} 1849}
1863 1850
1864/** 1851/**
@@ -1890,31 +1877,53 @@ spider_net_download_firmware(struct spider_net_card *card,
1890static int 1877static int
1891spider_net_init_firmware(struct spider_net_card *card) 1878spider_net_init_firmware(struct spider_net_card *card)
1892{ 1879{
1893 const struct firmware *firmware; 1880 struct firmware *firmware = NULL;
1894 int err = -EIO; 1881 struct device_node *dn;
1882 u8 *fw_prop = NULL;
1883 int err = -ENOENT;
1884 int fw_size;
1885
1886 if (request_firmware((const struct firmware **)&firmware,
1887 SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
1888 if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) &&
1889 netif_msg_probe(card) ) {
1890 pr_err("Incorrect size of spidernet firmware in " \
1891 "filesystem. Looking in host firmware...\n");
1892 goto try_host_fw;
1893 }
1894 err = spider_net_download_firmware(card, firmware->data);
1895 1895
1896 if (request_firmware(&firmware, 1896 release_firmware(firmware);
1897 SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) < 0) { 1897 if (err)
1898 if (netif_msg_probe(card)) 1898 goto try_host_fw;
1899 pr_err("Couldn't read in sequencer data file %s.\n",
1900 SPIDER_NET_FIRMWARE_NAME);
1901 firmware = NULL;
1902 goto out;
1903 }
1904 1899
1905 if (firmware->size != 6 * SPIDER_NET_FIRMWARE_LEN * sizeof(u32)) { 1900 goto done;
1906 if (netif_msg_probe(card))
1907 pr_err("Invalid size of sequencer data file %s.\n",
1908 SPIDER_NET_FIRMWARE_NAME);
1909 goto out;
1910 } 1901 }
1911 1902
1912 spider_net_download_firmware(card, firmware); 1903try_host_fw:
1904 dn = pci_device_to_OF_node(card->pdev);
1905 if (!dn)
1906 goto out_err;
1913 1907
1914 err = 0; 1908 fw_prop = (u8 *)get_property(dn, "firmware", &fw_size);
1915out: 1909 if (!fw_prop)
1916 release_firmware(firmware); 1910 goto out_err;
1911
1912 if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) &&
1913 netif_msg_probe(card) ) {
1914 pr_err("Incorrect size of spidernet firmware in " \
1915 "host firmware\n");
1916 goto done;
1917 }
1917 1918
1919 err = spider_net_download_firmware(card, fw_prop);
1920
1921done:
1922 return err;
1923out_err:
1924 if (netif_msg_probe(card))
1925 pr_err("Couldn't find spidernet firmware in filesystem " \
1926 "or host firmware\n");
1918 return err; 1927 return err;
1919} 1928}
1920 1929
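Condensed, the new lookup order in spider_net_init_firmware is: filesystem image first, then the firmware property of the device's Open Firmware node. A trimmed sketch with the log messages and netif_msg checks omitted (not a verbatim extract):

	const struct firmware *fw;
	struct device_node *dn;
	u8 *prop;
	int size, err = -ENOENT;

	if (request_firmware(&fw, SPIDER_NET_FIRMWARE_NAME,
			     &card->pdev->dev) == 0) {
		if (fw->size == SPIDER_NET_FIRMWARE_LEN)
			err = spider_net_download_firmware(card,
							   (u8 *)fw->data);
		release_firmware(fw);
		if (!err)
			return 0;
	}

	/* fall back to firmware embedded in the device tree */
	dn = pci_device_to_OF_node(card->pdev);
	prop = dn ? (u8 *)get_property(dn, "firmware", &size) : NULL;
	if (prop && size == SPIDER_NET_FIRMWARE_LEN)
		err = spider_net_download_firmware(card, prop);

	return err;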
@@ -1934,10 +1943,11 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
1934 SPIDER_NET_CKRCTRL_RUN_VALUE); 1943 SPIDER_NET_CKRCTRL_RUN_VALUE);
1935 1944
1936 /* empty sequencer data */ 1945 /* empty sequencer data */
1937 for (sequencer = 0; sequencer < 6; sequencer++) { 1946 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
1947 sequencer++) {
1938 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1948 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1939 sequencer * 8, 0x0); 1949 sequencer * 8, 0x0);
1940 for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) { 1950 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
1941 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1951 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1942 sequencer * 8, 0x0); 1952 sequencer * 8, 0x0);
1943 } 1953 }
@@ -2061,7 +2071,15 @@ spider_net_setup_netdev(struct spider_net_card *card)
2061 SET_NETDEV_DEV(netdev, &card->pdev->dev); 2071 SET_NETDEV_DEV(netdev, &card->pdev->dev);
2062 2072
2063 pci_set_drvdata(card->pdev, netdev); 2073 pci_set_drvdata(card->pdev, netdev);
2064 spin_lock_init(&card->intmask_lock); 2074
2075 atomic_set(&card->tx_chain_release,0);
2076 card->rxram_full_tl.data = (unsigned long) card;
2077 card->rxram_full_tl.func =
2078 (void (*)(unsigned long)) spider_net_handle_rxram_full;
2079 init_timer(&card->tx_timer);
2080 card->tx_timer.function =
2081 (void (*)(unsigned long)) spider_net_cleanup_tx_ring;
2082 card->tx_timer.data = (unsigned long) card;
2065 netdev->irq = card->pdev->irq; 2083 netdev->irq = card->pdev->irq;
2066 2084
2067 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; 2085 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 22b2f2347351..5922b529a048 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -33,25 +33,32 @@ extern struct ethtool_ops spider_net_ethtool_ops;
33 33
34extern char spider_net_driver_name[]; 34extern char spider_net_driver_name[];
35 35
36#define SPIDER_NET_MAX_MTU 2308 36#define SPIDER_NET_MAX_FRAME 2312
37#define SPIDER_NET_MAX_MTU 2294
37#define SPIDER_NET_MIN_MTU 64 38#define SPIDER_NET_MIN_MTU 64
38 39
39#define SPIDER_NET_RXBUF_ALIGN 128 40#define SPIDER_NET_RXBUF_ALIGN 128
40 41
41#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 64 42#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 256
42#define SPIDER_NET_RX_DESCRIPTORS_MIN 16 43#define SPIDER_NET_RX_DESCRIPTORS_MIN 16
43#define SPIDER_NET_RX_DESCRIPTORS_MAX 256 44#define SPIDER_NET_RX_DESCRIPTORS_MAX 512
44 45
45#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 64 46#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 256
46#define SPIDER_NET_TX_DESCRIPTORS_MIN 16 47#define SPIDER_NET_TX_DESCRIPTORS_MIN 16
47#define SPIDER_NET_TX_DESCRIPTORS_MAX 256 48#define SPIDER_NET_TX_DESCRIPTORS_MAX 512
49
50#define SPIDER_NET_TX_TIMER 20
48 51
49#define SPIDER_NET_RX_CSUM_DEFAULT 1 52#define SPIDER_NET_RX_CSUM_DEFAULT 1
50 53
51#define SPIDER_NET_WATCHDOG_TIMEOUT 5*HZ 54#define SPIDER_NET_WATCHDOG_TIMEOUT 50*HZ
52#define SPIDER_NET_NAPI_WEIGHT 64 55#define SPIDER_NET_NAPI_WEIGHT 64
53 56
54#define SPIDER_NET_FIRMWARE_LEN 1024 57#define SPIDER_NET_FIRMWARE_SEQS 6
58#define SPIDER_NET_FIRMWARE_SEQWORDS 1024
59#define SPIDER_NET_FIRMWARE_LEN (SPIDER_NET_FIRMWARE_SEQS * \
60 SPIDER_NET_FIRMWARE_SEQWORDS * \
61 sizeof(u32))
55#define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin" 62#define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin"
56 63
57/** spider_net SMMIO registers */ 64/** spider_net SMMIO registers */
@@ -142,14 +149,12 @@ extern char spider_net_driver_name[];
142/** SCONFIG registers */ 149/** SCONFIG registers */
143#define SPIDER_NET_SCONFIG_IOACTE 0x00002810 150#define SPIDER_NET_SCONFIG_IOACTE 0x00002810
144 151
145/** hardcoded register values */ 152/** interrupt mask registers */
146#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe3ff 153#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7
147#define SPIDER_NET_INT1_MASK_VALUE 0xffffffff 154#define SPIDER_NET_INT1_MASK_VALUE 0xffff7ff7
148/* no MAC aborts -> auto retransmission */ 155/* no MAC aborts -> auto retransmission */
149#define SPIDER_NET_INT2_MASK_VALUE 0xfffffff1 156#define SPIDER_NET_INT2_MASK_VALUE 0xffef7ff1
150 157
151/* clear counter when interrupt sources are cleared
152#define SPIDER_NET_FRAMENUM_VALUE 0x0001f001 */
153/* we rely on flagged descriptor interrupts */ 158/* we rely on flagged descriptor interrupts */
154#define SPIDER_NET_FRAMENUM_VALUE 0x00000000 159#define SPIDER_NET_FRAMENUM_VALUE 0x00000000
155/* set this first, then the FRAMENUM_VALUE */ 160/* set this first, then the FRAMENUM_VALUE */
@@ -168,7 +173,7 @@ extern char spider_net_driver_name[];
168#if 0 173#if 0
169#define SPIDER_NET_WOL_VALUE 0x00000000 174#define SPIDER_NET_WOL_VALUE 0x00000000
170#endif 175#endif
171#define SPIDER_NET_IPSECINIT_VALUE 0x00f000f8 176#define SPIDER_NET_IPSECINIT_VALUE 0x6f716f71
172 177
173/* pause frames: automatic, no upper retransmission count */ 178/* pause frames: automatic, no upper retransmission count */
174/* outside loopback mode: ETOMOD signal doesn't matter, not connected */	179/* outside loopback mode: ETOMOD signal doesn't matter, not connected */
@@ -318,6 +323,10 @@ enum spider_net_int2_status {
318#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) | \ 323#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) | \
319 (1 << SPIDER_NET_GRMFLLINT) ) 324 (1 << SPIDER_NET_GRMFLLINT) )
320 325
326#define SPIDER_NET_ERRINT ( 0xffffffff & \
327 (~SPIDER_NET_TXINT) & \
328 (~SPIDER_NET_RXINT) )
329
321#define SPIDER_NET_GPREXEC 0x80000000 330#define SPIDER_NET_GPREXEC 0x80000000
322#define SPIDER_NET_GPRDAT_MASK 0x0000ffff 331#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
323 332
@@ -358,9 +367,6 @@ enum spider_net_int2_status {
358/* descr ready, descr is in middle of chain, get interrupt on completion */ 367/* descr ready, descr is in middle of chain, get interrupt on completion */
359#define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000 368#define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000
360 369
361/* multicast is no problem */
362#define SPIDER_NET_DATA_ERROR_MASK 0xffffbfff
363
364enum spider_net_descr_status { 370enum spider_net_descr_status {
365 SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */ 371 SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */
366 SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */ 372 SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */
@@ -373,9 +379,9 @@ enum spider_net_descr_status {
373 379
374struct spider_net_descr { 380struct spider_net_descr {
375 /* as defined by the hardware */ 381 /* as defined by the hardware */
376 dma_addr_t buf_addr; 382 u32 buf_addr;
377 u32 buf_size; 383 u32 buf_size;
378 dma_addr_t next_descr_addr; 384 u32 next_descr_addr;
379 u32 dmac_cmd_status; 385 u32 dmac_cmd_status;
380 u32 result_size; 386 u32 result_size;
381 u32 valid_size; /* all zeroes for tx */ 387 u32 valid_size; /* all zeroes for tx */
@@ -384,7 +390,7 @@ struct spider_net_descr {
384 390
385 /* used in the driver */ 391 /* used in the driver */
386 struct sk_buff *skb; 392 struct sk_buff *skb;
387 dma_addr_t bus_addr; 393 u32 bus_addr;
388 struct spider_net_descr *next; 394 struct spider_net_descr *next;
389 struct spider_net_descr *prev; 395 struct spider_net_descr *prev;
390} __attribute__((aligned(32))); 396} __attribute__((aligned(32)));
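Shrinking the hardware-visible fields from dma_addr_t to u32 matters on 64-bit powerpc, where dma_addr_t is 8 bytes wide: the wider type would push the chip-defined part of the descriptor past the 32-byte SPIDER_NET_DESCR_SIZE layout the device expects (a rationale inferred from the structure layout; the patch itself does not state it).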
@@ -396,21 +402,21 @@ struct spider_net_descr_chain {
396}; 402};
397 403
398/* descriptor data_status bits */ 404/* descriptor data_status bits */
399#define SPIDER_NET_RXIPCHK 29 405#define SPIDER_NET_RX_IPCHK 29
400#define SPIDER_NET_TCPUDPIPCHK 28 406#define SPIDER_NET_RX_TCPCHK 28
401#define SPIDER_NET_DATA_STATUS_CHK_MASK (1 << SPIDER_NET_RXIPCHK | \
402 1 << SPIDER_NET_TCPUDPIPCHK)
403
404#define SPIDER_NET_VLAN_PACKET 21 407#define SPIDER_NET_VLAN_PACKET 21
408#define SPIDER_NET_DATA_STATUS_CKSUM_MASK ( (1 << SPIDER_NET_RX_IPCHK) | \
409 (1 << SPIDER_NET_RX_TCPCHK) )
405 410
406/* descriptor data_error bits */ 411/* descriptor data_error bits */
407#define SPIDER_NET_RXIPCHKERR 27 412#define SPIDER_NET_RX_IPCHKERR 27
408#define SPIDER_NET_RXTCPCHKERR 26 413#define SPIDER_NET_RX_RXTCPCHKERR 28
409#define SPIDER_NET_DATA_ERROR_CHK_MASK (1 << SPIDER_NET_RXIPCHKERR | \ 414
410 1 << SPIDER_NET_RXTCPCHKERR) 415#define SPIDER_NET_DATA_ERR_CKSUM_MASK (1 << SPIDER_NET_RX_IPCHKERR)
411 416
412/* the cases we don't pass the packet to the stack */ 417/* the cases we don't pass the packet to the stack.
413#define SPIDER_NET_DESTROY_RX_FLAGS	0x70138000	418 * 701b8000 would be correct, but every packet gets that flag */
419#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000
414 420
415#define SPIDER_NET_DESCR_SIZE 32 421#define SPIDER_NET_DESCR_SIZE 32
416 422
@@ -445,13 +451,16 @@ struct spider_net_card {
445 451
446 struct spider_net_descr_chain tx_chain; 452 struct spider_net_descr_chain tx_chain;
447 struct spider_net_descr_chain rx_chain; 453 struct spider_net_descr_chain rx_chain;
448 spinlock_t chain_lock; 454 atomic_t rx_chain_refill;
455 atomic_t tx_chain_release;
449 456
450 struct net_device_stats netdev_stats; 457 struct net_device_stats netdev_stats;
451 458
452 struct spider_net_options options; 459 struct spider_net_options options;
453 460
454 spinlock_t intmask_lock; 461 spinlock_t intmask_lock;
462 struct tasklet_struct rxram_full_tl;
463 struct timer_list tx_timer;
455 464
456 struct work_struct tx_timeout_task; 465 struct work_struct tx_timeout_task;
457 atomic_t tx_timeout_task_counter; 466 atomic_t tx_timeout_task_counter;
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c
index d42e60ba74ce..a5bb0b7633af 100644
--- a/drivers/net/spider_net_ethtool.c
+++ b/drivers/net/spider_net_ethtool.c
@@ -113,6 +113,23 @@ spider_net_ethtool_set_rx_csum(struct net_device *netdev, u32 n)
113 return 0; 113 return 0;
114} 114}
115 115
116static uint32_t
117spider_net_ethtool_get_tx_csum(struct net_device *netdev)
118{
119 return (netdev->features & NETIF_F_HW_CSUM) != 0;
120}
121
122static int
123spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data)
124{
125 if (data)
126 netdev->features |= NETIF_F_HW_CSUM;
127 else
128 netdev->features &= ~NETIF_F_HW_CSUM;
129
130 return 0;
131}
132
116struct ethtool_ops spider_net_ethtool_ops = { 133struct ethtool_ops spider_net_ethtool_ops = {
117 .get_settings = spider_net_ethtool_get_settings, 134 .get_settings = spider_net_ethtool_get_settings,
118 .get_drvinfo = spider_net_ethtool_get_drvinfo, 135 .get_drvinfo = spider_net_ethtool_get_drvinfo,
@@ -122,5 +139,7 @@ struct ethtool_ops spider_net_ethtool_ops = {
122 .nway_reset = spider_net_ethtool_nway_reset, 139 .nway_reset = spider_net_ethtool_nway_reset,
123 .get_rx_csum = spider_net_ethtool_get_rx_csum, 140 .get_rx_csum = spider_net_ethtool_get_rx_csum,
124 .set_rx_csum = spider_net_ethtool_set_rx_csum, 141 .set_rx_csum = spider_net_ethtool_set_rx_csum,
142 .get_tx_csum = spider_net_ethtool_get_tx_csum,
143 .set_tx_csum = spider_net_ethtool_set_tx_csum,
125}; 144};
126 145
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index eb86b059809b..f2d1dafde087 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
69 69
70#define DRV_MODULE_NAME "tg3" 70#define DRV_MODULE_NAME "tg3"
71#define PFX DRV_MODULE_NAME ": " 71#define PFX DRV_MODULE_NAME ": "
72#define DRV_MODULE_VERSION "3.47" 72#define DRV_MODULE_VERSION "3.48"
73#define DRV_MODULE_RELDATE "Dec 28, 2005" 73#define DRV_MODULE_RELDATE "Jan 16, 2006"
74 74
75#define TG3_DEF_MAC_MODE 0 75#define TG3_DEF_MAC_MODE 0
76#define TG3_DEF_RX_MODE 0 76#define TG3_DEF_RX_MODE 0
@@ -1325,10 +1325,12 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
1325 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); 1325 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1326 tw32(0x7d00, val); 1326 tw32(0x7d00, val);
1327 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { 1327 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1328 tg3_nvram_lock(tp); 1328 int err;
1329
1330 err = tg3_nvram_lock(tp);
1329 tg3_halt_cpu(tp, RX_CPU_BASE); 1331 tg3_halt_cpu(tp, RX_CPU_BASE);
1330 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR0); 1332 if (!err)
1331 tg3_nvram_unlock(tp); 1333 tg3_nvram_unlock(tp);
1332 } 1334 }
1333 } 1335 }
1334 1336
@@ -4193,14 +4195,19 @@ static int tg3_nvram_lock(struct tg3 *tp)
4193 if (tp->tg3_flags & TG3_FLAG_NVRAM) { 4195 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4194 int i; 4196 int i;
4195 4197
4196 tw32(NVRAM_SWARB, SWARB_REQ_SET1); 4198 if (tp->nvram_lock_cnt == 0) {
4197 for (i = 0; i < 8000; i++) { 4199 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4198 if (tr32(NVRAM_SWARB) & SWARB_GNT1) 4200 for (i = 0; i < 8000; i++) {
4199 break; 4201 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4200 udelay(20); 4202 break;
4203 udelay(20);
4204 }
4205 if (i == 8000) {
4206 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4207 return -ENODEV;
4208 }
4201 } 4209 }
4202 if (i == 8000) 4210 tp->nvram_lock_cnt++;
4203 return -ENODEV;
4204 } 4211 }
4205 return 0; 4212 return 0;
4206} 4213}
@@ -4208,8 +4215,12 @@ static int tg3_nvram_lock(struct tg3 *tp)
4208/* tp->lock is held. */ 4215/* tp->lock is held. */
4209static void tg3_nvram_unlock(struct tg3 *tp) 4216static void tg3_nvram_unlock(struct tg3 *tp)
4210{ 4217{
4211 if (tp->tg3_flags & TG3_FLAG_NVRAM) 4218 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4212 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); 4219 if (tp->nvram_lock_cnt > 0)
4220 tp->nvram_lock_cnt--;
4221 if (tp->nvram_lock_cnt == 0)
4222 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4223 }
4213} 4224}
4214 4225
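Together the two functions now form a counted lock: every caller holds tp->lock, so a plain int suffices, the NVRAM_SWARB arbitration register is touched only on the 0->1 and 1->0 transitions, and a caller whose lock attempt failed must no longer issue the unlock. The discipline in isolation, with illustrative helper names and the TG3_FLAG_NVRAM checks omitted:

	static int counted_lock(struct tg3 *tp)		/* tp->lock held */
	{
		if (tp->nvram_lock_cnt == 0 &&
		    hw_request_arbitration(tp))		/* may time out */
			return -ENODEV;
		tp->nvram_lock_cnt++;
		return 0;
	}

	static void counted_unlock(struct tg3 *tp)	/* tp->lock held */
	{
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			hw_release_arbitration(tp);
	}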
4215/* tp->lock is held. */ 4226/* tp->lock is held. */
@@ -4320,8 +4331,13 @@ static int tg3_chip_reset(struct tg3 *tp)
4320 void (*write_op)(struct tg3 *, u32, u32); 4331 void (*write_op)(struct tg3 *, u32, u32);
4321 int i; 4332 int i;
4322 4333
4323 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) 4334 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4324 tg3_nvram_lock(tp); 4335 tg3_nvram_lock(tp);
4336 /* No matching tg3_nvram_unlock() after this because
4337 * chip reset below will undo the nvram lock.
4338 */
4339 tp->nvram_lock_cnt = 0;
4340 }
4325 4341
4326 /* 4342 /*
4327 * We must avoid the readl() that normally takes place. 4343 * We must avoid the readl() that normally takes place.
@@ -4717,6 +4733,10 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4717 (offset == RX_CPU_BASE ? "RX" : "TX")); 4733 (offset == RX_CPU_BASE ? "RX" : "TX"));
4718 return -ENODEV; 4734 return -ENODEV;
4719 } 4735 }
4736
4737 /* Clear firmware's nvram arbitration. */
4738 if (tp->tg3_flags & TG3_FLAG_NVRAM)
4739 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
4720 return 0; 4740 return 0;
4721} 4741}
4722 4742
@@ -4736,7 +4756,7 @@ struct fw_info {
4736static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base, 4756static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4737 int cpu_scratch_size, struct fw_info *info) 4757 int cpu_scratch_size, struct fw_info *info)
4738{ 4758{
4739 int err, i; 4759 int err, lock_err, i;
4740 void (*write_op)(struct tg3 *, u32, u32); 4760 void (*write_op)(struct tg3 *, u32, u32);
4741 4761
4742 if (cpu_base == TX_CPU_BASE && 4762 if (cpu_base == TX_CPU_BASE &&
@@ -4755,9 +4775,10 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
4755 /* It is possible that bootcode is still loading at this point. 4775 /* It is possible that bootcode is still loading at this point.
4756 * Get the nvram lock first before halting the cpu. 4776 * Get the nvram lock first before halting the cpu.
4757 */ 4777 */
4758 tg3_nvram_lock(tp); 4778 lock_err = tg3_nvram_lock(tp);
4759 err = tg3_halt_cpu(tp, cpu_base); 4779 err = tg3_halt_cpu(tp, cpu_base);
4760 tg3_nvram_unlock(tp); 4780 if (!lock_err)
4781 tg3_nvram_unlock(tp);
4761 if (err) 4782 if (err)
4762 goto out; 4783 goto out;
4763 4784
@@ -8182,7 +8203,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8182 data[1] = 1; 8203 data[1] = 1;
8183 } 8204 }
8184 if (etest->flags & ETH_TEST_FL_OFFLINE) { 8205 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8185 int irq_sync = 0; 8206 int err, irq_sync = 0;
8186 8207
8187 if (netif_running(dev)) { 8208 if (netif_running(dev)) {
8188 tg3_netif_stop(tp); 8209 tg3_netif_stop(tp);
@@ -8192,11 +8213,12 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8192 tg3_full_lock(tp, irq_sync); 8213 tg3_full_lock(tp, irq_sync);
8193 8214
8194 tg3_halt(tp, RESET_KIND_SUSPEND, 1); 8215 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8195 tg3_nvram_lock(tp); 8216 err = tg3_nvram_lock(tp);
8196 tg3_halt_cpu(tp, RX_CPU_BASE); 8217 tg3_halt_cpu(tp, RX_CPU_BASE);
8197 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 8218 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8198 tg3_halt_cpu(tp, TX_CPU_BASE); 8219 tg3_halt_cpu(tp, TX_CPU_BASE);
8199 tg3_nvram_unlock(tp); 8220 if (!err)
8221 tg3_nvram_unlock(tp);
8200 8222
8201 if (tg3_test_registers(tp) != 0) { 8223 if (tg3_test_registers(tp) != 0) {
8202 etest->flags |= ETH_TEST_FL_FAILED; 8224 etest->flags |= ETH_TEST_FL_FAILED;
@@ -8588,7 +8610,11 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
8588 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { 8610 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8589 tp->tg3_flags |= TG3_FLAG_NVRAM; 8611 tp->tg3_flags |= TG3_FLAG_NVRAM;
8590 8612
8591 tg3_nvram_lock(tp); 8613 if (tg3_nvram_lock(tp)) {
8614 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
8615 "tg3_nvram_init failed.\n", tp->dev->name);
8616 return;
8617 }
8592 tg3_enable_nvram_access(tp); 8618 tg3_enable_nvram_access(tp);
8593 8619
8594 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) 8620 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
@@ -8686,7 +8712,9 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8686 if (offset > NVRAM_ADDR_MSK) 8712 if (offset > NVRAM_ADDR_MSK)
8687 return -EINVAL; 8713 return -EINVAL;
8688 8714
8689 tg3_nvram_lock(tp); 8715 ret = tg3_nvram_lock(tp);
8716 if (ret)
8717 return ret;
8690 8718
8691 tg3_enable_nvram_access(tp); 8719 tg3_enable_nvram_access(tp);
8692 8720
@@ -8785,10 +8813,6 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8785 8813
8786 offset = offset + (pagesize - page_off); 8814 offset = offset + (pagesize - page_off);
8787 8815
8788 /* Nvram lock released by tg3_nvram_read() above,
8789 * so need to get it again.
8790 */
8791 tg3_nvram_lock(tp);
8792 tg3_enable_nvram_access(tp); 8816 tg3_enable_nvram_access(tp);
8793 8817
8794 /* 8818 /*
@@ -8925,7 +8949,9 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8925 else { 8949 else {
8926 u32 grc_mode; 8950 u32 grc_mode;
8927 8951
8928 tg3_nvram_lock(tp); 8952 ret = tg3_nvram_lock(tp);
8953 if (ret)
8954 return ret;
8929 8955
8930 tg3_enable_nvram_access(tp); 8956 tg3_enable_nvram_access(tp);
8931 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 8957 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 890e1635996b..e8243305f0e8 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2275,6 +2275,7 @@ struct tg3 {
2275 dma_addr_t stats_mapping; 2275 dma_addr_t stats_mapping;
2276 struct work_struct reset_task; 2276 struct work_struct reset_task;
2277 2277
2278 int nvram_lock_cnt;
2278 u32 nvram_size; 2279 u32 nvram_size;
2279 u32 nvram_pagesize; 2280 u32 nvram_pagesize;
2280 u32 nvram_jedecnum; 2281 u32 nvram_jedecnum;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index ee866fd6957d..a4c7ae94614d 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -5668,13 +5668,13 @@ static int airo_set_freq(struct net_device *dev,
5668 int channel = fwrq->m; 5668 int channel = fwrq->m;
5669 /* We should do a better check than that, 5669 /* We should do a better check than that,
5670 * based on the card capability !!! */ 5670 * based on the card capability !!! */
5671 if((channel < 1) || (channel > 16)) { 5671 if((channel < 1) || (channel > 14)) {
5672 printk(KERN_DEBUG "%s: New channel value of %d is invalid!\n", dev->name, fwrq->m); 5672 printk(KERN_DEBUG "%s: New channel value of %d is invalid!\n", dev->name, fwrq->m);
5673 rc = -EINVAL; 5673 rc = -EINVAL;
5674 } else { 5674 } else {
5675 readConfigRid(local, 1); 5675 readConfigRid(local, 1);
5676 /* Yes ! We can set it !!! */ 5676 /* Yes ! We can set it !!! */
5677 local->config.channelSet = (u16)(channel - 1); 5677 local->config.channelSet = (u16) channel;
5678 set_bit (FLAG_COMMIT, &local->flags); 5678 set_bit (FLAG_COMMIT, &local->flags);
5679 } 5679 }
5680 } 5680 }
@@ -5692,6 +5692,7 @@ static int airo_get_freq(struct net_device *dev,
5692{ 5692{
5693 struct airo_info *local = dev->priv; 5693 struct airo_info *local = dev->priv;
5694 StatusRid status_rid; /* Card status info */ 5694 StatusRid status_rid; /* Card status info */
5695 int ch;
5695 5696
5696 readConfigRid(local, 1); 5697 readConfigRid(local, 1);
5697 if ((local->config.opmode & 0xFF) == MODE_STA_ESS) 5698 if ((local->config.opmode & 0xFF) == MODE_STA_ESS)
@@ -5699,16 +5700,14 @@ static int airo_get_freq(struct net_device *dev,
5699 else 5700 else
5700 readStatusRid(local, &status_rid, 1); 5701 readStatusRid(local, &status_rid, 1);
5701 5702
5702#ifdef WEXT_USECHANNELS 5703 ch = (int)status_rid.channel;
5703 fwrq->m = ((int)status_rid.channel) + 1; 5704 if((ch > 0) && (ch < 15)) {
5704 fwrq->e = 0; 5705 fwrq->m = frequency_list[ch - 1] * 100000;
5705#else
5706 {
5707 int f = (int)status_rid.channel;
5708 fwrq->m = frequency_list[f] * 100000;
5709 fwrq->e = 1; 5706 fwrq->e = 1;
5707 } else {
5708 fwrq->m = ch;
5709 fwrq->e = 0;
5710 } 5710 }
5711#endif
5712 5711
5713 return 0; 5712 return 0;
5714} 5713}
@@ -5783,7 +5782,7 @@ static int airo_get_essid(struct net_device *dev,
5783 /* If none, we may want to get the one that was set */ 5782 /* If none, we may want to get the one that was set */
5784 5783
5785 /* Push it out ! */ 5784 /* Push it out ! */
5786 dwrq->length = status_rid.SSIDlen + 1; 5785 dwrq->length = status_rid.SSIDlen;
5787 dwrq->flags = 1; /* active */ 5786 dwrq->flags = 1; /* active */
5788 5787
5789 return 0; 5788 return 0;
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index f0ccfef66445..98a76f10a0f7 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1718,11 +1718,11 @@ static int atmel_get_essid(struct net_device *dev,
1718 if (priv->new_SSID_size != 0) { 1718 if (priv->new_SSID_size != 0) {
1719 memcpy(extra, priv->new_SSID, priv->new_SSID_size); 1719 memcpy(extra, priv->new_SSID, priv->new_SSID_size);
1720 extra[priv->new_SSID_size] = '\0'; 1720 extra[priv->new_SSID_size] = '\0';
1721 dwrq->length = priv->new_SSID_size + 1; 1721 dwrq->length = priv->new_SSID_size;
1722 } else { 1722 } else {
1723 memcpy(extra, priv->SSID, priv->SSID_size); 1723 memcpy(extra, priv->SSID, priv->SSID_size);
1724 extra[priv->SSID_size] = '\0'; 1724 extra[priv->SSID_size] = '\0';
1725 dwrq->length = priv->SSID_size + 1; 1725 dwrq->length = priv->SSID_size;
1726 } 1726 }
1727 1727
1728 dwrq->flags = !priv->connect_to_any_BSS; /* active */ 1728 dwrq->flags = !priv->connect_to_any_BSS; /* active */
diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/hostap/Kconfig
index 56f41c714d38..c8f6286dd35f 100644
--- a/drivers/net/wireless/hostap/Kconfig
+++ b/drivers/net/wireless/hostap/Kconfig
@@ -26,11 +26,25 @@ config HOSTAP_FIRMWARE
26 depends on HOSTAP 26 depends on HOSTAP
27 ---help--- 27 ---help---
28 Configure Host AP driver to include support for firmware image 28 Configure Host AP driver to include support for firmware image
29 download. Current version supports only downloading to volatile, i.e., 29 download. This option by itself only enables downloading to the
30 RAM memory. Flash upgrade is not yet supported. 30 volatile memory, i.e. the card RAM. This option is required to
31 support cards that don't have firmware in flash, such as D-Link
32 DWL-520 rev E and D-Link DWL-650 rev P.
31 33
32 Firmware image downloading needs user space tool, prism2_srec. It is 34 Firmware image downloading needs a user space tool, prism2_srec.
33 available from http://hostap.epitest.fi/. 35 It is available from http://hostap.epitest.fi/.
36
37config HOSTAP_FIRMWARE_NVRAM
38 bool "Support for non-volatile firmware download"
39 depends on HOSTAP_FIRMWARE
40 ---help---
41 Allow Host AP driver to write firmware images to the non-volatile
42 card memory, i.e. flash memory that survives power cycling.
43 Enable this option if you want to be able to change card firmware
44 permanently.
45
46 Firmware image downloading needs a user space tool, prism2_srec.
47 It is available from http://hostap.epitest.fi/.
34 48
35config HOSTAP_PLX 49config HOSTAP_PLX
36 tristate "Host AP driver for Prism2/2.5/3 in PLX9052 PCI adaptors" 50 tristate "Host AP driver for Prism2/2.5/3 in PLX9052 PCI adaptors"
diff --git a/drivers/net/wireless/hostap/Makefile b/drivers/net/wireless/hostap/Makefile
index 353ccb93134b..b8e41a702c00 100644
--- a/drivers/net/wireless/hostap/Makefile
+++ b/drivers/net/wireless/hostap/Makefile
@@ -1,4 +1,5 @@
1hostap-y := hostap_main.o 1hostap-y := hostap_80211_rx.o hostap_80211_tx.o hostap_ap.o hostap_info.o \
2 hostap_ioctl.o hostap_main.o hostap_proc.o
2obj-$(CONFIG_HOSTAP) += hostap.o 3obj-$(CONFIG_HOSTAP) += hostap.o
3 4
4obj-$(CONFIG_HOSTAP_CS) += hostap_cs.o 5obj-$(CONFIG_HOSTAP_CS) += hostap_cs.o
diff --git a/drivers/net/wireless/hostap/hostap.h b/drivers/net/wireless/hostap/hostap.h
index 5fac89b8ce3a..5e63765219fe 100644
--- a/drivers/net/wireless/hostap/hostap.h
+++ b/drivers/net/wireless/hostap/hostap.h
@@ -1,6 +1,15 @@
1#ifndef HOSTAP_H 1#ifndef HOSTAP_H
2#define HOSTAP_H 2#define HOSTAP_H
3 3
4#include <linux/ethtool.h>
5
6#include "hostap_wlan.h"
7#include "hostap_ap.h"
8
9static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
10 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
11#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0]))
12
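FREQ_COUNT evaluates to 14 here: entries 0-12 are the 5 MHz-spaced 802.11b/g channels 1-13 (2412-2472 MHz) and entry 13 is channel 14 at 2484 MHz.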
4/* hostap.c */ 13/* hostap.c */
5 14
6extern struct proc_dir_entry *hostap_proc; 15extern struct proc_dir_entry *hostap_proc;
@@ -40,6 +49,26 @@ int prism2_update_comms_qual(struct net_device *dev);
40int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype, 49int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype,
41 u8 *body, size_t bodylen); 50 u8 *body, size_t bodylen);
42int prism2_sta_deauth(local_info_t *local, u16 reason); 51int prism2_sta_deauth(local_info_t *local, u16 reason);
52int prism2_wds_add(local_info_t *local, u8 *remote_addr,
53 int rtnl_locked);
54int prism2_wds_del(local_info_t *local, u8 *remote_addr,
55 int rtnl_locked, int do_not_remove);
56
57
58/* hostap_ap.c */
59
60int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac);
61int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac);
62void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
63int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac);
64void ap_control_kickall(struct ap_data *ap);
65void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
66 struct ieee80211_crypt_data ***crypt);
67int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
68 struct iw_quality qual[], int buf_size,
69 int aplist);
70int prism2_ap_translate_scan(struct net_device *dev, char *buffer);
71int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param);
43 72
44 73
45/* hostap_proc.c */ 74/* hostap_proc.c */
@@ -54,4 +83,12 @@ void hostap_info_init(local_info_t *local);
54void hostap_info_process(local_info_t *local, struct sk_buff *skb); 83void hostap_info_process(local_info_t *local, struct sk_buff *skb);
55 84
56 85
86/* hostap_ioctl.c */
87
88extern const struct iw_handler_def hostap_iw_handler_def;
89extern struct ethtool_ops prism2_ethtool_ops;
90
91int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
92
93
57#endif /* HOSTAP_H */ 94#endif /* HOSTAP_H */
diff --git a/drivers/net/wireless/hostap/hostap_80211.h b/drivers/net/wireless/hostap/hostap_80211.h
index bf506f50d722..1fc72fe511e9 100644
--- a/drivers/net/wireless/hostap/hostap_80211.h
+++ b/drivers/net/wireless/hostap/hostap_80211.h
@@ -1,6 +1,9 @@
1#ifndef HOSTAP_80211_H 1#ifndef HOSTAP_80211_H
2#define HOSTAP_80211_H 2#define HOSTAP_80211_H
3 3
4#include <linux/types.h>
5#include <net/ieee80211_crypt.h>
6
4struct hostap_ieee80211_mgmt { 7struct hostap_ieee80211_mgmt {
5 u16 frame_control; 8 u16 frame_control;
6 u16 duration; 9 u16 duration;
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index 4b13b76425c1..7e04dc94b3bc 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -1,7 +1,18 @@
1#include <linux/etherdevice.h> 1#include <linux/etherdevice.h>
2#include <net/ieee80211_crypt.h>
2 3
3#include "hostap_80211.h" 4#include "hostap_80211.h"
4#include "hostap.h" 5#include "hostap.h"
6#include "hostap_ap.h"
7
8/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
9/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
10static unsigned char rfc1042_header[] =
11{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
12/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
13static unsigned char bridge_tunnel_header[] =
14{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
15/* No encapsulation header if EtherType < 0x600 (=length) */
5 16
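On the wire the SNAP header is immediately followed by the 16-bit EtherType, so an IPv4 frame (EtherType 0x0800) decapsulated here begins aa aa 03 00 00 00 08 00; values below 0x600 are 802.3 length fields and carry no SNAP header at all.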
6void hostap_dump_rx_80211(const char *name, struct sk_buff *skb, 17void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
7 struct hostap_80211_rx_status *rx_stats) 18 struct hostap_80211_rx_status *rx_stats)
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index 9d24f8a38ac5..4a85e63906f1 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -1,3 +1,18 @@
1#include "hostap_80211.h"
2#include "hostap_common.h"
3#include "hostap_wlan.h"
4#include "hostap.h"
5#include "hostap_ap.h"
6
7/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
8/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
9static unsigned char rfc1042_header[] =
10{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
11/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
12static unsigned char bridge_tunnel_header[] =
13{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
14/* No encapsulation header if EtherType < 0x600 (=length) */
15
1void hostap_dump_tx_80211(const char *name, struct sk_buff *skb) 16void hostap_dump_tx_80211(const char *name, struct sk_buff *skb)
2{ 17{
3 struct ieee80211_hdr_4addr *hdr; 18 struct ieee80211_hdr_4addr *hdr;
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 9da94ab7f05f..753a1de6664b 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -16,6 +16,14 @@
  * (8802.11: 5.5)
  */
 
+#include <linux/proc_fs.h>
+#include <linux/delay.h>
+#include <linux/random.h>
+
+#include "hostap_wlan.h"
+#include "hostap.h"
+#include "hostap_ap.h"
+
 static int other_ap_policy[MAX_PARM_DEVICES] = { AP_OTHER_AP_SKIP_ALL,
						 DEF_INTS };
 module_param_array(other_ap_policy, int, NULL, 0444);
@@ -360,8 +368,7 @@ static int ap_control_proc_read(char *page, char **start, off_t off,
 }
 
 
-static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
-			      u8 *mac)
+int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
 {
 	struct mac_entry *entry;
 
@@ -380,8 +387,7 @@ static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
 }
 
 
-static int ap_control_del_mac(struct mac_restrictions *mac_restrictions,
-			      u8 *mac)
+int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
 {
 	struct list_head *ptr;
 	struct mac_entry *entry;
@@ -433,7 +439,7 @@ static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions,
 }
 
 
-static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
+void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
 {
 	struct list_head *ptr, *n;
 	struct mac_entry *entry;
@@ -454,8 +460,7 @@ static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
 }
 
 
-static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
-			       u8 *mac)
+int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac)
 {
 	struct sta_info *sta;
 	u16 resp;
@@ -486,7 +491,7 @@ static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
 #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
 
 
-static void ap_control_kickall(struct ap_data *ap)
+void ap_control_kickall(struct ap_data *ap)
 {
 	struct list_head *ptr, *n;
 	struct sta_info *sta;
@@ -2321,9 +2326,9 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
 }
 
 
-static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
-				  struct iw_quality qual[], int buf_size,
-				  int aplist)
+int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
+			   struct iw_quality qual[], int buf_size,
+			   int aplist)
 {
 	struct ap_data *ap = local->ap;
 	struct list_head *ptr;
@@ -2363,7 +2368,7 @@ static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
 
 /* Translate our list of Access Points & Stations to a card independant
  * format that the Wireless Tools will understand - Jean II */
-static int prism2_ap_translate_scan(struct net_device *dev, char *buffer)
+int prism2_ap_translate_scan(struct net_device *dev, char *buffer)
 {
 	struct hostap_interface *iface;
 	local_info_t *local;
@@ -2608,8 +2613,7 @@ static int prism2_hostapd_sta_clear_stats(struct ap_data *ap,
 }
 
 
-static int prism2_hostapd(struct ap_data *ap,
-			  struct prism2_hostapd_param *param)
+int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param)
 {
 	switch (param->cmd) {
 	case PRISM2_HOSTAPD_FLUSH:
@@ -3207,8 +3211,8 @@ void hostap_update_rates(local_info_t *local)
 }
 
 
-static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
-				struct ieee80211_crypt_data ***crypt)
+void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
+			 struct ieee80211_crypt_data ***crypt)
 {
 	struct sta_info *sta;
 
diff --git a/drivers/net/wireless/hostap/hostap_ap.h b/drivers/net/wireless/hostap/hostap_ap.h
index 6d00df69c2e3..2fa2452b6b07 100644
--- a/drivers/net/wireless/hostap/hostap_ap.h
+++ b/drivers/net/wireless/hostap/hostap_ap.h
@@ -1,6 +1,8 @@
 #ifndef HOSTAP_AP_H
 #define HOSTAP_AP_H
 
+#include "hostap_80211.h"
+
 /* AP data structures for STAs */
 
 /* maximum number of frames to buffer per STA */
diff --git a/drivers/net/wireless/hostap/hostap_common.h b/drivers/net/wireless/hostap/hostap_common.h
index 6f4fa9dc308f..01624005d808 100644
--- a/drivers/net/wireless/hostap/hostap_common.h
+++ b/drivers/net/wireless/hostap/hostap_common.h
@@ -1,6 +1,9 @@
 #ifndef HOSTAP_COMMON_H
 #define HOSTAP_COMMON_H
 
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
 #define BIT(x) (1 << (x))
 
 #define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
diff --git a/drivers/net/wireless/hostap/hostap_config.h b/drivers/net/wireless/hostap/hostap_config.h
index 7ed3425d08c1..c090a5aebb58 100644
--- a/drivers/net/wireless/hostap/hostap_config.h
+++ b/drivers/net/wireless/hostap/hostap_config.h
@@ -21,15 +21,10 @@
 #define PRISM2_DOWNLOAD_SUPPORT
 #endif
 
-#ifdef PRISM2_DOWNLOAD_SUPPORT
-/* Allow writing firmware images into flash, i.e., to non-volatile storage.
- * Before you enable this option, you should make absolutely sure that you are
- * using prism2_srec utility that comes with THIS version of the driver!
- * In addition, please note that it is possible to kill your card with
- * non-volatile download if you are using incorrect image. This feature has not
- * been fully tested, so please be careful with it. */
-/* #define PRISM2_NON_VOLATILE_DOWNLOAD */
-#endif /* PRISM2_DOWNLOAD_SUPPORT */
+/* Allow kernel configuration to enable non-volatile download support. */
+#ifdef CONFIG_HOSTAP_FIRMWARE_NVRAM
+#define PRISM2_NON_VOLATILE_DOWNLOAD
+#endif
 
 /* Save low-level I/O for debugging. This should not be enabled in normal use.
  */
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 5aa998fdf1c4..50f72d831cf4 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -1,5 +1,8 @@
 /* Host AP driver Info Frame processing (part of hostap.o module) */
 
+#include "hostap_wlan.h"
+#include "hostap.h"
+#include "hostap_ap.h"
 
 /* Called only as a tasklet (software IRQ) */
 static void prism2_info_commtallies16(local_info_t *local, unsigned char *buf,
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 2617d70bcda9..f3e0ce1ee037 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -1,11 +1,13 @@
 /* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */
 
-#ifdef in_atomic
-/* Get kernel_locked() for in_atomic() */
+#include <linux/types.h>
 #include <linux/smp_lock.h>
-#endif
 #include <linux/ethtool.h>
+#include <net/ieee80211_crypt.h>
 
+#include "hostap_wlan.h"
+#include "hostap.h"
+#include "hostap_ap.h"
 
 static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev)
 {
@@ -3910,7 +3912,7 @@ static void prism2_get_drvinfo(struct net_device *dev,
 		 local->sta_fw_ver & 0xff);
 }
 
-static struct ethtool_ops prism2_ethtool_ops = {
+struct ethtool_ops prism2_ethtool_ops = {
 	.get_drvinfo = prism2_get_drvinfo
 };
 
@@ -3985,7 +3987,7 @@ static const iw_handler prism2_private_handler[] =
 	(iw_handler) prism2_ioctl_priv_readmif,	/* 3 */
 };
 
-static const struct iw_handler_def hostap_iw_handler_def =
+const struct iw_handler_def hostap_iw_handler_def =
 {
 	.num_standard = sizeof(prism2_handler) / sizeof(iw_handler),
 	.num_private = sizeof(prism2_private_handler) / sizeof(iw_handler),
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 3d2ea61033be..8dd4c4446a64 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -24,6 +24,7 @@
 #include <linux/kmod.h>
 #include <linux/rtnetlink.h>
 #include <linux/wireless.h>
+#include <linux/etherdevice.h>
 #include <net/iw_handler.h>
 #include <net/ieee80211.h>
 #include <net/ieee80211_crypt.h>
@@ -47,57 +48,6 @@ MODULE_VERSION(PRISM2_VERSION);
 #define PRISM2_MAX_MTU (PRISM2_MAX_FRAME_SIZE - (6 /* LLC */ + 8 /* WEP */))
 
 
-/* hostap.c */
-static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
-			  int rtnl_locked);
-static int prism2_wds_del(local_info_t *local, u8 *remote_addr,
-			  int rtnl_locked, int do_not_remove);
-
-/* hostap_ap.c */
-static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
-				  struct iw_quality qual[], int buf_size,
-				  int aplist);
-static int prism2_ap_translate_scan(struct net_device *dev, char *buffer);
-static int prism2_hostapd(struct ap_data *ap,
-			  struct prism2_hostapd_param *param);
-static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
-				struct ieee80211_crypt_data ***crypt);
-static void ap_control_kickall(struct ap_data *ap);
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
-			      u8 *mac);
-static int ap_control_del_mac(struct mac_restrictions *mac_restrictions,
-			      u8 *mac);
-static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
-static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
-			       u8 *mac);
-#endif /* !PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
-
-static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
-				  2447, 2452, 2457, 2462, 2467, 2472, 2484 };
-#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0]))
-
-
-/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
-/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
-static unsigned char rfc1042_header[] =
-{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
-/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
-static unsigned char bridge_tunnel_header[] =
-{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
-/* No encapsulation header if EtherType < 0x600 (=length) */
-
-
-/* FIX: these could be compiled separately and linked together to hostap.o */
-#include "hostap_ap.c"
-#include "hostap_info.c"
-#include "hostap_ioctl.c"
-#include "hostap_proc.c"
-#include "hostap_80211_rx.c"
-#include "hostap_80211_tx.c"
-
-
 struct net_device * hostap_add_interface(struct local_info *local,
					 int type, int rtnl_locked,
					 const char *prefix,
@@ -196,8 +146,8 @@ static inline int prism2_wds_special_addr(u8 *addr)
 }
 
 
-static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
-			  int rtnl_locked)
+int prism2_wds_add(local_info_t *local, u8 *remote_addr,
+		   int rtnl_locked)
 {
 	struct net_device *dev;
 	struct list_head *ptr;
@@ -258,8 +208,8 @@ static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
 }
 
 
-static int prism2_wds_del(local_info_t *local, u8 *remote_addr,
-			  int rtnl_locked, int do_not_remove)
+int prism2_wds_del(local_info_t *local, u8 *remote_addr,
+		   int rtnl_locked, int do_not_remove)
 {
 	unsigned long flags;
 	struct list_head *ptr;
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index a0a4cbd4937a..d1d8ce022e63 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -1,5 +1,12 @@
 /* /proc routines for Host AP driver */
 
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <net/ieee80211_crypt.h>
+
+#include "hostap_wlan.h"
+#include "hostap.h"
+
 #define PROC_LIMIT (PAGE_SIZE - 80)
 
 
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index cfd801559492..87a54aa6f4dd 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -1,6 +1,10 @@
 #ifndef HOSTAP_WLAN_H
 #define HOSTAP_WLAN_H
 
+#include <linux/wireless.h>
+#include <linux/netdevice.h>
+#include <net/iw_handler.h>
+
 #include "hostap_config.h"
 #include "hostap_common.h"
 
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 7518384f34d9..8bf02763b5c7 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -5735,70 +5735,6 @@ static struct net_device_stats *ipw2100_stats(struct net_device *dev)
 	return &priv->ieee->stats;
 }
 
-#if WIRELESS_EXT < 18
-/* Support for wpa_supplicant before WE-18, deprecated. */
-
-/* following definitions must match definitions in driver_ipw.c */
-
-#define IPW2100_IOCTL_WPA_SUPPLICANT	SIOCIWFIRSTPRIV+30
-
-#define IPW2100_CMD_SET_WPA_PARAM	1
-#define IPW2100_CMD_SET_WPA_IE		2
-#define IPW2100_CMD_SET_ENCRYPTION	3
-#define IPW2100_CMD_MLME		4
-
-#define IPW2100_PARAM_WPA_ENABLED	1
-#define IPW2100_PARAM_TKIP_COUNTERMEASURES	2
-#define IPW2100_PARAM_DROP_UNENCRYPTED	3
-#define IPW2100_PARAM_PRIVACY_INVOKED	4
-#define IPW2100_PARAM_AUTH_ALGS		5
-#define IPW2100_PARAM_IEEE_802_1X	6
-
-#define IPW2100_MLME_STA_DEAUTH		1
-#define IPW2100_MLME_STA_DISASSOC	2
-
-#define IPW2100_CRYPT_ERR_UNKNOWN_ALG	2
-#define IPW2100_CRYPT_ERR_UNKNOWN_ADDR	3
-#define IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED	4
-#define IPW2100_CRYPT_ERR_KEY_SET_FAILED	5
-#define IPW2100_CRYPT_ERR_TX_KEY_SET_FAILED	6
-#define IPW2100_CRYPT_ERR_CARD_CONF_FAILED	7
-
-#define IPW2100_CRYPT_ALG_NAME_LEN	16
-
-struct ipw2100_param {
-	u32 cmd;
-	u8 sta_addr[ETH_ALEN];
-	union {
-		struct {
-			u8 name;
-			u32 value;
-		} wpa_param;
-		struct {
-			u32 len;
-			u8 reserved[32];
-			u8 data[0];
-		} wpa_ie;
-		struct {
-			u32 command;
-			u32 reason_code;
-		} mlme;
-		struct {
-			u8 alg[IPW2100_CRYPT_ALG_NAME_LEN];
-			u8 set_tx;
-			u32 err;
-			u8 idx;
-			u8 seq[8];	/* sequence counter (set: RX, get: TX) */
-			u16 key_len;
-			u8 key[0];
-		} crypt;
-
-	} u;
-};
-
-/* end of driver_ipw.c code */
-#endif /* WIRELESS_EXT < 18 */
-
 static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
 {
 	/* This is called when wpa_supplicant loads and closes the driver
@@ -5807,11 +5743,6 @@ static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
 	return 0;
 }
 
-#if WIRELESS_EXT < 18
-#define IW_AUTH_ALG_OPEN_SYSTEM	0x1
-#define IW_AUTH_ALG_SHARED_KEY	0x2
-#endif
-
 static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value)
 {
 
@@ -5855,360 +5786,6 @@ void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv,
 	ipw2100_set_wpa_ie(priv, &frame, 0);
 }
 
-#if WIRELESS_EXT < 18
-static int ipw2100_wpa_set_param(struct net_device *dev, u8 name, u32 value)
-{
-	struct ipw2100_priv *priv = ieee80211_priv(dev);
-	struct ieee80211_crypt_data *crypt;
-	unsigned long flags;
-	int ret = 0;
-
-	switch (name) {
-	case IPW2100_PARAM_WPA_ENABLED:
-		ret = ipw2100_wpa_enable(priv, value);
-		break;
-
-	case IPW2100_PARAM_TKIP_COUNTERMEASURES:
-		crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
-		if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
-			break;
-
-		flags = crypt->ops->get_flags(crypt->priv);
-
-		if (value)
-			flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
-		else
-			flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
-
-		crypt->ops->set_flags(flags, crypt->priv);
-
-		break;
-
-	case IPW2100_PARAM_DROP_UNENCRYPTED:{
-			/* See IW_AUTH_DROP_UNENCRYPTED handling for details */
-			struct ieee80211_security sec = {
-				.flags = SEC_ENABLED,
-				.enabled = value,
-			};
-			priv->ieee->drop_unencrypted = value;
-			/* We only change SEC_LEVEL for open mode. Others
-			 * are set by ipw_wpa_set_encryption.
-			 */
-			if (!value) {
-				sec.flags |= SEC_LEVEL;
-				sec.level = SEC_LEVEL_0;
-			} else {
-				sec.flags |= SEC_LEVEL;
-				sec.level = SEC_LEVEL_1;
-			}
-			if (priv->ieee->set_security)
-				priv->ieee->set_security(priv->ieee->dev, &sec);
-			break;
-		}
-
-	case IPW2100_PARAM_PRIVACY_INVOKED:
-		priv->ieee->privacy_invoked = value;
-		break;
-
-	case IPW2100_PARAM_AUTH_ALGS:
-		ret = ipw2100_wpa_set_auth_algs(priv, value);
-		break;
-
-	case IPW2100_PARAM_IEEE_802_1X:
-		priv->ieee->ieee802_1x = value;
-		break;
-
-	default:
-		printk(KERN_ERR DRV_NAME ": %s: Unknown WPA param: %d\n",
-		       dev->name, name);
-		ret = -EOPNOTSUPP;
-	}
-
-	return ret;
-}
-
-static int ipw2100_wpa_mlme(struct net_device *dev, int command, int reason)
-{
-
-	struct ipw2100_priv *priv = ieee80211_priv(dev);
-	int ret = 0;
-
-	switch (command) {
-	case IPW2100_MLME_STA_DEAUTH:
-		// silently ignore
-		break;
-
-	case IPW2100_MLME_STA_DISASSOC:
-		ipw2100_disassociate_bssid(priv);
-		break;
-
-	default:
-		printk(KERN_ERR DRV_NAME ": %s: Unknown MLME request: %d\n",
-		       dev->name, command);
-		ret = -EOPNOTSUPP;
-	}
-
-	return ret;
-}
-
-static int ipw2100_wpa_set_wpa_ie(struct net_device *dev,
-				  struct ipw2100_param *param, int plen)
-{
-
-	struct ipw2100_priv *priv = ieee80211_priv(dev);
-	struct ieee80211_device *ieee = priv->ieee;
-	u8 *buf;
-
-	if (!ieee->wpa_enabled)
-		return -EOPNOTSUPP;
-
-	if (param->u.wpa_ie.len > MAX_WPA_IE_LEN ||
-	    (param->u.wpa_ie.len && param->u.wpa_ie.data == NULL))
-		return -EINVAL;
-
-	if (param->u.wpa_ie.len) {
-		buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
-		if (buf == NULL)
-			return -ENOMEM;
-
-		memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
-
-		kfree(ieee->wpa_ie);
-		ieee->wpa_ie = buf;
-		ieee->wpa_ie_len = param->u.wpa_ie.len;
-
-	} else {
-		kfree(ieee->wpa_ie);
-		ieee->wpa_ie = NULL;
-		ieee->wpa_ie_len = 0;
-	}
-
-	ipw2100_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
-
-	return 0;
-}
-
-/* implementation borrowed from hostap driver */
-
-static int ipw2100_wpa_set_encryption(struct net_device *dev,
-				      struct ipw2100_param *param,
-				      int param_len)
-{
-	int ret = 0;
-	struct ipw2100_priv *priv = ieee80211_priv(dev);
-	struct ieee80211_device *ieee = priv->ieee;
-	struct ieee80211_crypto_ops *ops;
-	struct ieee80211_crypt_data **crypt;
-
-	struct ieee80211_security sec = {
-		.flags = 0,
-	};
-
-	param->u.crypt.err = 0;
-	param->u.crypt.alg[IPW2100_CRYPT_ALG_NAME_LEN - 1] = '\0';
-
-	if (param_len !=
-	    (int)((char *)param->u.crypt.key - (char *)param) +
-	    param->u.crypt.key_len) {
-		IPW_DEBUG_INFO("Len mismatch %d, %d\n", param_len,
-			       param->u.crypt.key_len);
-		return -EINVAL;
-	}
-	if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
-	    param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
-	    param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
-		if (param->u.crypt.idx >= WEP_KEYS)
-			return -EINVAL;
-		crypt = &ieee->crypt[param->u.crypt.idx];
-	} else {
-		return -EINVAL;
-	}
-
-	sec.flags |= SEC_ENABLED | SEC_ENCRYPT;
-	if (strcmp(param->u.crypt.alg, "none") == 0) {
-		if (crypt) {
-			sec.enabled = 0;
-			sec.encrypt = 0;
-			sec.level = SEC_LEVEL_0;
-			sec.flags |= SEC_LEVEL;
-			ieee80211_crypt_delayed_deinit(ieee, crypt);
-		}
-		goto done;
-	}
-	sec.enabled = 1;
-	sec.encrypt = 1;
-
-	ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
-	if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
-		request_module("ieee80211_crypt_wep");
-		ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
-	} else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
-		request_module("ieee80211_crypt_tkip");
-		ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
-	} else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
-		request_module("ieee80211_crypt_ccmp");
-		ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
-	}
-	if (ops == NULL) {
-		IPW_DEBUG_INFO("%s: unknown crypto alg '%s'\n",
-			       dev->name, param->u.crypt.alg);
-		param->u.crypt.err = IPW2100_CRYPT_ERR_UNKNOWN_ALG;
-		ret = -EINVAL;
-		goto done;
-	}
-
-	if (*crypt == NULL || (*crypt)->ops != ops) {
-		struct ieee80211_crypt_data *new_crypt;
-
-		ieee80211_crypt_delayed_deinit(ieee, crypt);
-
-		new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data), GFP_KERNEL);
-		if (new_crypt == NULL) {
-			ret = -ENOMEM;
-			goto done;
-		}
-		new_crypt->ops = ops;
-		if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
-			new_crypt->priv =
-			    new_crypt->ops->init(param->u.crypt.idx);
-
-		if (new_crypt->priv == NULL) {
-			kfree(new_crypt);
-			param->u.crypt.err =
-			    IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED;
-			ret = -EINVAL;
-			goto done;
-		}
-
-		*crypt = new_crypt;
-	}
-
-	if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key &&
-	    (*crypt)->ops->set_key(param->u.crypt.key,
-				   param->u.crypt.key_len, param->u.crypt.seq,
-				   (*crypt)->priv) < 0) {
-		IPW_DEBUG_INFO("%s: key setting failed\n", dev->name);
-		param->u.crypt.err = IPW2100_CRYPT_ERR_KEY_SET_FAILED;
-		ret = -EINVAL;
-		goto done;
-	}
-
-	if (param->u.crypt.set_tx) {
-		ieee->tx_keyidx = param->u.crypt.idx;
-		sec.active_key = param->u.crypt.idx;
-		sec.flags |= SEC_ACTIVE_KEY;
-	}
-
-	if (ops->name != NULL) {
-
-		if (strcmp(ops->name, "WEP") == 0) {
-			memcpy(sec.keys[param->u.crypt.idx],
-			       param->u.crypt.key, param->u.crypt.key_len);
-			sec.key_sizes[param->u.crypt.idx] =
-			    param->u.crypt.key_len;
-			sec.flags |= (1 << param->u.crypt.idx);
-			sec.flags |= SEC_LEVEL;
-			sec.level = SEC_LEVEL_1;
-		} else if (strcmp(ops->name, "TKIP") == 0) {
-			sec.flags |= SEC_LEVEL;
-			sec.level = SEC_LEVEL_2;
-		} else if (strcmp(ops->name, "CCMP") == 0) {
-			sec.flags |= SEC_LEVEL;
-			sec.level = SEC_LEVEL_3;
-		}
-	}
-      done:
-	if (ieee->set_security)
-		ieee->set_security(ieee->dev, &sec);
-
-	/* Do not reset port if card is in Managed mode since resetting will
-	 * generate new IEEE 802.11 authentication which may end up in looping
-	 * with IEEE 802.1X. If your hardware requires a reset after WEP
-	 * configuration (for example... Prism2), implement the reset_port in
-	 * the callbacks structures used to initialize the 802.11 stack. */
-	if (ieee->reset_on_keychange &&
-	    ieee->iw_mode != IW_MODE_INFRA &&
-	    ieee->reset_port && ieee->reset_port(dev)) {
-		IPW_DEBUG_INFO("%s: reset_port failed\n", dev->name);
-		param->u.crypt.err = IPW2100_CRYPT_ERR_CARD_CONF_FAILED;
-		return -EINVAL;
-	}
-
-	return ret;
-}
-
-static int ipw2100_wpa_supplicant(struct net_device *dev, struct iw_point *p)
-{
-
-	struct ipw2100_param *param;
-	int ret = 0;
-
-	IPW_DEBUG_IOCTL("wpa_supplicant: len=%d\n", p->length);
-
-	if (p->length < sizeof(struct ipw2100_param) || !p->pointer)
-		return -EINVAL;
-
-	param = (struct ipw2100_param *)kmalloc(p->length, GFP_KERNEL);
-	if (param == NULL)
-		return -ENOMEM;
-
-	if (copy_from_user(param, p->pointer, p->length)) {
-		kfree(param);
-		return -EFAULT;
-	}
-
-	switch (param->cmd) {
-
-	case IPW2100_CMD_SET_WPA_PARAM:
-		ret = ipw2100_wpa_set_param(dev, param->u.wpa_param.name,
-					    param->u.wpa_param.value);
-		break;
-
-	case IPW2100_CMD_SET_WPA_IE:
-		ret = ipw2100_wpa_set_wpa_ie(dev, param, p->length);
-		break;
-
-	case IPW2100_CMD_SET_ENCRYPTION:
-		ret = ipw2100_wpa_set_encryption(dev, param, p->length);
-		break;
-
-	case IPW2100_CMD_MLME:
-		ret = ipw2100_wpa_mlme(dev, param->u.mlme.command,
-				       param->u.mlme.reason_code);
-		break;
-
-	default:
-		printk(KERN_ERR DRV_NAME
-		       ": %s: Unknown WPA supplicant request: %d\n", dev->name,
-		       param->cmd);
-		ret = -EOPNOTSUPP;
-
-	}
-
-	if (ret == 0 && copy_to_user(p->pointer, param, p->length))
-		ret = -EFAULT;
-
-	kfree(param);
-	return ret;
-}
-
-static int ipw2100_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
-	struct iwreq *wrq = (struct iwreq *)rq;
-	int ret = -1;
-	switch (cmd) {
-	case IPW2100_IOCTL_WPA_SUPPLICANT:
-		ret = ipw2100_wpa_supplicant(dev, &wrq->u.data);
-		return ret;
-
-	default:
-		return -EOPNOTSUPP;
-	}
-
-	return -EOPNOTSUPP;
-}
-#endif /* WIRELESS_EXT < 18 */
-
 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
 {
@@ -6337,9 +5914,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
 	dev->open = ipw2100_open;
 	dev->stop = ipw2100_close;
 	dev->init = ipw2100_net_init;
-#if WIRELESS_EXT < 18
-	dev->do_ioctl = ipw2100_ioctl;
-#endif
 	dev->get_stats = ipw2100_stats;
 	dev->ethtool_ops = &ipw2100_ethtool_ops;
 	dev->tx_timeout = ipw2100_tx_timeout;
@@ -7855,7 +7429,6 @@ static int ipw2100_wx_get_power(struct net_device *dev,
 	return 0;
 }
 
-#if WIRELESS_EXT > 17
 /*
  * WE-18 WPA support
  */
@@ -8117,7 +7690,6 @@ static int ipw2100_wx_set_mlme(struct net_device *dev,
 	}
 	return 0;
 }
-#endif				/* WIRELESS_EXT > 17 */
 
 /*
  *
@@ -8350,11 +7922,7 @@ static iw_handler ipw2100_wx_handlers[] = {
 	NULL,			/* SIOCWIWTHRSPY */
 	ipw2100_wx_set_wap,	/* SIOCSIWAP */
 	ipw2100_wx_get_wap,	/* SIOCGIWAP */
-#if WIRELESS_EXT > 17
 	ipw2100_wx_set_mlme,	/* SIOCSIWMLME */
-#else
-	NULL,			/* -- hole -- */
-#endif
 	NULL,			/* SIOCGIWAPLIST -- deprecated */
 	ipw2100_wx_set_scan,	/* SIOCSIWSCAN */
 	ipw2100_wx_get_scan,	/* SIOCGIWSCAN */
@@ -8378,7 +7946,6 @@ static iw_handler ipw2100_wx_handlers[] = {
 	ipw2100_wx_get_encode,	/* SIOCGIWENCODE */
 	ipw2100_wx_set_power,	/* SIOCSIWPOWER */
 	ipw2100_wx_get_power,	/* SIOCGIWPOWER */
-#if WIRELESS_EXT > 17
 	NULL,			/* -- hole -- */
 	NULL,			/* -- hole -- */
 	ipw2100_wx_set_genie,	/* SIOCSIWGENIE */
@@ -8388,7 +7955,6 @@ static iw_handler ipw2100_wx_handlers[] = {
 	ipw2100_wx_set_encodeext,	/* SIOCSIWENCODEEXT */
 	ipw2100_wx_get_encodeext,	/* SIOCGIWENCODEEXT */
 	NULL,			/* SIOCSIWPMKSA */
-#endif
 };
 
 #define IPW2100_PRIV_SET_MONITOR	SIOCIWFIRSTPRIV
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 819be2b6b7df..4c28e332ecc3 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -8936,14 +8936,12 @@ static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
 	IPW_DEBUG_HC("starting request direct scan!\n");
 
 	if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
-		err = wait_event_interruptible(priv->wait_state,
-					       !(priv->
-						 status & (STATUS_SCANNING |
-							   STATUS_SCAN_ABORTING)));
-		if (err) {
-			IPW_DEBUG_HC("aborting direct scan");
-			goto done;
-		}
+		/* We should not sleep here; otherwise we will block most
+		 * of the system (for instance, we hold rtnl_lock when we
+		 * get here).
+		 */
+		err = -EAGAIN;
+		goto done;
 	}
 	memset(&scan, 0, sizeof(scan));
 
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 135a156db25d..c5cd61c7f927 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -748,7 +748,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
 	if (essid->length) {
 		dwrq->flags = 1;	/* set ESSID to ON for Wireless Extensions */
 		/* if it is to big, trunk it */
-		dwrq->length = min(IW_ESSID_MAX_SIZE, essid->length + 1);
+		dwrq->length = min(IW_ESSID_MAX_SIZE, essid->length);
 	} else {
 		dwrq->flags = 0;
 		dwrq->length = 0;
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 33d64d2ee53f..a8261d8454dd 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -177,7 +177,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
 #endif
 
 			newskb->dev = skb->dev;
-			dev_kfree_skb(skb);
+			dev_kfree_skb_irq(skb);
 			skb = newskb;
 		}
 	}
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 319180ca7e71..7880d8c31aad 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1256,7 +1256,7 @@ static int ray_get_essid(struct net_device *dev,
 	extra[IW_ESSID_MAX_SIZE] = '\0';
 
 	/* Push it out ! */
-	dwrq->length = strlen(extra) + 1;
+	dwrq->length = strlen(extra);
 	dwrq->flags = 1;	/* active */
 
 	return 0;
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 7e2039f52c49..cf373625fc70 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -2280,7 +2280,7 @@ static int wavelan_get_essid(struct net_device *dev,
 	extra[IW_ESSID_MAX_SIZE] = '\0';
 
 	/* Set the length */
-	wrqu->data.length = strlen(extra) + 1;
+	wrqu->data.length = strlen(extra);
 
 	return 0;
 }
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 605f0df0bfba..dda6099903c1 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1142,6 +1142,9 @@ static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev)
 	case 0x27c4:
 		ich = 7;
 		break;
+	case 0x2828:	/* ICH8M */
+		ich = 8;
+		break;
 	default:
 		/* we do not handle this PCI device */
 		return;
@@ -1161,7 +1164,7 @@ static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev)
 		else
 			return;	/* not in combined mode */
 	} else {
-		WARN_ON((ich != 6) && (ich != 7));
+		WARN_ON((ich != 6) && (ich != 7) && (ich != 8));
 		tmp &= 0x3;	/* interesting bits 1:0 */
 		if (tmp & (1 << 0))
 			comb = (1 << 2);	/* PATA port 0, SATA port 1 */
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index d113290b5fc0..19bd346951dd 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -276,6 +276,16 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	  board_ahci }, /* ESB2 */
 	{ PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	  board_ahci }, /* ICH7-M DH */
+	{ PCI_VENDOR_ID_INTEL, 0x2821, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* ICH8 */
+	{ PCI_VENDOR_ID_INTEL, 0x2822, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* ICH8 */
+	{ PCI_VENDOR_ID_INTEL, 0x2824, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* ICH8 */
+	{ PCI_VENDOR_ID_INTEL, 0x2829, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* ICH8M */
+	{ PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* ICH8M */
 	{ }	/* terminate list */
 };
 
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 557788ec4eec..fc3ca051ceed 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -157,6 +157,9 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
 	{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
 	{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	{ 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	{ 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
 
 	{ }	/* terminate list */
 };
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 99bae8369ab2..46c4cdbaee86 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -611,6 +611,10 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
 	if (dev->flags & ATA_DFLAG_PIO) {
 		tf->protocol = ATA_PROT_PIO;
 		index = dev->multi_count ? 0 : 8;
+	} else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
+		/* Unable to use DMA due to host limitation */
+		tf->protocol = ATA_PROT_PIO;
+		index = dev->multi_count ? 0 : 4;
 	} else {
 		tf->protocol = ATA_PROT_DMA;
 		index = 16;
@@ -1051,18 +1055,22 @@ static unsigned int ata_pio_modes(const struct ata_device *adev)
 {
 	u16 modes;
 
-	/* Usual case. Word 53 indicates word 88 is valid */
-	if (adev->id[ATA_ID_FIELD_VALID] & (1 << 2)) {
+	/* Usual case. Word 53 indicates word 64 is valid */
+	if (adev->id[ATA_ID_FIELD_VALID] & (1 << 1)) {
 		modes = adev->id[ATA_ID_PIO_MODES] & 0x03;
 		modes <<= 3;
 		modes |= 0x7;
 		return modes;
 	}
 
-	/* If word 88 isn't valid then Word 51 holds the PIO timing number
-	   for the maximum. Turn it into a mask and return it */
-	modes = (2 << (adev->id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
+	/* If word 64 isn't valid then Word 51 high byte holds the PIO timing
+	   number for the maximum. Turn it into a mask and return it */
+	modes = (2 << ((adev->id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1 ;
 	return modes;
+	/* But wait.. there's more. Design your standards by committee and
+	   you too can get a free iordy field to process. However its the
+	   speeds not the modes that are supported... Note drivers using the
+	   timing API will get this right anyway */
 }
 
 struct ata_exec_internal_arg {
@@ -1165,6 +1173,39 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
 }
 
 /**
+ *	ata_pio_need_iordy	-	check if iordy needed
+ *	@adev: ATA device
+ *
+ *	Check if the current speed of the device requires IORDY. Used
+ *	by various controllers for chip configuration.
+ */
+
+unsigned int ata_pio_need_iordy(const struct ata_device *adev)
+{
+	int pio;
+	int speed = adev->pio_mode - XFER_PIO_0;
+
+	if (speed < 2)
+		return 0;
+	if (speed > 2)
+		return 1;
+
+	/* If we have no drive specific rule, then PIO 2 is non IORDY */
+
+	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
+		pio = adev->id[ATA_ID_EIDE_PIO];
+		/* Is the speed faster than the drive allows non IORDY ? */
+		if (pio) {
+			/* This is cycle times not frequency - watch the logic! */
+			if (pio > 240)	/* PIO2 is 240nS per cycle */
+				return 1;
+			return 0;
+		}
+	}
+	return 0;
+}
+
+/**
  *	ata_dev_identify - obtain IDENTIFY x DEVICE page
  *	@ap: port on which device we wish to probe resides
 *	@device: device bus address, starting at zero
@@ -1415,7 +1456,7 @@ void ata_dev_config(struct ata_port *ap, unsigned int i)
 		ap->udma_mask &= ATA_UDMA5;
 		ap->host->max_sectors = ATA_MAX_SECTORS;
 		ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
-		ap->device->flags |= ATA_DFLAG_LOCK_SECTORS;
+		ap->device[i].flags |= ATA_DFLAG_LOCK_SECTORS;
 	}
 
 	if (ap->ops->dev_config)
@@ -3056,10 +3097,21 @@ static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
			  unsigned int buflen, int do_write)
 {
-	if (ap->flags & ATA_FLAG_MMIO)
-		ata_mmio_data_xfer(ap, buf, buflen, do_write);
-	else
-		ata_pio_data_xfer(ap, buf, buflen, do_write);
+	/* Make the crap hardware pay the costs not the good stuff */
+	if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
+		unsigned long flags;
+		local_irq_save(flags);
+		if (ap->flags & ATA_FLAG_MMIO)
+			ata_mmio_data_xfer(ap, buf, buflen, do_write);
+		else
+			ata_pio_data_xfer(ap, buf, buflen, do_write);
+		local_irq_restore(flags);
+	} else {
+		if (ap->flags & ATA_FLAG_MMIO)
+			ata_mmio_data_xfer(ap, buf, buflen, do_write);
+		else
+			ata_pio_data_xfer(ap, buf, buflen, do_write);
+	}
 }
 
 /**
@@ -5122,6 +5174,7 @@ EXPORT_SYMBOL_GPL(ata_dev_id_string);
 EXPORT_SYMBOL_GPL(ata_dev_config);
 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
 
+EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
 EXPORT_SYMBOL_GPL(ata_timing_compute);
 EXPORT_SYMBOL_GPL(ata_timing_merge);
 
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 3d1ea09a06a1..b0b0a69b3563 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -66,6 +66,7 @@ enum {
 	board_2037x	= 0,	/* FastTrak S150 TX2plus */
 	board_20319	= 1,	/* FastTrak S150 TX4 */
 	board_20619	= 2,	/* FastTrak TX4000 */
+	board_20771	= 3,	/* FastTrak TX2300 */
 
 	PDC_HAS_PATA	= (1 << 1), /* PDC20375 has PATA */
 
@@ -190,6 +191,16 @@ static const struct ata_port_info pdc_port_info[] = {
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
 		.port_ops	= &pdc_pata_ops,
 	},
+
+	/* board_20771 */
+	{
+		.sht		= &pdc_ata_sht,
+		.host_flags	= PDC_COMMON_FLAGS | ATA_FLAG_SATA,
+		.pio_mask	= 0x1f, /* pio0-4 */
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.port_ops	= &pdc_sata_ops,
+	},
 };
 
 static const struct pci_device_id pdc_ata_pci_tbl[] = {
@@ -226,6 +237,8 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
 	{ PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	  board_20619 },
 
+	{ PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_20771 },
 	{ }	/* terminate list */
 };
 
@@ -706,6 +719,9 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
 	case board_2037x:
		probe_ent->n_ports = 2;
		break;
+	case board_20771:
+		probe_ent->n_ports = 2;
+		break;
 	case board_20619:
		probe_ent->n_ports = 4;
 
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index 668373590aa4..d8472563fde8 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -470,6 +470,7 @@ static const struct pci_device_id k2_sata_pci_tbl[] = {
 	{ 0x1166, 0x0241, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
 	{ 0x1166, 0x0242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
 	{ 0x1166, 0x024a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+	{ 0x1166, 0x024b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
 	{ }
 };
 
diff --git a/drivers/video/sbuslib.c b/drivers/video/sbuslib.c
index 55e6e2d60d3a..a4d7cc51ce0b 100644
--- a/drivers/video/sbuslib.c
+++ b/drivers/video/sbuslib.c
@@ -199,8 +199,7 @@ struct fbcmap32 {
 #define FBIOPUTCMAP32	_IOW('F', 3, struct fbcmap32)
 #define FBIOGETCMAP32	_IOW('F', 4, struct fbcmap32)
 
-static int fbiogetputcmap(struct file *file, struct fb_info *info,
-		unsigned int cmd, unsigned long arg)
+static int fbiogetputcmap(struct fb_info *info, unsigned int cmd, unsigned long arg)
 {
 	struct fbcmap32 __user *argp = (void __user *)arg;
 	struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p));
@@ -236,8 +235,7 @@ struct fbcursor32 {
 #define FBIOSCURSOR32	_IOW('F', 24, struct fbcursor32)
 #define FBIOGCURSOR32	_IOW('F', 25, struct fbcursor32)
 
-static int fbiogscursor(struct file *file, struct fb_info *info,
-		unsigned long arg)
+static int fbiogscursor(struct fb_info *info, unsigned long arg)
 {
 	struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p));
 	struct fbcursor32 __user *argp = (void __user *)arg;
@@ -263,8 +261,7 @@ static int fbiogscursor(struct file *file, struct fb_info *info,
 	return info->fbops->fb_ioctl(info, FBIOSCURSOR, (unsigned long)p);
 }
 
-long sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd,
-	unsigned long arg)
+int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
 {
 	switch (cmd) {
 	case FBIOGTYPE:
diff --git a/drivers/video/sbuslib.h b/drivers/video/sbuslib.h
index f753939013ed..492828c3fe8f 100644
--- a/drivers/video/sbuslib.h
+++ b/drivers/video/sbuslib.h
@@ -20,7 +20,7 @@ extern int sbusfb_mmap_helper(struct sbus_mmap_map *map,
 int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
			struct fb_info *info,
			int type, int fb_depth, unsigned long fb_size);
-long sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd,
+int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd,
			unsigned long arg);
 
 #endif /* _SBUSLIB_H */
diff --git a/include/asm-powerpc/lppaca.h b/include/asm-powerpc/lppaca.h
index cd9f11f1ef14..4dc514aabfe7 100644
--- a/include/asm-powerpc/lppaca.h
+++ b/include/asm-powerpc/lppaca.h
@@ -31,7 +31,7 @@
 
 /* The Hypervisor barfs if the lppaca crosses a page boundary.  A 1k
  * alignment is sufficient to prevent this */
-struct __attribute__((__aligned__(0x400))) lppaca {
+struct lppaca {
 //=============================================================================
 // CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
 // NOTE: The xDynXyz fields are fields that will be dynamically changed by
@@ -129,7 +129,7 @@ struct __attribute__((__aligned__(0x400))) lppaca {
 // CACHE_LINE_4-5 0x0100 - 0x01FF Contains PMC interrupt data
 //=============================================================================
 	u8	pmc_save_area[256];	// PMC interrupt Area           x00-xFF
-};
+} __attribute__((__aligned__(0x400)));
 
 extern struct lppaca lppaca[];
 
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 323924edb26a..a5363324cf95 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -228,6 +228,7 @@ extern void dump_stack(void);
 	ntohs((addr).s6_addr16[6]), \
 	ntohs((addr).s6_addr16[7])
 #define NIP6_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
+#define NIP6_SEQFMT "%04x%04x%04x%04x%04x%04x%04x%04x"
 
 #if defined(__LITTLE_ENDIAN)
 #define HIPQUAD(addr) \
diff --git a/include/linux/libata.h b/include/linux/libata.h
index a43c95f8f968..9e5db2949c58 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -126,16 +126,19 @@ enum {
 
 	ATA_FLAG_SUSPENDED	= (1 << 12), /* port is suspended */
 
+	ATA_FLAG_PIO_LBA48	= (1 << 13), /* Host DMA engine is LBA28 only */
+	ATA_FLAG_IRQ_MASK	= (1 << 14), /* Mask IRQ in PIO xfers */
+
 	ATA_QCFLAG_ACTIVE	= (1 << 1), /* cmd not yet ack'd to scsi lyer */
 	ATA_QCFLAG_SG		= (1 << 3), /* have s/g table? */
 	ATA_QCFLAG_SINGLE	= (1 << 4), /* no s/g, just a single buffer */
 	ATA_QCFLAG_DMAMAP	= ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
 
 	/* various lengths of time */
-	ATA_TMOUT_EDD		= 5 * HZ,	/* hueristic */
+	ATA_TMOUT_EDD		= 5 * HZ,	/* heuristic */
 	ATA_TMOUT_PIO		= 30 * HZ,
-	ATA_TMOUT_BOOT		= 30 * HZ,	/* hueristic */
-	ATA_TMOUT_BOOT_QUICK	= 7 * HZ,	/* hueristic */
+	ATA_TMOUT_BOOT		= 30 * HZ,	/* heuristic */
+	ATA_TMOUT_BOOT_QUICK	= 7 * HZ,	/* heuristic */
 	ATA_TMOUT_CDB		= 30 * HZ,
 	ATA_TMOUT_CDB_QUICK	= 5 * HZ,
 	ATA_TMOUT_INTERNAL	= 30 * HZ,
@@ -499,6 +502,8 @@ extern int ata_scsi_slave_config(struct scsi_device *sdev);
 /*
  * Timing helpers
  */
+
+extern unsigned int ata_pio_need_iordy(const struct ata_device *);
 extern int ata_timing_compute(struct ata_device *, unsigned short,
			      struct ata_timing *, int, int);
 extern void ata_timing_merge(const struct ata_timing *,
diff --git a/include/linux/netfilter_ipv6/ip6t_ah.h b/include/linux/netfilter_ipv6/ip6t_ah.h
index c4f0793a0a98..8531879eb464 100644
--- a/include/linux/netfilter_ipv6/ip6t_ah.h
+++ b/include/linux/netfilter_ipv6/ip6t_ah.h
@@ -18,13 +18,4 @@ struct ip6t_ah
 #define IP6T_AH_INV_LEN	0x02	/* Invert the sense of length. */
 #define IP6T_AH_INV_MASK	0x03	/* All possible flags. */
 
-#define MASK_HOPOPTS    128
-#define MASK_DSTOPTS    64
-#define MASK_ROUTING    32
-#define MASK_FRAGMENT   16
-#define MASK_AH         8
-#define MASK_ESP        4
-#define MASK_NONE       2
-#define MASK_PROTO      1
-
 #endif /*_IP6T_AH_H*/
diff --git a/include/linux/netfilter_ipv6/ip6t_esp.h b/include/linux/netfilter_ipv6/ip6t_esp.h
index 01142b98a231..a91b6abc8079 100644
--- a/include/linux/netfilter_ipv6/ip6t_esp.h
+++ b/include/linux/netfilter_ipv6/ip6t_esp.h
@@ -7,15 +7,6 @@ struct ip6t_esp
 	u_int8_t invflags;	/* Inverse flags */
 };
 
-#define MASK_HOPOPTS    128
-#define MASK_DSTOPTS    64
-#define MASK_ROUTING    32
-#define MASK_FRAGMENT   16
-#define MASK_AH         8
-#define MASK_ESP        4
-#define MASK_NONE       2
-#define MASK_PROTO      1
-
 /* Values for "invflags" field in struct ip6t_esp. */
 #define IP6T_ESP_INV_SPI	0x01	/* Invert the sense of spi. */
 #define IP6T_ESP_INV_MASK	0x01	/* All possible flags. */
diff --git a/include/linux/netfilter_ipv6/ip6t_frag.h b/include/linux/netfilter_ipv6/ip6t_frag.h
index 449a57eca7dd..66070a0d6dfc 100644
--- a/include/linux/netfilter_ipv6/ip6t_frag.h
+++ b/include/linux/netfilter_ipv6/ip6t_frag.h
@@ -21,13 +21,4 @@ struct ip6t_frag
 #define IP6T_FRAG_INV_LEN	0x02	/* Invert the sense of length. */
 #define IP6T_FRAG_INV_MASK	0x03	/* All possible flags. */
 
-#define MASK_HOPOPTS    128
-#define MASK_DSTOPTS    64
-#define MASK_ROUTING    32
-#define MASK_FRAGMENT   16
-#define MASK_AH         8
-#define MASK_ESP        4
-#define MASK_NONE       2
-#define MASK_PROTO      1
-
 #endif /*_IP6T_FRAG_H*/
diff --git a/include/linux/netfilter_ipv6/ip6t_opts.h b/include/linux/netfilter_ipv6/ip6t_opts.h
index e259b6275bd2..a07e36380ae8 100644
--- a/include/linux/netfilter_ipv6/ip6t_opts.h
+++ b/include/linux/netfilter_ipv6/ip6t_opts.h
@@ -20,13 +20,4 @@ struct ip6t_opts
 #define IP6T_OPTS_INV_LEN	0x01	/* Invert the sense of length. */
 #define IP6T_OPTS_INV_MASK	0x01	/* All possible flags. */
 
-#define MASK_HOPOPTS    128
-#define MASK_DSTOPTS    64
-#define MASK_ROUTING    32
-#define MASK_FRAGMENT   16
-#define MASK_AH         8
-#define MASK_ESP        4
-#define MASK_NONE       2
-#define MASK_PROTO      1
-
 #endif /*_IP6T_OPTS_H*/
diff --git a/include/linux/netfilter_ipv6/ip6t_rt.h b/include/linux/netfilter_ipv6/ip6t_rt.h
index f1070fbf2757..52156023e8db 100644
--- a/include/linux/netfilter_ipv6/ip6t_rt.h
+++ b/include/linux/netfilter_ipv6/ip6t_rt.h
@@ -30,13 +30,4 @@ struct ip6t_rt
30#define IP6T_RT_INV_LEN 0x04 /* Invert the sense of length. */ 30#define IP6T_RT_INV_LEN 0x04 /* Invert the sense of length. */
31#define IP6T_RT_INV_MASK 0x07 /* All possible flags. */ 31#define IP6T_RT_INV_MASK 0x07 /* All possible flags. */
32 32
33#define MASK_HOPOPTS 128
34#define MASK_DSTOPTS 64
35#define MASK_ROUTING 32
36#define MASK_FRAGMENT 16
37#define MASK_AH 8
38#define MASK_ESP 4
39#define MASK_NONE 2
40#define MASK_PROTO 1
41
42#endif /*_IP6T_RT_H*/ 33#endif /*_IP6T_RT_H*/
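The five hunks above drop the identical MASK_* definitions that had been copied into each of these ip6t headers; only the ipv6header match still uses them (see net/ipv6/netfilter/ip6t_ipv6header.c below). For reference, the bit values as they appeared in the removed copies:

#define MASK_HOPOPTS	128
#define MASK_DSTOPTS	64
#define MASK_ROUTING	32
#define MASK_FRAGMENT	16
#define MASK_AH		8
#define MASK_ESP	4
#define MASK_NONE	2
#define MASK_PROTO	1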
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index e5fd66c5650b..ad7cc22bd424 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -926,7 +926,7 @@ static inline int skb_tailroom(const struct sk_buff *skb)
926 * Increase the headroom of an empty &sk_buff by reducing the tail 926 * Increase the headroom of an empty &sk_buff by reducing the tail
927 * room. This is only allowed for an empty buffer. 927 * room. This is only allowed for an empty buffer.
928 */ 928 */
929static inline void skb_reserve(struct sk_buff *skb, unsigned int len) 929static inline void skb_reserve(struct sk_buff *skb, int len)
930{ 930{
931 skb->data += len; 931 skb->data += len;
932 skb->tail += len; 932 skb->tail += len;
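The parameter type changes from unsigned int to plain int; callers are unaffected. A sketch of the typical RX-path usage, assuming the era's dev_alloc_skb() and NET_IP_ALIGN (usually 2):

struct sk_buff *skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
if (skb)
	/* shift data so the IP header lands on a 4-byte boundary */
	skb_reserve(skb, NET_IP_ALIGN);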
diff --git a/include/net/ieee80211_crypt.h b/include/net/ieee80211_crypt.h
index 03b766afdc39..cd82c3e998e4 100644
--- a/include/net/ieee80211_crypt.h
+++ b/include/net/ieee80211_crypt.h
@@ -25,6 +25,7 @@
25 25
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/list.h> 27#include <linux/list.h>
28#include <net/ieee80211.h>
28#include <asm/atomic.h> 29#include <asm/atomic.h>
29 30
30enum { 31enum {
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index d67c8393a343..a2c5e0b88422 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -327,7 +327,7 @@ struct iw_handler_def
327 __u16 num_private_args; 327 __u16 num_private_args;
328 328
329 /* Array of handlers for standard ioctls 329 /* Array of handlers for standard ioctls
330 * We will call dev->wireless_handlers->standard[ioctl - SIOCSIWNAME] 330 * We will call dev->wireless_handlers->standard[ioctl - SIOCSIWCOMMIT]
331 */ 331 */
332 const iw_handler * standard; 332 const iw_handler * standard;
333 333
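The corrected comment reflects that SIOCSIWCOMMIT, not SIOCSIWNAME, is the first ioctl of the standard wireless range, so it is the base used when indexing the handler array. A dispatch sketch, where cmd is a hypothetical ioctl number already validated to be in range:

const iw_handler *std = dev->wireless_handlers->standard;
iw_handler handler = std[cmd - SIOCSIWCOMMIT];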
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c
index f158fe67dd60..dc5d0b2427cf 100644
--- a/net/bridge/netfilter/ebt_ip.c
+++ b/net/bridge/netfilter/ebt_ip.c
@@ -92,7 +92,9 @@ static int ebt_ip_check(const char *tablename, unsigned int hookmask,
92 if (info->invflags & EBT_IP_PROTO) 92 if (info->invflags & EBT_IP_PROTO)
93 return -EINVAL; 93 return -EINVAL;
94 if (info->protocol != IPPROTO_TCP && 94 if (info->protocol != IPPROTO_TCP &&
95 info->protocol != IPPROTO_UDP) 95 info->protocol != IPPROTO_UDP &&
96 info->protocol != IPPROTO_SCTP &&
97 info->protocol != IPPROTO_DCCP)
96 return -EINVAL; 98 return -EINVAL;
97 } 99 }
98 if (info->bitmask & EBT_IP_DPORT && info->dport[0] > info->dport[1]) 100 if (info->bitmask & EBT_IP_DPORT && info->dport[0] > info->dport[1])
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index a29c1232c420..0128fbbe2328 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -95,7 +95,9 @@ ebt_log_packet(unsigned int pf, unsigned int hooknum,
95 "tos=0x%02X, IP proto=%d", NIPQUAD(ih->saddr), 95 "tos=0x%02X, IP proto=%d", NIPQUAD(ih->saddr),
96 NIPQUAD(ih->daddr), ih->tos, ih->protocol); 96 NIPQUAD(ih->daddr), ih->tos, ih->protocol);
97 if (ih->protocol == IPPROTO_TCP || 97 if (ih->protocol == IPPROTO_TCP ||
98 ih->protocol == IPPROTO_UDP) { 98 ih->protocol == IPPROTO_UDP ||
99 ih->protocol == IPPROTO_SCTP ||
100 ih->protocol == IPPROTO_DCCP) {
99 struct tcpudphdr _ports, *pptr; 101 struct tcpudphdr _ports, *pptr;
100 102
101 pptr = skb_header_pointer(skb, ih->ihl*4, 103 pptr = skb_header_pointer(skb, ih->ihl*4,
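As with the ebt_ip change above, ebt_log can treat SCTP and DCCP like TCP and UDP here because all four transport headers begin with 16-bit source and destination port fields, so the existing tcpudphdr overlay keeps working. A sketch of that overlay, with field types as best recalled for this era:

struct tcpudphdr {
	__be16 src;	/* source port      */
	__be16 dst;	/* destination port */
};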
diff --git a/net/core/filter.c b/net/core/filter.c
index a52665f75224..9540946a48f3 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -74,7 +74,6 @@ static inline void *load_pointer(struct sk_buff *skb, int k,
74 * filtering, filter is the array of filter instructions, and 74 * filtering, filter is the array of filter instructions, and
75 * len is the number of filter blocks in the array. 75 * len is the number of filter blocks in the array.
76 */ 76 */
77
78unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) 77unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
79{ 78{
80 struct sock_filter *fentry; /* We walk down these */ 79 struct sock_filter *fentry; /* We walk down these */
@@ -175,7 +174,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
175 continue; 174 continue;
176 case BPF_LD|BPF_W|BPF_ABS: 175 case BPF_LD|BPF_W|BPF_ABS:
177 k = fentry->k; 176 k = fentry->k;
178 load_w: 177load_w:
179 ptr = load_pointer(skb, k, 4, &tmp); 178 ptr = load_pointer(skb, k, 4, &tmp);
180 if (ptr != NULL) { 179 if (ptr != NULL) {
181 A = ntohl(*(u32 *)ptr); 180 A = ntohl(*(u32 *)ptr);
@@ -184,7 +183,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
184 break; 183 break;
185 case BPF_LD|BPF_H|BPF_ABS: 184 case BPF_LD|BPF_H|BPF_ABS:
186 k = fentry->k; 185 k = fentry->k;
187 load_h: 186load_h:
188 ptr = load_pointer(skb, k, 2, &tmp); 187 ptr = load_pointer(skb, k, 2, &tmp);
189 if (ptr != NULL) { 188 if (ptr != NULL) {
190 A = ntohs(*(u16 *)ptr); 189 A = ntohs(*(u16 *)ptr);
@@ -374,7 +373,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
374 case BPF_JMP|BPF_JSET|BPF_K: 373 case BPF_JMP|BPF_JSET|BPF_K:
375 case BPF_JMP|BPF_JSET|BPF_X: 374 case BPF_JMP|BPF_JSET|BPF_X:
376 /* for conditionals both must be safe */ 375 /* for conditionals both must be safe */
377 if (pc + ftest->jt + 1 >= flen || 376 if (pc + ftest->jt + 1 >= flen ||
378 pc + ftest->jf + 1 >= flen) 377 pc + ftest->jf + 1 >= flen)
379 return -EINVAL; 378 return -EINVAL;
380 break; 379 break;
@@ -384,7 +383,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
384 } 383 }
385 } 384 }
386 385
387 return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL; 386 return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
388} 387}
389 388
390/** 389/**
@@ -404,8 +403,8 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
404 int err; 403 int err;
405 404
406 /* Make sure new filter is there and in the right amounts. */ 405 /* Make sure new filter is there and in the right amounts. */
407 if (fprog->filter == NULL) 406 if (fprog->filter == NULL)
408 return -EINVAL; 407 return -EINVAL;
409 408
410 fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL); 409 fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
411 if (!fp) 410 if (!fp)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 281a632fa6a6..ea51f8d02eb8 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -703,7 +703,7 @@ int netpoll_setup(struct netpoll *np)
703 } 703 }
704 } 704 }
705 705
706 if (!memcmp(np->local_mac, "\0\0\0\0\0\0", 6) && ndev->dev_addr) 706 if (is_zero_ether_addr(np->local_mac) && ndev->dev_addr)
707 memcpy(np->local_mac, ndev->dev_addr, 6); 707 memcpy(np->local_mac, ndev->dev_addr, 6);
708 708
709 if (!np->local_ip) { 709 if (!np->local_ip) {
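is_zero_ether_addr() replaces the open-coded memcmp() against six zero bytes. The helper lives in <linux/etherdevice.h>; a sketch of its 2.6-era definition:

static inline int is_zero_ether_addr(const u8 *addr)
{
	return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
}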
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 39063122fbb7..3827f881f429 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -139,6 +139,7 @@
139#include <linux/proc_fs.h> 139#include <linux/proc_fs.h>
140#include <linux/seq_file.h> 140#include <linux/seq_file.h>
141#include <linux/wait.h> 141#include <linux/wait.h>
142#include <linux/etherdevice.h>
142#include <net/checksum.h> 143#include <net/checksum.h>
143#include <net/ipv6.h> 144#include <net/ipv6.h>
144#include <net/addrconf.h> 145#include <net/addrconf.h>
@@ -281,8 +282,8 @@ struct pktgen_dev {
281 __u32 src_mac_count; /* How many MACs to iterate through */ 282 __u32 src_mac_count; /* How many MACs to iterate through */
282 __u32 dst_mac_count; /* How many MACs to iterate through */ 283 __u32 dst_mac_count; /* How many MACs to iterate through */
283 284
284 unsigned char dst_mac[6]; 285 unsigned char dst_mac[ETH_ALEN];
285 unsigned char src_mac[6]; 286 unsigned char src_mac[ETH_ALEN];
286 287
287 __u32 cur_dst_mac_offset; 288 __u32 cur_dst_mac_offset;
288 __u32 cur_src_mac_offset; 289 __u32 cur_src_mac_offset;
@@ -594,16 +595,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
594 595
595 seq_puts(seq, " src_mac: "); 596 seq_puts(seq, " src_mac: ");
596 597
597 if ((pkt_dev->src_mac[0] == 0) && 598 if (is_zero_ether_addr(pkt_dev->src_mac))
598 (pkt_dev->src_mac[1] == 0) &&
599 (pkt_dev->src_mac[2] == 0) &&
600 (pkt_dev->src_mac[3] == 0) &&
601 (pkt_dev->src_mac[4] == 0) &&
602 (pkt_dev->src_mac[5] == 0))
603
604 for (i = 0; i < 6; i++) 599 for (i = 0; i < 6; i++)
605 seq_printf(seq, "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? " " : ":"); 600 seq_printf(seq, "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? " " : ":");
606
607 else 601 else
608 for (i = 0; i < 6; i++) 602 for (i = 0; i < 6; i++)
609 seq_printf(seq, "%02X%s", pkt_dev->src_mac[i], i == 5 ? " " : ":"); 603 seq_printf(seq, "%02X%s", pkt_dev->src_mac[i], i == 5 ? " " : ":");
@@ -1189,9 +1183,9 @@ static ssize_t pktgen_if_write(struct file *file, const char __user *user_buffer
1189 } 1183 }
1190 if (!strcmp(name, "dst_mac")) { 1184 if (!strcmp(name, "dst_mac")) {
1191 char *v = valstr; 1185 char *v = valstr;
1192 unsigned char old_dmac[6]; 1186 unsigned char old_dmac[ETH_ALEN];
1193 unsigned char *m = pkt_dev->dst_mac; 1187 unsigned char *m = pkt_dev->dst_mac;
1194 memcpy(old_dmac, pkt_dev->dst_mac, 6); 1188 memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN);
1195 1189
1196 len = strn_len(&user_buffer[i], sizeof(valstr) - 1); 1190 len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
1197 if (len < 0) { return len; } 1191 if (len < 0) { return len; }
@@ -1220,8 +1214,8 @@ static ssize_t pktgen_if_write(struct file *file, const char __user *user_buffer
1220 } 1214 }
1221 1215
1222 /* Set up Dest MAC */ 1216 /* Set up Dest MAC */
1223 if (memcmp(old_dmac, pkt_dev->dst_mac, 6) != 0) 1217 if (compare_ether_addr(old_dmac, pkt_dev->dst_mac))
1224 memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, 6); 1218 memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN);
1225 1219
1226 sprintf(pg_result, "OK: dstmac"); 1220 sprintf(pg_result, "OK: dstmac");
1227 return count; 1221 return count;
@@ -1560,17 +1554,11 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
1560 1554
1561 /* Default to the interface's mac if not explicitly set. */ 1555 /* Default to the interface's mac if not explicitly set. */
1562 1556
1563 if ((pkt_dev->src_mac[0] == 0) && 1557 if (is_zero_ether_addr(pkt_dev->src_mac))
1564 (pkt_dev->src_mac[1] == 0) && 1558 memcpy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr, ETH_ALEN);
1565 (pkt_dev->src_mac[2] == 0) &&
1566 (pkt_dev->src_mac[3] == 0) &&
1567 (pkt_dev->src_mac[4] == 0) &&
1568 (pkt_dev->src_mac[5] == 0)) {
1569 1559
1570 memcpy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr, 6);
1571 }
1572 /* Set up Dest MAC */ 1560 /* Set up Dest MAC */
1573 memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, 6); 1561 memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN);
1574 1562
1575 /* Set up pkt size */ 1563 /* Set up pkt size */
1576 pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size; 1564 pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size;
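Note that compare_ether_addr() follows memcmp() truthiness: it returns nonzero when the two addresses differ, so the "if (compare_ether_addr(old_dmac, pkt_dev->dst_mac))" above still means "the destination MAC changed". A sketch of the era's definition; both addresses must be 16-bit aligned:

static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2)
{
	const u16 *a = (const u16 *)addr1;
	const u16 *b = (const u16 *)addr2;

	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
}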
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index ce9cb77c5c29..2c77dafbd091 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -144,7 +144,7 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
144 const unsigned char state) 144 const unsigned char state)
145{ 145{
146 unsigned int gap; 146 unsigned int gap;
147 signed long new_head; 147 long new_head;
148 148
149 if (av->dccpav_vec_len + packets > av->dccpav_buf_len) 149 if (av->dccpav_vec_len + packets > av->dccpav_buf_len)
150 return -ENOBUFS; 150 return -ENOBUFS;
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index bcefe64b9317..e5c5b3202f02 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -46,7 +46,6 @@ obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o
46obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o 46obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
47 47
48# matches 48# matches
49obj-$(CONFIG_IP_NF_MATCH_HELPER) += ipt_helper.o
50obj-$(CONFIG_IP_NF_MATCH_HASHLIMIT) += ipt_hashlimit.o 49obj-$(CONFIG_IP_NF_MATCH_HASHLIMIT) += ipt_hashlimit.o
51obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o 50obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
52obj-$(CONFIG_IP_NF_MATCH_MULTIPORT) += ipt_multiport.o 51obj-$(CONFIG_IP_NF_MATCH_MULTIPORT) += ipt_multiport.o
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_gre.c b/net/ipv4/netfilter/ip_conntrack_proto_gre.c
index c777abf16cb7..56794797d55b 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_gre.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_gre.c
@@ -32,6 +32,7 @@
32#include <linux/in.h> 32#include <linux/in.h>
33#include <linux/list.h> 33#include <linux/list.h>
34#include <linux/seq_file.h> 34#include <linux/seq_file.h>
35#include <linux/interrupt.h>
35 36
36static DEFINE_RWLOCK(ip_ct_gre_lock); 37static DEFINE_RWLOCK(ip_ct_gre_lock);
37#define ASSERT_READ_LOCK(x) 38#define ASSERT_READ_LOCK(x)
diff --git a/net/ipv4/netfilter/ipt_policy.c b/net/ipv4/netfilter/ipt_policy.c
index 709debcc69c9..18ca8258a1c5 100644
--- a/net/ipv4/netfilter/ipt_policy.c
+++ b/net/ipv4/netfilter/ipt_policy.c
@@ -95,7 +95,10 @@ match_policy_out(const struct sk_buff *skb, const struct ipt_policy_info *info)
95static int match(const struct sk_buff *skb, 95static int match(const struct sk_buff *skb,
96 const struct net_device *in, 96 const struct net_device *in,
97 const struct net_device *out, 97 const struct net_device *out,
98 const void *matchinfo, int offset, int *hotdrop) 98 const void *matchinfo,
99 int offset,
100 unsigned int protoff,
101 int *hotdrop)
99{ 102{
100 const struct ipt_policy_info *info = matchinfo; 103 const struct ipt_policy_info *info = matchinfo;
101 int ret; 104 int ret;
@@ -113,7 +116,7 @@ static int match(const struct sk_buff *skb,
113 return ret; 116 return ret;
114} 117}
115 118
116static int checkentry(const char *tablename, const struct ipt_ip *ip, 119static int checkentry(const char *tablename, const void *ip_void,
117 void *matchinfo, unsigned int matchsize, 120 void *matchinfo, unsigned int matchsize,
118 unsigned int hook_mask) 121 unsigned int hook_mask)
119{ 122{
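The updated prototypes, collected from the hunks above: match() gains a protoff argument (the offset of the transport header), and checkentry() now receives the per-protocol entry as an opaque pointer:

static int match(const struct sk_buff *skb,
		 const struct net_device *in,
		 const struct net_device *out,
		 const void *matchinfo,
		 int offset,
		 unsigned int protoff,
		 int *hotdrop);

static int checkentry(const char *tablename, const void *ip_void,
		      void *matchinfo, unsigned int matchsize,
		      unsigned int hook_mask);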
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f701a136a6ae..f2e82afc15b3 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -240,9 +240,8 @@ static unsigned rt_hash_mask;
240static int rt_hash_log; 240static int rt_hash_log;
241static unsigned int rt_hash_rnd; 241static unsigned int rt_hash_rnd;
242 242
243static struct rt_cache_stat *rt_cache_stat; 243static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
244#define RT_CACHE_STAT_INC(field) \ 244#define RT_CACHE_STAT_INC(field) (__get_cpu_var(rt_cache_stat).field++)
245 (per_cpu_ptr(rt_cache_stat, raw_smp_processor_id())->field++)
246 245
247static int rt_intern_hash(unsigned hash, struct rtable *rth, 246static int rt_intern_hash(unsigned hash, struct rtable *rth,
248 struct rtable **res); 247 struct rtable **res);
@@ -401,7 +400,7 @@ static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
401 if (!cpu_possible(cpu)) 400 if (!cpu_possible(cpu))
402 continue; 401 continue;
403 *pos = cpu+1; 402 *pos = cpu+1;
404 return per_cpu_ptr(rt_cache_stat, cpu); 403 return &per_cpu(rt_cache_stat, cpu);
405 } 404 }
406 return NULL; 405 return NULL;
407} 406}
@@ -414,7 +413,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
414 if (!cpu_possible(cpu)) 413 if (!cpu_possible(cpu))
415 continue; 414 continue;
416 *pos = cpu+1; 415 *pos = cpu+1;
417 return per_cpu_ptr(rt_cache_stat, cpu); 416 return &per_cpu(rt_cache_stat, cpu);
418 } 417 }
419 return NULL; 418 return NULL;
420 419
@@ -3160,10 +3159,6 @@ int __init ip_rt_init(void)
3160 ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1); 3159 ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3161 ip_rt_max_size = (rt_hash_mask + 1) * 16; 3160 ip_rt_max_size = (rt_hash_mask + 1) * 16;
3162 3161
3163 rt_cache_stat = alloc_percpu(struct rt_cache_stat);
3164 if (!rt_cache_stat)
3165 return -ENOMEM;
3166
3167 devinet_init(); 3162 devinet_init();
3168 ip_fib_init(); 3163 ip_fib_init();
3169 3164
@@ -3191,7 +3186,6 @@ int __init ip_rt_init(void)
3191 if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) || 3186 if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) ||
3192 !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO, 3187 !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO,
3193 proc_net_stat))) { 3188 proc_net_stat))) {
3194 free_percpu(rt_cache_stat);
3195 return -ENOMEM; 3189 return -ENOMEM;
3196 } 3190 }
3197 rtstat_pde->proc_fops = &rt_cpu_seq_fops; 3191 rtstat_pde->proc_fops = &rt_cpu_seq_fops;
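Switching from alloc_percpu() to DEFINE_PER_CPU() makes the statistics static per-CPU data: there is no runtime allocation and therefore no failure path to unwind, which is why the -ENOMEM branches above disappear. The general pattern, shown with a hypothetical counter and the same CPU walk the seq handlers use:

static DEFINE_PER_CPU(unsigned long, my_stat);

/* fast path: bump this CPU's copy (preemption disabled) */
__get_cpu_var(my_stat)++;

/* slow path, e.g. a /proc walk: sum every possible CPU's copy */
for (cpu = 0; cpu < NR_CPUS; cpu++) {
	if (!cpu_possible(cpu))
		continue;
	total += per_cpu(my_stat, cpu);
}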
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index dfb4f145a139..d328d5986143 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2644,7 +2644,7 @@ static int if6_seq_show(struct seq_file *seq, void *v)
2644{ 2644{
2645 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v; 2645 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
2646 seq_printf(seq, 2646 seq_printf(seq,
2647 NIP6_FMT " %02x %02x %02x %02x %8s\n", 2647 NIP6_SEQFMT " %02x %02x %02x %02x %8s\n",
2648 NIP6(ifp->addr), 2648 NIP6(ifp->addr),
2649 ifp->idev->dev->ifindex, 2649 ifp->idev->dev->ifindex,
2650 ifp->prefix_len, 2650 ifp->prefix_len,
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 72bd08af2dfb..840a33d33296 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -532,7 +532,7 @@ static int ac6_seq_show(struct seq_file *seq, void *v)
532 struct ac6_iter_state *state = ac6_seq_private(seq); 532 struct ac6_iter_state *state = ac6_seq_private(seq);
533 533
534 seq_printf(seq, 534 seq_printf(seq,
535 "%-4d %-15s " NIP6_FMT " %5d\n", 535 "%-4d %-15s " NIP6_SEQFMT " %5d\n",
536 state->dev->ifindex, state->dev->name, 536 state->dev->ifindex, state->dev->name,
537 NIP6(im->aca_addr), 537 NIP6(im->aca_addr),
538 im->aca_users); 538 im->aca_users);
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 4183c8dac7f6..69cbe8a66d02 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -629,7 +629,7 @@ static void ip6fl_fl_seq_show(struct seq_file *seq, struct ip6_flowlabel *fl)
629{ 629{
630 while(fl) { 630 while(fl) {
631 seq_printf(seq, 631 seq_printf(seq,
632 "%05X %-1d %-6d %-6d %-6ld %-8ld " NIP6_FMT " %-4d\n", 632 "%05X %-1d %-6d %-6d %-6ld %-8ld " NIP6_SEQFMT " %-4d\n",
633 (unsigned)ntohl(fl->label), 633 (unsigned)ntohl(fl->label),
634 fl->share, 634 fl->share,
635 (unsigned)fl->owner, 635 (unsigned)fl->owner,
@@ -645,7 +645,7 @@ static void ip6fl_fl_seq_show(struct seq_file *seq, struct ip6_flowlabel *fl)
645static int ip6fl_seq_show(struct seq_file *seq, void *v) 645static int ip6fl_seq_show(struct seq_file *seq, void *v)
646{ 646{
647 if (v == SEQ_START_TOKEN) 647 if (v == SEQ_START_TOKEN)
648 seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-39s %s\n", 648 seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
649 "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt"); 649 "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
650 else 650 else
651 ip6fl_fl_seq_show(seq, v); 651 ip6fl_fl_seq_show(seq, v);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 0e03eabfb9da..6c05c7978bef 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2373,7 +2373,7 @@ static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
2373 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); 2373 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2374 2374
2375 seq_printf(seq, 2375 seq_printf(seq,
2376 "%-4d %-15s " NIP6_FMT " %5d %08X %ld\n", 2376 "%-4d %-15s " NIP6_SEQFMT " %5d %08X %ld\n",
2377 state->dev->ifindex, state->dev->name, 2377 state->dev->ifindex, state->dev->name,
2378 NIP6(im->mca_addr), 2378 NIP6(im->mca_addr),
2379 im->mca_users, im->mca_flags, 2379 im->mca_users, im->mca_flags,
@@ -2542,12 +2542,12 @@ static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
2542 if (v == SEQ_START_TOKEN) { 2542 if (v == SEQ_START_TOKEN) {
2543 seq_printf(seq, 2543 seq_printf(seq,
2544 "%3s %6s " 2544 "%3s %6s "
2545 "%39s %39s %6s %6s\n", "Idx", 2545 "%32s %32s %6s %6s\n", "Idx",
2546 "Device", "Multicast Address", 2546 "Device", "Multicast Address",
2547 "Source Address", "INC", "EXC"); 2547 "Source Address", "INC", "EXC");
2548 } else { 2548 } else {
2549 seq_printf(seq, 2549 seq_printf(seq,
2550 "%3d %6.6s " NIP6_FMT " " NIP6_FMT " %6lu %6lu\n", 2550 "%3d %6.6s " NIP6_SEQFMT " " NIP6_SEQFMT " %6lu %6lu\n",
2551 state->dev->ifindex, state->dev->name, 2551 state->dev->ifindex, state->dev->name,
2552 NIP6(state->im->mca_addr), 2552 NIP6(state->im->mca_addr),
2553 NIP6(psf->sf_addr), 2553 NIP6(psf->sf_addr),
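These /proc files switch from NIP6_FMT to NIP6_SEQFMT, and the column headers shrink from %-39s to %-32s to match: the seq-file format prints the address as 32 hex digits with no colons. Presumed definitions, NIP6_FMT being the long-standing colon-separated form:

#define NIP6_FMT	"%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"	/* 39 chars */
#define NIP6_SEQFMT	"%04x%04x%04x%04x%04x%04x%04x%04x"		/* 32 chars */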
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 663b4749820d..db6073c94163 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -4,7 +4,6 @@
4 4
5# Link order matters here. 5# Link order matters here.
6obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o 6obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o
7obj-$(CONFIG_IP6_NF_MATCH_LENGTH) += ip6t_length.o
8obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o 7obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
9obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o ip6t_dst.o 8obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o ip6t_dst.o
10obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o 9obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o
diff --git a/net/ipv6/netfilter/ip6t_dst.c b/net/ipv6/netfilter/ip6t_dst.c
index 80fe82669ce2..b4c153a53500 100644
--- a/net/ipv6/netfilter/ip6t_dst.c
+++ b/net/ipv6/netfilter/ip6t_dst.c
@@ -36,19 +36,19 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
36#endif 36#endif
37 37
38/* 38/*
39 * (Type & 0xC0) >> 6 39 * (Type & 0xC0) >> 6
40 * 0 -> ignorable 40 * 0 -> ignorable
41 * 1 -> must drop the packet 41 * 1 -> must drop the packet
42 * 2 -> send ICMP PARM PROB regardless and drop packet 42 * 2 -> send ICMP PARM PROB regardless and drop packet
43 * 3 -> Send ICMP if not a multicast address and drop packet 43 * 3 -> Send ICMP if not a multicast address and drop packet
44 * (Type & 0x20) >> 5 44 * (Type & 0x20) >> 5
45 * 0 -> invariant 45 * 0 -> invariant
46 * 1 -> can change the routing 46 * 1 -> can change the routing
47 * (Type & 0x1F) Type 47 * (Type & 0x1F) Type
48 * 0 -> Pad1 (only 1 byte!) 48 * 0 -> Pad1 (only 1 byte!)
49 * 1 -> PadN LENGTH info (total length = length + 2) 49 * 1 -> PadN LENGTH info (total length = length + 2)
50 * C0 | 2 -> JUMBO 4 x x x x ( xxxx > 64k ) 50 * C0 | 2 -> JUMBO 4 x x x x ( xxxx > 64k )
51 * 5 -> RTALERT 2 x x 51 * 5 -> RTALERT 2 x x
52 */ 52 */
53 53
54static int 54static int
@@ -60,16 +60,16 @@ match(const struct sk_buff *skb,
60 unsigned int protoff, 60 unsigned int protoff,
61 int *hotdrop) 61 int *hotdrop)
62{ 62{
63 struct ipv6_opt_hdr _optsh, *oh; 63 struct ipv6_opt_hdr _optsh, *oh;
64 const struct ip6t_opts *optinfo = matchinfo; 64 const struct ip6t_opts *optinfo = matchinfo;
65 unsigned int temp; 65 unsigned int temp;
66 unsigned int ptr; 66 unsigned int ptr;
67 unsigned int hdrlen = 0; 67 unsigned int hdrlen = 0;
68 unsigned int ret = 0; 68 unsigned int ret = 0;
69 u8 _opttype, *tp = NULL; 69 u8 _opttype, *tp = NULL;
70 u8 _optlen, *lp = NULL; 70 u8 _optlen, *lp = NULL;
71 unsigned int optlen; 71 unsigned int optlen;
72 72
73#if HOPBYHOP 73#if HOPBYHOP
74 if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP, NULL) < 0) 74 if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP, NULL) < 0)
75#else 75#else
@@ -77,42 +77,41 @@ match(const struct sk_buff *skb,
77#endif 77#endif
78 return 0; 78 return 0;
79 79
80 oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh); 80 oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
81 if (oh == NULL){ 81 if (oh == NULL) {
82 *hotdrop = 1; 82 *hotdrop = 1;
83 return 0; 83 return 0;
84 } 84 }
85 85
86 hdrlen = ipv6_optlen(oh); 86 hdrlen = ipv6_optlen(oh);
87 if (skb->len - ptr < hdrlen){ 87 if (skb->len - ptr < hdrlen) {
 88 /* Packet smaller than its length field */ 88 /* Packet smaller than its length field */
89 return 0; 89 return 0;
90 } 90 }
91 91
92 DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen); 92 DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen);
93 93
94 DEBUGP("len %02X %04X %02X ", 94 DEBUGP("len %02X %04X %02X ",
95 optinfo->hdrlen, hdrlen, 95 optinfo->hdrlen, hdrlen,
96 (!(optinfo->flags & IP6T_OPTS_LEN) || 96 (!(optinfo->flags & IP6T_OPTS_LEN) ||
97 ((optinfo->hdrlen == hdrlen) ^ 97 ((optinfo->hdrlen == hdrlen) ^
98 !!(optinfo->invflags & IP6T_OPTS_INV_LEN)))); 98 !!(optinfo->invflags & IP6T_OPTS_INV_LEN))));
99 99
100 ret = (oh != NULL) 100 ret = (oh != NULL) &&
101 && 101 (!(optinfo->flags & IP6T_OPTS_LEN) ||
102 (!(optinfo->flags & IP6T_OPTS_LEN) || 102 ((optinfo->hdrlen == hdrlen) ^
103 ((optinfo->hdrlen == hdrlen) ^ 103 !!(optinfo->invflags & IP6T_OPTS_INV_LEN)));
104 !!(optinfo->invflags & IP6T_OPTS_INV_LEN))); 104
105 105 ptr += 2;
106 ptr += 2; 106 hdrlen -= 2;
107 hdrlen -= 2; 107 if (!(optinfo->flags & IP6T_OPTS_OPTS)) {
108 if ( !(optinfo->flags & IP6T_OPTS_OPTS) ){ 108 return ret;
109 return ret;
110 } else if (optinfo->flags & IP6T_OPTS_NSTRICT) { 109 } else if (optinfo->flags & IP6T_OPTS_NSTRICT) {
111 DEBUGP("Not strict - not implemented"); 110 DEBUGP("Not strict - not implemented");
112 } else { 111 } else {
113 DEBUGP("Strict "); 112 DEBUGP("Strict ");
114 DEBUGP("#%d ",optinfo->optsnr); 113 DEBUGP("#%d ", optinfo->optsnr);
115 for(temp=0; temp<optinfo->optsnr; temp++){ 114 for (temp = 0; temp < optinfo->optsnr; temp++) {
116 /* type field exists ? */ 115 /* type field exists ? */
117 if (hdrlen < 1) 116 if (hdrlen < 1)
118 break; 117 break;
@@ -122,10 +121,10 @@ match(const struct sk_buff *skb,
122 break; 121 break;
123 122
124 /* Type check */ 123 /* Type check */
125 if (*tp != (optinfo->opts[temp] & 0xFF00)>>8){ 124 if (*tp != (optinfo->opts[temp] & 0xFF00) >> 8) {
126 DEBUGP("Tbad %02X %02X\n", 125 DEBUGP("Tbad %02X %02X\n",
127 *tp, 126 *tp,
128 (optinfo->opts[temp] & 0xFF00)>>8); 127 (optinfo->opts[temp] & 0xFF00) >> 8);
129 return 0; 128 return 0;
130 } else { 129 } else {
131 DEBUGP("Tok "); 130 DEBUGP("Tok ");
@@ -169,7 +168,8 @@ match(const struct sk_buff *skb,
169 } 168 }
170 if (temp == optinfo->optsnr) 169 if (temp == optinfo->optsnr)
171 return ret; 170 return ret;
172 else return 0; 171 else
172 return 0;
173 } 173 }
174 174
175 return 0; 175 return 0;
@@ -178,25 +178,24 @@ match(const struct sk_buff *skb,
178/* Called when user tries to insert an entry of this type. */ 178/* Called when user tries to insert an entry of this type. */
179static int 179static int
180checkentry(const char *tablename, 180checkentry(const char *tablename,
181 const void *info, 181 const void *info,
182 void *matchinfo, 182 void *matchinfo,
183 unsigned int matchinfosize, 183 unsigned int matchinfosize,
184 unsigned int hook_mask) 184 unsigned int hook_mask)
185{ 185{
186 const struct ip6t_opts *optsinfo = matchinfo; 186 const struct ip6t_opts *optsinfo = matchinfo;
187 187
188 if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) { 188 if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) {
189 DEBUGP("ip6t_opts: matchsize %u != %u\n", 189 DEBUGP("ip6t_opts: matchsize %u != %u\n",
190 matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts))); 190 matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts)));
191 return 0; 191 return 0;
192 } 192 }
193 if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) { 193 if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) {
194 DEBUGP("ip6t_opts: unknown flags %X\n", 194 DEBUGP("ip6t_opts: unknown flags %X\n", optsinfo->invflags);
195 optsinfo->invflags); 195 return 0;
196 return 0; 196 }
197 } 197
198 198 return 1;
199 return 1;
200} 199}
201 200
202static struct ip6t_match opts_match = { 201static struct ip6t_match opts_match = {
@@ -212,12 +211,12 @@ static struct ip6t_match opts_match = {
212 211
213static int __init init(void) 212static int __init init(void)
214{ 213{
215 return ip6t_register_match(&opts_match); 214 return ip6t_register_match(&opts_match);
216} 215}
217 216
218static void __exit cleanup(void) 217static void __exit cleanup(void)
219{ 218{
220 ip6t_unregister_match(&opts_match); 219 ip6t_unregister_match(&opts_match);
221} 220}
222 221
223module_init(init); 222module_init(init);
diff --git a/net/ipv6/netfilter/ip6t_eui64.c b/net/ipv6/netfilter/ip6t_eui64.c
index ddf5f571909c..27396ac0b9ed 100644
--- a/net/ipv6/netfilter/ip6t_eui64.c
+++ b/net/ipv6/netfilter/ip6t_eui64.c
@@ -27,45 +27,45 @@ match(const struct sk_buff *skb,
27 unsigned int protoff, 27 unsigned int protoff,
28 int *hotdrop) 28 int *hotdrop)
29{ 29{
30 unsigned char eui64[8];
31 int i = 0;
30 32
31 unsigned char eui64[8]; 33 if (!(skb->mac.raw >= skb->head &&
32 int i=0; 34 (skb->mac.raw + ETH_HLEN) <= skb->data) &&
33 35 offset != 0) {
34 if ( !(skb->mac.raw >= skb->head 36 *hotdrop = 1;
35 && (skb->mac.raw + ETH_HLEN) <= skb->data) 37 return 0;
36 && offset != 0) { 38 }
37 *hotdrop = 1; 39
38 return 0; 40 memset(eui64, 0, sizeof(eui64));
39 } 41
40 42 if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) {
41 memset(eui64, 0, sizeof(eui64)); 43 if (skb->nh.ipv6h->version == 0x6) {
42 44 memcpy(eui64, eth_hdr(skb)->h_source, 3);
43 if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) { 45 memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3);
44 if (skb->nh.ipv6h->version == 0x6) { 46 eui64[3] = 0xff;
45 memcpy(eui64, eth_hdr(skb)->h_source, 3); 47 eui64[4] = 0xfe;
46 memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3); 48 eui64[0] |= 0x02;
47 eui64[3]=0xff; 49
48 eui64[4]=0xfe; 50 i = 0;
49 eui64[0] |= 0x02; 51 while ((skb->nh.ipv6h->saddr.s6_addr[8+i] == eui64[i])
50 52 && (i < 8))
51 i=0; 53 i++;
52 while ((skb->nh.ipv6h->saddr.s6_addr[8+i] == 54
53 eui64[i]) && (i<8)) i++; 55 if (i == 8)
54 56 return 1;
55 if ( i == 8 ) 57 }
56 return 1; 58 }
57 } 59
58 } 60 return 0;
59
60 return 0;
61} 61}
62 62
63static int 63static int
64ip6t_eui64_checkentry(const char *tablename, 64ip6t_eui64_checkentry(const char *tablename,
65 const void *ip, 65 const void *ip,
66 void *matchinfo, 66 void *matchinfo,
67 unsigned int matchsize, 67 unsigned int matchsize,
68 unsigned int hook_mask) 68 unsigned int hook_mask)
69{ 69{
70 if (hook_mask 70 if (hook_mask
71 & ~((1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_IN) | 71 & ~((1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_IN) |
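The match rebuilds the EUI-64 interface identifier from the Ethernet source address (insert ff:fe between the OUI and NIC halves, set the universal/local bit) and compares it with bytes 8..15 of the IPv6 source address. A worked example with an illustrative MAC, mirroring the reformatted code above:

/* MAC 00:11:22:33:44:55
 *   split:    00:11:22 | 33:44:55
 *   insert:   00:11:22:ff:fe:33:44:55
 *   flip U/L: 02:11:22:ff:fe:33:44:55
 * which matches an IPv6 source ending ...0211:22ff:fe33:4455 */
memcpy(eui64, mac, 3);		/* OUI half */
memcpy(eui64 + 5, mac + 3, 3);	/* NIC half */
eui64[3] = 0xff;
eui64[4] = 0xfe;
eui64[0] |= 0x02;		/* universal/local bit */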
diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c
index a9964b946ed5..4c14125a0e26 100644
--- a/net/ipv6/netfilter/ip6t_frag.c
+++ b/net/ipv6/netfilter/ip6t_frag.c
@@ -31,12 +31,12 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
31static inline int 31static inline int
32id_match(u_int32_t min, u_int32_t max, u_int32_t id, int invert) 32id_match(u_int32_t min, u_int32_t max, u_int32_t id, int invert)
33{ 33{
34 int r=0; 34 int r = 0;
35 DEBUGP("frag id_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ', 35 DEBUGP("frag id_match:%c 0x%x <= 0x%x <= 0x%x", invert ? '!' : ' ',
36 min,id,max); 36 min, id, max);
37 r=(id >= min && id <= max) ^ invert; 37 r = (id >= min && id <= max) ^ invert;
38 DEBUGP(" result %s\n",r? "PASS" : "FAILED"); 38 DEBUGP(" result %s\n", r ? "PASS" : "FAILED");
39 return r; 39 return r;
40} 40}
41 41
42static int 42static int
@@ -48,92 +48,91 @@ match(const struct sk_buff *skb,
48 unsigned int protoff, 48 unsigned int protoff,
49 int *hotdrop) 49 int *hotdrop)
50{ 50{
51 struct frag_hdr _frag, *fh; 51 struct frag_hdr _frag, *fh;
52 const struct ip6t_frag *fraginfo = matchinfo; 52 const struct ip6t_frag *fraginfo = matchinfo;
53 unsigned int ptr; 53 unsigned int ptr;
54 54
55 if (ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL) < 0) 55 if (ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL) < 0)
56 return 0; 56 return 0;
57 57
58 fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag); 58 fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag);
59 if (fh == NULL){ 59 if (fh == NULL) {
60 *hotdrop = 1; 60 *hotdrop = 1;
61 return 0; 61 return 0;
62 } 62 }
63 63
64 DEBUGP("INFO %04X ", fh->frag_off); 64 DEBUGP("INFO %04X ", fh->frag_off);
65 DEBUGP("OFFSET %04X ", ntohs(fh->frag_off) & ~0x7); 65 DEBUGP("OFFSET %04X ", ntohs(fh->frag_off) & ~0x7);
66 DEBUGP("RES %02X %04X", fh->reserved, ntohs(fh->frag_off) & 0x6); 66 DEBUGP("RES %02X %04X", fh->reserved, ntohs(fh->frag_off) & 0x6);
67 DEBUGP("MF %04X ", fh->frag_off & htons(IP6_MF)); 67 DEBUGP("MF %04X ", fh->frag_off & htons(IP6_MF));
68 DEBUGP("ID %u %08X\n", ntohl(fh->identification), 68 DEBUGP("ID %u %08X\n", ntohl(fh->identification),
69 ntohl(fh->identification)); 69 ntohl(fh->identification));
70 70
71 DEBUGP("IPv6 FRAG id %02X ", 71 DEBUGP("IPv6 FRAG id %02X ",
72 (id_match(fraginfo->ids[0], fraginfo->ids[1], 72 (id_match(fraginfo->ids[0], fraginfo->ids[1],
73 ntohl(fh->identification), 73 ntohl(fh->identification),
74 !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)))); 74 !!(fraginfo->invflags & IP6T_FRAG_INV_IDS))));
75 DEBUGP("res %02X %02X%04X %02X ", 75 DEBUGP("res %02X %02X%04X %02X ",
76 (fraginfo->flags & IP6T_FRAG_RES), fh->reserved, 76 (fraginfo->flags & IP6T_FRAG_RES), fh->reserved,
77 ntohs(fh->frag_off) & 0x6, 77 ntohs(fh->frag_off) & 0x6,
78 !((fraginfo->flags & IP6T_FRAG_RES) 78 !((fraginfo->flags & IP6T_FRAG_RES)
79 && (fh->reserved || (ntohs(fh->frag_off) & 0x06)))); 79 && (fh->reserved || (ntohs(fh->frag_off) & 0x06))));
80 DEBUGP("first %02X %02X %02X ", 80 DEBUGP("first %02X %02X %02X ",
81 (fraginfo->flags & IP6T_FRAG_FST), 81 (fraginfo->flags & IP6T_FRAG_FST),
82 ntohs(fh->frag_off) & ~0x7, 82 ntohs(fh->frag_off) & ~0x7,
83 !((fraginfo->flags & IP6T_FRAG_FST) 83 !((fraginfo->flags & IP6T_FRAG_FST)
84 && (ntohs(fh->frag_off) & ~0x7))); 84 && (ntohs(fh->frag_off) & ~0x7)));
85 DEBUGP("mf %02X %02X %02X ", 85 DEBUGP("mf %02X %02X %02X ",
86 (fraginfo->flags & IP6T_FRAG_MF), 86 (fraginfo->flags & IP6T_FRAG_MF),
87 ntohs(fh->frag_off) & IP6_MF, 87 ntohs(fh->frag_off) & IP6_MF,
88 !((fraginfo->flags & IP6T_FRAG_MF) 88 !((fraginfo->flags & IP6T_FRAG_MF)
89 && !((ntohs(fh->frag_off) & IP6_MF)))); 89 && !((ntohs(fh->frag_off) & IP6_MF))));
90 DEBUGP("last %02X %02X %02X\n", 90 DEBUGP("last %02X %02X %02X\n",
91 (fraginfo->flags & IP6T_FRAG_NMF), 91 (fraginfo->flags & IP6T_FRAG_NMF),
92 ntohs(fh->frag_off) & IP6_MF, 92 ntohs(fh->frag_off) & IP6_MF,
93 !((fraginfo->flags & IP6T_FRAG_NMF) 93 !((fraginfo->flags & IP6T_FRAG_NMF)
94 && (ntohs(fh->frag_off) & IP6_MF))); 94 && (ntohs(fh->frag_off) & IP6_MF)));
95 95
96 return (fh != NULL) 96 return (fh != NULL)
97 && 97 &&
98 (id_match(fraginfo->ids[0], fraginfo->ids[1], 98 (id_match(fraginfo->ids[0], fraginfo->ids[1],
99 ntohl(fh->identification), 99 ntohl(fh->identification),
100 !!(fraginfo->invflags & IP6T_FRAG_INV_IDS))) 100 !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)))
101 && 101 &&
102 !((fraginfo->flags & IP6T_FRAG_RES) 102 !((fraginfo->flags & IP6T_FRAG_RES)
103 && (fh->reserved || (ntohs(fh->frag_off) & 0x6))) 103 && (fh->reserved || (ntohs(fh->frag_off) & 0x6)))
104 && 104 &&
105 !((fraginfo->flags & IP6T_FRAG_FST) 105 !((fraginfo->flags & IP6T_FRAG_FST)
106 && (ntohs(fh->frag_off) & ~0x7)) 106 && (ntohs(fh->frag_off) & ~0x7))
107 && 107 &&
108 !((fraginfo->flags & IP6T_FRAG_MF) 108 !((fraginfo->flags & IP6T_FRAG_MF)
109 && !(ntohs(fh->frag_off) & IP6_MF)) 109 && !(ntohs(fh->frag_off) & IP6_MF))
110 && 110 &&
111 !((fraginfo->flags & IP6T_FRAG_NMF) 111 !((fraginfo->flags & IP6T_FRAG_NMF)
112 && (ntohs(fh->frag_off) & IP6_MF)); 112 && (ntohs(fh->frag_off) & IP6_MF));
113} 113}
114 114
115/* Called when user tries to insert an entry of this type. */ 115/* Called when user tries to insert an entry of this type. */
116static int 116static int
117checkentry(const char *tablename, 117checkentry(const char *tablename,
118 const void *ip, 118 const void *ip,
119 void *matchinfo, 119 void *matchinfo,
120 unsigned int matchinfosize, 120 unsigned int matchinfosize,
121 unsigned int hook_mask) 121 unsigned int hook_mask)
122{ 122{
123 const struct ip6t_frag *fraginfo = matchinfo; 123 const struct ip6t_frag *fraginfo = matchinfo;
124 124
125 if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_frag))) { 125 if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_frag))) {
126 DEBUGP("ip6t_frag: matchsize %u != %u\n", 126 DEBUGP("ip6t_frag: matchsize %u != %u\n",
127 matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_frag))); 127 matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_frag)));
128 return 0; 128 return 0;
129 } 129 }
130 if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) { 130 if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) {
131 DEBUGP("ip6t_frag: unknown flags %X\n", 131 DEBUGP("ip6t_frag: unknown flags %X\n", fraginfo->invflags);
132 fraginfo->invflags); 132 return 0;
133 return 0; 133 }
134 } 134
135 135 return 1;
136 return 1;
137} 136}
138 137
139static struct ip6t_match frag_match = { 138static struct ip6t_match frag_match = {
@@ -145,12 +144,12 @@ static struct ip6t_match frag_match = {
145 144
146static int __init init(void) 145static int __init init(void)
147{ 146{
148 return ip6t_register_match(&frag_match); 147 return ip6t_register_match(&frag_match);
149} 148}
150 149
151static void __exit cleanup(void) 150static void __exit cleanup(void)
152{ 151{
153 ip6t_unregister_match(&frag_match); 152 ip6t_unregister_match(&frag_match);
154} 153}
155 154
156module_init(init); 155module_init(init);
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c
index ed8ded18bbd4..37a8474a7e0c 100644
--- a/net/ipv6/netfilter/ip6t_hbh.c
+++ b/net/ipv6/netfilter/ip6t_hbh.c
@@ -36,19 +36,19 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
36#endif 36#endif
37 37
38/* 38/*
39 * (Type & 0xC0) >> 6 39 * (Type & 0xC0) >> 6
40 * 0 -> ignorable 40 * 0 -> ignorable
41 * 1 -> must drop the packet 41 * 1 -> must drop the packet
42 * 2 -> send ICMP PARM PROB regardless and drop packet 42 * 2 -> send ICMP PARM PROB regardless and drop packet
43 * 3 -> Send ICMP if not a multicast address and drop packet 43 * 3 -> Send ICMP if not a multicast address and drop packet
44 * (Type & 0x20) >> 5 44 * (Type & 0x20) >> 5
45 * 0 -> invariant 45 * 0 -> invariant
46 * 1 -> can change the routing 46 * 1 -> can change the routing
47 * (Type & 0x1F) Type 47 * (Type & 0x1F) Type
48 * 0 -> Pad1 (only 1 byte!) 48 * 0 -> Pad1 (only 1 byte!)
49 * 1 -> PadN LENGTH info (total length = length + 2) 49 * 1 -> PadN LENGTH info (total length = length + 2)
50 * C0 | 2 -> JUMBO 4 x x x x ( xxxx > 64k ) 50 * C0 | 2 -> JUMBO 4 x x x x ( xxxx > 64k )
51 * 5 -> RTALERT 2 x x 51 * 5 -> RTALERT 2 x x
52 */ 52 */
53 53
54static int 54static int
@@ -60,16 +60,16 @@ match(const struct sk_buff *skb,
60 unsigned int protoff, 60 unsigned int protoff,
61 int *hotdrop) 61 int *hotdrop)
62{ 62{
63 struct ipv6_opt_hdr _optsh, *oh; 63 struct ipv6_opt_hdr _optsh, *oh;
64 const struct ip6t_opts *optinfo = matchinfo; 64 const struct ip6t_opts *optinfo = matchinfo;
65 unsigned int temp; 65 unsigned int temp;
66 unsigned int ptr; 66 unsigned int ptr;
67 unsigned int hdrlen = 0; 67 unsigned int hdrlen = 0;
68 unsigned int ret = 0; 68 unsigned int ret = 0;
69 u8 _opttype, *tp = NULL; 69 u8 _opttype, *tp = NULL;
70 u8 _optlen, *lp = NULL; 70 u8 _optlen, *lp = NULL;
71 unsigned int optlen; 71 unsigned int optlen;
72 72
73#if HOPBYHOP 73#if HOPBYHOP
74 if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP, NULL) < 0) 74 if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP, NULL) < 0)
75#else 75#else
@@ -77,42 +77,41 @@ match(const struct sk_buff *skb,
77#endif 77#endif
78 return 0; 78 return 0;
79 79
80 oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh); 80 oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
81 if (oh == NULL){ 81 if (oh == NULL) {
82 *hotdrop = 1; 82 *hotdrop = 1;
83 return 0; 83 return 0;
84 } 84 }
85 85
86 hdrlen = ipv6_optlen(oh); 86 hdrlen = ipv6_optlen(oh);
87 if (skb->len - ptr < hdrlen){ 87 if (skb->len - ptr < hdrlen) {
 88 /* Packet smaller than its length field */ 88 /* Packet smaller than its length field */
89 return 0; 89 return 0;
90 } 90 }
91 91
92 DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen); 92 DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen);
93 93
94 DEBUGP("len %02X %04X %02X ", 94 DEBUGP("len %02X %04X %02X ",
95 optinfo->hdrlen, hdrlen, 95 optinfo->hdrlen, hdrlen,
96 (!(optinfo->flags & IP6T_OPTS_LEN) || 96 (!(optinfo->flags & IP6T_OPTS_LEN) ||
97 ((optinfo->hdrlen == hdrlen) ^ 97 ((optinfo->hdrlen == hdrlen) ^
98 !!(optinfo->invflags & IP6T_OPTS_INV_LEN)))); 98 !!(optinfo->invflags & IP6T_OPTS_INV_LEN))));
99 99
100 ret = (oh != NULL) 100 ret = (oh != NULL) &&
101 && 101 (!(optinfo->flags & IP6T_OPTS_LEN) ||
102 (!(optinfo->flags & IP6T_OPTS_LEN) || 102 ((optinfo->hdrlen == hdrlen) ^
103 ((optinfo->hdrlen == hdrlen) ^ 103 !!(optinfo->invflags & IP6T_OPTS_INV_LEN)));
104 !!(optinfo->invflags & IP6T_OPTS_INV_LEN))); 104
105 105 ptr += 2;
106 ptr += 2; 106 hdrlen -= 2;
107 hdrlen -= 2; 107 if (!(optinfo->flags & IP6T_OPTS_OPTS)) {
108 if ( !(optinfo->flags & IP6T_OPTS_OPTS) ){ 108 return ret;
109 return ret;
110 } else if (optinfo->flags & IP6T_OPTS_NSTRICT) { 109 } else if (optinfo->flags & IP6T_OPTS_NSTRICT) {
111 DEBUGP("Not strict - not implemented"); 110 DEBUGP("Not strict - not implemented");
112 } else { 111 } else {
113 DEBUGP("Strict "); 112 DEBUGP("Strict ");
114 DEBUGP("#%d ",optinfo->optsnr); 113 DEBUGP("#%d ", optinfo->optsnr);
115 for(temp=0; temp<optinfo->optsnr; temp++){ 114 for (temp = 0; temp < optinfo->optsnr; temp++) {
116 /* type field exists ? */ 115 /* type field exists ? */
117 if (hdrlen < 1) 116 if (hdrlen < 1)
118 break; 117 break;
@@ -122,10 +121,10 @@ match(const struct sk_buff *skb,
122 break; 121 break;
123 122
124 /* Type check */ 123 /* Type check */
125 if (*tp != (optinfo->opts[temp] & 0xFF00)>>8){ 124 if (*tp != (optinfo->opts[temp] & 0xFF00) >> 8) {
126 DEBUGP("Tbad %02X %02X\n", 125 DEBUGP("Tbad %02X %02X\n",
127 *tp, 126 *tp,
128 (optinfo->opts[temp] & 0xFF00)>>8); 127 (optinfo->opts[temp] & 0xFF00) >> 8);
129 return 0; 128 return 0;
130 } else { 129 } else {
131 DEBUGP("Tok "); 130 DEBUGP("Tok ");
@@ -169,7 +168,8 @@ match(const struct sk_buff *skb,
169 } 168 }
170 if (temp == optinfo->optsnr) 169 if (temp == optinfo->optsnr)
171 return ret; 170 return ret;
172 else return 0; 171 else
172 return 0;
173 } 173 }
174 174
175 return 0; 175 return 0;
@@ -178,25 +178,24 @@ match(const struct sk_buff *skb,
178/* Called when user tries to insert an entry of this type. */ 178/* Called when user tries to insert an entry of this type. */
179static int 179static int
180checkentry(const char *tablename, 180checkentry(const char *tablename,
181 const void *entry, 181 const void *entry,
182 void *matchinfo, 182 void *matchinfo,
183 unsigned int matchinfosize, 183 unsigned int matchinfosize,
184 unsigned int hook_mask) 184 unsigned int hook_mask)
185{ 185{
186 const struct ip6t_opts *optsinfo = matchinfo; 186 const struct ip6t_opts *optsinfo = matchinfo;
187 187
188 if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) { 188 if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) {
189 DEBUGP("ip6t_opts: matchsize %u != %u\n", 189 DEBUGP("ip6t_opts: matchsize %u != %u\n",
190 matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts))); 190 matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts)));
191 return 0; 191 return 0;
192 } 192 }
193 if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) { 193 if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) {
194 DEBUGP("ip6t_opts: unknown flags %X\n", 194 DEBUGP("ip6t_opts: unknown flags %X\n", optsinfo->invflags);
195 optsinfo->invflags); 195 return 0;
196 return 0; 196 }
197 } 197
198 198 return 1;
199 return 1;
200} 199}
201 200
202static struct ip6t_match opts_match = { 201static struct ip6t_match opts_match = {
@@ -212,12 +211,12 @@ static struct ip6t_match opts_match = {
212 211
213static int __init init(void) 212static int __init init(void)
214{ 213{
215 return ip6t_register_match(&opts_match); 214 return ip6t_register_match(&opts_match);
216} 215}
217 216
218static void __exit cleanup(void) 217static void __exit cleanup(void)
219{ 218{
220 ip6t_unregister_match(&opts_match); 219 ip6t_unregister_match(&opts_match);
221} 220}
222 221
223module_init(init); 222module_init(init);
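ip6t_hbh.c above receives byte-for-byte the same cleanup as ip6t_dst.c earlier: the two matches are built from one source file, with HOPBYHOP selecting which extension header is searched for. The visible #if covers the hop-by-hop case; the #else branch, elided between the hunks, presumably selects the destination-options header:

#if HOPBYHOP
	if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP, NULL) < 0)
#else
	if (ipv6_find_hdr(skb, &ptr, NEXTHDR_DEST, NULL) < 0)	/* assumed */
#endif
		return 0;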
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c
index fda1ceaf5a29..83ad6b272f7e 100644
--- a/net/ipv6/netfilter/ip6t_ipv6header.c
+++ b/net/ipv6/netfilter/ip6t_ipv6header.c
@@ -50,20 +50,20 @@ ipv6header_match(const struct sk_buff *skb,
50 len = skb->len - ptr; 50 len = skb->len - ptr;
51 temp = 0; 51 temp = 0;
52 52
53 while (ip6t_ext_hdr(nexthdr)) { 53 while (ip6t_ext_hdr(nexthdr)) {
54 struct ipv6_opt_hdr _hdr, *hp; 54 struct ipv6_opt_hdr _hdr, *hp;
55 int hdrlen; 55 int hdrlen;
56 56
57 /* Is there enough space for the next ext header? */ 57 /* Is there enough space for the next ext header? */
58 if (len < (int)sizeof(struct ipv6_opt_hdr)) 58 if (len < (int)sizeof(struct ipv6_opt_hdr))
59 return 0; 59 return 0;
60 /* No more exthdr -> evaluate */ 60 /* No more exthdr -> evaluate */
61 if (nexthdr == NEXTHDR_NONE) { 61 if (nexthdr == NEXTHDR_NONE) {
62 temp |= MASK_NONE; 62 temp |= MASK_NONE;
63 break; 63 break;
64 } 64 }
65 /* ESP -> evaluate */ 65 /* ESP -> evaluate */
66 if (nexthdr == NEXTHDR_ESP) { 66 if (nexthdr == NEXTHDR_ESP) {
67 temp |= MASK_ESP; 67 temp |= MASK_ESP;
68 break; 68 break;
69 } 69 }
@@ -72,43 +72,43 @@ ipv6header_match(const struct sk_buff *skb,
72 BUG_ON(hp == NULL); 72 BUG_ON(hp == NULL);
73 73
74 /* Calculate the header length */ 74 /* Calculate the header length */
75 if (nexthdr == NEXTHDR_FRAGMENT) { 75 if (nexthdr == NEXTHDR_FRAGMENT) {
76 hdrlen = 8; 76 hdrlen = 8;
77 } else if (nexthdr == NEXTHDR_AUTH) 77 } else if (nexthdr == NEXTHDR_AUTH)
78 hdrlen = (hp->hdrlen+2)<<2; 78 hdrlen = (hp->hdrlen + 2) << 2;
79 else 79 else
80 hdrlen = ipv6_optlen(hp); 80 hdrlen = ipv6_optlen(hp);
81 81
82 /* set the flag */ 82 /* set the flag */
83 switch (nexthdr){ 83 switch (nexthdr) {
84 case NEXTHDR_HOP: 84 case NEXTHDR_HOP:
85 temp |= MASK_HOPOPTS; 85 temp |= MASK_HOPOPTS;
86 break; 86 break;
87 case NEXTHDR_ROUTING: 87 case NEXTHDR_ROUTING:
88 temp |= MASK_ROUTING; 88 temp |= MASK_ROUTING;
89 break; 89 break;
90 case NEXTHDR_FRAGMENT: 90 case NEXTHDR_FRAGMENT:
91 temp |= MASK_FRAGMENT; 91 temp |= MASK_FRAGMENT;
92 break; 92 break;
93 case NEXTHDR_AUTH: 93 case NEXTHDR_AUTH:
94 temp |= MASK_AH; 94 temp |= MASK_AH;
95 break; 95 break;
96 case NEXTHDR_DEST: 96 case NEXTHDR_DEST:
97 temp |= MASK_DSTOPTS; 97 temp |= MASK_DSTOPTS;
98 break; 98 break;
99 default: 99 default:
100 return 0; 100 return 0;
101 break; 101 break;
102 } 102 }
103 103
104 nexthdr = hp->nexthdr; 104 nexthdr = hp->nexthdr;
105 len -= hdrlen; 105 len -= hdrlen;
106 ptr += hdrlen; 106 ptr += hdrlen;
107 if (ptr > skb->len) 107 if (ptr > skb->len)
108 break; 108 break;
109 } 109 }
110 110
111 if ( (nexthdr != NEXTHDR_NONE ) && (nexthdr != NEXTHDR_ESP) ) 111 if ((nexthdr != NEXTHDR_NONE) && (nexthdr != NEXTHDR_ESP))
112 temp |= MASK_PROTO; 112 temp |= MASK_PROTO;
113 113
114 if (info->modeflag) 114 if (info->modeflag)
@@ -137,8 +137,8 @@ ipv6header_checkentry(const char *tablename,
137 return 0; 137 return 0;
138 138
139 /* invflags is 0 or 0xff in hard mode */ 139 /* invflags is 0 or 0xff in hard mode */
140 if ((!info->modeflag) && info->invflags != 0x00 140 if ((!info->modeflag) && info->invflags != 0x00 &&
141 && info->invflags != 0xFF) 141 info->invflags != 0xFF)
142 return 0; 142 return 0;
143 143
144 return 1; 144 return 1;
@@ -152,7 +152,7 @@ static struct ip6t_match ip6t_ipv6header_match = {
152 .me = THIS_MODULE, 152 .me = THIS_MODULE,
153}; 153};
154 154
155static int __init ipv6header_init(void) 155static int __init ipv6header_init(void)
156{ 156{
157 return ip6t_register_match(&ip6t_ipv6header_match); 157 return ip6t_register_match(&ip6t_ipv6header_match);
158} 158}
@@ -164,4 +164,3 @@ static void __exit ipv6header_exit(void)
164 164
165module_init(ipv6header_init); 165module_init(ipv6header_init);
166module_exit(ipv6header_exit); 166module_exit(ipv6header_exit);
167
diff --git a/net/ipv6/netfilter/ip6t_owner.c b/net/ipv6/netfilter/ip6t_owner.c
index 5409b375b512..8c8a4c7ec934 100644
--- a/net/ipv6/netfilter/ip6t_owner.c
+++ b/net/ipv6/netfilter/ip6t_owner.c
@@ -36,14 +36,14 @@ match(const struct sk_buff *skb,
36 if (!skb->sk || !skb->sk->sk_socket || !skb->sk->sk_socket->file) 36 if (!skb->sk || !skb->sk->sk_socket || !skb->sk->sk_socket->file)
37 return 0; 37 return 0;
38 38
39 if(info->match & IP6T_OWNER_UID) { 39 if (info->match & IP6T_OWNER_UID) {
40 if((skb->sk->sk_socket->file->f_uid != info->uid) ^ 40 if ((skb->sk->sk_socket->file->f_uid != info->uid) ^
41 !!(info->invert & IP6T_OWNER_UID)) 41 !!(info->invert & IP6T_OWNER_UID))
42 return 0; 42 return 0;
43 } 43 }
44 44
45 if(info->match & IP6T_OWNER_GID) { 45 if (info->match & IP6T_OWNER_GID) {
46 if((skb->sk->sk_socket->file->f_gid != info->gid) ^ 46 if ((skb->sk->sk_socket->file->f_gid != info->gid) ^
47 !!(info->invert & IP6T_OWNER_GID)) 47 !!(info->invert & IP6T_OWNER_GID))
48 return 0; 48 return 0;
49 } 49 }
@@ -53,23 +53,23 @@ match(const struct sk_buff *skb,
53 53
54static int 54static int
55checkentry(const char *tablename, 55checkentry(const char *tablename,
56 const void *ip, 56 const void *ip,
57 void *matchinfo, 57 void *matchinfo,
58 unsigned int matchsize, 58 unsigned int matchsize,
59 unsigned int hook_mask) 59 unsigned int hook_mask)
60{ 60{
61 const struct ip6t_owner_info *info = matchinfo; 61 const struct ip6t_owner_info *info = matchinfo;
62 62
63 if (hook_mask 63 if (hook_mask
64 & ~((1 << NF_IP6_LOCAL_OUT) | (1 << NF_IP6_POST_ROUTING))) { 64 & ~((1 << NF_IP6_LOCAL_OUT) | (1 << NF_IP6_POST_ROUTING))) {
65 printk("ip6t_owner: only valid for LOCAL_OUT or POST_ROUTING.\n"); 65 printk("ip6t_owner: only valid for LOCAL_OUT or POST_ROUTING.\n");
66 return 0; 66 return 0;
67 } 67 }
68 68
69 if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_owner_info))) 69 if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_owner_info)))
70 return 0; 70 return 0;
71 71
72 if (info->match & (IP6T_OWNER_PID|IP6T_OWNER_SID)) { 72 if (info->match & (IP6T_OWNER_PID | IP6T_OWNER_SID)) {
73 printk("ipt_owner: pid and sid matching " 73 printk("ipt_owner: pid and sid matching "
74 "not supported anymore\n"); 74 "not supported anymore\n");
75 return 0; 75 return 0;
diff --git a/net/ipv6/netfilter/ip6t_policy.c b/net/ipv6/netfilter/ip6t_policy.c
index 13fedad48c1d..afe1cc4c18a5 100644
--- a/net/ipv6/netfilter/ip6t_policy.c
+++ b/net/ipv6/netfilter/ip6t_policy.c
@@ -118,7 +118,7 @@ static int match(const struct sk_buff *skb,
118 return ret; 118 return ret;
119} 119}
120 120
121static int checkentry(const char *tablename, const struct ip6t_ip6 *ip, 121static int checkentry(const char *tablename, const void *ip_void,
122 void *matchinfo, unsigned int matchsize, 122 void *matchinfo, unsigned int matchsize,
123 unsigned int hook_mask) 123 unsigned int hook_mask)
124{ 124{
diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c
index 8465b4375855..8f82476dc89e 100644
--- a/net/ipv6/netfilter/ip6t_rt.c
+++ b/net/ipv6/netfilter/ip6t_rt.c
@@ -33,12 +33,12 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
33static inline int 33static inline int
34segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, int invert) 34segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, int invert)
35{ 35{
36 int r=0; 36 int r = 0;
37 DEBUGP("rt segsleft_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ', 37 DEBUGP("rt segsleft_match:%c 0x%x <= 0x%x <= 0x%x",
38 min,id,max); 38 invert ? '!' : ' ', min, id, max);
39 r=(id >= min && id <= max) ^ invert; 39 r = (id >= min && id <= max) ^ invert;
40 DEBUGP(" result %s\n",r? "PASS" : "FAILED"); 40 DEBUGP(" result %s\n", r ? "PASS" : "FAILED");
41 return r; 41 return r;
42} 42}
43 43
44static int 44static int
@@ -50,87 +50,93 @@ match(const struct sk_buff *skb,
50 unsigned int protoff, 50 unsigned int protoff,
51 int *hotdrop) 51 int *hotdrop)
52{ 52{
53 struct ipv6_rt_hdr _route, *rh; 53 struct ipv6_rt_hdr _route, *rh;
54 const struct ip6t_rt *rtinfo = matchinfo; 54 const struct ip6t_rt *rtinfo = matchinfo;
55 unsigned int temp; 55 unsigned int temp;
56 unsigned int ptr; 56 unsigned int ptr;
57 unsigned int hdrlen = 0; 57 unsigned int hdrlen = 0;
58 unsigned int ret = 0; 58 unsigned int ret = 0;
59 struct in6_addr *ap, _addr; 59 struct in6_addr *ap, _addr;
60 60
61 if (ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL) < 0) 61 if (ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL) < 0)
62 return 0; 62 return 0;
63 63
64 rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route); 64 rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route);
65 if (rh == NULL){ 65 if (rh == NULL) {
66 *hotdrop = 1; 66 *hotdrop = 1;
67 return 0; 67 return 0;
68 } 68 }
69 69
70 hdrlen = ipv6_optlen(rh); 70 hdrlen = ipv6_optlen(rh);
71 if (skb->len - ptr < hdrlen){ 71 if (skb->len - ptr < hdrlen) {
 72 /* Packet smaller than its length field */ 72 /* Packet smaller than its length field */
73 return 0; 73 return 0;
74 } 74 }
75 75
76 DEBUGP("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen); 76 DEBUGP("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen);
77 DEBUGP("TYPE %04X ", rh->type); 77 DEBUGP("TYPE %04X ", rh->type);
78 DEBUGP("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left); 78 DEBUGP("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left);
79 79
80 DEBUGP("IPv6 RT segsleft %02X ", 80 DEBUGP("IPv6 RT segsleft %02X ",
81 (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1], 81 (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
82 rh->segments_left, 82 rh->segments_left,
83 !!(rtinfo->invflags & IP6T_RT_INV_SGS)))); 83 !!(rtinfo->invflags & IP6T_RT_INV_SGS))));
84 DEBUGP("type %02X %02X %02X ", 84 DEBUGP("type %02X %02X %02X ",
85 rtinfo->rt_type, rh->type, 85 rtinfo->rt_type, rh->type,
86 (!(rtinfo->flags & IP6T_RT_TYP) || 86 (!(rtinfo->flags & IP6T_RT_TYP) ||
87 ((rtinfo->rt_type == rh->type) ^ 87 ((rtinfo->rt_type == rh->type) ^
88 !!(rtinfo->invflags & IP6T_RT_INV_TYP)))); 88 !!(rtinfo->invflags & IP6T_RT_INV_TYP))));
89 DEBUGP("len %02X %04X %02X ", 89 DEBUGP("len %02X %04X %02X ",
90 rtinfo->hdrlen, hdrlen, 90 rtinfo->hdrlen, hdrlen,
91 (!(rtinfo->flags & IP6T_RT_LEN) || 91 (!(rtinfo->flags & IP6T_RT_LEN) ||
92 ((rtinfo->hdrlen == hdrlen) ^ 92 ((rtinfo->hdrlen == hdrlen) ^
93 !!(rtinfo->invflags & IP6T_RT_INV_LEN)))); 93 !!(rtinfo->invflags & IP6T_RT_INV_LEN))));
94 DEBUGP("res %02X %02X %02X ", 94 DEBUGP("res %02X %02X %02X ",
95 (rtinfo->flags & IP6T_RT_RES), ((struct rt0_hdr *)rh)->reserved, 95 (rtinfo->flags & IP6T_RT_RES),
96 !((rtinfo->flags & IP6T_RT_RES) && (((struct rt0_hdr *)rh)->reserved))); 96 ((struct rt0_hdr *)rh)->reserved,
97 97 !((rtinfo->flags & IP6T_RT_RES) &&
98 ret = (rh != NULL) 98 (((struct rt0_hdr *)rh)->reserved)));
99 && 99
100 (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1], 100 ret = (rh != NULL)
101 rh->segments_left, 101 &&
102 !!(rtinfo->invflags & IP6T_RT_INV_SGS))) 102 (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
103 && 103 rh->segments_left,
104 (!(rtinfo->flags & IP6T_RT_LEN) || 104 !!(rtinfo->invflags & IP6T_RT_INV_SGS)))
105 ((rtinfo->hdrlen == hdrlen) ^ 105 &&
106 !!(rtinfo->invflags & IP6T_RT_INV_LEN))) 106 (!(rtinfo->flags & IP6T_RT_LEN) ||
107 && 107 ((rtinfo->hdrlen == hdrlen) ^
108 (!(rtinfo->flags & IP6T_RT_TYP) || 108 !!(rtinfo->invflags & IP6T_RT_INV_LEN)))
109 ((rtinfo->rt_type == rh->type) ^ 109 &&
110 !!(rtinfo->invflags & IP6T_RT_INV_TYP))); 110 (!(rtinfo->flags & IP6T_RT_TYP) ||
111 ((rtinfo->rt_type == rh->type) ^
112 !!(rtinfo->invflags & IP6T_RT_INV_TYP)));
111 113
112 if (ret && (rtinfo->flags & IP6T_RT_RES)) { 114 if (ret && (rtinfo->flags & IP6T_RT_RES)) {
113 u_int32_t *rp, _reserved; 115 u_int32_t *rp, _reserved;
114 rp = skb_header_pointer(skb, 116 rp = skb_header_pointer(skb,
115 ptr + offsetof(struct rt0_hdr, reserved), 117 ptr + offsetof(struct rt0_hdr,
116 sizeof(_reserved), &_reserved); 118 reserved),
119 sizeof(_reserved),
120 &_reserved);
117 121
118 ret = (*rp == 0); 122 ret = (*rp == 0);
119 } 123 }
120 124
121 DEBUGP("#%d ",rtinfo->addrnr); 125 DEBUGP("#%d ", rtinfo->addrnr);
122 if ( !(rtinfo->flags & IP6T_RT_FST) ){ 126 if (!(rtinfo->flags & IP6T_RT_FST)) {
123 return ret; 127 return ret;
124 } else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) { 128 } else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) {
125 DEBUGP("Not strict "); 129 DEBUGP("Not strict ");
126 if ( rtinfo->addrnr > (unsigned int)((hdrlen-8)/16) ){ 130 if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
127 DEBUGP("There isn't enough space\n"); 131 DEBUGP("There isn't enough space\n");
128 return 0; 132 return 0;
129 } else { 133 } else {
130 unsigned int i = 0; 134 unsigned int i = 0;
131 135
132 DEBUGP("#%d ",rtinfo->addrnr); 136 DEBUGP("#%d ", rtinfo->addrnr);
133 for(temp=0; temp<(unsigned int)((hdrlen-8)/16); temp++){ 137 for (temp = 0;
138 temp < (unsigned int)((hdrlen - 8) / 16);
139 temp++) {
134 ap = skb_header_pointer(skb, 140 ap = skb_header_pointer(skb,
135 ptr 141 ptr
136 + sizeof(struct rt0_hdr) 142 + sizeof(struct rt0_hdr)
@@ -141,24 +147,26 @@ match(const struct sk_buff *skb,
141 BUG_ON(ap == NULL); 147 BUG_ON(ap == NULL);
142 148
143 if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) { 149 if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) {
144 DEBUGP("i=%d temp=%d;\n",i,temp); 150 DEBUGP("i=%d temp=%d;\n", i, temp);
145 i++; 151 i++;
146 } 152 }
147 if (i==rtinfo->addrnr) break; 153 if (i == rtinfo->addrnr)
154 break;
148 } 155 }
149 DEBUGP("i=%d #%d\n", i, rtinfo->addrnr); 156 DEBUGP("i=%d #%d\n", i, rtinfo->addrnr);
150 if (i == rtinfo->addrnr) 157 if (i == rtinfo->addrnr)
151 return ret; 158 return ret;
152 else return 0; 159 else
160 return 0;
153 } 161 }
154 } else { 162 } else {
155 DEBUGP("Strict "); 163 DEBUGP("Strict ");
156 if ( rtinfo->addrnr > (unsigned int)((hdrlen-8)/16) ){ 164 if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
157 DEBUGP("There isn't enough space\n"); 165 DEBUGP("There isn't enough space\n");
158 return 0; 166 return 0;
159 } else { 167 } else {
160 DEBUGP("#%d ",rtinfo->addrnr); 168 DEBUGP("#%d ", rtinfo->addrnr);
161 for(temp=0; temp<rtinfo->addrnr; temp++){ 169 for (temp = 0; temp < rtinfo->addrnr; temp++) {
162 ap = skb_header_pointer(skb, 170 ap = skb_header_pointer(skb,
163 ptr 171 ptr
164 + sizeof(struct rt0_hdr) 172 + sizeof(struct rt0_hdr)
@@ -171,9 +179,11 @@ match(const struct sk_buff *skb,
171 break; 179 break;
172 } 180 }
173 DEBUGP("temp=%d #%d\n", temp, rtinfo->addrnr); 181 DEBUGP("temp=%d #%d\n", temp, rtinfo->addrnr);
174 if ((temp == rtinfo->addrnr) && (temp == (unsigned int)((hdrlen-8)/16))) 182 if ((temp == rtinfo->addrnr) &&
183 (temp == (unsigned int)((hdrlen - 8) / 16)))
175 return ret; 184 return ret;
176 else return 0; 185 else
186 return 0;
177 } 187 }
178 } 188 }
179 189
@@ -183,32 +193,31 @@ match(const struct sk_buff *skb,
183/* Called when user tries to insert an entry of this type. */ 193/* Called when user tries to insert an entry of this type. */
184static int 194static int
185checkentry(const char *tablename, 195checkentry(const char *tablename,
186 const void *entry, 196 const void *entry,
187 void *matchinfo, 197 void *matchinfo,
188 unsigned int matchinfosize, 198 unsigned int matchinfosize,
189 unsigned int hook_mask) 199 unsigned int hook_mask)
190{ 200{
191 const struct ip6t_rt *rtinfo = matchinfo; 201 const struct ip6t_rt *rtinfo = matchinfo;
192 202
193 if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_rt))) { 203 if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_rt))) {
194 DEBUGP("ip6t_rt: matchsize %u != %u\n", 204 DEBUGP("ip6t_rt: matchsize %u != %u\n",
195 matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_rt))); 205 matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_rt)));
196 return 0; 206 return 0;
197 } 207 }
198 if (rtinfo->invflags & ~IP6T_RT_INV_MASK) { 208 if (rtinfo->invflags & ~IP6T_RT_INV_MASK) {
199 DEBUGP("ip6t_rt: unknown flags %X\n", 209 DEBUGP("ip6t_rt: unknown flags %X\n", rtinfo->invflags);
200 rtinfo->invflags); 210 return 0;
201 return 0; 211 }
202 } 212 if ((rtinfo->flags & (IP6T_RT_RES | IP6T_RT_FST_MASK)) &&
203 if ( (rtinfo->flags & (IP6T_RT_RES|IP6T_RT_FST_MASK)) && 213 (!(rtinfo->flags & IP6T_RT_TYP) ||
204 (!(rtinfo->flags & IP6T_RT_TYP) || 214 (rtinfo->rt_type != 0) ||
205 (rtinfo->rt_type != 0) || 215 (rtinfo->invflags & IP6T_RT_INV_TYP))) {
206 (rtinfo->invflags & IP6T_RT_INV_TYP)) ) { 216 DEBUGP("`--rt-type 0' required before `--rt-0-*'");
207 DEBUGP("`--rt-type 0' required before `--rt-0-*'"); 217 return 0;
208 return 0; 218 }
209 } 219
210 220 return 1;
211 return 1;
212} 221}
213 222
214static struct ip6t_match rt_match = { 223static struct ip6t_match rt_match = {
@@ -220,12 +229,12 @@ static struct ip6t_match rt_match = {
220 229
221static int __init init(void) 230static int __init init(void)
222{ 231{
223 return ip6t_register_match(&rt_match); 232 return ip6t_register_match(&rt_match);
224} 233}
225 234
226static void __exit cleanup(void) 235static void __exit cleanup(void)
227{ 236{
228 ip6t_unregister_match(&rt_match); 237 ip6t_unregister_match(&rt_match);
229} 238}
230 239
231module_init(init); 240module_init(init);
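
The ip6t_rt hunks above are a whitespace and indentation cleanup; no logic
changes. The match logic keys off segsleft_match(), a range check with
optional inversion defined earlier in ip6t_rt.c and not quoted in these
hunks. As a rough sketch of what such a helper looks like (an assumption
about code outside the hunks, not the verbatim kernel source):

    /* Sketch only: true when id lies in [min, max], XORed with the
     * invert flag that implements ip6tables' "!" option syntax. */
    static inline int
    segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, int invert)
    {
            return (id >= min && id <= max) ^ invert;
    }
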
diff --git a/net/rxrpc/krxtimod.c b/net/rxrpc/krxtimod.c
index 3ac81cdd1211..3e7466900bd4 100644
--- a/net/rxrpc/krxtimod.c
+++ b/net/rxrpc/krxtimod.c
@@ -81,7 +81,7 @@ static int krxtimod(void *arg)
 
 	for (;;) {
 		unsigned long jif;
-		signed long timeout;
+		long timeout;
 
 		/* deal with the server being asked to die */
 		if (krxtimod_die) {
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index 3b5ecd8e2401..29975d99d864 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -361,7 +361,7 @@ static void rxrpc_proc_peers_stop(struct seq_file *p, void *v)
 static int rxrpc_proc_peers_show(struct seq_file *m, void *v)
 {
 	struct rxrpc_peer *peer = list_entry(v, struct rxrpc_peer, proc_link);
-	signed long timeout;
+	long timeout;
 
 	/* display header on line 1 */
 	if (v == SEQ_START_TOKEN) {
@@ -373,8 +373,8 @@ static int rxrpc_proc_peers_show(struct seq_file *m, void *v)
 	/* display one peer per line on subsequent lines */
 	timeout = 0;
 	if (!list_empty(&peer->timeout.link))
-		timeout = (signed long) peer->timeout.timo_jif -
-			  (signed long) jiffies;
+		timeout = (long) peer->timeout.timo_jif -
+			  (long) jiffies;
 
 	seq_printf(m, "%5hu %08x %5d %5d %8ld %5Zu %7lu\n",
 		   peer->trans->port,
@@ -468,7 +468,7 @@ static void rxrpc_proc_conns_stop(struct seq_file *p, void *v)
 static int rxrpc_proc_conns_show(struct seq_file *m, void *v)
 {
 	struct rxrpc_connection *conn;
-	signed long timeout;
+	long timeout;
 
 	conn = list_entry(v, struct rxrpc_connection, proc_link);
 
@@ -484,8 +484,8 @@ static int rxrpc_proc_conns_show(struct seq_file *m, void *v)
 	/* display one conn per line on subsequent lines */
 	timeout = 0;
 	if (!list_empty(&conn->timeout.link))
-		timeout = (signed long) conn->timeout.timo_jif -
-			  (signed long) jiffies;
+		timeout = (long) conn->timeout.timo_jif -
+			  (long) jiffies;
 
 	seq_printf(m,
 		   "%5hu %08x %5hu %04hx %08x %-3.3s %08x %08x %5Zu %8ld\n",
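
Both rxrpc changes replace "signed long" with the equivalent spelling
"long"; behavior is identical, since "signed long" and "long" name the
same type in C. The surrounding cast-and-subtract idiom is the usual
wrap-safe way to compare jiffies counters. A standalone illustration
(illustrative helper, not rxrpc code):

    /* Sketch only: subtracting two unsigned tick counters and reading
     * the result as signed stays correct across counter wraparound,
     * provided the two stamps are less than LONG_MAX ticks apart. */
    static long ticks_until(unsigned long expiry, unsigned long now)
    {
            return (long) expiry - (long) now;  /* <= 0: already due */
    }
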
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 5b3a3e48ed92..1641db33a994 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -228,14 +228,13 @@ static int prio_tune(struct Qdisc *sch, struct rtattr *opt)
 	}
 	sch_tree_unlock(sch);
 
-	for (i=0; i<=TC_PRIO_MAX; i++) {
-		int band = q->prio2band[i];
-		if (q->queues[band] == &noop_qdisc) {
+	for (i=0; i<q->bands; i++) {
+		if (q->queues[i] == &noop_qdisc) {
 			struct Qdisc *child;
 			child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
 			if (child) {
 				sch_tree_lock(sch);
-				child = xchg(&q->queues[band], child);
+				child = xchg(&q->queues[i], child);
 
 				if (child != &noop_qdisc)
 					qdisc_destroy(child);
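
The sch_prio fix changes which queues receive a default child qdisc: the
old loop walked the priority slots and indexed queues[] through
prio2band[], so a band that no priority mapped to kept &noop_qdisc (which
drops everything); the new loop walks bands 0..q->bands-1 directly, so
every band is initialized exactly once. For illustration (a hypothetical
mapping, not data from this patch): with a 4-band qdisc and a priomap
that never selects band 3, the old code never replaced band 3's noop
qdisc:

    /* Band 3 appears nowhere in this map, so iterating over the map
     * (rather than over the bands) never initializes queues[3]. */
    u8 prio2band[TC_PRIO_MAX + 1] = { 1, 2, 2, 2, 1, 2, 0, 0,
                                      1, 1, 1, 1, 1, 1, 1, 1 };
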
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 8734bb7280e3..86d8da0cbd02 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -144,6 +144,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
 		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
 		    (iph->protocol == IPPROTO_TCP ||
 		     iph->protocol == IPPROTO_UDP ||
+		     iph->protocol == IPPROTO_SCTP ||
+		     iph->protocol == IPPROTO_DCCP ||
 		     iph->protocol == IPPROTO_ESP))
 			h2 ^= *(((u32*)iph) + iph->ihl);
 		break;
@@ -155,6 +157,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
 		h2 = iph->saddr.s6_addr32[3]^iph->nexthdr;
 		if (iph->nexthdr == IPPROTO_TCP ||
 		    iph->nexthdr == IPPROTO_UDP ||
+		    iph->nexthdr == IPPROTO_SCTP ||
+		    iph->nexthdr == IPPROTO_DCCP ||
 		    iph->nexthdr == IPPROTO_ESP)
 			h2 ^= *(u32*)&iph[1];
 		break;
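
SFQ distinguishes flows by hashing the address pair plus the first 32
bits past the IP header, which for the whitelisted protocols is
flow-identifying: the source/destination port pair for TCP, UDP, and now
SCTP and DCCP, or the SPI for ESP. The whitelist keeps non-flow bytes of
other protocols out of the hash. A simplified sketch of the idea (not
the kernel's actual hash function):

    /* Sketch only: fold the transport header's first word into the
     * flow key for protocols where that word identifies a flow.
     * Assumes the usual kernel struct iphdr and IPPROTO_* macros. */
    static u32 flow_key(const struct iphdr *iph)
    {
            u32 key = iph->saddr ^ iph->daddr ^ iph->protocol;

            switch (iph->protocol) {
            case IPPROTO_TCP:
            case IPPROTO_UDP:
            case IPPROTO_SCTP:
            case IPPROTO_DCCP:
                    /* first 32 bits after the IP header: the ports */
                    key ^= *(((const u32 *)iph) + iph->ihl);
                    break;
            }
            return key;
    }
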
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c
index e9086e95a31f..fd6543998788 100644
--- a/sound/sparc/cs4231.c
+++ b/sound/sparc/cs4231.c
@@ -69,13 +69,14 @@ struct sbus_dma_info {
 };
 #endif
 
+struct snd_cs4231;
 struct cs4231_dma_control {
 	void (*prepare)(struct cs4231_dma_control *dma_cont, int dir);
 	void (*enable)(struct cs4231_dma_control *dma_cont, int on);
 	int (*request)(struct cs4231_dma_control *dma_cont, dma_addr_t bus_addr, size_t len);
 	unsigned int (*address)(struct cs4231_dma_control *dma_cont);
 	void (*reset)(struct snd_cs4231 *chip);
-	void (*preallocate)(struct snd_cs4231 *chip, struct snd_snd_pcm *pcm);
+	void (*preallocate)(struct snd_cs4231 *chip, struct snd_pcm *pcm);
 #ifdef EBUS_SUPPORT
 	struct ebus_dma_info ebus_info;
 #endif
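
The cs4231 hunk makes two fixes: it forward-declares struct snd_cs4231
before the first pointer use, and corrects the misspelled struct
snd_snd_pcm to struct snd_pcm. The forward declaration suffices because
C permits pointers to an incomplete type; only a by-value member would
require the full definition. A minimal standalone example (hypothetical
names):

    struct widget;                           /* incomplete type */

    struct widget_ops {
            void (*reset)(struct widget *w); /* pointer: fine */
            /* struct widget w; */           /* by value: won't compile */
    };
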