diff options
84 files changed, 1294 insertions, 1139 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index 58b16038baea..2a30bc6bdda0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -84,13 +84,6 @@ S: Status, one of the following: | |||
84 | it has been replaced by a better system and you | 84 | it has been replaced by a better system and you |
85 | should be using that. | 85 | should be using that. |
86 | 86 | ||
87 | 3C359 NETWORK DRIVER | ||
88 | P: Mike Phillips | ||
89 | M: mikep@linuxtr.net | ||
90 | L: netdev@vger.kernel.org | ||
91 | W: http://www.linuxtr.net | ||
92 | S: Maintained | ||
93 | |||
94 | 3C505 NETWORK DRIVER | 87 | 3C505 NETWORK DRIVER |
95 | P: Philip Blundell | 88 | P: Philip Blundell |
96 | M: philb@gnu.org | 89 | M: philb@gnu.org |
@@ -939,8 +932,6 @@ M: maxk@qualcomm.com | |||
939 | S: Maintained | 932 | S: Maintained |
940 | 933 | ||
941 | BONDING DRIVER | 934 | BONDING DRIVER |
942 | P: Chad Tindel | ||
943 | M: ctindel@users.sourceforge.net | ||
944 | P: Jay Vosburgh | 935 | P: Jay Vosburgh |
945 | M: fubar@us.ibm.com | 936 | M: fubar@us.ibm.com |
946 | L: bonding-devel@lists.sourceforge.net | 937 | L: bonding-devel@lists.sourceforge.net |
@@ -2864,15 +2855,6 @@ L: ocfs2-devel@oss.oracle.com | |||
2864 | W: http://oss.oracle.com/projects/ocfs2/ | 2855 | W: http://oss.oracle.com/projects/ocfs2/ |
2865 | S: Supported | 2856 | S: Supported |
2866 | 2857 | ||
2867 | OLYMPIC NETWORK DRIVER | ||
2868 | P: Peter De Shrijver | ||
2869 | M: p2@ace.ulyssis.student.kuleuven.ac.be | ||
2870 | P: Mike Phillips | ||
2871 | M: mikep@linuxtr.net | ||
2872 | L: netdev@vger.kernel.org | ||
2873 | W: http://www.linuxtr.net | ||
2874 | S: Maintained | ||
2875 | |||
2876 | OMNIKEY CARDMAN 4000 DRIVER | 2858 | OMNIKEY CARDMAN 4000 DRIVER |
2877 | P: Harald Welte | 2859 | P: Harald Welte |
2878 | M: laforge@gnumonks.org | 2860 | M: laforge@gnumonks.org |
@@ -3788,13 +3770,6 @@ L: tlan-devel@lists.sourceforge.net (subscribers-only) | |||
3788 | W: http://sourceforge.net/projects/tlan/ | 3770 | W: http://sourceforge.net/projects/tlan/ |
3789 | S: Maintained | 3771 | S: Maintained |
3790 | 3772 | ||
3791 | TOKEN-RING NETWORK DRIVER | ||
3792 | P: Mike Phillips | ||
3793 | M: mikep@linuxtr.net | ||
3794 | L: netdev@vger.kernel.org | ||
3795 | W: http://www.linuxtr.net | ||
3796 | S: Maintained | ||
3797 | |||
3798 | TOSHIBA ACPI EXTRAS DRIVER | 3773 | TOSHIBA ACPI EXTRAS DRIVER |
3799 | P: John Belmonte | 3774 | P: John Belmonte |
3800 | M: toshiba_acpi@memebeam.org | 3775 | M: toshiba_acpi@memebeam.org |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 389980f0e59e..55d224c8a0b9 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -814,8 +814,8 @@ config ULTRA32 | |||
814 | will be called smc-ultra32. | 814 | will be called smc-ultra32. |
815 | 815 | ||
816 | config BFIN_MAC | 816 | config BFIN_MAC |
817 | tristate "Blackfin 536/537 on-chip mac support" | 817 | tristate "Blackfin 527/536/537 on-chip mac support" |
818 | depends on NET_ETHERNET && (BF537 || BF536) && (!BF537_PORT_H) | 818 | depends on NET_ETHERNET && (BF527 || BF537 || BF536) && (!BF537_PORT_H) |
819 | select CRC32 | 819 | select CRC32 |
820 | select MII | 820 | select MII |
821 | select PHYLIB | 821 | select PHYLIB |
@@ -828,7 +828,7 @@ config BFIN_MAC | |||
828 | 828 | ||
829 | config BFIN_MAC_USE_L1 | 829 | config BFIN_MAC_USE_L1 |
830 | bool "Use L1 memory for rx/tx packets" | 830 | bool "Use L1 memory for rx/tx packets" |
831 | depends on BFIN_MAC && BF537 | 831 | depends on BFIN_MAC && (BF527 || BF537) |
832 | default y | 832 | default y |
833 | help | 833 | help |
834 | To get maximum network performance, you should use L1 memory as rx/tx buffers. | 834 | To get maximum network performance, you should use L1 memory as rx/tx buffers. |
@@ -855,7 +855,8 @@ config BFIN_RX_DESC_NUM | |||
855 | config BFIN_MAC_RMII | 855 | config BFIN_MAC_RMII |
856 | bool "RMII PHY Interface (EXPERIMENTAL)" | 856 | bool "RMII PHY Interface (EXPERIMENTAL)" |
857 | depends on BFIN_MAC && EXPERIMENTAL | 857 | depends on BFIN_MAC && EXPERIMENTAL |
858 | default n | 858 | default y if BFIN527_EZKIT |
859 | default n if BFIN537_STAMP | ||
859 | help | 860 | help |
860 | Use Reduced PHY MII Interface | 861 | Use Reduced PHY MII Interface |
861 | 862 | ||
@@ -1199,7 +1200,7 @@ config NE2_MCA | |||
1199 | 1200 | ||
1200 | config IBMLANA | 1201 | config IBMLANA |
1201 | tristate "IBM LAN Adapter/A support" | 1202 | tristate "IBM LAN Adapter/A support" |
1202 | depends on MCA && MCA_LEGACY | 1203 | depends on MCA |
1203 | ---help--- | 1204 | ---help--- |
1204 | This is a Micro Channel Ethernet adapter. You need to set | 1205 | This is a Micro Channel Ethernet adapter. You need to set |
1205 | CONFIG_MCA to use this driver. It is both available as an in-kernel | 1206 | CONFIG_MCA to use this driver. It is both available as an in-kernel |
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c index 25b114a4e2b1..0ae0d83e5d22 100644 --- a/drivers/net/arm/at91_ether.c +++ b/drivers/net/arm/at91_ether.c | |||
@@ -384,7 +384,7 @@ static void reset_phy(struct net_device *dev) | |||
384 | /* Wait until PHY reset is complete */ | 384 | /* Wait until PHY reset is complete */ |
385 | do { | 385 | do { |
386 | read_phy(lp->phy_address, MII_BMCR, &bmcr); | 386 | read_phy(lp->phy_address, MII_BMCR, &bmcr); |
387 | } while (!(bmcr && BMCR_RESET)); | 387 | } while (!(bmcr & BMCR_RESET)); |
388 | 388 | ||
389 | disable_mdi(); | 389 | disable_mdi(); |
390 | spin_unlock_irq(&lp->lock); | 390 | spin_unlock_irq(&lp->lock); |
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c index 7495a9ee8f4b..194949afacd0 100644 --- a/drivers/net/ax88796.c +++ b/drivers/net/ax88796.c | |||
@@ -137,11 +137,12 @@ static int ax_initial_check(struct net_device *dev) | |||
137 | static void ax_reset_8390(struct net_device *dev) | 137 | static void ax_reset_8390(struct net_device *dev) |
138 | { | 138 | { |
139 | struct ei_device *ei_local = netdev_priv(dev); | 139 | struct ei_device *ei_local = netdev_priv(dev); |
140 | struct ax_device *ax = to_ax_dev(dev); | ||
140 | unsigned long reset_start_time = jiffies; | 141 | unsigned long reset_start_time = jiffies; |
141 | void __iomem *addr = (void __iomem *)dev->base_addr; | 142 | void __iomem *addr = (void __iomem *)dev->base_addr; |
142 | 143 | ||
143 | if (ei_debug > 1) | 144 | if (ei_debug > 1) |
144 | printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies); | 145 | dev_dbg(&ax->dev->dev, "resetting the 8390 t=%ld\n", jiffies); |
145 | 146 | ||
146 | ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET); | 147 | ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET); |
147 | 148 | ||
@@ -151,7 +152,7 @@ static void ax_reset_8390(struct net_device *dev) | |||
151 | /* This check _should_not_ be necessary, omit eventually. */ | 152 | /* This check _should_not_ be necessary, omit eventually. */ |
152 | while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) { | 153 | while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) { |
153 | if (jiffies - reset_start_time > 2*HZ/100) { | 154 | if (jiffies - reset_start_time > 2*HZ/100) { |
154 | printk(KERN_WARNING "%s: %s did not complete.\n", | 155 | dev_warn(&ax->dev->dev, "%s: %s did not complete.\n", |
155 | __FUNCTION__, dev->name); | 156 | __FUNCTION__, dev->name); |
156 | break; | 157 | break; |
157 | } | 158 | } |
@@ -165,13 +166,15 @@ static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, | |||
165 | int ring_page) | 166 | int ring_page) |
166 | { | 167 | { |
167 | struct ei_device *ei_local = netdev_priv(dev); | 168 | struct ei_device *ei_local = netdev_priv(dev); |
169 | struct ax_device *ax = to_ax_dev(dev); | ||
168 | void __iomem *nic_base = ei_local->mem; | 170 | void __iomem *nic_base = ei_local->mem; |
169 | 171 | ||
170 | /* This *shouldn't* happen. If it does, it's the last thing you'll see */ | 172 | /* This *shouldn't* happen. If it does, it's the last thing you'll see */ |
171 | if (ei_status.dmaing) { | 173 | if (ei_status.dmaing) { |
172 | printk(KERN_EMERG "%s: DMAing conflict in %s [DMAstat:%d][irqlock:%d].\n", | 174 | dev_err(&ax->dev->dev, "%s: DMAing conflict in %s " |
175 | "[DMAstat:%d][irqlock:%d].\n", | ||
173 | dev->name, __FUNCTION__, | 176 | dev->name, __FUNCTION__, |
174 | ei_status.dmaing, ei_status.irqlock); | 177 | ei_status.dmaing, ei_status.irqlock); |
175 | return; | 178 | return; |
176 | } | 179 | } |
177 | 180 | ||
@@ -204,13 +207,16 @@ static void ax_block_input(struct net_device *dev, int count, | |||
204 | struct sk_buff *skb, int ring_offset) | 207 | struct sk_buff *skb, int ring_offset) |
205 | { | 208 | { |
206 | struct ei_device *ei_local = netdev_priv(dev); | 209 | struct ei_device *ei_local = netdev_priv(dev); |
210 | struct ax_device *ax = to_ax_dev(dev); | ||
207 | void __iomem *nic_base = ei_local->mem; | 211 | void __iomem *nic_base = ei_local->mem; |
208 | char *buf = skb->data; | 212 | char *buf = skb->data; |
209 | 213 | ||
210 | if (ei_status.dmaing) { | 214 | if (ei_status.dmaing) { |
211 | printk(KERN_EMERG "%s: DMAing conflict in ax_block_input " | 215 | dev_err(&ax->dev->dev, |
216 | "%s: DMAing conflict in %s " | ||
212 | "[DMAstat:%d][irqlock:%d].\n", | 217 | "[DMAstat:%d][irqlock:%d].\n", |
213 | dev->name, ei_status.dmaing, ei_status.irqlock); | 218 | dev->name, __FUNCTION__, |
219 | ei_status.dmaing, ei_status.irqlock); | ||
214 | return; | 220 | return; |
215 | } | 221 | } |
216 | 222 | ||
@@ -239,6 +245,7 @@ static void ax_block_output(struct net_device *dev, int count, | |||
239 | const unsigned char *buf, const int start_page) | 245 | const unsigned char *buf, const int start_page) |
240 | { | 246 | { |
241 | struct ei_device *ei_local = netdev_priv(dev); | 247 | struct ei_device *ei_local = netdev_priv(dev); |
248 | struct ax_device *ax = to_ax_dev(dev); | ||
242 | void __iomem *nic_base = ei_local->mem; | 249 | void __iomem *nic_base = ei_local->mem; |
243 | unsigned long dma_start; | 250 | unsigned long dma_start; |
244 | 251 | ||
@@ -251,7 +258,7 @@ static void ax_block_output(struct net_device *dev, int count, | |||
251 | 258 | ||
252 | /* This *shouldn't* happen. If it does, it's the last thing you'll see */ | 259 | /* This *shouldn't* happen. If it does, it's the last thing you'll see */ |
253 | if (ei_status.dmaing) { | 260 | if (ei_status.dmaing) { |
254 | printk(KERN_EMERG "%s: DMAing conflict in %s." | 261 | dev_err(&ax->dev->dev, "%s: DMAing conflict in %s." |
255 | "[DMAstat:%d][irqlock:%d]\n", | 262 | "[DMAstat:%d][irqlock:%d]\n", |
256 | dev->name, __FUNCTION__, | 263 | dev->name, __FUNCTION__, |
257 | ei_status.dmaing, ei_status.irqlock); | 264 | ei_status.dmaing, ei_status.irqlock); |
@@ -281,7 +288,8 @@ static void ax_block_output(struct net_device *dev, int count, | |||
281 | 288 | ||
282 | while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) { | 289 | while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) { |
283 | if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ | 290 | if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ |
284 | printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); | 291 | dev_warn(&ax->dev->dev, |
292 | "%s: timeout waiting for Tx RDC.\n", dev->name); | ||
285 | ax_reset_8390(dev); | 293 | ax_reset_8390(dev); |
286 | ax_NS8390_init(dev,1); | 294 | ax_NS8390_init(dev,1); |
287 | break; | 295 | break; |
@@ -424,10 +432,11 @@ static void | |||
424 | ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value) | 432 | ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value) |
425 | { | 433 | { |
426 | struct ei_device *ei = (struct ei_device *) netdev_priv(dev); | 434 | struct ei_device *ei = (struct ei_device *) netdev_priv(dev); |
435 | struct ax_device *ax = to_ax_dev(dev); | ||
427 | unsigned long flags; | 436 | unsigned long flags; |
428 | 437 | ||
429 | printk(KERN_DEBUG "%s: %p, %04x, %04x %04x\n", | 438 | dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n", |
430 | __FUNCTION__, dev, phy_addr, reg, value); | 439 | __FUNCTION__, dev, phy_addr, reg, value); |
431 | 440 | ||
432 | spin_lock_irqsave(&ei->page_lock, flags); | 441 | spin_lock_irqsave(&ei->page_lock, flags); |
433 | 442 | ||
@@ -750,14 +759,11 @@ static int ax_init_dev(struct net_device *dev, int first_init) | |||
750 | ax_NS8390_init(dev, 0); | 759 | ax_NS8390_init(dev, 0); |
751 | 760 | ||
752 | if (first_init) { | 761 | if (first_init) { |
753 | printk("AX88796: %dbit, irq %d, %lx, MAC: ", | 762 | DECLARE_MAC_BUF(mac); |
754 | ei_status.word16 ? 16:8, dev->irq, dev->base_addr); | ||
755 | |||
756 | for (i = 0; i < ETHER_ADDR_LEN; i++) | ||
757 | printk("%2.2x%c", dev->dev_addr[i], | ||
758 | (i < (ETHER_ADDR_LEN-1) ? ':' : ' ')); | ||
759 | 763 | ||
760 | printk("\n"); | 764 | dev_info(&ax->dev->dev, "%dbit, irq %d, %lx, MAC: %s\n", |
765 | ei_status.word16 ? 16:8, dev->irq, dev->base_addr, | ||
766 | print_mac(mac, dev->dev_addr)); | ||
761 | } | 767 | } |
762 | 768 | ||
763 | ret = register_netdev(dev); | 769 | ret = register_netdev(dev); |
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index eb971755a3ff..c993a32b3f50 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c | |||
@@ -1,34 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * File: drivers/net/bfin_mac.c | 2 | * Blackfin On-Chip MAC Driver |
3 | * Based on: | ||
4 | * Maintainer: | ||
5 | * Bryan Wu <bryan.wu@analog.com> | ||
6 | * | 3 | * |
7 | * Original author: | 4 | * Copyright 2004-2007 Analog Devices Inc. |
8 | * Luke Yang <luke.yang@analog.com> | ||
9 | * | 5 | * |
10 | * Created: | 6 | * Enter bugs at http://blackfin.uclinux.org/ |
11 | * Description: | ||
12 | * | 7 | * |
13 | * Modified: | 8 | * Licensed under the GPL-2 or later. |
14 | * Copyright 2004-2006 Analog Devices Inc. | ||
15 | * | ||
16 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ | ||
17 | * | ||
18 | * This program is free software ; you can redistribute it and/or modify | ||
19 | * it under the terms of the GNU General Public License as published by | ||
20 | * the Free Software Foundation ; either version 2, or (at your option) | ||
21 | * any later version. | ||
22 | * | ||
23 | * This program is distributed in the hope that it will be useful, | ||
24 | * but WITHOUT ANY WARRANTY ; without even the implied warranty of | ||
25 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
26 | * GNU General Public License for more details. | ||
27 | * | ||
28 | * You should have received a copy of the GNU General Public License | ||
29 | * along with this program ; see the file COPYING. | ||
30 | * If not, write to the Free Software Foundation, | ||
31 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
32 | */ | 9 | */ |
33 | 10 | ||
34 | #include <linux/init.h> | 11 | #include <linux/init.h> |
@@ -65,7 +42,7 @@ | |||
65 | #define DRV_NAME "bfin_mac" | 42 | #define DRV_NAME "bfin_mac" |
66 | #define DRV_VERSION "1.1" | 43 | #define DRV_VERSION "1.1" |
67 | #define DRV_AUTHOR "Bryan Wu, Luke Yang" | 44 | #define DRV_AUTHOR "Bryan Wu, Luke Yang" |
68 | #define DRV_DESC "Blackfin BF53[67] on-chip Ethernet MAC driver" | 45 | #define DRV_DESC "Blackfin BF53[67] BF527 on-chip Ethernet MAC driver" |
69 | 46 | ||
70 | MODULE_AUTHOR(DRV_AUTHOR); | 47 | MODULE_AUTHOR(DRV_AUTHOR); |
71 | MODULE_LICENSE("GPL"); | 48 | MODULE_LICENSE("GPL"); |
@@ -296,7 +273,7 @@ static void mdio_poll(void) | |||
296 | 273 | ||
297 | /* poll the STABUSY bit */ | 274 | /* poll the STABUSY bit */ |
298 | while ((bfin_read_EMAC_STAADD()) & STABUSY) { | 275 | while ((bfin_read_EMAC_STAADD()) & STABUSY) { |
299 | mdelay(10); | 276 | udelay(1); |
300 | if (timeout_cnt-- < 0) { | 277 | if (timeout_cnt-- < 0) { |
301 | printk(KERN_ERR DRV_NAME | 278 | printk(KERN_ERR DRV_NAME |
302 | ": wait MDC/MDIO transaction to complete timeout\n"); | 279 | ": wait MDC/MDIO transaction to complete timeout\n"); |
@@ -412,20 +389,26 @@ static void bf537_adjust_link(struct net_device *dev) | |||
412 | spin_unlock_irqrestore(&lp->lock, flags); | 389 | spin_unlock_irqrestore(&lp->lock, flags); |
413 | } | 390 | } |
414 | 391 | ||
392 | /* MDC = 2.5 MHz */ | ||
393 | #define MDC_CLK 2500000 | ||
394 | |||
415 | static int mii_probe(struct net_device *dev) | 395 | static int mii_probe(struct net_device *dev) |
416 | { | 396 | { |
417 | struct bf537mac_local *lp = netdev_priv(dev); | 397 | struct bf537mac_local *lp = netdev_priv(dev); |
418 | struct phy_device *phydev = NULL; | 398 | struct phy_device *phydev = NULL; |
419 | unsigned short sysctl; | 399 | unsigned short sysctl; |
420 | int i; | 400 | int i; |
401 | u32 sclk, mdc_div; | ||
421 | 402 | ||
422 | /* Enable PHY output early */ | 403 | /* Enable PHY output early */ |
423 | if (!(bfin_read_VR_CTL() & PHYCLKOE)) | 404 | if (!(bfin_read_VR_CTL() & PHYCLKOE)) |
424 | bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE); | 405 | bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE); |
425 | 406 | ||
426 | /* MDC = 2.5 MHz */ | 407 | sclk = get_sclk(); |
408 | mdc_div = ((sclk / MDC_CLK) / 2) - 1; | ||
409 | |||
427 | sysctl = bfin_read_EMAC_SYSCTL(); | 410 | sysctl = bfin_read_EMAC_SYSCTL(); |
428 | sysctl |= SET_MDCDIV(24); | 411 | sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div); |
429 | bfin_write_EMAC_SYSCTL(sysctl); | 412 | bfin_write_EMAC_SYSCTL(sysctl); |
430 | 413 | ||
431 | /* search for connect PHY device */ | 414 | /* search for connect PHY device */ |
@@ -477,8 +460,10 @@ static int mii_probe(struct net_device *dev) | |||
477 | lp->phydev = phydev; | 460 | lp->phydev = phydev; |
478 | 461 | ||
479 | printk(KERN_INFO "%s: attached PHY driver [%s] " | 462 | printk(KERN_INFO "%s: attached PHY driver [%s] " |
480 | "(mii_bus:phy_addr=%s, irq=%d)\n", | 463 | "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)" |
481 | DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq); | 464 | "@sclk=%dMHz)\n", |
465 | DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq, | ||
466 | MDC_CLK, mdc_div, sclk/1000000); | ||
482 | 467 | ||
483 | return 0; | 468 | return 0; |
484 | } | 469 | } |
@@ -551,7 +536,7 @@ static void adjust_tx_list(void) | |||
551 | */ | 536 | */ |
552 | if (current_tx_ptr->next->next == tx_list_head) { | 537 | if (current_tx_ptr->next->next == tx_list_head) { |
553 | while (tx_list_head->status.status_word == 0) { | 538 | while (tx_list_head->status.status_word == 0) { |
554 | mdelay(10); | 539 | mdelay(1); |
555 | if (tx_list_head->status.status_word != 0 | 540 | if (tx_list_head->status.status_word != 0 |
556 | || !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) { | 541 | || !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) { |
557 | goto adjust_head; | 542 | goto adjust_head; |
@@ -666,6 +651,12 @@ static void bf537mac_rx(struct net_device *dev) | |||
666 | current_rx_ptr->skb = new_skb; | 651 | current_rx_ptr->skb = new_skb; |
667 | current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2; | 652 | current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2; |
668 | 653 | ||
654 | /* Invidate the data cache of skb->data range when it is write back | ||
655 | * cache. It will prevent overwritting the new data from DMA | ||
656 | */ | ||
657 | blackfin_dcache_invalidate_range((unsigned long)new_skb->head, | ||
658 | (unsigned long)new_skb->end); | ||
659 | |||
669 | len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN); | 660 | len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN); |
670 | skb_put(skb, len); | 661 | skb_put(skb, len); |
671 | blackfin_dcache_invalidate_range((unsigned long)skb->head, | 662 | blackfin_dcache_invalidate_range((unsigned long)skb->head, |
@@ -767,7 +758,7 @@ static void bf537mac_enable(void) | |||
767 | 758 | ||
768 | #if defined(CONFIG_BFIN_MAC_RMII) | 759 | #if defined(CONFIG_BFIN_MAC_RMII) |
769 | opmode |= RMII; /* For Now only 100MBit are supported */ | 760 | opmode |= RMII; /* For Now only 100MBit are supported */ |
770 | #ifdef CONFIG_BF_REV_0_2 | 761 | #if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2 |
771 | opmode |= TE; | 762 | opmode |= TE; |
772 | #endif | 763 | #endif |
773 | #endif | 764 | #endif |
@@ -792,6 +783,39 @@ static void bf537mac_timeout(struct net_device *dev) | |||
792 | netif_wake_queue(dev); | 783 | netif_wake_queue(dev); |
793 | } | 784 | } |
794 | 785 | ||
786 | static void bf537mac_multicast_hash(struct net_device *dev) | ||
787 | { | ||
788 | u32 emac_hashhi, emac_hashlo; | ||
789 | struct dev_mc_list *dmi = dev->mc_list; | ||
790 | char *addrs; | ||
791 | int i; | ||
792 | u32 crc; | ||
793 | |||
794 | emac_hashhi = emac_hashlo = 0; | ||
795 | |||
796 | for (i = 0; i < dev->mc_count; i++) { | ||
797 | addrs = dmi->dmi_addr; | ||
798 | dmi = dmi->next; | ||
799 | |||
800 | /* skip non-multicast addresses */ | ||
801 | if (!(*addrs & 1)) | ||
802 | continue; | ||
803 | |||
804 | crc = ether_crc(ETH_ALEN, addrs); | ||
805 | crc >>= 26; | ||
806 | |||
807 | if (crc & 0x20) | ||
808 | emac_hashhi |= 1 << (crc & 0x1f); | ||
809 | else | ||
810 | emac_hashlo |= 1 << (crc & 0x1f); | ||
811 | } | ||
812 | |||
813 | bfin_write_EMAC_HASHHI(emac_hashhi); | ||
814 | bfin_write_EMAC_HASHLO(emac_hashlo); | ||
815 | |||
816 | return; | ||
817 | } | ||
818 | |||
795 | /* | 819 | /* |
796 | * This routine will, depending on the values passed to it, | 820 | * This routine will, depending on the values passed to it, |
797 | * either make it accept multicast packets, go into | 821 | * either make it accept multicast packets, go into |
@@ -807,11 +831,17 @@ static void bf537mac_set_multicast_list(struct net_device *dev) | |||
807 | sysctl = bfin_read_EMAC_OPMODE(); | 831 | sysctl = bfin_read_EMAC_OPMODE(); |
808 | sysctl |= RAF; | 832 | sysctl |= RAF; |
809 | bfin_write_EMAC_OPMODE(sysctl); | 833 | bfin_write_EMAC_OPMODE(sysctl); |
810 | } else if (dev->flags & IFF_ALLMULTI || dev->mc_count) { | 834 | } else if (dev->flags & IFF_ALLMULTI) { |
811 | /* accept all multicast */ | 835 | /* accept all multicast */ |
812 | sysctl = bfin_read_EMAC_OPMODE(); | 836 | sysctl = bfin_read_EMAC_OPMODE(); |
813 | sysctl |= PAM; | 837 | sysctl |= PAM; |
814 | bfin_write_EMAC_OPMODE(sysctl); | 838 | bfin_write_EMAC_OPMODE(sysctl); |
839 | } else if (dev->mc_count) { | ||
840 | /* set up multicast hash table */ | ||
841 | sysctl = bfin_read_EMAC_OPMODE(); | ||
842 | sysctl |= HM; | ||
843 | bfin_write_EMAC_OPMODE(sysctl); | ||
844 | bf537mac_multicast_hash(dev); | ||
815 | } else { | 845 | } else { |
816 | /* clear promisc or multicast mode */ | 846 | /* clear promisc or multicast mode */ |
817 | sysctl = bfin_read_EMAC_OPMODE(); | 847 | sysctl = bfin_read_EMAC_OPMODE(); |
@@ -860,10 +890,10 @@ static int bf537mac_open(struct net_device *dev) | |||
860 | return retval; | 890 | return retval; |
861 | 891 | ||
862 | phy_start(lp->phydev); | 892 | phy_start(lp->phydev); |
893 | phy_write(lp->phydev, MII_BMCR, BMCR_RESET); | ||
863 | setup_system_regs(dev); | 894 | setup_system_regs(dev); |
864 | bf537mac_disable(); | 895 | bf537mac_disable(); |
865 | bf537mac_enable(); | 896 | bf537mac_enable(); |
866 | |||
867 | pr_debug("hardware init finished\n"); | 897 | pr_debug("hardware init finished\n"); |
868 | netif_start_queue(dev); | 898 | netif_start_queue(dev); |
869 | netif_carrier_on(dev); | 899 | netif_carrier_on(dev); |
@@ -886,6 +916,7 @@ static int bf537mac_close(struct net_device *dev) | |||
886 | netif_carrier_off(dev); | 916 | netif_carrier_off(dev); |
887 | 917 | ||
888 | phy_stop(lp->phydev); | 918 | phy_stop(lp->phydev); |
919 | phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN); | ||
889 | 920 | ||
890 | /* clear everything */ | 921 | /* clear everything */ |
891 | bf537mac_shutdown(dev); | 922 | bf537mac_shutdown(dev); |
@@ -970,7 +1001,7 @@ static int __init bf537mac_probe(struct net_device *dev) | |||
970 | /* register irq handler */ | 1001 | /* register irq handler */ |
971 | if (request_irq | 1002 | if (request_irq |
972 | (IRQ_MAC_RX, bf537mac_interrupt, IRQF_DISABLED | IRQF_SHARED, | 1003 | (IRQ_MAC_RX, bf537mac_interrupt, IRQF_DISABLED | IRQF_SHARED, |
973 | "BFIN537_MAC_RX", dev)) { | 1004 | "EMAC_RX", dev)) { |
974 | printk(KERN_WARNING DRV_NAME | 1005 | printk(KERN_WARNING DRV_NAME |
975 | ": Unable to attach BlackFin MAC RX interrupt\n"); | 1006 | ": Unable to attach BlackFin MAC RX interrupt\n"); |
976 | return -EBUSY; | 1007 | return -EBUSY; |
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h index 5970ea7142cd..f774d5a36942 100644 --- a/drivers/net/bfin_mac.h +++ b/drivers/net/bfin_mac.h | |||
@@ -1,34 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * File: drivers/net/bfin_mac.c | 2 | * Blackfin On-Chip MAC Driver |
3 | * Based on: | ||
4 | * Maintainer: | ||
5 | * Bryan Wu <bryan.wu@analog.com> | ||
6 | * | 3 | * |
7 | * Original author: | 4 | * Copyright 2004-2007 Analog Devices Inc. |
8 | * Luke Yang <luke.yang@analog.com> | ||
9 | * | 5 | * |
10 | * Created: | 6 | * Enter bugs at http://blackfin.uclinux.org/ |
11 | * Description: | ||
12 | * | 7 | * |
13 | * Modified: | 8 | * Licensed under the GPL-2 or later. |
14 | * Copyright 2004-2006 Analog Devices Inc. | ||
15 | * | ||
16 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ | ||
17 | * | ||
18 | * This program is free software ; you can redistribute it and/or modify | ||
19 | * it under the terms of the GNU General Public License as published by | ||
20 | * the Free Software Foundation ; either version 2, or (at your option) | ||
21 | * any later version. | ||
22 | * | ||
23 | * This program is distributed in the hope that it will be useful, | ||
24 | * but WITHOUT ANY WARRANTY ; without even the implied warranty of | ||
25 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
26 | * GNU General Public License for more details. | ||
27 | * | ||
28 | * You should have received a copy of the GNU General Public License | ||
29 | * along with this program ; see the file COPYING. | ||
30 | * If not, write to the Free Software Foundation, | ||
31 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
32 | */ | 9 | */ |
33 | 10 | ||
34 | #define BFIN_MAC_CSUM_OFFLOAD | 11 | #define BFIN_MAC_CSUM_OFFLOAD |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2039f7838f2d..0942d82f7cbf 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -1464,10 +1464,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1464 | dev_set_allmulti(slave_dev, 1); | 1464 | dev_set_allmulti(slave_dev, 1); |
1465 | } | 1465 | } |
1466 | 1466 | ||
1467 | netif_tx_lock_bh(bond_dev); | ||
1467 | /* upload master's mc_list to new slave */ | 1468 | /* upload master's mc_list to new slave */ |
1468 | for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) { | 1469 | for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) { |
1469 | dev_mc_add (slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0); | 1470 | dev_mc_add (slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0); |
1470 | } | 1471 | } |
1472 | netif_tx_unlock_bh(bond_dev); | ||
1471 | } | 1473 | } |
1472 | 1474 | ||
1473 | if (bond->params.mode == BOND_MODE_8023AD) { | 1475 | if (bond->params.mode == BOND_MODE_8023AD) { |
@@ -1821,7 +1823,9 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1821 | } | 1823 | } |
1822 | 1824 | ||
1823 | /* flush master's mc_list from slave */ | 1825 | /* flush master's mc_list from slave */ |
1826 | netif_tx_lock_bh(bond_dev); | ||
1824 | bond_mc_list_flush(bond_dev, slave_dev); | 1827 | bond_mc_list_flush(bond_dev, slave_dev); |
1828 | netif_tx_unlock_bh(bond_dev); | ||
1825 | } | 1829 | } |
1826 | 1830 | ||
1827 | netdev_set_master(slave_dev, NULL); | 1831 | netdev_set_master(slave_dev, NULL); |
@@ -1942,7 +1946,9 @@ static int bond_release_all(struct net_device *bond_dev) | |||
1942 | } | 1946 | } |
1943 | 1947 | ||
1944 | /* flush master's mc_list from slave */ | 1948 | /* flush master's mc_list from slave */ |
1949 | netif_tx_lock_bh(bond_dev); | ||
1945 | bond_mc_list_flush(bond_dev, slave_dev); | 1950 | bond_mc_list_flush(bond_dev, slave_dev); |
1951 | netif_tx_unlock_bh(bond_dev); | ||
1946 | } | 1952 | } |
1947 | 1953 | ||
1948 | netdev_set_master(slave_dev, NULL); | 1954 | netdev_set_master(slave_dev, NULL); |
@@ -2795,14 +2801,11 @@ void bond_loadbalance_arp_mon(struct work_struct *work) | |||
2795 | } | 2801 | } |
2796 | 2802 | ||
2797 | if (do_failover) { | 2803 | if (do_failover) { |
2798 | rtnl_lock(); | ||
2799 | write_lock_bh(&bond->curr_slave_lock); | 2804 | write_lock_bh(&bond->curr_slave_lock); |
2800 | 2805 | ||
2801 | bond_select_active_slave(bond); | 2806 | bond_select_active_slave(bond); |
2802 | 2807 | ||
2803 | write_unlock_bh(&bond->curr_slave_lock); | 2808 | write_unlock_bh(&bond->curr_slave_lock); |
2804 | rtnl_unlock(); | ||
2805 | |||
2806 | } | 2809 | } |
2807 | 2810 | ||
2808 | re_arm: | 2811 | re_arm: |
@@ -2859,8 +2862,6 @@ void bond_activebackup_arp_mon(struct work_struct *work) | |||
2859 | 2862 | ||
2860 | slave->link = BOND_LINK_UP; | 2863 | slave->link = BOND_LINK_UP; |
2861 | 2864 | ||
2862 | rtnl_lock(); | ||
2863 | |||
2864 | write_lock_bh(&bond->curr_slave_lock); | 2865 | write_lock_bh(&bond->curr_slave_lock); |
2865 | 2866 | ||
2866 | if ((!bond->curr_active_slave) && | 2867 | if ((!bond->curr_active_slave) && |
@@ -2896,7 +2897,6 @@ void bond_activebackup_arp_mon(struct work_struct *work) | |||
2896 | } | 2897 | } |
2897 | 2898 | ||
2898 | write_unlock_bh(&bond->curr_slave_lock); | 2899 | write_unlock_bh(&bond->curr_slave_lock); |
2899 | rtnl_unlock(); | ||
2900 | } | 2900 | } |
2901 | } else { | 2901 | } else { |
2902 | read_lock(&bond->curr_slave_lock); | 2902 | read_lock(&bond->curr_slave_lock); |
@@ -2966,7 +2966,6 @@ void bond_activebackup_arp_mon(struct work_struct *work) | |||
2966 | bond->dev->name, | 2966 | bond->dev->name, |
2967 | slave->dev->name); | 2967 | slave->dev->name); |
2968 | 2968 | ||
2969 | rtnl_lock(); | ||
2970 | write_lock_bh(&bond->curr_slave_lock); | 2969 | write_lock_bh(&bond->curr_slave_lock); |
2971 | 2970 | ||
2972 | bond_select_active_slave(bond); | 2971 | bond_select_active_slave(bond); |
@@ -2974,8 +2973,6 @@ void bond_activebackup_arp_mon(struct work_struct *work) | |||
2974 | 2973 | ||
2975 | write_unlock_bh(&bond->curr_slave_lock); | 2974 | write_unlock_bh(&bond->curr_slave_lock); |
2976 | 2975 | ||
2977 | rtnl_unlock(); | ||
2978 | |||
2979 | bond->current_arp_slave = slave; | 2976 | bond->current_arp_slave = slave; |
2980 | 2977 | ||
2981 | if (slave) { | 2978 | if (slave) { |
@@ -2993,13 +2990,10 @@ void bond_activebackup_arp_mon(struct work_struct *work) | |||
2993 | bond->primary_slave->dev->name); | 2990 | bond->primary_slave->dev->name); |
2994 | 2991 | ||
2995 | /* primary is up so switch to it */ | 2992 | /* primary is up so switch to it */ |
2996 | rtnl_lock(); | ||
2997 | write_lock_bh(&bond->curr_slave_lock); | 2993 | write_lock_bh(&bond->curr_slave_lock); |
2998 | bond_change_active_slave(bond, bond->primary_slave); | 2994 | bond_change_active_slave(bond, bond->primary_slave); |
2999 | write_unlock_bh(&bond->curr_slave_lock); | 2995 | write_unlock_bh(&bond->curr_slave_lock); |
3000 | 2996 | ||
3001 | rtnl_unlock(); | ||
3002 | |||
3003 | slave = bond->primary_slave; | 2997 | slave = bond->primary_slave; |
3004 | slave->jiffies = jiffies; | 2998 | slave->jiffies = jiffies; |
3005 | } else { | 2999 | } else { |
@@ -3769,42 +3763,45 @@ static struct net_device_stats *bond_get_stats(struct net_device *bond_dev) | |||
3769 | { | 3763 | { |
3770 | struct bonding *bond = bond_dev->priv; | 3764 | struct bonding *bond = bond_dev->priv; |
3771 | struct net_device_stats *stats = &(bond->stats), *sstats; | 3765 | struct net_device_stats *stats = &(bond->stats), *sstats; |
3766 | struct net_device_stats local_stats; | ||
3772 | struct slave *slave; | 3767 | struct slave *slave; |
3773 | int i; | 3768 | int i; |
3774 | 3769 | ||
3775 | memset(stats, 0, sizeof(struct net_device_stats)); | 3770 | memset(&local_stats, 0, sizeof(struct net_device_stats)); |
3776 | 3771 | ||
3777 | read_lock_bh(&bond->lock); | 3772 | read_lock_bh(&bond->lock); |
3778 | 3773 | ||
3779 | bond_for_each_slave(bond, slave, i) { | 3774 | bond_for_each_slave(bond, slave, i) { |
3780 | sstats = slave->dev->get_stats(slave->dev); | 3775 | sstats = slave->dev->get_stats(slave->dev); |
3781 | stats->rx_packets += sstats->rx_packets; | 3776 | local_stats.rx_packets += sstats->rx_packets; |
3782 | stats->rx_bytes += sstats->rx_bytes; | 3777 | local_stats.rx_bytes += sstats->rx_bytes; |
3783 | stats->rx_errors += sstats->rx_errors; | 3778 | local_stats.rx_errors += sstats->rx_errors; |
3784 | stats->rx_dropped += sstats->rx_dropped; | 3779 | local_stats.rx_dropped += sstats->rx_dropped; |
3785 | 3780 | ||
3786 | stats->tx_packets += sstats->tx_packets; | 3781 | local_stats.tx_packets += sstats->tx_packets; |
3787 | stats->tx_bytes += sstats->tx_bytes; | 3782 | local_stats.tx_bytes += sstats->tx_bytes; |
3788 | stats->tx_errors += sstats->tx_errors; | 3783 | local_stats.tx_errors += sstats->tx_errors; |
3789 | stats->tx_dropped += sstats->tx_dropped; | 3784 | local_stats.tx_dropped += sstats->tx_dropped; |
3790 | 3785 | ||
3791 | stats->multicast += sstats->multicast; | 3786 | local_stats.multicast += sstats->multicast; |
3792 | stats->collisions += sstats->collisions; | 3787 | local_stats.collisions += sstats->collisions; |
3793 | 3788 | ||
3794 | stats->rx_length_errors += sstats->rx_length_errors; | 3789 | local_stats.rx_length_errors += sstats->rx_length_errors; |
3795 | stats->rx_over_errors += sstats->rx_over_errors; | 3790 | local_stats.rx_over_errors += sstats->rx_over_errors; |
3796 | stats->rx_crc_errors += sstats->rx_crc_errors; | 3791 | local_stats.rx_crc_errors += sstats->rx_crc_errors; |
3797 | stats->rx_frame_errors += sstats->rx_frame_errors; | 3792 | local_stats.rx_frame_errors += sstats->rx_frame_errors; |
3798 | stats->rx_fifo_errors += sstats->rx_fifo_errors; | 3793 | local_stats.rx_fifo_errors += sstats->rx_fifo_errors; |
3799 | stats->rx_missed_errors += sstats->rx_missed_errors; | 3794 | local_stats.rx_missed_errors += sstats->rx_missed_errors; |
3800 | 3795 | ||
3801 | stats->tx_aborted_errors += sstats->tx_aborted_errors; | 3796 | local_stats.tx_aborted_errors += sstats->tx_aborted_errors; |
3802 | stats->tx_carrier_errors += sstats->tx_carrier_errors; | 3797 | local_stats.tx_carrier_errors += sstats->tx_carrier_errors; |
3803 | stats->tx_fifo_errors += sstats->tx_fifo_errors; | 3798 | local_stats.tx_fifo_errors += sstats->tx_fifo_errors; |
3804 | stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors; | 3799 | local_stats.tx_heartbeat_errors += sstats->tx_heartbeat_errors; |
3805 | stats->tx_window_errors += sstats->tx_window_errors; | 3800 | local_stats.tx_window_errors += sstats->tx_window_errors; |
3806 | } | 3801 | } |
3807 | 3802 | ||
3803 | memcpy(stats, &local_stats, sizeof(struct net_device_stats)); | ||
3804 | |||
3808 | read_unlock_bh(&bond->lock); | 3805 | read_unlock_bh(&bond->lock); |
3809 | 3806 | ||
3810 | return stats; | 3807 | return stats; |
@@ -3937,8 +3934,6 @@ static void bond_set_multicast_list(struct net_device *bond_dev) | |||
3937 | struct bonding *bond = bond_dev->priv; | 3934 | struct bonding *bond = bond_dev->priv; |
3938 | struct dev_mc_list *dmi; | 3935 | struct dev_mc_list *dmi; |
3939 | 3936 | ||
3940 | write_lock_bh(&bond->lock); | ||
3941 | |||
3942 | /* | 3937 | /* |
3943 | * Do promisc before checking multicast_mode | 3938 | * Do promisc before checking multicast_mode |
3944 | */ | 3939 | */ |
@@ -3959,6 +3954,8 @@ static void bond_set_multicast_list(struct net_device *bond_dev) | |||
3959 | bond_set_allmulti(bond, -1); | 3954 | bond_set_allmulti(bond, -1); |
3960 | } | 3955 | } |
3961 | 3956 | ||
3957 | read_lock(&bond->lock); | ||
3958 | |||
3962 | bond->flags = bond_dev->flags; | 3959 | bond->flags = bond_dev->flags; |
3963 | 3960 | ||
3964 | /* looking for addresses to add to slaves' mc list */ | 3961 | /* looking for addresses to add to slaves' mc list */ |
@@ -3979,7 +3976,7 @@ static void bond_set_multicast_list(struct net_device *bond_dev) | |||
3979 | bond_mc_list_destroy(bond); | 3976 | bond_mc_list_destroy(bond); |
3980 | bond_mc_list_copy(bond_dev->mc_list, bond, GFP_ATOMIC); | 3977 | bond_mc_list_copy(bond_dev->mc_list, bond, GFP_ATOMIC); |
3981 | 3978 | ||
3982 | write_unlock_bh(&bond->lock); | 3979 | read_unlock(&bond->lock); |
3983 | } | 3980 | } |
3984 | 3981 | ||
3985 | /* | 3982 | /* |
@@ -4526,7 +4523,9 @@ static void bond_free_all(void) | |||
4526 | struct net_device *bond_dev = bond->dev; | 4523 | struct net_device *bond_dev = bond->dev; |
4527 | 4524 | ||
4528 | bond_work_cancel_all(bond); | 4525 | bond_work_cancel_all(bond); |
4526 | netif_tx_lock_bh(bond_dev); | ||
4529 | bond_mc_list_destroy(bond); | 4527 | bond_mc_list_destroy(bond); |
4528 | netif_tx_unlock_bh(bond_dev); | ||
4530 | /* Release the bonded slaves */ | 4529 | /* Release the bonded slaves */ |
4531 | bond_release_all(bond_dev); | 4530 | bond_release_all(bond_dev); |
4532 | bond_deinit(bond_dev); | 4531 | bond_deinit(bond_dev); |
@@ -4549,14 +4548,19 @@ static void bond_free_all(void) | |||
4549 | int bond_parse_parm(const char *buf, struct bond_parm_tbl *tbl) | 4548 | int bond_parse_parm(const char *buf, struct bond_parm_tbl *tbl) |
4550 | { | 4549 | { |
4551 | int mode = -1, i, rv; | 4550 | int mode = -1, i, rv; |
4552 | char modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, }; | 4551 | char *p, modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, }; |
4553 | 4552 | ||
4554 | rv = sscanf(buf, "%d", &mode); | 4553 | for (p = (char *)buf; *p; p++) |
4555 | if (!rv) { | 4554 | if (!(isdigit(*p) || isspace(*p))) |
4555 | break; | ||
4556 | |||
4557 | if (*p) | ||
4556 | rv = sscanf(buf, "%20s", modestr); | 4558 | rv = sscanf(buf, "%20s", modestr); |
4557 | if (!rv) | 4559 | else |
4558 | return -1; | 4560 | rv = sscanf(buf, "%d", &mode); |
4559 | } | 4561 | |
4562 | if (!rv) | ||
4563 | return -1; | ||
4560 | 4564 | ||
4561 | for (i = 0; tbl[i].modename; i++) { | 4565 | for (i = 0; tbl[i].modename; i++) { |
4562 | if (mode == tbl[i].mode) | 4566 | if (mode == tbl[i].mode) |
@@ -4883,14 +4887,16 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond | |||
4883 | down_write(&bonding_rwsem); | 4887 | down_write(&bonding_rwsem); |
4884 | 4888 | ||
4885 | /* Check to see if the bond already exists. */ | 4889 | /* Check to see if the bond already exists. */ |
4886 | list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) | 4890 | if (name) { |
4887 | if (strnicmp(bond->dev->name, name, IFNAMSIZ) == 0) { | 4891 | list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) |
4888 | printk(KERN_ERR DRV_NAME | 4892 | if (strnicmp(bond->dev->name, name, IFNAMSIZ) == 0) { |
4893 | printk(KERN_ERR DRV_NAME | ||
4889 | ": cannot add bond %s; it already exists\n", | 4894 | ": cannot add bond %s; it already exists\n", |
4890 | name); | 4895 | name); |
4891 | res = -EPERM; | 4896 | res = -EPERM; |
4892 | goto out_rtnl; | 4897 | goto out_rtnl; |
4893 | } | 4898 | } |
4899 | } | ||
4894 | 4900 | ||
4895 | bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "", | 4901 | bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "", |
4896 | ether_setup); | 4902 | ether_setup); |
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 6d83be49899a..67ccad69d445 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h | |||
@@ -22,8 +22,8 @@ | |||
22 | #include "bond_3ad.h" | 22 | #include "bond_3ad.h" |
23 | #include "bond_alb.h" | 23 | #include "bond_alb.h" |
24 | 24 | ||
25 | #define DRV_VERSION "3.2.3" | 25 | #define DRV_VERSION "3.2.4" |
26 | #define DRV_RELDATE "December 6, 2007" | 26 | #define DRV_RELDATE "January 28, 2008" |
27 | #define DRV_NAME "bonding" | 27 | #define DRV_NAME "bonding" |
28 | #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" | 28 | #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" |
29 | 29 | ||
diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/cxgb3/mc5.c index 84c1ffa8e2d3..4c4d6e877ea6 100644 --- a/drivers/net/cxgb3/mc5.c +++ b/drivers/net/cxgb3/mc5.c | |||
@@ -452,7 +452,7 @@ void t3_mc5_intr_handler(struct mc5 *mc5) | |||
452 | t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause); | 452 | t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause); |
453 | } | 453 | } |
454 | 454 | ||
455 | void __devinit t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode) | 455 | void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode) |
456 | { | 456 | { |
457 | #define K * 1024 | 457 | #define K * 1024 |
458 | 458 | ||
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index cb684d30831f..9ca8c66abd16 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c | |||
@@ -2836,7 +2836,7 @@ void t3_sge_init(struct adapter *adap, struct sge_params *p) | |||
2836 | * defaults for the assorted SGE parameters, which admins can change until | 2836 | * defaults for the assorted SGE parameters, which admins can change until |
2837 | * they are used to initialize the SGE. | 2837 | * they are used to initialize the SGE. |
2838 | */ | 2838 | */ |
2839 | void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p) | 2839 | void t3_sge_prep(struct adapter *adap, struct sge_params *p) |
2840 | { | 2840 | { |
2841 | int i; | 2841 | int i; |
2842 | 2842 | ||
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c index 7469935877bd..a99496a431c4 100644 --- a/drivers/net/cxgb3/t3_hw.c +++ b/drivers/net/cxgb3/t3_hw.c | |||
@@ -2675,7 +2675,7 @@ void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size) | |||
2675 | V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size)); | 2675 | V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size)); |
2676 | } | 2676 | } |
2677 | 2677 | ||
2678 | static void __devinit init_mtus(unsigned short mtus[]) | 2678 | static void init_mtus(unsigned short mtus[]) |
2679 | { | 2679 | { |
2680 | /* | 2680 | /* |
2681 | * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so | 2681 | * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so |
@@ -2703,7 +2703,7 @@ static void __devinit init_mtus(unsigned short mtus[]) | |||
2703 | /* | 2703 | /* |
2704 | * Initial congestion control parameters. | 2704 | * Initial congestion control parameters. |
2705 | */ | 2705 | */ |
2706 | static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b) | 2706 | static void init_cong_ctrl(unsigned short *a, unsigned short *b) |
2707 | { | 2707 | { |
2708 | a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; | 2708 | a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; |
2709 | a[9] = 2; | 2709 | a[9] = 2; |
@@ -3354,8 +3354,7 @@ out_err: | |||
3354 | * Determines a card's PCI mode and associated parameters, such as speed | 3354 | * Determines a card's PCI mode and associated parameters, such as speed |
3355 | * and width. | 3355 | * and width. |
3356 | */ | 3356 | */ |
3357 | static void __devinit get_pci_mode(struct adapter *adapter, | 3357 | static void get_pci_mode(struct adapter *adapter, struct pci_params *p) |
3358 | struct pci_params *p) | ||
3359 | { | 3358 | { |
3360 | static unsigned short speed_map[] = { 33, 66, 100, 133 }; | 3359 | static unsigned short speed_map[] = { 33, 66, 100, 133 }; |
3361 | u32 pci_mode, pcie_cap; | 3360 | u32 pci_mode, pcie_cap; |
@@ -3395,8 +3394,7 @@ static void __devinit get_pci_mode(struct adapter *adapter, | |||
3395 | * capabilities and default speed/duplex/flow-control/autonegotiation | 3394 | * capabilities and default speed/duplex/flow-control/autonegotiation |
3396 | * settings. | 3395 | * settings. |
3397 | */ | 3396 | */ |
3398 | static void __devinit init_link_config(struct link_config *lc, | 3397 | static void init_link_config(struct link_config *lc, unsigned int caps) |
3399 | unsigned int caps) | ||
3400 | { | 3398 | { |
3401 | lc->supported = caps; | 3399 | lc->supported = caps; |
3402 | lc->requested_speed = lc->speed = SPEED_INVALID; | 3400 | lc->requested_speed = lc->speed = SPEED_INVALID; |
@@ -3419,7 +3417,7 @@ static void __devinit init_link_config(struct link_config *lc, | |||
3419 | * Calculates the size of an MC7 memory in bytes from the value of its | 3417 | * Calculates the size of an MC7 memory in bytes from the value of its |
3420 | * configuration register. | 3418 | * configuration register. |
3421 | */ | 3419 | */ |
3422 | static unsigned int __devinit mc7_calc_size(u32 cfg) | 3420 | static unsigned int mc7_calc_size(u32 cfg) |
3423 | { | 3421 | { |
3424 | unsigned int width = G_WIDTH(cfg); | 3422 | unsigned int width = G_WIDTH(cfg); |
3425 | unsigned int banks = !!(cfg & F_BKS) + 1; | 3423 | unsigned int banks = !!(cfg & F_BKS) + 1; |
@@ -3430,8 +3428,8 @@ static unsigned int __devinit mc7_calc_size(u32 cfg) | |||
3430 | return MBs << 20; | 3428 | return MBs << 20; |
3431 | } | 3429 | } |
3432 | 3430 | ||
3433 | static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7, | 3431 | static void mc7_prep(struct adapter *adapter, struct mc7 *mc7, |
3434 | unsigned int base_addr, const char *name) | 3432 | unsigned int base_addr, const char *name) |
3435 | { | 3433 | { |
3436 | u32 cfg; | 3434 | u32 cfg; |
3437 | 3435 | ||
@@ -3517,7 +3515,7 @@ static int t3_reset_adapter(struct adapter *adapter) | |||
3517 | return 0; | 3515 | return 0; |
3518 | } | 3516 | } |
3519 | 3517 | ||
3520 | static int __devinit init_parity(struct adapter *adap) | 3518 | static int init_parity(struct adapter *adap) |
3521 | { | 3519 | { |
3522 | int i, err, addr; | 3520 | int i, err, addr; |
3523 | 3521 | ||
@@ -3552,8 +3550,8 @@ static int __devinit init_parity(struct adapter *adap) | |||
3552 | * for some adapter tunables, take PHYs out of reset, and initialize the MDIO | 3550 | * for some adapter tunables, take PHYs out of reset, and initialize the MDIO |
3553 | * interface. | 3551 | * interface. |
3554 | */ | 3552 | */ |
3555 | int __devinit t3_prep_adapter(struct adapter *adapter, | 3553 | int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, |
3556 | const struct adapter_info *ai, int reset) | 3554 | int reset) |
3557 | { | 3555 | { |
3558 | int ret; | 3556 | int ret; |
3559 | unsigned int i, j = 0; | 3557 | unsigned int i, j = 0; |
diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 51cf577035be..36ba6dc96acc 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c | |||
@@ -94,7 +94,7 @@ | |||
94 | * enabled. 82557 pads with 7Eh, while the later controllers pad | 94 | * enabled. 82557 pads with 7Eh, while the later controllers pad |
95 | * with 00h. | 95 | * with 00h. |
96 | * | 96 | * |
97 | * IV. Recieve | 97 | * IV. Receive |
98 | * | 98 | * |
99 | * The Receive Frame Area (RFA) comprises a ring of Receive Frame | 99 | * The Receive Frame Area (RFA) comprises a ring of Receive Frame |
100 | * Descriptors (RFD) + data buffer, thus forming the simplified mode | 100 | * Descriptors (RFD) + data buffer, thus forming the simplified mode |
@@ -120,7 +120,7 @@ | |||
120 | * and Rx indication and re-allocation happen in the same context, | 120 | * and Rx indication and re-allocation happen in the same context, |
121 | * therefore no locking is required. A software-generated interrupt | 121 | * therefore no locking is required. A software-generated interrupt |
122 | * is generated from the watchdog to recover from a failed allocation | 122 | * is generated from the watchdog to recover from a failed allocation |
123 | * senario where all Rx resources have been indicated and none re- | 123 | * scenario where all Rx resources have been indicated and none re- |
124 | * placed. | 124 | * placed. |
125 | * | 125 | * |
126 | * V. Miscellaneous | 126 | * V. Miscellaneous |
@@ -954,7 +954,7 @@ static void e100_get_defaults(struct nic *nic) | |||
954 | /* Quadwords to DMA into FIFO before starting frame transmit */ | 954 | /* Quadwords to DMA into FIFO before starting frame transmit */ |
955 | nic->tx_threshold = 0xE0; | 955 | nic->tx_threshold = 0xE0; |
956 | 956 | ||
957 | /* no interrupt for every tx completion, delay = 256us if not 557*/ | 957 | /* no interrupt for every tx completion, delay = 256us if not 557 */ |
958 | nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | | 958 | nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | |
959 | ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i)); | 959 | ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i)); |
960 | 960 | ||
@@ -1497,7 +1497,7 @@ static void e100_update_stats(struct nic *nic) | |||
1497 | &s->complete; | 1497 | &s->complete; |
1498 | 1498 | ||
1499 | /* Device's stats reporting may take several microseconds to | 1499 | /* Device's stats reporting may take several microseconds to |
1500 | * complete, so where always waiting for results of the | 1500 | * complete, so we're always waiting for results of the |
1501 | * previous command. */ | 1501 | * previous command. */ |
1502 | 1502 | ||
1503 | if(*complete == cpu_to_le32(cuc_dump_reset_complete)) { | 1503 | if(*complete == cpu_to_le32(cuc_dump_reset_complete)) { |
@@ -1958,7 +1958,7 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done, | |||
1958 | 1958 | ||
1959 | if(restart_required) { | 1959 | if(restart_required) { |
1960 | // ack the rnr? | 1960 | // ack the rnr? |
1961 | writeb(stat_ack_rnr, &nic->csr->scb.stat_ack); | 1961 | iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); |
1962 | e100_start_receiver(nic, nic->rx_to_clean); | 1962 | e100_start_receiver(nic, nic->rx_to_clean); |
1963 | if(work_done) | 1963 | if(work_done) |
1964 | (*work_done)++; | 1964 | (*work_done)++; |
@@ -2774,7 +2774,7 @@ static void __devexit e100_remove(struct pci_dev *pdev) | |||
2774 | struct nic *nic = netdev_priv(netdev); | 2774 | struct nic *nic = netdev_priv(netdev); |
2775 | unregister_netdev(netdev); | 2775 | unregister_netdev(netdev); |
2776 | e100_free(nic); | 2776 | e100_free(nic); |
2777 | iounmap(nic->csr); | 2777 | pci_iounmap(pdev, nic->csr); |
2778 | free_netdev(netdev); | 2778 | free_netdev(netdev); |
2779 | pci_release_regions(pdev); | 2779 | pci_release_regions(pdev); |
2780 | pci_disable_device(pdev); | 2780 | pci_disable_device(pdev); |
@@ -2858,17 +2858,17 @@ static void e100_shutdown(struct pci_dev *pdev) | |||
2858 | /** | 2858 | /** |
2859 | * e100_io_error_detected - called when PCI error is detected. | 2859 | * e100_io_error_detected - called when PCI error is detected. |
2860 | * @pdev: Pointer to PCI device | 2860 | * @pdev: Pointer to PCI device |
2861 | * @state: The current pci conneection state | 2861 | * @state: The current pci connection state |
2862 | */ | 2862 | */ |
2863 | static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) | 2863 | static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) |
2864 | { | 2864 | { |
2865 | struct net_device *netdev = pci_get_drvdata(pdev); | 2865 | struct net_device *netdev = pci_get_drvdata(pdev); |
2866 | struct nic *nic = netdev_priv(netdev); | 2866 | struct nic *nic = netdev_priv(netdev); |
2867 | 2867 | ||
2868 | /* Similar to calling e100_down(), but avoids adpater I/O. */ | 2868 | /* Similar to calling e100_down(), but avoids adapter I/O. */ |
2869 | netdev->stop(netdev); | 2869 | netdev->stop(netdev); |
2870 | 2870 | ||
2871 | /* Detach; put netif into state similar to hotplug unplug. */ | 2871 | /* Detach; put netif into a state similar to hotplug unplug. */ |
2872 | napi_enable(&nic->napi); | 2872 | napi_enable(&nic->napi); |
2873 | netif_device_detach(netdev); | 2873 | netif_device_detach(netdev); |
2874 | pci_disable_device(pdev); | 2874 | pci_disable_device(pdev); |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 8c87940a9ce8..7c5b05a82f0e 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -853,7 +853,7 @@ e1000_reset(struct e1000_adapter *adapter) | |||
853 | /** | 853 | /** |
854 | * Dump the eeprom for users having checksum issues | 854 | * Dump the eeprom for users having checksum issues |
855 | **/ | 855 | **/ |
856 | void e1000_dump_eeprom(struct e1000_adapter *adapter) | 856 | static void e1000_dump_eeprom(struct e1000_adapter *adapter) |
857 | { | 857 | { |
858 | struct net_device *netdev = adapter->netdev; | 858 | struct net_device *netdev = adapter->netdev; |
859 | struct ethtool_eeprom eeprom; | 859 | struct ethtool_eeprom eeprom; |
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h index f2175ea46b83..6232c3e96689 100644 --- a/drivers/net/e1000e/defines.h +++ b/drivers/net/e1000e/defines.h | |||
@@ -63,6 +63,7 @@ | |||
63 | #define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ | 63 | #define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ |
64 | #define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ | 64 | #define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ |
65 | #define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ | 65 | #define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ |
66 | #define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ | ||
66 | 67 | ||
67 | /* Extended Device Control */ | 68 | /* Extended Device Control */ |
68 | #define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ | 69 | #define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ |
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index 6d9c27fd0b53..f77a7427d3a0 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c | |||
@@ -690,8 +690,8 @@ err_setup: | |||
690 | return err; | 690 | return err; |
691 | } | 691 | } |
692 | 692 | ||
693 | bool reg_pattern_test_array(struct e1000_adapter *adapter, u64 *data, | 693 | static bool reg_pattern_test_array(struct e1000_adapter *adapter, u64 *data, |
694 | int reg, int offset, u32 mask, u32 write) | 694 | int reg, int offset, u32 mask, u32 write) |
695 | { | 695 | { |
696 | int i; | 696 | int i; |
697 | u32 read; | 697 | u32 read; |
@@ -1632,7 +1632,8 @@ static void e1000_get_wol(struct net_device *netdev, | |||
1632 | return; | 1632 | return; |
1633 | 1633 | ||
1634 | wol->supported = WAKE_UCAST | WAKE_MCAST | | 1634 | wol->supported = WAKE_UCAST | WAKE_MCAST | |
1635 | WAKE_BCAST | WAKE_MAGIC; | 1635 | WAKE_BCAST | WAKE_MAGIC | |
1636 | WAKE_PHY | WAKE_ARP; | ||
1636 | 1637 | ||
1637 | /* apply any specific unsupported masks here */ | 1638 | /* apply any specific unsupported masks here */ |
1638 | if (adapter->flags & FLAG_NO_WAKE_UCAST) { | 1639 | if (adapter->flags & FLAG_NO_WAKE_UCAST) { |
@@ -1651,6 +1652,10 @@ static void e1000_get_wol(struct net_device *netdev, | |||
1651 | wol->wolopts |= WAKE_BCAST; | 1652 | wol->wolopts |= WAKE_BCAST; |
1652 | if (adapter->wol & E1000_WUFC_MAG) | 1653 | if (adapter->wol & E1000_WUFC_MAG) |
1653 | wol->wolopts |= WAKE_MAGIC; | 1654 | wol->wolopts |= WAKE_MAGIC; |
1655 | if (adapter->wol & E1000_WUFC_LNKC) | ||
1656 | wol->wolopts |= WAKE_PHY; | ||
1657 | if (adapter->wol & E1000_WUFC_ARP) | ||
1658 | wol->wolopts |= WAKE_ARP; | ||
1654 | } | 1659 | } |
1655 | 1660 | ||
1656 | static int e1000_set_wol(struct net_device *netdev, | 1661 | static int e1000_set_wol(struct net_device *netdev, |
@@ -1658,7 +1663,7 @@ static int e1000_set_wol(struct net_device *netdev, | |||
1658 | { | 1663 | { |
1659 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1664 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1660 | 1665 | ||
1661 | if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) | 1666 | if (wol->wolopts & WAKE_MAGICSECURE) |
1662 | return -EOPNOTSUPP; | 1667 | return -EOPNOTSUPP; |
1663 | 1668 | ||
1664 | if (!(adapter->flags & FLAG_HAS_WOL)) | 1669 | if (!(adapter->flags & FLAG_HAS_WOL)) |
@@ -1675,6 +1680,10 @@ static int e1000_set_wol(struct net_device *netdev, | |||
1675 | adapter->wol |= E1000_WUFC_BC; | 1680 | adapter->wol |= E1000_WUFC_BC; |
1676 | if (wol->wolopts & WAKE_MAGIC) | 1681 | if (wol->wolopts & WAKE_MAGIC) |
1677 | adapter->wol |= E1000_WUFC_MAG; | 1682 | adapter->wol |= E1000_WUFC_MAG; |
1683 | if (wol->wolopts & WAKE_PHY) | ||
1684 | adapter->wol |= E1000_WUFC_LNKC; | ||
1685 | if (wol->wolopts & WAKE_ARP) | ||
1686 | adapter->wol |= E1000_WUFC_ARP; | ||
1678 | 1687 | ||
1679 | return 0; | 1688 | return 0; |
1680 | } | 1689 | } |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 0a2cb7960c9e..f58f017ee47a 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -945,11 +945,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter) | |||
945 | int irq_flags = IRQF_SHARED; | 945 | int irq_flags = IRQF_SHARED; |
946 | int err; | 946 | int err; |
947 | 947 | ||
948 | err = pci_enable_msi(adapter->pdev); | 948 | if (!pci_enable_msi(adapter->pdev)) { |
949 | if (err) { | ||
950 | ndev_warn(netdev, | ||
951 | "Unable to allocate MSI interrupt Error: %d\n", err); | ||
952 | } else { | ||
953 | adapter->flags |= FLAG_MSI_ENABLED; | 949 | adapter->flags |= FLAG_MSI_ENABLED; |
954 | handler = e1000_intr_msi; | 950 | handler = e1000_intr_msi; |
955 | irq_flags = 0; | 951 | irq_flags = 0; |
@@ -958,10 +954,12 @@ static int e1000_request_irq(struct e1000_adapter *adapter) | |||
958 | err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, | 954 | err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, |
959 | netdev); | 955 | netdev); |
960 | if (err) { | 956 | if (err) { |
957 | ndev_err(netdev, | ||
958 | "Unable to allocate %s interrupt (return: %d)\n", | ||
959 | adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx", | ||
960 | err); | ||
961 | if (adapter->flags & FLAG_MSI_ENABLED) | 961 | if (adapter->flags & FLAG_MSI_ENABLED) |
962 | pci_disable_msi(adapter->pdev); | 962 | pci_disable_msi(adapter->pdev); |
963 | ndev_err(netdev, | ||
964 | "Unable to allocate interrupt Error: %d\n", err); | ||
965 | } | 963 | } |
966 | 964 | ||
967 | return err; | 965 | return err; |
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 5f82a4647eee..88fb53eba715 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h | |||
@@ -458,4 +458,7 @@ void ehea_set_ethtool_ops(struct net_device *netdev); | |||
458 | int ehea_sense_port_attr(struct ehea_port *port); | 458 | int ehea_sense_port_attr(struct ehea_port *port); |
459 | int ehea_set_portspeed(struct ehea_port *port, u32 port_speed); | 459 | int ehea_set_portspeed(struct ehea_port *port, u32 port_speed); |
460 | 460 | ||
461 | extern u64 ehea_driver_flags; | ||
462 | extern struct work_struct ehea_rereg_mr_task; | ||
463 | |||
461 | #endif /* __EHEA_H__ */ | 464 | #endif /* __EHEA_H__ */ |
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c index 679f40ee9572..d76885223366 100644 --- a/drivers/net/ehea/ehea_ethtool.c +++ b/drivers/net/ehea/ehea_ethtool.c | |||
@@ -40,7 +40,7 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
40 | return ret; | 40 | return ret; |
41 | 41 | ||
42 | if (netif_carrier_ok(dev)) { | 42 | if (netif_carrier_ok(dev)) { |
43 | switch(port->port_speed) { | 43 | switch (port->port_speed) { |
44 | case EHEA_SPEED_10M: cmd->speed = SPEED_10; break; | 44 | case EHEA_SPEED_10M: cmd->speed = SPEED_10; break; |
45 | case EHEA_SPEED_100M: cmd->speed = SPEED_100; break; | 45 | case EHEA_SPEED_100M: cmd->speed = SPEED_100; break; |
46 | case EHEA_SPEED_1G: cmd->speed = SPEED_1000; break; | 46 | case EHEA_SPEED_1G: cmd->speed = SPEED_1000; break; |
@@ -78,7 +78,7 @@ static int ehea_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
78 | goto doit; | 78 | goto doit; |
79 | } | 79 | } |
80 | 80 | ||
81 | switch(cmd->speed) { | 81 | switch (cmd->speed) { |
82 | case SPEED_10: | 82 | case SPEED_10: |
83 | if (cmd->duplex == DUPLEX_FULL) | 83 | if (cmd->duplex == DUPLEX_FULL) |
84 | sp = H_SPEED_10M_F; | 84 | sp = H_SPEED_10M_F; |
diff --git a/drivers/net/ehea/ehea_hw.h b/drivers/net/ehea/ehea_hw.h index 1af7ca499ec5..567981b4b2cc 100644 --- a/drivers/net/ehea/ehea_hw.h +++ b/drivers/net/ehea/ehea_hw.h | |||
@@ -29,10 +29,10 @@ | |||
29 | #ifndef __EHEA_HW_H__ | 29 | #ifndef __EHEA_HW_H__ |
30 | #define __EHEA_HW_H__ | 30 | #define __EHEA_HW_H__ |
31 | 31 | ||
32 | #define QPX_SQA_VALUE EHEA_BMASK_IBM(48,63) | 32 | #define QPX_SQA_VALUE EHEA_BMASK_IBM(48, 63) |
33 | #define QPX_RQ1A_VALUE EHEA_BMASK_IBM(48,63) | 33 | #define QPX_RQ1A_VALUE EHEA_BMASK_IBM(48, 63) |
34 | #define QPX_RQ2A_VALUE EHEA_BMASK_IBM(48,63) | 34 | #define QPX_RQ2A_VALUE EHEA_BMASK_IBM(48, 63) |
35 | #define QPX_RQ3A_VALUE EHEA_BMASK_IBM(48,63) | 35 | #define QPX_RQ3A_VALUE EHEA_BMASK_IBM(48, 63) |
36 | 36 | ||
37 | #define QPTEMM_OFFSET(x) offsetof(struct ehea_qptemm, x) | 37 | #define QPTEMM_OFFSET(x) offsetof(struct ehea_qptemm, x) |
38 | 38 | ||
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 869e1604b16e..c051c7e09b9a 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -6,9 +6,9 @@ | |||
6 | * (C) Copyright IBM Corp. 2006 | 6 | * (C) Copyright IBM Corp. 2006 |
7 | * | 7 | * |
8 | * Authors: | 8 | * Authors: |
9 | * Christoph Raisch <raisch@de.ibm.com> | 9 | * Christoph Raisch <raisch@de.ibm.com> |
10 | * Jan-Bernd Themann <themann@de.ibm.com> | 10 | * Jan-Bernd Themann <themann@de.ibm.com> |
11 | * Thomas Klein <tklein@de.ibm.com> | 11 | * Thomas Klein <tklein@de.ibm.com> |
12 | * | 12 | * |
13 | * | 13 | * |
14 | * This program is free software; you can redistribute it and/or modify | 14 | * This program is free software; you can redistribute it and/or modify |
@@ -54,11 +54,11 @@ static int rq1_entries = EHEA_DEF_ENTRIES_RQ1; | |||
54 | static int rq2_entries = EHEA_DEF_ENTRIES_RQ2; | 54 | static int rq2_entries = EHEA_DEF_ENTRIES_RQ2; |
55 | static int rq3_entries = EHEA_DEF_ENTRIES_RQ3; | 55 | static int rq3_entries = EHEA_DEF_ENTRIES_RQ3; |
56 | static int sq_entries = EHEA_DEF_ENTRIES_SQ; | 56 | static int sq_entries = EHEA_DEF_ENTRIES_SQ; |
57 | static int use_mcs = 0; | 57 | static int use_mcs; |
58 | static int use_lro = 0; | 58 | static int use_lro; |
59 | static int lro_max_aggr = EHEA_LRO_MAX_AGGR; | 59 | static int lro_max_aggr = EHEA_LRO_MAX_AGGR; |
60 | static int num_tx_qps = EHEA_NUM_TX_QP; | 60 | static int num_tx_qps = EHEA_NUM_TX_QP; |
61 | static int prop_carrier_state = 0; | 61 | static int prop_carrier_state; |
62 | 62 | ||
63 | module_param(msg_level, int, 0); | 63 | module_param(msg_level, int, 0); |
64 | module_param(rq1_entries, int, 0); | 64 | module_param(rq1_entries, int, 0); |
@@ -94,9 +94,9 @@ MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = " | |||
94 | MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, " | 94 | MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, " |
95 | "Default = 0"); | 95 | "Default = 0"); |
96 | 96 | ||
97 | static int port_name_cnt = 0; | 97 | static int port_name_cnt; |
98 | static LIST_HEAD(adapter_list); | 98 | static LIST_HEAD(adapter_list); |
99 | u64 ehea_driver_flags = 0; | 99 | u64 ehea_driver_flags; |
100 | struct work_struct ehea_rereg_mr_task; | 100 | struct work_struct ehea_rereg_mr_task; |
101 | 101 | ||
102 | struct semaphore dlpar_mem_lock; | 102 | struct semaphore dlpar_mem_lock; |
@@ -121,12 +121,13 @@ static struct of_platform_driver ehea_driver = { | |||
121 | .remove = ehea_remove, | 121 | .remove = ehea_remove, |
122 | }; | 122 | }; |
123 | 123 | ||
124 | void ehea_dump(void *adr, int len, char *msg) { | 124 | void ehea_dump(void *adr, int len, char *msg) |
125 | { | ||
125 | int x; | 126 | int x; |
126 | unsigned char *deb = adr; | 127 | unsigned char *deb = adr; |
127 | for (x = 0; x < len; x += 16) { | 128 | for (x = 0; x < len; x += 16) { |
128 | printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg, | 129 | printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg, |
129 | deb, x, *((u64*)&deb[0]), *((u64*)&deb[8])); | 130 | deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8])); |
130 | deb += 16; | 131 | deb += 16; |
131 | } | 132 | } |
132 | } | 133 | } |
@@ -518,7 +519,8 @@ static int ehea_proc_rwqes(struct net_device *dev, | |||
518 | last_wqe_index = wqe_index; | 519 | last_wqe_index = wqe_index; |
519 | rmb(); | 520 | rmb(); |
520 | if (!ehea_check_cqe(cqe, &rq)) { | 521 | if (!ehea_check_cqe(cqe, &rq)) { |
521 | if (rq == 1) { /* LL RQ1 */ | 522 | if (rq == 1) { |
523 | /* LL RQ1 */ | ||
522 | skb = get_skb_by_index_ll(skb_arr_rq1, | 524 | skb = get_skb_by_index_ll(skb_arr_rq1, |
523 | skb_arr_rq1_len, | 525 | skb_arr_rq1_len, |
524 | wqe_index); | 526 | wqe_index); |
@@ -531,10 +533,11 @@ static int ehea_proc_rwqes(struct net_device *dev, | |||
531 | if (!skb) | 533 | if (!skb) |
532 | break; | 534 | break; |
533 | } | 535 | } |
534 | skb_copy_to_linear_data(skb, ((char*)cqe) + 64, | 536 | skb_copy_to_linear_data(skb, ((char *)cqe) + 64, |
535 | cqe->num_bytes_transfered - 4); | 537 | cqe->num_bytes_transfered - 4); |
536 | ehea_fill_skb(dev, skb, cqe); | 538 | ehea_fill_skb(dev, skb, cqe); |
537 | } else if (rq == 2) { /* RQ2 */ | 539 | } else if (rq == 2) { |
540 | /* RQ2 */ | ||
538 | skb = get_skb_by_index(skb_arr_rq2, | 541 | skb = get_skb_by_index(skb_arr_rq2, |
539 | skb_arr_rq2_len, cqe); | 542 | skb_arr_rq2_len, cqe); |
540 | if (unlikely(!skb)) { | 543 | if (unlikely(!skb)) { |
@@ -544,7 +547,8 @@ static int ehea_proc_rwqes(struct net_device *dev, | |||
544 | } | 547 | } |
545 | ehea_fill_skb(dev, skb, cqe); | 548 | ehea_fill_skb(dev, skb, cqe); |
546 | processed_rq2++; | 549 | processed_rq2++; |
547 | } else { /* RQ3 */ | 550 | } else { |
551 | /* RQ3 */ | ||
548 | skb = get_skb_by_index(skb_arr_rq3, | 552 | skb = get_skb_by_index(skb_arr_rq3, |
549 | skb_arr_rq3_len, cqe); | 553 | skb_arr_rq3_len, cqe); |
550 | if (unlikely(!skb)) { | 554 | if (unlikely(!skb)) { |
@@ -592,7 +596,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) | |||
592 | unsigned long flags; | 596 | unsigned long flags; |
593 | 597 | ||
594 | cqe = ehea_poll_cq(send_cq); | 598 | cqe = ehea_poll_cq(send_cq); |
595 | while(cqe && (quota > 0)) { | 599 | while (cqe && (quota > 0)) { |
596 | ehea_inc_cq(send_cq); | 600 | ehea_inc_cq(send_cq); |
597 | 601 | ||
598 | cqe_counter++; | 602 | cqe_counter++; |
@@ -643,7 +647,8 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) | |||
643 | 647 | ||
644 | static int ehea_poll(struct napi_struct *napi, int budget) | 648 | static int ehea_poll(struct napi_struct *napi, int budget) |
645 | { | 649 | { |
646 | struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, napi); | 650 | struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, |
651 | napi); | ||
647 | struct net_device *dev = pr->port->netdev; | 652 | struct net_device *dev = pr->port->netdev; |
648 | struct ehea_cqe *cqe; | 653 | struct ehea_cqe *cqe; |
649 | struct ehea_cqe *cqe_skb = NULL; | 654 | struct ehea_cqe *cqe_skb = NULL; |
@@ -743,8 +748,9 @@ int ehea_sense_port_attr(struct ehea_port *port) | |||
743 | u64 hret; | 748 | u64 hret; |
744 | struct hcp_ehea_port_cb0 *cb0; | 749 | struct hcp_ehea_port_cb0 *cb0; |
745 | 750 | ||
746 | cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC); /* May be called via */ | 751 | /* may be called via ehea_neq_tasklet() */ |
747 | if (!cb0) { /* ehea_neq_tasklet() */ | 752 | cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC); |
753 | if (!cb0) { | ||
748 | ehea_error("no mem for cb0"); | 754 | ehea_error("no mem for cb0"); |
749 | ret = -ENOMEM; | 755 | ret = -ENOMEM; |
750 | goto out; | 756 | goto out; |
@@ -762,7 +768,7 @@ int ehea_sense_port_attr(struct ehea_port *port) | |||
762 | /* MAC address */ | 768 | /* MAC address */ |
763 | port->mac_addr = cb0->port_mac_addr << 16; | 769 | port->mac_addr = cb0->port_mac_addr << 16; |
764 | 770 | ||
765 | if (!is_valid_ether_addr((u8*)&port->mac_addr)) { | 771 | if (!is_valid_ether_addr((u8 *)&port->mac_addr)) { |
766 | ret = -EADDRNOTAVAIL; | 772 | ret = -EADDRNOTAVAIL; |
767 | goto out_free; | 773 | goto out_free; |
768 | } | 774 | } |
@@ -994,7 +1000,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe) | |||
994 | 1000 | ||
995 | static void ehea_neq_tasklet(unsigned long data) | 1001 | static void ehea_neq_tasklet(unsigned long data) |
996 | { | 1002 | { |
997 | struct ehea_adapter *adapter = (struct ehea_adapter*)data; | 1003 | struct ehea_adapter *adapter = (struct ehea_adapter *)data; |
998 | struct ehea_eqe *eqe; | 1004 | struct ehea_eqe *eqe; |
999 | u64 event_mask; | 1005 | u64 event_mask; |
1000 | 1006 | ||
@@ -1204,7 +1210,7 @@ int ehea_rem_smrs(struct ehea_port_res *pr) | |||
1204 | 1210 | ||
1205 | static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries) | 1211 | static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries) |
1206 | { | 1212 | { |
1207 | int arr_size = sizeof(void*) * max_q_entries; | 1213 | int arr_size = sizeof(void *) * max_q_entries; |
1208 | 1214 | ||
1209 | q_skba->arr = vmalloc(arr_size); | 1215 | q_skba->arr = vmalloc(arr_size); |
1210 | if (!q_skba->arr) | 1216 | if (!q_skba->arr) |
@@ -1489,7 +1495,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev, | |||
1489 | 1495 | ||
1490 | nfrags = skb_shinfo(skb)->nr_frags; | 1496 | nfrags = skb_shinfo(skb)->nr_frags; |
1491 | sg1entry = &swqe->u.immdata_desc.sg_entry; | 1497 | sg1entry = &swqe->u.immdata_desc.sg_entry; |
1492 | sg_list = (struct ehea_vsgentry*)&swqe->u.immdata_desc.sg_list; | 1498 | sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list; |
1493 | swqe->descriptors = 0; | 1499 | swqe->descriptors = 0; |
1494 | sg1entry_contains_frag_data = 0; | 1500 | sg1entry_contains_frag_data = 0; |
1495 | 1501 | ||
@@ -1542,7 +1548,7 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid) | |||
1542 | reg_type, port->mac_addr, 0, hcallid); | 1548 | reg_type, port->mac_addr, 0, hcallid); |
1543 | if (hret != H_SUCCESS) { | 1549 | if (hret != H_SUCCESS) { |
1544 | ehea_error("%sregistering bc address failed (tagged)", | 1550 | ehea_error("%sregistering bc address failed (tagged)", |
1545 | hcallid == H_REG_BCMC ? "" : "de"); | 1551 | hcallid == H_REG_BCMC ? "" : "de"); |
1546 | ret = -EIO; | 1552 | ret = -EIO; |
1547 | goto out_herr; | 1553 | goto out_herr; |
1548 | } | 1554 | } |
@@ -1732,7 +1738,7 @@ static void ehea_allmulti(struct net_device *dev, int enable) | |||
1732 | } | 1738 | } |
1733 | } | 1739 | } |
1734 | 1740 | ||
1735 | static void ehea_add_multicast_entry(struct ehea_port* port, u8* mc_mac_addr) | 1741 | static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr) |
1736 | { | 1742 | { |
1737 | struct ehea_mc_list *ehea_mcl_entry; | 1743 | struct ehea_mc_list *ehea_mcl_entry; |
1738 | u64 hret; | 1744 | u64 hret; |
@@ -1791,11 +1797,10 @@ static void ehea_set_multicast_list(struct net_device *dev) | |||
1791 | goto out; | 1797 | goto out; |
1792 | } | 1798 | } |
1793 | 1799 | ||
1794 | for (i = 0, k_mcl_entry = dev->mc_list; | 1800 | for (i = 0, k_mcl_entry = dev->mc_list; i < dev->mc_count; i++, |
1795 | i < dev->mc_count; | 1801 | k_mcl_entry = k_mcl_entry->next) |
1796 | i++, k_mcl_entry = k_mcl_entry->next) { | ||
1797 | ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr); | 1802 | ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr); |
1798 | } | 1803 | |
1799 | } | 1804 | } |
1800 | out: | 1805 | out: |
1801 | return; | 1806 | return; |
@@ -1925,12 +1930,12 @@ static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps) | |||
1925 | 1930 | ||
1926 | if ((skb->protocol == htons(ETH_P_IP)) && | 1931 | if ((skb->protocol == htons(ETH_P_IP)) && |
1927 | (ip_hdr(skb)->protocol == IPPROTO_TCP)) { | 1932 | (ip_hdr(skb)->protocol == IPPROTO_TCP)) { |
1928 | tcp = (struct tcphdr*)(skb_network_header(skb) + (ip_hdr(skb)->ihl * 4)); | 1933 | tcp = (struct tcphdr *)(skb_network_header(skb) + |
1934 | (ip_hdr(skb)->ihl * 4)); | ||
1929 | tmp = (tcp->source + (tcp->dest << 16)) % 31; | 1935 | tmp = (tcp->source + (tcp->dest << 16)) % 31; |
1930 | tmp += ip_hdr(skb)->daddr % 31; | 1936 | tmp += ip_hdr(skb)->daddr % 31; |
1931 | return tmp % num_qps; | 1937 | return tmp % num_qps; |
1932 | } | 1938 | } else |
1933 | else | ||
1934 | return 0; | 1939 | return 0; |
1935 | } | 1940 | } |
1936 | 1941 | ||
@@ -2122,7 +2127,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) | |||
2122 | u64 hret; | 2127 | u64 hret; |
2123 | u16 dummy16 = 0; | 2128 | u16 dummy16 = 0; |
2124 | u64 dummy64 = 0; | 2129 | u64 dummy64 = 0; |
2125 | struct hcp_modify_qp_cb0* cb0; | 2130 | struct hcp_modify_qp_cb0 *cb0; |
2126 | 2131 | ||
2127 | cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); | 2132 | cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); |
2128 | if (!cb0) { | 2133 | if (!cb0) { |
@@ -2248,7 +2253,7 @@ static int ehea_clean_all_portres(struct ehea_port *port) | |||
2248 | int ret = 0; | 2253 | int ret = 0; |
2249 | int i; | 2254 | int i; |
2250 | 2255 | ||
2251 | for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) | 2256 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) |
2252 | ret |= ehea_clean_portres(port, &port->port_res[i]); | 2257 | ret |= ehea_clean_portres(port, &port->port_res[i]); |
2253 | 2258 | ||
2254 | ret |= ehea_destroy_eq(port->qp_eq); | 2259 | ret |= ehea_destroy_eq(port->qp_eq); |
@@ -2300,7 +2305,7 @@ static int ehea_up(struct net_device *dev) | |||
2300 | goto out_clean_pr; | 2305 | goto out_clean_pr; |
2301 | } | 2306 | } |
2302 | 2307 | ||
2303 | for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { | 2308 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { |
2304 | ret = ehea_activate_qp(port->adapter, port->port_res[i].qp); | 2309 | ret = ehea_activate_qp(port->adapter, port->port_res[i].qp); |
2305 | if (ret) { | 2310 | if (ret) { |
2306 | ehea_error("activate_qp failed"); | 2311 | ehea_error("activate_qp failed"); |
@@ -2308,7 +2313,7 @@ static int ehea_up(struct net_device *dev) | |||
2308 | } | 2313 | } |
2309 | } | 2314 | } |
2310 | 2315 | ||
2311 | for(i = 0; i < port->num_def_qps; i++) { | 2316 | for (i = 0; i < port->num_def_qps; i++) { |
2312 | ret = ehea_fill_port_res(&port->port_res[i]); | 2317 | ret = ehea_fill_port_res(&port->port_res[i]); |
2313 | if (ret) { | 2318 | if (ret) { |
2314 | ehea_error("out_free_irqs"); | 2319 | ehea_error("out_free_irqs"); |
@@ -2425,7 +2430,7 @@ int ehea_stop_qps(struct net_device *dev) | |||
2425 | { | 2430 | { |
2426 | struct ehea_port *port = netdev_priv(dev); | 2431 | struct ehea_port *port = netdev_priv(dev); |
2427 | struct ehea_adapter *adapter = port->adapter; | 2432 | struct ehea_adapter *adapter = port->adapter; |
2428 | struct hcp_modify_qp_cb0* cb0; | 2433 | struct hcp_modify_qp_cb0 *cb0; |
2429 | int ret = -EIO; | 2434 | int ret = -EIO; |
2430 | int dret; | 2435 | int dret; |
2431 | int i; | 2436 | int i; |
@@ -2490,7 +2495,7 @@ out: | |||
2490 | return ret; | 2495 | return ret; |
2491 | } | 2496 | } |
2492 | 2497 | ||
2493 | void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res * pr) | 2498 | void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr) |
2494 | { | 2499 | { |
2495 | struct ehea_qp qp = *orig_qp; | 2500 | struct ehea_qp qp = *orig_qp; |
2496 | struct ehea_qp_init_attr *init_attr = &qp.init_attr; | 2501 | struct ehea_qp_init_attr *init_attr = &qp.init_attr; |
@@ -2530,7 +2535,7 @@ int ehea_restart_qps(struct net_device *dev) | |||
2530 | int ret = 0; | 2535 | int ret = 0; |
2531 | int i; | 2536 | int i; |
2532 | 2537 | ||
2533 | struct hcp_modify_qp_cb0* cb0; | 2538 | struct hcp_modify_qp_cb0 *cb0; |
2534 | u64 hret; | 2539 | u64 hret; |
2535 | u64 dummy64 = 0; | 2540 | u64 dummy64 = 0; |
2536 | u16 dummy16 = 0; | 2541 | u16 dummy16 = 0; |
@@ -2804,34 +2809,6 @@ static void __devinit logical_port_release(struct device *dev) | |||
2804 | of_node_put(port->ofdev.node); | 2809 | of_node_put(port->ofdev.node); |
2805 | } | 2810 | } |
2806 | 2811 | ||
2807 | static int ehea_driver_sysfs_add(struct device *dev, | ||
2808 | struct device_driver *driver) | ||
2809 | { | ||
2810 | int ret; | ||
2811 | |||
2812 | ret = sysfs_create_link(&driver->kobj, &dev->kobj, | ||
2813 | kobject_name(&dev->kobj)); | ||
2814 | if (ret == 0) { | ||
2815 | ret = sysfs_create_link(&dev->kobj, &driver->kobj, | ||
2816 | "driver"); | ||
2817 | if (ret) | ||
2818 | sysfs_remove_link(&driver->kobj, | ||
2819 | kobject_name(&dev->kobj)); | ||
2820 | } | ||
2821 | return ret; | ||
2822 | } | ||
2823 | |||
2824 | static void ehea_driver_sysfs_remove(struct device *dev, | ||
2825 | struct device_driver *driver) | ||
2826 | { | ||
2827 | struct device_driver *drv = driver; | ||
2828 | |||
2829 | if (drv) { | ||
2830 | sysfs_remove_link(&drv->kobj, kobject_name(&dev->kobj)); | ||
2831 | sysfs_remove_link(&dev->kobj, "driver"); | ||
2832 | } | ||
2833 | } | ||
2834 | |||
2835 | static struct device *ehea_register_port(struct ehea_port *port, | 2812 | static struct device *ehea_register_port(struct ehea_port *port, |
2836 | struct device_node *dn) | 2813 | struct device_node *dn) |
2837 | { | 2814 | { |
@@ -2856,16 +2833,8 @@ static struct device *ehea_register_port(struct ehea_port *port, | |||
2856 | goto out_unreg_of_dev; | 2833 | goto out_unreg_of_dev; |
2857 | } | 2834 | } |
2858 | 2835 | ||
2859 | ret = ehea_driver_sysfs_add(&port->ofdev.dev, &ehea_driver.driver); | ||
2860 | if (ret) { | ||
2861 | ehea_error("failed to register sysfs driver link"); | ||
2862 | goto out_rem_dev_file; | ||
2863 | } | ||
2864 | |||
2865 | return &port->ofdev.dev; | 2836 | return &port->ofdev.dev; |
2866 | 2837 | ||
2867 | out_rem_dev_file: | ||
2868 | device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id); | ||
2869 | out_unreg_of_dev: | 2838 | out_unreg_of_dev: |
2870 | of_device_unregister(&port->ofdev); | 2839 | of_device_unregister(&port->ofdev); |
2871 | out: | 2840 | out: |
@@ -2874,7 +2843,6 @@ out: | |||
2874 | 2843 | ||
2875 | static void ehea_unregister_port(struct ehea_port *port) | 2844 | static void ehea_unregister_port(struct ehea_port *port) |
2876 | { | 2845 | { |
2877 | ehea_driver_sysfs_remove(&port->ofdev.dev, &ehea_driver.driver); | ||
2878 | device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id); | 2846 | device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id); |
2879 | of_device_unregister(&port->ofdev); | 2847 | of_device_unregister(&port->ofdev); |
2880 | } | 2848 | } |
@@ -3109,7 +3077,7 @@ static ssize_t ehea_probe_port(struct device *dev, | |||
3109 | of_node_put(eth_dn); | 3077 | of_node_put(eth_dn); |
3110 | 3078 | ||
3111 | if (port) { | 3079 | if (port) { |
3112 | for (i=0; i < EHEA_MAX_PORTS; i++) | 3080 | for (i = 0; i < EHEA_MAX_PORTS; i++) |
3113 | if (!adapter->port[i]) { | 3081 | if (!adapter->port[i]) { |
3114 | adapter->port[i] = port; | 3082 | adapter->port[i] = port; |
3115 | break; | 3083 | break; |
@@ -3144,7 +3112,7 @@ static ssize_t ehea_remove_port(struct device *dev, | |||
3144 | 3112 | ||
3145 | ehea_shutdown_single_port(port); | 3113 | ehea_shutdown_single_port(port); |
3146 | 3114 | ||
3147 | for (i=0; i < EHEA_MAX_PORTS; i++) | 3115 | for (i = 0; i < EHEA_MAX_PORTS; i++) |
3148 | if (adapter->port[i] == port) { | 3116 | if (adapter->port[i] == port) { |
3149 | adapter->port[i] = NULL; | 3117 | adapter->port[i] = NULL; |
3150 | break; | 3118 | break; |
@@ -3313,7 +3281,7 @@ static int ehea_reboot_notifier(struct notifier_block *nb, | |||
3313 | } | 3281 | } |
3314 | 3282 | ||
3315 | static struct notifier_block ehea_reboot_nb = { | 3283 | static struct notifier_block ehea_reboot_nb = { |
3316 | .notifier_call = ehea_reboot_notifier, | 3284 | .notifier_call = ehea_reboot_notifier, |
3317 | }; | 3285 | }; |
3318 | 3286 | ||
3319 | static int check_module_parm(void) | 3287 | static int check_module_parm(void) |
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c index 95c4a7f9cc88..156eb6320b4e 100644 --- a/drivers/net/ehea/ehea_phyp.c +++ b/drivers/net/ehea/ehea_phyp.c | |||
@@ -6,9 +6,9 @@ | |||
6 | * (C) Copyright IBM Corp. 2006 | 6 | * (C) Copyright IBM Corp. 2006 |
7 | * | 7 | * |
8 | * Authors: | 8 | * Authors: |
9 | * Christoph Raisch <raisch@de.ibm.com> | 9 | * Christoph Raisch <raisch@de.ibm.com> |
10 | * Jan-Bernd Themann <themann@de.ibm.com> | 10 | * Jan-Bernd Themann <themann@de.ibm.com> |
11 | * Thomas Klein <tklein@de.ibm.com> | 11 | * Thomas Klein <tklein@de.ibm.com> |
12 | * | 12 | * |
13 | * | 13 | * |
14 | * This program is free software; you can redistribute it and/or modify | 14 | * This program is free software; you can redistribute it and/or modify |
@@ -38,11 +38,11 @@ static inline u16 get_order_of_qentries(u16 queue_entries) | |||
38 | } | 38 | } |
39 | 39 | ||
40 | /* Defines for H_CALL H_ALLOC_RESOURCE */ | 40 | /* Defines for H_CALL H_ALLOC_RESOURCE */ |
41 | #define H_ALL_RES_TYPE_QP 1 | 41 | #define H_ALL_RES_TYPE_QP 1 |
42 | #define H_ALL_RES_TYPE_CQ 2 | 42 | #define H_ALL_RES_TYPE_CQ 2 |
43 | #define H_ALL_RES_TYPE_EQ 3 | 43 | #define H_ALL_RES_TYPE_EQ 3 |
44 | #define H_ALL_RES_TYPE_MR 5 | 44 | #define H_ALL_RES_TYPE_MR 5 |
45 | #define H_ALL_RES_TYPE_MW 6 | 45 | #define H_ALL_RES_TYPE_MW 6 |
46 | 46 | ||
47 | static long ehea_plpar_hcall_norets(unsigned long opcode, | 47 | static long ehea_plpar_hcall_norets(unsigned long opcode, |
48 | unsigned long arg1, | 48 | unsigned long arg1, |
@@ -137,77 +137,77 @@ u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category, | |||
137 | const u64 qp_handle, const u64 sel_mask, void *cb_addr) | 137 | const u64 qp_handle, const u64 sel_mask, void *cb_addr) |
138 | { | 138 | { |
139 | return ehea_plpar_hcall_norets(H_QUERY_HEA_QP, | 139 | return ehea_plpar_hcall_norets(H_QUERY_HEA_QP, |
140 | adapter_handle, /* R4 */ | 140 | adapter_handle, /* R4 */ |
141 | qp_category, /* R5 */ | 141 | qp_category, /* R5 */ |
142 | qp_handle, /* R6 */ | 142 | qp_handle, /* R6 */ |
143 | sel_mask, /* R7 */ | 143 | sel_mask, /* R7 */ |
144 | virt_to_abs(cb_addr), /* R8 */ | 144 | virt_to_abs(cb_addr), /* R8 */ |
145 | 0, 0); | 145 | 0, 0); |
146 | } | 146 | } |
147 | 147 | ||
148 | /* input param R5 */ | 148 | /* input param R5 */ |
149 | #define H_ALL_RES_QP_EQPO EHEA_BMASK_IBM(9, 11) | 149 | #define H_ALL_RES_QP_EQPO EHEA_BMASK_IBM(9, 11) |
150 | #define H_ALL_RES_QP_QPP EHEA_BMASK_IBM(12, 12) | 150 | #define H_ALL_RES_QP_QPP EHEA_BMASK_IBM(12, 12) |
151 | #define H_ALL_RES_QP_RQR EHEA_BMASK_IBM(13, 15) | 151 | #define H_ALL_RES_QP_RQR EHEA_BMASK_IBM(13, 15) |
152 | #define H_ALL_RES_QP_EQEG EHEA_BMASK_IBM(16, 16) | 152 | #define H_ALL_RES_QP_EQEG EHEA_BMASK_IBM(16, 16) |
153 | #define H_ALL_RES_QP_LL_QP EHEA_BMASK_IBM(17, 17) | 153 | #define H_ALL_RES_QP_LL_QP EHEA_BMASK_IBM(17, 17) |
154 | #define H_ALL_RES_QP_DMA128 EHEA_BMASK_IBM(19, 19) | 154 | #define H_ALL_RES_QP_DMA128 EHEA_BMASK_IBM(19, 19) |
155 | #define H_ALL_RES_QP_HSM EHEA_BMASK_IBM(20, 21) | 155 | #define H_ALL_RES_QP_HSM EHEA_BMASK_IBM(20, 21) |
156 | #define H_ALL_RES_QP_SIGT EHEA_BMASK_IBM(22, 23) | 156 | #define H_ALL_RES_QP_SIGT EHEA_BMASK_IBM(22, 23) |
157 | #define H_ALL_RES_QP_TENURE EHEA_BMASK_IBM(48, 55) | 157 | #define H_ALL_RES_QP_TENURE EHEA_BMASK_IBM(48, 55) |
158 | #define H_ALL_RES_QP_RES_TYP EHEA_BMASK_IBM(56, 63) | 158 | #define H_ALL_RES_QP_RES_TYP EHEA_BMASK_IBM(56, 63) |
159 | 159 | ||
160 | /* input param R9 */ | 160 | /* input param R9 */ |
161 | #define H_ALL_RES_QP_TOKEN EHEA_BMASK_IBM(0, 31) | 161 | #define H_ALL_RES_QP_TOKEN EHEA_BMASK_IBM(0, 31) |
162 | #define H_ALL_RES_QP_PD EHEA_BMASK_IBM(32,63) | 162 | #define H_ALL_RES_QP_PD EHEA_BMASK_IBM(32, 63) |
163 | 163 | ||
164 | /* input param R10 */ | 164 | /* input param R10 */ |
165 | #define H_ALL_RES_QP_MAX_SWQE EHEA_BMASK_IBM(4, 7) | 165 | #define H_ALL_RES_QP_MAX_SWQE EHEA_BMASK_IBM(4, 7) |
166 | #define H_ALL_RES_QP_MAX_R1WQE EHEA_BMASK_IBM(12, 15) | 166 | #define H_ALL_RES_QP_MAX_R1WQE EHEA_BMASK_IBM(12, 15) |
167 | #define H_ALL_RES_QP_MAX_R2WQE EHEA_BMASK_IBM(20, 23) | 167 | #define H_ALL_RES_QP_MAX_R2WQE EHEA_BMASK_IBM(20, 23) |
168 | #define H_ALL_RES_QP_MAX_R3WQE EHEA_BMASK_IBM(28, 31) | 168 | #define H_ALL_RES_QP_MAX_R3WQE EHEA_BMASK_IBM(28, 31) |
169 | /* Max Send Scatter Gather Elements */ | 169 | /* Max Send Scatter Gather Elements */ |
170 | #define H_ALL_RES_QP_MAX_SSGE EHEA_BMASK_IBM(37, 39) | 170 | #define H_ALL_RES_QP_MAX_SSGE EHEA_BMASK_IBM(37, 39) |
171 | #define H_ALL_RES_QP_MAX_R1SGE EHEA_BMASK_IBM(45, 47) | 171 | #define H_ALL_RES_QP_MAX_R1SGE EHEA_BMASK_IBM(45, 47) |
172 | /* Max Receive SG Elements RQ1 */ | 172 | /* Max Receive SG Elements RQ1 */ |
173 | #define H_ALL_RES_QP_MAX_R2SGE EHEA_BMASK_IBM(53, 55) | 173 | #define H_ALL_RES_QP_MAX_R2SGE EHEA_BMASK_IBM(53, 55) |
174 | #define H_ALL_RES_QP_MAX_R3SGE EHEA_BMASK_IBM(61, 63) | 174 | #define H_ALL_RES_QP_MAX_R3SGE EHEA_BMASK_IBM(61, 63) |
175 | 175 | ||
176 | /* input param R11 */ | 176 | /* input param R11 */ |
177 | #define H_ALL_RES_QP_SWQE_IDL EHEA_BMASK_IBM(0, 7) | 177 | #define H_ALL_RES_QP_SWQE_IDL EHEA_BMASK_IBM(0, 7) |
178 | /* max swqe immediate data length */ | 178 | /* max swqe immediate data length */ |
179 | #define H_ALL_RES_QP_PORT_NUM EHEA_BMASK_IBM(48, 63) | 179 | #define H_ALL_RES_QP_PORT_NUM EHEA_BMASK_IBM(48, 63) |
180 | 180 | ||
181 | /* input param R12 */ | 181 | /* input param R12 */ |
182 | #define H_ALL_RES_QP_TH_RQ2 EHEA_BMASK_IBM(0, 15) | 182 | #define H_ALL_RES_QP_TH_RQ2 EHEA_BMASK_IBM(0, 15) |
183 | /* Threshold RQ2 */ | 183 | /* Threshold RQ2 */ |
184 | #define H_ALL_RES_QP_TH_RQ3 EHEA_BMASK_IBM(16, 31) | 184 | #define H_ALL_RES_QP_TH_RQ3 EHEA_BMASK_IBM(16, 31) |
185 | /* Threshold RQ3 */ | 185 | /* Threshold RQ3 */ |
186 | 186 | ||
187 | /* output param R6 */ | 187 | /* output param R6 */ |
188 | #define H_ALL_RES_QP_ACT_SWQE EHEA_BMASK_IBM(0, 15) | 188 | #define H_ALL_RES_QP_ACT_SWQE EHEA_BMASK_IBM(0, 15) |
189 | #define H_ALL_RES_QP_ACT_R1WQE EHEA_BMASK_IBM(16, 31) | 189 | #define H_ALL_RES_QP_ACT_R1WQE EHEA_BMASK_IBM(16, 31) |
190 | #define H_ALL_RES_QP_ACT_R2WQE EHEA_BMASK_IBM(32, 47) | 190 | #define H_ALL_RES_QP_ACT_R2WQE EHEA_BMASK_IBM(32, 47) |
191 | #define H_ALL_RES_QP_ACT_R3WQE EHEA_BMASK_IBM(48, 63) | 191 | #define H_ALL_RES_QP_ACT_R3WQE EHEA_BMASK_IBM(48, 63) |
192 | 192 | ||
193 | /* output param, R7 */ | 193 | /* output param, R7 */ |
194 | #define H_ALL_RES_QP_ACT_SSGE EHEA_BMASK_IBM(0, 7) | 194 | #define H_ALL_RES_QP_ACT_SSGE EHEA_BMASK_IBM(0, 7) |
195 | #define H_ALL_RES_QP_ACT_R1SGE EHEA_BMASK_IBM(8, 15) | 195 | #define H_ALL_RES_QP_ACT_R1SGE EHEA_BMASK_IBM(8, 15) |
196 | #define H_ALL_RES_QP_ACT_R2SGE EHEA_BMASK_IBM(16, 23) | 196 | #define H_ALL_RES_QP_ACT_R2SGE EHEA_BMASK_IBM(16, 23) |
197 | #define H_ALL_RES_QP_ACT_R3SGE EHEA_BMASK_IBM(24, 31) | 197 | #define H_ALL_RES_QP_ACT_R3SGE EHEA_BMASK_IBM(24, 31) |
198 | #define H_ALL_RES_QP_ACT_SWQE_IDL EHEA_BMASK_IBM(32, 39) | 198 | #define H_ALL_RES_QP_ACT_SWQE_IDL EHEA_BMASK_IBM(32, 39) |
199 | 199 | ||
200 | /* output param R8,R9 */ | 200 | /* output param R8,R9 */ |
201 | #define H_ALL_RES_QP_SIZE_SQ EHEA_BMASK_IBM(0, 31) | 201 | #define H_ALL_RES_QP_SIZE_SQ EHEA_BMASK_IBM(0, 31) |
202 | #define H_ALL_RES_QP_SIZE_RQ1 EHEA_BMASK_IBM(32, 63) | 202 | #define H_ALL_RES_QP_SIZE_RQ1 EHEA_BMASK_IBM(32, 63) |
203 | #define H_ALL_RES_QP_SIZE_RQ2 EHEA_BMASK_IBM(0, 31) | 203 | #define H_ALL_RES_QP_SIZE_RQ2 EHEA_BMASK_IBM(0, 31) |
204 | #define H_ALL_RES_QP_SIZE_RQ3 EHEA_BMASK_IBM(32, 63) | 204 | #define H_ALL_RES_QP_SIZE_RQ3 EHEA_BMASK_IBM(32, 63) |
205 | 205 | ||
206 | /* output param R11,R12 */ | 206 | /* output param R11,R12 */ |
207 | #define H_ALL_RES_QP_LIOBN_SQ EHEA_BMASK_IBM(0, 31) | 207 | #define H_ALL_RES_QP_LIOBN_SQ EHEA_BMASK_IBM(0, 31) |
208 | #define H_ALL_RES_QP_LIOBN_RQ1 EHEA_BMASK_IBM(32, 63) | 208 | #define H_ALL_RES_QP_LIOBN_RQ1 EHEA_BMASK_IBM(32, 63) |
209 | #define H_ALL_RES_QP_LIOBN_RQ2 EHEA_BMASK_IBM(0, 31) | 209 | #define H_ALL_RES_QP_LIOBN_RQ2 EHEA_BMASK_IBM(0, 31) |
210 | #define H_ALL_RES_QP_LIOBN_RQ3 EHEA_BMASK_IBM(32, 63) | 210 | #define H_ALL_RES_QP_LIOBN_RQ3 EHEA_BMASK_IBM(32, 63) |
211 | 211 | ||
212 | u64 ehea_h_alloc_resource_qp(const u64 adapter_handle, | 212 | u64 ehea_h_alloc_resource_qp(const u64 adapter_handle, |
213 | struct ehea_qp_init_attr *init_attr, const u32 pd, | 213 | struct ehea_qp_init_attr *init_attr, const u32 pd, |
@@ -334,28 +334,28 @@ u64 ehea_h_alloc_resource_cq(const u64 adapter_handle, | |||
334 | } | 334 | } |
335 | 335 | ||
336 | /* Defines for H_CALL H_ALLOC_RESOURCE */ | 336 | /* Defines for H_CALL H_ALLOC_RESOURCE */ |
337 | #define H_ALL_RES_TYPE_QP 1 | 337 | #define H_ALL_RES_TYPE_QP 1 |
338 | #define H_ALL_RES_TYPE_CQ 2 | 338 | #define H_ALL_RES_TYPE_CQ 2 |
339 | #define H_ALL_RES_TYPE_EQ 3 | 339 | #define H_ALL_RES_TYPE_EQ 3 |
340 | #define H_ALL_RES_TYPE_MR 5 | 340 | #define H_ALL_RES_TYPE_MR 5 |
341 | #define H_ALL_RES_TYPE_MW 6 | 341 | #define H_ALL_RES_TYPE_MW 6 |
342 | 342 | ||
343 | /* input param R5 */ | 343 | /* input param R5 */ |
344 | #define H_ALL_RES_EQ_NEQ EHEA_BMASK_IBM(0, 0) | 344 | #define H_ALL_RES_EQ_NEQ EHEA_BMASK_IBM(0, 0) |
345 | #define H_ALL_RES_EQ_NON_NEQ_ISN EHEA_BMASK_IBM(6, 7) | 345 | #define H_ALL_RES_EQ_NON_NEQ_ISN EHEA_BMASK_IBM(6, 7) |
346 | #define H_ALL_RES_EQ_INH_EQE_GEN EHEA_BMASK_IBM(16, 16) | 346 | #define H_ALL_RES_EQ_INH_EQE_GEN EHEA_BMASK_IBM(16, 16) |
347 | #define H_ALL_RES_EQ_RES_TYPE EHEA_BMASK_IBM(56, 63) | 347 | #define H_ALL_RES_EQ_RES_TYPE EHEA_BMASK_IBM(56, 63) |
348 | /* input param R6 */ | 348 | /* input param R6 */ |
349 | #define H_ALL_RES_EQ_MAX_EQE EHEA_BMASK_IBM(32, 63) | 349 | #define H_ALL_RES_EQ_MAX_EQE EHEA_BMASK_IBM(32, 63) |
350 | 350 | ||
351 | /* output param R6 */ | 351 | /* output param R6 */ |
352 | #define H_ALL_RES_EQ_LIOBN EHEA_BMASK_IBM(32, 63) | 352 | #define H_ALL_RES_EQ_LIOBN EHEA_BMASK_IBM(32, 63) |
353 | 353 | ||
354 | /* output param R7 */ | 354 | /* output param R7 */ |
355 | #define H_ALL_RES_EQ_ACT_EQE EHEA_BMASK_IBM(32, 63) | 355 | #define H_ALL_RES_EQ_ACT_EQE EHEA_BMASK_IBM(32, 63) |
356 | 356 | ||
357 | /* output param R8 */ | 357 | /* output param R8 */ |
358 | #define H_ALL_RES_EQ_ACT_PS EHEA_BMASK_IBM(32, 63) | 358 | #define H_ALL_RES_EQ_ACT_PS EHEA_BMASK_IBM(32, 63) |
359 | 359 | ||
360 | /* output param R9 */ | 360 | /* output param R9 */ |
361 | #define H_ALL_RES_EQ_ACT_EQ_IST_C EHEA_BMASK_IBM(30, 31) | 361 | #define H_ALL_RES_EQ_ACT_EQ_IST_C EHEA_BMASK_IBM(30, 31) |
@@ -453,12 +453,12 @@ u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle, | |||
453 | 453 | ||
454 | hret = ehea_plpar_hcall9(H_REGISTER_SMR, | 454 | hret = ehea_plpar_hcall9(H_REGISTER_SMR, |
455 | outs, | 455 | outs, |
456 | adapter_handle , /* R4 */ | 456 | adapter_handle , /* R4 */ |
457 | orig_mr_handle, /* R5 */ | 457 | orig_mr_handle, /* R5 */ |
458 | vaddr_in, /* R6 */ | 458 | vaddr_in, /* R6 */ |
459 | (((u64)access_ctrl) << 32ULL), /* R7 */ | 459 | (((u64)access_ctrl) << 32ULL), /* R7 */ |
460 | pd, /* R8 */ | 460 | pd, /* R8 */ |
461 | 0, 0, 0, 0); /* R9-R12 */ | 461 | 0, 0, 0, 0); /* R9-R12 */ |
462 | 462 | ||
463 | mr->handle = outs[0]; | 463 | mr->handle = outs[0]; |
464 | mr->lkey = (u32)outs[2]; | 464 | mr->lkey = (u32)outs[2]; |
@@ -471,11 +471,11 @@ u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle) | |||
471 | u64 outs[PLPAR_HCALL9_BUFSIZE]; | 471 | u64 outs[PLPAR_HCALL9_BUFSIZE]; |
472 | 472 | ||
473 | return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA, | 473 | return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA, |
474 | outs, | 474 | outs, |
475 | adapter_handle, /* R4 */ | 475 | adapter_handle, /* R4 */ |
476 | H_DISABLE_GET_EHEA_WQE_P, /* R5 */ | 476 | H_DISABLE_GET_EHEA_WQE_P, /* R5 */ |
477 | qp_handle, /* R6 */ | 477 | qp_handle, /* R6 */ |
478 | 0, 0, 0, 0, 0, 0); /* R7-R12 */ | 478 | 0, 0, 0, 0, 0, 0); /* R7-R12 */ |
479 | } | 479 | } |
480 | 480 | ||
481 | u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle, | 481 | u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle, |
@@ -483,9 +483,9 @@ u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle, | |||
483 | { | 483 | { |
484 | return ehea_plpar_hcall_norets(H_FREE_RESOURCE, | 484 | return ehea_plpar_hcall_norets(H_FREE_RESOURCE, |
485 | adapter_handle, /* R4 */ | 485 | adapter_handle, /* R4 */ |
486 | res_handle, /* R5 */ | 486 | res_handle, /* R5 */ |
487 | force_bit, | 487 | force_bit, |
488 | 0, 0, 0, 0); /* R7-R10 */ | 488 | 0, 0, 0, 0); /* R7-R10 */ |
489 | } | 489 | } |
490 | 490 | ||
491 | u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr, | 491 | u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr, |
@@ -493,13 +493,13 @@ u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr, | |||
493 | const u32 pd, u64 *mr_handle, u32 *lkey) | 493 | const u32 pd, u64 *mr_handle, u32 *lkey) |
494 | { | 494 | { |
495 | u64 hret; | 495 | u64 hret; |
496 | u64 outs[PLPAR_HCALL9_BUFSIZE]; | 496 | u64 outs[PLPAR_HCALL9_BUFSIZE]; |
497 | 497 | ||
498 | hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE, | 498 | hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE, |
499 | outs, | 499 | outs, |
500 | adapter_handle, /* R4 */ | 500 | adapter_handle, /* R4 */ |
501 | 5, /* R5 */ | 501 | 5, /* R5 */ |
502 | vaddr, /* R6 */ | 502 | vaddr, /* R6 */ |
503 | length, /* R7 */ | 503 | length, /* R7 */ |
504 | (((u64) access_ctrl) << 32ULL), /* R8 */ | 504 | (((u64) access_ctrl) << 32ULL), /* R8 */ |
505 | pd, /* R9 */ | 505 | pd, /* R9 */ |
@@ -619,8 +619,8 @@ u64 ehea_h_error_data(const u64 adapter_handle, const u64 ressource_handle, | |||
619 | void *rblock) | 619 | void *rblock) |
620 | { | 620 | { |
621 | return ehea_plpar_hcall_norets(H_ERROR_DATA, | 621 | return ehea_plpar_hcall_norets(H_ERROR_DATA, |
622 | adapter_handle, /* R4 */ | 622 | adapter_handle, /* R4 */ |
623 | ressource_handle, /* R5 */ | 623 | ressource_handle, /* R5 */ |
624 | virt_to_abs(rblock), /* R6 */ | 624 | virt_to_abs(rblock), /* R6 */ |
625 | 0, 0, 0, 0); /* R7-R12 */ | 625 | 0, 0, 0, 0); /* R7-R12 */ |
626 | } | 626 | } |
diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h index faa191d23b86..f3628c803567 100644 --- a/drivers/net/ehea/ehea_phyp.h +++ b/drivers/net/ehea/ehea_phyp.h | |||
@@ -93,7 +93,7 @@ static inline void hcp_epas_ctor(struct h_epas *epas, u64 paddr_kernel, | |||
93 | static inline void hcp_epas_dtor(struct h_epas *epas) | 93 | static inline void hcp_epas_dtor(struct h_epas *epas) |
94 | { | 94 | { |
95 | if (epas->kernel.addr) | 95 | if (epas->kernel.addr) |
96 | iounmap((void __iomem*)((u64)epas->kernel.addr & PAGE_MASK)); | 96 | iounmap((void __iomem *)((u64)epas->kernel.addr & PAGE_MASK)); |
97 | 97 | ||
98 | epas->user.addr = 0; | 98 | epas->user.addr = 0; |
99 | epas->kernel.addr = 0; | 99 | epas->kernel.addr = 0; |
@@ -388,23 +388,23 @@ u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, | |||
388 | const u64 qp_handle, | 388 | const u64 qp_handle, |
389 | const u64 sel_mask, | 389 | const u64 sel_mask, |
390 | void *cb_addr, | 390 | void *cb_addr, |
391 | u64 * inv_attr_id, | 391 | u64 *inv_attr_id, |
392 | u64 * proc_mask, u16 * out_swr, u16 * out_rwr); | 392 | u64 *proc_mask, u16 *out_swr, u16 *out_rwr); |
393 | 393 | ||
394 | u64 ehea_h_alloc_resource_eq(const u64 adapter_handle, | 394 | u64 ehea_h_alloc_resource_eq(const u64 adapter_handle, |
395 | struct ehea_eq_attr *eq_attr, u64 * eq_handle); | 395 | struct ehea_eq_attr *eq_attr, u64 *eq_handle); |
396 | 396 | ||
397 | u64 ehea_h_alloc_resource_cq(const u64 adapter_handle, | 397 | u64 ehea_h_alloc_resource_cq(const u64 adapter_handle, |
398 | struct ehea_cq_attr *cq_attr, | 398 | struct ehea_cq_attr *cq_attr, |
399 | u64 * cq_handle, struct h_epas *epas); | 399 | u64 *cq_handle, struct h_epas *epas); |
400 | 400 | ||
401 | u64 ehea_h_alloc_resource_qp(const u64 adapter_handle, | 401 | u64 ehea_h_alloc_resource_qp(const u64 adapter_handle, |
402 | struct ehea_qp_init_attr *init_attr, | 402 | struct ehea_qp_init_attr *init_attr, |
403 | const u32 pd, | 403 | const u32 pd, |
404 | u64 * qp_handle, struct h_epas *h_epas); | 404 | u64 *qp_handle, struct h_epas *h_epas); |
405 | 405 | ||
406 | #define H_REG_RPAGE_PAGE_SIZE EHEA_BMASK_IBM(48,55) | 406 | #define H_REG_RPAGE_PAGE_SIZE EHEA_BMASK_IBM(48, 55) |
407 | #define H_REG_RPAGE_QT EHEA_BMASK_IBM(62,63) | 407 | #define H_REG_RPAGE_QT EHEA_BMASK_IBM(62, 63) |
408 | 408 | ||
409 | u64 ehea_h_register_rpage(const u64 adapter_handle, | 409 | u64 ehea_h_register_rpage(const u64 adapter_handle, |
410 | const u8 pagesize, | 410 | const u8 pagesize, |
@@ -426,7 +426,7 @@ u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle, | |||
426 | 426 | ||
427 | u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr, | 427 | u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr, |
428 | const u64 length, const u32 access_ctrl, | 428 | const u64 length, const u32 access_ctrl, |
429 | const u32 pd, u64 * mr_handle, u32 * lkey); | 429 | const u32 pd, u64 *mr_handle, u32 *lkey); |
430 | 430 | ||
431 | u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle, | 431 | u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle, |
432 | const u8 pagesize, const u8 queue_type, | 432 | const u8 pagesize, const u8 queue_type, |
@@ -439,8 +439,8 @@ u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle, | |||
439 | u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr); | 439 | u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr); |
440 | 440 | ||
441 | /* output param R5 */ | 441 | /* output param R5 */ |
442 | #define H_MEHEAPORT_CAT EHEA_BMASK_IBM(40,47) | 442 | #define H_MEHEAPORT_CAT EHEA_BMASK_IBM(40, 47) |
443 | #define H_MEHEAPORT_PN EHEA_BMASK_IBM(48,63) | 443 | #define H_MEHEAPORT_PN EHEA_BMASK_IBM(48, 63) |
444 | 444 | ||
445 | u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num, | 445 | u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num, |
446 | const u8 cb_cat, const u64 select_mask, | 446 | const u8 cb_cat, const u64 select_mask, |
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c index 83b76432b41a..d522e905f460 100644 --- a/drivers/net/ehea/ehea_qmr.c +++ b/drivers/net/ehea/ehea_qmr.c | |||
@@ -33,8 +33,6 @@ | |||
33 | 33 | ||
34 | 34 | ||
35 | struct ehea_busmap ehea_bmap = { 0, 0, NULL }; | 35 | struct ehea_busmap ehea_bmap = { 0, 0, NULL }; |
36 | extern u64 ehea_driver_flags; | ||
37 | extern struct work_struct ehea_rereg_mr_task; | ||
38 | 36 | ||
39 | 37 | ||
40 | static void *hw_qpageit_get_inc(struct hw_queue *queue) | 38 | static void *hw_qpageit_get_inc(struct hw_queue *queue) |
@@ -65,7 +63,7 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages, | |||
65 | } | 63 | } |
66 | 64 | ||
67 | queue->queue_length = nr_of_pages * pagesize; | 65 | queue->queue_length = nr_of_pages * pagesize; |
68 | queue->queue_pages = kmalloc(nr_of_pages * sizeof(void*), GFP_KERNEL); | 66 | queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL); |
69 | if (!queue->queue_pages) { | 67 | if (!queue->queue_pages) { |
70 | ehea_error("no mem for queue_pages"); | 68 | ehea_error("no mem for queue_pages"); |
71 | return -ENOMEM; | 69 | return -ENOMEM; |
@@ -78,11 +76,11 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages, | |||
78 | */ | 76 | */ |
79 | i = 0; | 77 | i = 0; |
80 | while (i < nr_of_pages) { | 78 | while (i < nr_of_pages) { |
81 | u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL); | 79 | u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL); |
82 | if (!kpage) | 80 | if (!kpage) |
83 | goto out_nomem; | 81 | goto out_nomem; |
84 | for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) { | 82 | for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) { |
85 | (queue->queue_pages)[i] = (struct ehea_page*)kpage; | 83 | (queue->queue_pages)[i] = (struct ehea_page *)kpage; |
86 | kpage += pagesize; | 84 | kpage += pagesize; |
87 | i++; | 85 | i++; |
88 | } | 86 | } |
@@ -235,8 +233,8 @@ int ehea_destroy_cq(struct ehea_cq *cq) | |||
235 | return 0; | 233 | return 0; |
236 | 234 | ||
237 | hcp_epas_dtor(&cq->epas); | 235 | hcp_epas_dtor(&cq->epas); |
238 | 236 | hret = ehea_destroy_cq_res(cq, NORMAL_FREE); | |
239 | if ((hret = ehea_destroy_cq_res(cq, NORMAL_FREE)) == H_R_STATE) { | 237 | if (hret == H_R_STATE) { |
240 | ehea_error_data(cq->adapter, cq->fw_handle); | 238 | ehea_error_data(cq->adapter, cq->fw_handle); |
241 | hret = ehea_destroy_cq_res(cq, FORCE_FREE); | 239 | hret = ehea_destroy_cq_res(cq, FORCE_FREE); |
242 | } | 240 | } |
@@ -301,13 +299,13 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter, | |||
301 | if (i == (eq->attr.nr_pages - 1)) { | 299 | if (i == (eq->attr.nr_pages - 1)) { |
302 | /* last page */ | 300 | /* last page */ |
303 | vpage = hw_qpageit_get_inc(&eq->hw_queue); | 301 | vpage = hw_qpageit_get_inc(&eq->hw_queue); |
304 | if ((hret != H_SUCCESS) || (vpage)) { | 302 | if ((hret != H_SUCCESS) || (vpage)) |
305 | goto out_kill_hwq; | 303 | goto out_kill_hwq; |
306 | } | 304 | |
307 | } else { | 305 | } else { |
308 | if ((hret != H_PAGE_REGISTERED) || (!vpage)) { | 306 | if ((hret != H_PAGE_REGISTERED) || (!vpage)) |
309 | goto out_kill_hwq; | 307 | goto out_kill_hwq; |
310 | } | 308 | |
311 | } | 309 | } |
312 | } | 310 | } |
313 | 311 | ||
@@ -331,7 +329,7 @@ struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq) | |||
331 | unsigned long flags; | 329 | unsigned long flags; |
332 | 330 | ||
333 | spin_lock_irqsave(&eq->spinlock, flags); | 331 | spin_lock_irqsave(&eq->spinlock, flags); |
334 | eqe = (struct ehea_eqe*)hw_eqit_eq_get_inc_valid(&eq->hw_queue); | 332 | eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue); |
335 | spin_unlock_irqrestore(&eq->spinlock, flags); | 333 | spin_unlock_irqrestore(&eq->spinlock, flags); |
336 | 334 | ||
337 | return eqe; | 335 | return eqe; |
@@ -364,7 +362,8 @@ int ehea_destroy_eq(struct ehea_eq *eq) | |||
364 | 362 | ||
365 | hcp_epas_dtor(&eq->epas); | 363 | hcp_epas_dtor(&eq->epas); |
366 | 364 | ||
367 | if ((hret = ehea_destroy_eq_res(eq, NORMAL_FREE)) == H_R_STATE) { | 365 | hret = ehea_destroy_eq_res(eq, NORMAL_FREE); |
366 | if (hret == H_R_STATE) { | ||
368 | ehea_error_data(eq->adapter, eq->fw_handle); | 367 | ehea_error_data(eq->adapter, eq->fw_handle); |
369 | hret = ehea_destroy_eq_res(eq, FORCE_FREE); | 368 | hret = ehea_destroy_eq_res(eq, FORCE_FREE); |
370 | } | 369 | } |
@@ -546,7 +545,8 @@ int ehea_destroy_qp(struct ehea_qp *qp) | |||
546 | 545 | ||
547 | hcp_epas_dtor(&qp->epas); | 546 | hcp_epas_dtor(&qp->epas); |
548 | 547 | ||
549 | if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) { | 548 | hret = ehea_destroy_qp_res(qp, NORMAL_FREE); |
549 | if (hret == H_R_STATE) { | ||
550 | ehea_error_data(qp->adapter, qp->fw_handle); | 550 | ehea_error_data(qp->adapter, qp->fw_handle); |
551 | hret = ehea_destroy_qp_res(qp, FORCE_FREE); | 551 | hret = ehea_destroy_qp_res(qp, FORCE_FREE); |
552 | } | 552 | } |
@@ -559,7 +559,7 @@ int ehea_destroy_qp(struct ehea_qp *qp) | |||
559 | return 0; | 559 | return 0; |
560 | } | 560 | } |
561 | 561 | ||
562 | int ehea_create_busmap( void ) | 562 | int ehea_create_busmap(void) |
563 | { | 563 | { |
564 | u64 vaddr = EHEA_BUSMAP_START; | 564 | u64 vaddr = EHEA_BUSMAP_START; |
565 | unsigned long high_section_index = 0; | 565 | unsigned long high_section_index = 0; |
@@ -595,7 +595,7 @@ int ehea_create_busmap( void ) | |||
595 | return 0; | 595 | return 0; |
596 | } | 596 | } |
597 | 597 | ||
598 | void ehea_destroy_busmap( void ) | 598 | void ehea_destroy_busmap(void) |
599 | { | 599 | { |
600 | vfree(ehea_bmap.vaddr); | 600 | vfree(ehea_bmap.vaddr); |
601 | } | 601 | } |
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h index bc62d389c166..0bb6f92fa2f8 100644 --- a/drivers/net/ehea/ehea_qmr.h +++ b/drivers/net/ehea/ehea_qmr.h | |||
@@ -41,8 +41,8 @@ | |||
41 | #define EHEA_SECTSIZE (1UL << 24) | 41 | #define EHEA_SECTSIZE (1UL << 24) |
42 | #define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT) | 42 | #define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT) |
43 | 43 | ||
44 | #if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE | 44 | #if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE) |
45 | #error eHEA module can't work if kernel sectionsize < ehea sectionsize | 45 | #error eHEA module cannot work if kernel sectionsize < ehea sectionsize |
46 | #endif | 46 | #endif |
47 | 47 | ||
48 | /* Some abbreviations used here: | 48 | /* Some abbreviations used here: |
@@ -188,8 +188,8 @@ struct ehea_eqe { | |||
188 | u64 entry; | 188 | u64 entry; |
189 | }; | 189 | }; |
190 | 190 | ||
191 | #define ERROR_DATA_LENGTH EHEA_BMASK_IBM(52,63) | 191 | #define ERROR_DATA_LENGTH EHEA_BMASK_IBM(52, 63) |
192 | #define ERROR_DATA_TYPE EHEA_BMASK_IBM(0,7) | 192 | #define ERROR_DATA_TYPE EHEA_BMASK_IBM(0, 7) |
193 | 193 | ||
194 | static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset) | 194 | static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset) |
195 | { | 195 | { |
@@ -279,7 +279,7 @@ static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue) | |||
279 | static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue) | 279 | static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue) |
280 | { | 280 | { |
281 | void *retvalue = hw_qeit_get(queue); | 281 | void *retvalue = hw_qeit_get(queue); |
282 | u32 qe = *(u8*)retvalue; | 282 | u32 qe = *(u8 *)retvalue; |
283 | if ((qe >> 7) == (queue->toggle_state & 1)) | 283 | if ((qe >> 7) == (queue->toggle_state & 1)) |
284 | hw_qeit_eq_get_inc(queue); | 284 | hw_qeit_eq_get_inc(queue); |
285 | else | 285 | else |
@@ -364,7 +364,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe, | |||
364 | 364 | ||
365 | int ehea_destroy_cq(struct ehea_cq *cq); | 365 | int ehea_destroy_cq(struct ehea_cq *cq); |
366 | 366 | ||
367 | struct ehea_qp *ehea_create_qp(struct ehea_adapter * adapter, u32 pd, | 367 | struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd, |
368 | struct ehea_qp_init_attr *init_attr); | 368 | struct ehea_qp_init_attr *init_attr); |
369 | 369 | ||
370 | int ehea_destroy_qp(struct ehea_qp *qp); | 370 | int ehea_destroy_qp(struct ehea_qp *qp); |
@@ -378,8 +378,8 @@ int ehea_rem_mr(struct ehea_mr *mr); | |||
378 | 378 | ||
379 | void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle); | 379 | void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle); |
380 | 380 | ||
381 | int ehea_create_busmap( void ); | 381 | int ehea_create_busmap(void); |
382 | void ehea_destroy_busmap( void ); | 382 | void ehea_destroy_busmap(void); |
383 | u64 ehea_map_vaddr(void *caddr); | 383 | u64 ehea_map_vaddr(void *caddr); |
384 | 384 | ||
385 | #endif /* __EHEA_QMR_H__ */ | 385 | #endif /* __EHEA_QMR_H__ */ |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 7667a62ac31f..36342230a6de 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -13,7 +13,7 @@ | |||
13 | * Copyright (C) 2004 Andrew de Quincey (wol support) | 13 | * Copyright (C) 2004 Andrew de Quincey (wol support) |
14 | * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane | 14 | * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane |
15 | * IRQ rate fixes, bigendian fixes, cleanups, verification) | 15 | * IRQ rate fixes, bigendian fixes, cleanups, verification) |
16 | * Copyright (c) 2004,5,6 NVIDIA Corporation | 16 | * Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation |
17 | * | 17 | * |
18 | * This program is free software; you can redistribute it and/or modify | 18 | * This program is free software; you can redistribute it and/or modify |
19 | * it under the terms of the GNU General Public License as published by | 19 | * it under the terms of the GNU General Public License as published by |
@@ -226,7 +226,7 @@ enum { | |||
226 | #define NVREG_MISC1_HD 0x02 | 226 | #define NVREG_MISC1_HD 0x02 |
227 | #define NVREG_MISC1_FORCE 0x3b0f3c | 227 | #define NVREG_MISC1_FORCE 0x3b0f3c |
228 | 228 | ||
229 | NvRegMacReset = 0x3c, | 229 | NvRegMacReset = 0x34, |
230 | #define NVREG_MAC_RESET_ASSERT 0x0F3 | 230 | #define NVREG_MAC_RESET_ASSERT 0x0F3 |
231 | NvRegTransmitterControl = 0x084, | 231 | NvRegTransmitterControl = 0x084, |
232 | #define NVREG_XMITCTL_START 0x01 | 232 | #define NVREG_XMITCTL_START 0x01 |
@@ -277,7 +277,9 @@ enum { | |||
277 | #define NVREG_MCASTADDRA_FORCE 0x01 | 277 | #define NVREG_MCASTADDRA_FORCE 0x01 |
278 | NvRegMulticastAddrB = 0xB4, | 278 | NvRegMulticastAddrB = 0xB4, |
279 | NvRegMulticastMaskA = 0xB8, | 279 | NvRegMulticastMaskA = 0xB8, |
280 | #define NVREG_MCASTMASKA_NONE 0xffffffff | ||
280 | NvRegMulticastMaskB = 0xBC, | 281 | NvRegMulticastMaskB = 0xBC, |
282 | #define NVREG_MCASTMASKB_NONE 0xffff | ||
281 | 283 | ||
282 | NvRegPhyInterface = 0xC0, | 284 | NvRegPhyInterface = 0xC0, |
283 | #define PHY_RGMII 0x10000000 | 285 | #define PHY_RGMII 0x10000000 |
@@ -316,8 +318,8 @@ enum { | |||
316 | NvRegTxRingPhysAddrHigh = 0x148, | 318 | NvRegTxRingPhysAddrHigh = 0x148, |
317 | NvRegRxRingPhysAddrHigh = 0x14C, | 319 | NvRegRxRingPhysAddrHigh = 0x14C, |
318 | NvRegTxPauseFrame = 0x170, | 320 | NvRegTxPauseFrame = 0x170, |
319 | #define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080 | 321 | #define NVREG_TX_PAUSEFRAME_DISABLE 0x01ff0080 |
320 | #define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030 | 322 | #define NVREG_TX_PAUSEFRAME_ENABLE 0x01800010 |
321 | NvRegMIIStatus = 0x180, | 323 | NvRegMIIStatus = 0x180, |
322 | #define NVREG_MIISTAT_ERROR 0x0001 | 324 | #define NVREG_MIISTAT_ERROR 0x0001 |
323 | #define NVREG_MIISTAT_LINKCHANGE 0x0008 | 325 | #define NVREG_MIISTAT_LINKCHANGE 0x0008 |
@@ -471,9 +473,9 @@ union ring_type { | |||
471 | #define NV_RX_AVAIL (1<<31) | 473 | #define NV_RX_AVAIL (1<<31) |
472 | 474 | ||
473 | #define NV_RX2_CHECKSUMMASK (0x1C000000) | 475 | #define NV_RX2_CHECKSUMMASK (0x1C000000) |
474 | #define NV_RX2_CHECKSUMOK1 (0x10000000) | 476 | #define NV_RX2_CHECKSUM_IP (0x10000000) |
475 | #define NV_RX2_CHECKSUMOK2 (0x14000000) | 477 | #define NV_RX2_CHECKSUM_IP_TCP (0x14000000) |
476 | #define NV_RX2_CHECKSUMOK3 (0x18000000) | 478 | #define NV_RX2_CHECKSUM_IP_UDP (0x18000000) |
477 | #define NV_RX2_DESCRIPTORVALID (1<<29) | 479 | #define NV_RX2_DESCRIPTORVALID (1<<29) |
478 | #define NV_RX2_SUBSTRACT1 (1<<25) | 480 | #define NV_RX2_SUBSTRACT1 (1<<25) |
479 | #define NV_RX2_ERROR1 (1<<18) | 481 | #define NV_RX2_ERROR1 (1<<18) |
@@ -2375,14 +2377,9 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
2375 | goto next_pkt; | 2377 | goto next_pkt; |
2376 | } | 2378 | } |
2377 | } | 2379 | } |
2378 | if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ { | 2380 | if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ |
2381 | ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ | ||
2379 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 2382 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
2380 | } else { | ||
2381 | if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 || | ||
2382 | (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) { | ||
2383 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
2384 | } | ||
2385 | } | ||
2386 | } else { | 2383 | } else { |
2387 | dev_kfree_skb(skb); | 2384 | dev_kfree_skb(skb); |
2388 | goto next_pkt; | 2385 | goto next_pkt; |
@@ -2474,14 +2471,9 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) | |||
2474 | } | 2471 | } |
2475 | } | 2472 | } |
2476 | 2473 | ||
2477 | if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ { | 2474 | if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ |
2475 | ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ | ||
2478 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 2476 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
2479 | } else { | ||
2480 | if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 || | ||
2481 | (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) { | ||
2482 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
2483 | } | ||
2484 | } | ||
2485 | 2477 | ||
2486 | /* got a valid packet - forward it to the network core */ | 2478 | /* got a valid packet - forward it to the network core */ |
2487 | skb_put(skb, len); | 2479 | skb_put(skb, len); |
@@ -2703,6 +2695,9 @@ static void nv_set_multicast(struct net_device *dev) | |||
2703 | addr[1] = alwaysOn[1]; | 2695 | addr[1] = alwaysOn[1]; |
2704 | mask[0] = alwaysOn[0] | alwaysOff[0]; | 2696 | mask[0] = alwaysOn[0] | alwaysOff[0]; |
2705 | mask[1] = alwaysOn[1] | alwaysOff[1]; | 2697 | mask[1] = alwaysOn[1] | alwaysOff[1]; |
2698 | } else { | ||
2699 | mask[0] = NVREG_MCASTMASKA_NONE; | ||
2700 | mask[1] = NVREG_MCASTMASKB_NONE; | ||
2706 | } | 2701 | } |
2707 | } | 2702 | } |
2708 | addr[0] |= NVREG_MCASTADDRA_FORCE; | 2703 | addr[0] |= NVREG_MCASTADDRA_FORCE; |
@@ -4813,8 +4808,8 @@ static int nv_open(struct net_device *dev) | |||
4813 | nv_mac_reset(dev); | 4808 | nv_mac_reset(dev); |
4814 | writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); | 4809 | writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); |
4815 | writel(0, base + NvRegMulticastAddrB); | 4810 | writel(0, base + NvRegMulticastAddrB); |
4816 | writel(0, base + NvRegMulticastMaskA); | 4811 | writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); |
4817 | writel(0, base + NvRegMulticastMaskB); | 4812 | writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); |
4818 | writel(0, base + NvRegPacketFilterFlags); | 4813 | writel(0, base + NvRegPacketFilterFlags); |
4819 | 4814 | ||
4820 | writel(0, base + NvRegTransmitterControl); | 4815 | writel(0, base + NvRegTransmitterControl); |
@@ -4908,8 +4903,8 @@ static int nv_open(struct net_device *dev) | |||
4908 | spin_lock_irq(&np->lock); | 4903 | spin_lock_irq(&np->lock); |
4909 | writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); | 4904 | writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); |
4910 | writel(0, base + NvRegMulticastAddrB); | 4905 | writel(0, base + NvRegMulticastAddrB); |
4911 | writel(0, base + NvRegMulticastMaskA); | 4906 | writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); |
4912 | writel(0, base + NvRegMulticastMaskB); | 4907 | writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); |
4913 | writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); | 4908 | writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); |
4914 | /* One manual link speed update: Interrupts are enabled, future link | 4909 | /* One manual link speed update: Interrupts are enabled, future link |
4915 | * speed changes cause interrupts and are handled by nv_link_irq(). | 4910 | * speed changes cause interrupts and are handled by nv_link_irq(). |
@@ -5603,35 +5598,35 @@ static struct pci_device_id pci_tbl[] = { | |||
5603 | }, | 5598 | }, |
5604 | { /* MCP77 Ethernet Controller */ | 5599 | { /* MCP77 Ethernet Controller */ |
5605 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), | 5600 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), |
5606 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5601 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
5607 | }, | 5602 | }, |
5608 | { /* MCP77 Ethernet Controller */ | 5603 | { /* MCP77 Ethernet Controller */ |
5609 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), | 5604 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), |
5610 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5605 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
5611 | }, | 5606 | }, |
5612 | { /* MCP77 Ethernet Controller */ | 5607 | { /* MCP77 Ethernet Controller */ |
5613 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), | 5608 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), |
5614 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5609 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
5615 | }, | 5610 | }, |
5616 | { /* MCP77 Ethernet Controller */ | 5611 | { /* MCP77 Ethernet Controller */ |
5617 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), | 5612 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), |
5618 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5613 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
5619 | }, | 5614 | }, |
5620 | { /* MCP79 Ethernet Controller */ | 5615 | { /* MCP79 Ethernet Controller */ |
5621 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), | 5616 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), |
5622 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5617 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
5623 | }, | 5618 | }, |
5624 | { /* MCP79 Ethernet Controller */ | 5619 | { /* MCP79 Ethernet Controller */ |
5625 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), | 5620 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), |
5626 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5621 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
5627 | }, | 5622 | }, |
5628 | { /* MCP79 Ethernet Controller */ | 5623 | { /* MCP79 Ethernet Controller */ |
5629 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), | 5624 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), |
5630 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5625 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
5631 | }, | 5626 | }, |
5632 | { /* MCP79 Ethernet Controller */ | 5627 | { /* MCP79 Ethernet Controller */ |
5633 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), | 5628 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), |
5634 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5629 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
5635 | }, | 5630 | }, |
5636 | {0,}, | 5631 | {0,}, |
5637 | }; | 5632 | }; |
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c index 46e2c52c7862..95e3464068db 100644 --- a/drivers/net/ibmlana.c +++ b/drivers/net/ibmlana.c | |||
@@ -901,12 +901,12 @@ static short ibmlana_adapter_ids[] __initdata = { | |||
901 | 0x0000 | 901 | 0x0000 |
902 | }; | 902 | }; |
903 | 903 | ||
904 | static char *ibmlana_adapter_names[] __initdata = { | 904 | static char *ibmlana_adapter_names[] __devinitdata = { |
905 | "IBM LAN Adapter/A", | 905 | "IBM LAN Adapter/A", |
906 | NULL | 906 | NULL |
907 | }; | 907 | }; |
908 | 908 | ||
909 | static int ibmlana_init_one(struct device *kdev) | 909 | static int __devinit ibmlana_init_one(struct device *kdev) |
910 | { | 910 | { |
911 | struct mca_device *mdev = to_mca_device(kdev); | 911 | struct mca_device *mdev = to_mca_device(kdev); |
912 | struct net_device *dev; | 912 | struct net_device *dev; |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index f3c144d5d72f..d4eb8e2d8720 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -438,7 +438,6 @@ static int igb_request_irq(struct igb_adapter *adapter) | |||
438 | if (adapter->msix_entries) { | 438 | if (adapter->msix_entries) { |
439 | err = igb_request_msix(adapter); | 439 | err = igb_request_msix(adapter); |
440 | if (!err) { | 440 | if (!err) { |
441 | struct e1000_hw *hw = &adapter->hw; | ||
442 | /* enable IAM, auto-mask, | 441 | /* enable IAM, auto-mask, |
443 | * DO NOT USE EIAME or IAME in legacy mode */ | 442 | * DO NOT USE EIAME or IAME in legacy mode */ |
444 | wr32(E1000_IAM, IMS_ENABLE_MASK); | 443 | wr32(E1000_IAM, IMS_ENABLE_MASK); |
diff --git a/drivers/net/macb.c b/drivers/net/macb.c index e10528ed9081..81bf005ff280 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c | |||
@@ -1084,7 +1084,7 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
1084 | return phy_mii_ioctl(phydev, if_mii(rq), cmd); | 1084 | return phy_mii_ioctl(phydev, if_mii(rq), cmd); |
1085 | } | 1085 | } |
1086 | 1086 | ||
1087 | static int __devinit macb_probe(struct platform_device *pdev) | 1087 | static int __init macb_probe(struct platform_device *pdev) |
1088 | { | 1088 | { |
1089 | struct eth_platform_data *pdata; | 1089 | struct eth_platform_data *pdata; |
1090 | struct resource *regs; | 1090 | struct resource *regs; |
@@ -1248,7 +1248,7 @@ err_out: | |||
1248 | return err; | 1248 | return err; |
1249 | } | 1249 | } |
1250 | 1250 | ||
1251 | static int __devexit macb_remove(struct platform_device *pdev) | 1251 | static int __exit macb_remove(struct platform_device *pdev) |
1252 | { | 1252 | { |
1253 | struct net_device *dev; | 1253 | struct net_device *dev; |
1254 | struct macb *bp; | 1254 | struct macb *bp; |
@@ -1276,8 +1276,7 @@ static int __devexit macb_remove(struct platform_device *pdev) | |||
1276 | } | 1276 | } |
1277 | 1277 | ||
1278 | static struct platform_driver macb_driver = { | 1278 | static struct platform_driver macb_driver = { |
1279 | .probe = macb_probe, | 1279 | .remove = __exit_p(macb_remove), |
1280 | .remove = __devexit_p(macb_remove), | ||
1281 | .driver = { | 1280 | .driver = { |
1282 | .name = "macb", | 1281 | .name = "macb", |
1283 | }, | 1282 | }, |
@@ -1285,7 +1284,7 @@ static struct platform_driver macb_driver = { | |||
1285 | 1284 | ||
1286 | static int __init macb_init(void) | 1285 | static int __init macb_init(void) |
1287 | { | 1286 | { |
1288 | return platform_driver_register(&macb_driver); | 1287 | return platform_driver_probe(&macb_driver, macb_probe); |
1289 | } | 1288 | } |
1290 | 1289 | ||
1291 | static void __exit macb_exit(void) | 1290 | static void __exit macb_exit(void) |
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c index aafc3ce59cbb..6d343efb2717 100644 --- a/drivers/net/mipsnet.c +++ b/drivers/net/mipsnet.c | |||
@@ -4,8 +4,6 @@ | |||
4 | * for more details. | 4 | * for more details. |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #define DEBUG | ||
8 | |||
9 | #include <linux/init.h> | 7 | #include <linux/init.h> |
10 | #include <linux/io.h> | 8 | #include <linux/io.h> |
11 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
@@ -15,11 +13,93 @@ | |||
15 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
16 | #include <asm/mips-boards/simint.h> | 14 | #include <asm/mips-boards/simint.h> |
17 | 15 | ||
18 | #include "mipsnet.h" /* actual device IO mapping */ | 16 | #define MIPSNET_VERSION "2007-11-17" |
17 | |||
18 | /* | ||
19 | * Net status/control block as seen by sw in the core. | ||
20 | */ | ||
21 | struct mipsnet_regs { | ||
22 | /* | ||
23 | * Device info for probing, reads as MIPSNET%d where %d is some | ||
24 | * form of version. | ||
25 | */ | ||
26 | u64 devId; /*0x00 */ | ||
19 | 27 | ||
20 | #define MIPSNET_VERSION "2005-06-20" | 28 | /* |
29 | * read only busy flag. | ||
30 | * Set and cleared by the Net Device to indicate that an rx or a tx | ||
31 | * is in progress. | ||
32 | */ | ||
33 | u32 busy; /*0x08 */ | ||
21 | 34 | ||
22 | #define mipsnet_reg_address(dev, field) (dev->base_addr + field_offset(field)) | 35 | /* |
36 | * Set by the Net Device. | ||
37 | * The device will set it once data has been received. | ||
38 | * The value is the number of bytes that should be read from | ||
39 | * rxDataBuffer. The value will decrease till 0 until all the data | ||
40 | * from rxDataBuffer has been read. | ||
41 | */ | ||
42 | u32 rxDataCount; /*0x0c */ | ||
43 | #define MIPSNET_MAX_RXTX_DATACOUNT (1 << 16) | ||
44 | |||
45 | /* | ||
46 | * Settable from the MIPS core, cleared by the Net Device. | ||
47 | * The core should set the number of bytes it wants to send, | ||
48 | * then it should write those bytes of data to txDataBuffer. | ||
49 | * The device will clear txDataCount once it has been processed (not | ||
50 | * necessarily sent). | ||
51 | */ | ||
52 | u32 txDataCount; /*0x10 */ | ||
53 | |||
54 | /* | ||
55 | * Interrupt control | ||
56 | * | ||
57 | * Used to clear the interrupt generated by this dev. | ||
58 | * Write a 1 to clear the interrupt. (except bit31). | ||
59 | * | ||
60 | * Bit0 is set if it was a tx-done interrupt. | ||
61 | * Bit1 is set when new rx-data is available. | ||
62 | * Until this bit is cleared there will be no other RXs. | ||
63 | * | ||
64 | * Bit31 is used for testing, it clears after a read. | ||
65 | * Writing 1 to this bit will cause an interrupt to be generated. | ||
66 | * To clear the test interrupt, write 0 to this register. | ||
67 | */ | ||
68 | u32 interruptControl; /*0x14 */ | ||
69 | #define MIPSNET_INTCTL_TXDONE (1u << 0) | ||
70 | #define MIPSNET_INTCTL_RXDONE (1u << 1) | ||
71 | #define MIPSNET_INTCTL_TESTBIT (1u << 31) | ||
72 | |||
73 | /* | ||
74 | * Readonly core-specific interrupt info for the device to signal | ||
75 | * the core. The meaning of the contents of this field might change. | ||
76 | */ | ||
77 | /* XXX: the whole memIntf interrupt scheme is messy: the device | ||
78 | * should have no control what so ever of what VPE/register set is | ||
79 | * being used. | ||
80 | * The MemIntf should only expose interrupt lines, and something in | ||
81 | * the config should be responsible for the line<->core/vpe bindings. | ||
82 | */ | ||
83 | u32 interruptInfo; /*0x18 */ | ||
84 | |||
85 | /* | ||
86 | * This is where the received data is read out. | ||
87 | * There is more data to read until rxDataReady is 0. | ||
88 | * Only 1 byte at this regs offset is used. | ||
89 | */ | ||
90 | u32 rxDataBuffer; /*0x1c */ | ||
91 | |||
92 | /* | ||
93 | * This is where the data to transmit is written. | ||
94 | * Data should be written for the amount specified in the | ||
95 | * txDataCount register. | ||
96 | * Only 1 byte at this regs offset is used. | ||
97 | */ | ||
98 | u32 txDataBuffer; /*0x20 */ | ||
99 | }; | ||
100 | |||
101 | #define regaddr(dev, field) \ | ||
102 | (dev->base_addr + offsetof(struct mipsnet_regs, field)) | ||
23 | 103 | ||
24 | static char mipsnet_string[] = "mipsnet"; | 104 | static char mipsnet_string[] = "mipsnet"; |
25 | 105 | ||
@@ -29,32 +109,27 @@ static char mipsnet_string[] = "mipsnet"; | |||
29 | static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata, | 109 | static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata, |
30 | int len) | 110 | int len) |
31 | { | 111 | { |
32 | uint32_t available_len = inl(mipsnet_reg_address(dev, rxDataCount)); | ||
33 | |||
34 | if (available_len < len) | ||
35 | return -EFAULT; | ||
36 | |||
37 | for (; len > 0; len--, kdata++) | 112 | for (; len > 0; len--, kdata++) |
38 | *kdata = inb(mipsnet_reg_address(dev, rxDataBuffer)); | 113 | *kdata = inb(regaddr(dev, rxDataBuffer)); |
39 | 114 | ||
40 | return inl(mipsnet_reg_address(dev, rxDataCount)); | 115 | return inl(regaddr(dev, rxDataCount)); |
41 | } | 116 | } |
42 | 117 | ||
43 | static inline ssize_t mipsnet_put_todevice(struct net_device *dev, | 118 | static inline void mipsnet_put_todevice(struct net_device *dev, |
44 | struct sk_buff *skb) | 119 | struct sk_buff *skb) |
45 | { | 120 | { |
46 | int count_to_go = skb->len; | 121 | int count_to_go = skb->len; |
47 | char *buf_ptr = skb->data; | 122 | char *buf_ptr = skb->data; |
48 | 123 | ||
49 | outl(skb->len, mipsnet_reg_address(dev, txDataCount)); | 124 | outl(skb->len, regaddr(dev, txDataCount)); |
50 | 125 | ||
51 | for (; count_to_go; buf_ptr++, count_to_go--) | 126 | for (; count_to_go; buf_ptr++, count_to_go--) |
52 | outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer)); | 127 | outb(*buf_ptr, regaddr(dev, txDataBuffer)); |
53 | 128 | ||
54 | dev->stats.tx_packets++; | 129 | dev->stats.tx_packets++; |
55 | dev->stats.tx_bytes += skb->len; | 130 | dev->stats.tx_bytes += skb->len; |
56 | 131 | ||
57 | return skb->len; | 132 | dev_kfree_skb(skb); |
58 | } | 133 | } |
59 | 134 | ||
60 | static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev) | 135 | static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev) |
@@ -69,18 +144,20 @@ static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev) | |||
69 | return 0; | 144 | return 0; |
70 | } | 145 | } |
71 | 146 | ||
72 | static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count) | 147 | static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t len) |
73 | { | 148 | { |
74 | struct sk_buff *skb; | 149 | struct sk_buff *skb; |
75 | size_t len = count; | ||
76 | 150 | ||
77 | skb = alloc_skb(len + 2, GFP_KERNEL); | 151 | if (!len) |
152 | return len; | ||
153 | |||
154 | skb = dev_alloc_skb(len + NET_IP_ALIGN); | ||
78 | if (!skb) { | 155 | if (!skb) { |
79 | dev->stats.rx_dropped++; | 156 | dev->stats.rx_dropped++; |
80 | return -ENOMEM; | 157 | return -ENOMEM; |
81 | } | 158 | } |
82 | 159 | ||
83 | skb_reserve(skb, 2); | 160 | skb_reserve(skb, NET_IP_ALIGN); |
84 | if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len)) | 161 | if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len)) |
85 | return -EFAULT; | 162 | return -EFAULT; |
86 | 163 | ||
@@ -92,50 +169,42 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count) | |||
92 | dev->stats.rx_packets++; | 169 | dev->stats.rx_packets++; |
93 | dev->stats.rx_bytes += len; | 170 | dev->stats.rx_bytes += len; |
94 | 171 | ||
95 | return count; | 172 | return len; |
96 | } | 173 | } |
97 | 174 | ||
98 | static irqreturn_t mipsnet_interrupt(int irq, void *dev_id) | 175 | static irqreturn_t mipsnet_interrupt(int irq, void *dev_id) |
99 | { | 176 | { |
100 | struct net_device *dev = dev_id; | 177 | struct net_device *dev = dev_id; |
101 | 178 | u32 int_flags; | |
102 | irqreturn_t retval = IRQ_NONE; | 179 | irqreturn_t ret = IRQ_NONE; |
103 | uint64_t interruptFlags; | 180 | |
104 | 181 | if (irq != dev->irq) | |
105 | if (irq == dev->irq) { | 182 | goto out_badirq; |
106 | retval = IRQ_HANDLED; | 183 | |
107 | 184 | /* TESTBIT is cleared on read. */ | |
108 | interruptFlags = | 185 | int_flags = inl(regaddr(dev, interruptControl)); |
109 | inl(mipsnet_reg_address(dev, interruptControl)); | 186 | if (int_flags & MIPSNET_INTCTL_TESTBIT) { |
110 | 187 | /* TESTBIT takes effect after a write with 0. */ | |
111 | if (interruptFlags & MIPSNET_INTCTL_TXDONE) { | 188 | outl(0, regaddr(dev, interruptControl)); |
112 | outl(MIPSNET_INTCTL_TXDONE, | 189 | ret = IRQ_HANDLED; |
113 | mipsnet_reg_address(dev, interruptControl)); | 190 | } else if (int_flags & MIPSNET_INTCTL_TXDONE) { |
114 | /* only one packet at a time, we are done. */ | 191 | /* Only one packet at a time, we are done. */ |
115 | netif_wake_queue(dev); | 192 | dev->stats.tx_packets++; |
116 | } else if (interruptFlags & MIPSNET_INTCTL_RXDONE) { | 193 | netif_wake_queue(dev); |
117 | mipsnet_get_fromdev(dev, | 194 | outl(MIPSNET_INTCTL_TXDONE, |
118 | inl(mipsnet_reg_address(dev, rxDataCount))); | 195 | regaddr(dev, interruptControl)); |
119 | outl(MIPSNET_INTCTL_RXDONE, | 196 | ret = IRQ_HANDLED; |
120 | mipsnet_reg_address(dev, interruptControl)); | 197 | } else if (int_flags & MIPSNET_INTCTL_RXDONE) { |
121 | 198 | mipsnet_get_fromdev(dev, inl(regaddr(dev, rxDataCount))); | |
122 | } else if (interruptFlags & MIPSNET_INTCTL_TESTBIT) { | 199 | outl(MIPSNET_INTCTL_RXDONE, regaddr(dev, interruptControl)); |
123 | /* | 200 | ret = IRQ_HANDLED; |
124 | * TESTBIT is cleared on read. | ||
125 | * And takes effect after a write with 0 | ||
126 | */ | ||
127 | outl(0, mipsnet_reg_address(dev, interruptControl)); | ||
128 | } else { | ||
129 | /* Maybe shared IRQ, just ignore, no clearing. */ | ||
130 | retval = IRQ_NONE; | ||
131 | } | ||
132 | |||
133 | } else { | ||
134 | printk(KERN_INFO "%s: %s(): irq %d for unknown device\n", | ||
135 | dev->name, __FUNCTION__, irq); | ||
136 | retval = IRQ_NONE; | ||
137 | } | 201 | } |
138 | return retval; | 202 | return ret; |
203 | |||
204 | out_badirq: | ||
205 | printk(KERN_INFO "%s: %s(): irq %d for unknown device\n", | ||
206 | dev->name, __FUNCTION__, irq); | ||
207 | return ret; | ||
139 | } | 208 | } |
140 | 209 | ||
141 | static int mipsnet_open(struct net_device *dev) | 210 | static int mipsnet_open(struct net_device *dev) |
@@ -144,18 +213,15 @@ static int mipsnet_open(struct net_device *dev) | |||
144 | 213 | ||
145 | err = request_irq(dev->irq, &mipsnet_interrupt, | 214 | err = request_irq(dev->irq, &mipsnet_interrupt, |
146 | IRQF_SHARED, dev->name, (void *) dev); | 215 | IRQF_SHARED, dev->name, (void *) dev); |
147 | |||
148 | if (err) { | 216 | if (err) { |
149 | release_region(dev->base_addr, MIPSNET_IO_EXTENT); | 217 | release_region(dev->base_addr, sizeof(struct mipsnet_regs)); |
150 | return err; | 218 | return err; |
151 | } | 219 | } |
152 | 220 | ||
153 | netif_start_queue(dev); | 221 | netif_start_queue(dev); |
154 | 222 | ||
155 | /* test interrupt handler */ | 223 | /* test interrupt handler */ |
156 | outl(MIPSNET_INTCTL_TESTBIT, | 224 | outl(MIPSNET_INTCTL_TESTBIT, regaddr(dev, interruptControl)); |
157 | mipsnet_reg_address(dev, interruptControl)); | ||
158 | |||
159 | 225 | ||
160 | return 0; | 226 | return 0; |
161 | } | 227 | } |
@@ -163,7 +229,7 @@ static int mipsnet_open(struct net_device *dev) | |||
163 | static int mipsnet_close(struct net_device *dev) | 229 | static int mipsnet_close(struct net_device *dev) |
164 | { | 230 | { |
165 | netif_stop_queue(dev); | 231 | netif_stop_queue(dev); |
166 | 232 | free_irq(dev->irq, dev); | |
167 | return 0; | 233 | return 0; |
168 | } | 234 | } |
169 | 235 | ||
@@ -194,10 +260,11 @@ static int __init mipsnet_probe(struct device *dev) | |||
194 | */ | 260 | */ |
195 | netdev->base_addr = 0x4200; | 261 | netdev->base_addr = 0x4200; |
196 | netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 + | 262 | netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 + |
197 | inl(mipsnet_reg_address(netdev, interruptInfo)); | 263 | inl(regaddr(netdev, interruptInfo)); |
198 | 264 | ||
199 | /* Get the io region now, get irq on open() */ | 265 | /* Get the io region now, get irq on open() */ |
200 | if (!request_region(netdev->base_addr, MIPSNET_IO_EXTENT, "mipsnet")) { | 266 | if (!request_region(netdev->base_addr, sizeof(struct mipsnet_regs), |
267 | "mipsnet")) { | ||
201 | err = -EBUSY; | 268 | err = -EBUSY; |
202 | goto out_free_netdev; | 269 | goto out_free_netdev; |
203 | } | 270 | } |
@@ -217,7 +284,7 @@ static int __init mipsnet_probe(struct device *dev) | |||
217 | return 0; | 284 | return 0; |
218 | 285 | ||
219 | out_free_region: | 286 | out_free_region: |
220 | release_region(netdev->base_addr, MIPSNET_IO_EXTENT); | 287 | release_region(netdev->base_addr, sizeof(struct mipsnet_regs)); |
221 | 288 | ||
222 | out_free_netdev: | 289 | out_free_netdev: |
223 | free_netdev(netdev); | 290 | free_netdev(netdev); |
@@ -231,7 +298,7 @@ static int __devexit mipsnet_device_remove(struct device *device) | |||
231 | struct net_device *dev = dev_get_drvdata(device); | 298 | struct net_device *dev = dev_get_drvdata(device); |
232 | 299 | ||
233 | unregister_netdev(dev); | 300 | unregister_netdev(dev); |
234 | release_region(dev->base_addr, MIPSNET_IO_EXTENT); | 301 | release_region(dev->base_addr, sizeof(struct mipsnet_regs)); |
235 | free_netdev(dev); | 302 | free_netdev(dev); |
236 | dev_set_drvdata(device, NULL); | 303 | dev_set_drvdata(device, NULL); |
237 | 304 | ||
diff --git a/drivers/net/mipsnet.h b/drivers/net/mipsnet.h deleted file mode 100644 index 0132c6714a40..000000000000 --- a/drivers/net/mipsnet.h +++ /dev/null | |||
@@ -1,112 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | */ | ||
6 | #ifndef __MIPSNET_H | ||
7 | #define __MIPSNET_H | ||
8 | |||
9 | /* | ||
10 | * Id of this Net device, as seen by the core. | ||
11 | */ | ||
12 | #define MIPS_NET_DEV_ID ((uint64_t) \ | ||
13 | ((uint64_t) 'M' << 0)| \ | ||
14 | ((uint64_t) 'I' << 8)| \ | ||
15 | ((uint64_t) 'P' << 16)| \ | ||
16 | ((uint64_t) 'S' << 24)| \ | ||
17 | ((uint64_t) 'N' << 32)| \ | ||
18 | ((uint64_t) 'E' << 40)| \ | ||
19 | ((uint64_t) 'T' << 48)| \ | ||
20 | ((uint64_t) '0' << 56)) | ||
21 | |||
22 | /* | ||
23 | * Net status/control block as seen by sw in the core. | ||
24 | * (Why not use bit fields? can't be bothered with cross-platform struct | ||
25 | * packing.) | ||
26 | */ | ||
27 | struct net_control_block { | ||
28 | /* | ||
29 | * dev info for probing | ||
30 | * reads as MIPSNET%d where %d is some form of version | ||
31 | */ | ||
32 | uint64_t devId; /* 0x00 */ | ||
33 | |||
34 | /* | ||
35 | * read only busy flag. | ||
36 | * Set and cleared by the Net Device to indicate that an rx or a tx | ||
37 | * is in progress. | ||
38 | */ | ||
39 | uint32_t busy; /* 0x08 */ | ||
40 | |||
41 | /* | ||
42 | * Set by the Net Device. | ||
43 | * The device will set it once data has been received. | ||
44 | * The value is the number of bytes that should be read from | ||
45 | * rxDataBuffer. The value will decrease till 0 until all the data | ||
46 | * from rxDataBuffer has been read. | ||
47 | */ | ||
48 | uint32_t rxDataCount; /* 0x0c */ | ||
49 | #define MIPSNET_MAX_RXTX_DATACOUNT (1<<16) | ||
50 | |||
51 | /* | ||
52 | * Settable from the MIPS core, cleared by the Net Device. The core | ||
53 | * should set the number of bytes it wants to send, then it should | ||
54 | * write those bytes of data to txDataBuffer. The device will clear | ||
55 | * txDataCount has been processed (not necessarily sent). | ||
56 | */ | ||
57 | uint32_t txDataCount; /* 0x10 */ | ||
58 | |||
59 | /* | ||
60 | * Interrupt control | ||
61 | * | ||
62 | * Used to clear the interrupted generated by this dev. | ||
63 | * Write a 1 to clear the interrupt. (except bit31). | ||
64 | * | ||
65 | * Bit0 is set if it was a tx-done interrupt. | ||
66 | * Bit1 is set when new rx-data is available. | ||
67 | * Until this bit is cleared there will be no other RXs. | ||
68 | * | ||
69 | * Bit31 is used for testing, it clears after a read. | ||
70 | * Writing 1 to this bit will cause an interrupt to be generated. | ||
71 | * To clear the test interrupt, write 0 to this register. | ||
72 | */ | ||
73 | uint32_t interruptControl; /*0x14 */ | ||
74 | #define MIPSNET_INTCTL_TXDONE ((uint32_t)(1 << 0)) | ||
75 | #define MIPSNET_INTCTL_RXDONE ((uint32_t)(1 << 1)) | ||
76 | #define MIPSNET_INTCTL_TESTBIT ((uint32_t)(1 << 31)) | ||
77 | #define MIPSNET_INTCTL_ALLSOURCES (MIPSNET_INTCTL_TXDONE | \ | ||
78 | MIPSNET_INTCTL_RXDONE | \ | ||
79 | MIPSNET_INTCTL_TESTBIT) | ||
80 | |||
81 | /* | ||
82 | * Readonly core-specific interrupt info for the device to signal the | ||
83 | * core. The meaning of the contents of this field might change. | ||
84 | * | ||
85 | * TODO: the whole memIntf interrupt scheme is messy: the device should | ||
86 | * have no control what so ever of what VPE/register set is being | ||
87 | * used. The MemIntf should only expose interrupt lines, and | ||
88 | * something in the config should be responsible for the | ||
89 | * line<->core/vpe bindings. | ||
90 | */ | ||
91 | uint32_t interruptInfo; /* 0x18 */ | ||
92 | |||
93 | /* | ||
94 | * This is where the received data is read out. | ||
95 | * There is more data to read until rxDataReady is 0. | ||
96 | * Only 1 byte at this regs offset is used. | ||
97 | */ | ||
98 | uint32_t rxDataBuffer; /* 0x1c */ | ||
99 | |||
100 | /* | ||
101 | * This is where the data to transmit is written. Data should be | ||
102 | * written for the amount specified in the txDataCount register. Only | ||
103 | * 1 byte at this regs offset is used. | ||
104 | */ | ||
105 | uint32_t txDataBuffer; /* 0x20 */ | ||
106 | }; | ||
107 | |||
108 | #define MIPSNET_IO_EXTENT 0x40 /* being generous */ | ||
109 | |||
110 | #define field_offset(field) (offsetof(struct net_control_block, field)) | ||
111 | |||
112 | #endif /* __MIPSNET_H */ | ||
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index c329a4f5840c..0a3e60418e53 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c | |||
@@ -203,22 +203,8 @@ skbuff at an offset of "+2", 16-byte aligning the IP header. | |||
203 | IIId. Synchronization | 203 | IIId. Synchronization |
204 | 204 | ||
205 | Most operations are synchronized on the np->lock irq spinlock, except the | 205 | Most operations are synchronized on the np->lock irq spinlock, except the |
206 | performance critical codepaths: | 206 | receive and transmit paths which are synchronised using a combination of |
207 | 207 | hardware descriptor ownership, disabling interrupts and NAPI poll scheduling. | |
208 | The rx process only runs in the interrupt handler. Access from outside | ||
209 | the interrupt handler is only permitted after disable_irq(). | ||
210 | |||
211 | The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap | ||
212 | is set, then access is permitted under spin_lock_irq(&np->lock). | ||
213 | |||
214 | Thus configuration functions that want to access everything must call | ||
215 | disable_irq(dev->irq); | ||
216 | netif_tx_lock_bh(dev); | ||
217 | spin_lock_irq(&np->lock); | ||
218 | |||
219 | IV. Notes | ||
220 | |||
221 | NatSemi PCI network controllers are very uncommon. | ||
222 | 208 | ||
223 | IVb. References | 209 | IVb. References |
224 | 210 | ||
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index bb88a41b7591..2e39e0285d8f 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c | |||
@@ -62,6 +62,10 @@ | |||
62 | 62 | ||
63 | #define LRO_MAX_AGGR 64 | 63 | #define LRO_MAX_AGGR 64 |
64 | 64 | ||
65 | #define PE_MIN_MTU 64 | ||
66 | #define PE_MAX_MTU 1500 | ||
67 | #define PE_DEF_MTU ETH_DATA_LEN | ||
68 | |||
65 | #define DEFAULT_MSG_ENABLE \ | 69 | #define DEFAULT_MSG_ENABLE \ |
66 | (NETIF_MSG_DRV | \ | 70 | (NETIF_MSG_DRV | \ |
67 | NETIF_MSG_PROBE | \ | 71 | NETIF_MSG_PROBE | \ |
@@ -82,8 +86,6 @@ | |||
82 | & ((ring)->size - 1)) | 86 | & ((ring)->size - 1)) |
83 | #define RING_AVAIL(ring) ((ring->size) - RING_USED(ring)) | 87 | #define RING_AVAIL(ring) ((ring->size) - RING_USED(ring)) |
84 | 88 | ||
85 | #define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ | ||
86 | |||
87 | MODULE_LICENSE("GPL"); | 89 | MODULE_LICENSE("GPL"); |
88 | MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>"); | 90 | MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>"); |
89 | MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver"); | 91 | MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver"); |
@@ -175,6 +177,24 @@ static int mac_to_intf(struct pasemi_mac *mac) | |||
175 | return -1; | 177 | return -1; |
176 | } | 178 | } |
177 | 179 | ||
180 | static void pasemi_mac_intf_disable(struct pasemi_mac *mac) | ||
181 | { | ||
182 | unsigned int flags; | ||
183 | |||
184 | flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); | ||
185 | flags &= ~PAS_MAC_CFG_PCFG_PE; | ||
186 | write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); | ||
187 | } | ||
188 | |||
189 | static void pasemi_mac_intf_enable(struct pasemi_mac *mac) | ||
190 | { | ||
191 | unsigned int flags; | ||
192 | |||
193 | flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); | ||
194 | flags |= PAS_MAC_CFG_PCFG_PE; | ||
195 | write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); | ||
196 | } | ||
197 | |||
178 | static int pasemi_get_mac_addr(struct pasemi_mac *mac) | 198 | static int pasemi_get_mac_addr(struct pasemi_mac *mac) |
179 | { | 199 | { |
180 | struct pci_dev *pdev = mac->pdev; | 200 | struct pci_dev *pdev = mac->pdev; |
@@ -221,6 +241,33 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac) | |||
221 | return 0; | 241 | return 0; |
222 | } | 242 | } |
223 | 243 | ||
244 | static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p) | ||
245 | { | ||
246 | struct pasemi_mac *mac = netdev_priv(dev); | ||
247 | struct sockaddr *addr = p; | ||
248 | unsigned int adr0, adr1; | ||
249 | |||
250 | if (!is_valid_ether_addr(addr->sa_data)) | ||
251 | return -EINVAL; | ||
252 | |||
253 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | ||
254 | |||
255 | adr0 = dev->dev_addr[2] << 24 | | ||
256 | dev->dev_addr[3] << 16 | | ||
257 | dev->dev_addr[4] << 8 | | ||
258 | dev->dev_addr[5]; | ||
259 | adr1 = read_mac_reg(mac, PAS_MAC_CFG_ADR1); | ||
260 | adr1 &= ~0xffff; | ||
261 | adr1 |= dev->dev_addr[0] << 8 | dev->dev_addr[1]; | ||
262 | |||
263 | pasemi_mac_intf_disable(mac); | ||
264 | write_mac_reg(mac, PAS_MAC_CFG_ADR0, adr0); | ||
265 | write_mac_reg(mac, PAS_MAC_CFG_ADR1, adr1); | ||
266 | pasemi_mac_intf_enable(mac); | ||
267 | |||
268 | return 0; | ||
269 | } | ||
270 | |||
224 | static int get_skb_hdr(struct sk_buff *skb, void **iphdr, | 271 | static int get_skb_hdr(struct sk_buff *skb, void **iphdr, |
225 | void **tcph, u64 *hdr_flags, void *data) | 272 | void **tcph, u64 *hdr_flags, void *data) |
226 | { | 273 | { |
@@ -453,7 +500,7 @@ static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac) | |||
453 | 500 | ||
454 | } | 501 | } |
455 | 502 | ||
456 | static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac) | 503 | static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac) |
457 | { | 504 | { |
458 | struct pasemi_mac_rxring *rx = rx_ring(mac); | 505 | struct pasemi_mac_rxring *rx = rx_ring(mac); |
459 | unsigned int i; | 506 | unsigned int i; |
@@ -473,7 +520,12 @@ static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac) | |||
473 | } | 520 | } |
474 | 521 | ||
475 | for (i = 0; i < RX_RING_SIZE; i++) | 522 | for (i = 0; i < RX_RING_SIZE; i++) |
476 | RX_DESC(rx, i) = 0; | 523 | RX_BUFF(rx, i) = 0; |
524 | } | ||
525 | |||
526 | static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac) | ||
527 | { | ||
528 | pasemi_mac_free_rx_buffers(mac); | ||
477 | 529 | ||
478 | dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64), | 530 | dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64), |
479 | rx_ring(mac)->buffers, rx_ring(mac)->buf_dma); | 531 | rx_ring(mac)->buffers, rx_ring(mac)->buf_dma); |
@@ -503,14 +555,14 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev, | |||
503 | /* Entry in use? */ | 555 | /* Entry in use? */ |
504 | WARN_ON(*buff); | 556 | WARN_ON(*buff); |
505 | 557 | ||
506 | skb = dev_alloc_skb(BUF_SIZE); | 558 | skb = dev_alloc_skb(mac->bufsz); |
507 | skb_reserve(skb, LOCAL_SKB_ALIGN); | 559 | skb_reserve(skb, LOCAL_SKB_ALIGN); |
508 | 560 | ||
509 | if (unlikely(!skb)) | 561 | if (unlikely(!skb)) |
510 | break; | 562 | break; |
511 | 563 | ||
512 | dma = pci_map_single(mac->dma_pdev, skb->data, | 564 | dma = pci_map_single(mac->dma_pdev, skb->data, |
513 | BUF_SIZE - LOCAL_SKB_ALIGN, | 565 | mac->bufsz - LOCAL_SKB_ALIGN, |
514 | PCI_DMA_FROMDEVICE); | 566 | PCI_DMA_FROMDEVICE); |
515 | 567 | ||
516 | if (unlikely(dma_mapping_error(dma))) { | 568 | if (unlikely(dma_mapping_error(dma))) { |
@@ -520,7 +572,7 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev, | |||
520 | 572 | ||
521 | info->skb = skb; | 573 | info->skb = skb; |
522 | info->dma = dma; | 574 | info->dma = dma; |
523 | *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma); | 575 | *buff = XCT_RXB_LEN(mac->bufsz) | XCT_RXB_ADDR(dma); |
524 | fill++; | 576 | fill++; |
525 | } | 577 | } |
526 | 578 | ||
@@ -650,7 +702,7 @@ static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx, | |||
650 | 702 | ||
651 | len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S; | 703 | len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S; |
652 | 704 | ||
653 | pci_unmap_single(pdev, dma, BUF_SIZE-LOCAL_SKB_ALIGN, | 705 | pci_unmap_single(pdev, dma, mac->bufsz - LOCAL_SKB_ALIGN, |
654 | PCI_DMA_FROMDEVICE); | 706 | PCI_DMA_FROMDEVICE); |
655 | 707 | ||
656 | if (macrx & XCT_MACRX_CRC) { | 708 | if (macrx & XCT_MACRX_CRC) { |
@@ -874,24 +926,6 @@ static irqreturn_t pasemi_mac_tx_intr(int irq, void *data) | |||
874 | return IRQ_HANDLED; | 926 | return IRQ_HANDLED; |
875 | } | 927 | } |
876 | 928 | ||
877 | static void pasemi_mac_intf_disable(struct pasemi_mac *mac) | ||
878 | { | ||
879 | unsigned int flags; | ||
880 | |||
881 | flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); | ||
882 | flags &= ~PAS_MAC_CFG_PCFG_PE; | ||
883 | write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); | ||
884 | } | ||
885 | |||
886 | static void pasemi_mac_intf_enable(struct pasemi_mac *mac) | ||
887 | { | ||
888 | unsigned int flags; | ||
889 | |||
890 | flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); | ||
891 | flags |= PAS_MAC_CFG_PCFG_PE; | ||
892 | write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); | ||
893 | } | ||
894 | |||
895 | static void pasemi_adjust_link(struct net_device *dev) | 929 | static void pasemi_adjust_link(struct net_device *dev) |
896 | { | 930 | { |
897 | struct pasemi_mac *mac = netdev_priv(dev); | 931 | struct pasemi_mac *mac = netdev_priv(dev); |
@@ -1148,11 +1182,71 @@ out_rx_resources: | |||
1148 | 1182 | ||
1149 | #define MAX_RETRIES 5000 | 1183 | #define MAX_RETRIES 5000 |
1150 | 1184 | ||
1185 | static void pasemi_mac_pause_txchan(struct pasemi_mac *mac) | ||
1186 | { | ||
1187 | unsigned int sta, retries; | ||
1188 | int txch = tx_ring(mac)->chan.chno; | ||
1189 | |||
1190 | write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), | ||
1191 | PAS_DMA_TXCHAN_TCMDSTA_ST); | ||
1192 | |||
1193 | for (retries = 0; retries < MAX_RETRIES; retries++) { | ||
1194 | sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch)); | ||
1195 | if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) | ||
1196 | break; | ||
1197 | cond_resched(); | ||
1198 | } | ||
1199 | |||
1200 | if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT) | ||
1201 | dev_err(&mac->dma_pdev->dev, | ||
1202 | "Failed to stop tx channel, tcmdsta %08x\n", sta); | ||
1203 | |||
1204 | write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0); | ||
1205 | } | ||
1206 | |||
1207 | static void pasemi_mac_pause_rxchan(struct pasemi_mac *mac) | ||
1208 | { | ||
1209 | unsigned int sta, retries; | ||
1210 | int rxch = rx_ring(mac)->chan.chno; | ||
1211 | |||
1212 | write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), | ||
1213 | PAS_DMA_RXCHAN_CCMDSTA_ST); | ||
1214 | for (retries = 0; retries < MAX_RETRIES; retries++) { | ||
1215 | sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch)); | ||
1216 | if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) | ||
1217 | break; | ||
1218 | cond_resched(); | ||
1219 | } | ||
1220 | |||
1221 | if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT) | ||
1222 | dev_err(&mac->dma_pdev->dev, | ||
1223 | "Failed to stop rx channel, ccmdsta %08x\n", sta); | ||
1224 | write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0); | ||
1225 | } | ||
1226 | |||
1227 | static void pasemi_mac_pause_rxint(struct pasemi_mac *mac) | ||
1228 | { | ||
1229 | unsigned int sta, retries; | ||
1230 | |||
1231 | write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), | ||
1232 | PAS_DMA_RXINT_RCMDSTA_ST); | ||
1233 | for (retries = 0; retries < MAX_RETRIES; retries++) { | ||
1234 | sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); | ||
1235 | if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT)) | ||
1236 | break; | ||
1237 | cond_resched(); | ||
1238 | } | ||
1239 | |||
1240 | if (sta & PAS_DMA_RXINT_RCMDSTA_ACT) | ||
1241 | dev_err(&mac->dma_pdev->dev, | ||
1242 | "Failed to stop rx interface, rcmdsta %08x\n", sta); | ||
1243 | write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0); | ||
1244 | } | ||
1245 | |||
1151 | static int pasemi_mac_close(struct net_device *dev) | 1246 | static int pasemi_mac_close(struct net_device *dev) |
1152 | { | 1247 | { |
1153 | struct pasemi_mac *mac = netdev_priv(dev); | 1248 | struct pasemi_mac *mac = netdev_priv(dev); |
1154 | unsigned int sta; | 1249 | unsigned int sta; |
1155 | int retries; | ||
1156 | int rxch, txch; | 1250 | int rxch, txch; |
1157 | 1251 | ||
1158 | rxch = rx_ring(mac)->chan.chno; | 1252 | rxch = rx_ring(mac)->chan.chno; |
@@ -1190,51 +1284,10 @@ static int pasemi_mac_close(struct net_device *dev) | |||
1190 | pasemi_mac_clean_tx(tx_ring(mac)); | 1284 | pasemi_mac_clean_tx(tx_ring(mac)); |
1191 | pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); | 1285 | pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); |
1192 | 1286 | ||
1193 | /* Disable interface */ | 1287 | pasemi_mac_pause_txchan(mac); |
1194 | write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), | 1288 | pasemi_mac_pause_rxint(mac); |
1195 | PAS_DMA_TXCHAN_TCMDSTA_ST); | 1289 | pasemi_mac_pause_rxchan(mac); |
1196 | write_dma_reg( PAS_DMA_RXINT_RCMDSTA(mac->dma_if), | 1290 | pasemi_mac_intf_disable(mac); |
1197 | PAS_DMA_RXINT_RCMDSTA_ST); | ||
1198 | write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), | ||
1199 | PAS_DMA_RXCHAN_CCMDSTA_ST); | ||
1200 | |||
1201 | for (retries = 0; retries < MAX_RETRIES; retries++) { | ||
1202 | sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(rxch)); | ||
1203 | if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) | ||
1204 | break; | ||
1205 | cond_resched(); | ||
1206 | } | ||
1207 | |||
1208 | if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT) | ||
1209 | dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n"); | ||
1210 | |||
1211 | for (retries = 0; retries < MAX_RETRIES; retries++) { | ||
1212 | sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch)); | ||
1213 | if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) | ||
1214 | break; | ||
1215 | cond_resched(); | ||
1216 | } | ||
1217 | |||
1218 | if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT) | ||
1219 | dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n"); | ||
1220 | |||
1221 | for (retries = 0; retries < MAX_RETRIES; retries++) { | ||
1222 | sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); | ||
1223 | if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT)) | ||
1224 | break; | ||
1225 | cond_resched(); | ||
1226 | } | ||
1227 | |||
1228 | if (sta & PAS_DMA_RXINT_RCMDSTA_ACT) | ||
1229 | dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n"); | ||
1230 | |||
1231 | /* Then, disable the channel. This must be done separately from | ||
1232 | * stopping, since you can't disable when active. | ||
1233 | */ | ||
1234 | |||
1235 | write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0); | ||
1236 | write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0); | ||
1237 | write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0); | ||
1238 | 1291 | ||
1239 | free_irq(mac->tx->chan.irq, mac->tx); | 1292 | free_irq(mac->tx->chan.irq, mac->tx); |
1240 | free_irq(mac->rx->chan.irq, mac->rx); | 1293 | free_irq(mac->rx->chan.irq, mac->rx); |
@@ -1388,6 +1441,62 @@ static int pasemi_mac_poll(struct napi_struct *napi, int budget) | |||
1388 | return pkts; | 1441 | return pkts; |
1389 | } | 1442 | } |
1390 | 1443 | ||
1444 | static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu) | ||
1445 | { | ||
1446 | struct pasemi_mac *mac = netdev_priv(dev); | ||
1447 | unsigned int reg; | ||
1448 | unsigned int rcmdsta; | ||
1449 | int running; | ||
1450 | |||
1451 | if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU) | ||
1452 | return -EINVAL; | ||
1453 | |||
1454 | running = netif_running(dev); | ||
1455 | |||
1456 | if (running) { | ||
1457 | /* Need to stop the interface, clean out all already | ||
1458 | * received buffers, free all unused buffers on the RX | ||
1459 | * interface ring, then finally re-fill the rx ring with | ||
1460 | * the new-size buffers and restart. | ||
1461 | */ | ||
1462 | |||
1463 | napi_disable(&mac->napi); | ||
1464 | netif_tx_disable(dev); | ||
1465 | pasemi_mac_intf_disable(mac); | ||
1466 | |||
1467 | rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); | ||
1468 | pasemi_mac_pause_rxint(mac); | ||
1469 | pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); | ||
1470 | pasemi_mac_free_rx_buffers(mac); | ||
1471 | } | ||
1472 | |||
1473 | /* Change maxf, i.e. what size frames are accepted. | ||
1474 | * Need room for ethernet header and CRC word | ||
1475 | */ | ||
1476 | reg = read_mac_reg(mac, PAS_MAC_CFG_MACCFG); | ||
1477 | reg &= ~PAS_MAC_CFG_MACCFG_MAXF_M; | ||
1478 | reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4); | ||
1479 | write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg); | ||
1480 | |||
1481 | dev->mtu = new_mtu; | ||
1482 | /* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ | ||
1483 | mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128; | ||
1484 | |||
1485 | if (running) { | ||
1486 | write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), | ||
1487 | rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN); | ||
1488 | |||
1489 | rx_ring(mac)->next_to_fill = 0; | ||
1490 | pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE-1); | ||
1491 | |||
1492 | napi_enable(&mac->napi); | ||
1493 | netif_start_queue(dev); | ||
1494 | pasemi_mac_intf_enable(mac); | ||
1495 | } | ||
1496 | |||
1497 | return 0; | ||
1498 | } | ||
1499 | |||
1391 | static int __devinit | 1500 | static int __devinit |
1392 | pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 1501 | pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
1393 | { | 1502 | { |
@@ -1475,6 +1584,12 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1475 | dev->stop = pasemi_mac_close; | 1584 | dev->stop = pasemi_mac_close; |
1476 | dev->hard_start_xmit = pasemi_mac_start_tx; | 1585 | dev->hard_start_xmit = pasemi_mac_start_tx; |
1477 | dev->set_multicast_list = pasemi_mac_set_rx_mode; | 1586 | dev->set_multicast_list = pasemi_mac_set_rx_mode; |
1587 | dev->set_mac_address = pasemi_mac_set_mac_addr; | ||
1588 | dev->mtu = PE_DEF_MTU; | ||
1589 | /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ | ||
1590 | mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128; | ||
1591 | |||
1592 | dev->change_mtu = pasemi_mac_change_mtu; | ||
1478 | 1593 | ||
1479 | if (err) | 1594 | if (err) |
1480 | goto out; | 1595 | goto out; |
diff --git a/drivers/net/pasemi_mac.h b/drivers/net/pasemi_mac.h index 8bee2a664c83..99e7b9329a6f 100644 --- a/drivers/net/pasemi_mac.h +++ b/drivers/net/pasemi_mac.h | |||
@@ -59,6 +59,7 @@ struct pasemi_mac { | |||
59 | struct phy_device *phydev; | 59 | struct phy_device *phydev; |
60 | struct napi_struct napi; | 60 | struct napi_struct napi; |
61 | 61 | ||
62 | int bufsz; /* RX ring buffer size */ | ||
62 | u8 type; | 63 | u8 type; |
63 | #define MAC_TYPE_GMAC 1 | 64 | #define MAC_TYPE_GMAC 1 |
64 | #define MAC_TYPE_XAUI 2 | 65 | #define MAC_TYPE_XAUI 2 |
@@ -96,6 +97,9 @@ struct pasemi_mac_buffer { | |||
96 | /* MAC CFG register offsets */ | 97 | /* MAC CFG register offsets */ |
97 | enum { | 98 | enum { |
98 | PAS_MAC_CFG_PCFG = 0x80, | 99 | PAS_MAC_CFG_PCFG = 0x80, |
100 | PAS_MAC_CFG_MACCFG = 0x84, | ||
101 | PAS_MAC_CFG_ADR0 = 0x8c, | ||
102 | PAS_MAC_CFG_ADR1 = 0x90, | ||
99 | PAS_MAC_CFG_TXP = 0x98, | 103 | PAS_MAC_CFG_TXP = 0x98, |
100 | PAS_MAC_IPC_CHNL = 0x208, | 104 | PAS_MAC_IPC_CHNL = 0x208, |
101 | }; | 105 | }; |
@@ -130,6 +134,18 @@ enum { | |||
130 | #define PAS_MAC_CFG_PCFG_SPD_100M 0x00000001 | 134 | #define PAS_MAC_CFG_PCFG_SPD_100M 0x00000001 |
131 | #define PAS_MAC_CFG_PCFG_SPD_1G 0x00000002 | 135 | #define PAS_MAC_CFG_PCFG_SPD_1G 0x00000002 |
132 | #define PAS_MAC_CFG_PCFG_SPD_10G 0x00000003 | 136 | #define PAS_MAC_CFG_PCFG_SPD_10G 0x00000003 |
137 | |||
138 | #define PAS_MAC_CFG_MACCFG_TXT_M 0x70000000 | ||
139 | #define PAS_MAC_CFG_MACCFG_TXT_S 28 | ||
140 | #define PAS_MAC_CFG_MACCFG_PRES_M 0x0f000000 | ||
141 | #define PAS_MAC_CFG_MACCFG_PRES_S 24 | ||
142 | #define PAS_MAC_CFG_MACCFG_MAXF_M 0x00ffff00 | ||
143 | #define PAS_MAC_CFG_MACCFG_MAXF_S 8 | ||
144 | #define PAS_MAC_CFG_MACCFG_MAXF(x) (((x) << PAS_MAC_CFG_MACCFG_MAXF_S) & \ | ||
145 | PAS_MAC_CFG_MACCFG_MAXF_M) | ||
146 | #define PAS_MAC_CFG_MACCFG_MINF_M 0x000000ff | ||
147 | #define PAS_MAC_CFG_MACCFG_MINF_S 0 | ||
148 | |||
133 | #define PAS_MAC_CFG_TXP_FCF 0x01000000 | 149 | #define PAS_MAC_CFG_TXP_FCF 0x01000000 |
134 | #define PAS_MAC_CFG_TXP_FCE 0x00800000 | 150 | #define PAS_MAC_CFG_TXP_FCE 0x00800000 |
135 | #define PAS_MAC_CFG_TXP_FC 0x00400000 | 151 | #define PAS_MAC_CFG_TXP_FC 0x00400000 |
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c index ed402e00e730..fffc49befe04 100644 --- a/drivers/net/pci-skeleton.c +++ b/drivers/net/pci-skeleton.c | |||
@@ -541,7 +541,7 @@ static void netdrv_hw_start (struct net_device *dev); | |||
541 | #define NETDRV_W32_F(reg, val32) do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0) | 541 | #define NETDRV_W32_F(reg, val32) do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0) |
542 | 542 | ||
543 | 543 | ||
544 | #if MMIO_FLUSH_AUDIT_COMPLETE | 544 | #ifdef MMIO_FLUSH_AUDIT_COMPLETE |
545 | 545 | ||
546 | /* write MMIO register */ | 546 | /* write MMIO register */ |
547 | #define NETDRV_W8(reg, val8) writeb ((val8), ioaddr + (reg)) | 547 | #define NETDRV_W8(reg, val8) writeb ((val8), ioaddr + (reg)) |
@@ -603,7 +603,7 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev, | |||
603 | return -ENOMEM; | 603 | return -ENOMEM; |
604 | } | 604 | } |
605 | SET_NETDEV_DEV(dev, &pdev->dev); | 605 | SET_NETDEV_DEV(dev, &pdev->dev); |
606 | tp = dev->priv; | 606 | tp = netdev_priv(dev); |
607 | 607 | ||
608 | /* enable device (incl. PCI PM wakeup), and bus-mastering */ | 608 | /* enable device (incl. PCI PM wakeup), and bus-mastering */ |
609 | rc = pci_enable_device (pdev); | 609 | rc = pci_enable_device (pdev); |
@@ -759,7 +759,7 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev, | |||
759 | return i; | 759 | return i; |
760 | } | 760 | } |
761 | 761 | ||
762 | tp = dev->priv; | 762 | tp = netdev_priv(dev); |
763 | 763 | ||
764 | assert (ioaddr != NULL); | 764 | assert (ioaddr != NULL); |
765 | assert (dev != NULL); | 765 | assert (dev != NULL); |
@@ -783,7 +783,7 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev, | |||
783 | dev->base_addr = (unsigned long) ioaddr; | 783 | dev->base_addr = (unsigned long) ioaddr; |
784 | 784 | ||
785 | /* dev->priv/tp zeroed and aligned in alloc_etherdev */ | 785 | /* dev->priv/tp zeroed and aligned in alloc_etherdev */ |
786 | tp = dev->priv; | 786 | tp = netdev_priv(dev); |
787 | 787 | ||
788 | /* note: tp->chipset set in netdrv_init_board */ | 788 | /* note: tp->chipset set in netdrv_init_board */ |
789 | tp->drv_flags = PCI_COMMAND_IO | PCI_COMMAND_MEMORY | | 789 | tp->drv_flags = PCI_COMMAND_IO | PCI_COMMAND_MEMORY | |
@@ -841,7 +841,7 @@ static void __devexit netdrv_remove_one (struct pci_dev *pdev) | |||
841 | 841 | ||
842 | assert (dev != NULL); | 842 | assert (dev != NULL); |
843 | 843 | ||
844 | np = dev->priv; | 844 | np = netdev_priv(dev); |
845 | assert (np != NULL); | 845 | assert (np != NULL); |
846 | 846 | ||
847 | unregister_netdev (dev); | 847 | unregister_netdev (dev); |
@@ -974,7 +974,7 @@ static void mdio_sync (void *mdio_addr) | |||
974 | 974 | ||
975 | static int mdio_read (struct net_device *dev, int phy_id, int location) | 975 | static int mdio_read (struct net_device *dev, int phy_id, int location) |
976 | { | 976 | { |
977 | struct netdrv_private *tp = dev->priv; | 977 | struct netdrv_private *tp = netdev_priv(dev); |
978 | void *mdio_addr = tp->mmio_addr + Config4; | 978 | void *mdio_addr = tp->mmio_addr + Config4; |
979 | int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location; | 979 | int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location; |
980 | int retval = 0; | 980 | int retval = 0; |
@@ -1017,7 +1017,7 @@ static int mdio_read (struct net_device *dev, int phy_id, int location) | |||
1017 | static void mdio_write (struct net_device *dev, int phy_id, int location, | 1017 | static void mdio_write (struct net_device *dev, int phy_id, int location, |
1018 | int value) | 1018 | int value) |
1019 | { | 1019 | { |
1020 | struct netdrv_private *tp = dev->priv; | 1020 | struct netdrv_private *tp = netdev_priv(dev); |
1021 | void *mdio_addr = tp->mmio_addr + Config4; | 1021 | void *mdio_addr = tp->mmio_addr + Config4; |
1022 | int mii_cmd = | 1022 | int mii_cmd = |
1023 | (0x5002 << 16) | (phy_id << 23) | (location << 18) | value; | 1023 | (0x5002 << 16) | (phy_id << 23) | (location << 18) | value; |
@@ -1060,7 +1060,7 @@ static void mdio_write (struct net_device *dev, int phy_id, int location, | |||
1060 | 1060 | ||
1061 | static int netdrv_open (struct net_device *dev) | 1061 | static int netdrv_open (struct net_device *dev) |
1062 | { | 1062 | { |
1063 | struct netdrv_private *tp = dev->priv; | 1063 | struct netdrv_private *tp = netdev_priv(dev); |
1064 | int retval; | 1064 | int retval; |
1065 | #ifdef NETDRV_DEBUG | 1065 | #ifdef NETDRV_DEBUG |
1066 | void *ioaddr = tp->mmio_addr; | 1066 | void *ioaddr = tp->mmio_addr; |
@@ -1121,7 +1121,7 @@ static int netdrv_open (struct net_device *dev) | |||
1121 | /* Start the hardware at open or resume. */ | 1121 | /* Start the hardware at open or resume. */ |
1122 | static void netdrv_hw_start (struct net_device *dev) | 1122 | static void netdrv_hw_start (struct net_device *dev) |
1123 | { | 1123 | { |
1124 | struct netdrv_private *tp = dev->priv; | 1124 | struct netdrv_private *tp = netdev_priv(dev); |
1125 | void *ioaddr = tp->mmio_addr; | 1125 | void *ioaddr = tp->mmio_addr; |
1126 | u32 i; | 1126 | u32 i; |
1127 | 1127 | ||
@@ -1191,7 +1191,7 @@ static void netdrv_hw_start (struct net_device *dev) | |||
1191 | /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ | 1191 | /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ |
1192 | static void netdrv_init_ring (struct net_device *dev) | 1192 | static void netdrv_init_ring (struct net_device *dev) |
1193 | { | 1193 | { |
1194 | struct netdrv_private *tp = dev->priv; | 1194 | struct netdrv_private *tp = netdev_priv(dev); |
1195 | int i; | 1195 | int i; |
1196 | 1196 | ||
1197 | DPRINTK ("ENTER\n"); | 1197 | DPRINTK ("ENTER\n"); |
@@ -1213,7 +1213,7 @@ static void netdrv_init_ring (struct net_device *dev) | |||
1213 | static void netdrv_timer (unsigned long data) | 1213 | static void netdrv_timer (unsigned long data) |
1214 | { | 1214 | { |
1215 | struct net_device *dev = (struct net_device *) data; | 1215 | struct net_device *dev = (struct net_device *) data; |
1216 | struct netdrv_private *tp = dev->priv; | 1216 | struct netdrv_private *tp = netdev_priv(dev); |
1217 | void *ioaddr = tp->mmio_addr; | 1217 | void *ioaddr = tp->mmio_addr; |
1218 | int next_tick = 60 * HZ; | 1218 | int next_tick = 60 * HZ; |
1219 | int mii_lpa; | 1219 | int mii_lpa; |
@@ -1252,9 +1252,10 @@ static void netdrv_timer (unsigned long data) | |||
1252 | } | 1252 | } |
1253 | 1253 | ||
1254 | 1254 | ||
1255 | static void netdrv_tx_clear (struct netdrv_private *tp) | 1255 | static void netdrv_tx_clear (struct net_device *dev) |
1256 | { | 1256 | { |
1257 | int i; | 1257 | int i; |
1258 | struct netdrv_private *tp = netdev_priv(dev); | ||
1258 | 1259 | ||
1259 | atomic_set (&tp->cur_tx, 0); | 1260 | atomic_set (&tp->cur_tx, 0); |
1260 | atomic_set (&tp->dirty_tx, 0); | 1261 | atomic_set (&tp->dirty_tx, 0); |
@@ -1278,7 +1279,7 @@ static void netdrv_tx_clear (struct netdrv_private *tp) | |||
1278 | 1279 | ||
1279 | static void netdrv_tx_timeout (struct net_device *dev) | 1280 | static void netdrv_tx_timeout (struct net_device *dev) |
1280 | { | 1281 | { |
1281 | struct netdrv_private *tp = dev->priv; | 1282 | struct netdrv_private *tp = netdev_priv(dev); |
1282 | void *ioaddr = tp->mmio_addr; | 1283 | void *ioaddr = tp->mmio_addr; |
1283 | int i; | 1284 | int i; |
1284 | u8 tmp8; | 1285 | u8 tmp8; |
@@ -1311,7 +1312,7 @@ static void netdrv_tx_timeout (struct net_device *dev) | |||
1311 | /* Stop a shared interrupt from scavenging while we are. */ | 1312 | /* Stop a shared interrupt from scavenging while we are. */ |
1312 | spin_lock_irqsave (&tp->lock, flags); | 1313 | spin_lock_irqsave (&tp->lock, flags); |
1313 | 1314 | ||
1314 | netdrv_tx_clear (tp); | 1315 | netdrv_tx_clear (dev); |
1315 | 1316 | ||
1316 | spin_unlock_irqrestore (&tp->lock, flags); | 1317 | spin_unlock_irqrestore (&tp->lock, flags); |
1317 | 1318 | ||
@@ -1325,7 +1326,7 @@ static void netdrv_tx_timeout (struct net_device *dev) | |||
1325 | 1326 | ||
1326 | static int netdrv_start_xmit (struct sk_buff *skb, struct net_device *dev) | 1327 | static int netdrv_start_xmit (struct sk_buff *skb, struct net_device *dev) |
1327 | { | 1328 | { |
1328 | struct netdrv_private *tp = dev->priv; | 1329 | struct netdrv_private *tp = netdev_priv(dev); |
1329 | void *ioaddr = tp->mmio_addr; | 1330 | void *ioaddr = tp->mmio_addr; |
1330 | int entry; | 1331 | int entry; |
1331 | 1332 | ||
@@ -1525,7 +1526,7 @@ static void netdrv_rx_interrupt (struct net_device *dev, | |||
1525 | DPRINTK ("%s: netdrv_rx() status %4.4x, size %4.4x," | 1526 | DPRINTK ("%s: netdrv_rx() status %4.4x, size %4.4x," |
1526 | " cur %4.4x.\n", dev->name, rx_status, | 1527 | " cur %4.4x.\n", dev->name, rx_status, |
1527 | rx_size, cur_rx); | 1528 | rx_size, cur_rx); |
1528 | #if NETDRV_DEBUG > 2 | 1529 | #if defined(NETDRV_DEBUG) && (NETDRV_DEBUG > 2) |
1529 | { | 1530 | { |
1530 | int i; | 1531 | int i; |
1531 | DPRINTK ("%s: Frame contents ", dev->name); | 1532 | DPRINTK ("%s: Frame contents ", dev->name); |
@@ -1648,7 +1649,7 @@ static void netdrv_weird_interrupt (struct net_device *dev, | |||
1648 | static irqreturn_t netdrv_interrupt (int irq, void *dev_instance) | 1649 | static irqreturn_t netdrv_interrupt (int irq, void *dev_instance) |
1649 | { | 1650 | { |
1650 | struct net_device *dev = (struct net_device *) dev_instance; | 1651 | struct net_device *dev = (struct net_device *) dev_instance; |
1651 | struct netdrv_private *tp = dev->priv; | 1652 | struct netdrv_private *tp = netdev_priv(dev); |
1652 | int boguscnt = max_interrupt_work; | 1653 | int boguscnt = max_interrupt_work; |
1653 | void *ioaddr = tp->mmio_addr; | 1654 | void *ioaddr = tp->mmio_addr; |
1654 | int status = 0, link_changed = 0; /* avoid bogus "uninit" warning */ | 1655 | int status = 0, link_changed = 0; /* avoid bogus "uninit" warning */ |
@@ -1711,7 +1712,7 @@ static irqreturn_t netdrv_interrupt (int irq, void *dev_instance) | |||
1711 | 1712 | ||
1712 | static int netdrv_close (struct net_device *dev) | 1713 | static int netdrv_close (struct net_device *dev) |
1713 | { | 1714 | { |
1714 | struct netdrv_private *tp = dev->priv; | 1715 | struct netdrv_private *tp = netdev_priv(dev); |
1715 | void *ioaddr = tp->mmio_addr; | 1716 | void *ioaddr = tp->mmio_addr; |
1716 | unsigned long flags; | 1717 | unsigned long flags; |
1717 | 1718 | ||
@@ -1738,10 +1739,10 @@ static int netdrv_close (struct net_device *dev) | |||
1738 | 1739 | ||
1739 | spin_unlock_irqrestore (&tp->lock, flags); | 1740 | spin_unlock_irqrestore (&tp->lock, flags); |
1740 | 1741 | ||
1741 | synchronize_irq (); | 1742 | synchronize_irq (dev->irq); |
1742 | free_irq (dev->irq, dev); | 1743 | free_irq (dev->irq, dev); |
1743 | 1744 | ||
1744 | netdrv_tx_clear (tp); | 1745 | netdrv_tx_clear (dev); |
1745 | 1746 | ||
1746 | pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN, | 1747 | pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN, |
1747 | tp->rx_ring, tp->rx_ring_dma); | 1748 | tp->rx_ring, tp->rx_ring_dma); |
@@ -1762,7 +1763,7 @@ static int netdrv_close (struct net_device *dev) | |||
1762 | 1763 | ||
1763 | static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) | 1764 | static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) |
1764 | { | 1765 | { |
1765 | struct netdrv_private *tp = dev->priv; | 1766 | struct netdrv_private *tp = netdev_priv(dev); |
1766 | struct mii_ioctl_data *data = if_mii(rq); | 1767 | struct mii_ioctl_data *data = if_mii(rq); |
1767 | unsigned long flags; | 1768 | unsigned long flags; |
1768 | int rc = 0; | 1769 | int rc = 0; |
@@ -1805,7 +1806,7 @@ static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) | |||
1805 | 1806 | ||
1806 | static void netdrv_set_rx_mode (struct net_device *dev) | 1807 | static void netdrv_set_rx_mode (struct net_device *dev) |
1807 | { | 1808 | { |
1808 | struct netdrv_private *tp = dev->priv; | 1809 | struct netdrv_private *tp = netdev_priv(dev); |
1809 | void *ioaddr = tp->mmio_addr; | 1810 | void *ioaddr = tp->mmio_addr; |
1810 | u32 mc_filter[2]; /* Multicast hash filter */ | 1811 | u32 mc_filter[2]; /* Multicast hash filter */ |
1811 | int i, rx_mode; | 1812 | int i, rx_mode; |
@@ -1862,7 +1863,7 @@ static void netdrv_set_rx_mode (struct net_device *dev) | |||
1862 | static int netdrv_suspend (struct pci_dev *pdev, pm_message_t state) | 1863 | static int netdrv_suspend (struct pci_dev *pdev, pm_message_t state) |
1863 | { | 1864 | { |
1864 | struct net_device *dev = pci_get_drvdata (pdev); | 1865 | struct net_device *dev = pci_get_drvdata (pdev); |
1865 | struct netdrv_private *tp = dev->priv; | 1866 | struct netdrv_private *tp = netdev_priv(dev); |
1866 | void *ioaddr = tp->mmio_addr; | 1867 | void *ioaddr = tp->mmio_addr; |
1867 | unsigned long flags; | 1868 | unsigned long flags; |
1868 | 1869 | ||
@@ -1892,7 +1893,7 @@ static int netdrv_suspend (struct pci_dev *pdev, pm_message_t state) | |||
1892 | static int netdrv_resume (struct pci_dev *pdev) | 1893 | static int netdrv_resume (struct pci_dev *pdev) |
1893 | { | 1894 | { |
1894 | struct net_device *dev = pci_get_drvdata (pdev); | 1895 | struct net_device *dev = pci_get_drvdata (pdev); |
1895 | struct netdrv_private *tp = dev->priv; | 1896 | /*struct netdrv_private *tp = netdev_priv(dev);*/ |
1896 | 1897 | ||
1897 | if (!netif_running(dev)) | 1898 | if (!netif_running(dev)) |
1898 | return 0; | 1899 | return 0; |
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 7fe03ce774b1..f4ca0591231d 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig | |||
@@ -60,6 +60,11 @@ config ICPLUS_PHY | |||
60 | ---help--- | 60 | ---help--- |
61 | Currently supports the IP175C PHY. | 61 | Currently supports the IP175C PHY. |
62 | 62 | ||
63 | config REALTEK_PHY | ||
64 | tristate "Drivers for Realtek PHYs" | ||
65 | ---help--- | ||
66 | Supports the Realtek 821x PHY. | ||
67 | |||
63 | config FIXED_PHY | 68 | config FIXED_PHY |
64 | bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" | 69 | bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" |
65 | ---help--- | 70 | ---help--- |
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 3d6cc7b67a80..5997d6ef702b 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile | |||
@@ -12,5 +12,6 @@ obj-$(CONFIG_SMSC_PHY) += smsc.o | |||
12 | obj-$(CONFIG_VITESSE_PHY) += vitesse.o | 12 | obj-$(CONFIG_VITESSE_PHY) += vitesse.o |
13 | obj-$(CONFIG_BROADCOM_PHY) += broadcom.o | 13 | obj-$(CONFIG_BROADCOM_PHY) += broadcom.o |
14 | obj-$(CONFIG_ICPLUS_PHY) += icplus.o | 14 | obj-$(CONFIG_ICPLUS_PHY) += icplus.o |
15 | obj-$(CONFIG_REALTEK_PHY) += realtek.o | ||
15 | obj-$(CONFIG_FIXED_PHY) += fixed.o | 16 | obj-$(CONFIG_FIXED_PHY) += fixed.o |
16 | obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o | 17 | obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o |
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 29666c85ed55..5b80358af658 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c | |||
@@ -141,6 +141,20 @@ static struct phy_driver bcm5461_driver = { | |||
141 | .driver = { .owner = THIS_MODULE }, | 141 | .driver = { .owner = THIS_MODULE }, |
142 | }; | 142 | }; |
143 | 143 | ||
144 | static struct phy_driver bcm5482_driver = { | ||
145 | .phy_id = 0x0143bcb0, | ||
146 | .phy_id_mask = 0xfffffff0, | ||
147 | .name = "Broadcom BCM5482", | ||
148 | .features = PHY_GBIT_FEATURES, | ||
149 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, | ||
150 | .config_init = bcm54xx_config_init, | ||
151 | .config_aneg = genphy_config_aneg, | ||
152 | .read_status = genphy_read_status, | ||
153 | .ack_interrupt = bcm54xx_ack_interrupt, | ||
154 | .config_intr = bcm54xx_config_intr, | ||
155 | .driver = { .owner = THIS_MODULE }, | ||
156 | }; | ||
157 | |||
144 | static int __init broadcom_init(void) | 158 | static int __init broadcom_init(void) |
145 | { | 159 | { |
146 | int ret; | 160 | int ret; |
@@ -154,8 +168,13 @@ static int __init broadcom_init(void) | |||
154 | ret = phy_driver_register(&bcm5461_driver); | 168 | ret = phy_driver_register(&bcm5461_driver); |
155 | if (ret) | 169 | if (ret) |
156 | goto out_5461; | 170 | goto out_5461; |
171 | ret = phy_driver_register(&bcm5482_driver); | ||
172 | if (ret) | ||
173 | goto out_5482; | ||
157 | return ret; | 174 | return ret; |
158 | 175 | ||
176 | out_5482: | ||
177 | phy_driver_unregister(&bcm5461_driver); | ||
159 | out_5461: | 178 | out_5461: |
160 | phy_driver_unregister(&bcm5421_driver); | 179 | phy_driver_unregister(&bcm5421_driver); |
161 | out_5421: | 180 | out_5421: |
@@ -166,6 +185,7 @@ out_5411: | |||
166 | 185 | ||
167 | static void __exit broadcom_exit(void) | 186 | static void __exit broadcom_exit(void) |
168 | { | 187 | { |
188 | phy_driver_unregister(&bcm5482_driver); | ||
169 | phy_driver_unregister(&bcm5461_driver); | 189 | phy_driver_unregister(&bcm5461_driver); |
170 | phy_driver_unregister(&bcm5421_driver); | 190 | phy_driver_unregister(&bcm5421_driver); |
171 | phy_driver_unregister(&bcm5411_driver); | 191 | phy_driver_unregister(&bcm5411_driver); |
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index c30196d0ad16..6e9f619c491f 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
@@ -49,7 +49,7 @@ int mdiobus_register(struct mii_bus *bus) | |||
49 | int i; | 49 | int i; |
50 | int err = 0; | 50 | int err = 0; |
51 | 51 | ||
52 | spin_lock_init(&bus->mdio_lock); | 52 | mutex_init(&bus->mdio_lock); |
53 | 53 | ||
54 | if (NULL == bus || NULL == bus->name || | 54 | if (NULL == bus || NULL == bus->name || |
55 | NULL == bus->read || | 55 | NULL == bus->read || |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 7c9e6e349503..12fccb1c76dc 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/netdevice.h> | 26 | #include <linux/netdevice.h> |
27 | #include <linux/etherdevice.h> | 27 | #include <linux/etherdevice.h> |
28 | #include <linux/skbuff.h> | 28 | #include <linux/skbuff.h> |
29 | #include <linux/spinlock.h> | ||
30 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
31 | #include <linux/module.h> | 30 | #include <linux/module.h> |
32 | #include <linux/mii.h> | 31 | #include <linux/mii.h> |
@@ -72,9 +71,11 @@ int phy_read(struct phy_device *phydev, u16 regnum) | |||
72 | int retval; | 71 | int retval; |
73 | struct mii_bus *bus = phydev->bus; | 72 | struct mii_bus *bus = phydev->bus; |
74 | 73 | ||
75 | spin_lock_bh(&bus->mdio_lock); | 74 | BUG_ON(in_interrupt()); |
75 | |||
76 | mutex_lock(&bus->mdio_lock); | ||
76 | retval = bus->read(bus, phydev->addr, regnum); | 77 | retval = bus->read(bus, phydev->addr, regnum); |
77 | spin_unlock_bh(&bus->mdio_lock); | 78 | mutex_unlock(&bus->mdio_lock); |
78 | 79 | ||
79 | return retval; | 80 | return retval; |
80 | } | 81 | } |
@@ -95,9 +96,11 @@ int phy_write(struct phy_device *phydev, u16 regnum, u16 val) | |||
95 | int err; | 96 | int err; |
96 | struct mii_bus *bus = phydev->bus; | 97 | struct mii_bus *bus = phydev->bus; |
97 | 98 | ||
98 | spin_lock_bh(&bus->mdio_lock); | 99 | BUG_ON(in_interrupt()); |
100 | |||
101 | mutex_lock(&bus->mdio_lock); | ||
99 | err = bus->write(bus, phydev->addr, regnum, val); | 102 | err = bus->write(bus, phydev->addr, regnum, val); |
100 | spin_unlock_bh(&bus->mdio_lock); | 103 | mutex_unlock(&bus->mdio_lock); |
101 | 104 | ||
102 | return err; | 105 | return err; |
103 | } | 106 | } |
@@ -428,7 +431,7 @@ int phy_start_aneg(struct phy_device *phydev) | |||
428 | { | 431 | { |
429 | int err; | 432 | int err; |
430 | 433 | ||
431 | spin_lock_bh(&phydev->lock); | 434 | mutex_lock(&phydev->lock); |
432 | 435 | ||
433 | if (AUTONEG_DISABLE == phydev->autoneg) | 436 | if (AUTONEG_DISABLE == phydev->autoneg) |
434 | phy_sanitize_settings(phydev); | 437 | phy_sanitize_settings(phydev); |
@@ -449,13 +452,14 @@ int phy_start_aneg(struct phy_device *phydev) | |||
449 | } | 452 | } |
450 | 453 | ||
451 | out_unlock: | 454 | out_unlock: |
452 | spin_unlock_bh(&phydev->lock); | 455 | mutex_unlock(&phydev->lock); |
453 | return err; | 456 | return err; |
454 | } | 457 | } |
455 | EXPORT_SYMBOL(phy_start_aneg); | 458 | EXPORT_SYMBOL(phy_start_aneg); |
456 | 459 | ||
457 | 460 | ||
458 | static void phy_change(struct work_struct *work); | 461 | static void phy_change(struct work_struct *work); |
462 | static void phy_state_machine(struct work_struct *work); | ||
459 | static void phy_timer(unsigned long data); | 463 | static void phy_timer(unsigned long data); |
460 | 464 | ||
461 | /** | 465 | /** |
@@ -476,6 +480,7 @@ void phy_start_machine(struct phy_device *phydev, | |||
476 | { | 480 | { |
477 | phydev->adjust_state = handler; | 481 | phydev->adjust_state = handler; |
478 | 482 | ||
483 | INIT_WORK(&phydev->state_queue, phy_state_machine); | ||
479 | init_timer(&phydev->phy_timer); | 484 | init_timer(&phydev->phy_timer); |
480 | phydev->phy_timer.function = &phy_timer; | 485 | phydev->phy_timer.function = &phy_timer; |
481 | phydev->phy_timer.data = (unsigned long) phydev; | 486 | phydev->phy_timer.data = (unsigned long) phydev; |
@@ -493,11 +498,12 @@ void phy_start_machine(struct phy_device *phydev, | |||
493 | void phy_stop_machine(struct phy_device *phydev) | 498 | void phy_stop_machine(struct phy_device *phydev) |
494 | { | 499 | { |
495 | del_timer_sync(&phydev->phy_timer); | 500 | del_timer_sync(&phydev->phy_timer); |
501 | cancel_work_sync(&phydev->state_queue); | ||
496 | 502 | ||
497 | spin_lock_bh(&phydev->lock); | 503 | mutex_lock(&phydev->lock); |
498 | if (phydev->state > PHY_UP) | 504 | if (phydev->state > PHY_UP) |
499 | phydev->state = PHY_UP; | 505 | phydev->state = PHY_UP; |
500 | spin_unlock_bh(&phydev->lock); | 506 | mutex_unlock(&phydev->lock); |
501 | 507 | ||
502 | phydev->adjust_state = NULL; | 508 | phydev->adjust_state = NULL; |
503 | } | 509 | } |
@@ -541,9 +547,9 @@ static void phy_force_reduction(struct phy_device *phydev) | |||
541 | */ | 547 | */ |
542 | void phy_error(struct phy_device *phydev) | 548 | void phy_error(struct phy_device *phydev) |
543 | { | 549 | { |
544 | spin_lock_bh(&phydev->lock); | 550 | mutex_lock(&phydev->lock); |
545 | phydev->state = PHY_HALTED; | 551 | phydev->state = PHY_HALTED; |
546 | spin_unlock_bh(&phydev->lock); | 552 | mutex_unlock(&phydev->lock); |
547 | } | 553 | } |
548 | 554 | ||
549 | /** | 555 | /** |
@@ -705,10 +711,10 @@ static void phy_change(struct work_struct *work) | |||
705 | if (err) | 711 | if (err) |
706 | goto phy_err; | 712 | goto phy_err; |
707 | 713 | ||
708 | spin_lock_bh(&phydev->lock); | 714 | mutex_lock(&phydev->lock); |
709 | if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) | 715 | if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) |
710 | phydev->state = PHY_CHANGELINK; | 716 | phydev->state = PHY_CHANGELINK; |
711 | spin_unlock_bh(&phydev->lock); | 717 | mutex_unlock(&phydev->lock); |
712 | 718 | ||
713 | atomic_dec(&phydev->irq_disable); | 719 | atomic_dec(&phydev->irq_disable); |
714 | enable_irq(phydev->irq); | 720 | enable_irq(phydev->irq); |
@@ -735,7 +741,7 @@ phy_err: | |||
735 | */ | 741 | */ |
736 | void phy_stop(struct phy_device *phydev) | 742 | void phy_stop(struct phy_device *phydev) |
737 | { | 743 | { |
738 | spin_lock_bh(&phydev->lock); | 744 | mutex_lock(&phydev->lock); |
739 | 745 | ||
740 | if (PHY_HALTED == phydev->state) | 746 | if (PHY_HALTED == phydev->state) |
741 | goto out_unlock; | 747 | goto out_unlock; |
@@ -751,7 +757,7 @@ void phy_stop(struct phy_device *phydev) | |||
751 | phydev->state = PHY_HALTED; | 757 | phydev->state = PHY_HALTED; |
752 | 758 | ||
753 | out_unlock: | 759 | out_unlock: |
754 | spin_unlock_bh(&phydev->lock); | 760 | mutex_unlock(&phydev->lock); |
755 | 761 | ||
756 | /* | 762 | /* |
757 | * Cannot call flush_scheduled_work() here as desired because | 763 | * Cannot call flush_scheduled_work() here as desired because |
@@ -773,7 +779,7 @@ out_unlock: | |||
773 | */ | 779 | */ |
774 | void phy_start(struct phy_device *phydev) | 780 | void phy_start(struct phy_device *phydev) |
775 | { | 781 | { |
776 | spin_lock_bh(&phydev->lock); | 782 | mutex_lock(&phydev->lock); |
777 | 783 | ||
778 | switch (phydev->state) { | 784 | switch (phydev->state) { |
779 | case PHY_STARTING: | 785 | case PHY_STARTING: |
@@ -787,19 +793,26 @@ void phy_start(struct phy_device *phydev) | |||
787 | default: | 793 | default: |
788 | break; | 794 | break; |
789 | } | 795 | } |
790 | spin_unlock_bh(&phydev->lock); | 796 | mutex_unlock(&phydev->lock); |
791 | } | 797 | } |
792 | EXPORT_SYMBOL(phy_stop); | 798 | EXPORT_SYMBOL(phy_stop); |
793 | EXPORT_SYMBOL(phy_start); | 799 | EXPORT_SYMBOL(phy_start); |
794 | 800 | ||
795 | /* PHY timer which handles the state machine */ | 801 | /** |
796 | static void phy_timer(unsigned long data) | 802 | * phy_state_machine - Handle the state machine |
803 | * @work: work_struct that describes the work to be done | ||
804 | * | ||
805 | * Description: Scheduled by the state_queue workqueue each time | ||
806 | * phy_timer is triggered. | ||
807 | */ | ||
808 | static void phy_state_machine(struct work_struct *work) | ||
797 | { | 809 | { |
798 | struct phy_device *phydev = (struct phy_device *)data; | 810 | struct phy_device *phydev = |
811 | container_of(work, struct phy_device, state_queue); | ||
799 | int needs_aneg = 0; | 812 | int needs_aneg = 0; |
800 | int err = 0; | 813 | int err = 0; |
801 | 814 | ||
802 | spin_lock_bh(&phydev->lock); | 815 | mutex_lock(&phydev->lock); |
803 | 816 | ||
804 | if (phydev->adjust_state) | 817 | if (phydev->adjust_state) |
805 | phydev->adjust_state(phydev->attached_dev); | 818 | phydev->adjust_state(phydev->attached_dev); |
@@ -965,7 +978,7 @@ static void phy_timer(unsigned long data) | |||
965 | break; | 978 | break; |
966 | } | 979 | } |
967 | 980 | ||
968 | spin_unlock_bh(&phydev->lock); | 981 | mutex_unlock(&phydev->lock); |
969 | 982 | ||
970 | if (needs_aneg) | 983 | if (needs_aneg) |
971 | err = phy_start_aneg(phydev); | 984 | err = phy_start_aneg(phydev); |
@@ -976,3 +989,14 @@ static void phy_timer(unsigned long data) | |||
976 | mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ); | 989 | mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ); |
977 | } | 990 | } |
978 | 991 | ||
992 | /* PHY timer which schedules the state machine work */ | ||
993 | static void phy_timer(unsigned long data) | ||
994 | { | ||
995 | struct phy_device *phydev = (struct phy_device *)data; | ||
996 | |||
997 | /* | ||
998 | * PHY I/O operations can potentially sleep so we ensure that | ||
999 | * it's done from a process context | ||
1000 | */ | ||
1001 | schedule_work(&phydev->state_queue); | ||
1002 | } | ||
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 5b9e1751e1b4..f4c4fd85425f 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/netdevice.h> | 25 | #include <linux/netdevice.h> |
26 | #include <linux/etherdevice.h> | 26 | #include <linux/etherdevice.h> |
27 | #include <linux/skbuff.h> | 27 | #include <linux/skbuff.h> |
28 | #include <linux/spinlock.h> | ||
29 | #include <linux/mm.h> | 28 | #include <linux/mm.h> |
30 | #include <linux/module.h> | 29 | #include <linux/module.h> |
31 | #include <linux/mii.h> | 30 | #include <linux/mii.h> |
@@ -80,7 +79,7 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id) | |||
80 | 79 | ||
81 | dev->state = PHY_DOWN; | 80 | dev->state = PHY_DOWN; |
82 | 81 | ||
83 | spin_lock_init(&dev->lock); | 82 | mutex_init(&dev->lock); |
84 | 83 | ||
85 | return dev; | 84 | return dev; |
86 | } | 85 | } |
@@ -656,7 +655,7 @@ static int phy_probe(struct device *dev) | |||
656 | if (!(phydrv->flags & PHY_HAS_INTERRUPT)) | 655 | if (!(phydrv->flags & PHY_HAS_INTERRUPT)) |
657 | phydev->irq = PHY_POLL; | 656 | phydev->irq = PHY_POLL; |
658 | 657 | ||
659 | spin_lock_bh(&phydev->lock); | 658 | mutex_lock(&phydev->lock); |
660 | 659 | ||
661 | /* Start out supporting everything. Eventually, | 660 | /* Start out supporting everything. Eventually, |
662 | * a controller will attach, and may modify one | 661 | * a controller will attach, and may modify one |
@@ -670,7 +669,7 @@ static int phy_probe(struct device *dev) | |||
670 | if (phydev->drv->probe) | 669 | if (phydev->drv->probe) |
671 | err = phydev->drv->probe(phydev); | 670 | err = phydev->drv->probe(phydev); |
672 | 671 | ||
673 | spin_unlock_bh(&phydev->lock); | 672 | mutex_unlock(&phydev->lock); |
674 | 673 | ||
675 | return err; | 674 | return err; |
676 | 675 | ||
@@ -682,9 +681,9 @@ static int phy_remove(struct device *dev) | |||
682 | 681 | ||
683 | phydev = to_phy_device(dev); | 682 | phydev = to_phy_device(dev); |
684 | 683 | ||
685 | spin_lock_bh(&phydev->lock); | 684 | mutex_lock(&phydev->lock); |
686 | phydev->state = PHY_DOWN; | 685 | phydev->state = PHY_DOWN; |
687 | spin_unlock_bh(&phydev->lock); | 686 | mutex_unlock(&phydev->lock); |
688 | 687 | ||
689 | if (phydev->drv->remove) | 688 | if (phydev->drv->remove) |
690 | phydev->drv->remove(phydev); | 689 | phydev->drv->remove(phydev); |
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c new file mode 100644 index 000000000000..a052a6744a51 --- /dev/null +++ b/drivers/net/phy/realtek.c | |||
@@ -0,0 +1,80 @@ | |||
1 | /* | ||
2 | * drivers/net/phy/realtek.c | ||
3 | * | ||
4 | * Driver for Realtek PHYs | ||
5 | * | ||
6 | * Author: Johnson Leung <r58129@freescale.com> | ||
7 | * | ||
8 | * Copyright (c) 2004 Freescale Semiconductor, Inc. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the | ||
12 | * Free Software Foundation; either version 2 of the License, or (at your | ||
13 | * option) any later version. | ||
14 | * | ||
15 | */ | ||
16 | #include <linux/phy.h> | ||
17 | |||
18 | #define RTL821x_PHYSR 0x11 | ||
19 | #define RTL821x_PHYSR_DUPLEX 0x2000 | ||
20 | #define RTL821x_PHYSR_SPEED 0xc000 | ||
21 | #define RTL821x_INER 0x12 | ||
22 | #define RTL821x_INER_INIT 0x6400 | ||
23 | #define RTL821x_INSR 0x13 | ||
24 | |||
25 | MODULE_DESCRIPTION("Realtek PHY driver"); | ||
26 | MODULE_AUTHOR("Johnson Leung"); | ||
27 | MODULE_LICENSE("GPL"); | ||
28 | |||
29 | static int rtl821x_ack_interrupt(struct phy_device *phydev) | ||
30 | { | ||
31 | int err; | ||
32 | |||
33 | err = phy_read(phydev, RTL821x_INSR); | ||
34 | |||
35 | return (err < 0) ? err : 0; | ||
36 | } | ||
37 | |||
38 | static int rtl821x_config_intr(struct phy_device *phydev) | ||
39 | { | ||
40 | int err; | ||
41 | |||
42 | if (phydev->interrupts == PHY_INTERRUPT_ENABLED) | ||
43 | err = phy_write(phydev, RTL821x_INER, | ||
44 | RTL821x_INER_INIT); | ||
45 | else | ||
46 | err = phy_write(phydev, RTL821x_INER, 0); | ||
47 | |||
48 | return err; | ||
49 | } | ||
50 | |||
51 | /* RTL8211B */ | ||
52 | static struct phy_driver rtl821x_driver = { | ||
53 | .phy_id = 0x001cc912, | ||
54 | .name = "RTL821x Gigabit Ethernet", | ||
55 | .phy_id_mask = 0x001fffff, | ||
56 | .features = PHY_GBIT_FEATURES, | ||
57 | .flags = PHY_HAS_INTERRUPT, | ||
58 | .config_aneg = &genphy_config_aneg, | ||
59 | .read_status = &genphy_read_status, | ||
60 | .ack_interrupt = &rtl821x_ack_interrupt, | ||
61 | .config_intr = &rtl821x_config_intr, | ||
62 | .driver = { .owner = THIS_MODULE,}, | ||
63 | }; | ||
64 | |||
65 | static int __init realtek_init(void) | ||
66 | { | ||
67 | int ret; | ||
68 | |||
69 | ret = phy_driver_register(&rtl821x_driver); | ||
70 | |||
71 | return ret; | ||
72 | } | ||
73 | |||
74 | static void __exit realtek_exit(void) | ||
75 | { | ||
76 | phy_driver_unregister(&rtl821x_driver); | ||
77 | } | ||
78 | |||
79 | module_init(realtek_init); | ||
80 | module_exit(realtek_exit); | ||
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 5fab7d7b5d74..6179a0a2032c 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -8118,7 +8118,7 @@ static void initiate_new_session(struct lro *lro, u8 *l2h, | |||
8118 | lro->iph = ip; | 8118 | lro->iph = ip; |
8119 | lro->tcph = tcp; | 8119 | lro->tcph = tcp; |
8120 | lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq); | 8120 | lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq); |
8121 | lro->tcp_ack = ntohl(tcp->ack_seq); | 8121 | lro->tcp_ack = tcp->ack_seq; |
8122 | lro->sg_num = 1; | 8122 | lro->sg_num = 1; |
8123 | lro->total_len = ntohs(ip->tot_len); | 8123 | lro->total_len = ntohs(ip->tot_len); |
8124 | lro->frags_len = 0; | 8124 | lro->frags_len = 0; |
@@ -8127,10 +8127,10 @@ static void initiate_new_session(struct lro *lro, u8 *l2h, | |||
8127 | * already been done. | 8127 | * already been done. |
8128 | */ | 8128 | */ |
8129 | if (tcp->doff == 8) { | 8129 | if (tcp->doff == 8) { |
8130 | u32 *ptr; | 8130 | __be32 *ptr; |
8131 | ptr = (u32 *)(tcp+1); | 8131 | ptr = (__be32 *)(tcp+1); |
8132 | lro->saw_ts = 1; | 8132 | lro->saw_ts = 1; |
8133 | lro->cur_tsval = *(ptr+1); | 8133 | lro->cur_tsval = ntohl(*(ptr+1)); |
8134 | lro->cur_tsecr = *(ptr+2); | 8134 | lro->cur_tsecr = *(ptr+2); |
8135 | } | 8135 | } |
8136 | lro->in_use = 1; | 8136 | lro->in_use = 1; |
@@ -8156,7 +8156,7 @@ static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro) | |||
8156 | 8156 | ||
8157 | /* Update tsecr field if this session has timestamps enabled */ | 8157 | /* Update tsecr field if this session has timestamps enabled */ |
8158 | if (lro->saw_ts) { | 8158 | if (lro->saw_ts) { |
8159 | u32 *ptr = (u32 *)(tcp + 1); | 8159 | __be32 *ptr = (__be32 *)(tcp + 1); |
8160 | *(ptr+2) = lro->cur_tsecr; | 8160 | *(ptr+2) = lro->cur_tsecr; |
8161 | } | 8161 | } |
8162 | 8162 | ||
@@ -8181,10 +8181,10 @@ static void aggregate_new_rx(struct lro *lro, struct iphdr *ip, | |||
8181 | lro->window = tcp->window; | 8181 | lro->window = tcp->window; |
8182 | 8182 | ||
8183 | if (lro->saw_ts) { | 8183 | if (lro->saw_ts) { |
8184 | u32 *ptr; | 8184 | __be32 *ptr; |
8185 | /* Update tsecr and tsval from this packet */ | 8185 | /* Update tsecr and tsval from this packet */ |
8186 | ptr = (u32 *) (tcp + 1); | 8186 | ptr = (__be32 *)(tcp+1); |
8187 | lro->cur_tsval = *(ptr + 1); | 8187 | lro->cur_tsval = ntohl(*(ptr+1)); |
8188 | lro->cur_tsecr = *(ptr + 2); | 8188 | lro->cur_tsecr = *(ptr + 2); |
8189 | } | 8189 | } |
8190 | } | 8190 | } |
@@ -8235,11 +8235,11 @@ static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip, | |||
8235 | 8235 | ||
8236 | /* Ensure timestamp value increases monotonically */ | 8236 | /* Ensure timestamp value increases monotonically */ |
8237 | if (l_lro) | 8237 | if (l_lro) |
8238 | if (l_lro->cur_tsval > *((u32 *)(ptr+2))) | 8238 | if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2)))) |
8239 | return -1; | 8239 | return -1; |
8240 | 8240 | ||
8241 | /* timestamp echo reply should be non-zero */ | 8241 | /* timestamp echo reply should be non-zero */ |
8242 | if (*((u32 *)(ptr+6)) == 0) | 8242 | if (*((__be32 *)(ptr+6)) == 0) |
8243 | return -1; | 8243 | return -1; |
8244 | } | 8244 | } |
8245 | 8245 | ||
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 9f6016c6f135..64b88eb48287 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h | |||
@@ -809,7 +809,7 @@ struct lro { | |||
809 | int in_use; | 809 | int in_use; |
810 | __be16 window; | 810 | __be16 window; |
811 | u32 cur_tsval; | 811 | u32 cur_tsval; |
812 | u32 cur_tsecr; | 812 | __be32 cur_tsecr; |
813 | u8 saw_ts; | 813 | u8 saw_ts; |
814 | }; | 814 | }; |
815 | 815 | ||
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index b570402f7fed..2e9e88be7b33 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c | |||
@@ -326,7 +326,7 @@ static const struct { | |||
326 | { "SiS 191 PCI Gigabit Ethernet adapter" }, | 326 | { "SiS 191 PCI Gigabit Ethernet adapter" }, |
327 | }; | 327 | }; |
328 | 328 | ||
329 | static struct pci_device_id sis190_pci_tbl[] __devinitdata = { | 329 | static struct pci_device_id sis190_pci_tbl[] = { |
330 | { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 }, | 330 | { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 }, |
331 | { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 }, | 331 | { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 }, |
332 | { 0, }, | 332 | { 0, }, |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 626190eb91e7..dc062367a1c8 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -623,6 +623,7 @@ static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff) | |||
623 | static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD }; | 623 | static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD }; |
624 | static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA }; | 624 | static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA }; |
625 | 625 | ||
626 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
626 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); | 627 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); |
627 | /* Turn on/off phy power saving */ | 628 | /* Turn on/off phy power saving */ |
628 | if (onoff) | 629 | if (onoff) |
@@ -634,7 +635,8 @@ static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff) | |||
634 | reg1 |= coma_mode[port]; | 635 | reg1 |= coma_mode[port]; |
635 | 636 | ||
636 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); | 637 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); |
637 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); | 638 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); |
639 | sky2_pci_read32(hw, PCI_DEV_REG1); | ||
638 | 640 | ||
639 | udelay(100); | 641 | udelay(100); |
640 | } | 642 | } |
@@ -1422,6 +1424,7 @@ static int sky2_up(struct net_device *dev) | |||
1422 | imask |= portirq_msk[port]; | 1424 | imask |= portirq_msk[port]; |
1423 | sky2_write32(hw, B0_IMSK, imask); | 1425 | sky2_write32(hw, B0_IMSK, imask); |
1424 | 1426 | ||
1427 | sky2_set_multicast(dev); | ||
1425 | return 0; | 1428 | return 0; |
1426 | 1429 | ||
1427 | err_out: | 1430 | err_out: |
@@ -2436,6 +2439,7 @@ static void sky2_hw_intr(struct sky2_hw *hw) | |||
2436 | if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { | 2439 | if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { |
2437 | u16 pci_err; | 2440 | u16 pci_err; |
2438 | 2441 | ||
2442 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
2439 | pci_err = sky2_pci_read16(hw, PCI_STATUS); | 2443 | pci_err = sky2_pci_read16(hw, PCI_STATUS); |
2440 | if (net_ratelimit()) | 2444 | if (net_ratelimit()) |
2441 | dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", | 2445 | dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", |
@@ -2443,12 +2447,14 @@ static void sky2_hw_intr(struct sky2_hw *hw) | |||
2443 | 2447 | ||
2444 | sky2_pci_write16(hw, PCI_STATUS, | 2448 | sky2_pci_write16(hw, PCI_STATUS, |
2445 | pci_err | PCI_STATUS_ERROR_BITS); | 2449 | pci_err | PCI_STATUS_ERROR_BITS); |
2450 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
2446 | } | 2451 | } |
2447 | 2452 | ||
2448 | if (status & Y2_IS_PCI_EXP) { | 2453 | if (status & Y2_IS_PCI_EXP) { |
2449 | /* PCI-Express uncorrectable Error occurred */ | 2454 | /* PCI-Express uncorrectable Error occurred */ |
2450 | u32 err; | 2455 | u32 err; |
2451 | 2456 | ||
2457 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
2452 | err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); | 2458 | err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); |
2453 | sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, | 2459 | sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, |
2454 | 0xfffffffful); | 2460 | 0xfffffffful); |
@@ -2456,6 +2462,7 @@ static void sky2_hw_intr(struct sky2_hw *hw) | |||
2456 | dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); | 2462 | dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); |
2457 | 2463 | ||
2458 | sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); | 2464 | sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); |
2465 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
2459 | } | 2466 | } |
2460 | 2467 | ||
2461 | if (status & Y2_HWE_L1_MASK) | 2468 | if (status & Y2_HWE_L1_MASK) |
@@ -2831,6 +2838,7 @@ static void sky2_reset(struct sky2_hw *hw) | |||
2831 | } | 2838 | } |
2832 | 2839 | ||
2833 | sky2_power_on(hw); | 2840 | sky2_power_on(hw); |
2841 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
2834 | 2842 | ||
2835 | for (i = 0; i < hw->ports; i++) { | 2843 | for (i = 0; i < hw->ports; i++) { |
2836 | sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); | 2844 | sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); |
@@ -3554,8 +3562,6 @@ static int sky2_set_ringparam(struct net_device *dev, | |||
3554 | err = sky2_up(dev); | 3562 | err = sky2_up(dev); |
3555 | if (err) | 3563 | if (err) |
3556 | dev_close(dev); | 3564 | dev_close(dev); |
3557 | else | ||
3558 | sky2_set_multicast(dev); | ||
3559 | } | 3565 | } |
3560 | 3566 | ||
3561 | return err; | 3567 | return err; |
@@ -4389,8 +4395,6 @@ static int sky2_resume(struct pci_dev *pdev) | |||
4389 | dev_close(dev); | 4395 | dev_close(dev); |
4390 | goto out; | 4396 | goto out; |
4391 | } | 4397 | } |
4392 | |||
4393 | sky2_set_multicast(dev); | ||
4394 | } | 4398 | } |
4395 | } | 4399 | } |
4396 | 4400 | ||
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c index fe3ac6f9ae89..0e4a88d16327 100644 --- a/drivers/net/sunbmac.c +++ b/drivers/net/sunbmac.c | |||
@@ -1075,7 +1075,7 @@ static const struct ethtool_ops bigmac_ethtool_ops = { | |||
1075 | .get_link = bigmac_get_link, | 1075 | .get_link = bigmac_get_link, |
1076 | }; | 1076 | }; |
1077 | 1077 | ||
1078 | static int __init bigmac_ether_init(struct sbus_dev *qec_sdev) | 1078 | static int __devinit bigmac_ether_init(struct sbus_dev *qec_sdev) |
1079 | { | 1079 | { |
1080 | struct net_device *dev; | 1080 | struct net_device *dev; |
1081 | static int version_printed; | 1081 | static int version_printed; |
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c index ff23c6489efd..e811331d4608 100644 --- a/drivers/net/sunqe.c +++ b/drivers/net/sunqe.c | |||
@@ -747,7 +747,7 @@ static inline void qec_init_once(struct sunqec *qecp, struct sbus_dev *qsdev) | |||
747 | qecp->gregs + GLOB_RSIZE); | 747 | qecp->gregs + GLOB_RSIZE); |
748 | } | 748 | } |
749 | 749 | ||
750 | static u8 __init qec_get_burst(struct device_node *dp) | 750 | static u8 __devinit qec_get_burst(struct device_node *dp) |
751 | { | 751 | { |
752 | u8 bsizes, bsizes_more; | 752 | u8 bsizes, bsizes_more; |
753 | 753 | ||
@@ -767,7 +767,7 @@ static u8 __init qec_get_burst(struct device_node *dp) | |||
767 | return bsizes; | 767 | return bsizes; |
768 | } | 768 | } |
769 | 769 | ||
770 | static struct sunqec * __init get_qec(struct sbus_dev *child_sdev) | 770 | static struct sunqec * __devinit get_qec(struct sbus_dev *child_sdev) |
771 | { | 771 | { |
772 | struct sbus_dev *qec_sdev = child_sdev->parent; | 772 | struct sbus_dev *qec_sdev = child_sdev->parent; |
773 | struct sunqec *qecp; | 773 | struct sunqec *qecp; |
@@ -823,7 +823,7 @@ fail: | |||
823 | return NULL; | 823 | return NULL; |
824 | } | 824 | } |
825 | 825 | ||
826 | static int __init qec_ether_init(struct sbus_dev *sdev) | 826 | static int __devinit qec_ether_init(struct sbus_dev *sdev) |
827 | { | 827 | { |
828 | static unsigned version_printed; | 828 | static unsigned version_printed; |
829 | struct net_device *dev; | 829 | struct net_device *dev; |
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c index 4a0035f7a842..6415ce15c2ef 100644 --- a/drivers/net/sunvnet.c +++ b/drivers/net/sunvnet.c | |||
@@ -1130,7 +1130,7 @@ static struct vio_driver_ops vnet_vio_ops = { | |||
1130 | .handshake_complete = vnet_handshake_complete, | 1130 | .handshake_complete = vnet_handshake_complete, |
1131 | }; | 1131 | }; |
1132 | 1132 | ||
1133 | static void print_version(void) | 1133 | static void __devinit print_version(void) |
1134 | { | 1134 | { |
1135 | static int version_printed; | 1135 | static int version_printed; |
1136 | 1136 | ||
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c index e7b4adc5c4e7..433c994ea9d8 100644 --- a/drivers/net/tokenring/olympic.c +++ b/drivers/net/tokenring/olympic.c | |||
@@ -434,7 +434,7 @@ static int __devinit olympic_init(struct net_device *dev) | |||
434 | 434 | ||
435 | } | 435 | } |
436 | 436 | ||
437 | static int olympic_open(struct net_device *dev) | 437 | static int __devinit olympic_open(struct net_device *dev) |
438 | { | 438 | { |
439 | struct olympic_private *olympic_priv=netdev_priv(dev); | 439 | struct olympic_private *olympic_priv=netdev_priv(dev); |
440 | u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb; | 440 | u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb; |
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 4ffd8739f8b7..fba0811d2608 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -2084,8 +2084,10 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) | |||
2084 | if (!ugeth) | 2084 | if (!ugeth) |
2085 | return; | 2085 | return; |
2086 | 2086 | ||
2087 | if (ugeth->uccf) | 2087 | if (ugeth->uccf) { |
2088 | ucc_fast_free(ugeth->uccf); | 2088 | ucc_fast_free(ugeth->uccf); |
2089 | ugeth->uccf = NULL; | ||
2090 | } | ||
2089 | 2091 | ||
2090 | if (ugeth->p_thread_data_tx) { | 2092 | if (ugeth->p_thread_data_tx) { |
2091 | qe_muram_free(ugeth->thread_dat_tx_offset); | 2093 | qe_muram_free(ugeth->thread_dat_tx_offset); |
@@ -2305,10 +2307,6 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) | |||
2305 | ug_info = ugeth->ug_info; | 2307 | ug_info = ugeth->ug_info; |
2306 | uf_info = &ug_info->uf_info; | 2308 | uf_info = &ug_info->uf_info; |
2307 | 2309 | ||
2308 | /* Create CQs for hash tables */ | ||
2309 | INIT_LIST_HEAD(&ugeth->group_hash_q); | ||
2310 | INIT_LIST_HEAD(&ugeth->ind_hash_q); | ||
2311 | |||
2312 | if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || | 2310 | if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || |
2313 | (uf_info->bd_mem_part == MEM_PART_MURAM))) { | 2311 | (uf_info->bd_mem_part == MEM_PART_MURAM))) { |
2314 | if (netif_msg_probe(ugeth)) | 2312 | if (netif_msg_probe(ugeth)) |
@@ -3668,6 +3666,23 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info) | |||
3668 | return IRQ_HANDLED; | 3666 | return IRQ_HANDLED; |
3669 | } | 3667 | } |
3670 | 3668 | ||
3669 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
3670 | /* | ||
3671 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
3672 | * without having to re-enable interrupts. It's not called while | ||
3673 | * the interrupt routine is executing. | ||
3674 | */ | ||
3675 | static void ucc_netpoll(struct net_device *dev) | ||
3676 | { | ||
3677 | struct ucc_geth_private *ugeth = netdev_priv(dev); | ||
3678 | int irq = ugeth->ug_info->uf_info.irq; | ||
3679 | |||
3680 | disable_irq(irq); | ||
3681 | ucc_geth_irq_handler(irq, dev); | ||
3682 | enable_irq(irq); | ||
3683 | } | ||
3684 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | ||
3685 | |||
3671 | /* Called when something needs to use the ethernet device */ | 3686 | /* Called when something needs to use the ethernet device */ |
3672 | /* Returns 0 for success. */ | 3687 | /* Returns 0 for success. */ |
3673 | static int ucc_geth_open(struct net_device *dev) | 3688 | static int ucc_geth_open(struct net_device *dev) |
@@ -3990,6 +4005,10 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma | |||
3990 | ugeth = netdev_priv(dev); | 4005 | ugeth = netdev_priv(dev); |
3991 | spin_lock_init(&ugeth->lock); | 4006 | spin_lock_init(&ugeth->lock); |
3992 | 4007 | ||
4008 | /* Create CQs for hash tables */ | ||
4009 | INIT_LIST_HEAD(&ugeth->group_hash_q); | ||
4010 | INIT_LIST_HEAD(&ugeth->ind_hash_q); | ||
4011 | |||
3993 | dev_set_drvdata(device, dev); | 4012 | dev_set_drvdata(device, dev); |
3994 | 4013 | ||
3995 | /* Set the dev->base_addr to the gfar reg region */ | 4014 | /* Set the dev->base_addr to the gfar reg region */ |
@@ -4006,6 +4025,9 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma | |||
4006 | #ifdef CONFIG_UGETH_NAPI | 4025 | #ifdef CONFIG_UGETH_NAPI |
4007 | netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT); | 4026 | netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT); |
4008 | #endif /* CONFIG_UGETH_NAPI */ | 4027 | #endif /* CONFIG_UGETH_NAPI */ |
4028 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
4029 | dev->poll_controller = ucc_netpoll; | ||
4030 | #endif | ||
4009 | dev->stop = ucc_geth_close; | 4031 | dev->stop = ucc_geth_close; |
4010 | // dev->change_mtu = ucc_geth_change_mtu; | 4032 | // dev->change_mtu = ucc_geth_change_mtu; |
4011 | dev->mtu = 1500; | 4033 | dev->mtu = 1500; |
@@ -4040,9 +4062,10 @@ static int ucc_geth_remove(struct of_device* ofdev) | |||
4040 | struct net_device *dev = dev_get_drvdata(device); | 4062 | struct net_device *dev = dev_get_drvdata(device); |
4041 | struct ucc_geth_private *ugeth = netdev_priv(dev); | 4063 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
4042 | 4064 | ||
4043 | dev_set_drvdata(device, NULL); | 4065 | unregister_netdev(dev); |
4044 | ucc_geth_memclean(ugeth); | ||
4045 | free_netdev(dev); | 4066 | free_netdev(dev); |
4067 | ucc_geth_memclean(ugeth); | ||
4068 | dev_set_drvdata(device, NULL); | ||
4046 | 4069 | ||
4047 | return 0; | 4070 | return 0; |
4048 | } | 4071 | } |
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 33cbc306226c..7e1f00131f91 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c | |||
@@ -926,7 +926,6 @@ static int rtl8150_probe(struct usb_interface *intf, | |||
926 | netdev->set_multicast_list = rtl8150_set_multicast; | 926 | netdev->set_multicast_list = rtl8150_set_multicast; |
927 | netdev->set_mac_address = rtl8150_set_mac_address; | 927 | netdev->set_mac_address = rtl8150_set_mac_address; |
928 | netdev->get_stats = rtl8150_netdev_stats; | 928 | netdev->get_stats = rtl8150_netdev_stats; |
929 | netdev->mtu = RTL8150_MTU; | ||
930 | SET_ETHTOOL_OPS(netdev, &ops); | 929 | SET_ETHTOOL_OPS(netdev, &ops); |
931 | dev->intr_interval = 100; /* 100ms */ | 930 | dev->intr_interval = 100; /* 100ms */ |
932 | 931 | ||
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index 87c180b563d3..7c851b1e6daa 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c | |||
@@ -606,7 +606,7 @@ static int rhine_napipoll(struct napi_struct *napi, int budget) | |||
606 | } | 606 | } |
607 | #endif | 607 | #endif |
608 | 608 | ||
609 | static void rhine_hw_init(struct net_device *dev, long pioaddr) | 609 | static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr) |
610 | { | 610 | { |
611 | struct rhine_private *rp = netdev_priv(dev); | 611 | struct rhine_private *rp = netdev_priv(dev); |
612 | 612 | ||
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 35cd65d6b9ed..8c9fb824cbd4 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
@@ -8,7 +8,6 @@ | |||
8 | * for 64bit hardware platforms. | 8 | * for 64bit hardware platforms. |
9 | * | 9 | * |
10 | * TODO | 10 | * TODO |
11 | * Big-endian support | ||
12 | * rx_copybreak/alignment | 11 | * rx_copybreak/alignment |
13 | * Scatter gather | 12 | * Scatter gather |
14 | * More testing | 13 | * More testing |
@@ -681,7 +680,7 @@ static void velocity_rx_reset(struct velocity_info *vptr) | |||
681 | * Init state, all RD entries belong to the NIC | 680 | * Init state, all RD entries belong to the NIC |
682 | */ | 681 | */ |
683 | for (i = 0; i < vptr->options.numrx; ++i) | 682 | for (i = 0; i < vptr->options.numrx; ++i) |
684 | vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC; | 683 | vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC; |
685 | 684 | ||
686 | writew(vptr->options.numrx, ®s->RBRDU); | 685 | writew(vptr->options.numrx, ®s->RBRDU); |
687 | writel(vptr->rd_pool_dma, ®s->RDBaseLo); | 686 | writel(vptr->rd_pool_dma, ®s->RDBaseLo); |
@@ -777,7 +776,7 @@ static void velocity_init_registers(struct velocity_info *vptr, | |||
777 | 776 | ||
778 | vptr->int_mask = INT_MASK_DEF; | 777 | vptr->int_mask = INT_MASK_DEF; |
779 | 778 | ||
780 | writel(cpu_to_le32(vptr->rd_pool_dma), ®s->RDBaseLo); | 779 | writel(vptr->rd_pool_dma, ®s->RDBaseLo); |
781 | writew(vptr->options.numrx - 1, ®s->RDCSize); | 780 | writew(vptr->options.numrx - 1, ®s->RDCSize); |
782 | mac_rx_queue_run(regs); | 781 | mac_rx_queue_run(regs); |
783 | mac_rx_queue_wake(regs); | 782 | mac_rx_queue_wake(regs); |
@@ -785,7 +784,7 @@ static void velocity_init_registers(struct velocity_info *vptr, | |||
785 | writew(vptr->options.numtx - 1, ®s->TDCSize); | 784 | writew(vptr->options.numtx - 1, ®s->TDCSize); |
786 | 785 | ||
787 | for (i = 0; i < vptr->num_txq; i++) { | 786 | for (i = 0; i < vptr->num_txq; i++) { |
788 | writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i])); | 787 | writel(vptr->td_pool_dma[i], ®s->TDBaseLo[i]); |
789 | mac_tx_queue_run(regs, i); | 788 | mac_tx_queue_run(regs, i); |
790 | } | 789 | } |
791 | 790 | ||
@@ -1195,7 +1194,7 @@ static inline void velocity_give_many_rx_descs(struct velocity_info *vptr) | |||
1195 | dirty = vptr->rd_dirty - unusable; | 1194 | dirty = vptr->rd_dirty - unusable; |
1196 | for (avail = vptr->rd_filled & 0xfffc; avail; avail--) { | 1195 | for (avail = vptr->rd_filled & 0xfffc; avail; avail--) { |
1197 | dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; | 1196 | dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; |
1198 | vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC; | 1197 | vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC; |
1199 | } | 1198 | } |
1200 | 1199 | ||
1201 | writew(vptr->rd_filled & 0xfffc, ®s->RBRDU); | 1200 | writew(vptr->rd_filled & 0xfffc, ®s->RBRDU); |
@@ -1210,7 +1209,7 @@ static int velocity_rx_refill(struct velocity_info *vptr) | |||
1210 | struct rx_desc *rd = vptr->rd_ring + dirty; | 1209 | struct rx_desc *rd = vptr->rd_ring + dirty; |
1211 | 1210 | ||
1212 | /* Fine for an all zero Rx desc at init time as well */ | 1211 | /* Fine for an all zero Rx desc at init time as well */ |
1213 | if (rd->rdesc0.owner == OWNED_BY_NIC) | 1212 | if (rd->rdesc0.len & OWNED_BY_NIC) |
1214 | break; | 1213 | break; |
1215 | 1214 | ||
1216 | if (!vptr->rd_info[dirty].skb) { | 1215 | if (!vptr->rd_info[dirty].skb) { |
@@ -1413,7 +1412,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status) | |||
1413 | if (!vptr->rd_info[rd_curr].skb) | 1412 | if (!vptr->rd_info[rd_curr].skb) |
1414 | break; | 1413 | break; |
1415 | 1414 | ||
1416 | if (rd->rdesc0.owner == OWNED_BY_NIC) | 1415 | if (rd->rdesc0.len & OWNED_BY_NIC) |
1417 | break; | 1416 | break; |
1418 | 1417 | ||
1419 | rmb(); | 1418 | rmb(); |
@@ -1421,7 +1420,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status) | |||
1421 | /* | 1420 | /* |
1422 | * Don't drop CE or RL error frame although RXOK is off | 1421 | * Don't drop CE or RL error frame although RXOK is off |
1423 | */ | 1422 | */ |
1424 | if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) { | 1423 | if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) { |
1425 | if (velocity_receive_frame(vptr, rd_curr) < 0) | 1424 | if (velocity_receive_frame(vptr, rd_curr) < 0) |
1426 | stats->rx_dropped++; | 1425 | stats->rx_dropped++; |
1427 | } else { | 1426 | } else { |
@@ -1433,7 +1432,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status) | |||
1433 | stats->rx_dropped++; | 1432 | stats->rx_dropped++; |
1434 | } | 1433 | } |
1435 | 1434 | ||
1436 | rd->inten = 1; | 1435 | rd->size |= RX_INTEN; |
1437 | 1436 | ||
1438 | vptr->dev->last_rx = jiffies; | 1437 | vptr->dev->last_rx = jiffies; |
1439 | 1438 | ||
@@ -1554,7 +1553,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |||
1554 | struct net_device_stats *stats = &vptr->stats; | 1553 | struct net_device_stats *stats = &vptr->stats; |
1555 | struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); | 1554 | struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); |
1556 | struct rx_desc *rd = &(vptr->rd_ring[idx]); | 1555 | struct rx_desc *rd = &(vptr->rd_ring[idx]); |
1557 | int pkt_len = rd->rdesc0.len; | 1556 | int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; |
1558 | struct sk_buff *skb; | 1557 | struct sk_buff *skb; |
1559 | 1558 | ||
1560 | if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { | 1559 | if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { |
@@ -1637,8 +1636,7 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) | |||
1637 | */ | 1636 | */ |
1638 | 1637 | ||
1639 | *((u32 *) & (rd->rdesc0)) = 0; | 1638 | *((u32 *) & (rd->rdesc0)) = 0; |
1640 | rd->len = cpu_to_le32(vptr->rx_buf_sz); | 1639 | rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN; |
1641 | rd->inten = 1; | ||
1642 | rd->pa_low = cpu_to_le32(rd_info->skb_dma); | 1640 | rd->pa_low = cpu_to_le32(rd_info->skb_dma); |
1643 | rd->pa_high = 0; | 1641 | rd->pa_high = 0; |
1644 | return 0; | 1642 | return 0; |
@@ -1674,7 +1672,7 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status) | |||
1674 | td = &(vptr->td_rings[qnum][idx]); | 1672 | td = &(vptr->td_rings[qnum][idx]); |
1675 | tdinfo = &(vptr->td_infos[qnum][idx]); | 1673 | tdinfo = &(vptr->td_infos[qnum][idx]); |
1676 | 1674 | ||
1677 | if (td->tdesc0.owner == OWNED_BY_NIC) | 1675 | if (td->tdesc0.len & OWNED_BY_NIC) |
1678 | break; | 1676 | break; |
1679 | 1677 | ||
1680 | if ((works++ > 15)) | 1678 | if ((works++ > 15)) |
@@ -1874,7 +1872,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_ | |||
1874 | 1872 | ||
1875 | for (i = 0; i < tdinfo->nskb_dma; i++) { | 1873 | for (i = 0; i < tdinfo->nskb_dma; i++) { |
1876 | #ifdef VELOCITY_ZERO_COPY_SUPPORT | 1874 | #ifdef VELOCITY_ZERO_COPY_SUPPORT |
1877 | pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], td->tdesc1.len, PCI_DMA_TODEVICE); | 1875 | pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE); |
1878 | #else | 1876 | #else |
1879 | pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE); | 1877 | pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE); |
1880 | #endif | 1878 | #endif |
@@ -2067,8 +2065,8 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2067 | struct velocity_td_info *tdinfo; | 2065 | struct velocity_td_info *tdinfo; |
2068 | unsigned long flags; | 2066 | unsigned long flags; |
2069 | int index; | 2067 | int index; |
2070 | |||
2071 | int pktlen = skb->len; | 2068 | int pktlen = skb->len; |
2069 | __le16 len = cpu_to_le16(pktlen); | ||
2072 | 2070 | ||
2073 | #ifdef VELOCITY_ZERO_COPY_SUPPORT | 2071 | #ifdef VELOCITY_ZERO_COPY_SUPPORT |
2074 | if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) { | 2072 | if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) { |
@@ -2083,9 +2081,8 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2083 | td_ptr = &(vptr->td_rings[qnum][index]); | 2081 | td_ptr = &(vptr->td_rings[qnum][index]); |
2084 | tdinfo = &(vptr->td_infos[qnum][index]); | 2082 | tdinfo = &(vptr->td_infos[qnum][index]); |
2085 | 2083 | ||
2086 | td_ptr->tdesc1.TCPLS = TCPLS_NORMAL; | ||
2087 | td_ptr->tdesc1.TCR = TCR0_TIC; | 2084 | td_ptr->tdesc1.TCR = TCR0_TIC; |
2088 | td_ptr->td_buf[0].queue = 0; | 2085 | td_ptr->td_buf[0].size &= ~TD_QUEUE; |
2089 | 2086 | ||
2090 | /* | 2087 | /* |
2091 | * Pad short frames. | 2088 | * Pad short frames. |
@@ -2093,16 +2090,16 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2093 | if (pktlen < ETH_ZLEN) { | 2090 | if (pktlen < ETH_ZLEN) { |
2094 | /* Cannot occur until ZC support */ | 2091 | /* Cannot occur until ZC support */ |
2095 | pktlen = ETH_ZLEN; | 2092 | pktlen = ETH_ZLEN; |
2093 | len = cpu_to_le16(ETH_ZLEN); | ||
2096 | skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); | 2094 | skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); |
2097 | memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len); | 2095 | memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len); |
2098 | tdinfo->skb = skb; | 2096 | tdinfo->skb = skb; |
2099 | tdinfo->skb_dma[0] = tdinfo->buf_dma; | 2097 | tdinfo->skb_dma[0] = tdinfo->buf_dma; |
2100 | td_ptr->tdesc0.pktsize = pktlen; | 2098 | td_ptr->tdesc0.len = len; |
2101 | td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); | 2099 | td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); |
2102 | td_ptr->td_buf[0].pa_high = 0; | 2100 | td_ptr->td_buf[0].pa_high = 0; |
2103 | td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; | 2101 | td_ptr->td_buf[0].size = len; /* queue is 0 anyway */ |
2104 | tdinfo->nskb_dma = 1; | 2102 | tdinfo->nskb_dma = 1; |
2105 | td_ptr->tdesc1.CMDZ = 2; | ||
2106 | } else | 2103 | } else |
2107 | #ifdef VELOCITY_ZERO_COPY_SUPPORT | 2104 | #ifdef VELOCITY_ZERO_COPY_SUPPORT |
2108 | if (skb_shinfo(skb)->nr_frags > 0) { | 2105 | if (skb_shinfo(skb)->nr_frags > 0) { |
@@ -2111,36 +2108,35 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2111 | if (nfrags > 6) { | 2108 | if (nfrags > 6) { |
2112 | skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); | 2109 | skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); |
2113 | tdinfo->skb_dma[0] = tdinfo->buf_dma; | 2110 | tdinfo->skb_dma[0] = tdinfo->buf_dma; |
2114 | td_ptr->tdesc0.pktsize = | 2111 | td_ptr->tdesc0.len = len; |
2115 | td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); | 2112 | td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); |
2116 | td_ptr->td_buf[0].pa_high = 0; | 2113 | td_ptr->td_buf[0].pa_high = 0; |
2117 | td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; | 2114 | td_ptr->td_buf[0].size = len; /* queue is 0 anyway */ |
2118 | tdinfo->nskb_dma = 1; | 2115 | tdinfo->nskb_dma = 1; |
2119 | td_ptr->tdesc1.CMDZ = 2; | ||
2120 | } else { | 2116 | } else { |
2121 | int i = 0; | 2117 | int i = 0; |
2122 | tdinfo->nskb_dma = 0; | 2118 | tdinfo->nskb_dma = 0; |
2123 | tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data, skb->len - skb->data_len, PCI_DMA_TODEVICE); | 2119 | tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data, |
2120 | skb_headlen(skb), PCI_DMA_TODEVICE); | ||
2124 | 2121 | ||
2125 | td_ptr->tdesc0.pktsize = pktlen; | 2122 | td_ptr->tdesc0.len = len; |
2126 | 2123 | ||
2127 | /* FIXME: support 48bit DMA later */ | 2124 | /* FIXME: support 48bit DMA later */ |
2128 | td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma); | 2125 | td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma); |
2129 | td_ptr->td_buf[i].pa_high = 0; | 2126 | td_ptr->td_buf[i].pa_high = 0; |
2130 | td_ptr->td_buf[i].bufsize = skb->len->skb->data_len; | 2127 | td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb)); |
2131 | 2128 | ||
2132 | for (i = 0; i < nfrags; i++) { | 2129 | for (i = 0; i < nfrags; i++) { |
2133 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 2130 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
2134 | void *addr = ((void *) page_address(frag->page + frag->page_offset)); | 2131 | void *addr = (void *)page_address(frag->page) + frag->page_offset; |
2135 | 2132 | ||
2136 | tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE); | 2133 | tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE); |
2137 | 2134 | ||
2138 | td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); | 2135 | td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); |
2139 | td_ptr->td_buf[i + 1].pa_high = 0; | 2136 | td_ptr->td_buf[i + 1].pa_high = 0; |
2140 | td_ptr->td_buf[i + 1].bufsize = frag->size; | 2137 | td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size); |
2141 | } | 2138 | } |
2142 | tdinfo->nskb_dma = i - 1; | 2139 | tdinfo->nskb_dma = i - 1; |
2143 | td_ptr->tdesc1.CMDZ = i; | ||
2144 | } | 2140 | } |
2145 | 2141 | ||
2146 | } else | 2142 | } else |
@@ -2152,18 +2148,16 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2152 | */ | 2148 | */ |
2153 | tdinfo->skb = skb; | 2149 | tdinfo->skb = skb; |
2154 | tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE); | 2150 | tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE); |
2155 | td_ptr->tdesc0.pktsize = pktlen; | 2151 | td_ptr->tdesc0.len = len; |
2156 | td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); | 2152 | td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); |
2157 | td_ptr->td_buf[0].pa_high = 0; | 2153 | td_ptr->td_buf[0].pa_high = 0; |
2158 | td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; | 2154 | td_ptr->td_buf[0].size = len; |
2159 | tdinfo->nskb_dma = 1; | 2155 | tdinfo->nskb_dma = 1; |
2160 | td_ptr->tdesc1.CMDZ = 2; | ||
2161 | } | 2156 | } |
2157 | td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16; | ||
2162 | 2158 | ||
2163 | if (vptr->vlgrp && vlan_tx_tag_present(skb)) { | 2159 | if (vptr->vlgrp && vlan_tx_tag_present(skb)) { |
2164 | td_ptr->tdesc1.pqinf.VID = vlan_tx_tag_get(skb); | 2160 | td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb)); |
2165 | td_ptr->tdesc1.pqinf.priority = 0; | ||
2166 | td_ptr->tdesc1.pqinf.CFI = 0; | ||
2167 | td_ptr->tdesc1.TCR |= TCR0_VETAG; | 2161 | td_ptr->tdesc1.TCR |= TCR0_VETAG; |
2168 | } | 2162 | } |
2169 | 2163 | ||
@@ -2185,7 +2179,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2185 | 2179 | ||
2186 | if (prev < 0) | 2180 | if (prev < 0) |
2187 | prev = vptr->options.numtx - 1; | 2181 | prev = vptr->options.numtx - 1; |
2188 | td_ptr->tdesc0.owner = OWNED_BY_NIC; | 2182 | td_ptr->tdesc0.len |= OWNED_BY_NIC; |
2189 | vptr->td_used[qnum]++; | 2183 | vptr->td_used[qnum]++; |
2190 | vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx; | 2184 | vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx; |
2191 | 2185 | ||
@@ -2193,7 +2187,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2193 | netif_stop_queue(dev); | 2187 | netif_stop_queue(dev); |
2194 | 2188 | ||
2195 | td_ptr = &(vptr->td_rings[qnum][prev]); | 2189 | td_ptr = &(vptr->td_rings[qnum][prev]); |
2196 | td_ptr->td_buf[0].queue = 1; | 2190 | td_ptr->td_buf[0].size |= TD_QUEUE; |
2197 | mac_tx_queue_wake(vptr->mac_regs, qnum); | 2191 | mac_tx_queue_wake(vptr->mac_regs, qnum); |
2198 | } | 2192 | } |
2199 | dev->trans_start = jiffies; | 2193 | dev->trans_start = jiffies; |
@@ -3410,7 +3404,7 @@ static int velocity_suspend(struct pci_dev *pdev, pm_message_t state) | |||
3410 | velocity_save_context(vptr, &vptr->context); | 3404 | velocity_save_context(vptr, &vptr->context); |
3411 | velocity_shutdown(vptr); | 3405 | velocity_shutdown(vptr); |
3412 | velocity_set_wol(vptr); | 3406 | velocity_set_wol(vptr); |
3413 | pci_enable_wake(pdev, 3, 1); | 3407 | pci_enable_wake(pdev, PCI_D3hot, 1); |
3414 | pci_set_power_state(pdev, PCI_D3hot); | 3408 | pci_set_power_state(pdev, PCI_D3hot); |
3415 | } else { | 3409 | } else { |
3416 | velocity_save_context(vptr, &vptr->context); | 3410 | velocity_save_context(vptr, &vptr->context); |
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h index aa9179623d90..7387be4f428d 100644 --- a/drivers/net/via-velocity.h +++ b/drivers/net/via-velocity.h | |||
@@ -70,40 +70,27 @@ | |||
70 | * Bits in the RSR0 register | 70 | * Bits in the RSR0 register |
71 | */ | 71 | */ |
72 | 72 | ||
73 | #define RSR_DETAG 0x0080 | 73 | #define RSR_DETAG cpu_to_le16(0x0080) |
74 | #define RSR_SNTAG 0x0040 | 74 | #define RSR_SNTAG cpu_to_le16(0x0040) |
75 | #define RSR_RXER 0x0020 | 75 | #define RSR_RXER cpu_to_le16(0x0020) |
76 | #define RSR_RL 0x0010 | 76 | #define RSR_RL cpu_to_le16(0x0010) |
77 | #define RSR_CE 0x0008 | 77 | #define RSR_CE cpu_to_le16(0x0008) |
78 | #define RSR_FAE 0x0004 | 78 | #define RSR_FAE cpu_to_le16(0x0004) |
79 | #define RSR_CRC 0x0002 | 79 | #define RSR_CRC cpu_to_le16(0x0002) |
80 | #define RSR_VIDM 0x0001 | 80 | #define RSR_VIDM cpu_to_le16(0x0001) |
81 | 81 | ||
82 | /* | 82 | /* |
83 | * Bits in the RSR1 register | 83 | * Bits in the RSR1 register |
84 | */ | 84 | */ |
85 | 85 | ||
86 | #define RSR_RXOK 0x8000 // rx OK | 86 | #define RSR_RXOK cpu_to_le16(0x8000) // rx OK |
87 | #define RSR_PFT 0x4000 // Perfect filtering address match | 87 | #define RSR_PFT cpu_to_le16(0x4000) // Perfect filtering address match |
88 | #define RSR_MAR 0x2000 // MAC accept multicast address packet | 88 | #define RSR_MAR cpu_to_le16(0x2000) // MAC accept multicast address packet |
89 | #define RSR_BAR 0x1000 // MAC accept broadcast address packet | 89 | #define RSR_BAR cpu_to_le16(0x1000) // MAC accept broadcast address packet |
90 | #define RSR_PHY 0x0800 // MAC accept physical address packet | 90 | #define RSR_PHY cpu_to_le16(0x0800) // MAC accept physical address packet |
91 | #define RSR_VTAG 0x0400 // 802.1p/1q tagging packet indicator | 91 | #define RSR_VTAG cpu_to_le16(0x0400) // 802.1p/1q tagging packet indicator |
92 | #define RSR_STP 0x0200 // start of packet | 92 | #define RSR_STP cpu_to_le16(0x0200) // start of packet |
93 | #define RSR_EDP 0x0100 // end of packet | 93 | #define RSR_EDP cpu_to_le16(0x0100) // end of packet |
94 | |||
95 | /* | ||
96 | * Bits in the RSR1 register | ||
97 | */ | ||
98 | |||
99 | #define RSR1_RXOK 0x80 // rx OK | ||
100 | #define RSR1_PFT 0x40 // Perfect filtering address match | ||
101 | #define RSR1_MAR 0x20 // MAC accept multicast address packet | ||
102 | #define RSR1_BAR 0x10 // MAC accept broadcast address packet | ||
103 | #define RSR1_PHY 0x08 // MAC accept physical address packet | ||
104 | #define RSR1_VTAG 0x04 // 802.1p/1q tagging packet indicator | ||
105 | #define RSR1_STP 0x02 // start of packet | ||
106 | #define RSR1_EDP 0x01 // end of packet | ||
107 | 94 | ||
108 | /* | 95 | /* |
109 | * Bits in the CSM register | 96 | * Bits in the CSM register |
@@ -120,33 +107,21 @@ | |||
120 | * Bits in the TSR0 register | 107 | * Bits in the TSR0 register |
121 | */ | 108 | */ |
122 | 109 | ||
123 | #define TSR0_ABT 0x0080 // Tx abort because of excessive collision | 110 | #define TSR0_ABT cpu_to_le16(0x0080) // Tx abort because of excessive collision |
124 | #define TSR0_OWT 0x0040 // Jumbo frame Tx abort | 111 | #define TSR0_OWT cpu_to_le16(0x0040) // Jumbo frame Tx abort |
125 | #define TSR0_OWC 0x0020 // Out of window collision | 112 | #define TSR0_OWC cpu_to_le16(0x0020) // Out of window collision |
126 | #define TSR0_COLS 0x0010 // experience collision in this transmit event | 113 | #define TSR0_COLS cpu_to_le16(0x0010) // experience collision in this transmit event |
127 | #define TSR0_NCR3 0x0008 // collision retry counter[3] | 114 | #define TSR0_NCR3 cpu_to_le16(0x0008) // collision retry counter[3] |
128 | #define TSR0_NCR2 0x0004 // collision retry counter[2] | 115 | #define TSR0_NCR2 cpu_to_le16(0x0004) // collision retry counter[2] |
129 | #define TSR0_NCR1 0x0002 // collision retry counter[1] | 116 | #define TSR0_NCR1 cpu_to_le16(0x0002) // collision retry counter[1] |
130 | #define TSR0_NCR0 0x0001 // collision retry counter[0] | 117 | #define TSR0_NCR0 cpu_to_le16(0x0001) // collision retry counter[0] |
131 | #define TSR0_TERR 0x8000 // | 118 | #define TSR0_TERR cpu_to_le16(0x8000) // |
132 | #define TSR0_FDX 0x4000 // current transaction is serviced by full duplex mode | 119 | #define TSR0_FDX cpu_to_le16(0x4000) // current transaction is serviced by full duplex mode |
133 | #define TSR0_GMII 0x2000 // current transaction is serviced by GMII mode | 120 | #define TSR0_GMII cpu_to_le16(0x2000) // current transaction is serviced by GMII mode |
134 | #define TSR0_LNKFL 0x1000 // packet serviced during link down | 121 | #define TSR0_LNKFL cpu_to_le16(0x1000) // packet serviced during link down |
135 | #define TSR0_SHDN 0x0400 // shutdown case | 122 | #define TSR0_SHDN cpu_to_le16(0x0400) // shutdown case |
136 | #define TSR0_CRS 0x0200 // carrier sense lost | 123 | #define TSR0_CRS cpu_to_le16(0x0200) // carrier sense lost |
137 | #define TSR0_CDH 0x0100 // AQE test fail (CD heartbeat) | 124 | #define TSR0_CDH cpu_to_le16(0x0100) // AQE test fail (CD heartbeat) |
138 | |||
139 | /* | ||
140 | * Bits in the TSR1 register | ||
141 | */ | ||
142 | |||
143 | #define TSR1_TERR 0x80 // | ||
144 | #define TSR1_FDX 0x40 // current transaction is serviced by full duplex mode | ||
145 | #define TSR1_GMII 0x20 // current transaction is serviced by GMII mode | ||
146 | #define TSR1_LNKFL 0x10 // packet serviced during link down | ||
147 | #define TSR1_SHDN 0x04 // shutdown case | ||
148 | #define TSR1_CRS 0x02 // carrier sense lost | ||
149 | #define TSR1_CDH 0x01 // AQE test fail (CD heartbeat) | ||
150 | 125 | ||
151 | // | 126 | // |
152 | // Bits in the TCR0 register | 127 | // Bits in the TCR0 register |
@@ -197,25 +172,26 @@ | |||
197 | */ | 172 | */ |
198 | 173 | ||
199 | struct rdesc0 { | 174 | struct rdesc0 { |
200 | u16 RSR; /* Receive status */ | 175 | __le16 RSR; /* Receive status */ |
201 | u16 len:14; /* Received packet length */ | 176 | __le16 len; /* bits 0--13; bit 15 - owner */ |
202 | u16 reserved:1; | ||
203 | u16 owner:1; /* Who owns this buffer ? */ | ||
204 | }; | 177 | }; |
205 | 178 | ||
206 | struct rdesc1 { | 179 | struct rdesc1 { |
207 | u16 PQTAG; | 180 | __le16 PQTAG; |
208 | u8 CSM; | 181 | u8 CSM; |
209 | u8 IPKT; | 182 | u8 IPKT; |
210 | }; | 183 | }; |
211 | 184 | ||
185 | enum { | ||
186 | RX_INTEN = __constant_cpu_to_le16(0x8000) | ||
187 | }; | ||
188 | |||
212 | struct rx_desc { | 189 | struct rx_desc { |
213 | struct rdesc0 rdesc0; | 190 | struct rdesc0 rdesc0; |
214 | struct rdesc1 rdesc1; | 191 | struct rdesc1 rdesc1; |
215 | u32 pa_low; /* Low 32 bit PCI address */ | 192 | __le32 pa_low; /* Low 32 bit PCI address */ |
216 | u16 pa_high; /* Next 16 bit PCI address (48 total) */ | 193 | __le16 pa_high; /* Next 16 bit PCI address (48 total) */ |
217 | u16 len:15; /* Frame size */ | 194 | __le16 size; /* bits 0--14 - frame size, bit 15 - enable int. */ |
218 | u16 inten:1; /* Enable interrupt */ | ||
219 | } __attribute__ ((__packed__)); | 195 | } __attribute__ ((__packed__)); |
220 | 196 | ||
221 | /* | 197 | /* |
@@ -223,32 +199,24 @@ struct rx_desc { | |||
223 | */ | 199 | */ |
224 | 200 | ||
225 | struct tdesc0 { | 201 | struct tdesc0 { |
226 | u16 TSR; /* Transmit status register */ | 202 | __le16 TSR; /* Transmit status register */ |
227 | u16 pktsize:14; /* Size of frame */ | 203 | __le16 len; /* bits 0--13 - size of frame, bit 15 - owner */ |
228 | u16 reserved:1; | ||
229 | u16 owner:1; /* Who owns the buffer */ | ||
230 | }; | 204 | }; |
231 | 205 | ||
232 | struct pqinf { /* Priority queue info */ | ||
233 | u16 VID:12; | ||
234 | u16 CFI:1; | ||
235 | u16 priority:3; | ||
236 | } __attribute__ ((__packed__)); | ||
237 | |||
238 | struct tdesc1 { | 206 | struct tdesc1 { |
239 | struct pqinf pqinf; | 207 | __le16 vlan; |
240 | u8 TCR; | 208 | u8 TCR; |
241 | u8 TCPLS:2; | 209 | u8 cmd; /* bits 0--1 - TCPLS, bits 4--7 - CMDZ */ |
242 | u8 reserved:2; | ||
243 | u8 CMDZ:4; | ||
244 | } __attribute__ ((__packed__)); | 210 | } __attribute__ ((__packed__)); |
245 | 211 | ||
212 | enum { | ||
213 | TD_QUEUE = __constant_cpu_to_le16(0x8000) | ||
214 | }; | ||
215 | |||
246 | struct td_buf { | 216 | struct td_buf { |
247 | u32 pa_low; | 217 | __le32 pa_low; |
248 | u16 pa_high; | 218 | __le16 pa_high; |
249 | u16 bufsize:14; | 219 | __le16 size; /* bits 0--13 - size, bit 15 - queue */ |
250 | u16 reserved:1; | ||
251 | u16 queue:1; | ||
252 | } __attribute__ ((__packed__)); | 220 | } __attribute__ ((__packed__)); |
253 | 221 | ||
254 | struct tx_desc { | 222 | struct tx_desc { |
@@ -276,7 +244,7 @@ struct velocity_td_info { | |||
276 | 244 | ||
277 | enum velocity_owner { | 245 | enum velocity_owner { |
278 | OWNED_BY_HOST = 0, | 246 | OWNED_BY_HOST = 0, |
279 | OWNED_BY_NIC = 1 | 247 | OWNED_BY_NIC = __constant_cpu_to_le16(0x8000) |
280 | }; | 248 | }; |
281 | 249 | ||
282 | 250 | ||
@@ -1012,45 +980,45 @@ struct mac_regs { | |||
1012 | volatile u8 RCR; | 980 | volatile u8 RCR; |
1013 | volatile u8 TCR; | 981 | volatile u8 TCR; |
1014 | 982 | ||
1015 | volatile u32 CR0Set; /* 0x08 */ | 983 | volatile __le32 CR0Set; /* 0x08 */ |
1016 | volatile u32 CR0Clr; /* 0x0C */ | 984 | volatile __le32 CR0Clr; /* 0x0C */ |
1017 | 985 | ||
1018 | volatile u8 MARCAM[8]; /* 0x10 */ | 986 | volatile u8 MARCAM[8]; /* 0x10 */ |
1019 | 987 | ||
1020 | volatile u32 DecBaseHi; /* 0x18 */ | 988 | volatile __le32 DecBaseHi; /* 0x18 */ |
1021 | volatile u16 DbfBaseHi; /* 0x1C */ | 989 | volatile __le16 DbfBaseHi; /* 0x1C */ |
1022 | volatile u16 reserved_1E; | 990 | volatile __le16 reserved_1E; |
1023 | 991 | ||
1024 | volatile u16 ISRCTL; /* 0x20 */ | 992 | volatile __le16 ISRCTL; /* 0x20 */ |
1025 | volatile u8 TXESR; | 993 | volatile u8 TXESR; |
1026 | volatile u8 RXESR; | 994 | volatile u8 RXESR; |
1027 | 995 | ||
1028 | volatile u32 ISR; /* 0x24 */ | 996 | volatile __le32 ISR; /* 0x24 */ |
1029 | volatile u32 IMR; | 997 | volatile __le32 IMR; |
1030 | 998 | ||
1031 | volatile u32 TDStatusPort; /* 0x2C */ | 999 | volatile __le32 TDStatusPort; /* 0x2C */ |
1032 | 1000 | ||
1033 | volatile u16 TDCSRSet; /* 0x30 */ | 1001 | volatile __le16 TDCSRSet; /* 0x30 */ |
1034 | volatile u8 RDCSRSet; | 1002 | volatile u8 RDCSRSet; |
1035 | volatile u8 reserved_33; | 1003 | volatile u8 reserved_33; |
1036 | volatile u16 TDCSRClr; | 1004 | volatile __le16 TDCSRClr; |
1037 | volatile u8 RDCSRClr; | 1005 | volatile u8 RDCSRClr; |
1038 | volatile u8 reserved_37; | 1006 | volatile u8 reserved_37; |
1039 | 1007 | ||
1040 | volatile u32 RDBaseLo; /* 0x38 */ | 1008 | volatile __le32 RDBaseLo; /* 0x38 */ |
1041 | volatile u16 RDIdx; /* 0x3C */ | 1009 | volatile __le16 RDIdx; /* 0x3C */ |
1042 | volatile u16 reserved_3E; | 1010 | volatile __le16 reserved_3E; |
1043 | 1011 | ||
1044 | volatile u32 TDBaseLo[4]; /* 0x40 */ | 1012 | volatile __le32 TDBaseLo[4]; /* 0x40 */ |
1045 | 1013 | ||
1046 | volatile u16 RDCSize; /* 0x50 */ | 1014 | volatile __le16 RDCSize; /* 0x50 */ |
1047 | volatile u16 TDCSize; /* 0x52 */ | 1015 | volatile __le16 TDCSize; /* 0x52 */ |
1048 | volatile u16 TDIdx[4]; /* 0x54 */ | 1016 | volatile __le16 TDIdx[4]; /* 0x54 */ |
1049 | volatile u16 tx_pause_timer; /* 0x5C */ | 1017 | volatile __le16 tx_pause_timer; /* 0x5C */ |
1050 | volatile u16 RBRDU; /* 0x5E */ | 1018 | volatile __le16 RBRDU; /* 0x5E */ |
1051 | 1019 | ||
1052 | volatile u32 FIFOTest0; /* 0x60 */ | 1020 | volatile __le32 FIFOTest0; /* 0x60 */ |
1053 | volatile u32 FIFOTest1; /* 0x64 */ | 1021 | volatile __le32 FIFOTest1; /* 0x64 */ |
1054 | 1022 | ||
1055 | volatile u8 CAMADDR; /* 0x68 */ | 1023 | volatile u8 CAMADDR; /* 0x68 */ |
1056 | volatile u8 CAMCR; /* 0x69 */ | 1024 | volatile u8 CAMCR; /* 0x69 */ |
@@ -1063,18 +1031,18 @@ struct mac_regs { | |||
1063 | volatile u8 PHYSR1; | 1031 | volatile u8 PHYSR1; |
1064 | volatile u8 MIICR; | 1032 | volatile u8 MIICR; |
1065 | volatile u8 MIIADR; | 1033 | volatile u8 MIIADR; |
1066 | volatile u16 MIIDATA; | 1034 | volatile __le16 MIIDATA; |
1067 | 1035 | ||
1068 | volatile u16 SoftTimer0; /* 0x74 */ | 1036 | volatile __le16 SoftTimer0; /* 0x74 */ |
1069 | volatile u16 SoftTimer1; | 1037 | volatile __le16 SoftTimer1; |
1070 | 1038 | ||
1071 | volatile u8 CFGA; /* 0x78 */ | 1039 | volatile u8 CFGA; /* 0x78 */ |
1072 | volatile u8 CFGB; | 1040 | volatile u8 CFGB; |
1073 | volatile u8 CFGC; | 1041 | volatile u8 CFGC; |
1074 | volatile u8 CFGD; | 1042 | volatile u8 CFGD; |
1075 | 1043 | ||
1076 | volatile u16 DCFG; /* 0x7C */ | 1044 | volatile __le16 DCFG; /* 0x7C */ |
1077 | volatile u16 MCFG; | 1045 | volatile __le16 MCFG; |
1078 | 1046 | ||
1079 | volatile u8 TBIST; /* 0x80 */ | 1047 | volatile u8 TBIST; /* 0x80 */ |
1080 | volatile u8 RBIST; | 1048 | volatile u8 RBIST; |
@@ -1086,9 +1054,9 @@ struct mac_regs { | |||
1086 | volatile u8 rev_id; | 1054 | volatile u8 rev_id; |
1087 | volatile u8 PORSTS; | 1055 | volatile u8 PORSTS; |
1088 | 1056 | ||
1089 | volatile u32 MIBData; /* 0x88 */ | 1057 | volatile __le32 MIBData; /* 0x88 */ |
1090 | 1058 | ||
1091 | volatile u16 EEWrData; | 1059 | volatile __le16 EEWrData; |
1092 | 1060 | ||
1093 | volatile u8 reserved_8E; | 1061 | volatile u8 reserved_8E; |
1094 | volatile u8 BPMDWr; | 1062 | volatile u8 BPMDWr; |
@@ -1098,7 +1066,7 @@ struct mac_regs { | |||
1098 | volatile u8 EECHKSUM; /* 0x92 */ | 1066 | volatile u8 EECHKSUM; /* 0x92 */ |
1099 | volatile u8 EECSR; | 1067 | volatile u8 EECSR; |
1100 | 1068 | ||
1101 | volatile u16 EERdData; /* 0x94 */ | 1069 | volatile __le16 EERdData; /* 0x94 */ |
1102 | volatile u8 EADDR; | 1070 | volatile u8 EADDR; |
1103 | volatile u8 EMBCMD; | 1071 | volatile u8 EMBCMD; |
1104 | 1072 | ||
@@ -1112,22 +1080,22 @@ struct mac_regs { | |||
1112 | volatile u8 DEBUG; | 1080 | volatile u8 DEBUG; |
1113 | volatile u8 CHIPGCR; | 1081 | volatile u8 CHIPGCR; |
1114 | 1082 | ||
1115 | volatile u16 WOLCRSet; /* 0xA0 */ | 1083 | volatile __le16 WOLCRSet; /* 0xA0 */ |
1116 | volatile u8 PWCFGSet; | 1084 | volatile u8 PWCFGSet; |
1117 | volatile u8 WOLCFGSet; | 1085 | volatile u8 WOLCFGSet; |
1118 | 1086 | ||
1119 | volatile u16 WOLCRClr; /* 0xA4 */ | 1087 | volatile __le16 WOLCRClr; /* 0xA4 */ |
1120 | volatile u8 PWCFGCLR; | 1088 | volatile u8 PWCFGCLR; |
1121 | volatile u8 WOLCFGClr; | 1089 | volatile u8 WOLCFGClr; |
1122 | 1090 | ||
1123 | volatile u16 WOLSRSet; /* 0xA8 */ | 1091 | volatile __le16 WOLSRSet; /* 0xA8 */ |
1124 | volatile u16 reserved_AA; | 1092 | volatile __le16 reserved_AA; |
1125 | 1093 | ||
1126 | volatile u16 WOLSRClr; /* 0xAC */ | 1094 | volatile __le16 WOLSRClr; /* 0xAC */ |
1127 | volatile u16 reserved_AE; | 1095 | volatile __le16 reserved_AE; |
1128 | 1096 | ||
1129 | volatile u16 PatternCRC[8]; /* 0xB0 */ | 1097 | volatile __le16 PatternCRC[8]; /* 0xB0 */ |
1130 | volatile u32 ByteMask[4][4]; /* 0xC0 */ | 1098 | volatile __le32 ByteMask[4][4]; /* 0xC0 */ |
1131 | } __attribute__ ((__packed__)); | 1099 | } __attribute__ ((__packed__)); |
1132 | 1100 | ||
1133 | 1101 | ||
@@ -1238,12 +1206,12 @@ typedef u8 MCAM_ADDR[ETH_ALEN]; | |||
1238 | struct arp_packet { | 1206 | struct arp_packet { |
1239 | u8 dest_mac[ETH_ALEN]; | 1207 | u8 dest_mac[ETH_ALEN]; |
1240 | u8 src_mac[ETH_ALEN]; | 1208 | u8 src_mac[ETH_ALEN]; |
1241 | u16 type; | 1209 | __be16 type; |
1242 | u16 ar_hrd; | 1210 | __be16 ar_hrd; |
1243 | u16 ar_pro; | 1211 | __be16 ar_pro; |
1244 | u8 ar_hln; | 1212 | u8 ar_hln; |
1245 | u8 ar_pln; | 1213 | u8 ar_pln; |
1246 | u16 ar_op; | 1214 | __be16 ar_op; |
1247 | u8 ar_sha[ETH_ALEN]; | 1215 | u8 ar_sha[ETH_ALEN]; |
1248 | u8 ar_sip[4]; | 1216 | u8 ar_sip[4]; |
1249 | u8 ar_tha[ETH_ALEN]; | 1217 | u8 ar_tha[ETH_ALEN]; |
@@ -1253,7 +1221,7 @@ struct arp_packet { | |||
1253 | struct _magic_packet { | 1221 | struct _magic_packet { |
1254 | u8 dest_mac[6]; | 1222 | u8 dest_mac[6]; |
1255 | u8 src_mac[6]; | 1223 | u8 src_mac[6]; |
1256 | u16 type; | 1224 | __be16 type; |
1257 | u8 MAC[16][6]; | 1225 | u8 MAC[16][6]; |
1258 | u8 password[6]; | 1226 | u8 password[6]; |
1259 | } __attribute__ ((__packed__)); | 1227 | } __attribute__ ((__packed__)); |
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index d6599d219193..ddc87149fe31 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c | |||
@@ -153,7 +153,7 @@ static int ath5k_pci_resume(struct pci_dev *pdev); | |||
153 | #define ath5k_pci_resume NULL | 153 | #define ath5k_pci_resume NULL |
154 | #endif /* CONFIG_PM */ | 154 | #endif /* CONFIG_PM */ |
155 | 155 | ||
156 | static struct pci_driver ath5k_pci_drv_id = { | 156 | static struct pci_driver ath5k_pci_driver = { |
157 | .name = "ath5k_pci", | 157 | .name = "ath5k_pci", |
158 | .id_table = ath5k_pci_id_table, | 158 | .id_table = ath5k_pci_id_table, |
159 | .probe = ath5k_pci_probe, | 159 | .probe = ath5k_pci_probe, |
@@ -329,7 +329,7 @@ init_ath5k_pci(void) | |||
329 | 329 | ||
330 | ath5k_debug_init(); | 330 | ath5k_debug_init(); |
331 | 331 | ||
332 | ret = pci_register_driver(&ath5k_pci_drv_id); | 332 | ret = pci_register_driver(&ath5k_pci_driver); |
333 | if (ret) { | 333 | if (ret) { |
334 | printk(KERN_ERR "ath5k_pci: can't register pci driver\n"); | 334 | printk(KERN_ERR "ath5k_pci: can't register pci driver\n"); |
335 | return ret; | 335 | return ret; |
@@ -341,7 +341,7 @@ init_ath5k_pci(void) | |||
341 | static void __exit | 341 | static void __exit |
342 | exit_ath5k_pci(void) | 342 | exit_ath5k_pci(void) |
343 | { | 343 | { |
344 | pci_unregister_driver(&ath5k_pci_drv_id); | 344 | pci_unregister_driver(&ath5k_pci_driver); |
345 | 345 | ||
346 | ath5k_debug_finish(); | 346 | ath5k_debug_finish(); |
347 | } | 347 | } |
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index 4fdeb5323248..8d4d91d35fd2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c | |||
@@ -238,9 +238,10 @@ void iwl3945_hw_rx_statistics(struct iwl3945_priv *priv, struct iwl3945_rx_mem_b | |||
238 | priv->last_statistics_time = jiffies; | 238 | priv->last_statistics_time = jiffies; |
239 | } | 239 | } |
240 | 240 | ||
241 | void iwl3945_add_radiotap(struct iwl3945_priv *priv, struct sk_buff *skb, | 241 | static void iwl3945_add_radiotap(struct iwl3945_priv *priv, |
242 | struct iwl3945_rx_frame_hdr *rx_hdr, | 242 | struct sk_buff *skb, |
243 | struct ieee80211_rx_status *stats) | 243 | struct iwl3945_rx_frame_hdr *rx_hdr, |
244 | struct ieee80211_rx_status *stats) | ||
244 | { | 245 | { |
245 | /* First cache any information we need before we overwrite | 246 | /* First cache any information we need before we overwrite |
246 | * the information provided in the skb from the hardware */ | 247 | * the information provided in the skb from the hardware */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index 569347ff377b..d727de8b96fe 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c | |||
@@ -4658,17 +4658,30 @@ void iwl4965_set_ht_add_station(struct iwl4965_priv *priv, u8 index, | |||
4658 | struct ieee80211_ht_info *sta_ht_inf) | 4658 | struct ieee80211_ht_info *sta_ht_inf) |
4659 | { | 4659 | { |
4660 | __le32 sta_flags; | 4660 | __le32 sta_flags; |
4661 | u8 mimo_ps_mode; | ||
4661 | 4662 | ||
4662 | if (!sta_ht_inf || !sta_ht_inf->ht_supported) | 4663 | if (!sta_ht_inf || !sta_ht_inf->ht_supported) |
4663 | goto done; | 4664 | goto done; |
4664 | 4665 | ||
4666 | mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2; | ||
4667 | |||
4665 | sta_flags = priv->stations[index].sta.station_flags; | 4668 | sta_flags = priv->stations[index].sta.station_flags; |
4666 | 4669 | ||
4667 | if (((sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS >> 2)) | 4670 | sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); |
4668 | == IWL_MIMO_PS_DYNAMIC) | 4671 | |
4672 | switch (mimo_ps_mode) { | ||
4673 | case WLAN_HT_CAP_MIMO_PS_STATIC: | ||
4674 | sta_flags |= STA_FLG_MIMO_DIS_MSK; | ||
4675 | break; | ||
4676 | case WLAN_HT_CAP_MIMO_PS_DYNAMIC: | ||
4669 | sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; | 4677 | sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; |
4670 | else | 4678 | break; |
4671 | sta_flags &= ~STA_FLG_RTS_MIMO_PROT_MSK; | 4679 | case WLAN_HT_CAP_MIMO_PS_DISABLED: |
4680 | break; | ||
4681 | default: | ||
4682 | IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode); | ||
4683 | break; | ||
4684 | } | ||
4672 | 4685 | ||
4673 | sta_flags |= cpu_to_le32( | 4686 | sta_flags |= cpu_to_le32( |
4674 | (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); | 4687 | (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); |
@@ -4679,7 +4692,7 @@ void iwl4965_set_ht_add_station(struct iwl4965_priv *priv, u8 index, | |||
4679 | if (iwl4965_is_fat_tx_allowed(priv, sta_ht_inf)) | 4692 | if (iwl4965_is_fat_tx_allowed(priv, sta_ht_inf)) |
4680 | sta_flags |= STA_FLG_FAT_EN_MSK; | 4693 | sta_flags |= STA_FLG_FAT_EN_MSK; |
4681 | else | 4694 | else |
4682 | sta_flags &= (~STA_FLG_FAT_EN_MSK); | 4695 | sta_flags &= ~STA_FLG_FAT_EN_MSK; |
4683 | 4696 | ||
4684 | priv->stations[index].sta.station_flags = sta_flags; | 4697 | priv->stations[index].sta.station_flags = sta_flags; |
4685 | done: | 4698 | done: |
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h index cb009f4c401f..8993cca81b40 100644 --- a/drivers/net/wireless/iwlwifi/iwl-helpers.h +++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h | |||
@@ -147,9 +147,6 @@ static inline struct ieee80211_conf *ieee80211_get_hw_conf( | |||
147 | 147 | ||
148 | #define QOS_CONTROL_LEN 2 | 148 | #define QOS_CONTROL_LEN 2 |
149 | 149 | ||
150 | #define IEEE80211_STYPE_BACK_REQ 0x0080 | ||
151 | #define IEEE80211_STYPE_BACK 0x0090 | ||
152 | |||
153 | 150 | ||
154 | static inline int ieee80211_is_management(u16 fc) | 151 | static inline int ieee80211_is_management(u16 fc) |
155 | { | 152 | { |
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 33239f197984..f55c75712b55 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
@@ -6330,6 +6330,11 @@ static int __iwl3945_up(struct iwl3945_priv *priv) | |||
6330 | return -ENODEV; | 6330 | return -ENODEV; |
6331 | } | 6331 | } |
6332 | 6332 | ||
6333 | if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { | ||
6334 | IWL_ERROR("ucode not available for device bringup\n"); | ||
6335 | return -EIO; | ||
6336 | } | ||
6337 | |||
6333 | /* If platform's RF_KILL switch is NOT set to KILL */ | 6338 | /* If platform's RF_KILL switch is NOT set to KILL */ |
6334 | if (iwl3945_read32(priv, CSR_GP_CNTRL) & | 6339 | if (iwl3945_read32(priv, CSR_GP_CNTRL) & |
6335 | CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) | 6340 | CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) |
@@ -6342,11 +6347,6 @@ static int __iwl3945_up(struct iwl3945_priv *priv) | |||
6342 | } | 6347 | } |
6343 | } | 6348 | } |
6344 | 6349 | ||
6345 | if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { | ||
6346 | IWL_ERROR("ucode not available for device bringup\n"); | ||
6347 | return -EIO; | ||
6348 | } | ||
6349 | |||
6350 | iwl3945_write32(priv, CSR_INT, 0xFFFFFFFF); | 6350 | iwl3945_write32(priv, CSR_INT, 0xFFFFFFFF); |
6351 | 6351 | ||
6352 | rc = iwl3945_hw_nic_init(priv); | 6352 | rc = iwl3945_hw_nic_init(priv); |
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c index bf3a60c037aa..f423241b9567 100644 --- a/drivers/net/wireless/iwlwifi/iwl4965-base.c +++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c | |||
@@ -6755,6 +6755,11 @@ static int __iwl4965_up(struct iwl4965_priv *priv) | |||
6755 | return -ENODEV; | 6755 | return -ENODEV; |
6756 | } | 6756 | } |
6757 | 6757 | ||
6758 | if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { | ||
6759 | IWL_ERROR("ucode not available for device bringup\n"); | ||
6760 | return -EIO; | ||
6761 | } | ||
6762 | |||
6758 | /* If platform's RF_KILL switch is NOT set to KILL */ | 6763 | /* If platform's RF_KILL switch is NOT set to KILL */ |
6759 | if (iwl4965_read32(priv, CSR_GP_CNTRL) & | 6764 | if (iwl4965_read32(priv, CSR_GP_CNTRL) & |
6760 | CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) | 6765 | CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) |
@@ -6767,11 +6772,6 @@ static int __iwl4965_up(struct iwl4965_priv *priv) | |||
6767 | } | 6772 | } |
6768 | } | 6773 | } |
6769 | 6774 | ||
6770 | if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { | ||
6771 | IWL_ERROR("ucode not available for device bringup\n"); | ||
6772 | return -EIO; | ||
6773 | } | ||
6774 | |||
6775 | iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF); | 6775 | iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF); |
6776 | 6776 | ||
6777 | rc = iwl4965_hw_nic_init(priv); | 6777 | rc = iwl4965_hw_nic_init(priv); |
diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 484e45c7c89a..aa0737019e37 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h | |||
@@ -525,6 +525,7 @@ struct dccp_sock { | |||
525 | __u64 dccps_gsr; | 525 | __u64 dccps_gsr; |
526 | __u64 dccps_gar; | 526 | __u64 dccps_gar; |
527 | __be32 dccps_service; | 527 | __be32 dccps_service; |
528 | __u32 dccps_mss_cache; | ||
528 | struct dccp_service_list *dccps_service_list; | 529 | struct dccp_service_list *dccps_service_list; |
529 | __u32 dccps_timestamp_echo; | 530 | __u32 dccps_timestamp_echo; |
530 | __u32 dccps_timestamp_time; | 531 | __u32 dccps_timestamp_time; |
@@ -533,7 +534,6 @@ struct dccp_sock { | |||
533 | __u16 dccps_pcslen; | 534 | __u16 dccps_pcslen; |
534 | __u16 dccps_pcrlen; | 535 | __u16 dccps_pcrlen; |
535 | unsigned long dccps_ndp_count; | 536 | unsigned long dccps_ndp_count; |
536 | __u32 dccps_mss_cache; | ||
537 | unsigned long dccps_rate_last; | 537 | unsigned long dccps_rate_last; |
538 | struct dccp_minisock dccps_minisock; | 538 | struct dccp_minisock dccps_minisock; |
539 | struct dccp_ackvec *dccps_hc_rx_ackvec; | 539 | struct dccp_ackvec *dccps_hc_rx_ackvec; |
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 5de6d911cdf7..f577c8f1c66d 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h | |||
@@ -287,6 +287,12 @@ struct ieee80211_ht_addt_info { | |||
287 | #define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004 | 287 | #define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004 |
288 | #define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010 | 288 | #define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010 |
289 | 289 | ||
290 | /* MIMO Power Save Modes */ | ||
291 | #define WLAN_HT_CAP_MIMO_PS_STATIC 0 | ||
292 | #define WLAN_HT_CAP_MIMO_PS_DYNAMIC 1 | ||
293 | #define WLAN_HT_CAP_MIMO_PS_INVALID 2 | ||
294 | #define WLAN_HT_CAP_MIMO_PS_DISABLED 3 | ||
295 | |||
290 | /* Authentication algorithms */ | 296 | /* Authentication algorithms */ |
291 | #define WLAN_AUTH_OPEN 0 | 297 | #define WLAN_AUTH_OPEN 0 |
292 | #define WLAN_AUTH_SHARED_KEY 1 | 298 | #define WLAN_AUTH_SHARED_KEY 1 |
diff --git a/include/linux/phy.h b/include/linux/phy.h index 554836edd915..5e43ae751412 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -88,7 +88,7 @@ struct mii_bus { | |||
88 | 88 | ||
89 | /* A lock to ensure that only one thing can read/write | 89 | /* A lock to ensure that only one thing can read/write |
90 | * the MDIO bus at a time */ | 90 | * the MDIO bus at a time */ |
91 | spinlock_t mdio_lock; | 91 | struct mutex mdio_lock; |
92 | 92 | ||
93 | struct device *dev; | 93 | struct device *dev; |
94 | 94 | ||
@@ -284,10 +284,11 @@ struct phy_device { | |||
284 | 284 | ||
285 | /* Interrupt and Polling infrastructure */ | 285 | /* Interrupt and Polling infrastructure */ |
286 | struct work_struct phy_queue; | 286 | struct work_struct phy_queue; |
287 | struct work_struct state_queue; | ||
287 | struct timer_list phy_timer; | 288 | struct timer_list phy_timer; |
288 | atomic_t irq_disable; | 289 | atomic_t irq_disable; |
289 | 290 | ||
290 | spinlock_t lock; | 291 | struct mutex lock; |
291 | 292 | ||
292 | struct net_device *attached_dev; | 293 | struct net_device *attached_dev; |
293 | 294 | ||
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index b24508abb850..b2cfc4927257 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h | |||
@@ -112,13 +112,13 @@ struct ifmcaddr6 | |||
112 | struct ip6_sf_list *mca_sources; | 112 | struct ip6_sf_list *mca_sources; |
113 | struct ip6_sf_list *mca_tomb; | 113 | struct ip6_sf_list *mca_tomb; |
114 | unsigned int mca_sfmode; | 114 | unsigned int mca_sfmode; |
115 | unsigned char mca_crcount; | ||
115 | unsigned long mca_sfcount[2]; | 116 | unsigned long mca_sfcount[2]; |
116 | struct timer_list mca_timer; | 117 | struct timer_list mca_timer; |
117 | unsigned mca_flags; | 118 | unsigned mca_flags; |
118 | int mca_users; | 119 | int mca_users; |
119 | atomic_t mca_refcnt; | 120 | atomic_t mca_refcnt; |
120 | spinlock_t mca_lock; | 121 | spinlock_t mca_lock; |
121 | unsigned char mca_crcount; | ||
122 | unsigned long mca_cstamp; | 122 | unsigned long mca_cstamp; |
123 | unsigned long mca_tstamp; | 123 | unsigned long mca_tstamp; |
124 | }; | 124 | }; |
@@ -166,11 +166,11 @@ struct inet6_dev | |||
166 | struct ifmcaddr6 *mc_list; | 166 | struct ifmcaddr6 *mc_list; |
167 | struct ifmcaddr6 *mc_tomb; | 167 | struct ifmcaddr6 *mc_tomb; |
168 | rwlock_t mc_lock; | 168 | rwlock_t mc_lock; |
169 | unsigned long mc_v1_seen; | ||
170 | unsigned long mc_maxdelay; | ||
171 | unsigned char mc_qrv; | 169 | unsigned char mc_qrv; |
172 | unsigned char mc_gq_running; | 170 | unsigned char mc_gq_running; |
173 | unsigned char mc_ifc_count; | 171 | unsigned char mc_ifc_count; |
172 | unsigned long mc_v1_seen; | ||
173 | unsigned long mc_maxdelay; | ||
174 | struct timer_list mc_gq_timer; /* general query timer */ | 174 | struct timer_list mc_gq_timer; /* general query timer */ |
175 | struct timer_list mc_ifc_timer; /* interface change timer */ | 175 | struct timer_list mc_ifc_timer; /* interface change timer */ |
176 | 176 | ||
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index fdff630708ce..62a5b691858e 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h | |||
@@ -49,7 +49,7 @@ static inline int inet6_sk_ehashfn(const struct sock *sk) | |||
49 | return inet6_ehashfn(laddr, lport, faddr, fport); | 49 | return inet6_ehashfn(laddr, lport, faddr, fport); |
50 | } | 50 | } |
51 | 51 | ||
52 | extern void __inet6_hash(struct inet_hashinfo *hashinfo, struct sock *sk); | 52 | extern void __inet6_hash(struct sock *sk); |
53 | 53 | ||
54 | /* | 54 | /* |
55 | * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so | 55 | * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so |
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 133cf30d2d79..f00f0573627b 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h | |||
@@ -29,7 +29,6 @@ | |||
29 | #undef INET_CSK_CLEAR_TIMERS | 29 | #undef INET_CSK_CLEAR_TIMERS |
30 | 30 | ||
31 | struct inet_bind_bucket; | 31 | struct inet_bind_bucket; |
32 | struct inet_hashinfo; | ||
33 | struct tcp_congestion_ops; | 32 | struct tcp_congestion_ops; |
34 | 33 | ||
35 | /* | 34 | /* |
@@ -59,6 +58,8 @@ struct inet_connection_sock_af_ops { | |||
59 | int level, int optname, | 58 | int level, int optname, |
60 | char __user *optval, int __user *optlen); | 59 | char __user *optval, int __user *optlen); |
61 | void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); | 60 | void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); |
61 | int (*bind_conflict)(const struct sock *sk, | ||
62 | const struct inet_bind_bucket *tb); | ||
62 | }; | 63 | }; |
63 | 64 | ||
64 | /** inet_connection_sock - INET connection oriented sock | 65 | /** inet_connection_sock - INET connection oriented sock |
@@ -244,10 +245,7 @@ extern struct request_sock *inet_csk_search_req(const struct sock *sk, | |||
244 | const __be32 laddr); | 245 | const __be32 laddr); |
245 | extern int inet_csk_bind_conflict(const struct sock *sk, | 246 | extern int inet_csk_bind_conflict(const struct sock *sk, |
246 | const struct inet_bind_bucket *tb); | 247 | const struct inet_bind_bucket *tb); |
247 | extern int inet_csk_get_port(struct inet_hashinfo *hashinfo, | 248 | extern int inet_csk_get_port(struct sock *sk, unsigned short snum); |
248 | struct sock *sk, unsigned short snum, | ||
249 | int (*bind_conflict)(const struct sock *sk, | ||
250 | const struct inet_bind_bucket *tb)); | ||
251 | 249 | ||
252 | extern struct dst_entry* inet_csk_route_req(struct sock *sk, | 250 | extern struct dst_entry* inet_csk_route_req(struct sock *sk, |
253 | const struct request_sock *req); | 251 | const struct request_sock *req); |
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index c23c4ed30724..48ac620cb846 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h | |||
@@ -221,9 +221,9 @@ static inline int inet_sk_listen_hashfn(const struct sock *sk) | |||
221 | } | 221 | } |
222 | 222 | ||
223 | /* Caller must disable local BH processing. */ | 223 | /* Caller must disable local BH processing. */ |
224 | static inline void __inet_inherit_port(struct inet_hashinfo *table, | 224 | static inline void __inet_inherit_port(struct sock *sk, struct sock *child) |
225 | struct sock *sk, struct sock *child) | ||
226 | { | 225 | { |
226 | struct inet_hashinfo *table = sk->sk_prot->hashinfo; | ||
227 | const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size); | 227 | const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size); |
228 | struct inet_bind_hashbucket *head = &table->bhash[bhash]; | 228 | struct inet_bind_hashbucket *head = &table->bhash[bhash]; |
229 | struct inet_bind_bucket *tb; | 229 | struct inet_bind_bucket *tb; |
@@ -235,15 +235,14 @@ static inline void __inet_inherit_port(struct inet_hashinfo *table, | |||
235 | spin_unlock(&head->lock); | 235 | spin_unlock(&head->lock); |
236 | } | 236 | } |
237 | 237 | ||
238 | static inline void inet_inherit_port(struct inet_hashinfo *table, | 238 | static inline void inet_inherit_port(struct sock *sk, struct sock *child) |
239 | struct sock *sk, struct sock *child) | ||
240 | { | 239 | { |
241 | local_bh_disable(); | 240 | local_bh_disable(); |
242 | __inet_inherit_port(table, sk, child); | 241 | __inet_inherit_port(sk, child); |
243 | local_bh_enable(); | 242 | local_bh_enable(); |
244 | } | 243 | } |
245 | 244 | ||
246 | extern void inet_put_port(struct inet_hashinfo *table, struct sock *sk); | 245 | extern void inet_put_port(struct sock *sk); |
247 | 246 | ||
248 | extern void inet_listen_wlock(struct inet_hashinfo *hashinfo); | 247 | extern void inet_listen_wlock(struct inet_hashinfo *hashinfo); |
249 | 248 | ||
@@ -266,41 +265,9 @@ static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo) | |||
266 | wake_up(&hashinfo->lhash_wait); | 265 | wake_up(&hashinfo->lhash_wait); |
267 | } | 266 | } |
268 | 267 | ||
269 | extern void __inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk); | 268 | extern void __inet_hash_nolisten(struct sock *sk); |
270 | extern void __inet_hash_nolisten(struct inet_hashinfo *hinfo, struct sock *sk); | 269 | extern void inet_hash(struct sock *sk); |
271 | 270 | extern void inet_unhash(struct sock *sk); | |
272 | static inline void inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk) | ||
273 | { | ||
274 | if (sk->sk_state != TCP_CLOSE) { | ||
275 | local_bh_disable(); | ||
276 | __inet_hash(hashinfo, sk); | ||
277 | local_bh_enable(); | ||
278 | } | ||
279 | } | ||
280 | |||
281 | static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk) | ||
282 | { | ||
283 | rwlock_t *lock; | ||
284 | |||
285 | if (sk_unhashed(sk)) | ||
286 | goto out; | ||
287 | |||
288 | if (sk->sk_state == TCP_LISTEN) { | ||
289 | local_bh_disable(); | ||
290 | inet_listen_wlock(hashinfo); | ||
291 | lock = &hashinfo->lhash_lock; | ||
292 | } else { | ||
293 | lock = inet_ehash_lockp(hashinfo, sk->sk_hash); | ||
294 | write_lock_bh(lock); | ||
295 | } | ||
296 | |||
297 | if (__sk_del_node_init(sk)) | ||
298 | sock_prot_inuse_add(sk->sk_prot, -1); | ||
299 | write_unlock_bh(lock); | ||
300 | out: | ||
301 | if (sk->sk_state == TCP_LISTEN) | ||
302 | wake_up(&hashinfo->lhash_wait); | ||
303 | } | ||
304 | 271 | ||
305 | extern struct sock *__inet_lookup_listener(struct net *net, | 272 | extern struct sock *__inet_lookup_listener(struct net *net, |
306 | struct inet_hashinfo *hashinfo, | 273 | struct inet_hashinfo *hashinfo, |
@@ -425,7 +392,7 @@ extern int __inet_hash_connect(struct inet_timewait_death_row *death_row, | |||
425 | struct sock *sk, | 392 | struct sock *sk, |
426 | int (*check_established)(struct inet_timewait_death_row *, | 393 | int (*check_established)(struct inet_timewait_death_row *, |
427 | struct sock *, __u16, struct inet_timewait_sock **), | 394 | struct sock *, __u16, struct inet_timewait_sock **), |
428 | void (*hash)(struct inet_hashinfo *, struct sock *)); | 395 | void (*hash)(struct sock *sk)); |
429 | extern int inet_hash_connect(struct inet_timewait_death_row *death_row, | 396 | extern int inet_hash_connect(struct inet_timewait_death_row *death_row, |
430 | struct sock *sk); | 397 | struct sock *sk); |
431 | #endif /* _INET_HASHTABLES_H */ | 398 | #endif /* _INET_HASHTABLES_H */ |
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 67e925065aae..296547bfb0b7 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h | |||
@@ -116,6 +116,7 @@ struct inet_timewait_sock { | |||
116 | #define tw_hash __tw_common.skc_hash | 116 | #define tw_hash __tw_common.skc_hash |
117 | #define tw_prot __tw_common.skc_prot | 117 | #define tw_prot __tw_common.skc_prot |
118 | #define tw_net __tw_common.skc_net | 118 | #define tw_net __tw_common.skc_net |
119 | int tw_timeout; | ||
119 | volatile unsigned char tw_substate; | 120 | volatile unsigned char tw_substate; |
120 | /* 3 bits hole, try to pack */ | 121 | /* 3 bits hole, try to pack */ |
121 | unsigned char tw_rcv_wscale; | 122 | unsigned char tw_rcv_wscale; |
@@ -130,7 +131,6 @@ struct inet_timewait_sock { | |||
130 | __u8 tw_ipv6only:1; | 131 | __u8 tw_ipv6only:1; |
131 | /* 15 bits hole, try to pack */ | 132 | /* 15 bits hole, try to pack */ |
132 | __u16 tw_ipv6_offset; | 133 | __u16 tw_ipv6_offset; |
133 | int tw_timeout; | ||
134 | unsigned long tw_ttd; | 134 | unsigned long tw_ttd; |
135 | struct inet_bind_bucket *tw_tb; | 135 | struct inet_bind_bucket *tw_tb; |
136 | struct hlist_node tw_death_node; | 136 | struct hlist_node tw_death_node; |
diff --git a/include/net/sock.h b/include/net/sock.h index e3fb4c047f4c..8a7889b35810 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -496,6 +496,7 @@ extern int sk_wait_data(struct sock *sk, long *timeo); | |||
496 | 496 | ||
497 | struct request_sock_ops; | 497 | struct request_sock_ops; |
498 | struct timewait_sock_ops; | 498 | struct timewait_sock_ops; |
499 | struct inet_hashinfo; | ||
499 | 500 | ||
500 | /* Networking protocol blocks we attach to sockets. | 501 | /* Networking protocol blocks we attach to sockets. |
501 | * socket layer -> transport layer interface | 502 | * socket layer -> transport layer interface |
@@ -578,6 +579,8 @@ struct proto { | |||
578 | struct request_sock_ops *rsk_prot; | 579 | struct request_sock_ops *rsk_prot; |
579 | struct timewait_sock_ops *twsk_prot; | 580 | struct timewait_sock_ops *twsk_prot; |
580 | 581 | ||
582 | struct inet_hashinfo *hashinfo; | ||
583 | |||
581 | struct module *owner; | 584 | struct module *owner; |
582 | 585 | ||
583 | char name[32]; | 586 | char name[32]; |
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index ebe59d98721a..287a62bc2e0f 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h | |||
@@ -271,8 +271,6 @@ extern struct sk_buff *dccp_make_response(struct sock *sk, | |||
271 | 271 | ||
272 | extern int dccp_connect(struct sock *sk); | 272 | extern int dccp_connect(struct sock *sk); |
273 | extern int dccp_disconnect(struct sock *sk, int flags); | 273 | extern int dccp_disconnect(struct sock *sk, int flags); |
274 | extern void dccp_hash(struct sock *sk); | ||
275 | extern void dccp_unhash(struct sock *sk); | ||
276 | extern int dccp_getsockopt(struct sock *sk, int level, int optname, | 274 | extern int dccp_getsockopt(struct sock *sk, int level, int optname, |
277 | char __user *optval, int __user *optlen); | 275 | char __user *optval, int __user *optlen); |
278 | extern int dccp_setsockopt(struct sock *sk, int level, int optname, | 276 | extern int dccp_setsockopt(struct sock *sk, int level, int optname, |
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index c982ad88223d..474075adbde4 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
@@ -38,12 +38,6 @@ | |||
38 | */ | 38 | */ |
39 | static struct socket *dccp_v4_ctl_socket; | 39 | static struct socket *dccp_v4_ctl_socket; |
40 | 40 | ||
41 | static int dccp_v4_get_port(struct sock *sk, const unsigned short snum) | ||
42 | { | ||
43 | return inet_csk_get_port(&dccp_hashinfo, sk, snum, | ||
44 | inet_csk_bind_conflict); | ||
45 | } | ||
46 | |||
47 | int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | 41 | int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
48 | { | 42 | { |
49 | struct inet_sock *inet = inet_sk(sk); | 43 | struct inet_sock *inet = inet_sk(sk); |
@@ -408,8 +402,8 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
408 | 402 | ||
409 | dccp_sync_mss(newsk, dst_mtu(dst)); | 403 | dccp_sync_mss(newsk, dst_mtu(dst)); |
410 | 404 | ||
411 | __inet_hash_nolisten(&dccp_hashinfo, newsk); | 405 | __inet_hash_nolisten(newsk); |
412 | __inet_inherit_port(&dccp_hashinfo, sk, newsk); | 406 | __inet_inherit_port(sk, newsk); |
413 | 407 | ||
414 | return newsk; | 408 | return newsk; |
415 | 409 | ||
@@ -898,6 +892,7 @@ static struct inet_connection_sock_af_ops dccp_ipv4_af_ops = { | |||
898 | .getsockopt = ip_getsockopt, | 892 | .getsockopt = ip_getsockopt, |
899 | .addr2sockaddr = inet_csk_addr2sockaddr, | 893 | .addr2sockaddr = inet_csk_addr2sockaddr, |
900 | .sockaddr_len = sizeof(struct sockaddr_in), | 894 | .sockaddr_len = sizeof(struct sockaddr_in), |
895 | .bind_conflict = inet_csk_bind_conflict, | ||
901 | #ifdef CONFIG_COMPAT | 896 | #ifdef CONFIG_COMPAT |
902 | .compat_setsockopt = compat_ip_setsockopt, | 897 | .compat_setsockopt = compat_ip_setsockopt, |
903 | .compat_getsockopt = compat_ip_getsockopt, | 898 | .compat_getsockopt = compat_ip_getsockopt, |
@@ -937,10 +932,10 @@ static struct proto dccp_v4_prot = { | |||
937 | .sendmsg = dccp_sendmsg, | 932 | .sendmsg = dccp_sendmsg, |
938 | .recvmsg = dccp_recvmsg, | 933 | .recvmsg = dccp_recvmsg, |
939 | .backlog_rcv = dccp_v4_do_rcv, | 934 | .backlog_rcv = dccp_v4_do_rcv, |
940 | .hash = dccp_hash, | 935 | .hash = inet_hash, |
941 | .unhash = dccp_unhash, | 936 | .unhash = inet_unhash, |
942 | .accept = inet_csk_accept, | 937 | .accept = inet_csk_accept, |
943 | .get_port = dccp_v4_get_port, | 938 | .get_port = inet_csk_get_port, |
944 | .shutdown = dccp_shutdown, | 939 | .shutdown = dccp_shutdown, |
945 | .destroy = dccp_destroy_sock, | 940 | .destroy = dccp_destroy_sock, |
946 | .orphan_count = &dccp_orphan_count, | 941 | .orphan_count = &dccp_orphan_count, |
@@ -948,6 +943,7 @@ static struct proto dccp_v4_prot = { | |||
948 | .obj_size = sizeof(struct dccp_sock), | 943 | .obj_size = sizeof(struct dccp_sock), |
949 | .rsk_prot = &dccp_request_sock_ops, | 944 | .rsk_prot = &dccp_request_sock_ops, |
950 | .twsk_prot = &dccp_timewait_sock_ops, | 945 | .twsk_prot = &dccp_timewait_sock_ops, |
946 | .hashinfo = &dccp_hashinfo, | ||
951 | #ifdef CONFIG_COMPAT | 947 | #ifdef CONFIG_COMPAT |
952 | .compat_setsockopt = compat_dccp_setsockopt, | 948 | .compat_setsockopt = compat_dccp_setsockopt, |
953 | .compat_getsockopt = compat_dccp_getsockopt, | 949 | .compat_getsockopt = compat_dccp_getsockopt, |
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index ed0a0053a797..490333d47c7b 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -39,21 +39,15 @@ static struct socket *dccp_v6_ctl_socket; | |||
39 | static struct inet_connection_sock_af_ops dccp_ipv6_mapped; | 39 | static struct inet_connection_sock_af_ops dccp_ipv6_mapped; |
40 | static struct inet_connection_sock_af_ops dccp_ipv6_af_ops; | 40 | static struct inet_connection_sock_af_ops dccp_ipv6_af_ops; |
41 | 41 | ||
42 | static int dccp_v6_get_port(struct sock *sk, unsigned short snum) | ||
43 | { | ||
44 | return inet_csk_get_port(&dccp_hashinfo, sk, snum, | ||
45 | inet6_csk_bind_conflict); | ||
46 | } | ||
47 | |||
48 | static void dccp_v6_hash(struct sock *sk) | 42 | static void dccp_v6_hash(struct sock *sk) |
49 | { | 43 | { |
50 | if (sk->sk_state != DCCP_CLOSED) { | 44 | if (sk->sk_state != DCCP_CLOSED) { |
51 | if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) { | 45 | if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) { |
52 | dccp_hash(sk); | 46 | inet_hash(sk); |
53 | return; | 47 | return; |
54 | } | 48 | } |
55 | local_bh_disable(); | 49 | local_bh_disable(); |
56 | __inet6_hash(&dccp_hashinfo, sk); | 50 | __inet6_hash(sk); |
57 | local_bh_enable(); | 51 | local_bh_enable(); |
58 | } | 52 | } |
59 | } | 53 | } |
@@ -630,8 +624,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, | |||
630 | 624 | ||
631 | newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6; | 625 | newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6; |
632 | 626 | ||
633 | __inet6_hash(&dccp_hashinfo, newsk); | 627 | __inet6_hash(newsk); |
634 | inet_inherit_port(&dccp_hashinfo, sk, newsk); | 628 | inet_inherit_port(sk, newsk); |
635 | 629 | ||
636 | return newsk; | 630 | return newsk; |
637 | 631 | ||
@@ -1054,6 +1048,7 @@ static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = { | |||
1054 | .getsockopt = ipv6_getsockopt, | 1048 | .getsockopt = ipv6_getsockopt, |
1055 | .addr2sockaddr = inet6_csk_addr2sockaddr, | 1049 | .addr2sockaddr = inet6_csk_addr2sockaddr, |
1056 | .sockaddr_len = sizeof(struct sockaddr_in6), | 1050 | .sockaddr_len = sizeof(struct sockaddr_in6), |
1051 | .bind_conflict = inet6_csk_bind_conflict, | ||
1057 | #ifdef CONFIG_COMPAT | 1052 | #ifdef CONFIG_COMPAT |
1058 | .compat_setsockopt = compat_ipv6_setsockopt, | 1053 | .compat_setsockopt = compat_ipv6_setsockopt, |
1059 | .compat_getsockopt = compat_ipv6_getsockopt, | 1054 | .compat_getsockopt = compat_ipv6_getsockopt, |
@@ -1123,9 +1118,9 @@ static struct proto dccp_v6_prot = { | |||
1123 | .recvmsg = dccp_recvmsg, | 1118 | .recvmsg = dccp_recvmsg, |
1124 | .backlog_rcv = dccp_v6_do_rcv, | 1119 | .backlog_rcv = dccp_v6_do_rcv, |
1125 | .hash = dccp_v6_hash, | 1120 | .hash = dccp_v6_hash, |
1126 | .unhash = dccp_unhash, | 1121 | .unhash = inet_unhash, |
1127 | .accept = inet_csk_accept, | 1122 | .accept = inet_csk_accept, |
1128 | .get_port = dccp_v6_get_port, | 1123 | .get_port = inet_csk_get_port, |
1129 | .shutdown = dccp_shutdown, | 1124 | .shutdown = dccp_shutdown, |
1130 | .destroy = dccp_v6_destroy_sock, | 1125 | .destroy = dccp_v6_destroy_sock, |
1131 | .orphan_count = &dccp_orphan_count, | 1126 | .orphan_count = &dccp_orphan_count, |
@@ -1133,6 +1128,7 @@ static struct proto dccp_v6_prot = { | |||
1133 | .obj_size = sizeof(struct dccp6_sock), | 1128 | .obj_size = sizeof(struct dccp6_sock), |
1134 | .rsk_prot = &dccp6_request_sock_ops, | 1129 | .rsk_prot = &dccp6_request_sock_ops, |
1135 | .twsk_prot = &dccp6_timewait_sock_ops, | 1130 | .twsk_prot = &dccp6_timewait_sock_ops, |
1131 | .hashinfo = &dccp_hashinfo, | ||
1136 | #ifdef CONFIG_COMPAT | 1132 | #ifdef CONFIG_COMPAT |
1137 | .compat_setsockopt = compat_dccp_setsockopt, | 1133 | .compat_setsockopt = compat_dccp_setsockopt, |
1138 | .compat_getsockopt = compat_dccp_getsockopt, | 1134 | .compat_getsockopt = compat_dccp_getsockopt, |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 0bed4a6095b7..e3f5d37b84be 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -78,7 +78,7 @@ void dccp_set_state(struct sock *sk, const int state) | |||
78 | sk->sk_prot->unhash(sk); | 78 | sk->sk_prot->unhash(sk); |
79 | if (inet_csk(sk)->icsk_bind_hash != NULL && | 79 | if (inet_csk(sk)->icsk_bind_hash != NULL && |
80 | !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) | 80 | !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) |
81 | inet_put_port(&dccp_hashinfo, sk); | 81 | inet_put_port(sk); |
82 | /* fall through */ | 82 | /* fall through */ |
83 | default: | 83 | default: |
84 | if (oldstate == DCCP_OPEN) | 84 | if (oldstate == DCCP_OPEN) |
@@ -173,20 +173,6 @@ const char *dccp_state_name(const int state) | |||
173 | 173 | ||
174 | EXPORT_SYMBOL_GPL(dccp_state_name); | 174 | EXPORT_SYMBOL_GPL(dccp_state_name); |
175 | 175 | ||
176 | void dccp_hash(struct sock *sk) | ||
177 | { | ||
178 | inet_hash(&dccp_hashinfo, sk); | ||
179 | } | ||
180 | |||
181 | EXPORT_SYMBOL_GPL(dccp_hash); | ||
182 | |||
183 | void dccp_unhash(struct sock *sk) | ||
184 | { | ||
185 | inet_unhash(&dccp_hashinfo, sk); | ||
186 | } | ||
187 | |||
188 | EXPORT_SYMBOL_GPL(dccp_unhash); | ||
189 | |||
190 | int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) | 176 | int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) |
191 | { | 177 | { |
192 | struct dccp_sock *dp = dccp_sk(sk); | 178 | struct dccp_sock *dp = dccp_sk(sk); |
@@ -268,7 +254,7 @@ int dccp_destroy_sock(struct sock *sk) | |||
268 | 254 | ||
269 | /* Clean up a referenced DCCP bind bucket. */ | 255 | /* Clean up a referenced DCCP bind bucket. */ |
270 | if (inet_csk(sk)->icsk_bind_hash != NULL) | 256 | if (inet_csk(sk)->icsk_bind_hash != NULL) |
271 | inet_put_port(&dccp_hashinfo, sk); | 257 | inet_put_port(sk); |
272 | 258 | ||
273 | kfree(dp->dccps_service_list); | 259 | kfree(dp->dccps_service_list); |
274 | dp->dccps_service_list = NULL; | 260 | dp->dccps_service_list = NULL; |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index de5a41de191a..b189278c7bc1 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -78,11 +78,9 @@ EXPORT_SYMBOL_GPL(inet_csk_bind_conflict); | |||
78 | /* Obtain a reference to a local port for the given sock, | 78 | /* Obtain a reference to a local port for the given sock, |
79 | * if snum is zero it means select any available local port. | 79 | * if snum is zero it means select any available local port. |
80 | */ | 80 | */ |
81 | int inet_csk_get_port(struct inet_hashinfo *hashinfo, | 81 | int inet_csk_get_port(struct sock *sk, unsigned short snum) |
82 | struct sock *sk, unsigned short snum, | ||
83 | int (*bind_conflict)(const struct sock *sk, | ||
84 | const struct inet_bind_bucket *tb)) | ||
85 | { | 82 | { |
83 | struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo; | ||
86 | struct inet_bind_hashbucket *head; | 84 | struct inet_bind_hashbucket *head; |
87 | struct hlist_node *node; | 85 | struct hlist_node *node; |
88 | struct inet_bind_bucket *tb; | 86 | struct inet_bind_bucket *tb; |
@@ -142,7 +140,7 @@ tb_found: | |||
142 | goto success; | 140 | goto success; |
143 | } else { | 141 | } else { |
144 | ret = 1; | 142 | ret = 1; |
145 | if (bind_conflict(sk, tb)) | 143 | if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) |
146 | goto fail_unlock; | 144 | goto fail_unlock; |
147 | } | 145 | } |
148 | } | 146 | } |
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 48d45008f749..90f422c9447b 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c | |||
@@ -66,8 +66,9 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, | |||
66 | /* | 66 | /* |
67 | * Get rid of any references to a local port held by the given sock. | 67 | * Get rid of any references to a local port held by the given sock. |
68 | */ | 68 | */ |
69 | static void __inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk) | 69 | static void __inet_put_port(struct sock *sk) |
70 | { | 70 | { |
71 | struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo; | ||
71 | const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size); | 72 | const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size); |
72 | struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; | 73 | struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; |
73 | struct inet_bind_bucket *tb; | 74 | struct inet_bind_bucket *tb; |
@@ -81,10 +82,10 @@ static void __inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk) | |||
81 | spin_unlock(&head->lock); | 82 | spin_unlock(&head->lock); |
82 | } | 83 | } |
83 | 84 | ||
84 | void inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk) | 85 | void inet_put_port(struct sock *sk) |
85 | { | 86 | { |
86 | local_bh_disable(); | 87 | local_bh_disable(); |
87 | __inet_put_port(hashinfo, sk); | 88 | __inet_put_port(sk); |
88 | local_bh_enable(); | 89 | local_bh_enable(); |
89 | } | 90 | } |
90 | 91 | ||
@@ -317,8 +318,9 @@ static inline u32 inet_sk_port_offset(const struct sock *sk) | |||
317 | inet->dport); | 318 | inet->dport); |
318 | } | 319 | } |
319 | 320 | ||
320 | void __inet_hash_nolisten(struct inet_hashinfo *hashinfo, struct sock *sk) | 321 | void __inet_hash_nolisten(struct sock *sk) |
321 | { | 322 | { |
323 | struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo; | ||
322 | struct hlist_head *list; | 324 | struct hlist_head *list; |
323 | rwlock_t *lock; | 325 | rwlock_t *lock; |
324 | struct inet_ehash_bucket *head; | 326 | struct inet_ehash_bucket *head; |
@@ -337,13 +339,14 @@ void __inet_hash_nolisten(struct inet_hashinfo *hashinfo, struct sock *sk) | |||
337 | } | 339 | } |
338 | EXPORT_SYMBOL_GPL(__inet_hash_nolisten); | 340 | EXPORT_SYMBOL_GPL(__inet_hash_nolisten); |
339 | 341 | ||
340 | void __inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk) | 342 | static void __inet_hash(struct sock *sk) |
341 | { | 343 | { |
344 | struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo; | ||
342 | struct hlist_head *list; | 345 | struct hlist_head *list; |
343 | rwlock_t *lock; | 346 | rwlock_t *lock; |
344 | 347 | ||
345 | if (sk->sk_state != TCP_LISTEN) { | 348 | if (sk->sk_state != TCP_LISTEN) { |
346 | __inet_hash_nolisten(hashinfo, sk); | 349 | __inet_hash_nolisten(sk); |
347 | return; | 350 | return; |
348 | } | 351 | } |
349 | 352 | ||
@@ -357,13 +360,48 @@ void __inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk) | |||
357 | write_unlock(lock); | 360 | write_unlock(lock); |
358 | wake_up(&hashinfo->lhash_wait); | 361 | wake_up(&hashinfo->lhash_wait); |
359 | } | 362 | } |
360 | EXPORT_SYMBOL_GPL(__inet_hash); | 363 | |
364 | void inet_hash(struct sock *sk) | ||
365 | { | ||
366 | if (sk->sk_state != TCP_CLOSE) { | ||
367 | local_bh_disable(); | ||
368 | __inet_hash(sk); | ||
369 | local_bh_enable(); | ||
370 | } | ||
371 | } | ||
372 | EXPORT_SYMBOL_GPL(inet_hash); | ||
373 | |||
374 | void inet_unhash(struct sock *sk) | ||
375 | { | ||
376 | rwlock_t *lock; | ||
377 | struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo; | ||
378 | |||
379 | if (sk_unhashed(sk)) | ||
380 | goto out; | ||
381 | |||
382 | if (sk->sk_state == TCP_LISTEN) { | ||
383 | local_bh_disable(); | ||
384 | inet_listen_wlock(hashinfo); | ||
385 | lock = &hashinfo->lhash_lock; | ||
386 | } else { | ||
387 | lock = inet_ehash_lockp(hashinfo, sk->sk_hash); | ||
388 | write_lock_bh(lock); | ||
389 | } | ||
390 | |||
391 | if (__sk_del_node_init(sk)) | ||
392 | sock_prot_inuse_add(sk->sk_prot, -1); | ||
393 | write_unlock_bh(lock); | ||
394 | out: | ||
395 | if (sk->sk_state == TCP_LISTEN) | ||
396 | wake_up(&hashinfo->lhash_wait); | ||
397 | } | ||
398 | EXPORT_SYMBOL_GPL(inet_unhash); | ||
361 | 399 | ||
362 | int __inet_hash_connect(struct inet_timewait_death_row *death_row, | 400 | int __inet_hash_connect(struct inet_timewait_death_row *death_row, |
363 | struct sock *sk, | 401 | struct sock *sk, |
364 | int (*check_established)(struct inet_timewait_death_row *, | 402 | int (*check_established)(struct inet_timewait_death_row *, |
365 | struct sock *, __u16, struct inet_timewait_sock **), | 403 | struct sock *, __u16, struct inet_timewait_sock **), |
366 | void (*hash)(struct inet_hashinfo *, struct sock *)) | 404 | void (*hash)(struct sock *sk)) |
367 | { | 405 | { |
368 | struct inet_hashinfo *hinfo = death_row->hashinfo; | 406 | struct inet_hashinfo *hinfo = death_row->hashinfo; |
369 | const unsigned short snum = inet_sk(sk)->num; | 407 | const unsigned short snum = inet_sk(sk)->num; |
@@ -427,7 +465,7 @@ ok: | |||
427 | inet_bind_hash(sk, tb, port); | 465 | inet_bind_hash(sk, tb, port); |
428 | if (sk_unhashed(sk)) { | 466 | if (sk_unhashed(sk)) { |
429 | inet_sk(sk)->sport = htons(port); | 467 | inet_sk(sk)->sport = htons(port); |
430 | hash(hinfo, sk); | 468 | hash(sk); |
431 | } | 469 | } |
432 | spin_unlock(&head->lock); | 470 | spin_unlock(&head->lock); |
433 | 471 | ||
@@ -444,7 +482,7 @@ ok: | |||
444 | tb = inet_csk(sk)->icsk_bind_hash; | 482 | tb = inet_csk(sk)->icsk_bind_hash; |
445 | spin_lock_bh(&head->lock); | 483 | spin_lock_bh(&head->lock); |
446 | if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { | 484 | if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { |
447 | hash(hinfo, sk); | 485 | hash(sk); |
448 | spin_unlock_bh(&head->lock); | 486 | spin_unlock_bh(&head->lock); |
449 | return 0; | 487 | return 0; |
450 | } else { | 488 | } else { |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index a0d373bd9065..071e83a894ad 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1669,7 +1669,7 @@ void tcp_set_state(struct sock *sk, int state) | |||
1669 | sk->sk_prot->unhash(sk); | 1669 | sk->sk_prot->unhash(sk); |
1670 | if (inet_csk(sk)->icsk_bind_hash && | 1670 | if (inet_csk(sk)->icsk_bind_hash && |
1671 | !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) | 1671 | !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) |
1672 | inet_put_port(&tcp_hashinfo, sk); | 1672 | inet_put_port(sk); |
1673 | /* fall through */ | 1673 | /* fall through */ |
1674 | default: | 1674 | default: |
1675 | if (oldstate==TCP_ESTABLISHED) | 1675 | if (oldstate==TCP_ESTABLISHED) |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 77c1939a2b0d..63414ea427c5 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -108,22 +108,6 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { | |||
108 | .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait), | 108 | .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait), |
109 | }; | 109 | }; |
110 | 110 | ||
111 | static int tcp_v4_get_port(struct sock *sk, unsigned short snum) | ||
112 | { | ||
113 | return inet_csk_get_port(&tcp_hashinfo, sk, snum, | ||
114 | inet_csk_bind_conflict); | ||
115 | } | ||
116 | |||
117 | static void tcp_v4_hash(struct sock *sk) | ||
118 | { | ||
119 | inet_hash(&tcp_hashinfo, sk); | ||
120 | } | ||
121 | |||
122 | void tcp_unhash(struct sock *sk) | ||
123 | { | ||
124 | inet_unhash(&tcp_hashinfo, sk); | ||
125 | } | ||
126 | |||
127 | static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb) | 111 | static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb) |
128 | { | 112 | { |
129 | return secure_tcp_sequence_number(ip_hdr(skb)->daddr, | 113 | return secure_tcp_sequence_number(ip_hdr(skb)->daddr, |
@@ -1478,8 +1462,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1478 | } | 1462 | } |
1479 | #endif | 1463 | #endif |
1480 | 1464 | ||
1481 | __inet_hash_nolisten(&tcp_hashinfo, newsk); | 1465 | __inet_hash_nolisten(newsk); |
1482 | __inet_inherit_port(&tcp_hashinfo, sk, newsk); | 1466 | __inet_inherit_port(sk, newsk); |
1483 | 1467 | ||
1484 | return newsk; | 1468 | return newsk; |
1485 | 1469 | ||
@@ -1827,6 +1811,7 @@ struct inet_connection_sock_af_ops ipv4_specific = { | |||
1827 | .getsockopt = ip_getsockopt, | 1811 | .getsockopt = ip_getsockopt, |
1828 | .addr2sockaddr = inet_csk_addr2sockaddr, | 1812 | .addr2sockaddr = inet_csk_addr2sockaddr, |
1829 | .sockaddr_len = sizeof(struct sockaddr_in), | 1813 | .sockaddr_len = sizeof(struct sockaddr_in), |
1814 | .bind_conflict = inet_csk_bind_conflict, | ||
1830 | #ifdef CONFIG_COMPAT | 1815 | #ifdef CONFIG_COMPAT |
1831 | .compat_setsockopt = compat_ip_setsockopt, | 1816 | .compat_setsockopt = compat_ip_setsockopt, |
1832 | .compat_getsockopt = compat_ip_getsockopt, | 1817 | .compat_getsockopt = compat_ip_getsockopt, |
@@ -1926,7 +1911,7 @@ int tcp_v4_destroy_sock(struct sock *sk) | |||
1926 | 1911 | ||
1927 | /* Clean up a referenced TCP bind bucket. */ | 1912 | /* Clean up a referenced TCP bind bucket. */ |
1928 | if (inet_csk(sk)->icsk_bind_hash) | 1913 | if (inet_csk(sk)->icsk_bind_hash) |
1929 | inet_put_port(&tcp_hashinfo, sk); | 1914 | inet_put_port(sk); |
1930 | 1915 | ||
1931 | /* | 1916 | /* |
1932 | * If sendmsg cached page exists, toss it. | 1917 | * If sendmsg cached page exists, toss it. |
@@ -2435,9 +2420,9 @@ struct proto tcp_prot = { | |||
2435 | .getsockopt = tcp_getsockopt, | 2420 | .getsockopt = tcp_getsockopt, |
2436 | .recvmsg = tcp_recvmsg, | 2421 | .recvmsg = tcp_recvmsg, |
2437 | .backlog_rcv = tcp_v4_do_rcv, | 2422 | .backlog_rcv = tcp_v4_do_rcv, |
2438 | .hash = tcp_v4_hash, | 2423 | .hash = inet_hash, |
2439 | .unhash = tcp_unhash, | 2424 | .unhash = inet_unhash, |
2440 | .get_port = tcp_v4_get_port, | 2425 | .get_port = inet_csk_get_port, |
2441 | .enter_memory_pressure = tcp_enter_memory_pressure, | 2426 | .enter_memory_pressure = tcp_enter_memory_pressure, |
2442 | .sockets_allocated = &tcp_sockets_allocated, | 2427 | .sockets_allocated = &tcp_sockets_allocated, |
2443 | .orphan_count = &tcp_orphan_count, | 2428 | .orphan_count = &tcp_orphan_count, |
@@ -2450,6 +2435,7 @@ struct proto tcp_prot = { | |||
2450 | .obj_size = sizeof(struct tcp_sock), | 2435 | .obj_size = sizeof(struct tcp_sock), |
2451 | .twsk_prot = &tcp_timewait_sock_ops, | 2436 | .twsk_prot = &tcp_timewait_sock_ops, |
2452 | .rsk_prot = &tcp_request_sock_ops, | 2437 | .rsk_prot = &tcp_request_sock_ops, |
2438 | .hashinfo = &tcp_hashinfo, | ||
2453 | #ifdef CONFIG_COMPAT | 2439 | #ifdef CONFIG_COMPAT |
2454 | .compat_setsockopt = compat_tcp_setsockopt, | 2440 | .compat_setsockopt = compat_tcp_setsockopt, |
2455 | .compat_getsockopt = compat_tcp_getsockopt, | 2441 | .compat_getsockopt = compat_tcp_getsockopt, |
@@ -2467,7 +2453,6 @@ void __init tcp_v4_init(struct net_proto_family *ops) | |||
2467 | EXPORT_SYMBOL(ipv4_specific); | 2453 | EXPORT_SYMBOL(ipv4_specific); |
2468 | EXPORT_SYMBOL(tcp_hashinfo); | 2454 | EXPORT_SYMBOL(tcp_hashinfo); |
2469 | EXPORT_SYMBOL(tcp_prot); | 2455 | EXPORT_SYMBOL(tcp_prot); |
2470 | EXPORT_SYMBOL(tcp_unhash); | ||
2471 | EXPORT_SYMBOL(tcp_v4_conn_request); | 2456 | EXPORT_SYMBOL(tcp_v4_conn_request); |
2472 | EXPORT_SYMBOL(tcp_v4_connect); | 2457 | EXPORT_SYMBOL(tcp_v4_connect); |
2473 | EXPORT_SYMBOL(tcp_v4_do_rcv); | 2458 | EXPORT_SYMBOL(tcp_v4_do_rcv); |
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index d325a9958909..43f3993e1f30 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c | |||
@@ -22,9 +22,9 @@ | |||
22 | #include <net/inet6_hashtables.h> | 22 | #include <net/inet6_hashtables.h> |
23 | #include <net/ip.h> | 23 | #include <net/ip.h> |
24 | 24 | ||
25 | void __inet6_hash(struct inet_hashinfo *hashinfo, | 25 | void __inet6_hash(struct sock *sk) |
26 | struct sock *sk) | ||
27 | { | 26 | { |
27 | struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo; | ||
28 | struct hlist_head *list; | 28 | struct hlist_head *list; |
29 | rwlock_t *lock; | 29 | rwlock_t *lock; |
30 | 30 | ||
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 59d0029e93a7..12750f2b05ab 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -86,12 +86,6 @@ static struct tcp_sock_af_ops tcp_sock_ipv6_specific; | |||
86 | static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; | 86 | static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; |
87 | #endif | 87 | #endif |
88 | 88 | ||
89 | static int tcp_v6_get_port(struct sock *sk, unsigned short snum) | ||
90 | { | ||
91 | return inet_csk_get_port(&tcp_hashinfo, sk, snum, | ||
92 | inet6_csk_bind_conflict); | ||
93 | } | ||
94 | |||
95 | static void tcp_v6_hash(struct sock *sk) | 89 | static void tcp_v6_hash(struct sock *sk) |
96 | { | 90 | { |
97 | if (sk->sk_state != TCP_CLOSE) { | 91 | if (sk->sk_state != TCP_CLOSE) { |
@@ -100,7 +94,7 @@ static void tcp_v6_hash(struct sock *sk) | |||
100 | return; | 94 | return; |
101 | } | 95 | } |
102 | local_bh_disable(); | 96 | local_bh_disable(); |
103 | __inet6_hash(&tcp_hashinfo, sk); | 97 | __inet6_hash(sk); |
104 | local_bh_enable(); | 98 | local_bh_enable(); |
105 | } | 99 | } |
106 | } | 100 | } |
@@ -1504,8 +1498,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1504 | } | 1498 | } |
1505 | #endif | 1499 | #endif |
1506 | 1500 | ||
1507 | __inet6_hash(&tcp_hashinfo, newsk); | 1501 | __inet6_hash(newsk); |
1508 | inet_inherit_port(&tcp_hashinfo, sk, newsk); | 1502 | inet_inherit_port(sk, newsk); |
1509 | 1503 | ||
1510 | return newsk; | 1504 | return newsk; |
1511 | 1505 | ||
@@ -1833,6 +1827,7 @@ static struct inet_connection_sock_af_ops ipv6_specific = { | |||
1833 | .getsockopt = ipv6_getsockopt, | 1827 | .getsockopt = ipv6_getsockopt, |
1834 | .addr2sockaddr = inet6_csk_addr2sockaddr, | 1828 | .addr2sockaddr = inet6_csk_addr2sockaddr, |
1835 | .sockaddr_len = sizeof(struct sockaddr_in6), | 1829 | .sockaddr_len = sizeof(struct sockaddr_in6), |
1830 | .bind_conflict = inet6_csk_bind_conflict, | ||
1836 | #ifdef CONFIG_COMPAT | 1831 | #ifdef CONFIG_COMPAT |
1837 | .compat_setsockopt = compat_ipv6_setsockopt, | 1832 | .compat_setsockopt = compat_ipv6_setsockopt, |
1838 | .compat_getsockopt = compat_ipv6_getsockopt, | 1833 | .compat_getsockopt = compat_ipv6_getsockopt, |
@@ -1864,6 +1859,7 @@ static struct inet_connection_sock_af_ops ipv6_mapped = { | |||
1864 | .getsockopt = ipv6_getsockopt, | 1859 | .getsockopt = ipv6_getsockopt, |
1865 | .addr2sockaddr = inet6_csk_addr2sockaddr, | 1860 | .addr2sockaddr = inet6_csk_addr2sockaddr, |
1866 | .sockaddr_len = sizeof(struct sockaddr_in6), | 1861 | .sockaddr_len = sizeof(struct sockaddr_in6), |
1862 | .bind_conflict = inet6_csk_bind_conflict, | ||
1867 | #ifdef CONFIG_COMPAT | 1863 | #ifdef CONFIG_COMPAT |
1868 | .compat_setsockopt = compat_ipv6_setsockopt, | 1864 | .compat_setsockopt = compat_ipv6_setsockopt, |
1869 | .compat_getsockopt = compat_ipv6_getsockopt, | 1865 | .compat_getsockopt = compat_ipv6_getsockopt, |
@@ -2127,8 +2123,8 @@ struct proto tcpv6_prot = { | |||
2127 | .recvmsg = tcp_recvmsg, | 2123 | .recvmsg = tcp_recvmsg, |
2128 | .backlog_rcv = tcp_v6_do_rcv, | 2124 | .backlog_rcv = tcp_v6_do_rcv, |
2129 | .hash = tcp_v6_hash, | 2125 | .hash = tcp_v6_hash, |
2130 | .unhash = tcp_unhash, | 2126 | .unhash = inet_unhash, |
2131 | .get_port = tcp_v6_get_port, | 2127 | .get_port = inet_csk_get_port, |
2132 | .enter_memory_pressure = tcp_enter_memory_pressure, | 2128 | .enter_memory_pressure = tcp_enter_memory_pressure, |
2133 | .sockets_allocated = &tcp_sockets_allocated, | 2129 | .sockets_allocated = &tcp_sockets_allocated, |
2134 | .memory_allocated = &tcp_memory_allocated, | 2130 | .memory_allocated = &tcp_memory_allocated, |
@@ -2141,6 +2137,7 @@ struct proto tcpv6_prot = { | |||
2141 | .obj_size = sizeof(struct tcp6_sock), | 2137 | .obj_size = sizeof(struct tcp6_sock), |
2142 | .twsk_prot = &tcp6_timewait_sock_ops, | 2138 | .twsk_prot = &tcp6_timewait_sock_ops, |
2143 | .rsk_prot = &tcp6_request_sock_ops, | 2139 | .rsk_prot = &tcp6_request_sock_ops, |
2140 | .hashinfo = &tcp_hashinfo, | ||
2144 | #ifdef CONFIG_COMPAT | 2141 | #ifdef CONFIG_COMPAT |
2145 | .compat_setsockopt = compat_tcp_setsockopt, | 2142 | .compat_setsockopt = compat_tcp_setsockopt, |
2146 | .compat_getsockopt = compat_tcp_getsockopt, | 2143 | .compat_getsockopt = compat_tcp_getsockopt, |
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index 09c255002e56..e77592d050ce 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig | |||
@@ -98,6 +98,18 @@ config MAC80211_DEBUGFS | |||
98 | 98 | ||
99 | Say N unless you know you need this. | 99 | Say N unless you know you need this. |
100 | 100 | ||
101 | config MAC80211_DEBUG_PACKET_ALIGNMENT | ||
102 | bool "Enable packet alignment debugging" | ||
103 | depends on MAC80211 | ||
104 | help | ||
105 | This option is recommended for driver authors and strongly | ||
106 | discouraged for everybody else, it will trigger a warning | ||
107 | when a driver hands mac80211 a buffer that is aligned in | ||
108 | a way that will cause problems with the IP stack on some | ||
109 | architectures. | ||
110 | |||
111 | Say N unless you're writing a mac80211 based driver. | ||
112 | |||
101 | config MAC80211_DEBUG | 113 | config MAC80211_DEBUG |
102 | bool "Enable debugging output" | 114 | bool "Enable debugging output" |
103 | depends on MAC80211 | 115 | depends on MAC80211 |
diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c index 5dcc2d61551f..67b7c75c430d 100644 --- a/net/mac80211/ieee80211.c +++ b/net/mac80211/ieee80211.c | |||
@@ -1344,17 +1344,17 @@ static int __init ieee80211_init(void) | |||
1344 | 1344 | ||
1345 | ret = rc80211_simple_init(); | 1345 | ret = rc80211_simple_init(); |
1346 | if (ret) | 1346 | if (ret) |
1347 | goto fail; | 1347 | goto out; |
1348 | 1348 | ||
1349 | ret = rc80211_pid_init(); | 1349 | ret = rc80211_pid_init(); |
1350 | if (ret) | 1350 | if (ret) |
1351 | goto fail_simple; | 1351 | goto out_cleanup_simple; |
1352 | 1352 | ||
1353 | ret = ieee80211_wme_register(); | 1353 | ret = ieee80211_wme_register(); |
1354 | if (ret) { | 1354 | if (ret) { |
1355 | printk(KERN_DEBUG "ieee80211_init: failed to " | 1355 | printk(KERN_DEBUG "ieee80211_init: failed to " |
1356 | "initialize WME (err=%d)\n", ret); | 1356 | "initialize WME (err=%d)\n", ret); |
1357 | goto fail_pid; | 1357 | goto out_cleanup_pid; |
1358 | } | 1358 | } |
1359 | 1359 | ||
1360 | ieee80211_debugfs_netdev_init(); | 1360 | ieee80211_debugfs_netdev_init(); |
@@ -1362,11 +1362,11 @@ static int __init ieee80211_init(void) | |||
1362 | 1362 | ||
1363 | return 0; | 1363 | return 0; |
1364 | 1364 | ||
1365 | fail_pid: | 1365 | out_cleanup_pid: |
1366 | rc80211_simple_exit(); | ||
1367 | fail_simple: | ||
1368 | rc80211_pid_exit(); | 1366 | rc80211_pid_exit(); |
1369 | fail: | 1367 | out_cleanup_simple: |
1368 | rc80211_simple_exit(); | ||
1369 | out: | ||
1370 | return ret; | 1370 | return ret; |
1371 | } | 1371 | } |
1372 | 1372 | ||
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c index 554c4baed6fb..c339571632b2 100644 --- a/net/mac80211/rc80211_pid_algo.c +++ b/net/mac80211/rc80211_pid_algo.c | |||
@@ -538,7 +538,7 @@ int __init rc80211_pid_init(void) | |||
538 | return ieee80211_rate_control_register(&mac80211_rcpid); | 538 | return ieee80211_rate_control_register(&mac80211_rcpid); |
539 | } | 539 | } |
540 | 540 | ||
541 | void __exit rc80211_pid_exit(void) | 541 | void rc80211_pid_exit(void) |
542 | { | 542 | { |
543 | ieee80211_rate_control_unregister(&mac80211_rcpid); | 543 | ieee80211_rate_control_unregister(&mac80211_rcpid); |
544 | } | 544 | } |
diff --git a/net/mac80211/rc80211_simple.c b/net/mac80211/rc80211_simple.c index 934676d687d6..9a78b116acff 100644 --- a/net/mac80211/rc80211_simple.c +++ b/net/mac80211/rc80211_simple.c | |||
@@ -389,7 +389,7 @@ int __init rc80211_simple_init(void) | |||
389 | return ieee80211_rate_control_register(&mac80211_rcsimple); | 389 | return ieee80211_rate_control_register(&mac80211_rcsimple); |
390 | } | 390 | } |
391 | 391 | ||
392 | void __exit rc80211_simple_exit(void) | 392 | void rc80211_simple_exit(void) |
393 | { | 393 | { |
394 | ieee80211_rate_control_unregister(&mac80211_rcsimple); | 394 | ieee80211_rate_control_unregister(&mac80211_rcsimple); |
395 | } | 395 | } |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index d44c87269bcb..535407d07fa4 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -340,11 +340,15 @@ static u32 ieee80211_rx_load_stats(struct ieee80211_local *local, | |||
340 | return load; | 340 | return load; |
341 | } | 341 | } |
342 | 342 | ||
343 | #ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT | ||
343 | static ieee80211_txrx_result | 344 | static ieee80211_txrx_result |
344 | ieee80211_rx_h_verify_ip_alignment(struct ieee80211_txrx_data *rx) | 345 | ieee80211_rx_h_verify_ip_alignment(struct ieee80211_txrx_data *rx) |
345 | { | 346 | { |
346 | int hdrlen; | 347 | int hdrlen; |
347 | 348 | ||
349 | if (!WLAN_FC_DATA_PRESENT(rx->fc)) | ||
350 | return TXRX_CONTINUE; | ||
351 | |||
348 | /* | 352 | /* |
349 | * Drivers are required to align the payload data in a way that | 353 | * Drivers are required to align the payload data in a way that |
350 | * guarantees that the contained IP header is aligned to a four- | 354 | * guarantees that the contained IP header is aligned to a four- |
@@ -371,11 +375,14 @@ ieee80211_rx_h_verify_ip_alignment(struct ieee80211_txrx_data *rx) | |||
371 | 375 | ||
372 | return TXRX_CONTINUE; | 376 | return TXRX_CONTINUE; |
373 | } | 377 | } |
378 | #endif | ||
374 | 379 | ||
375 | ieee80211_rx_handler ieee80211_rx_pre_handlers[] = | 380 | ieee80211_rx_handler ieee80211_rx_pre_handlers[] = |
376 | { | 381 | { |
377 | ieee80211_rx_h_parse_qos, | 382 | ieee80211_rx_h_parse_qos, |
383 | #ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT | ||
378 | ieee80211_rx_h_verify_ip_alignment, | 384 | ieee80211_rx_h_verify_ip_alignment, |
385 | #endif | ||
379 | NULL | 386 | NULL |
380 | }; | 387 | }; |
381 | 388 | ||