-rw-r--r--  Documentation/powerpc/dts-bindings/fsl/mpc5200.txt | 10
-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  arch/alpha/include/asm/unistd.h | 3
-rw-r--r--  drivers/atm/solos-pci.c | 29
-rw-r--r--  drivers/isdn/hardware/mISDN/hfcmulti.c | 2
-rw-r--r--  drivers/isdn/i4l/isdn_ppp.c | 352
-rw-r--r--  drivers/misc/iwmc3200top/main.c | 30
-rw-r--r--  drivers/net/Kconfig | 2
-rw-r--r--  drivers/net/arm/ks8695net.c | 35
-rw-r--r--  drivers/net/arm/w90p910_ether.c | 4
-rw-r--r--  drivers/net/atl1c/atl1c_main.c | 2
-rw-r--r--  drivers/net/bnx2x.h | 21
-rw-r--r--  drivers/net/bnx2x_main.c | 370
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 85
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/can/Kconfig | 62
-rw-r--r--  drivers/net/can/Makefile | 1
-rw-r--r--  drivers/net/can/dev.c | 6
-rw-r--r--  drivers/net/can/mcp251x.c | 18
-rw-r--r--  drivers/net/can/mscan/Kconfig | 23
-rw-r--r--  drivers/net/can/mscan/Makefile | 5
-rw-r--r--  drivers/net/can/mscan/mpc5xxx_can.c | 259
-rw-r--r--  drivers/net/can/mscan/mscan.c | 668
-rw-r--r--  drivers/net/can/mscan/mscan.h | 296
-rw-r--r--  drivers/net/can/sja1000/Kconfig | 47
-rw-r--r--  drivers/net/can/sja1000/sja1000.c | 2
-rw-r--r--  drivers/net/can/usb/Kconfig | 10
-rw-r--r--  drivers/net/can/usb/Makefile | 2
-rw-r--r--  drivers/net/davinci_emac.c | 3
-rw-r--r--  drivers/net/dm9000.c | 143
-rw-r--r--  drivers/net/dm9000.h | 7
-rw-r--r--  drivers/net/ethoc.c | 2
-rw-r--r--  drivers/net/forcedeth.c | 5
-rw-r--r--  drivers/net/gianfar.c | 43
-rw-r--r--  drivers/net/gianfar_sysfs.c | 2
-rw-r--r--  drivers/net/hamradio/mkiss.c | 2
-rw-r--r--  drivers/net/igb/igb.h | 13
-rw-r--r--  drivers/net/igb/igb_ethtool.c | 181
-rw-r--r--  drivers/net/igb/igb_main.c | 213
-rw-r--r--  drivers/net/ipg.c | 2
-rw-r--r--  drivers/net/irda/irda-usb.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 8
-rw-r--r--  drivers/net/macvlan.c | 78
-rw-r--r--  drivers/net/niu.c | 2
-rw-r--r--  drivers/net/pcmcia/fmvj18x_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/nmclan_cs.c | 2
-rw-r--r--  drivers/net/ppp_async.c | 2
-rw-r--r--  drivers/net/ppp_generic.c | 13
-rw-r--r--  drivers/net/pppoe.c | 2
-rw-r--r--  drivers/net/pppol2tp.c | 4
-rw-r--r--  drivers/net/qlge/qlge.h | 2
-rw-r--r--  drivers/net/qlge/qlge_main.c | 26
-rw-r--r--  drivers/net/r6040.c | 4
-rw-r--r--  drivers/net/r8169.c | 4
-rw-r--r--  drivers/net/s2io.c | 1
-rw-r--r--  drivers/net/smsc911x.c | 2
-rw-r--r--  drivers/net/smsc9420.c | 2
-rw-r--r--  drivers/net/tg3.c | 706
-rw-r--r--  drivers/net/tg3.h | 34
-rw-r--r--  drivers/net/tokenring/3c359.c | 3
-rw-r--r--  drivers/net/tokenring/olympic.c | 4
-rw-r--r--  drivers/net/typhoon.c | 2
-rw-r--r--  drivers/net/via-rhine.c | 2
-rw-r--r--  drivers/net/via-velocity.c | 2
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_defs.h | 246
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 359
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 10
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 12
-rw-r--r--  drivers/net/wan/dscc4.c | 2
-rw-r--r--  drivers/net/wireless/adm8211.c | 2
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2100.c | 6
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.c | 141
-rw-r--r--  drivers/net/wireless/ipw2x00/libipw.h | 8
-rw-r--r--  drivers/net/wireless/ipw2x00/libipw_module.c | 42
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-rs.c | 2
-rw-r--r--  drivers/net/wireless/p54/p54pci.c | 2
-rw-r--r--  drivers/net/wireless/p54/p54usb.c | 10
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180_dev.c | 2
-rw-r--r--  drivers/s390/net/Makefile | 6
-rw-r--r--  drivers/s390/net/claw.c | 82
-rw-r--r--  drivers/s390/net/claw.h | 12
-rw-r--r--  drivers/s390/net/ctcm_fsms.c | 1
-rw-r--r--  drivers/s390/net/ctcm_fsms.h | 1
-rw-r--r--  drivers/s390/net/ctcm_main.c | 109
-rw-r--r--  drivers/s390/net/ctcm_main.h | 20
-rw-r--r--  drivers/s390/net/ctcm_mpc.c | 1
-rw-r--r--  drivers/s390/net/ctcm_sysfs.c | 11
-rw-r--r--  drivers/s390/net/cu3088.c | 148
-rw-r--r--  drivers/s390/net/cu3088.h | 41
-rw-r--r--  drivers/s390/net/fsm.c | 1
-rw-r--r--  drivers/s390/net/fsm.h | 2
-rw-r--r--  drivers/s390/net/lcs.c | 101
-rw-r--r--  drivers/s390/net/lcs.h | 18
-rw-r--r--  drivers/s390/net/netiucv.c | 4
-rw-r--r--  drivers/s390/net/qeth_core.h | 6
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 214
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h | 45
-rw-r--r--  drivers/s390/net/qeth_core_sys.c | 83
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 29
-rw-r--r--  drivers/s390/net/qeth_l3.h | 2
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 142
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c | 67
-rw-r--r--  include/linux/if_ether.h | 4
-rw-r--r--  include/linux/isdn_ppp.h | 2
-rw-r--r--  include/linux/netdevice.h | 77
-rw-r--r--  include/linux/notifier.h | 1
-rw-r--r--  include/linux/tcp.h | 6
-rw-r--r--  include/net/inet_hashtables.h | 4
-rw-r--r--  include/net/inetpeer.h | 16
-rw-r--r--  include/net/phonet/pn_dev.h | 2
-rw-r--r--  include/net/sctp/structs.h | 2
-rw-r--r--  include/net/tcp.h | 3
-rw-r--r--  kernel/time/clocksource.c | 6
-rw-r--r--  kernel/time/timecompare.c | 6
-rw-r--r--  net/8021q/vlan.c | 2
-rw-r--r--  net/8021q/vlan.h | 17
-rw-r--r--  net/8021q/vlan_core.c | 16
-rw-r--r--  net/8021q/vlan_dev.c | 51
-rw-r--r--  net/atm/ioctl.c | 177
-rw-r--r--  net/bluetooth/hci_conn.c | 1
-rw-r--r--  net/bluetooth/l2cap.c | 13
-rw-r--r--  net/core/dev.c | 193
-rw-r--r--  net/core/link_watch.c | 94
-rw-r--r--  net/core/skbuff.c | 3
-rw-r--r--  net/decnet/dn_dev.c | 23
-rw-r--r--  net/ethernet/eth.c | 7
-rw-r--r--  net/ieee802154/wpan-class.c | 2
-rw-r--r--  net/ipv4/devinet.c | 61
-rw-r--r--  net/ipv4/fib_frontend.c | 11
-rw-r--r--  net/ipv4/igmp.c | 27
-rw-r--r--  net/ipv4/inetpeer.c | 5
-rw-r--r--  net/ipv4/ip_gre.c | 2
-rw-r--r--  net/ipv4/ipip.c | 2
-rw-r--r--  net/ipv4/ipmr.c | 4
-rw-r--r--  net/ipv4/route.c | 2
-rw-r--r--  net/ipv4/tcp.c | 19
-rw-r--r--  net/ipv4/tcp_input.c | 4
-rw-r--r--  net/ipv4/tcp_ipv4.c | 22
-rw-r--r--  net/ipv4/tcp_minisocks.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 173
-rw-r--r--  net/ipv6/anycast.c | 29
-rw-r--r--  net/ipv6/ip6_tunnel.c | 2
-rw-r--r--  net/ipv6/mcast.c | 51
-rw-r--r--  net/ipv6/sit.c | 2
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/iucv/iucv.c | 16
-rw-r--r--  net/key/af_key.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_proto_dccp.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_proto_gre.c | 2
-rw-r--r--  net/netlink/af_netlink.c | 2
-rw-r--r--  net/phonet/af_phonet.c | 22
-rw-r--r--  net/phonet/pn_dev.c | 124
-rw-r--r--  net/phonet/pn_netlink.c | 6
-rw-r--r--  net/sched/act_mirred.c | 105
-rw-r--r--  net/sched/sch_generic.c | 18
-rw-r--r--  net/sctp/associola.c | 4
-rw-r--r--  net/sctp/sm_statefuns.c | 15
-rw-r--r--  net/sctp/socket.c | 40
-rw-r--r--  net/sctp/transport.c | 3
-rw-r--r--  net/socket.c | 335
160 files changed, 5029 insertions(+), 2683 deletions(-)
diff --git a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt b/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
index 8447fd7090d0..cabc780f7258 100644
--- a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
@@ -178,3 +178,13 @@ External interrupts:
  external irq3: interrupts = <1 3 n>;
 'n' is sense (0: level high, 1: edge rising, 2: edge falling 3: level low)
 
+fsl,mpc5200-mscan nodes
+-----------------------
+In addition to the required compatible-, reg- and interrupt-properites, you can
+also specify which clock source shall be used for the controller:
+
+- fsl,mscan-clock-source- a string describing the clock source. Valid values
+                          are:   "ip" for ip bus clock
+                                 "ref" for reference clock (XTAL)
+                          "ref" is default in case this property is not
+                          present.
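
For illustration only, a minimal mscan node using the optional clock-source
property described above might look like the following sketch; the node name,
unit address, reg window, and interrupt specifier here are invented for the
example and are not taken from the patch:

	can@900 {				/* hypothetical unit address */
		compatible = "fsl,mpc5200-mscan";
		reg = <0x900 0x80>;		/* hypothetical register window */
		interrupts = <2 17 0>;		/* hypothetical interrupt specifier */
		fsl,mscan-clock-source = "ip";	/* omit to get the "ref" default */
	};
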
diff --git a/MAINTAINERS b/MAINTAINERS
index 7f2f29cf75ff..b2e3f3507ca3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3704,9 +3704,9 @@ F: include/linux/if_*
 F:	include/linux/*device.h
 
 NETXEN (1/10) GbE SUPPORT
-M:	Dhananjay Phadke <dhananjay@netxen.com>
+M:	Amit Kumar Salecha <amit.salecha@qlogic.com>
 L:	netdev@vger.kernel.org
-W:	http://www.netxen.com
+W:	http://www.qlogic.com
 S:	Supported
 F:	drivers/net/netxen/
 
@@ -4304,6 +4304,7 @@ F: drivers/video/aty/aty128fb.c
 RALINK RT2X00 WIRELESS LAN DRIVER
 P:	rt2x00 project
 M:	Ivo van Doorn <IvDoorn@gmail.com>
+M:	Gertjan van Wingerde <gwingerde@gmail.com>
 L:	linux-wireless@vger.kernel.org
 L:	users@rt2x00.serialmonkey.com (moderated for non-subscribers)
 W:	http://rt2x00.serialmonkey.com/
@@ -4391,7 +4392,7 @@ RFKILL
 M:	Johannes Berg <johannes@sipsolutions.net>
 L:	linux-wireless@vger.kernel.org
 S:	Maintained
-F	Documentation/rfkill.txt
+F:	Documentation/rfkill.txt
 F:	net/rfkill/
 
 RISCOM8 DRIVER
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 5b5c17485942..7f23665122df 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -433,10 +433,11 @@
 #define __NR_signalfd			476
 #define __NR_timerfd			477
 #define __NR_eventfd			478
+#define __NR_recvmmsg			479
 
 #ifdef __KERNEL__
 
-#define NR_SYSCALLS			479
+#define NR_SYSCALLS			480
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index d7ad19d2603a..51eed679a059 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -531,34 +531,37 @@ static int flash_upgrade(struct solos_card *card, int chip)
 	int numblocks = 0;
 	int offset;
 
-	if (chip == 0) {
+	switch (chip) {
+	case 0:
 		fw_name = "solos-FPGA.bin";
 		blocksize = FPGA_BLOCK;
-	}
-
-	if (chip == 1) {
+		break;
+	case 1:
 		fw_name = "solos-Firmware.bin";
 		blocksize = SOLOS_BLOCK;
-	}
-
-	if (chip == 2){
+		break;
+	case 2:
 		if (card->fpga_version > LEGACY_BUFFERS){
 			fw_name = "solos-db-FPGA.bin";
 			blocksize = FPGA_BLOCK;
 		} else {
-			dev_info(&card->dev->dev, "FPGA version doesn't support daughter board upgrades\n");
+			dev_info(&card->dev->dev, "FPGA version doesn't support"
+				 " daughter board upgrades\n");
 			return -EPERM;
 		}
-	}
-
-	if (chip == 3){
+		break;
+	case 3:
 		if (card->fpga_version > LEGACY_BUFFERS){
 			fw_name = "solos-Firmware.bin";
 			blocksize = SOLOS_BLOCK;
 		} else {
-			dev_info(&card->dev->dev, "FPGA version doesn't support daughter board upgrades\n");
+			dev_info(&card->dev->dev, "FPGA version doesn't support"
+				 " daughter board upgrades\n");
 			return -EPERM;
 		}
+		break;
+	default:
+		return -ENODEV;
 	}
 
 	if (request_firmware(&fw, fw_name, &card->dev->dev))
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index faed794cf75a..a6624ad252c5 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -5481,7 +5481,7 @@ HFCmulti_init(void)
 	if (err) {
 		printk(KERN_ERR "error registering embedded driver: "
 			"%x\n", err);
-		return -err;
+		return err;
 	}
 	HFC_cnt++;
 	printk(KERN_INFO "%d devices registered\n", HFC_cnt);
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 2d14b64202a3..642d5aaf53ce 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -1535,10 +1535,8 @@ static int isdn_ppp_mp_bundle_array_init(void)
 	int i, sz = ISDN_MAX_CHANNELS*sizeof(ippp_bundle);
 	if( (isdn_ppp_bundle_arr = kzalloc(sz, GFP_KERNEL)) == NULL )
 		return -ENOMEM;
-	for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
+	for( i = 0; i < ISDN_MAX_CHANNELS; i++ )
 		spin_lock_init(&isdn_ppp_bundle_arr[i].lock);
-		skb_queue_head_init(&isdn_ppp_bundle_arr[i].frags);
-	}
 	return 0;
 }
 
@@ -1571,7 +1569,7 @@ static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to )
 		if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL)
 			return -ENOMEM;
 		lp->next = lp->last = lp;	/* nobody else in a queue */
-		skb_queue_head_init(&lp->netdev->pb->frags);
+		lp->netdev->pb->frags = NULL;
 		lp->netdev->pb->frames = 0;
 		lp->netdev->pb->seq = UINT_MAX;
 	}
@@ -1583,29 +1581,28 @@ static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to )
 
 static u32 isdn_ppp_mp_get_seq( int short_seq,
 			struct sk_buff * skb, u32 last_seq );
-static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from,
-				struct sk_buff *to);
-static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
-				   struct sk_buff *from, struct sk_buff *to,
-				   u32 lastseq);
-static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb);
+static struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp,
+			struct sk_buff * from, struct sk_buff * to );
+static void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp,
+				struct sk_buff * from, struct sk_buff * to );
+static void isdn_ppp_mp_free_skb( ippp_bundle * mp, struct sk_buff * skb );
 static void isdn_ppp_mp_print_recv_pkt( int slot, struct sk_buff * skb );
 
 static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
 							struct sk_buff *skb)
 {
-	struct sk_buff *newfrag, *frag, *start, *nextf;
-	u32 newseq, minseq, thisseq;
-	isdn_mppp_stats *stats;
 	struct ippp_struct *is;
+	isdn_net_local * lpq;
+	ippp_bundle * mp;
+	isdn_mppp_stats * stats;
+	struct sk_buff * newfrag, * frag, * start, *nextf;
+	u32 newseq, minseq, thisseq;
 	unsigned long flags;
-	isdn_net_local *lpq;
-	ippp_bundle *mp;
 	int slot;
 
 	spin_lock_irqsave(&net_dev->pb->lock, flags);
 	mp = net_dev->pb;
 	stats = &mp->stats;
 	slot = lp->ppp_slot;
 	if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
 		printk(KERN_ERR "%s: lp->ppp_slot(%d)\n",
@@ -1616,19 +1613,20 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
 		return;
 	}
 	is = ippp_table[slot];
-	if (++mp->frames > stats->max_queue_len)
+	if( ++mp->frames > stats->max_queue_len )
 		stats->max_queue_len = mp->frames;
 
 	if (is->debug & 0x8)
 		isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb);
 
 	newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ,
 						skb, is->last_link_seqno);
+
 
 	/* if this packet seq # is less than last already processed one,
 	 * toss it right away, but check for sequence start case first
 	 */
-	if (mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT)) {
+	if( mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT) ) {
 		mp->seq = newseq;	/* the first packet: required for
 					 * rfc1990 non-compliant clients --
 					 * prevents constant packet toss */
@@ -1638,7 +1636,7 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
 		spin_unlock_irqrestore(&mp->lock, flags);
 		return;
 	}
 
 	/* find the minimum received sequence number over all links */
 	is->last_link_seqno = minseq = newseq;
 	for (lpq = net_dev->queue;;) {
@@ -1659,31 +1657,22 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
 	 * packets */
 	newfrag = skb;
 
-	/* Insert new fragment into the proper sequence slot. */
-	skb_queue_walk(&mp->frags, frag) {
-		if (MP_SEQ(frag) == newseq) {
-			isdn_ppp_mp_free_skb(mp, newfrag);
-			newfrag = NULL;
-			break;
-		}
-		if (MP_LT(newseq, MP_SEQ(frag))) {
-			__skb_queue_before(&mp->frags, frag, newfrag);
-			newfrag = NULL;
-			break;
-		}
-	}
-	if (newfrag)
-		__skb_queue_tail(&mp->frags, newfrag);
+	/* if this new fragment is before the first one, then enqueue it now. */
+	if ((frag = mp->frags) == NULL || MP_LT(newseq, MP_SEQ(frag))) {
+		newfrag->next = frag;
+		mp->frags = frag = newfrag;
+		newfrag = NULL;
+	}
 
-	frag = skb_peek(&mp->frags);
-	start = ((MP_FLAGS(frag) & MP_BEGIN_FRAG) &&
-		 (MP_SEQ(frag) == mp->seq)) ? frag : NULL;
-	if (!start)
-		goto check_overflow;
+	start = MP_FLAGS(frag) & MP_BEGIN_FRAG &&
+		MP_SEQ(frag) == mp->seq ? frag : NULL;
 
-	/* main fragment traversing loop
+	/*
+	 * main fragment traversing loop
 	 *
 	 * try to accomplish several tasks:
+	 * - insert new fragment into the proper sequence slot (once that's done
+	 *   newfrag will be set to NULL)
 	 * - reassemble any complete fragment sequence (non-null 'start'
 	 *   indicates there is a continguous sequence present)
 	 * - discard any incomplete sequences that are below minseq -- due
@@ -1692,46 +1681,71 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
 	 *   come to complete such sequence and it should be discarded
 	 *
 	 * loop completes when we accomplished the following tasks:
+	 * - new fragment is inserted in the proper sequence ('newfrag' is
+	 *   set to NULL)
 	 * - we hit a gap in the sequence, so no reassembly/processing is
 	 *   possible ('start' would be set to NULL)
 	 *
 	 * algorithm for this code is derived from code in the book
 	 * 'PPP Design And Debugging' by James Carlson (Addison-Wesley)
 	 */
-	skb_queue_walk_safe(&mp->frags, frag, nextf) {
-		thisseq = MP_SEQ(frag);
-
-		/* check for misplaced start */
-		if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) {
-			printk(KERN_WARNING"isdn_mppp(seq %d): new "
-			       "BEGIN flag with no prior END", thisseq);
-			stats->seqerrs++;
-			stats->frame_drops++;
-			isdn_ppp_mp_discard(mp, start, frag);
-			start = frag;
-		} else if (MP_LE(thisseq, minseq)) {
-			if (MP_FLAGS(frag) & MP_BEGIN_FRAG)
+	while (start != NULL || newfrag != NULL) {
+
+		thisseq = MP_SEQ(frag);
+		nextf = frag->next;
+
+		/* drop any duplicate fragments */
+		if (newfrag != NULL && thisseq == newseq) {
+			isdn_ppp_mp_free_skb(mp, newfrag);
+			newfrag = NULL;
+		}
+
+		/* insert new fragment before next element if possible. */
+		if (newfrag != NULL && (nextf == NULL ||
+					MP_LT(newseq, MP_SEQ(nextf)))) {
+			newfrag->next = nextf;
+			frag->next = nextf = newfrag;
+			newfrag = NULL;
+		}
+
+		if (start != NULL) {
+			/* check for misplaced start */
+			if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) {
+				printk(KERN_WARNING"isdn_mppp(seq %d): new "
+				       "BEGIN flag with no prior END", thisseq);
+				stats->seqerrs++;
+				stats->frame_drops++;
+				start = isdn_ppp_mp_discard(mp, start,frag);
+				nextf = frag->next;
+			}
+		} else if (MP_LE(thisseq, minseq)) {
+			if (MP_FLAGS(frag) & MP_BEGIN_FRAG)
 				start = frag;
 			else {
 				if (MP_FLAGS(frag) & MP_END_FRAG)
 					stats->frame_drops++;
-				__skb_unlink(skb, &mp->frags);
+				if( mp->frags == frag )
+					mp->frags = nextf;
 				isdn_ppp_mp_free_skb(mp, frag);
+				frag = nextf;
 				continue;
 			}
 		}
 
-		/* if we have end fragment, then we have full reassembly
-		 * sequence -- reassemble and process packet now
+		/* if start is non-null and we have end fragment, then
+		 * we have full reassembly sequence -- reassemble
+		 * and process packet now
 		 */
-		if (MP_FLAGS(frag) & MP_END_FRAG) {
+		if (start != NULL && (MP_FLAGS(frag) & MP_END_FRAG)) {
 			minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK;
 			/* Reassemble the packet then dispatch it */
-			isdn_ppp_mp_reassembly(net_dev, lp, start, frag, thisseq);
+			isdn_ppp_mp_reassembly(net_dev, lp, start, nextf);
 
 			start = NULL;
 			frag = NULL;
+
+			mp->frags = nextf;
 		}
 
 		/* check if need to update start pointer: if we just
 		 * reassembled the packet and sequence is contiguous
@@ -1742,25 +1756,26 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
 		 * below low watermark and set start to the next frag or
 		 * clear start ptr.
 		 */
-		if (nextf != (struct sk_buff *)&mp->frags &&
+		if (nextf != NULL &&
 		    ((thisseq+1) & MP_LONGSEQ_MASK) == MP_SEQ(nextf)) {
 			/* if we just reassembled and the next one is here,
-			 * then start another reassembly.
-			 */
+			 * then start another reassembly. */
+
 			if (frag == NULL) {
 				if (MP_FLAGS(nextf) & MP_BEGIN_FRAG)
 					start = nextf;
-				else {
-					printk(KERN_WARNING"isdn_mppp(seq %d):"
-					       " END flag with no following "
-					       "BEGIN", thisseq);
+				else
+				{
+					printk(KERN_WARNING"isdn_mppp(seq %d):"
+					       " END flag with no following "
+					       "BEGIN", thisseq);
 					stats->seqerrs++;
 				}
 			}
-		} else {
-			if (nextf != (struct sk_buff *)&mp->frags &&
-			    frag != NULL &&
+
+		} else {
+			if ( nextf != NULL && frag != NULL &&
 			    MP_LT(thisseq, minseq)) {
 				/* we've got a break in the sequence
 				 * and we not at the end yet
 				 * and we did not just reassembled
@@ -1769,39 +1784,41 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
 				 * discard all the frames below low watermark
 				 * and start over */
 				stats->frame_drops++;
-				isdn_ppp_mp_discard(mp, start, nextf);
+				mp->frags = isdn_ppp_mp_discard(mp,start,nextf);
 			}
 			/* break in the sequence, no reassembly */
 			start = NULL;
 		}
-		if (!start)
-			break;
-	}
+
+		frag = nextf;
+	}	/* while -- main loop */
 
-check_overflow:
+	if (mp->frags == NULL)
+		mp->frags = frag;
+
 	/* rather straighforward way to deal with (not very) possible
-	 * queue overflow
-	 */
+	 * queue overflow */
 	if (mp->frames > MP_MAX_QUEUE_LEN) {
 		stats->overflows++;
-		skb_queue_walk_safe(&mp->frags, frag, nextf) {
-			if (mp->frames <= MP_MAX_QUEUE_LEN)
-				break;
-			__skb_unlink(frag, &mp->frags);
-			isdn_ppp_mp_free_skb(mp, frag);
+		while (mp->frames > MP_MAX_QUEUE_LEN) {
+			frag = mp->frags->next;
+			isdn_ppp_mp_free_skb(mp, mp->frags);
+			mp->frags = frag;
 		}
 	}
 	spin_unlock_irqrestore(&mp->lock, flags);
 }
 
-static void isdn_ppp_mp_cleanup(isdn_net_local *lp)
+static void isdn_ppp_mp_cleanup( isdn_net_local * lp )
 {
-	struct sk_buff *skb, *tmp;
-
-	skb_queue_walk_safe(&lp->netdev->pb->frags, skb, tmp) {
-		__skb_unlink(skb, &lp->netdev->pb->frags);
-		isdn_ppp_mp_free_skb(lp->netdev->pb, skb);
-	}
+	struct sk_buff * frag = lp->netdev->pb->frags;
+	struct sk_buff * nextfrag;
+	while( frag ) {
+		nextfrag = frag->next;
+		isdn_ppp_mp_free_skb(lp->netdev->pb, frag);
+		frag = nextfrag;
+	}
+	lp->netdev->pb->frags = NULL;
 }
 
 static u32 isdn_ppp_mp_get_seq( int short_seq,
@@ -1838,115 +1855,72 @@ static u32 isdn_ppp_mp_get_seq( int short_seq,
 	return seq;
 }
 
-static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from,
-				struct sk_buff *to)
+struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp,
+			struct sk_buff * from, struct sk_buff * to )
 {
-	if (from) {
-		struct sk_buff *skb, *tmp;
-		int freeing = 0;
-
-		skb_queue_walk_safe(&mp->frags, skb, tmp) {
-			if (skb == to)
-				break;
-			if (skb == from)
-				freeing = 1;
-			if (!freeing)
-				continue;
-			__skb_unlink(skb, &mp->frags);
-			isdn_ppp_mp_free_skb(mp, skb);
+	if( from )
+		while (from != to) {
+			struct sk_buff * next = from->next;
+			isdn_ppp_mp_free_skb(mp, from);
+			from = next;
 		}
-	}
-}
-
-static unsigned int calc_tot_len(struct sk_buff_head *queue,
-				 struct sk_buff *from, struct sk_buff *to)
-{
-	unsigned int tot_len = 0;
-	struct sk_buff *skb;
-	int found_start = 0;
-
-	skb_queue_walk(queue, skb) {
-		if (skb == from)
-			found_start = 1;
-		if (!found_start)
-			continue;
-		tot_len += skb->len - MP_HEADER_LEN;
-		if (skb == to)
-			break;
-	}
-	return tot_len;
+	return from;
 }
 
-/* Reassemble packet using fragments in the reassembly queue from
- * 'from' until 'to', inclusive.
- */
-static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
-				   struct sk_buff *from, struct sk_buff *to,
-				   u32 lastseq)
+void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp,
+				struct sk_buff * from, struct sk_buff * to )
 {
-	ippp_bundle *mp = net_dev->pb;
-	unsigned int tot_len;
-	struct sk_buff *skb;
+	ippp_bundle * mp = net_dev->pb;
 	int proto;
+	struct sk_buff * skb;
+	unsigned int tot_len;
 
 	if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
 		printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
 			__func__, lp->ppp_slot);
 		return;
 	}
-
-	tot_len = calc_tot_len(&mp->frags, from, to);
-
-	if (MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG)) {
-		if (ippp_table[lp->ppp_slot]->debug & 0x40)
+	if( MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG) ) {
+		if( ippp_table[lp->ppp_slot]->debug & 0x40 )
 			printk(KERN_DEBUG "isdn_mppp: reassembly: frame %d, "
-			       "len %d\n", MP_SEQ(from), from->len);
+			       "len %d\n", MP_SEQ(from), from->len );
 		skb = from;
 		skb_pull(skb, MP_HEADER_LEN);
-		__skb_unlink(skb, &mp->frags);
 		mp->frames--;
 	} else {
-		struct sk_buff *walk, *tmp;
-		int found_start = 0;
+		struct sk_buff * frag;
+		int n;
 
-		if (ippp_table[lp->ppp_slot]->debug & 0x40)
-			printk(KERN_DEBUG"isdn_mppp: reassembling frames %d "
-			       "to %d, len %d\n", MP_SEQ(from), lastseq,
-			       tot_len);
+		for(tot_len=n=0, frag=from; frag != to; frag=frag->next, n++)
+			tot_len += frag->len - MP_HEADER_LEN;
 
-		skb = dev_alloc_skb(tot_len);
-		if (!skb)
+		if( ippp_table[lp->ppp_slot]->debug & 0x40 )
+			printk(KERN_DEBUG"isdn_mppp: reassembling frames %d "
+			       "to %d, len %d\n", MP_SEQ(from),
+			       (MP_SEQ(from)+n-1) & MP_LONGSEQ_MASK, tot_len );
+		if( (skb = dev_alloc_skb(tot_len)) == NULL ) {
 			printk(KERN_ERR "isdn_mppp: cannot allocate sk buff "
 			       "of size %d\n", tot_len);
-
-		found_start = 0;
-		skb_queue_walk_safe(&mp->frags, walk, tmp) {
-			if (walk == from)
-				found_start = 1;
-			if (!found_start)
-				continue;
+			isdn_ppp_mp_discard(mp, from, to);
+			return;
+		}
 
-			if (skb) {
-				unsigned int len = walk->len - MP_HEADER_LEN;
-				skb_copy_from_linear_data_offset(walk, MP_HEADER_LEN,
-								 skb_put(skb, len),
-								 len);
-			}
-			__skb_unlink(walk, &mp->frags);
-			isdn_ppp_mp_free_skb(mp, walk);
+		while( from != to ) {
+			unsigned int len = from->len - MP_HEADER_LEN;
 
-			if (walk == to)
-				break;
+			skb_copy_from_linear_data_offset(from, MP_HEADER_LEN,
+							 skb_put(skb,len),
+							 len);
+			frag = from->next;
+			isdn_ppp_mp_free_skb(mp, from);
+			from = frag;
 		}
 	}
-	if (!skb)
-		return;
-
 	proto = isdn_ppp_strip_proto(skb);
 	isdn_ppp_push_higher(net_dev, lp, skb, proto);
 }
 
-static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb)
+static void isdn_ppp_mp_free_skb(ippp_bundle * mp, struct sk_buff * skb)
 {
 	dev_kfree_skb(skb);
 	mp->frames--;
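
The isdn_ppp hunks above drop the sk_buff_head based fragment queue and go
back to chaining MP fragments through skb->next. For illustration, a minimal
user-space sketch of that ordered, duplicate-dropping insertion, using a
stand-in struct instead of struct sk_buff and a plain `<` where the kernel
uses the wraparound-aware MP_LT/MP_SEQ macros (the kernel also folds this
into its main traversal loop rather than a separate helper):

	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-in for struct sk_buff: only the fields the insertion needs. */
	struct frag {
		unsigned int seq;	/* stand-in for MP_SEQ(skb) */
		struct frag *next;	/* the chain pointer the patch goes back to using */
	};

	/* Insert nf into the seq-ordered chain at *head, dropping duplicates.
	 * Mirrors the two cases in isdn_ppp_mp_receive(): insert-at-head
	 * first, then insert-before-next while walking the chain. */
	static void frag_insert(struct frag **head, struct frag *nf)
	{
		struct frag *cur = *head;

		if (cur == NULL || nf->seq < cur->seq) {	/* before first one */
			nf->next = cur;
			*head = nf;
			return;
		}
		while (cur) {
			if (cur->seq == nf->seq) {		/* duplicate: drop */
				free(nf);
				return;
			}
			if (cur->next == NULL || nf->seq < cur->next->seq) {
				nf->next = cur->next;		/* insert after cur */
				cur->next = nf;
				return;
			}
			cur = cur->next;
		}
	}

	int main(void)
	{
		struct frag *head = NULL;
		unsigned int seqs[] = { 3, 1, 2, 2, 5 };

		for (unsigned int i = 0; i < sizeof(seqs) / sizeof(seqs[0]); i++) {
			struct frag *nf = malloc(sizeof(*nf));
			nf->seq = seqs[i];
			nf->next = NULL;
			frag_insert(&head, nf);
		}
		for (struct frag *f = head; f; f = f->next)
			printf("%u ", f->seq);	/* prints: 1 2 3 5 */
		printf("\n");
		return 0;
	}
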
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
index 6e4e49113ab4..02b3dadc8abd 100644
--- a/drivers/misc/iwmc3200top/main.c
+++ b/drivers/misc/iwmc3200top/main.c
@@ -41,36 +41,13 @@
 #define DRIVER_DESCRIPTION "Intel(R) IWMC 3200 Top Driver"
 #define DRIVER_COPYRIGHT "Copyright (c) 2008 Intel Corporation."
 
-#define IWMCT_VERSION "0.1.62"
-
-#ifdef REPOSITORY_LABEL
-#define RL REPOSITORY_LABEL
-#else
-#define RL local
-#endif
-
-#ifdef CONFIG_IWMC3200TOP_DEBUG
-#define VD "-d"
-#else
-#define VD
-#endif
-
-#define DRIVER_VERSION IWMCT_VERSION "-" __stringify(RL) VD
+#define DRIVER_VERSION "0.1.62"
 
 MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
 MODULE_VERSION(DRIVER_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR(DRIVER_COPYRIGHT);
 
-
-/* FIXME: These can be found in sdio_ids.h in newer kernels */
-#ifndef SDIO_INTEL_VENDOR_ID
-#define SDIO_INTEL_VENDOR_ID	0x0089
-#endif
-#ifndef SDIO_DEVICE_ID_INTEL_IWMC3200TOP
-#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP	0x1404
-#endif
-
 /*
  * This workers main task is to wait for OP_OPR_ALIVE
  * from TOP FW until ALIVE_MSG_TIMOUT timeout is elapsed.
@@ -662,8 +639,9 @@ static void iwmct_remove(struct sdio_func *func)
 
 
 static const struct sdio_device_id iwmct_ids[] = {
-	{ SDIO_DEVICE(SDIO_INTEL_VENDOR_ID, SDIO_DEVICE_ID_INTEL_IWMC3200TOP)},
-	{ /* end: all zeroes */ },
+	/* Intel Wireless MultiCom 3200 Top Driver */
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1404)},
+	{ },	/* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE(sdio, iwmct_ids);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e012c2e0825a..6399abbdad6b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -3235,7 +3235,7 @@ config VIRTIO_NET
 
 config VMXNET3
 	tristate "VMware VMXNET3 ethernet driver"
-	depends on PCI && X86 && INET
+	depends on PCI && INET
 	help
 	  This driver supports VMware's vmxnet3 virtual ethernet NIC.
 	  To compile this driver as a module, choose M here: the
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 0073d198715b..be256b34cea8 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -433,24 +433,16 @@ ks8695_rx_irq(int irq, void *dev_id)
 {
 	struct net_device *ndev = (struct net_device *)dev_id;
 	struct ks8695_priv *ksp = netdev_priv(ndev);
-	unsigned long status;
-
-	unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
 
 	spin_lock(&ksp->rx_lock);
 
-	status = readl(KS8695_IRQ_VA + KS8695_INTST);
-
-	/*clean rx status bit*/
-	writel(status | mask_bit , KS8695_IRQ_VA + KS8695_INTST);
-
-	if (status & mask_bit) {
-		if (napi_schedule_prep(&ksp->napi)) {
-			/*disable rx interrupt*/
-			status &= ~mask_bit;
-			writel(status , KS8695_IRQ_VA + KS8695_INTEN);
-			__napi_schedule(&ksp->napi);
-		}
+	if (napi_schedule_prep(&ksp->napi)) {
+		unsigned long status = readl(KS8695_IRQ_VA + KS8695_INTEN);
+		unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
+		/*disable rx interrupt*/
+		status &= ~mask_bit;
+		writel(status , KS8695_IRQ_VA + KS8695_INTEN);
+		__napi_schedule(&ksp->napi);
 	}
 
 	spin_unlock(&ksp->rx_lock);
@@ -552,14 +544,13 @@ rx_finished:
 		ksp->next_rx_desc_read =
 			(last_rx_processed + 1) &
 			MAX_RX_DESC_MASK;
-
-		/* And refill the buffers */
-		ks8695_refill_rxbuffers(ksp);
-
-		/* Kick the RX DMA engine, in case it became
-		 *  suspended */
-		ks8695_writereg(ksp, KS8695_DRSC, 0);
 	}
+	/* And refill the buffers */
+	ks8695_refill_rxbuffers(ksp);
+
+	/* Kick the RX DMA engine, in case it became
+	 *  suspended */
+	ks8695_writereg(ksp, KS8695_DRSC, 0);
 	return received;
 }
 
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 25e2627eb118..b7f3866d546f 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -160,8 +160,8 @@ struct w90p910_ether {
 	struct mii_if_info mii;
 	struct timer_list check_timer;
 	void __iomem *reg;
-	unsigned int rxirq;
-	unsigned int txirq;
+	int rxirq;
+	int txirq;
 	unsigned int cur_tx;
 	unsigned int cur_rx;
 	unsigned int finish_tx;
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 5ef9e23435f4..1e2f57d4c367 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -2135,7 +2135,7 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
 
 	if (!adapter->have_msi)
 		flags |= IRQF_SHARED;
-	err = request_irq(adapter->pdev->irq, &atl1c_intr, flags,
+	err = request_irq(adapter->pdev->irq, atl1c_intr, flags,
 			netdev->name, netdev);
 	if (err) {
 		if (netif_msg_ifup(adapter))
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 928942b74ce6..602ab86b6392 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -259,9 +259,6 @@ struct bnx2x_eth_q_stats {
 struct bnx2x_fastpath {
 
 	struct napi_struct	napi;
-
-	u8			is_rx_queue;
-
 	struct host_status_block *status_blk;
 	dma_addr_t		status_blk_mapping;
 
@@ -970,8 +967,7 @@ struct bnx2x {
 #define BNX2X_STATE_ERROR		0xf000
 
 	int			multi_mode;
-	int			num_rx_queues;
-	int			num_tx_queues;
+	int			num_queues;
 
 	u32			rx_mode;
 #define BNX2X_RX_MODE_NONE		0
@@ -1074,20 +1070,15 @@ struct bnx2x {
 };
 
 
-#define BNX2X_MAX_QUEUES(bp)	(IS_E1HMF(bp) ? (MAX_CONTEXT/(2 * E1HVN_MAX)) \
-					      : (MAX_CONTEXT/2))
-#define BNX2X_NUM_QUEUES(bp)	(bp->num_rx_queues + bp->num_tx_queues)
-#define is_multi(bp)		(BNX2X_NUM_QUEUES(bp) > 2)
+#define BNX2X_MAX_QUEUES(bp)	(IS_E1HMF(bp) ? (MAX_CONTEXT/E1HVN_MAX) \
+					      : MAX_CONTEXT)
+#define BNX2X_NUM_QUEUES(bp)	(bp->num_queues)
+#define is_multi(bp)		(BNX2X_NUM_QUEUES(bp) > 1)
 
-#define for_each_rx_queue(bp, var) \
-			for (var = 0; var < bp->num_rx_queues; var++)
-#define for_each_tx_queue(bp, var) \
-			for (var = bp->num_rx_queues; \
-			     var < BNX2X_NUM_QUEUES(bp); var++)
 #define for_each_queue(bp, var) \
 			for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++)
 #define for_each_nondefault_queue(bp, var) \
-			for (var = 1; var < bp->num_rx_queues; var++)
+			for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++)
 
 
 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index e2cf686d1118..77ba13520d87 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -57,7 +57,7 @@
 #include "bnx2x_init_ops.h"
 #include "bnx2x_dump.h"
 
-#define DRV_MODULE_VERSION	"1.52.1-4"
+#define DRV_MODULE_VERSION	"1.52.1-5"
 #define DRV_MODULE_RELDATE	"2009/11/09"
 #define BNX2X_BC_VER		0x040200
 
@@ -91,15 +91,10 @@ module_param(multi_mode, int, 0);
 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
 			     "(0 Disable; 1 Enable (default))");
 
-static int num_rx_queues;
-module_param(num_rx_queues, int, 0);
-MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
-				" (default is half number of CPUs)");
-
-static int num_tx_queues;
-module_param(num_tx_queues, int, 0);
-MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
-				" (default is half number of CPUs)");
+static int num_queues;
+module_param(num_queues, int, 0);
+MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
+			     " (default is as a number of CPUs)");
 
 static int disable_tpa;
 module_param(disable_tpa, int, 0);
@@ -558,7 +553,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
 
 	/* Rx */
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
@@ -575,7 +570,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 	}
 
 	/* Tx */
-	for_each_tx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
@@ -590,7 +585,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 
 	/* Rings */
 	/* Rx */
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
@@ -624,7 +619,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
624 } 619 }
625 620
626 /* Tx */ 621 /* Tx */
627 for_each_tx_queue(bp, i) { 622 for_each_queue(bp, i) {
628 struct bnx2x_fastpath *fp = &bp->fp[i]; 623 struct bnx2x_fastpath *fp = &bp->fp[i];
629 624
630 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); 625 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
@@ -792,21 +787,13 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
 	barrier();
 }
 
-static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
+static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
 {
 	struct host_status_block *fpsb = fp->status_blk;
-	u16 rc = 0;
 
 	barrier(); /* status block is written to by the chip */
-	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
-		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
-		rc |= 1;
-	}
-	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
-		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
-		rc |= 2;
-	}
-	return rc;
+	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
+	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
 }
 
 static u16 bnx2x_ack_int(struct bnx2x *bp)
@@ -846,6 +833,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
 	int nbd;
 
+	/* prefetch skb end pointer to speedup dev_kfree_skb() */
+	prefetch(&skb->end);
+
 	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
 	   idx, tx_buf, skb);
 
@@ -890,7 +880,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 	/* release skb */
 	WARN_ON(!skb);
-	dev_kfree_skb_any(skb);
+	dev_kfree_skb(skb);
 	tx_buf->first_bd = 0;
 	tx_buf->skb = NULL;
 
@@ -920,19 +910,28 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
 	return (s16)(fp->bp->tx_ring_size) - used;
 }
 
-static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
+static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
+{
+	u16 hw_cons;
+
+	/* Tell compiler that status block fields can change */
+	barrier();
+	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
+	return hw_cons != fp->tx_pkt_cons;
+}
+
+static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
 {
 	struct bnx2x *bp = fp->bp;
 	struct netdev_queue *txq;
 	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
-	int done = 0;
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
-		return;
+		return -1;
 #endif
 
-	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
+	txq = netdev_get_tx_queue(bp->dev, fp->index);
 	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
 	sw_cons = fp->tx_pkt_cons;
 
@@ -953,7 +952,6 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
 */
 		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
 		sw_cons++;
-		done++;
 	}
 
 	fp->tx_pkt_cons = sw_cons;
@@ -975,6 +973,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
 		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
 			netif_tx_wake_queue(txq);
 	}
+	return 0;
 }
 
 #ifdef BCM_CNIC
@@ -1561,6 +1560,8 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 		} else {
 			rx_buf = &fp->rx_buf_ring[bd_cons];
 			skb = rx_buf->skb;
+			prefetch(skb);
+			prefetch((u8 *)skb + 256);
 			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
 			pad = cqe->fast_path_cqe.placement_offset;
 
@@ -1742,27 +1743,13 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
 	if (unlikely(bp->panic))
 		return IRQ_HANDLED;
 #endif
-	/* Handle Rx or Tx according to MSI-X vector */
-	if (fp->is_rx_queue) {
-		prefetch(fp->rx_cons_sb);
-		prefetch(&fp->status_blk->u_status_block.status_block_index);
-
-		napi_schedule(&bnx2x_fp(bp, fp->index, napi));
-
-	} else {
-		prefetch(fp->tx_cons_sb);
-		prefetch(&fp->status_blk->c_status_block.status_block_index);
-
-		bnx2x_update_fpsb_idx(fp);
-		rmb();
-		bnx2x_tx_int(fp);
 
-		/* Re-enable interrupts */
-		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
-			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
-		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
-			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
-	}
+	/* Handle Rx and Tx according to MSI-X vector */
+	prefetch(fp->rx_cons_sb);
+	prefetch(fp->tx_cons_sb);
+	prefetch(&fp->status_blk->u_status_block.status_block_index);
+	prefetch(&fp->status_blk->c_status_block.status_block_index);
+	napi_schedule(&bnx2x_fp(bp, fp->index, napi));
 
 	return IRQ_HANDLED;
 }
@@ -1797,31 +1784,14 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 
 		mask = 0x2 << fp->sb_id;
 		if (status & mask) {
-			/* Handle Rx or Tx according to SB id */
-			if (fp->is_rx_queue) {
-				prefetch(fp->rx_cons_sb);
-				prefetch(&fp->status_blk->u_status_block.
-							status_block_index);
-
-				napi_schedule(&bnx2x_fp(bp, fp->index, napi));
-
-			} else {
-				prefetch(fp->tx_cons_sb);
-				prefetch(&fp->status_blk->c_status_block.
-							status_block_index);
-
-				bnx2x_update_fpsb_idx(fp);
-				rmb();
-				bnx2x_tx_int(fp);
-
-				/* Re-enable interrupts */
-				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
-					     le16_to_cpu(fp->fp_u_idx),
-					     IGU_INT_NOP, 1);
-				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
-					     le16_to_cpu(fp->fp_c_idx),
-					     IGU_INT_ENABLE, 1);
-			}
+			/* Handle Rx and Tx according to SB id */
+			prefetch(fp->rx_cons_sb);
+			prefetch(&fp->status_blk->u_status_block.
+						status_block_index);
+			prefetch(fp->tx_cons_sb);
+			prefetch(&fp->status_blk->c_status_block.
+						status_block_index);
+			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
 			status &= ~mask;
 		}
 	}
@@ -2587,7 +2557,6 @@ static void bnx2x_e1h_disable(struct bnx2x *bp)
 	int port = BP_PORT(bp);
 
 	netif_tx_disable(bp->dev);
-	bp->dev->trans_start = jiffies;	/* prevent tx timeout */
 
 	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
 
@@ -4027,7 +3996,7 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
 	estats->no_buff_discard_hi = 0;
 	estats->no_buff_discard_lo = 0;
 
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 		int cl_id = fp->cl_id;
 		struct tstorm_per_client_stats *tclient =
@@ -4244,7 +4213,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
 	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
 
 	nstats->rx_dropped = estats->mac_discard;
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		nstats->rx_dropped +=
 			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
 
@@ -4298,7 +4267,7 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
 	estats->rx_err_discard_pkt = 0;
 	estats->rx_skb_alloc_failed = 0;
 	estats->hw_csum_err = 0;
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
 
 		estats->driver_xoff += qstats->driver_xoff;
@@ -4329,7 +4298,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 
 	if (bp->msglevel & NETIF_MSG_TIMER) {
 		struct bnx2x_fastpath *fp0_rx = bp->fp;
-		struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
+		struct bnx2x_fastpath *fp0_tx = bp->fp;
 		struct tstorm_per_client_stats *old_tclient =
 							&bp->fp->old_tclient;
 		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
@@ -4984,7 +4953,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 
 	if (bp->flags & TPA_ENABLE_FLAG) {
 
-		for_each_rx_queue(bp, j) {
+		for_each_queue(bp, j) {
 			struct bnx2x_fastpath *fp = &bp->fp[j];
 
 			for (i = 0; i < max_agg_queues; i++) {
@@ -5007,16 +4976,13 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 		}
 	}
 
-	for_each_rx_queue(bp, j) {
+	for_each_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
 		fp->rx_bd_cons = 0;
 		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
 		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
 
-		/* Mark queue as Rx */
-		fp->is_rx_queue = 1;
-
 		/* "next page" elements initialization */
 		/* SGE ring */
 		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
@@ -5122,7 +5088,7 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp)
 {
 	int i, j;
 
-	for_each_tx_queue(bp, j) {
+	for_each_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
 		for (i = 1; i <= NUM_TX_RINGS; i++) {
@@ -5148,10 +5114,6 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp)
 		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
 		fp->tx_pkt = 0;
 	}
-
-	/* clean tx statistics */
-	for_each_rx_queue(bp, i)
-		bnx2x_fp(bp, i, tx_pkt) = 0;
 }
 
 static void bnx2x_init_sp_ring(struct bnx2x *bp)
@@ -5180,7 +5142,8 @@ static void bnx2x_init_context(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_rx_queue(bp, i) {
+	/* Rx */
+	for_each_queue(bp, i) {
 		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 		u8 cl_id = fp->cl_id;
@@ -5232,10 +5195,11 @@ static void bnx2x_init_context(struct bnx2x *bp)
 						ETH_CONNECTION_TYPE);
 	}
 
-	for_each_tx_queue(bp, i) {
+	/* Tx */
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 		struct eth_context *context =
-			bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
+			bnx2x_sp(bp, context[i].eth);
 
 		context->cstorm_st_context.sb_index_number =
 						C_SB_ETH_TX_CQ_INDEX;
@@ -5263,7 +5227,7 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
 	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
 		REG_WR8(bp, BAR_TSTRORM_INTMEM +
 			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
-			bp->fp->cl_id + (i % bp->num_rx_queues));
+			bp->fp->cl_id + (i % bp->num_queues));
 }
 
 static void bnx2x_set_client_config(struct bnx2x *bp)
@@ -5507,7 +5471,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
5507 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) * 5471 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5508 SGE_PAGE_SIZE * PAGES_PER_SGE), 5472 SGE_PAGE_SIZE * PAGES_PER_SGE),
5509 (u32)0xffff); 5473 (u32)0xffff);
5510 for_each_rx_queue(bp, i) { 5474 for_each_queue(bp, i) {
5511 struct bnx2x_fastpath *fp = &bp->fp[i]; 5475 struct bnx2x_fastpath *fp = &bp->fp[i];
5512 5476
5513 REG_WR(bp, BAR_USTRORM_INTMEM + 5477 REG_WR(bp, BAR_USTRORM_INTMEM +
@@ -5542,7 +5506,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
5542 rx_pause.cqe_thr_high = 350; 5506 rx_pause.cqe_thr_high = 350;
5543 rx_pause.sge_thr_high = 0; 5507 rx_pause.sge_thr_high = 0;
5544 5508
5545 for_each_rx_queue(bp, i) { 5509 for_each_queue(bp, i) {
5546 struct bnx2x_fastpath *fp = &bp->fp[i]; 5510 struct bnx2x_fastpath *fp = &bp->fp[i];
5547 5511
5548 if (!fp->disable_tpa) { 5512 if (!fp->disable_tpa) {
@@ -5637,9 +5601,6 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5637#else 5601#else
5638 fp->sb_id = fp->cl_id; 5602 fp->sb_id = fp->cl_id;
5639#endif 5603#endif
5640 /* Suitable Rx and Tx SBs are served by the same client */
5641 if (i >= bp->num_rx_queues)
5642 fp->cl_id -= bp->num_rx_queues;
5643 DP(NETIF_MSG_IFUP, 5604 DP(NETIF_MSG_IFUP,
5644 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n", 5605 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5645 i, bp, fp->status_blk, fp->cl_id, fp->sb_id); 5606 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
@@ -6749,7 +6710,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
6749 sizeof(struct host_status_block)); 6710 sizeof(struct host_status_block));
6750 } 6711 }
6751 /* Rx */ 6712 /* Rx */
6752 for_each_rx_queue(bp, i) { 6713 for_each_queue(bp, i) {
6753 6714
6754 /* fastpath rx rings: rx_buf rx_desc rx_comp */ 6715 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6755 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring)); 6716 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
@@ -6769,7 +6730,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
6769 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 6730 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6770 } 6731 }
6771 /* Tx */ 6732 /* Tx */
6772 for_each_tx_queue(bp, i) { 6733 for_each_queue(bp, i) {
6773 6734
6774 /* fastpath tx rings: tx_buf tx_desc */ 6735 /* fastpath tx rings: tx_buf tx_desc */
6775 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring)); 6736 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
@@ -6831,7 +6792,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
6831 sizeof(struct host_status_block)); 6792 sizeof(struct host_status_block));
6832 } 6793 }
6833 /* Rx */ 6794 /* Rx */
6834 for_each_rx_queue(bp, i) { 6795 for_each_queue(bp, i) {
6835 6796
6836 /* fastpath rx rings: rx_buf rx_desc rx_comp */ 6797 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6837 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring), 6798 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
@@ -6853,7 +6814,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
6853 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 6814 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6854 } 6815 }
6855 /* Tx */ 6816 /* Tx */
6856 for_each_tx_queue(bp, i) { 6817 for_each_queue(bp, i) {
6857 6818
6858 /* fastpath tx rings: tx_buf tx_desc */ 6819 /* fastpath tx rings: tx_buf tx_desc */
6859 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring), 6820 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
@@ -6909,7 +6870,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6909{ 6870{
6910 int i; 6871 int i;
6911 6872
6912 for_each_tx_queue(bp, i) { 6873 for_each_queue(bp, i) {
6913 struct bnx2x_fastpath *fp = &bp->fp[i]; 6874 struct bnx2x_fastpath *fp = &bp->fp[i];
6914 6875
6915 u16 bd_cons = fp->tx_bd_cons; 6876 u16 bd_cons = fp->tx_bd_cons;
@@ -6927,7 +6888,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6927{ 6888{
6928 int i, j; 6889 int i, j;
6929 6890
6930 for_each_rx_queue(bp, j) { 6891 for_each_queue(bp, j) {
6931 struct bnx2x_fastpath *fp = &bp->fp[j]; 6892 struct bnx2x_fastpath *fp = &bp->fp[j];
6932 6893
6933 for (i = 0; i < NUM_RX_BD; i++) { 6894 for (i = 0; i < NUM_RX_BD; i++) {
@@ -7042,12 +7003,8 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7042#endif 7003#endif
7043 for_each_queue(bp, i) { 7004 for_each_queue(bp, i) {
7044 struct bnx2x_fastpath *fp = &bp->fp[i]; 7005 struct bnx2x_fastpath *fp = &bp->fp[i];
7045 7006 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7046 if (i < bp->num_rx_queues) 7007 bp->dev->name, i);
7047 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
7048 else
7049 sprintf(fp->name, "%s-tx-%d",
7050 bp->dev->name, i - bp->num_rx_queues);
7051 7008
7052 rc = request_irq(bp->msix_table[i + offset].vector, 7009 rc = request_irq(bp->msix_table[i + offset].vector,
7053 bnx2x_msix_fp_int, 0, fp->name, fp); 7010 bnx2x_msix_fp_int, 0, fp->name, fp);
@@ -7106,7 +7063,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
7106{ 7063{
7107 int i; 7064 int i;
7108 7065
7109 for_each_rx_queue(bp, i) 7066 for_each_queue(bp, i)
7110 napi_enable(&bnx2x_fp(bp, i, napi)); 7067 napi_enable(&bnx2x_fp(bp, i, napi));
7111} 7068}
7112 7069
@@ -7114,7 +7071,7 @@ static void bnx2x_napi_disable(struct bnx2x *bp)
7114{ 7071{
7115 int i; 7072 int i;
7116 7073
7117 for_each_rx_queue(bp, i) 7074 for_each_queue(bp, i)
7118 napi_disable(&bnx2x_fp(bp, i, napi)); 7075 napi_disable(&bnx2x_fp(bp, i, napi));
7119} 7076}
7120 7077
@@ -7140,7 +7097,6 @@ static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7140 bnx2x_int_disable_sync(bp, disable_hw); 7097 bnx2x_int_disable_sync(bp, disable_hw);
7141 bnx2x_napi_disable(bp); 7098 bnx2x_napi_disable(bp);
7142 netif_tx_disable(bp->dev); 7099 netif_tx_disable(bp->dev);
7143 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7144} 7100}
7145 7101
7146/* 7102/*
@@ -7410,88 +7366,60 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7410 7366
7411static int bnx2x_poll(struct napi_struct *napi, int budget); 7367static int bnx2x_poll(struct napi_struct *napi, int budget);
7412 7368
7413static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out, 7369static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7414 int *num_tx_queues_out)
7415{ 7370{
7416 int _num_rx_queues = 0, _num_tx_queues = 0;
7417 7371
7418 switch (bp->multi_mode) { 7372 switch (bp->multi_mode) {
7419 case ETH_RSS_MODE_DISABLED: 7373 case ETH_RSS_MODE_DISABLED:
7420 _num_rx_queues = 1; 7374 bp->num_queues = 1;
7421 _num_tx_queues = 1;
7422 break; 7375 break;
7423 7376
7424 case ETH_RSS_MODE_REGULAR: 7377 case ETH_RSS_MODE_REGULAR:
7425 if (num_rx_queues) 7378 if (num_queues)
7426 _num_rx_queues = min_t(u32, num_rx_queues, 7379 bp->num_queues = min_t(u32, num_queues,
7427 BNX2X_MAX_QUEUES(bp)); 7380 BNX2X_MAX_QUEUES(bp));
7428 else
7429 _num_rx_queues = min_t(u32, num_online_cpus(),
7430 BNX2X_MAX_QUEUES(bp));
7431
7432 if (num_tx_queues)
7433 _num_tx_queues = min_t(u32, num_tx_queues,
7434 BNX2X_MAX_QUEUES(bp));
7435 else 7381 else
7436 _num_tx_queues = min_t(u32, num_online_cpus(), 7382 bp->num_queues = min_t(u32, num_online_cpus(),
7437 BNX2X_MAX_QUEUES(bp)); 7383 BNX2X_MAX_QUEUES(bp));
7438
7439 /* There must not be more Tx queues than Rx queues */
7440 if (_num_tx_queues > _num_rx_queues) {
7441 BNX2X_ERR("number of tx queues (%d) > "
7442 "number of rx queues (%d)"
7443 " defaulting to %d\n",
7444 _num_tx_queues, _num_rx_queues,
7445 _num_rx_queues);
7446 _num_tx_queues = _num_rx_queues;
7447 }
7448 break; 7384 break;
7449 7385
7450 7386
7451 default: 7387 default:
7452 _num_rx_queues = 1; 7388 bp->num_queues = 1;
7453 _num_tx_queues = 1;
7454 break; 7389 break;
7455 } 7390 }
7456
7457 *num_rx_queues_out = _num_rx_queues;
7458 *num_tx_queues_out = _num_tx_queues;
7459} 7391}
7460 7392
7461static int bnx2x_set_int_mode(struct bnx2x *bp) 7393static int bnx2x_set_num_queues(struct bnx2x *bp)
7462{ 7394{
7463 int rc = 0; 7395 int rc = 0;
7464 7396
7465 switch (int_mode) { 7397 switch (int_mode) {
7466 case INT_MODE_INTx: 7398 case INT_MODE_INTx:
7467 case INT_MODE_MSI: 7399 case INT_MODE_MSI:
7468 bp->num_rx_queues = 1; 7400 bp->num_queues = 1;
7469 bp->num_tx_queues = 1;
7470 DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); 7401 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7471 break; 7402 break;
7472 7403
7473 case INT_MODE_MSIX: 7404 case INT_MODE_MSIX:
7474 default: 7405 default:
7475 /* Set interrupt mode according to bp->multi_mode value */ 7406 /* Set number of queues according to bp->multi_mode value */
7476 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues, 7407 bnx2x_set_num_queues_msix(bp);
7477 &bp->num_tx_queues);
7478 7408
7479 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n", 7409 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7480 bp->num_rx_queues, bp->num_tx_queues); 7410 bp->num_queues);
7481 7411
7482 /* if we can't use MSI-X we only need one fp, 7412 /* if we can't use MSI-X we only need one fp,
7483 * so try to enable MSI-X with the requested number of fp's 7413 * so try to enable MSI-X with the requested number of fp's
7484 * and fall back to MSI or legacy INTx with one fp 7414 * and fall back to MSI or legacy INTx with one fp
7485 */ 7415 */
7486 rc = bnx2x_enable_msix(bp); 7416 rc = bnx2x_enable_msix(bp);
7487 if (rc) { 7417 if (rc)
7488 /* failed to enable MSI-X */ 7418 /* failed to enable MSI-X */
7489 bp->num_rx_queues = 1; 7419 bp->num_queues = 1;
7490 bp->num_tx_queues = 1;
7491 }
7492 break; 7420 break;
7493 } 7421 }
7494 bp->dev->real_num_tx_queues = bp->num_tx_queues; 7422 bp->dev->real_num_tx_queues = bp->num_queues;
7495 return rc; 7423 return rc;
7496} 7424}
7497 7425
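Note: taken together, bnx2x_set_num_queues_msix() and bnx2x_set_num_queues() above reduce to a single policy now that rx and tx share one fastpath count. A condensed sketch of the flow, using the module parameters (num_queues, int_mode) visible in this patch; this restates the hunks, it is not a drop-in replacement:

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_queues = 1;
		break;
	default:	/* MSI-X */
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			bp->num_queues = min_t(u32,
					num_queues ? : num_online_cpus(),
					BNX2X_MAX_QUEUES(bp));
		else
			bp->num_queues = 1;
		if (bnx2x_enable_msix(bp))
			bp->num_queues = 1;	/* fall back to MSI/INTx */
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_queues;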
@@ -7513,16 +7441,16 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7513 7441
7514 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; 7442 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7515 7443
7516 rc = bnx2x_set_int_mode(bp); 7444 rc = bnx2x_set_num_queues(bp);
7517 7445
7518 if (bnx2x_alloc_mem(bp)) 7446 if (bnx2x_alloc_mem(bp))
7519 return -ENOMEM; 7447 return -ENOMEM;
7520 7448
7521 for_each_rx_queue(bp, i) 7449 for_each_queue(bp, i)
7522 bnx2x_fp(bp, i, disable_tpa) = 7450 bnx2x_fp(bp, i, disable_tpa) =
7523 ((bp->flags & TPA_ENABLE_FLAG) == 0); 7451 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7524 7452
7525 for_each_rx_queue(bp, i) 7453 for_each_queue(bp, i)
7526 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 7454 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7527 bnx2x_poll, 128); 7455 bnx2x_poll, 128);
7528 7456
@@ -7536,7 +7464,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7536 } 7464 }
7537 } else { 7465 } else {
7538 /* Fall to INTx if failed to enable MSI-X due to lack of 7466 /* Fall to INTx if failed to enable MSI-X due to lack of
7539 memory (in bnx2x_set_int_mode()) */ 7467 memory (in bnx2x_set_num_queues()) */
7540 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx)) 7468 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7541 bnx2x_enable_msi(bp); 7469 bnx2x_enable_msi(bp);
7542 bnx2x_ack_int(bp); 7470 bnx2x_ack_int(bp);
@@ -7730,14 +7658,14 @@ load_error3:
7730 bp->port.pmf = 0; 7658 bp->port.pmf = 0;
7731 /* Free SKBs, SGEs, TPA pool and driver internals */ 7659 /* Free SKBs, SGEs, TPA pool and driver internals */
7732 bnx2x_free_skbs(bp); 7660 bnx2x_free_skbs(bp);
7733 for_each_rx_queue(bp, i) 7661 for_each_queue(bp, i)
7734 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 7662 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7735load_error2: 7663load_error2:
7736 /* Release IRQs */ 7664 /* Release IRQs */
7737 bnx2x_free_irq(bp); 7665 bnx2x_free_irq(bp);
7738load_error1: 7666load_error1:
7739 bnx2x_napi_disable(bp); 7667 bnx2x_napi_disable(bp);
7740 for_each_rx_queue(bp, i) 7668 for_each_queue(bp, i)
7741 netif_napi_del(&bnx2x_fp(bp, i, napi)); 7669 netif_napi_del(&bnx2x_fp(bp, i, napi));
7742 bnx2x_free_mem(bp); 7670 bnx2x_free_mem(bp);
7743 7671
@@ -7928,7 +7856,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7928 bnx2x_free_irq(bp); 7856 bnx2x_free_irq(bp);
7929 7857
7930 /* Wait until tx fastpath tasks complete */ 7858 /* Wait until tx fastpath tasks complete */
7931 for_each_tx_queue(bp, i) { 7859 for_each_queue(bp, i) {
7932 struct bnx2x_fastpath *fp = &bp->fp[i]; 7860 struct bnx2x_fastpath *fp = &bp->fp[i];
7933 7861
7934 cnt = 1000; 7862 cnt = 1000;
@@ -8071,9 +7999,9 @@ unload_error:
8071 7999
8072 /* Free SKBs, SGEs, TPA pool and driver internals */ 8000 /* Free SKBs, SGEs, TPA pool and driver internals */
8073 bnx2x_free_skbs(bp); 8001 bnx2x_free_skbs(bp);
8074 for_each_rx_queue(bp, i) 8002 for_each_queue(bp, i)
8075 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 8003 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8076 for_each_rx_queue(bp, i) 8004 for_each_queue(bp, i)
8077 netif_napi_del(&bnx2x_fp(bp, i, napi)); 8005 netif_napi_del(&bnx2x_fp(bp, i, napi));
8078 bnx2x_free_mem(bp); 8006 bnx2x_free_mem(bp);
8079 8007
@@ -10269,7 +10197,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10269 struct sk_buff *skb; 10197 struct sk_buff *skb;
10270 unsigned char *packet; 10198 unsigned char *packet;
10271 struct bnx2x_fastpath *fp_rx = &bp->fp[0]; 10199 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10272 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues]; 10200 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
10273 u16 tx_start_idx, tx_idx; 10201 u16 tx_start_idx, tx_idx;
10274 u16 rx_start_idx, rx_idx; 10202 u16 rx_start_idx, rx_idx;
10275 u16 pkt_prod, bd_prod; 10203 u16 pkt_prod, bd_prod;
@@ -10346,13 +10274,12 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10346 10274
10347 fp_tx->tx_db.data.prod += 2; 10275 fp_tx->tx_db.data.prod += 2;
10348 barrier(); 10276 barrier();
10349 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw); 10277 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
10350 10278
10351 mmiowb(); 10279 mmiowb();
10352 10280
10353 num_pkts++; 10281 num_pkts++;
10354 fp_tx->tx_bd_prod += 2; /* start + pbd */ 10282 fp_tx->tx_bd_prod += 2; /* start + pbd */
10355 bp->dev->trans_start = jiffies;
10356 10283
10357 udelay(100); 10284 udelay(100);
10358 10285
@@ -10725,7 +10652,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10725 switch(stringset) { 10652 switch(stringset) {
10726 case ETH_SS_STATS: 10653 case ETH_SS_STATS:
10727 if (is_multi(bp)) { 10654 if (is_multi(bp)) {
10728 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues; 10655 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
10729 if (!IS_E1HMF_MODE_STAT(bp)) 10656 if (!IS_E1HMF_MODE_STAT(bp))
10730 num_stats += BNX2X_NUM_STATS; 10657 num_stats += BNX2X_NUM_STATS;
10731 } else { 10658 } else {
@@ -10756,7 +10683,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10756 case ETH_SS_STATS: 10683 case ETH_SS_STATS:
10757 if (is_multi(bp)) { 10684 if (is_multi(bp)) {
10758 k = 0; 10685 k = 0;
10759 for_each_rx_queue(bp, i) { 10686 for_each_queue(bp, i) {
10760 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) 10687 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10761 sprintf(buf + (k + j)*ETH_GSTRING_LEN, 10688 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10762 bnx2x_q_stats_arr[j].string, i); 10689 bnx2x_q_stats_arr[j].string, i);
@@ -10793,7 +10720,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
10793 10720
10794 if (is_multi(bp)) { 10721 if (is_multi(bp)) {
10795 k = 0; 10722 k = 0;
10796 for_each_rx_queue(bp, i) { 10723 for_each_queue(bp, i) {
10797 hw_stats = (u32 *)&bp->fp[i].eth_q_stats; 10724 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10798 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { 10725 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10799 if (bnx2x_q_stats_arr[j].size == 0) { 10726 if (bnx2x_q_stats_arr[j].size == 0) {
@@ -10989,54 +10916,60 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10989 10916
10990static int bnx2x_poll(struct napi_struct *napi, int budget) 10917static int bnx2x_poll(struct napi_struct *napi, int budget)
10991{ 10918{
10919 int work_done = 0;
10992 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, 10920 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10993 napi); 10921 napi);
10994 struct bnx2x *bp = fp->bp; 10922 struct bnx2x *bp = fp->bp;
10995 int work_done = 0;
10996 10923
10924 while (1) {
10997#ifdef BNX2X_STOP_ON_ERROR 10925#ifdef BNX2X_STOP_ON_ERROR
10998 if (unlikely(bp->panic)) 10926 if (unlikely(bp->panic)) {
10999 goto poll_panic; 10927 napi_complete(napi);
10928 return 0;
10929 }
11000#endif 10930#endif
11001 10931
11002 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb); 10932 if (bnx2x_has_tx_work(fp))
11003 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256); 10933 bnx2x_tx_int(fp);
11004
11005 bnx2x_update_fpsb_idx(fp);
11006
11007 if (bnx2x_has_rx_work(fp)) {
11008 work_done = bnx2x_rx_int(fp, budget);
11009 10934
11010 /* must not complete if we consumed full budget */ 10935 if (bnx2x_has_rx_work(fp)) {
11011 if (work_done >= budget) 10936 work_done += bnx2x_rx_int(fp, budget - work_done);
11012 goto poll_again;
11013 }
11014 10937
11015 /* bnx2x_has_rx_work() reads the status block, thus we need to 10938 /* must not complete if we consumed full budget */
11016 * ensure that status block indices have been actually read 10939 if (work_done >= budget)
11017 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work) 10940 break;
11018 * so that we won't write the "newer" value of the status block to IGU 10941 }
11019 * (if there was a DMA right after bnx2x_has_rx_work and
11020 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
11021 * may be postponed to right before bnx2x_ack_sb). In this case
11022 * there will never be another interrupt until there is another update
11023 * of the status block, while there is still unhandled work.
11024 */
11025 rmb();
11026 10942
11027 if (!bnx2x_has_rx_work(fp)) { 10943 /* Fall out from the NAPI loop if needed */
11028#ifdef BNX2X_STOP_ON_ERROR 10944 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
11029poll_panic: 10945 bnx2x_update_fpsb_idx(fp);
11030#endif 10946 /* bnx2x_has_rx_work() reads the status block, thus we need
11031 napi_complete(napi); 10947 * to ensure that status block indices have been actually read
10948 * (bnx2x_update_fpsb_idx) prior to this check
10949 * (bnx2x_has_rx_work) so that we won't write the "newer"
10950 * value of the status block to IGU (if there was a DMA right
10951 * after bnx2x_has_rx_work and if there is no rmb, the memory
10952 * reading (bnx2x_update_fpsb_idx) may be postponed to right
10953 * before bnx2x_ack_sb). In this case there will never be
10954 * another interrupt until there is another update of the
10955 * status block, while there is still unhandled work.
10956 */
10957 rmb();
11032 10958
11033 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 10959 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
11034 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1); 10960 napi_complete(napi);
11035 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 10961 /* Re-enable interrupts */
11036 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1); 10962 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10963 le16_to_cpu(fp->fp_c_idx),
10964 IGU_INT_NOP, 1);
10965 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10966 le16_to_cpu(fp->fp_u_idx),
10967 IGU_INT_ENABLE, 1);
10968 break;
10969 }
10970 }
11037 } 10971 }
11038 10972
11039poll_again:
11040 return work_done; 10973 return work_done;
11041} 10974}
11042 10975
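Note: the rewritten bnx2x_poll() above is the canonical NAPI pattern for a combined rx/tx fastpath, which the side-by-side diff makes hard to read. Its control flow, stripped to a skeleton (helpers as in this driver):

	while (1) {
		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);
			if (work_done >= budget)
				break;		/* must not complete */
		}

		if (!bnx2x_has_rx_work(fp) && !bnx2x_has_tx_work(fp)) {
			bnx2x_update_fpsb_idx(fp);
			rmb();	/* order sb index read vs. the re-check */
			if (!bnx2x_has_rx_work(fp) &&
			    !bnx2x_has_tx_work(fp)) {
				napi_complete(napi);
				/* ack sb, re-enable interrupts */
				break;
			}
		}
	}
	return work_done;

The double check around the rmb() closes the race described in the comment: without it, a status-block update landing between the last has-work test and napi_complete() would never raise another interrupt.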
@@ -11221,7 +11154,7 @@ exit_lbl:
11221static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) 11154static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11222{ 11155{
11223 struct bnx2x *bp = netdev_priv(dev); 11156 struct bnx2x *bp = netdev_priv(dev);
11224 struct bnx2x_fastpath *fp, *fp_stat; 11157 struct bnx2x_fastpath *fp;
11225 struct netdev_queue *txq; 11158 struct netdev_queue *txq;
11226 struct sw_tx_bd *tx_buf; 11159 struct sw_tx_bd *tx_buf;
11227 struct eth_tx_start_bd *tx_start_bd; 11160 struct eth_tx_start_bd *tx_start_bd;
@@ -11243,11 +11176,10 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11243 fp_index = skb_get_queue_mapping(skb); 11176 fp_index = skb_get_queue_mapping(skb);
11244 txq = netdev_get_tx_queue(dev, fp_index); 11177 txq = netdev_get_tx_queue(dev, fp_index);
11245 11178
11246 fp = &bp->fp[fp_index + bp->num_rx_queues]; 11179 fp = &bp->fp[fp_index];
11247 fp_stat = &bp->fp[fp_index];
11248 11180
11249 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) { 11181 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
11250 fp_stat->eth_q_stats.driver_xoff++; 11182 fp->eth_q_stats.driver_xoff++;
11251 netif_tx_stop_queue(txq); 11183 netif_tx_stop_queue(txq);
11252 BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); 11184 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11253 return NETDEV_TX_BUSY; 11185 return NETDEV_TX_BUSY;
@@ -11473,7 +11405,7 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11473 11405
11474 fp->tx_db.data.prod += nbd; 11406 fp->tx_db.data.prod += nbd;
11475 barrier(); 11407 barrier();
11476 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw); 11408 DOORBELL(bp, fp->index, fp->tx_db.raw);
11477 11409
11478 mmiowb(); 11410 mmiowb();
11479 11411
@@ -11484,11 +11416,11 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11484 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod 11416 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11485 if we put Tx into XOFF state. */ 11417 if we put Tx into XOFF state. */
11486 smp_mb(); 11418 smp_mb();
11487 fp_stat->eth_q_stats.driver_xoff++; 11419 fp->eth_q_stats.driver_xoff++;
11488 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) 11420 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11489 netif_tx_wake_queue(txq); 11421 netif_tx_wake_queue(txq);
11490 } 11422 }
11491 fp_stat->tx_pkt++; 11423 fp->tx_pkt++;
11492 11424
11493 return NETDEV_TX_OK; 11425 return NETDEV_TX_OK;
11494} 11426}
@@ -12376,9 +12308,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12376 12308
12377 /* Free SKBs, SGEs, TPA pool and driver internals */ 12309 /* Free SKBs, SGEs, TPA pool and driver internals */
12378 bnx2x_free_skbs(bp); 12310 bnx2x_free_skbs(bp);
12379 for_each_rx_queue(bp, i) 12311 for_each_queue(bp, i)
12380 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 12312 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12381 for_each_rx_queue(bp, i) 12313 for_each_queue(bp, i)
12382 netif_napi_del(&bnx2x_fp(bp, i, napi)); 12314 netif_napi_del(&bnx2x_fp(bp, i, napi));
12383 bnx2x_free_mem(bp); 12315 bnx2x_free_mem(bp);
12384 12316
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 1d0581923287..88c3fe80b355 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -446,6 +446,48 @@ static u16 __ad_timer_to_ticks(u16 timer_type, u16 par)
446///////////////////////////////////////////////////////////////////////////////// 446/////////////////////////////////////////////////////////////////////////////////
447 447
448/** 448/**
449 * __choose_matched - update a port's matched variable from a received lacpdu
450 * @lacpdu: the lacpdu we've received
451 * @port: the port we're looking at
452 *
453 * Update the value of the matched variable, using parameter values from a
454 * newly received lacpdu. Parameter values for the partner carried in the
455 * received PDU are compared with the corresponding operational parameter
456 * values for the actor. Matched is set to TRUE if all of these parameters
457 * match and the PDU parameter partner_state.aggregation has the same value as
458 * actor_oper_port_state.aggregation and lacp will actively maintain the link
459 * in the aggregation. Matched is also set to TRUE if the value of
460 * actor_state.aggregation in the received PDU is set to FALSE, i.e., indicates
461 * an individual link and lacp will actively maintain the link. Otherwise,
462 * matched is set to FALSE. LACP is considered to be actively maintaining the
463 * link if either the PDU's actor_state.lacp_activity variable is TRUE or both
464 * the actor's actor_oper_port_state.lacp_activity and the PDU's
465 * partner_state.lacp_activity variables are TRUE.
466 *
467 * Note: the AD_PORT_MATCHED "variable" is not specified by 802.3ad; it is
468 * used here to implement the language from 802.3ad 43.4.9 that requires
469 * recordPDU to "match" the LACPDU parameters to the stored values.
470 */
471static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
472{
473 // check if all parameters are alike
474 if (((ntohs(lacpdu->partner_port) == port->actor_port_number) &&
475 (ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) &&
476 !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) &&
477 (ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
478 (ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
479 ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
480 // or this is individual link(aggregation == FALSE)
481 ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
482 ) {
483 // update the state machine Matched variable
484 port->sm_vars |= AD_PORT_MATCHED;
485 } else {
486 port->sm_vars &= ~AD_PORT_MATCHED;
487 }
488}
489
490/**
449 * __record_pdu - record parameters from a received lacpdu 491 * __record_pdu - record parameters from a received lacpdu
450 * @lacpdu: the lacpdu we've received 492 * @lacpdu: the lacpdu we've received
451 * @port: the port we're looking at 493 * @port: the port we're looking at
@@ -459,6 +501,7 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
459 if (lacpdu && port) { 501 if (lacpdu && port) {
460 struct port_params *partner = &port->partner_oper; 502 struct port_params *partner = &port->partner_oper;
461 503
504 __choose_matched(lacpdu, port);
462 // record the new parameter values for the partner operational 505 // record the new parameter values for the partner operational
463 partner->port_number = ntohs(lacpdu->actor_port); 506 partner->port_number = ntohs(lacpdu->actor_port);
464 partner->port_priority = ntohs(lacpdu->actor_port_priority); 507 partner->port_priority = ntohs(lacpdu->actor_port_priority);
@@ -563,47 +606,6 @@ static void __update_default_selected(struct port *port)
563} 606}
564 607
565/** 608/**
566 * __choose_matched - update a port's matched variable from a received lacpdu
567 * @lacpdu: the lacpdu we've received
568 * @port: the port we're looking at
569 *
570 * Update the value of the matched variable, using parameter values from a
571 * newly received lacpdu. Parameter values for the partner carried in the
572 * received PDU are compared with the corresponding operational parameter
573 * values for the actor. Matched is set to TRUE if all of these parameters
574 * match and the PDU parameter partner_state.aggregation has the same value as
575 * actor_oper_port_state.aggregation and lacp will actively maintain the link
576 * in the aggregation. Matched is also set to TRUE if the value of
577 * actor_state.aggregation in the received PDU is set to FALSE, i.e., indicates
578 * an individual link and lacp will actively maintain the link. Otherwise,
579 * matched is set to FALSE. LACP is considered to be actively maintaining the
580 * link if either the PDU's actor_state.lacp_activity variable is TRUE or both
581 * the actor's actor_oper_port_state.lacp_activity and the PDU's
582 * partner_state.lacp_activity variables are TRUE.
583 */
584static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
585{
586 // validate lacpdu and port
587 if (lacpdu && port) {
588 // check if all parameters are alike
589 if (((ntohs(lacpdu->partner_port) == port->actor_port_number) &&
590 (ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) &&
591 !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) &&
592 (ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
593 (ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
594 ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
595 // or this is individual link(aggregation == FALSE)
596 ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
597 ) {
598 // update the state machine Matched variable
599 port->sm_vars |= AD_PORT_MATCHED;
600 } else {
601 port->sm_vars &= ~AD_PORT_MATCHED;
602 }
603 }
604}
605
606/**
607 * __update_ntt - update a port's ntt variable from a received lacpdu 609 * __update_ntt - update a port's ntt variable from a received lacpdu
608 * @lacpdu: the lacpdu we've received 610 * @lacpdu: the lacpdu we've received
609 * @port: the port we're looking at 611 * @port: the port we're looking at
@@ -1134,7 +1136,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1134 __update_selected(lacpdu, port); 1136 __update_selected(lacpdu, port);
1135 __update_ntt(lacpdu, port); 1137 __update_ntt(lacpdu, port);
1136 __record_pdu(lacpdu, port); 1138 __record_pdu(lacpdu, port);
1137 __choose_matched(lacpdu, port);
1138 port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT)); 1139 port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT));
1139 port->actor_oper_port_state &= ~AD_STATE_EXPIRED; 1140 port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
1140 // verify that if the aggregator is enabled, the port is enabled too. 1141 // verify that if the aggregator is enabled, the port is enabled too.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ecea6c294132..726bd755338f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -158,7 +158,7 @@ MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the
158static const char * const version = 158static const char * const version =
159 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"; 159 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
160 160
161int bond_net_id; 161int bond_net_id __read_mostly;
162 162
163static __be32 arp_target[BOND_MAX_ARP_TARGETS]; 163static __be32 arp_target[BOND_MAX_ARP_TARGETS];
164static int arp_ip_count; 164static int arp_ip_count;
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index b819cc2a429e..bb803fa1e6a7 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -35,63 +35,9 @@ config CAN_CALC_BITTIMING
35 arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw". 35 arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw".
36 If unsure, say Y. 36 If unsure, say Y.
37 37
38config CAN_SJA1000
39 depends on CAN_DEV && HAS_IOMEM
40 tristate "Philips SJA1000"
41 ---help---
42 Driver for the SJA1000 CAN controllers from Philips or NXP
43
44config CAN_SJA1000_ISA
45 depends on CAN_SJA1000 && ISA
46 tristate "ISA Bus based legacy SJA1000 driver"
47 ---help---
48 This driver adds legacy support for SJA1000 chips connected to
49 the ISA bus using I/O port, memory mapped or indirect access.
50
51config CAN_SJA1000_PLATFORM
52 depends on CAN_SJA1000
53 tristate "Generic Platform Bus based SJA1000 driver"
54 ---help---
55 This driver adds support for the SJA1000 chips connected to
56 the "platform bus" (the Linux abstraction for devices
57 attached directly to the processor), as found on various
58 boards from Phytec (http://www.phytec.de) like the PCM027,
59 PCM038.
60
61config CAN_SJA1000_OF_PLATFORM
62 depends on CAN_SJA1000 && PPC_OF
63 tristate "Generic OF Platform Bus based SJA1000 driver"
64 ---help---
65 This driver adds support for the SJA1000 chips connected to
66 the OpenFirmware "platform bus" found on embedded systems with
67 OpenFirmware bindings, e.g. if you have a PowerPC based system
68 you may want to enable this option.
69
70config CAN_EMS_PCI
71 tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card"
72 depends on PCI && CAN_SJA1000
73 ---help---
74 This driver is for the one, two or four channel CPC-PCI,
75 CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche
76 (http://www.ems-wuensche.de).
77
78config CAN_EMS_USB
79 tristate "EMS CPC-USB/ARM7 CAN/USB interface"
80 depends on USB && CAN_DEV
81 ---help---
82 This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
83 from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
84
85config CAN_KVASER_PCI
86 tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
87 depends on PCI && CAN_SJA1000
88 ---help---
89 This driver is for the PCIcanx and PCIcan cards (1, 2 or
90 4 channel) from Kvaser (http://www.kvaser.com).
91
92config CAN_AT91 38config CAN_AT91
93 tristate "Atmel AT91 onchip CAN controller" 39 tristate "Atmel AT91 onchip CAN controller"
94 depends on CAN && CAN_DEV && ARCH_AT91SAM9263 40 depends on CAN_DEV && ARCH_AT91SAM9263
95 ---help--- 41 ---help---
96 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263. 42 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263.
97 43
@@ -108,6 +54,12 @@ config CAN_MCP251X
108 ---help--- 54 ---help---
109 Driver for the Microchip MCP251x SPI CAN controllers. 55 Driver for the Microchip MCP251x SPI CAN controllers.
110 56
57source "drivers/net/can/mscan/Kconfig"
58
59source "drivers/net/can/sja1000/Kconfig"
60
61source "drivers/net/can/usb/Kconfig"
62
111config CAN_DEBUG_DEVICES 63config CAN_DEBUG_DEVICES
112 bool "CAN devices debugging messages" 64 bool "CAN devices debugging messages"
113 depends on CAN 65 depends on CAN
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 14891817ea5b..56899fef1c6a 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -10,6 +10,7 @@ can-dev-y := dev.o
10obj-y += usb/ 10obj-y += usb/
11 11
12obj-$(CONFIG_CAN_SJA1000) += sja1000/ 12obj-$(CONFIG_CAN_SJA1000) += sja1000/
13obj-$(CONFIG_CAN_MSCAN) += mscan/
13obj-$(CONFIG_CAN_AT91) += at91_can.o 14obj-$(CONFIG_CAN_AT91) += at91_can.o
14obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o 15obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
15obj-$(CONFIG_CAN_MCP251X) += mcp251x.o 16obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 26c89aaeba62..c1bb29f0322b 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -677,6 +677,11 @@ nla_put_failure:
677 return -EMSGSIZE; 677 return -EMSGSIZE;
678} 678}
679 679
680static size_t can_get_xstats_size(const struct net_device *dev)
681{
682 return sizeof(struct can_device_stats);
683}
684
680static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev) 685static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
681{ 686{
682 struct can_priv *priv = netdev_priv(dev); 687 struct can_priv *priv = netdev_priv(dev);
@@ -705,6 +710,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
705 .changelink = can_changelink, 710 .changelink = can_changelink,
706 .get_size = can_get_size, 711 .get_size = can_get_size,
707 .fill_info = can_fill_info, 712 .fill_info = can_fill_info,
713 .get_xstats_size = can_get_xstats_size,
708 .fill_xstats = can_fill_xstats, 714 .fill_xstats = can_fill_xstats,
709}; 715};
710 716
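Note: the new get_xstats_size hook matters because rtnetlink sizes the dump message before fill_xstats runs; without it the IFLA_INFO_XSTATS attribute cannot be accounted for. Roughly what the core does with the pair (simplified from rtnetlink's link-size path, shown only for orientation, not code from this patch):

	/* inside the rtnl link-dump sizing, approximately: */
	if (ops->get_xstats_size)
		size += nla_total_size(ops->get_xstats_size(dev));
	/* ... later, ops->fill_xstats(skb, dev) must emit exactly that
	 * many bytes, here sizeof(struct can_device_stats) */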
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 8f48f4b50b7c..78b1b69b2921 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -594,13 +594,7 @@ static int mcp251x_do_set_bittiming(struct net_device *net)
594static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv, 594static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
595 struct spi_device *spi) 595 struct spi_device *spi)
596{ 596{
597 int ret; 597 mcp251x_do_set_bittiming(net);
598
599 ret = open_candev(net);
600 if (ret) {
601 dev_err(&spi->dev, "unable to set initial baudrate!\n");
602 return ret;
603 }
604 598
605 /* Enable RX0->RX1 buffer roll over and disable filters */ 599 /* Enable RX0->RX1 buffer roll over and disable filters */
606 mcp251x_write_bits(spi, RXBCTRL(0), 600 mcp251x_write_bits(spi, RXBCTRL(0),
@@ -671,6 +665,12 @@ static int mcp251x_open(struct net_device *net)
671 struct mcp251x_platform_data *pdata = spi->dev.platform_data; 665 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
672 int ret; 666 int ret;
673 667
668 ret = open_candev(net);
669 if (ret) {
670 dev_err(&spi->dev, "unable to set initial baudrate!\n");
671 return ret;
672 }
673
674 if (pdata->transceiver_enable) 674 if (pdata->transceiver_enable)
675 pdata->transceiver_enable(1); 675 pdata->transceiver_enable(1);
676 676
@@ -684,6 +684,7 @@ static int mcp251x_open(struct net_device *net)
684 dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); 684 dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
685 if (pdata->transceiver_enable) 685 if (pdata->transceiver_enable)
686 pdata->transceiver_enable(0); 686 pdata->transceiver_enable(0);
687 close_candev(net);
687 return ret; 688 return ret;
688 } 689 }
689 690
@@ -692,8 +693,10 @@ static int mcp251x_open(struct net_device *net)
692 ret = mcp251x_setup(net, priv, spi); 693 ret = mcp251x_setup(net, priv, spi);
693 if (ret) { 694 if (ret) {
694 free_irq(spi->irq, net); 695 free_irq(spi->irq, net);
696 mcp251x_hw_sleep(spi);
695 if (pdata->transceiver_enable) 697 if (pdata->transceiver_enable)
696 pdata->transceiver_enable(0); 698 pdata->transceiver_enable(0);
699 close_candev(net);
697 return ret; 700 return ret;
698 } 701 }
699 mcp251x_set_normal_mode(spi); 702 mcp251x_set_normal_mode(spi);
@@ -956,7 +959,6 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
956 priv->can.bittiming_const = &mcp251x_bittiming_const; 959 priv->can.bittiming_const = &mcp251x_bittiming_const;
957 priv->can.do_set_mode = mcp251x_do_set_mode; 960 priv->can.do_set_mode = mcp251x_do_set_mode;
958 priv->can.clock.freq = pdata->oscillator_frequency / 2; 961 priv->can.clock.freq = pdata->oscillator_frequency / 2;
959 priv->can.do_set_bittiming = mcp251x_do_set_bittiming;
960 priv->net = net; 962 priv->net = net;
961 dev_set_drvdata(&spi->dev, priv); 963 dev_set_drvdata(&spi->dev, priv);
962 964
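Note: the mcp251x hunks move open_candev() out of mcp251x_setup() and to the top of mcp251x_open(), so every later failure can unwind it; previously a failed setup left the candev open and a failed request_irq never closed it. The resulting open-path order, sketched with early returns as in the patch (the two "if (ret)" checks correspond to the request_irq and mcp251x_setup failures it handles):

	ret = open_candev(net);			/* validate bittiming first */
	if (ret)
		return ret;
	/* enable transceiver */
	ret = request_irq(spi->irq, ...);
	if (ret) {
		/* disable transceiver */
		close_candev(net);		/* undo open_candev() */
		return ret;
	}
	ret = mcp251x_setup(net, priv, spi);	/* no longer opens candev */
	if (ret) {
		free_irq(spi->irq, net);
		mcp251x_hw_sleep(spi);
		/* disable transceiver */
		close_candev(net);
		return ret;
	}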
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
new file mode 100644
index 000000000000..cd0f2d6f375d
--- /dev/null
+++ b/drivers/net/can/mscan/Kconfig
@@ -0,0 +1,23 @@
1config CAN_MSCAN
2 depends on CAN_DEV && (PPC || M68K || M68KNOMMU)
3 tristate "Support for Freescale MSCAN based chips"
4 ---help---
5 The Motorola Scalable Controller Area Network (MSCAN) definition
6 is based on the MSCAN12 definition which is the specific
7 implementation of the Motorola Scalable CAN concept targeted for
8 the Motorola MC68HC12 Microcontroller Family.
9
10if CAN_MSCAN
11
12config CAN_MPC5XXX
13 tristate "Freescale MPC5xxx onboard CAN controller"
14 depends on PPC_MPC52xx
15 ---help---
16 If you say yes here you get support for Freescale's MPC5xxx
17 onboard CAN controller.
18
19 This driver can also be built as a module. If so, the module
20 will be called mscan-mpc5xxx.ko.
21
22endif
23
diff --git a/drivers/net/can/mscan/Makefile b/drivers/net/can/mscan/Makefile
new file mode 100644
index 000000000000..c9fab17cd8b4
--- /dev/null
+++ b/drivers/net/can/mscan/Makefile
@@ -0,0 +1,5 @@
1
2obj-$(CONFIG_CAN_MPC5XXX) += mscan-mpc5xxx.o
3mscan-mpc5xxx-objs := mscan.o mpc5xxx_can.o
4
5ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
new file mode 100644
index 000000000000..1de6f6349b16
--- /dev/null
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -0,0 +1,259 @@
1/*
2 * CAN bus driver for the Freescale MPC5xxx embedded CPU.
3 *
4 * Copyright (C) 2004-2005 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy
6 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
7 * Copyright (C) 2009 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the version 2 of the GNU General Public License
11 * as published by the Free Software Foundation
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/platform_device.h>
27#include <linux/netdevice.h>
28#include <linux/can.h>
29#include <linux/can/dev.h>
30#include <linux/of_platform.h>
31#include <sysdev/fsl_soc.h>
32#include <linux/io.h>
33#include <asm/mpc52xx.h>
34
35#include "mscan.h"
36
37#define DRV_NAME "mpc5xxx_can"
38
39static struct of_device_id mpc52xx_cdm_ids[] __devinitdata = {
40 { .compatible = "fsl,mpc5200-cdm", },
41 {}
42};
43
44/*
45 * Get frequency of the MSCAN clock source
46 *
47 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock (IP_CLK)
48 * can be selected. According to the MPC5200 user's manual, the oscillator
49 * clock is the better choice as it has less jitter but due to a hardware
50 * bug, it can not be selected for the old MPC5200 Rev. A chips.
51 */
52
53static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
54 int clock_src)
55{
56 unsigned int pvr;
57 struct mpc52xx_cdm __iomem *cdm;
58 struct device_node *np_cdm;
59 unsigned int freq;
60 u32 val;
61
62 pvr = mfspr(SPRN_PVR);
63
64 freq = mpc5xxx_get_bus_frequency(of->node);
65 if (!freq)
66 return 0;
67
68 if (clock_src == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
69 return freq;
70
71 /* Determine SYS_XTAL_IN frequency from the clock domain settings */
72 np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids);
73 if (!np_cdm) {
74 dev_err(&of->dev, "can't get clock node!\n");
75 return 0;
76 }
77 cdm = of_iomap(np_cdm, 0);
78 of_node_put(np_cdm);
79
80 if (in_8(&cdm->ipb_clk_sel) & 0x1)
81 freq *= 2;
82 val = in_be32(&cdm->rstcfg);
83
84 freq *= (val & (1 << 5)) ? 8 : 4;
85 freq /= (val & (1 << 6)) ? 12 : 16;
86
87 iounmap(cdm);
88
89 return freq;
90}
91
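Note: a worked example of the derivation above, with register values assumed for a typical MPC5200B board (not taken from the patch): IPB bus clock 66 MHz, ipb_clk_sel bit 0 set, rstcfg bits 5 and 6 both clear:

	freq = 66000000;	/* mpc5xxx_get_bus_frequency() */
	freq *= 2;		/* ipb_clk_sel: IPB is XLB/2 -> 132 MHz */
	freq *= 4;		/* rstcfg bit 5 clear        -> 528 MHz */
	freq /= 16;		/* rstcfg bit 6 clear        ->  33 MHz */

which recovers the 33 MHz SYS_XTAL_IN oscillator.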
92static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
93 const struct of_device_id *id)
94{
95 struct device_node *np = ofdev->node;
96 struct net_device *dev;
97 struct mscan_priv *priv;
98 void __iomem *base;
99 const char *clk_src;
100 int err, irq, clock_src;
101
102 base = of_iomap(ofdev->node, 0);
103 if (!base) {
104 dev_err(&ofdev->dev, "couldn't ioremap\n");
105 err = -ENOMEM;
106 goto exit_release_mem;
107 }
108
109 irq = irq_of_parse_and_map(np, 0);
110 if (!irq) {
111 dev_err(&ofdev->dev, "no irq found\n");
112 err = -ENODEV;
113 goto exit_unmap_mem;
114 }
115
116 dev = alloc_mscandev();
117 if (!dev) {
118 err = -ENOMEM;
119 goto exit_dispose_irq;
120 }
121
122 priv = netdev_priv(dev);
123 priv->reg_base = base;
124 dev->irq = irq;
125
126 /*
127 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
128 * (IP_CLK) can be selected as MSCAN clock source. According to
129 * the MPC5200 user's manual, the oscillator clock is the better
130 * choice as it has less jitter. For this reason, it is selected
131 * by default.
132 */
133 clk_src = of_get_property(np, "fsl,mscan-clock-source", NULL);
134 if (clk_src && strcmp(clk_src, "ip") == 0)
135 clock_src = MSCAN_CLKSRC_BUS;
136 else
137 clock_src = MSCAN_CLKSRC_XTAL;
138 priv->can.clock.freq = mpc52xx_can_clock_freq(ofdev, clock_src);
139 if (!priv->can.clock.freq) {
140 dev_err(&ofdev->dev, "couldn't get MSCAN clock frequency\n");
141 err = -ENODEV;
142 goto exit_free_mscan;
143 }
144
145 SET_NETDEV_DEV(dev, &ofdev->dev);
146
147 err = register_mscandev(dev, clock_src);
148 if (err) {
149 dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
150 DRV_NAME, err);
151 goto exit_free_mscan;
152 }
153
154 dev_set_drvdata(&ofdev->dev, dev);
155
156 dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
157 priv->reg_base, dev->irq, priv->can.clock.freq);
158
159 return 0;
160
161exit_free_mscan:
162 free_candev(dev);
163exit_dispose_irq:
164 irq_dispose_mapping(irq);
165exit_unmap_mem:
166 iounmap(base);
167exit_release_mem:
168 return err;
169}
170
171static int __devexit mpc5xxx_can_remove(struct of_device *ofdev)
172{
173 struct net_device *dev = dev_get_drvdata(&ofdev->dev);
174 struct mscan_priv *priv = netdev_priv(dev);
175
176 dev_set_drvdata(&ofdev->dev, NULL);
177
178 unregister_mscandev(dev);
179 iounmap(priv->reg_base);
180 irq_dispose_mapping(dev->irq);
181 free_candev(dev);
182
183 return 0;
184}
185
186#ifdef CONFIG_PM
187static struct mscan_regs saved_regs;
188static int mpc5xxx_can_suspend(struct of_device *ofdev, pm_message_t state)
189{
190 struct net_device *dev = dev_get_drvdata(&ofdev->dev);
191 struct mscan_priv *priv = netdev_priv(dev);
192 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
193
194 _memcpy_fromio(&saved_regs, regs, sizeof(*regs));
195
196 return 0;
197}
198
199static int mpc5xxx_can_resume(struct of_device *ofdev)
200{
201 struct net_device *dev = dev_get_drvdata(&ofdev->dev);
202 struct mscan_priv *priv = netdev_priv(dev);
203 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
204
205 regs->canctl0 |= MSCAN_INITRQ;
206 while (!(regs->canctl1 & MSCAN_INITAK))
207 udelay(10);
208
209 regs->canctl1 = saved_regs.canctl1;
210 regs->canbtr0 = saved_regs.canbtr0;
211 regs->canbtr1 = saved_regs.canbtr1;
212 regs->canidac = saved_regs.canidac;
213
214 /* restore masks, buffers etc. */
215 _memcpy_toio(&regs->canidar1_0, (void *)&saved_regs.canidar1_0,
216 sizeof(*regs) - offsetof(struct mscan_regs, canidar1_0));
217
218 regs->canctl0 &= ~MSCAN_INITRQ;
219 regs->cantbsel = saved_regs.cantbsel;
220 regs->canrier = saved_regs.canrier;
221 regs->cantier = saved_regs.cantier;
222 regs->canctl0 = saved_regs.canctl0;
223
224 return 0;
225}
226#endif
227
228static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
229 {.compatible = "fsl,mpc5200-mscan"},
230 {},
231};
232
233static struct of_platform_driver mpc5xxx_can_driver = {
234 .owner = THIS_MODULE,
235 .name = "mpc5xxx_can",
236 .probe = mpc5xxx_can_probe,
237 .remove = __devexit_p(mpc5xxx_can_remove),
238#ifdef CONFIG_PM
239 .suspend = mpc5xxx_can_suspend,
240 .resume = mpc5xxx_can_resume,
241#endif
242 .match_table = mpc5xxx_can_table,
243};
244
245static int __init mpc5xxx_can_init(void)
246{
247 return of_register_platform_driver(&mpc5xxx_can_driver);
248}
249module_init(mpc5xxx_can_init);
250
251static void __exit mpc5xxx_can_exit(void)
252{
253 return of_unregister_platform_driver(&mpc5xxx_can_driver);
254};
255module_exit(mpc5xxx_can_exit);
256
257MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
258MODULE_DESCRIPTION("Freescale MPC5200 CAN driver");
259MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
new file mode 100644
index 000000000000..bb06dfb58f25
--- /dev/null
+++ b/drivers/net/can/mscan/mscan.c
@@ -0,0 +1,668 @@
1/*
2 * CAN bus driver for the generic (as far as possible) MSCAN controller.
3 *
4 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy
6 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
7 * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the version 2 of the GNU General Public License
11 * as published by the Free Software Foundation
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/delay.h>
27#include <linux/netdevice.h>
28#include <linux/if_arp.h>
29#include <linux/if_ether.h>
30#include <linux/list.h>
31#include <linux/can.h>
32#include <linux/can/dev.h>
33#include <linux/can/error.h>
34#include <linux/io.h>
35
36#include "mscan.h"
37
38static struct can_bittiming_const mscan_bittiming_const = {
39 .name = "mscan",
40 .tseg1_min = 4,
41 .tseg1_max = 16,
42 .tseg2_min = 2,
43 .tseg2_max = 8,
44 .sjw_max = 4,
45 .brp_min = 1,
46 .brp_max = 64,
47 .brp_inc = 1,
48};
49
50struct mscan_state {
51 u8 mode;
52 u8 canrier;
53 u8 cantier;
54};
55
56static enum can_state state_map[] = {
57 CAN_STATE_ERROR_ACTIVE,
58 CAN_STATE_ERROR_WARNING,
59 CAN_STATE_ERROR_PASSIVE,
60 CAN_STATE_BUS_OFF
61};
62
63static int mscan_set_mode(struct net_device *dev, u8 mode)
64{
65 struct mscan_priv *priv = netdev_priv(dev);
66 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
67 int ret = 0;
68 int i;
69 u8 canctl1;
70
71 if (mode != MSCAN_NORMAL_MODE) {
72 if (priv->tx_active) {
73 /* Abort transfers before going to sleep */
74 out_8(&regs->cantarq, priv->tx_active);
75 /* Suppress TX done interrupts */
76 out_8(&regs->cantier, 0);
77 }
78
79 canctl1 = in_8(&regs->canctl1);
80 if ((mode & MSCAN_SLPRQ) && !(canctl1 & MSCAN_SLPAK)) {
81 setbits8(&regs->canctl0, MSCAN_SLPRQ);
82 for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
83 if (in_8(&regs->canctl1) & MSCAN_SLPAK)
84 break;
85 udelay(100);
86 }
87 /*
88 * The MSCAN controller will fail to enter sleep mode
89 * while there is irregular activity on the bus, e.g. if
90 * somebody keeps retransmitting. This behavior is
91 * undocumented and seems to differ between the MSCAN built
92 * into the MPC5200B and the MPC5200. We proceed in that
93 * case anyway, since otherwise SLPRQ would be kept set and
94 * the controller would get stuck. NOTE: INITRQ or CSWAI
95 * will abort all transmit actions at once, if any are
96 * still active.
97 */
98 if (i >= MSCAN_SET_MODE_RETRIES)
99 dev_dbg(dev->dev.parent,
100 "device failed to enter sleep mode. "
101 "We proceed anyhow.\n");
102 else
103 priv->can.state = CAN_STATE_SLEEPING;
104 }
105
106 if ((mode & MSCAN_INITRQ) && !(canctl1 & MSCAN_INITAK)) {
107 setbits8(&regs->canctl0, MSCAN_INITRQ);
108 for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
109 if (in_8(&regs->canctl1) & MSCAN_INITAK)
110 break;
111 }
112 if (i >= MSCAN_SET_MODE_RETRIES)
113 ret = -ENODEV;
114 }
115 if (!ret)
116 priv->can.state = CAN_STATE_STOPPED;
117
118 if (mode & MSCAN_CSWAI)
119 setbits8(&regs->canctl0, MSCAN_CSWAI);
120
121 } else {
122 canctl1 = in_8(&regs->canctl1);
123 if (canctl1 & (MSCAN_SLPAK | MSCAN_INITAK)) {
124 clrbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
125 for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
126 canctl1 = in_8(&regs->canctl1);
127 if (!(canctl1 & (MSCAN_INITAK | MSCAN_SLPAK)))
128 break;
129 }
130 if (i >= MSCAN_SET_MODE_RETRIES)
131 ret = -ENODEV;
132 else
133 priv->can.state = CAN_STATE_ERROR_ACTIVE;
134 }
135 }
136 return ret;
137}
138
139static int mscan_start(struct net_device *dev)
140{
141 struct mscan_priv *priv = netdev_priv(dev);
142 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
143 u8 canrflg;
144 int err;
145
146 out_8(&regs->canrier, 0);
147
148 INIT_LIST_HEAD(&priv->tx_head);
149 priv->prev_buf_id = 0;
150 priv->cur_pri = 0;
151 priv->tx_active = 0;
152 priv->shadow_canrier = 0;
153 priv->flags = 0;
154
155 err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
156 if (err)
157 return err;
158
159 canrflg = in_8(&regs->canrflg);
160 priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
161 priv->can.state = state_map[max(MSCAN_STATE_RX(canrflg),
162 MSCAN_STATE_TX(canrflg))];
163 out_8(&regs->cantier, 0);
164
165 /* Enable receive interrupts. */
166 out_8(&regs->canrier, MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE |
167 MSCAN_RSTATE1 | MSCAN_RSTATE0 | MSCAN_TSTATE1 | MSCAN_TSTATE0);
168
169 return 0;
170}
171
172static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
173{
174 struct can_frame *frame = (struct can_frame *)skb->data;
175 struct mscan_priv *priv = netdev_priv(dev);
176 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
177 int i, rtr, buf_id;
178 u32 can_id;
179
180 if (frame->can_dlc > 8)
181 return -EINVAL;
182
183 out_8(&regs->cantier, 0);
184
185 i = ~priv->tx_active & MSCAN_TXE;
186 buf_id = ffs(i) - 1;
187 switch (hweight8(i)) {
188 case 0:
189 netif_stop_queue(dev);
190 dev_err(dev->dev.parent, "Tx Ring full when queue awake!\n");
191 return NETDEV_TX_BUSY;
192 case 1:
193 /*
194 * if buf_id < 3, the current frame will be sent out of order,
195 * since buffers with a lower id have higher priority (hell..)
196 */
197 netif_stop_queue(dev);
198 case 2:
199 if (buf_id < priv->prev_buf_id) {
200 priv->cur_pri++;
201 if (priv->cur_pri == 0xff) {
202 set_bit(F_TX_WAIT_ALL, &priv->flags);
203 netif_stop_queue(dev);
204 }
205 }
206 set_bit(F_TX_PROGRESS, &priv->flags);
207 break;
208 }
209 priv->prev_buf_id = buf_id;
210 out_8(&regs->cantbsel, i);
211
212 rtr = frame->can_id & CAN_RTR_FLAG;
213
214 /* RTR is always the lowest bit of interest, then IDs follow */
215 if (frame->can_id & CAN_EFF_FLAG) {
216 can_id = (frame->can_id & CAN_EFF_MASK)
217 << (MSCAN_EFF_RTR_SHIFT + 1);
218 if (rtr)
219 can_id |= 1 << MSCAN_EFF_RTR_SHIFT;
220 out_be16(&regs->tx.idr3_2, can_id);
221
222 can_id >>= 16;
223 /* EFF_FLAGS sit in between the IDs :( */
224 can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0)
225 | MSCAN_EFF_FLAGS;
226 } else {
227 can_id = (frame->can_id & CAN_SFF_MASK)
228 << (MSCAN_SFF_RTR_SHIFT + 1);
229 if (rtr)
230 can_id |= 1 << MSCAN_SFF_RTR_SHIFT;
231 }
232 out_be16(&regs->tx.idr1_0, can_id);
233
234 if (!rtr) {
235 void __iomem *data = &regs->tx.dsr1_0;
236 u16 *payload = (u16 *)frame->data;
237
238 /* It is safe to write into dsr[dlc+1] */
239 for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
240 out_be16(data, *payload++);
241 data += 2 + _MSCAN_RESERVED_DSR_SIZE;
242 }
243 }
244
245 out_8(&regs->tx.dlr, frame->can_dlc);
246 out_8(&regs->tx.tbpr, priv->cur_pri);
247
248 /* Start transmission. */
249 out_8(&regs->cantflg, 1 << buf_id);
250
251 if (!test_bit(F_TX_PROGRESS, &priv->flags))
252 dev->trans_start = jiffies;
253
254 list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);
255
256 can_put_echo_skb(skb, dev, buf_id);
257
258 /* Enable interrupt. */
259 priv->tx_active |= 1 << buf_id;
260 out_8(&regs->cantier, priv->tx_active);
261
262 return NETDEV_TX_OK;
263}
264
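Note: the buf_id / cur_pri bookkeeping in mscan_start_xmit() above works around the MSCAN arbitration rule that, among pending buffers, lower priority-register values (and, on equal priority, lower buffer ids) are sent first. The core of it, condensed with explanatory comments added:

	if (buf_id < priv->prev_buf_id) {	/* wrapped to a lower buffer id */
		priv->cur_pri++;		/* demote this frame behind older ones */
		if (priv->cur_pri == 0xff) {	/* 8-bit priority about to wrap */
			set_bit(F_TX_WAIT_ALL, &priv->flags);
			netif_stop_queue(dev);	/* drain fully before reusing 0 */
		}
	}

so frames leave the wire in submission order even though the hardware buffers are reused round-robin.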
265/* This function returns the old state to see where we came from */
266static enum can_state check_set_state(struct net_device *dev, u8 canrflg)
267{
268 struct mscan_priv *priv = netdev_priv(dev);
269 enum can_state state, old_state = priv->can.state;
270
271 if (canrflg & MSCAN_CSCIF && old_state <= CAN_STATE_BUS_OFF) {
272 state = state_map[max(MSCAN_STATE_RX(canrflg),
273 MSCAN_STATE_TX(canrflg))];
274 priv->can.state = state;
275 }
276 return old_state;
277}
278
279static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
280{
281 struct mscan_priv *priv = netdev_priv(dev);
282 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
283 u32 can_id;
284 int i;
285
286 can_id = in_be16(&regs->rx.idr1_0);
287 if (can_id & (1 << 3)) {
288 frame->can_id = CAN_EFF_FLAG;
289 can_id = ((can_id << 16) | in_be16(&regs->rx.idr3_2));
290 can_id = ((can_id & 0xffe00000) |
291 ((can_id & 0x7ffff) << 2)) >> 2;
292 } else {
293 can_id >>= 4;
294 frame->can_id = 0;
295 }
296
297 frame->can_id |= can_id >> 1;
298 if (can_id & 1)
299 frame->can_id |= CAN_RTR_FLAG;
300 frame->can_dlc = in_8(&regs->rx.dlr) & 0xf;
301
302 if (!(frame->can_id & CAN_RTR_FLAG)) {
303 void __iomem *data = &regs->rx.dsr1_0;
304 u16 *payload = (u16 *)frame->data;
305
306 for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
307 *payload++ = in_be16(data);
308 data += 2 + _MSCAN_RESERVED_DSR_SIZE;
309 }
310 }
311
312 out_8(&regs->canrflg, MSCAN_RXF);
313}
314
315static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
316 u8 canrflg)
317{
318 struct mscan_priv *priv = netdev_priv(dev);
319 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
320 struct net_device_stats *stats = &dev->stats;
321 enum can_state old_state;
322
323 dev_dbg(dev->dev.parent, "error interrupt (canrflg=%#x)\n", canrflg);
324 frame->can_id = CAN_ERR_FLAG;
325
326 if (canrflg & MSCAN_OVRIF) {
327 frame->can_id |= CAN_ERR_CRTL;
328 frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
329 stats->rx_over_errors++;
330 stats->rx_errors++;
331 } else {
332 frame->data[1] = 0;
333 }
334
335 old_state = check_set_state(dev, canrflg);
336 /* State changed */
337 if (old_state != priv->can.state) {
338 switch (priv->can.state) {
339 case CAN_STATE_ERROR_WARNING:
340 frame->can_id |= CAN_ERR_CRTL;
341 priv->can.can_stats.error_warning++;
342 if ((priv->shadow_statflg & MSCAN_RSTAT_MSK) <
343 (canrflg & MSCAN_RSTAT_MSK))
344 frame->data[1] |= CAN_ERR_CRTL_RX_WARNING;
345 if ((priv->shadow_statflg & MSCAN_TSTAT_MSK) <
346 (canrflg & MSCAN_TSTAT_MSK))
347 frame->data[1] |= CAN_ERR_CRTL_TX_WARNING;
348 break;
349 case CAN_STATE_ERROR_PASSIVE:
350 frame->can_id |= CAN_ERR_CRTL;
351 priv->can.can_stats.error_passive++;
352 frame->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
353 break;
354 case CAN_STATE_BUS_OFF:
355 frame->can_id |= CAN_ERR_BUSOFF;
356 /*
357			 * The MSCAN on the MPC5200 recovers from bus-off
358			 * automatically. To avoid that, we stop the chip by
359			 * doing a light-weight stop (we are in irq context).
360 */
361 out_8(&regs->cantier, 0);
362 out_8(&regs->canrier, 0);
363 setbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
364 can_bus_off(dev);
365 break;
366 default:
367 break;
368 }
369 }
370 priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
371 frame->can_dlc = CAN_ERR_DLC;
372 out_8(&regs->canrflg, MSCAN_ERR_IF);
373}
374
375static int mscan_rx_poll(struct napi_struct *napi, int quota)
376{
377 struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi);
378 struct net_device *dev = napi->dev;
379 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
380 struct net_device_stats *stats = &dev->stats;
381 int npackets = 0;
382 int ret = 1;
383 struct sk_buff *skb;
384 struct can_frame *frame;
385 u8 canrflg;
386
387 while (npackets < quota) {
388 canrflg = in_8(&regs->canrflg);
389 if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
390 break;
391
392 skb = alloc_can_skb(dev, &frame);
393 if (!skb) {
394 if (printk_ratelimit())
395 dev_notice(dev->dev.parent, "packet dropped\n");
396 stats->rx_dropped++;
397 out_8(&regs->canrflg, canrflg);
398 continue;
399 }
400
401 if (canrflg & MSCAN_RXF)
402 mscan_get_rx_frame(dev, frame);
403 else if (canrflg & MSCAN_ERR_IF)
404 mscan_get_err_frame(dev, frame, canrflg);
405
406 stats->rx_packets++;
407 stats->rx_bytes += frame->can_dlc;
408 npackets++;
409 netif_receive_skb(skb);
410 }
411
412 if (!(in_8(&regs->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) {
413 napi_complete(&priv->napi);
414 clear_bit(F_RX_PROGRESS, &priv->flags);
415 if (priv->can.state < CAN_STATE_BUS_OFF)
416 out_8(&regs->canrier, priv->shadow_canrier);
417 ret = 0;
418 }
419 return ret;
420}
421
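mscan_rx_poll() above follows the standard NAPI contract: consume events up to the quota, and re-enable the receiver interrupts only once the hardware is drained. The generic shape, as a hedged sketch (hw_has_work(), process_one_event() and hw_unmask_irqs() are placeholders, not mscan functions):

static int example_poll(struct napi_struct *napi, int quota)
{
	int work = 0;

	while (work < quota && hw_has_work())
		work += process_one_event();

	if (!hw_has_work()) {
		napi_complete(napi);	/* take us off the poll list */
		hw_unmask_irqs();	/* the ISR masked these before napi_schedule() */
	}
	return work;
}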
422static irqreturn_t mscan_isr(int irq, void *dev_id)
423{
424 struct net_device *dev = (struct net_device *)dev_id;
425 struct mscan_priv *priv = netdev_priv(dev);
426 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
427 struct net_device_stats *stats = &dev->stats;
428 u8 cantier, cantflg, canrflg;
429 irqreturn_t ret = IRQ_NONE;
430
431 cantier = in_8(&regs->cantier) & MSCAN_TXE;
432 cantflg = in_8(&regs->cantflg) & cantier;
433
434 if (cantier && cantflg) {
435 struct list_head *tmp, *pos;
436
437 list_for_each_safe(pos, tmp, &priv->tx_head) {
438 struct tx_queue_entry *entry =
439 list_entry(pos, struct tx_queue_entry, list);
440 u8 mask = entry->mask;
441
442 if (!(cantflg & mask))
443 continue;
444
445 out_8(&regs->cantbsel, mask);
446 stats->tx_bytes += in_8(&regs->tx.dlr);
447 stats->tx_packets++;
448 can_get_echo_skb(dev, entry->id);
449 priv->tx_active &= ~mask;
450 list_del(pos);
451 }
452
453 if (list_empty(&priv->tx_head)) {
454 clear_bit(F_TX_WAIT_ALL, &priv->flags);
455 clear_bit(F_TX_PROGRESS, &priv->flags);
456 priv->cur_pri = 0;
457 } else {
458 dev->trans_start = jiffies;
459 }
460
461 if (!test_bit(F_TX_WAIT_ALL, &priv->flags))
462 netif_wake_queue(dev);
463
464 out_8(&regs->cantier, priv->tx_active);
465 ret = IRQ_HANDLED;
466 }
467
468 canrflg = in_8(&regs->canrflg);
469 if ((canrflg & ~MSCAN_STAT_MSK) &&
470 !test_and_set_bit(F_RX_PROGRESS, &priv->flags)) {
471 if (canrflg & ~MSCAN_STAT_MSK) {
472 priv->shadow_canrier = in_8(&regs->canrier);
473 out_8(&regs->canrier, 0);
474 napi_schedule(&priv->napi);
475 ret = IRQ_HANDLED;
476 } else {
477 clear_bit(F_RX_PROGRESS, &priv->flags);
478 }
479 }
480 return ret;
481}
482
483static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
484{
485 struct mscan_priv *priv = netdev_priv(dev);
486 int ret = 0;
487
488 if (!priv->open_time)
489 return -EINVAL;
490
491 switch (mode) {
492 case CAN_MODE_START:
493 if (priv->can.state <= CAN_STATE_BUS_OFF)
494 mscan_set_mode(dev, MSCAN_INIT_MODE);
495 ret = mscan_start(dev);
496 if (ret)
497 break;
498 if (netif_queue_stopped(dev))
499 netif_wake_queue(dev);
500 break;
501
502 default:
503 ret = -EOPNOTSUPP;
504 break;
505 }
506 return ret;
507}
508
509static int mscan_do_set_bittiming(struct net_device *dev)
510{
511 struct mscan_priv *priv = netdev_priv(dev);
512 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
513 struct can_bittiming *bt = &priv->can.bittiming;
514 u8 btr0, btr1;
515
516 btr0 = BTR0_SET_BRP(bt->brp) | BTR0_SET_SJW(bt->sjw);
517 btr1 = (BTR1_SET_TSEG1(bt->prop_seg + bt->phase_seg1) |
518 BTR1_SET_TSEG2(bt->phase_seg2) |
519 BTR1_SET_SAM(priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES));
520
521 dev_info(dev->dev.parent, "setting BTR0=0x%02x BTR1=0x%02x\n",
522 btr0, btr1);
523
524 out_8(&regs->canbtr0, btr0);
525 out_8(&regs->canbtr1, btr1);
526
527 return 0;
528}
529
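A worked instance of the BTR packing above, assuming the illustrative timing brp = 4, sjw = 1, prop_seg + phase_seg1 = 12, phase_seg2 = 3, single-sample mode:

/*
 * btr0 = BTR0_SET_BRP(4) | BTR0_SET_SJW(1)
 *      = ((4 - 1) & 0x3f) | (((1 - 1) << 6) & 0xc0)  = 0x03
 * btr1 = BTR1_SET_TSEG1(12) | BTR1_SET_TSEG2(3) | BTR1_SET_SAM(0)
 *      = ((12 - 1) & 0x0f) | (((3 - 1) << 4) & 0x70) = 0x2b
 */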
530static int mscan_open(struct net_device *dev)
531{
532 int ret;
533 struct mscan_priv *priv = netdev_priv(dev);
534 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
535
536 /* common open */
537 ret = open_candev(dev);
538 if (ret)
539 return ret;
540
541 napi_enable(&priv->napi);
542
543 ret = request_irq(dev->irq, mscan_isr, 0, dev->name, dev);
544 if (ret < 0) {
545 dev_err(dev->dev.parent, "failed to attach interrupt\n");
546 goto exit_napi_disable;
547 }
548
549 priv->open_time = jiffies;
550
551 clrbits8(&regs->canctl1, MSCAN_LISTEN);
552
553 ret = mscan_start(dev);
554 if (ret)
555 goto exit_free_irq;
556
557 netif_start_queue(dev);
558
559 return 0;
560
561exit_free_irq:
562 priv->open_time = 0;
563 free_irq(dev->irq, dev);
564exit_napi_disable:
565 napi_disable(&priv->napi);
566 close_candev(dev);
567 return ret;
568}
569
570static int mscan_close(struct net_device *dev)
571{
572 struct mscan_priv *priv = netdev_priv(dev);
573 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
574
575 netif_stop_queue(dev);
576 napi_disable(&priv->napi);
577
578 out_8(&regs->cantier, 0);
579 out_8(&regs->canrier, 0);
580 mscan_set_mode(dev, MSCAN_INIT_MODE);
581 close_candev(dev);
582 free_irq(dev->irq, dev);
583 priv->open_time = 0;
584
585 return 0;
586}
587
588static const struct net_device_ops mscan_netdev_ops = {
589 .ndo_open = mscan_open,
590 .ndo_stop = mscan_close,
591 .ndo_start_xmit = mscan_start_xmit,
592};
593
594int register_mscandev(struct net_device *dev, int clock_src)
595{
596 struct mscan_priv *priv = netdev_priv(dev);
597 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
598 u8 ctl1;
599
600 ctl1 = in_8(&regs->canctl1);
601 if (clock_src)
602 ctl1 |= MSCAN_CLKSRC;
603 else
604 ctl1 &= ~MSCAN_CLKSRC;
605
606 ctl1 |= MSCAN_CANE;
607 out_8(&regs->canctl1, ctl1);
608 udelay(100);
609
610 /* acceptance mask/acceptance code (accept everything) */
611 out_be16(&regs->canidar1_0, 0);
612 out_be16(&regs->canidar3_2, 0);
613 out_be16(&regs->canidar5_4, 0);
614 out_be16(&regs->canidar7_6, 0);
615
616 out_be16(&regs->canidmr1_0, 0xffff);
617 out_be16(&regs->canidmr3_2, 0xffff);
618 out_be16(&regs->canidmr5_4, 0xffff);
619 out_be16(&regs->canidmr7_6, 0xffff);
620 /* Two 32 bit Acceptance Filters */
621 out_8(&regs->canidac, MSCAN_AF_32BIT);
622
623 mscan_set_mode(dev, MSCAN_INIT_MODE);
624
625 return register_candev(dev);
626}
627
628void unregister_mscandev(struct net_device *dev)
629{
630 struct mscan_priv *priv = netdev_priv(dev);
631 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
632 mscan_set_mode(dev, MSCAN_INIT_MODE);
633 clrbits8(&regs->canctl1, MSCAN_CANE);
634 unregister_candev(dev);
635}
636
637struct net_device *alloc_mscandev(void)
638{
639 struct net_device *dev;
640 struct mscan_priv *priv;
641 int i;
642
643 dev = alloc_candev(sizeof(struct mscan_priv), MSCAN_ECHO_SKB_MAX);
644 if (!dev)
645 return NULL;
646 priv = netdev_priv(dev);
647
648 dev->netdev_ops = &mscan_netdev_ops;
649
650 dev->flags |= IFF_ECHO; /* we support local echo */
651
652 netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8);
653
654 priv->can.bittiming_const = &mscan_bittiming_const;
655 priv->can.do_set_bittiming = mscan_do_set_bittiming;
656 priv->can.do_set_mode = mscan_do_set_mode;
657
658 for (i = 0; i < TX_QUEUE_SIZE; i++) {
659 priv->tx_queue[i].id = i;
660 priv->tx_queue[i].mask = 1 << i;
661 }
662
663 return dev;
664}
665
666MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
667MODULE_LICENSE("GPL v2");
668MODULE_DESCRIPTION("CAN port driver for MSCAN-based chips");
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
new file mode 100644
index 000000000000..00fc4aaf1ed8
--- /dev/null
+++ b/drivers/net/can/mscan/mscan.h
@@ -0,0 +1,296 @@
1/*
2 * Definitions of consts/structs to drive the Freescale MSCAN.
3 *
4 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the version 2 of the GNU General Public License
9 * as published by the Free Software Foundation
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef __MSCAN_H__
22#define __MSCAN_H__
23
24#include <linux/types.h>
25
26/* MSCAN control register 0 (CANCTL0) bits */
27#define MSCAN_RXFRM 0x80
28#define MSCAN_RXACT 0x40
29#define MSCAN_CSWAI 0x20
30#define MSCAN_SYNCH 0x10
31#define MSCAN_TIME 0x08
32#define MSCAN_WUPE 0x04
33#define MSCAN_SLPRQ 0x02
34#define MSCAN_INITRQ 0x01
35
36/* MSCAN control register 1 (CANCTL1) bits */
37#define MSCAN_CANE 0x80
38#define MSCAN_CLKSRC 0x40
39#define MSCAN_LOOPB 0x20
40#define MSCAN_LISTEN 0x10
41#define MSCAN_WUPM 0x04
42#define MSCAN_SLPAK 0x02
43#define MSCAN_INITAK 0x01
44
45/* Use the MPC5200 MSCAN variant? */
46#ifdef CONFIG_PPC
47#define MSCAN_FOR_MPC5200
48#endif
49
50#ifdef MSCAN_FOR_MPC5200
51#define MSCAN_CLKSRC_BUS 0
52#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC
53#else
54#define MSCAN_CLKSRC_BUS MSCAN_CLKSRC
55#define MSCAN_CLKSRC_XTAL 0
56#endif
57
58/* MSCAN receiver flag register (CANRFLG) bits */
59#define MSCAN_WUPIF 0x80
60#define MSCAN_CSCIF 0x40
61#define MSCAN_RSTAT1 0x20
62#define MSCAN_RSTAT0 0x10
63#define MSCAN_TSTAT1 0x08
64#define MSCAN_TSTAT0 0x04
65#define MSCAN_OVRIF 0x02
66#define MSCAN_RXF 0x01
67#define MSCAN_ERR_IF (MSCAN_OVRIF | MSCAN_CSCIF)
68#define MSCAN_RSTAT_MSK (MSCAN_RSTAT1 | MSCAN_RSTAT0)
69#define MSCAN_TSTAT_MSK (MSCAN_TSTAT1 | MSCAN_TSTAT0)
70#define MSCAN_STAT_MSK (MSCAN_RSTAT_MSK | MSCAN_TSTAT_MSK)
71
72#define MSCAN_STATE_BUS_OFF (MSCAN_RSTAT1 | MSCAN_RSTAT0 | \
73 MSCAN_TSTAT1 | MSCAN_TSTAT0)
74#define MSCAN_STATE_TX(canrflg) (((canrflg)&MSCAN_TSTAT_MSK)>>2)
75#define MSCAN_STATE_RX(canrflg) (((canrflg)&MSCAN_RSTAT_MSK)>>4)
76#define MSCAN_STATE_ACTIVE 0
77#define MSCAN_STATE_WARNING 1
78#define MSCAN_STATE_PASSIVE 2
79#define MSCAN_STATE_BUSOFF 3
80
81/* MSCAN receiver interrupt enable register (CANRIER) bits */
82#define MSCAN_WUPIE 0x80
83#define MSCAN_CSCIE 0x40
84#define MSCAN_RSTATE1 0x20
85#define MSCAN_RSTATE0 0x10
86#define MSCAN_TSTATE1 0x08
87#define MSCAN_TSTATE0 0x04
88#define MSCAN_OVRIE 0x02
89#define MSCAN_RXFIE 0x01
90
91/* MSCAN transmitter flag register (CANTFLG) bits */
92#define MSCAN_TXE2 0x04
93#define MSCAN_TXE1 0x02
94#define MSCAN_TXE0 0x01
95#define MSCAN_TXE (MSCAN_TXE2 | MSCAN_TXE1 | MSCAN_TXE0)
96
97/* MSCAN transmitter interrupt enable register (CANTIER) bits */
98#define MSCAN_TXIE2 0x04
99#define MSCAN_TXIE1 0x02
100#define MSCAN_TXIE0 0x01
101#define MSCAN_TXIE (MSCAN_TXIE2 | MSCAN_TXIE1 | MSCAN_TXIE0)
102
103/* MSCAN transmitter message abort request (CANTARQ) bits */
104#define MSCAN_ABTRQ2 0x04
105#define MSCAN_ABTRQ1 0x02
106#define MSCAN_ABTRQ0 0x01
107
108/* MSCAN transmitter message abort ack (CANTAAK) bits */
109#define MSCAN_ABTAK2 0x04
110#define MSCAN_ABTAK1 0x02
111#define MSCAN_ABTAK0 0x01
112
113/* MSCAN transmit buffer selection (CANTBSEL) bits */
114#define MSCAN_TX2 0x04
115#define MSCAN_TX1 0x02
116#define MSCAN_TX0 0x01
117
118/* MSCAN ID acceptance control register (CANIDAC) bits */
119#define MSCAN_IDAM1 0x20
120#define MSCAN_IDAM0 0x10
121#define MSCAN_IDHIT2 0x04
122#define MSCAN_IDHIT1 0x02
123#define MSCAN_IDHIT0 0x01
124
125#define MSCAN_AF_32BIT 0x00
126#define MSCAN_AF_16BIT MSCAN_IDAM0
127#define MSCAN_AF_8BIT MSCAN_IDAM1
128#define MSCAN_AF_CLOSED (MSCAN_IDAM0|MSCAN_IDAM1)
129#define MSCAN_AF_MASK (~(MSCAN_IDAM0|MSCAN_IDAM1))
130
131/* MSCAN Miscellaneous Register (CANMISC) bits */
132#define MSCAN_BOHOLD 0x01
133
134/* MSCAN Identifier Register (IDR) bits */
135#define MSCAN_SFF_RTR_SHIFT 4
136#define MSCAN_EFF_RTR_SHIFT 0
137#define MSCAN_EFF_FLAGS 0x18 /* IDE + SRR */
138
139#ifdef MSCAN_FOR_MPC5200
140#define _MSCAN_RESERVED_(n, num) u8 _res##n[num]
141#define _MSCAN_RESERVED_DSR_SIZE 2
142#else
143#define _MSCAN_RESERVED_(n, num)
144#define _MSCAN_RESERVED_DSR_SIZE 0
145#endif
146
147/* Structure of the hardware registers */
148struct mscan_regs {
149 /* (see doc S12MSCANV3/D) MPC5200 MSCAN */
150 u8 canctl0; /* + 0x00 0x00 */
151 u8 canctl1; /* + 0x01 0x01 */
152 _MSCAN_RESERVED_(1, 2); /* + 0x02 */
153 u8 canbtr0; /* + 0x04 0x02 */
154 u8 canbtr1; /* + 0x05 0x03 */
155 _MSCAN_RESERVED_(2, 2); /* + 0x06 */
156 u8 canrflg; /* + 0x08 0x04 */
157 u8 canrier; /* + 0x09 0x05 */
158 _MSCAN_RESERVED_(3, 2); /* + 0x0a */
159 u8 cantflg; /* + 0x0c 0x06 */
160 u8 cantier; /* + 0x0d 0x07 */
161 _MSCAN_RESERVED_(4, 2); /* + 0x0e */
162 u8 cantarq; /* + 0x10 0x08 */
163 u8 cantaak; /* + 0x11 0x09 */
164 _MSCAN_RESERVED_(5, 2); /* + 0x12 */
165 u8 cantbsel; /* + 0x14 0x0a */
166 u8 canidac; /* + 0x15 0x0b */
167 u8 reserved; /* + 0x16 0x0c */
168 _MSCAN_RESERVED_(6, 5); /* + 0x17 */
169#ifndef MSCAN_FOR_MPC5200
170 u8 canmisc; /* 0x0d */
171#endif
172 u8 canrxerr; /* + 0x1c 0x0e */
173 u8 cantxerr; /* + 0x1d 0x0f */
174 _MSCAN_RESERVED_(7, 2); /* + 0x1e */
175 u16 canidar1_0; /* + 0x20 0x10 */
176 _MSCAN_RESERVED_(8, 2); /* + 0x22 */
177 u16 canidar3_2; /* + 0x24 0x12 */
178 _MSCAN_RESERVED_(9, 2); /* + 0x26 */
179 u16 canidmr1_0; /* + 0x28 0x14 */
180 _MSCAN_RESERVED_(10, 2); /* + 0x2a */
181 u16 canidmr3_2; /* + 0x2c 0x16 */
182 _MSCAN_RESERVED_(11, 2); /* + 0x2e */
183 u16 canidar5_4; /* + 0x30 0x18 */
184 _MSCAN_RESERVED_(12, 2); /* + 0x32 */
185 u16 canidar7_6; /* + 0x34 0x1a */
186 _MSCAN_RESERVED_(13, 2); /* + 0x36 */
187 u16 canidmr5_4; /* + 0x38 0x1c */
188 _MSCAN_RESERVED_(14, 2); /* + 0x3a */
189 u16 canidmr7_6; /* + 0x3c 0x1e */
190 _MSCAN_RESERVED_(15, 2); /* + 0x3e */
191 struct {
192 u16 idr1_0; /* + 0x40 0x20 */
193 _MSCAN_RESERVED_(16, 2); /* + 0x42 */
194 u16 idr3_2; /* + 0x44 0x22 */
195 _MSCAN_RESERVED_(17, 2); /* + 0x46 */
196 u16 dsr1_0; /* + 0x48 0x24 */
197 _MSCAN_RESERVED_(18, 2); /* + 0x4a */
198 u16 dsr3_2; /* + 0x4c 0x26 */
199 _MSCAN_RESERVED_(19, 2); /* + 0x4e */
200 u16 dsr5_4; /* + 0x50 0x28 */
201 _MSCAN_RESERVED_(20, 2); /* + 0x52 */
202 u16 dsr7_6; /* + 0x54 0x2a */
203 _MSCAN_RESERVED_(21, 2); /* + 0x56 */
204 u8 dlr; /* + 0x58 0x2c */
205 u8:8; /* + 0x59 0x2d */
206 _MSCAN_RESERVED_(22, 2); /* + 0x5a */
207 u16 time; /* + 0x5c 0x2e */
208 } rx;
209 _MSCAN_RESERVED_(23, 2); /* + 0x5e */
210 struct {
211 u16 idr1_0; /* + 0x60 0x30 */
212 _MSCAN_RESERVED_(24, 2); /* + 0x62 */
213 u16 idr3_2; /* + 0x64 0x32 */
214 _MSCAN_RESERVED_(25, 2); /* + 0x66 */
215 u16 dsr1_0; /* + 0x68 0x34 */
216 _MSCAN_RESERVED_(26, 2); /* + 0x6a */
217 u16 dsr3_2; /* + 0x6c 0x36 */
218 _MSCAN_RESERVED_(27, 2); /* + 0x6e */
219 u16 dsr5_4; /* + 0x70 0x38 */
220 _MSCAN_RESERVED_(28, 2); /* + 0x72 */
221 u16 dsr7_6; /* + 0x74 0x3a */
222 _MSCAN_RESERVED_(29, 2); /* + 0x76 */
223 u8 dlr; /* + 0x78 0x3c */
224 u8 tbpr; /* + 0x79 0x3d */
225 _MSCAN_RESERVED_(30, 2); /* + 0x7a */
226 u16 time; /* + 0x7c 0x3e */
227 } tx;
228 _MSCAN_RESERVED_(31, 2); /* + 0x7e */
229} __attribute__ ((packed));
230
231#undef _MSCAN_RESERVED_
232#define MSCAN_REGION 	sizeof(struct mscan_regs)
233
234#define MSCAN_NORMAL_MODE 0
235#define MSCAN_SLEEP_MODE MSCAN_SLPRQ
236#define MSCAN_INIT_MODE (MSCAN_INITRQ | MSCAN_SLPRQ)
237#define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ)
238#define MSCAN_SET_MODE_RETRIES 255
239#define MSCAN_ECHO_SKB_MAX 3
240
241#define BTR0_BRP_MASK 0x3f
242#define BTR0_SJW_SHIFT 6
243#define BTR0_SJW_MASK (0x3 << BTR0_SJW_SHIFT)
244
245#define BTR1_TSEG1_MASK 0xf
246#define BTR1_TSEG2_SHIFT 4
247#define BTR1_TSEG2_MASK (0x7 << BTR1_TSEG2_SHIFT)
248#define BTR1_SAM_SHIFT 7
249
250#define BTR0_SET_BRP(brp) (((brp) - 1) & BTR0_BRP_MASK)
251#define BTR0_SET_SJW(sjw) ((((sjw) - 1) << BTR0_SJW_SHIFT) & \
252 BTR0_SJW_MASK)
253
254#define BTR1_SET_TSEG1(tseg1) (((tseg1) - 1) & BTR1_TSEG1_MASK)
255#define BTR1_SET_TSEG2(tseg2) ((((tseg2) - 1) << BTR1_TSEG2_SHIFT) & \
256 BTR1_TSEG2_MASK)
257#define BTR1_SET_SAM(sam) ((sam) ? 1 << BTR1_SAM_SHIFT : 0)
258
259#define F_RX_PROGRESS 0
260#define F_TX_PROGRESS 1
261#define F_TX_WAIT_ALL 2
262
263#define TX_QUEUE_SIZE 3
264
265struct tx_queue_entry {
266 struct list_head list;
267 u8 mask;
268 u8 id;
269};
270
271struct mscan_priv {
272 struct can_priv can; /* must be the first member */
273 long open_time;
274 unsigned long flags;
275 void __iomem *reg_base; /* ioremap'ed address to registers */
276 u8 shadow_statflg;
277 u8 shadow_canrier;
278 u8 cur_pri;
279 u8 prev_buf_id;
280 u8 tx_active;
281
282 struct list_head tx_head;
283 struct tx_queue_entry tx_queue[TX_QUEUE_SIZE];
284 struct napi_struct napi;
285};
286
287extern struct net_device *alloc_mscandev(void);
288/*
289 * clock_src:
290 * 1 = The MSCAN clock source is the on-chip Bus Clock.
291 * 0 = The MSCAN clock source is the chip Oscillator Clock.
292 */
293extern int register_mscandev(struct net_device *dev, int clock_src);
294extern void unregister_mscandev(struct net_device *dev);
295
296#endif /* __MSCAN_H__ */
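The _MSCAN_RESERVED_ macro gives every register a 4-byte stride on the MPC5200 and a 2-byte stride otherwise. A hypothetical compile-time check of the resulting offsets (a sketch, not part of the header), matching the two offset columns in the comments above:

static inline void mscan_check_layout(void)
{
#ifdef MSCAN_FOR_MPC5200
	BUILD_BUG_ON(offsetof(struct mscan_regs, canbtr0) != 0x04);
	BUILD_BUG_ON(offsetof(struct mscan_regs, canrflg) != 0x08);
	BUILD_BUG_ON(offsetof(struct mscan_regs, rx) != 0x40);
	BUILD_BUG_ON(offsetof(struct mscan_regs, tx) != 0x60);
#else
	BUILD_BUG_ON(offsetof(struct mscan_regs, canbtr0) != 0x02);
	BUILD_BUG_ON(offsetof(struct mscan_regs, rx) != 0x20);
	BUILD_BUG_ON(offsetof(struct mscan_regs, tx) != 0x30);
#endif
}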
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
new file mode 100644
index 000000000000..4c674927f247
--- /dev/null
+++ b/drivers/net/can/sja1000/Kconfig
@@ -0,0 +1,47 @@
1menuconfig CAN_SJA1000
2 tristate "Philips/NXP SJA1000 devices"
3 depends on CAN_DEV && HAS_IOMEM
4
5if CAN_SJA1000
6
7config CAN_SJA1000_ISA
8 tristate "ISA Bus based legacy SJA1000 driver"
9 depends on ISA
10 ---help---
11 This driver adds legacy support for SJA1000 chips connected to
12 the ISA bus using I/O port, memory mapped or indirect access.
13
14config CAN_SJA1000_PLATFORM
15 tristate "Generic Platform Bus based SJA1000 driver"
16 ---help---
17	  This driver adds support for the SJA1000 chips connected to
18	  the "platform bus" (the Linux abstraction for devices attached
19	  directly to the processor), which can be found on various
20	  boards from Phytec (http://www.phytec.de) like the PCM027 and
21	  PCM038.
22
23config CAN_SJA1000_OF_PLATFORM
24 tristate "Generic OF Platform Bus based SJA1000 driver"
25 depends on PPC_OF
26 ---help---
27 This driver adds support for the SJA1000 chips connected to
28 the OpenFirmware "platform bus" found on embedded systems with
29 OpenFirmware bindings, e.g. if you have a PowerPC based system
30 you may want to enable this option.
31
32config CAN_EMS_PCI
33 tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card"
34 depends on PCI
35 ---help---
36 This driver is for the one, two or four channel CPC-PCI,
37 CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche
38 (http://www.ems-wuensche.de).
39
40config CAN_KVASER_PCI
41 tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
42 depends on PCI
43 ---help---
44	  This driver is for the PCIcanx and PCIcan cards (1, 2 or
45 4 channel) from Kvaser (http://www.kvaser.com).
46
47endif
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 782a47fabf2c..b4ba88a31075 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -516,7 +516,7 @@ static int sja1000_open(struct net_device *dev)
516 516
517 /* register interrupt handler, if not done by the device driver */ 517 /* register interrupt handler, if not done by the device driver */
518 if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER)) { 518 if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER)) {
519 err = request_irq(dev->irq, &sja1000_interrupt, priv->irq_flags, 519 err = request_irq(dev->irq, sja1000_interrupt, priv->irq_flags,
520 dev->name, (void *)dev); 520 dev->name, (void *)dev);
521 if (err) { 521 if (err) {
522 close_candev(dev); 522 close_candev(dev);
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
new file mode 100644
index 000000000000..bbc78e0b8a15
--- /dev/null
+++ b/drivers/net/can/usb/Kconfig
@@ -0,0 +1,10 @@
1menu "CAN USB interfaces"
2 depends on USB && CAN_DEV
3
4config CAN_EMS_USB
5 tristate "EMS CPC-USB/ARM7 CAN/USB interface"
6 ---help---
7 This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
8	  from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
9
10endmenu
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index c3f75ba701b1..0afd51d4c7a5 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -3,3 +3,5 @@
3# 3#
4 4
5obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o 5obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
6
7ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 79ce8e857eab..8edac8915ea8 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -2136,9 +2136,6 @@ static int emac_poll(struct napi_struct *napi, int budget)
2136 u32 status = 0; 2136 u32 status = 0;
2137 u32 num_pkts = 0; 2137 u32 num_pkts = 0;
2138 2138
2139 if (!netif_running(ndev))
2140 return 0;
2141
2142 /* Check interrupt vectors and call packet processing */ 2139 /* Check interrupt vectors and call packet processing */
2143 status = emac_read(EMAC_MACINVECTOR); 2140 status = emac_read(EMAC_MACINVECTOR);
2144 2141
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 31b8bef49d2e..3aab2e466008 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -100,6 +100,7 @@ typedef struct board_info {
100 100
101 unsigned int flags; 101 unsigned int flags;
102 unsigned int in_suspend :1; 102 unsigned int in_suspend :1;
103 unsigned int wake_supported :1;
103 int debug_level; 104 int debug_level;
104 105
105 enum dm9000_type type; 106 enum dm9000_type type;
@@ -116,6 +117,8 @@ typedef struct board_info {
116 struct resource *data_req; 117 struct resource *data_req;
117 struct resource *irq_res; 118 struct resource *irq_res;
118 119
120 int irq_wake;
121
119 struct mutex addr_lock; /* phy and eeprom access lock */ 122 struct mutex addr_lock; /* phy and eeprom access lock */
120 123
121 struct delayed_work phy_poll; 124 struct delayed_work phy_poll;
@@ -125,6 +128,7 @@ typedef struct board_info {
125 128
126 struct mii_if_info mii; 129 struct mii_if_info mii;
127 u32 msg_enable; 130 u32 msg_enable;
131 u32 wake_state;
128 132
129 int rx_csum; 133 int rx_csum;
130 int can_csum; 134 int can_csum;
@@ -568,6 +572,54 @@ static int dm9000_set_eeprom(struct net_device *dev,
568 return 0; 572 return 0;
569} 573}
570 574
575static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
576{
577 board_info_t *dm = to_dm9000_board(dev);
578
579 memset(w, 0, sizeof(struct ethtool_wolinfo));
580
581 /* note, we could probably support wake-phy too */
582 w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
583 w->wolopts = dm->wake_state;
584}
585
586static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
587{
588 board_info_t *dm = to_dm9000_board(dev);
589 unsigned long flags;
590 u32 opts = w->wolopts;
591 u32 wcr = 0;
592
593 if (!dm->wake_supported)
594 return -EOPNOTSUPP;
595
596 if (opts & ~WAKE_MAGIC)
597 return -EINVAL;
598
599 if (opts & WAKE_MAGIC)
600 wcr |= WCR_MAGICEN;
601
602 mutex_lock(&dm->addr_lock);
603
604 spin_lock_irqsave(&dm->lock, flags);
605 iow(dm, DM9000_WCR, wcr);
606 spin_unlock_irqrestore(&dm->lock, flags);
607
608 mutex_unlock(&dm->addr_lock);
609
610 if (dm->wake_state != opts) {
611 /* change in wol state, update IRQ state */
612
613 if (!dm->wake_state)
614 set_irq_wake(dm->irq_wake, 1);
615		else if (dm->wake_state && !opts)
616 set_irq_wake(dm->irq_wake, 0);
617 }
618
619 dm->wake_state = opts;
620 return 0;
621}
622
571static const struct ethtool_ops dm9000_ethtool_ops = { 623static const struct ethtool_ops dm9000_ethtool_ops = {
572 .get_drvinfo = dm9000_get_drvinfo, 624 .get_drvinfo = dm9000_get_drvinfo,
573 .get_settings = dm9000_get_settings, 625 .get_settings = dm9000_get_settings,
@@ -576,6 +628,8 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
576 .set_msglevel = dm9000_set_msglevel, 628 .set_msglevel = dm9000_set_msglevel,
577 .nway_reset = dm9000_nway_reset, 629 .nway_reset = dm9000_nway_reset,
578 .get_link = dm9000_get_link, 630 .get_link = dm9000_get_link,
631 .get_wol = dm9000_get_wol,
632 .set_wol = dm9000_set_wol,
579 .get_eeprom_len = dm9000_get_eeprom_len, 633 .get_eeprom_len = dm9000_get_eeprom_len,
580 .get_eeprom = dm9000_get_eeprom, 634 .get_eeprom = dm9000_get_eeprom,
581 .set_eeprom = dm9000_set_eeprom, 635 .set_eeprom = dm9000_set_eeprom,
@@ -722,6 +776,7 @@ dm9000_init_dm9000(struct net_device *dev)
722{ 776{
723 board_info_t *db = netdev_priv(dev); 777 board_info_t *db = netdev_priv(dev);
724 unsigned int imr; 778 unsigned int imr;
779 unsigned int ncr;
725 780
726 dm9000_dbg(db, 1, "entering %s\n", __func__); 781 dm9000_dbg(db, 1, "entering %s\n", __func__);
727 782
@@ -736,8 +791,15 @@ dm9000_init_dm9000(struct net_device *dev)
736 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ 791 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
737 iow(db, DM9000_GPR, 0); /* Enable PHY */ 792 iow(db, DM9000_GPR, 0); /* Enable PHY */
738 793
739 if (db->flags & DM9000_PLATF_EXT_PHY) 794 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
740 iow(db, DM9000_NCR, NCR_EXT_PHY); 795
796	/* if wol is needed, always set NCR_WAKEEN, otherwise we end
797	 * up discarding wake events when it is disabled. There is
798	 * already a wake-mask in DM9000_WCR */
799 if (db->wake_supported)
800 ncr |= NCR_WAKEEN;
801
802 iow(db, DM9000_NCR, ncr);
741 803
742 /* Program operating register */ 804 /* Program operating register */
743 iow(db, DM9000_TCR, 0); /* TX Polling clear */ 805 iow(db, DM9000_TCR, 0); /* TX Polling clear */
@@ -1045,6 +1107,41 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
1045 return IRQ_HANDLED; 1107 return IRQ_HANDLED;
1046} 1108}
1047 1109
1110static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
1111{
1112 struct net_device *dev = dev_id;
1113 board_info_t *db = netdev_priv(dev);
1114 unsigned long flags;
1115 unsigned nsr, wcr;
1116
1117 spin_lock_irqsave(&db->lock, flags);
1118
1119 nsr = ior(db, DM9000_NSR);
1120 wcr = ior(db, DM9000_WCR);
1121
1122 dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);
1123
1124 if (nsr & NSR_WAKEST) {
1125		/* clear the wake status so the event is not signalled again */
1126 iow(db, DM9000_NSR, NSR_WAKEST);
1127
1128 if (wcr & WCR_LINKST)
1129 dev_info(db->dev, "wake by link status change\n");
1130 if (wcr & WCR_SAMPLEST)
1131 dev_info(db->dev, "wake by sample packet\n");
1132		if (wcr & WCR_MAGICST)
1133 dev_info(db->dev, "wake by magic packet\n");
1134 if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
1135 dev_err(db->dev, "wake signalled with no reason? "
1136				"NSR=0x%02x, WCR=0x%02x\n", nsr, wcr);
1137
1138 }
1139
1140 spin_unlock_irqrestore(&db->lock, flags);
1141
1142 return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
1143}
1144
1048#ifdef CONFIG_NET_POLL_CONTROLLER 1145#ifdef CONFIG_NET_POLL_CONTROLLER
1049/* 1146/*
1050 *Used by netconsole 1147 *Used by netconsole
@@ -1299,6 +1396,29 @@ dm9000_probe(struct platform_device *pdev)
1299 goto out; 1396 goto out;
1300 } 1397 }
1301 1398
1399 db->irq_wake = platform_get_irq(pdev, 1);
1400 if (db->irq_wake >= 0) {
1401 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
1402
1403 ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
1404 IRQF_SHARED, dev_name(db->dev), ndev);
1405 if (ret) {
1406 dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
1407 } else {
1408
1409 /* test to see if irq is really wakeup capable */
1410 ret = set_irq_wake(db->irq_wake, 1);
1411 if (ret) {
1412 dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
1413 db->irq_wake, ret);
1414 ret = 0;
1415 } else {
1416 set_irq_wake(db->irq_wake, 0);
1417 db->wake_supported = 1;
1418 }
1419 }
1420 }
1421
1302 iosize = resource_size(db->addr_res); 1422 iosize = resource_size(db->addr_res);
1303 db->addr_req = request_mem_region(db->addr_res->start, iosize, 1423 db->addr_req = request_mem_region(db->addr_res->start, iosize,
1304 pdev->name); 1424 pdev->name);
@@ -1490,10 +1610,14 @@ dm9000_drv_suspend(struct device *dev)
1490 db = netdev_priv(ndev); 1610 db = netdev_priv(ndev);
1491 db->in_suspend = 1; 1611 db->in_suspend = 1;
1492 1612
1493 if (netif_running(ndev)) { 1613 if (!netif_running(ndev))
1494 netif_device_detach(ndev); 1614 return 0;
1615
1616 netif_device_detach(ndev);
1617
1618 /* only shutdown if not using WoL */
1619 if (!db->wake_state)
1495 dm9000_shutdown(ndev); 1620 dm9000_shutdown(ndev);
1496 }
1497 } 1621 }
1498 return 0; 1622 return 0;
1499} 1623}
@@ -1506,10 +1630,13 @@ dm9000_drv_resume(struct device *dev)
1506 board_info_t *db = netdev_priv(ndev); 1630 board_info_t *db = netdev_priv(ndev);
1507 1631
1508 if (ndev) { 1632 if (ndev) {
1509
1510 if (netif_running(ndev)) { 1633 if (netif_running(ndev)) {
1511 dm9000_reset(db); 1634 /* reset if we were not in wake mode to ensure if
1512 dm9000_init_dm9000(ndev); 1635 * the device was powered off it is in a known state */
1636 if (!db->wake_state) {
1637 dm9000_reset(db);
1638 dm9000_init_dm9000(ndev);
1639 }
1513 1640
1514 netif_device_attach(ndev); 1641 netif_device_attach(ndev);
1515 } 1642 }
diff --git a/drivers/net/dm9000.h b/drivers/net/dm9000.h
index fb1c924d79b4..55688bd1a3ef 100644
--- a/drivers/net/dm9000.h
+++ b/drivers/net/dm9000.h
@@ -111,6 +111,13 @@
111#define RSR_CE (1<<1) 111#define RSR_CE (1<<1)
112#define RSR_FOE (1<<0) 112#define RSR_FOE (1<<0)
113 113
114#define WCR_LINKEN (1 << 5)
115#define WCR_SAMPLEEN (1 << 4)
116#define WCR_MAGICEN (1 << 3)
117#define WCR_LINKST (1 << 2)
118#define WCR_SAMPLEST (1 << 1)
119#define WCR_MAGICST (1 << 0)
120
114#define FCTR_HWOT(ot) (( ot & 0xf ) << 4 ) 121#define FCTR_HWOT(ot) (( ot & 0xf ) << 4 )
115#define FCTR_LWOT(ot) ( ot & 0xf ) 122#define FCTR_LWOT(ot) ( ot & 0xf )
116 123
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index f1c565282d58..96b6dc42fc74 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -640,7 +640,7 @@ static int ethoc_mdio_probe(struct net_device *dev)
640 return -ENXIO; 640 return -ENXIO;
641 } 641 }
642 642
643 phy = phy_connect(dev, dev_name(&phy->dev), &ethoc_mdio_poll, 0, 643 phy = phy_connect(dev, dev_name(&phy->dev), ethoc_mdio_poll, 0,
644 PHY_INTERFACE_MODE_GMII); 644 PHY_INTERFACE_MODE_GMII);
645 if (IS_ERR(phy)) { 645 if (IS_ERR(phy)) {
646 dev_err(&dev->dev, "could not attach to PHY\n"); 646 dev_err(&dev->dev, "could not attach to PHY\n");
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 0a1c2bb27d4d..73fe97777201 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5820,10 +5820,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5820 dev->dev_addr); 5820 dev->dev_addr);
5821 dev_printk(KERN_ERR, &pci_dev->dev, 5821 dev_printk(KERN_ERR, &pci_dev->dev,
5822 "Please complain to your hardware vendor. Switching to a random MAC.\n"); 5822 "Please complain to your hardware vendor. Switching to a random MAC.\n");
5823 dev->dev_addr[0] = 0x00; 5823 random_ether_addr(dev->dev_addr);
5824 dev->dev_addr[1] = 0x00;
5825 dev->dev_addr[2] = 0x6c;
5826 get_random_bytes(&dev->dev_addr[3], 3);
5827 } 5824 }
5828 5825
5829 dprintk(KERN_DEBUG "%s: MAC Address %pM\n", 5826 dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 197b358e6361..16def131c390 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1246,7 +1246,7 @@ static int gfar_restore(struct device *dev)
1246 phy_start(priv->phydev); 1246 phy_start(priv->phydev);
1247 1247
1248 netif_device_attach(ndev); 1248 netif_device_attach(ndev);
1249 napi_enable(&priv->gfargrp.napi); 1249 enable_napi(priv);
1250 1250
1251 return 0; 1251 return 0;
1252} 1252}
@@ -1928,14 +1928,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1928 /* total number of fragments in the SKB */ 1928 /* total number of fragments in the SKB */
1929 nr_frags = skb_shinfo(skb)->nr_frags; 1929 nr_frags = skb_shinfo(skb)->nr_frags;
1930 1930
1931 spin_lock_irqsave(&tx_queue->txlock, flags);
1932
1933 /* check if there is space to queue this packet */ 1931 /* check if there is space to queue this packet */
1934 if ((nr_frags+1) > tx_queue->num_txbdfree) { 1932 if ((nr_frags+1) > tx_queue->num_txbdfree) {
1935 /* no space, stop the queue */ 1933 /* no space, stop the queue */
1936 netif_tx_stop_queue(txq); 1934 netif_tx_stop_queue(txq);
1937 dev->stats.tx_fifo_errors++; 1935 dev->stats.tx_fifo_errors++;
1938 spin_unlock_irqrestore(&tx_queue->txlock, flags);
1939 return NETDEV_TX_BUSY; 1936 return NETDEV_TX_BUSY;
1940 } 1937 }
1941 1938
@@ -1999,6 +1996,20 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1999 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); 1996 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2000 1997
2001 /* 1998 /*
1999 * We can work in parallel with gfar_clean_tx_ring(), except
2000 * when modifying num_txbdfree. Note that we didn't grab the lock
2001 * when we were reading the num_txbdfree and checking for available
2002 * space, that's because outside of this function it can only grow,
2003 * and once we've got needed space, it cannot suddenly disappear.
2004 *
2005 * The lock also protects us from gfar_error(), which can modify
2006 * regs->tstat and thus retrigger the transfers, which is why we
2007 * also must grab the lock before setting ready bit for the first
2008 * to be transmitted BD.
2009 */
2010 spin_lock_irqsave(&tx_queue->txlock, flags);
2011
2012 /*
2002 * The powerpc-specific eieio() is used, as wmb() has too strong 2013 * The powerpc-specific eieio() is used, as wmb() has too strong
2003 * semantics (it requires synchronization between cacheable and 2014 * semantics (it requires synchronization between cacheable and
2004 * uncacheable mappings, which eieio doesn't provide and which we 2015 * uncacheable mappings, which eieio doesn't provide and which we
@@ -2225,6 +2236,8 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2225 skb_dirtytx = tx_queue->skb_dirtytx; 2236 skb_dirtytx = tx_queue->skb_dirtytx;
2226 2237
2227 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { 2238 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2239 unsigned long flags;
2240
2228 frags = skb_shinfo(skb)->nr_frags; 2241 frags = skb_shinfo(skb)->nr_frags;
2229 lbdp = skip_txbd(bdp, frags, base, tx_ring_size); 2242 lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
2230 2243
@@ -2269,7 +2282,9 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2269 TX_RING_MOD_MASK(tx_ring_size); 2282 TX_RING_MOD_MASK(tx_ring_size);
2270 2283
2271 howmany++; 2284 howmany++;
2285 spin_lock_irqsave(&tx_queue->txlock, flags);
2272 tx_queue->num_txbdfree += frags + 1; 2286 tx_queue->num_txbdfree += frags + 1;
2287 spin_unlock_irqrestore(&tx_queue->txlock, flags);
2273 } 2288 }
2274 2289
2275 /* If we freed a buffer, we can restart transmission, if necessary */ 2290 /* If we freed a buffer, we can restart transmission, if necessary */
@@ -2504,8 +2519,6 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2504 skb_put(skb, pkt_len); 2519 skb_put(skb, pkt_len);
2505 dev->stats.rx_bytes += pkt_len; 2520 dev->stats.rx_bytes += pkt_len;
2506 2521
2507 if (in_irq() || irqs_disabled())
2508 printk("Interrupt problem!\n");
2509 gfar_process_frame(dev, skb, amount_pull); 2522 gfar_process_frame(dev, skb, amount_pull);
2510 2523
2511 } else { 2524 } else {
@@ -2550,7 +2563,6 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2550 int tx_cleaned = 0, i, left_over_budget = budget; 2563 int tx_cleaned = 0, i, left_over_budget = budget;
2551 unsigned long serviced_queues = 0; 2564 unsigned long serviced_queues = 0;
2552 int num_queues = 0; 2565 int num_queues = 0;
2553 unsigned long flags;
2554 2566
2555 num_queues = gfargrp->num_rx_queues; 2567 num_queues = gfargrp->num_rx_queues;
2556 budget_per_queue = budget/num_queues; 2568 budget_per_queue = budget/num_queues;
@@ -2570,14 +2582,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2570 rx_queue = priv->rx_queue[i]; 2582 rx_queue = priv->rx_queue[i];
2571 tx_queue = priv->tx_queue[rx_queue->qindex]; 2583 tx_queue = priv->tx_queue[rx_queue->qindex];
2572 2584
2573 /* If we fail to get the lock, 2585 tx_cleaned += gfar_clean_tx_ring(tx_queue);
2574 * don't bother with the TX BDs */
2575 if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
2576 tx_cleaned += gfar_clean_tx_ring(tx_queue);
2577 spin_unlock_irqrestore(&tx_queue->txlock,
2578 flags);
2579 }
2580
2581 rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue, 2586 rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
2582 budget_per_queue); 2587 budget_per_queue);
2583 rx_cleaned += rx_cleaned_per_queue; 2588 rx_cleaned += rx_cleaned_per_queue;
@@ -2945,14 +2950,22 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
2945 if (events & IEVENT_CRL) 2950 if (events & IEVENT_CRL)
2946 dev->stats.tx_aborted_errors++; 2951 dev->stats.tx_aborted_errors++;
2947 if (events & IEVENT_XFUN) { 2952 if (events & IEVENT_XFUN) {
2953 unsigned long flags;
2954
2948 if (netif_msg_tx_err(priv)) 2955 if (netif_msg_tx_err(priv))
2949 printk(KERN_DEBUG "%s: TX FIFO underrun, " 2956 printk(KERN_DEBUG "%s: TX FIFO underrun, "
2950 "packet dropped.\n", dev->name); 2957 "packet dropped.\n", dev->name);
2951 dev->stats.tx_dropped++; 2958 dev->stats.tx_dropped++;
2952 priv->extra_stats.tx_underrun++; 2959 priv->extra_stats.tx_underrun++;
2953 2960
2961 local_irq_save(flags);
2962 lock_tx_qs(priv);
2963
2954 /* Reactivate the Tx Queues */ 2964 /* Reactivate the Tx Queues */
2955 gfar_write(&regs->tstat, gfargrp->tstat); 2965 gfar_write(&regs->tstat, gfargrp->tstat);
2966
2967 unlock_tx_qs(priv);
2968 local_irq_restore(flags);
2956 } 2969 }
2957 if (netif_msg_tx_err(priv)) 2970 if (netif_msg_tx_err(priv))
2958 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); 2971 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
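The locking comment added to gfar_start_xmit() above encodes a reusable pattern: a counter that other contexts only ever increase may be read without the lock (a stale read can only under-estimate the free space), while the lock is taken just around the decrement and the hardware kick. A sketch under those assumptions (illustrative names, not driver code):

	if (ring->num_free < needed) {		/* lock-free read is safe */
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	/* ... build all descriptors except the first READY bit ... */

	spin_lock_irqsave(&ring->lock, flags);
	ring->num_free -= needed;	/* the only place the count shrinks */
	set_first_bd_ready(ring);	/* serialized against the error path */
	spin_unlock_irqrestore(&ring->lock, flags);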
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index 3724835d2856..b31c9c8876e6 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -186,7 +186,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
186 temp = gfar_read(&regs->attreli); 186 temp = gfar_read(&regs->attreli);
187 temp &= ~ATTRELI_EI_MASK; 187 temp &= ~ATTRELI_EI_MASK;
188 temp |= ATTRELI_EI(index); 188 temp |= ATTRELI_EI(index);
189 gfar_write(&regs->attreli, flags); 189 gfar_write(&regs->attreli, temp);
190 190
191out: 191out:
192 unlock_rx_qs(priv); 192 unlock_rx_qs(priv);
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index fc9c57893f8a..7db0a1c3216c 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -903,7 +903,7 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
903static long mkiss_compat_ioctl(struct tty_struct *tty, struct file *file, 903static long mkiss_compat_ioctl(struct tty_struct *tty, struct file *file,
904 unsigned int cmd, unsigned long arg) 904 unsigned int cmd, unsigned long arg)
905{ 905{
906 switch (arg) { 906 switch (cmd) {
907 case SIOCGIFNAME: 907 case SIOCGIFNAME:
908 case SIOCGIFENCAP: 908 case SIOCGIFENCAP:
909 case SIOCSIFENCAP: 909 case SIOCSIFENCAP:
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 3298f5a11dab..63abd1c0d75e 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -59,10 +59,10 @@ struct igb_adapter;
59#define MAX_Q_VECTORS 8 59#define MAX_Q_VECTORS 8
60 60
61/* Transmit and receive queues */ 61/* Transmit and receive queues */
62#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? \ 62#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \
63 (adapter->vfs_allocated_count > 6 ? 1 : 2) : 4) 63 (hw->mac.type > e1000_82575 ? 8 : 4))
64#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES 64#define IGB_ABS_MAX_TX_QUEUES 8
65#define IGB_ABS_MAX_TX_QUEUES 4 65#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES
66 66
67#define IGB_MAX_VF_MC_ENTRIES 30 67#define IGB_MAX_VF_MC_ENTRIES 30
68#define IGB_MAX_VF_FUNCTIONS 8 68#define IGB_MAX_VF_FUNCTIONS 8
@@ -249,10 +249,6 @@ struct igb_adapter {
249 u16 link_speed; 249 u16 link_speed;
250 u16 link_duplex; 250 u16 link_duplex;
251 251
252 unsigned int total_tx_bytes;
253 unsigned int total_tx_packets;
254 unsigned int total_rx_bytes;
255 unsigned int total_rx_packets;
256 /* Interrupt Throttle Rate */ 252 /* Interrupt Throttle Rate */
257 u32 rx_itr_setting; 253 u32 rx_itr_setting;
258 u32 tx_itr_setting; 254 u32 tx_itr_setting;
@@ -315,6 +311,7 @@ struct igb_adapter {
315 u16 rx_ring_count; 311 u16 rx_ring_count;
316 unsigned int vfs_allocated_count; 312 unsigned int vfs_allocated_count;
317 struct vf_data_storage *vf_data; 313 struct vf_data_storage *vf_data;
314 u32 rss_queues;
318}; 315};
319 316
320#define IGB_FLAG_HAS_MSI (1 << 0) 317#define IGB_FLAG_HAS_MSI (1 << 0)
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 90b89a81f669..c1cde5b44906 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -37,77 +37,88 @@
37 37
38#include "igb.h" 38#include "igb.h"
39 39
40enum {NETDEV_STATS, IGB_STATS};
41
42struct igb_stats { 40struct igb_stats {
43 char stat_string[ETH_GSTRING_LEN]; 41 char stat_string[ETH_GSTRING_LEN];
44 int type;
45 int sizeof_stat; 42 int sizeof_stat;
46 int stat_offset; 43 int stat_offset;
47}; 44};
48 45
49#define IGB_STAT(m) IGB_STATS, \ 46#define IGB_STAT(_name, _stat) { \
50 FIELD_SIZEOF(struct igb_adapter, m), \ 47 .stat_string = _name, \
51 offsetof(struct igb_adapter, m) 48 .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
52#define IGB_NETDEV_STAT(m) NETDEV_STATS, \ 49 .stat_offset = offsetof(struct igb_adapter, _stat) \
53 FIELD_SIZEOF(struct net_device, m), \ 50}
54 offsetof(struct net_device, m)
55
56static const struct igb_stats igb_gstrings_stats[] = { 51static const struct igb_stats igb_gstrings_stats[] = {
57 { "rx_packets", IGB_STAT(stats.gprc) }, 52 IGB_STAT("rx_packets", stats.gprc),
58 { "tx_packets", IGB_STAT(stats.gptc) }, 53 IGB_STAT("tx_packets", stats.gptc),
59 { "rx_bytes", IGB_STAT(stats.gorc) }, 54 IGB_STAT("rx_bytes", stats.gorc),
60 { "tx_bytes", IGB_STAT(stats.gotc) }, 55 IGB_STAT("tx_bytes", stats.gotc),
61 { "rx_broadcast", IGB_STAT(stats.bprc) }, 56 IGB_STAT("rx_broadcast", stats.bprc),
62 { "tx_broadcast", IGB_STAT(stats.bptc) }, 57 IGB_STAT("tx_broadcast", stats.bptc),
63 { "rx_multicast", IGB_STAT(stats.mprc) }, 58 IGB_STAT("rx_multicast", stats.mprc),
64 { "tx_multicast", IGB_STAT(stats.mptc) }, 59 IGB_STAT("tx_multicast", stats.mptc),
65 { "rx_errors", IGB_NETDEV_STAT(stats.rx_errors) }, 60 IGB_STAT("multicast", stats.mprc),
66 { "tx_errors", IGB_NETDEV_STAT(stats.tx_errors) }, 61 IGB_STAT("collisions", stats.colc),
67 { "tx_dropped", IGB_NETDEV_STAT(stats.tx_dropped) }, 62 IGB_STAT("rx_crc_errors", stats.crcerrs),
68 { "multicast", IGB_STAT(stats.mprc) }, 63 IGB_STAT("rx_no_buffer_count", stats.rnbc),
69 { "collisions", IGB_STAT(stats.colc) }, 64 IGB_STAT("rx_missed_errors", stats.mpc),
70 { "rx_length_errors", IGB_NETDEV_STAT(stats.rx_length_errors) }, 65 IGB_STAT("tx_aborted_errors", stats.ecol),
71 { "rx_over_errors", IGB_NETDEV_STAT(stats.rx_over_errors) }, 66 IGB_STAT("tx_carrier_errors", stats.tncrs),
72 { "rx_crc_errors", IGB_STAT(stats.crcerrs) }, 67 IGB_STAT("tx_window_errors", stats.latecol),
73 { "rx_frame_errors", IGB_NETDEV_STAT(stats.rx_frame_errors) }, 68 IGB_STAT("tx_abort_late_coll", stats.latecol),
74 { "rx_no_buffer_count", IGB_STAT(stats.rnbc) }, 69 IGB_STAT("tx_deferred_ok", stats.dc),
75 { "rx_queue_drop_packet_count", IGB_NETDEV_STAT(stats.rx_fifo_errors) }, 70 IGB_STAT("tx_single_coll_ok", stats.scc),
76 { "rx_missed_errors", IGB_STAT(stats.mpc) }, 71 IGB_STAT("tx_multi_coll_ok", stats.mcc),
77 { "tx_aborted_errors", IGB_STAT(stats.ecol) }, 72 IGB_STAT("tx_timeout_count", tx_timeout_count),
78 { "tx_carrier_errors", IGB_STAT(stats.tncrs) }, 73 IGB_STAT("rx_long_length_errors", stats.roc),
79 { "tx_fifo_errors", IGB_NETDEV_STAT(stats.tx_fifo_errors) }, 74 IGB_STAT("rx_short_length_errors", stats.ruc),
80 { "tx_heartbeat_errors", IGB_NETDEV_STAT(stats.tx_heartbeat_errors) }, 75 IGB_STAT("rx_align_errors", stats.algnerrc),
81 { "tx_window_errors", IGB_STAT(stats.latecol) }, 76 IGB_STAT("tx_tcp_seg_good", stats.tsctc),
82 { "tx_abort_late_coll", IGB_STAT(stats.latecol) }, 77 IGB_STAT("tx_tcp_seg_failed", stats.tsctfc),
83 { "tx_deferred_ok", IGB_STAT(stats.dc) }, 78 IGB_STAT("rx_flow_control_xon", stats.xonrxc),
84 { "tx_single_coll_ok", IGB_STAT(stats.scc) }, 79 IGB_STAT("rx_flow_control_xoff", stats.xoffrxc),
85 { "tx_multi_coll_ok", IGB_STAT(stats.mcc) }, 80 IGB_STAT("tx_flow_control_xon", stats.xontxc),
86 { "tx_timeout_count", IGB_STAT(tx_timeout_count) }, 81 IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
87 { "rx_long_length_errors", IGB_STAT(stats.roc) }, 82 IGB_STAT("rx_long_byte_count", stats.gorc),
88 { "rx_short_length_errors", IGB_STAT(stats.ruc) }, 83 IGB_STAT("tx_dma_out_of_sync", stats.doosync),
89 { "rx_align_errors", IGB_STAT(stats.algnerrc) }, 84 IGB_STAT("tx_smbus", stats.mgptc),
90 { "tx_tcp_seg_good", IGB_STAT(stats.tsctc) }, 85 IGB_STAT("rx_smbus", stats.mgprc),
91 { "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) }, 86 IGB_STAT("dropped_smbus", stats.mgpdc),
92 { "rx_flow_control_xon", IGB_STAT(stats.xonrxc) }, 87};
93 { "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) }, 88
94 { "tx_flow_control_xon", IGB_STAT(stats.xontxc) }, 89#define IGB_NETDEV_STAT(_net_stat) { \
95 { "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) }, 90 .stat_string = __stringify(_net_stat), \
96 { "rx_long_byte_count", IGB_STAT(stats.gorc) }, 91 .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
97 { "tx_dma_out_of_sync", IGB_STAT(stats.doosync) }, 92 .stat_offset = offsetof(struct net_device_stats, _net_stat) \
98 { "tx_smbus", IGB_STAT(stats.mgptc) }, 93}
99 { "rx_smbus", IGB_STAT(stats.mgprc) }, 94static const struct igb_stats igb_gstrings_net_stats[] = {
100 { "dropped_smbus", IGB_STAT(stats.mgpdc) }, 95 IGB_NETDEV_STAT(rx_errors),
96 IGB_NETDEV_STAT(tx_errors),
97 IGB_NETDEV_STAT(tx_dropped),
98 IGB_NETDEV_STAT(rx_length_errors),
99 IGB_NETDEV_STAT(rx_over_errors),
100 IGB_NETDEV_STAT(rx_frame_errors),
101 IGB_NETDEV_STAT(rx_fifo_errors),
102 IGB_NETDEV_STAT(tx_fifo_errors),
103 IGB_NETDEV_STAT(tx_heartbeat_errors)
101}; 104};
102 105
106#define IGB_GLOBAL_STATS_LEN \
107 (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
108#define IGB_NETDEV_STATS_LEN \
109 (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
110#define IGB_RX_QUEUE_STATS_LEN \
111 (sizeof(struct igb_rx_queue_stats) / sizeof(u64))
112#define IGB_TX_QUEUE_STATS_LEN \
113 (sizeof(struct igb_tx_queue_stats) / sizeof(u64))
103#define IGB_QUEUE_STATS_LEN \ 114#define IGB_QUEUE_STATS_LEN \
104 ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \ 115 ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
105 (sizeof(struct igb_rx_queue_stats) / sizeof(u64))) + \ 116 IGB_RX_QUEUE_STATS_LEN) + \
106 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \ 117 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
107 (sizeof(struct igb_tx_queue_stats) / sizeof(u64)))) 118 IGB_TX_QUEUE_STATS_LEN))
108#define IGB_GLOBAL_STATS_LEN \ 119#define IGB_STATS_LEN \
109 (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)) 120 (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)
110#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN) 121
111static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { 122static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
112 "Register test (offline)", "Eeprom test (offline)", 123 "Register test (offline)", "Eeprom test (offline)",
113 "Interrupt test (offline)", "Loopback test (offline)", 124 "Interrupt test (offline)", "Loopback test (offline)",
@@ -735,17 +746,17 @@ static int igb_set_ringparam(struct net_device *netdev,
735 struct igb_adapter *adapter = netdev_priv(netdev); 746 struct igb_adapter *adapter = netdev_priv(netdev);
736 struct igb_ring *temp_ring; 747 struct igb_ring *temp_ring;
737 int i, err = 0; 748 int i, err = 0;
738 u32 new_rx_count, new_tx_count; 749 u16 new_rx_count, new_tx_count;
739 750
740 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 751 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
741 return -EINVAL; 752 return -EINVAL;
742 753
743 new_rx_count = min(ring->rx_pending, (u32)IGB_MAX_RXD); 754 new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD);
744 new_rx_count = max(new_rx_count, (u32)IGB_MIN_RXD); 755 new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD);
745 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); 756 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
746 757
747 new_tx_count = min(ring->tx_pending, (u32)IGB_MAX_TXD); 758 new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD);
748 new_tx_count = max(new_tx_count, (u32)IGB_MIN_TXD); 759 new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD);
749 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); 760 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
750 761
751 if ((new_tx_count == adapter->tx_ring_count) && 762 if ((new_tx_count == adapter->tx_ring_count) &&
@@ -1922,43 +1933,32 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
1922 struct ethtool_stats *stats, u64 *data) 1933 struct ethtool_stats *stats, u64 *data)
1923{ 1934{
1924 struct igb_adapter *adapter = netdev_priv(netdev); 1935 struct igb_adapter *adapter = netdev_priv(netdev);
1936 struct net_device_stats *net_stats = &netdev->stats;
1925 u64 *queue_stat; 1937 u64 *queue_stat;
1926 int stat_count_tx = sizeof(struct igb_tx_queue_stats) / sizeof(u64); 1938 int i, j, k;
1927 int stat_count_rx = sizeof(struct igb_rx_queue_stats) / sizeof(u64); 1939 char *p;
1928 int j;
1929 int i;
1930 char *p = NULL;
1931 1940
1932 igb_update_stats(adapter); 1941 igb_update_stats(adapter);
1933 1942
1934 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { 1943 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
1935 switch (igb_gstrings_stats[i].type) { 1944 p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
1936 case NETDEV_STATS:
1937 p = (char *) netdev +
1938 igb_gstrings_stats[i].stat_offset;
1939 break;
1940 case IGB_STATS:
1941 p = (char *) adapter +
1942 igb_gstrings_stats[i].stat_offset;
1943 break;
1944 }
1945
1946 data[i] = (igb_gstrings_stats[i].sizeof_stat == 1945 data[i] = (igb_gstrings_stats[i].sizeof_stat ==
1947 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1946 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1948 } 1947 }
1948 for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) {
1949 p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset;
1950 data[i] = (igb_gstrings_net_stats[j].sizeof_stat ==
1951 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1952 }
1949 for (j = 0; j < adapter->num_tx_queues; j++) { 1953 for (j = 0; j < adapter->num_tx_queues; j++) {
1950 int k;
1951 queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats; 1954 queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
1952 for (k = 0; k < stat_count_tx; k++) 1955 for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
1953 data[i + k] = queue_stat[k]; 1956 data[i] = queue_stat[k];
1954 i += k;
1955 } 1957 }
1956 for (j = 0; j < adapter->num_rx_queues; j++) { 1958 for (j = 0; j < adapter->num_rx_queues; j++) {
1957 int k;
1958 queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats; 1959 queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
1959 for (k = 0; k < stat_count_rx; k++) 1960 for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
1960 data[i + k] = queue_stat[k]; 1961 data[i] = queue_stat[k];
1961 i += k;
1962 } 1962 }
1963} 1963}
1964 1964
@@ -1979,6 +1979,11 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
1979 ETH_GSTRING_LEN); 1979 ETH_GSTRING_LEN);
1980 p += ETH_GSTRING_LEN; 1980 p += ETH_GSTRING_LEN;
1981 } 1981 }
1982 for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
1983 memcpy(p, igb_gstrings_net_stats[i].stat_string,
1984 ETH_GSTRING_LEN);
1985 p += ETH_GSTRING_LEN;
1986 }
1982 for (i = 0; i < adapter->num_tx_queues; i++) { 1987 for (i = 0; i < adapter->num_tx_queues; i++) {
1983 sprintf(p, "tx_queue_%u_packets", i); 1988 sprintf(p, "tx_queue_%u_packets", i);
1984 p += ETH_GSTRING_LEN; 1989 p += ETH_GSTRING_LEN;
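The IGB_STAT()/IGB_NETDEV_STAT() rework above is an instance of a common pattern: a stats table built with designated initializers, read back through a byte offset. Reduced to a standalone sketch (illustrative names, not the driver's):

#include <stddef.h>
#include <stdint.h>

struct counters { uint64_t rx_pkts; uint32_t tx_drops; };

struct stat_desc {
	const char *name;
	size_t size;
	size_t offset;
};

#define STAT(_name, _field) {					\
	.name   = _name,					\
	.size   = sizeof(((struct counters *)0)->_field),	\
	.offset = offsetof(struct counters, _field),		\
}

static const struct stat_desc descs[] = {
	STAT("rx_packets", rx_pkts),
	STAT("tx_dropped", tx_drops),
};

static uint64_t read_stat(const struct counters *c, const struct stat_desc *d)
{
	const char *p = (const char *)c + d->offset;

	return d->size == sizeof(uint64_t) ?
		*(const uint64_t *)p : *(const uint32_t *)p;
}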
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index b044c985df0b..0cab5e2b0894 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -296,10 +296,10 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
296 * and continue consuming queues in the same sequence 296 * and continue consuming queues in the same sequence
297 */ 297 */
298 if (adapter->vfs_allocated_count) { 298 if (adapter->vfs_allocated_count) {
299 for (; i < adapter->num_rx_queues; i++) 299 for (; i < adapter->rss_queues; i++)
300 adapter->rx_ring[i].reg_idx = rbase_offset + 300 adapter->rx_ring[i].reg_idx = rbase_offset +
301 Q_IDX_82576(i); 301 Q_IDX_82576(i);
302 for (; j < adapter->num_tx_queues; j++) 302 for (; j < adapter->rss_queues; j++)
303 adapter->tx_ring[j].reg_idx = rbase_offset + 303 adapter->tx_ring[j].reg_idx = rbase_offset +
304 Q_IDX_82576(j); 304 Q_IDX_82576(j);
305 } 305 }
@@ -618,14 +618,15 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
618 int numvecs, i; 618 int numvecs, i;
619 619
620 /* Number of supported queues. */ 620 /* Number of supported queues. */
621 adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); 621 adapter->num_rx_queues = adapter->rss_queues;
622 adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus()); 622 adapter->num_tx_queues = adapter->rss_queues;
623 623
624 /* start with one vector for every rx queue */ 624 /* start with one vector for every rx queue */
625 numvecs = adapter->num_rx_queues; 625 numvecs = adapter->num_rx_queues;
626 626
627	/* if tx handler is separate, add 1 for every tx queue */ 627	/* if tx handler is separate, add 1 for every tx queue */
628 numvecs += adapter->num_tx_queues; 628 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
629 numvecs += adapter->num_tx_queues;
629 630
630 /* store the number of vectors reserved for queues */ 631 /* store the number of vectors reserved for queues */
631 adapter->num_q_vectors = numvecs; 632 adapter->num_q_vectors = numvecs;
@@ -666,6 +667,7 @@ msi_only:
666 } 667 }
667#endif 668#endif
668 adapter->vfs_allocated_count = 0; 669 adapter->vfs_allocated_count = 0;
670 adapter->rss_queues = 1;
669 adapter->flags |= IGB_FLAG_QUEUE_PAIRS; 671 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
670 adapter->num_rx_queues = 1; 672 adapter->num_rx_queues = 1;
671 adapter->num_tx_queues = 1; 673 adapter->num_tx_queues = 1;
@@ -1566,56 +1568,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1566 } 1568 }
1567 1569
1568#endif 1570#endif
1569 switch (hw->mac.type) {
1570 case e1000_82576:
1571 /*
1572 * Initialize hardware timer: we keep it running just in case
1573 * that some program needs it later on.
1574 */
1575 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1576 adapter->cycles.read = igb_read_clock;
1577 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1578 adapter->cycles.mult = 1;
1579 /**
1580 * Scale the NIC clock cycle by a large factor so that
1581 * relatively small clock corrections can be added or
 1582 * subtracted at each clock tick. The drawbacks of a large
1583 * factor are a) that the clock register overflows more quickly
1584 * (not such a big deal) and b) that the increment per tick has
1585 * to fit into 24 bits. As a result we need to use a shift of
1586 * 19 so we can fit a value of 16 into the TIMINCA register.
1587 */
1588 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
1589 wr32(E1000_TIMINCA,
1590 (1 << E1000_TIMINCA_16NS_SHIFT) |
1591 (16 << IGB_82576_TSYNC_SHIFT));
1592
1593 /* Set registers so that rollover occurs soon to test this. */
1594 wr32(E1000_SYSTIML, 0x00000000);
1595 wr32(E1000_SYSTIMH, 0xFF800000);
1596 wrfl();
1597
1598 timecounter_init(&adapter->clock,
1599 &adapter->cycles,
1600 ktime_to_ns(ktime_get_real()));
1601 /*
1602 * Synchronize our NIC clock against system wall clock. NIC
1603 * time stamp reading requires ~3us per sample, each sample
1604 * was pretty stable even under load => only require 10
1605 * samples for each offset comparison.
1606 */
1607 memset(&adapter->compare, 0, sizeof(adapter->compare));
1608 adapter->compare.source = &adapter->clock;
1609 adapter->compare.target = ktime_get_real;
1610 adapter->compare.num_samples = 10;
1611 timecompare_update(&adapter->compare, 0);
1612 break;
1613 case e1000_82575:
1614 /* 82575 does not support timesync */
1615 default:
1616 break;
1617 }
1618
1619 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 1571 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1620 /* print bus type/speed/width info */ 1572 /* print bus type/speed/width info */
1621 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 1573 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -1781,6 +1733,70 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
1781#endif /* CONFIG_PCI_IOV */ 1733#endif /* CONFIG_PCI_IOV */
1782} 1734}
1783 1735
1736
1737/**
1738 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
1739 * @adapter: board private structure to initialize
1740 *
1741 * igb_init_hw_timer initializes the function pointer and values for the hw
1742 * timer found in hardware.
1743 **/
1744static void igb_init_hw_timer(struct igb_adapter *adapter)
1745{
1746 struct e1000_hw *hw = &adapter->hw;
1747
1748 switch (hw->mac.type) {
1749 case e1000_82576:
1750 /*
1751 * Initialize hardware timer: we keep it running just in case
 1752 * some program needs it later on.
1753 */
1754 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1755 adapter->cycles.read = igb_read_clock;
1756 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1757 adapter->cycles.mult = 1;
1758 /**
1759 * Scale the NIC clock cycle by a large factor so that
1760 * relatively small clock corrections can be added or
 1761 * subtracted at each clock tick. The drawbacks of a large
1762 * factor are a) that the clock register overflows more quickly
1763 * (not such a big deal) and b) that the increment per tick has
1764 * to fit into 24 bits. As a result we need to use a shift of
1765 * 19 so we can fit a value of 16 into the TIMINCA register.
1766 */
1767 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
1768 wr32(E1000_TIMINCA,
1769 (1 << E1000_TIMINCA_16NS_SHIFT) |
1770 (16 << IGB_82576_TSYNC_SHIFT));
1771
1772 /* Set registers so that rollover occurs soon to test this. */
1773 wr32(E1000_SYSTIML, 0x00000000);
1774 wr32(E1000_SYSTIMH, 0xFF800000);
1775 wrfl();
1776
1777 timecounter_init(&adapter->clock,
1778 &adapter->cycles,
1779 ktime_to_ns(ktime_get_real()));
1780 /*
1781 * Synchronize our NIC clock against system wall clock. NIC
1782 * time stamp reading requires ~3us per sample, each sample
1783 * was pretty stable even under load => only require 10
1784 * samples for each offset comparison.
1785 */
1786 memset(&adapter->compare, 0, sizeof(adapter->compare));
1787 adapter->compare.source = &adapter->clock;
1788 adapter->compare.target = ktime_get_real;
1789 adapter->compare.num_samples = 10;
1790 timecompare_update(&adapter->compare, 0);
1791 break;
1792 case e1000_82575:
1793 /* 82575 does not support timesync */
1794 default:
1795 break;
1796 }
1797
1798}
1799
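The shift arithmetic in the comment checks out directly: the hardware adds 16 << 19 = 8388608 to SYSTIM every 16 ns tick, which still fits the 24-bit increment field, and the clocksource recovers nanoseconds with mult = 1 and shift = 19. A standalone sketch of that math:

    #include <stdio.h>
    #include <stdint.h>

    #define TSYNC_SHIFT 19          /* IGB_82576_TSYNC_SHIFT in the driver */

    int main(void)
    {
        /* hardware adds this to SYSTIM every 16 ns clock tick */
        uint64_t incr = (uint64_t)16 << TSYNC_SHIFT;   /* 8388608 */
        /* the clocksource converts cycles back to ns with mult=1, shift=19 */
        uint64_t cycles = incr * 1000;                 /* 1000 ticks = 16 us */
        uint64_t ns = cycles >> TSYNC_SHIFT;

        printf("increment per tick: %llu (%s the 24-bit field)\n",
               (unsigned long long)incr,
               incr < (1 << 24) ? "fits" : "overflows");
        printf("recovered time: %llu ns\n", (unsigned long long)ns); /* 16000 */
        return 0;
    }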
1784/** 1800/**
1785 * igb_sw_init - Initialize general software structures (struct igb_adapter) 1801 * igb_sw_init - Initialize general software structures (struct igb_adapter)
1786 * @adapter: board private structure to initialize 1802 * @adapter: board private structure to initialize
@@ -1810,12 +1826,24 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
1810 adapter->vfs_allocated_count = max_vfs; 1826 adapter->vfs_allocated_count = max_vfs;
1811 1827
1812#endif /* CONFIG_PCI_IOV */ 1828#endif /* CONFIG_PCI_IOV */
1829 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
1830
1831 /*
1832 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
1833 * then we should combine the queues into a queue pair in order to
1834 * conserve interrupts due to limited supply
1835 */
1836 if ((adapter->rss_queues > 4) ||
1837 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
1838 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1839
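The pairing heuristic above is a pure predicate on the requested RSS queue count and the VF count, reflecting the 82576's fixed MSI-X vector pool. A hedged restatement that can be exercised in isolation:

    #include <stdio.h>
    #include <stdbool.h>

    /* mirrors the heuristic above: the vector pool is limited */
    static bool need_queue_pairs(unsigned rss_queues, unsigned vfs)
    {
        return rss_queues > 4 || (rss_queues > 1 && vfs > 6);
    }

    int main(void)
    {
        printf("%d\n", need_queue_pairs(4, 0));  /* 0: enough vectors */
        printf("%d\n", need_queue_pairs(8, 0));  /* 1: >4 RSS queues */
        printf("%d\n", need_queue_pairs(2, 7));  /* 1: VFs eat into the pool */
        return 0;
    }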
1813 /* This call may decrease the number of queues */ 1840 /* This call may decrease the number of queues */
1814 if (igb_init_interrupt_scheme(adapter)) { 1841 if (igb_init_interrupt_scheme(adapter)) {
1815 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 1842 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1816 return -ENOMEM; 1843 return -ENOMEM;
1817 } 1844 }
1818 1845
1846 igb_init_hw_timer(adapter);
1819 igb_probe_vfs(adapter); 1847 igb_probe_vfs(adapter);
1820 1848
1821 /* Explicitly disable IRQ since the NIC can be in any state. */ 1849 /* Explicitly disable IRQ since the NIC can be in any state. */
@@ -2000,7 +2028,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2000 } 2028 }
2001 } 2029 }
2002 2030
2003 for (i = 0; i < IGB_MAX_TX_QUEUES; i++) { 2031 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
2004 int r_idx = i % adapter->num_tx_queues; 2032 int r_idx = i % adapter->num_tx_queues;
2005 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx]; 2033 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
2006 } 2034 }
@@ -2184,7 +2212,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
2184 array_wr32(E1000_RSSRK(0), j, rsskey); 2212 array_wr32(E1000_RSSRK(0), j, rsskey);
2185 } 2213 }
2186 2214
2187 num_rx_queues = adapter->num_rx_queues; 2215 num_rx_queues = adapter->rss_queues;
2188 2216
2189 if (adapter->vfs_allocated_count) { 2217 if (adapter->vfs_allocated_count) {
 2190 /* 82575 and 82576 support 2 RSS queues for VMDq */ 2218 /* 82575 and 82576 support 2 RSS queues for VMDq */
@@ -2240,7 +2268,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
2240 E1000_VT_CTL_DEFAULT_POOL_SHIFT; 2268 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2241 wr32(E1000_VT_CTL, vtctl); 2269 wr32(E1000_VT_CTL, vtctl);
2242 } 2270 }
2243 if (adapter->num_rx_queues > 1) 2271 if (adapter->rss_queues > 1)
2244 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q; 2272 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2245 else 2273 else
2246 mrqc = E1000_MRQC_ENABLE_VMDQ; 2274 mrqc = E1000_MRQC_ENABLE_VMDQ;
@@ -2370,7 +2398,7 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
2370 /* clear all bits that might not be set */ 2398 /* clear all bits that might not be set */
2371 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); 2399 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2372 2400
2373 if (adapter->num_rx_queues > 1 && vfn == adapter->vfs_allocated_count) 2401 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
2374 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ 2402 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2375 /* 2403 /*
2376 * for VMDq only allow the VFs and pool 0 to accept broadcast and 2404 * for VMDq only allow the VFs and pool 0 to accept broadcast and
@@ -2915,7 +2943,6 @@ static void igb_watchdog_task(struct work_struct *work)
2915 watchdog_task); 2943 watchdog_task);
2916 struct e1000_hw *hw = &adapter->hw; 2944 struct e1000_hw *hw = &adapter->hw;
2917 struct net_device *netdev = adapter->netdev; 2945 struct net_device *netdev = adapter->netdev;
2918 struct igb_ring *tx_ring = adapter->tx_ring;
2919 u32 link; 2946 u32 link;
2920 int i; 2947 int i;
2921 2948
@@ -2985,22 +3012,24 @@ static void igb_watchdog_task(struct work_struct *work)
2985 igb_update_stats(adapter); 3012 igb_update_stats(adapter);
2986 igb_update_adaptive(hw); 3013 igb_update_adaptive(hw);
2987 3014
2988 if (!netif_carrier_ok(netdev)) { 3015 for (i = 0; i < adapter->num_tx_queues; i++) {
2989 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { 3016 struct igb_ring *tx_ring = &adapter->tx_ring[i];
3017 if (!netif_carrier_ok(netdev)) {
2990 /* We've lost link, so the controller stops DMA, 3018 /* We've lost link, so the controller stops DMA,
2991 * but we've got queued Tx work that's never going 3019 * but we've got queued Tx work that's never going
2992 * to get done, so reset controller to flush Tx. 3020 * to get done, so reset controller to flush Tx.
2993 * (Do the reset outside of interrupt context). */ 3021 * (Do the reset outside of interrupt context). */
2994 adapter->tx_timeout_count++; 3022 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
2995 schedule_work(&adapter->reset_task); 3023 adapter->tx_timeout_count++;
2996 /* return immediately since reset is imminent */ 3024 schedule_work(&adapter->reset_task);
2997 return; 3025 /* return immediately since reset is imminent */
3026 return;
3027 }
2998 } 3028 }
2999 }
3000 3029
3001 /* Force detection of hung controller every watchdog period */ 3030 /* Force detection of hung controller every watchdog period */
3002 for (i = 0; i < adapter->num_tx_queues; i++) 3031 tx_ring->detect_tx_hung = true;
3003 adapter->tx_ring[i].detect_tx_hung = true; 3032 }
3004 3033
3005 /* Cause software interrupt to ensure rx ring is cleaned */ 3034 /* Cause software interrupt to ensure rx ring is cleaned */
3006 if (adapter->msix_entries) { 3035 if (adapter->msix_entries) {
@@ -3761,7 +3790,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3761 3790
3762void igb_update_stats(struct igb_adapter *adapter) 3791void igb_update_stats(struct igb_adapter *adapter)
3763{ 3792{
3764 struct net_device *netdev = adapter->netdev; 3793 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
3765 struct e1000_hw *hw = &adapter->hw; 3794 struct e1000_hw *hw = &adapter->hw;
3766 struct pci_dev *pdev = adapter->pdev; 3795 struct pci_dev *pdev = adapter->pdev;
3767 u32 rnbc; 3796 u32 rnbc;
@@ -3785,13 +3814,13 @@ void igb_update_stats(struct igb_adapter *adapter)
3785 for (i = 0; i < adapter->num_rx_queues; i++) { 3814 for (i = 0; i < adapter->num_rx_queues; i++) {
3786 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF; 3815 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
3787 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp; 3816 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
3788 netdev->stats.rx_fifo_errors += rqdpc_tmp; 3817 net_stats->rx_fifo_errors += rqdpc_tmp;
3789 bytes += adapter->rx_ring[i].rx_stats.bytes; 3818 bytes += adapter->rx_ring[i].rx_stats.bytes;
3790 packets += adapter->rx_ring[i].rx_stats.packets; 3819 packets += adapter->rx_ring[i].rx_stats.packets;
3791 } 3820 }
3792 3821
3793 netdev->stats.rx_bytes = bytes; 3822 net_stats->rx_bytes = bytes;
3794 netdev->stats.rx_packets = packets; 3823 net_stats->rx_packets = packets;
3795 3824
3796 bytes = 0; 3825 bytes = 0;
3797 packets = 0; 3826 packets = 0;
@@ -3799,8 +3828,8 @@ void igb_update_stats(struct igb_adapter *adapter)
3799 bytes += adapter->tx_ring[i].tx_stats.bytes; 3828 bytes += adapter->tx_ring[i].tx_stats.bytes;
3800 packets += adapter->tx_ring[i].tx_stats.packets; 3829 packets += adapter->tx_ring[i].tx_stats.packets;
3801 } 3830 }
3802 netdev->stats.tx_bytes = bytes; 3831 net_stats->tx_bytes = bytes;
3803 netdev->stats.tx_packets = packets; 3832 net_stats->tx_packets = packets;
3804 3833
3805 /* read stats registers */ 3834 /* read stats registers */
3806 adapter->stats.crcerrs += rd32(E1000_CRCERRS); 3835 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
@@ -3837,7 +3866,7 @@ void igb_update_stats(struct igb_adapter *adapter)
3837 rd32(E1000_GOTCH); /* clear GOTCL */ 3866 rd32(E1000_GOTCH); /* clear GOTCL */
3838 rnbc = rd32(E1000_RNBC); 3867 rnbc = rd32(E1000_RNBC);
3839 adapter->stats.rnbc += rnbc; 3868 adapter->stats.rnbc += rnbc;
3840 netdev->stats.rx_fifo_errors += rnbc; 3869 net_stats->rx_fifo_errors += rnbc;
3841 adapter->stats.ruc += rd32(E1000_RUC); 3870 adapter->stats.ruc += rd32(E1000_RUC);
3842 adapter->stats.rfc += rd32(E1000_RFC); 3871 adapter->stats.rfc += rd32(E1000_RFC);
3843 adapter->stats.rjc += rd32(E1000_RJC); 3872 adapter->stats.rjc += rd32(E1000_RJC);
@@ -3878,29 +3907,29 @@ void igb_update_stats(struct igb_adapter *adapter)
3878 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); 3907 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
3879 3908
3880 /* Fill out the OS statistics structure */ 3909 /* Fill out the OS statistics structure */
3881 netdev->stats.multicast = adapter->stats.mprc; 3910 net_stats->multicast = adapter->stats.mprc;
3882 netdev->stats.collisions = adapter->stats.colc; 3911 net_stats->collisions = adapter->stats.colc;
3883 3912
3884 /* Rx Errors */ 3913 /* Rx Errors */
3885 3914
3886 /* RLEC on some newer hardware can be incorrect so build 3915 /* RLEC on some newer hardware can be incorrect so build
3887 * our own version based on RUC and ROC */ 3916 * our own version based on RUC and ROC */
3888 netdev->stats.rx_errors = adapter->stats.rxerrc + 3917 net_stats->rx_errors = adapter->stats.rxerrc +
3889 adapter->stats.crcerrs + adapter->stats.algnerrc + 3918 adapter->stats.crcerrs + adapter->stats.algnerrc +
3890 adapter->stats.ruc + adapter->stats.roc + 3919 adapter->stats.ruc + adapter->stats.roc +
3891 adapter->stats.cexterr; 3920 adapter->stats.cexterr;
3892 netdev->stats.rx_length_errors = adapter->stats.ruc + 3921 net_stats->rx_length_errors = adapter->stats.ruc +
3893 adapter->stats.roc; 3922 adapter->stats.roc;
3894 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 3923 net_stats->rx_crc_errors = adapter->stats.crcerrs;
3895 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 3924 net_stats->rx_frame_errors = adapter->stats.algnerrc;
3896 netdev->stats.rx_missed_errors = adapter->stats.mpc; 3925 net_stats->rx_missed_errors = adapter->stats.mpc;
3897 3926
3898 /* Tx Errors */ 3927 /* Tx Errors */
3899 netdev->stats.tx_errors = adapter->stats.ecol + 3928 net_stats->tx_errors = adapter->stats.ecol +
3900 adapter->stats.latecol; 3929 adapter->stats.latecol;
3901 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 3930 net_stats->tx_aborted_errors = adapter->stats.ecol;
3902 netdev->stats.tx_window_errors = adapter->stats.latecol; 3931 net_stats->tx_window_errors = adapter->stats.latecol;
3903 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 3932 net_stats->tx_carrier_errors = adapter->stats.tncrs;
3904 3933
3905 /* Tx Dropped needs to be maintained elsewhere */ 3934 /* Tx Dropped needs to be maintained elsewhere */
3906 3935
@@ -4923,6 +4952,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
4923 struct sk_buff *skb; 4952 struct sk_buff *skb;
4924 bool cleaned = false; 4953 bool cleaned = false;
4925 int cleaned_count = 0; 4954 int cleaned_count = 0;
4955 int current_node = numa_node_id();
4926 unsigned int total_bytes = 0, total_packets = 0; 4956 unsigned int total_bytes = 0, total_packets = 0;
4927 unsigned int i; 4957 unsigned int i;
4928 u32 staterr; 4958 u32 staterr;
@@ -4977,7 +5007,8 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
4977 buffer_info->page_offset, 5007 buffer_info->page_offset,
4978 length); 5008 length);
4979 5009
4980 if (page_count(buffer_info->page) != 1) 5010 if ((page_count(buffer_info->page) != 1) ||
5011 (page_to_nid(buffer_info->page) != current_node))
4981 buffer_info->page = NULL; 5012 buffer_info->page = NULL;
4982 else 5013 else
4983 get_page(buffer_info->page); 5014 get_page(buffer_info->page);
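The recycle test now has two conditions: the page must be exclusively owned (page_count() == 1) and it must live on the NUMA node of the CPU doing the cleanup; otherwise the driver drops its reference so the next buffer allocation comes from local memory. A toy model of that decision, where fake_page and its fields stand in for the struct page helpers:

    #include <stdbool.h>
    #include <stdio.h>

    /* stand-ins for page_count()/page_to_nid(); values are illustrative */
    struct fake_page { int refcount; int nid; };

    static bool reuse_rx_page(const struct fake_page *pg, int current_node)
    {
        return pg->refcount == 1 && pg->nid == current_node;
    }

    int main(void)
    {
        struct fake_page local_free   = { 1, 0 };
        struct fake_page remote_free  = { 1, 1 };
        struct fake_page local_shared = { 2, 0 };

        printf("%d %d %d\n",
               reuse_rx_page(&local_free, 0),    /* 1: recycle          */
               reuse_rx_page(&remote_free, 0),   /* 0: wrong NUMA node  */
               reuse_rx_page(&local_shared, 0)); /* 0: still referenced */
        return 0;
    }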
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 63056e7b9e22..ba8d246d05a0 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -1751,7 +1751,7 @@ static int ipg_nic_open(struct net_device *dev)
1751 /* Register the interrupt line to be used by the IPG within 1751 /* Register the interrupt line to be used by the IPG within
1752 * the Linux system. 1752 * the Linux system.
1753 */ 1753 */
1754 rc = request_irq(pdev->irq, &ipg_interrupt_handler, IRQF_SHARED, 1754 rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED,
1755 dev->name, dev); 1755 dev->name, dev);
1756 if (rc < 0) { 1756 if (rc < 0) {
1757 printk(KERN_INFO "%s: Error when requesting interrupt.\n", 1757 printk(KERN_INFO "%s: Error when requesting interrupt.\n",
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 215adf6377d0..ae6eab3e5eed 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -852,7 +852,7 @@ static void irda_usb_receive(struct urb *urb)
852 * hot unplug of the dongle... 852 * hot unplug of the dongle...
853 * Lowest effective timer is 10ms... 853 * Lowest effective timer is 10ms...
854 * Jean II */ 854 * Jean II */
855 self->rx_defer_timer.function = &irda_usb_rx_defer_expired; 855 self->rx_defer_timer.function = irda_usb_rx_defer_expired;
856 self->rx_defer_timer.data = (unsigned long) urb; 856 self->rx_defer_timer.data = (unsigned long) urb;
857 mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000)); 857 mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000));
858 return; 858 return;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 448e84d56601..dceed80f16fb 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1204,6 +1204,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
1204 adapter->link_check_timeout = jiffies; 1204 adapter->link_check_timeout = jiffies;
1205 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 1205 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1206 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 1206 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1207 IXGBE_WRITE_FLUSH(hw);
1207 schedule_work(&adapter->watchdog_task); 1208 schedule_work(&adapter->watchdog_task);
1208 } 1209 }
1209} 1210}
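The added IXGBE_WRITE_FLUSH matters because PCI memory writes are posted: the EIMC write can linger in a write buffer while the scheduled watchdog runs with the interrupt not yet masked, and reading any register back forces the write out to the device first. A userspace illustration of the read-back idiom; the volatile variable stands in for a memory-mapped register, real MMIO would go through ioread/iowrite:

    #include <stdio.h>
    #include <stdint.h>

    static volatile uint32_t fake_eimc;  /* stand-in for a device register */

    static void reg_write(volatile uint32_t *reg, uint32_t val) { *reg = val; }
    static uint32_t reg_read(volatile uint32_t *reg) { return *reg; }

    int main(void)
    {
        reg_write(&fake_eimc, 0x1); /* posted: may sit in a write buffer */
        (void)reg_read(&fake_eimc); /* read-back forces the write to land
                                       before dependent work is scheduled */
        puts("interrupt masked before handing off to the watchdog");
        return 0;
    }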
@@ -1339,8 +1340,6 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1339 if (!q_vector->rxr_count) 1340 if (!q_vector->rxr_count)
1340 return IRQ_HANDLED; 1341 return IRQ_HANDLED;
1341 1342
1342 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1343 rx_ring = &(adapter->rx_ring[r_idx]);
1344 /* disable interrupts on this vector only */ 1343 /* disable interrupts on this vector only */
1345 ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx)); 1344 ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
1346 napi_schedule(&q_vector->napi); 1345 napi_schedule(&q_vector->napi);
@@ -3627,10 +3626,10 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
3627 * It's easy to be greedy for MSI-X vectors, but it really 3626 * It's easy to be greedy for MSI-X vectors, but it really
3628 * doesn't do us much good if we have a lot more vectors 3627 * doesn't do us much good if we have a lot more vectors
3629 * than CPU's. So let's be conservative and only ask for 3628 * than CPU's. So let's be conservative and only ask for
3630 * (roughly) twice the number of vectors as there are CPU's. 3629 * (roughly) the same number of vectors as there are CPU's.
3631 */ 3630 */
3632 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, 3631 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
3633 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS; 3632 (int)num_online_cpus()) + NON_Q_VECTORS;
3634 3633
3635 /* 3634 /*
3636 * At the same time, hardware can only support a maximum of 3635 * At the same time, hardware can only support a maximum of
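The revised budget caps queue vectors at the CPU count instead of twice the CPU count. Expressed as a standalone helper, with NON_Q_VECTORS assumed to be 1 for the demo:

    #include <stdio.h>

    #define NON_Q_VECTORS 1   /* e.g. one link/other vector; value assumed */

    static int min_int(int a, int b) { return a < b ? a : b; }

    static int msix_budget(int rxq, int txq, int cpus)
    {
        /* one vector per queue, but never ask for more than one per CPU */
        return min_int(rxq + txq, cpus) + NON_Q_VECTORS;
    }

    int main(void)
    {
        printf("%d\n", msix_budget(16, 16, 8)); /* 9: CPU-bounded   */
        printf("%d\n", msix_budget(2, 2, 8));   /* 5: queue-bounded */
        return 0;
    }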
@@ -5989,6 +5988,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
5989 } else { 5988 } else {
5990 pci_set_master(pdev); 5989 pci_set_master(pdev);
5991 pci_restore_state(pdev); 5990 pci_restore_state(pdev);
5991 pci_save_state(pdev);
5992 5992
5993 pci_wake_from_d3(pdev, false); 5993 pci_wake_from_d3(pdev, false);
5994 5994
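Both this hunk and the s2io one below re-save PCI config state immediately after restoring it: on kernels of this era pci_restore_state() consumed the saved copy, so without the re-save a second error-recovery pass would have nothing to restore. A toy model of that consume-on-restore behaviour:

    #include <stdbool.h>
    #include <stdio.h>

    /* toy model: restore consumes the saved copy, as older kernels did */
    struct toy_pdev { bool state_saved; };

    static void toy_save(struct toy_pdev *p) { p->state_saved = true; }

    static bool toy_restore(struct toy_pdev *p)
    {
        if (!p->state_saved)
            return false;           /* nothing left to restore */
        p->state_saved = false;     /* saved copy is consumed */
        return true;
    }

    int main(void)
    {
        struct toy_pdev pdev = { 0 };

        toy_save(&pdev);
        toy_restore(&pdev);         /* first recovery */
        toy_save(&pdev);            /* the re-save added by the patch */
        printf("second recovery %s\n",
               toy_restore(&pdev) ? "works" : "fails");
        return 0;
    }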
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d7dba3f6f763..ae2b5c79c55e 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -38,12 +38,27 @@ struct macvlan_port {
38 struct list_head vlans; 38 struct list_head vlans;
39}; 39};
40 40
41/**
42 * struct macvlan_rx_stats - MACVLAN percpu rx stats
43 * @rx_packets: number of received packets
44 * @rx_bytes: number of received bytes
45 * @multicast: number of received multicast packets
46 * @rx_errors: number of errors
47 */
48struct macvlan_rx_stats {
49 unsigned long rx_packets;
50 unsigned long rx_bytes;
51 unsigned long multicast;
52 unsigned long rx_errors;
53};
54
41struct macvlan_dev { 55struct macvlan_dev {
42 struct net_device *dev; 56 struct net_device *dev;
43 struct list_head list; 57 struct list_head list;
44 struct hlist_node hlist; 58 struct hlist_node hlist;
45 struct macvlan_port *port; 59 struct macvlan_port *port;
46 struct net_device *lowerdev; 60 struct net_device *lowerdev;
61 struct macvlan_rx_stats *rx_stats;
47}; 62};
48 63
49 64
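Moving the rx counters to per-CPU storage removes cache-line bouncing on the hot receive path: each CPU bumps its own private copy locklessly, and ndo_get_stats folds the copies on demand, as macvlan_dev_get_stats does further down. A userspace approximation, with a plain per-CPU array standing in for alloc_percpu():

    #include <stdio.h>

    #define NR_CPUS 4

    struct rx_stats { unsigned long rx_packets, rx_bytes; };

    /* stand-in for alloc_percpu(): one private slot per cpu, no locks */
    static struct rx_stats percpu_stats[NR_CPUS];

    static void rx_on_cpu(int cpu, unsigned long len)
    {
        percpu_stats[cpu].rx_packets++;   /* only this cpu writes its slot */
        percpu_stats[cpu].rx_bytes += len;
    }

    int main(void)
    {
        struct rx_stats total = { 0, 0 };

        rx_on_cpu(0, 60);
        rx_on_cpu(2, 1500);

        /* the ndo_get_stats-style fold, like for_each_possible_cpu() */
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            total.rx_packets += percpu_stats[cpu].rx_packets;
            total.rx_bytes   += percpu_stats[cpu].rx_bytes;
        }
        printf("%lu packets, %lu bytes\n", total.rx_packets, total.rx_bytes);
        return 0;
    }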
@@ -110,6 +125,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
110 struct net_device *dev; 125 struct net_device *dev;
111 struct sk_buff *nskb; 126 struct sk_buff *nskb;
112 unsigned int i; 127 unsigned int i;
128 struct macvlan_rx_stats *rx_stats;
113 129
114 if (skb->protocol == htons(ETH_P_PAUSE)) 130 if (skb->protocol == htons(ETH_P_PAUSE))
115 return; 131 return;
@@ -117,17 +133,17 @@ static void macvlan_broadcast(struct sk_buff *skb,
117 for (i = 0; i < MACVLAN_HASH_SIZE; i++) { 133 for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
118 hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) { 134 hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) {
119 dev = vlan->dev; 135 dev = vlan->dev;
136 rx_stats = per_cpu_ptr(vlan->rx_stats, smp_processor_id());
120 137
121 nskb = skb_clone(skb, GFP_ATOMIC); 138 nskb = skb_clone(skb, GFP_ATOMIC);
122 if (nskb == NULL) { 139 if (nskb == NULL) {
123 dev->stats.rx_errors++; 140 rx_stats->rx_errors++;
124 dev->stats.rx_dropped++;
125 continue; 141 continue;
126 } 142 }
127 143
128 dev->stats.rx_bytes += skb->len + ETH_HLEN; 144 rx_stats->rx_bytes += skb->len + ETH_HLEN;
129 dev->stats.rx_packets++; 145 rx_stats->rx_packets++;
130 dev->stats.multicast++; 146 rx_stats->multicast++;
131 147
132 nskb->dev = dev; 148 nskb->dev = dev;
133 if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast)) 149 if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast))
@@ -147,6 +163,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
147 const struct macvlan_port *port; 163 const struct macvlan_port *port;
148 const struct macvlan_dev *vlan; 164 const struct macvlan_dev *vlan;
149 struct net_device *dev; 165 struct net_device *dev;
166 struct macvlan_rx_stats *rx_stats;
150 167
151 port = rcu_dereference(skb->dev->macvlan_port); 168 port = rcu_dereference(skb->dev->macvlan_port);
152 if (port == NULL) 169 if (port == NULL)
@@ -166,16 +183,15 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
166 kfree_skb(skb); 183 kfree_skb(skb);
167 return NULL; 184 return NULL;
168 } 185 }
169 186 rx_stats = per_cpu_ptr(vlan->rx_stats, smp_processor_id());
170 skb = skb_share_check(skb, GFP_ATOMIC); 187 skb = skb_share_check(skb, GFP_ATOMIC);
171 if (skb == NULL) { 188 if (skb == NULL) {
172 dev->stats.rx_errors++; 189 rx_stats->rx_errors++;
173 dev->stats.rx_dropped++;
174 return NULL; 190 return NULL;
175 } 191 }
176 192
177 dev->stats.rx_bytes += skb->len + ETH_HLEN; 193 rx_stats->rx_bytes += skb->len + ETH_HLEN;
178 dev->stats.rx_packets++; 194 rx_stats->rx_packets++;
179 195
180 skb->dev = dev; 196 skb->dev = dev;
181 skb->pkt_type = PACKET_HOST; 197 skb->pkt_type = PACKET_HOST;
@@ -202,7 +218,7 @@ static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
202 } else 218 } else
203 txq->tx_dropped++; 219 txq->tx_dropped++;
204 220
205 return NETDEV_TX_OK; 221 return ret;
206} 222}
207 223
208static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev, 224static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -365,9 +381,47 @@ static int macvlan_init(struct net_device *dev)
365 381
366 macvlan_set_lockdep_class(dev); 382 macvlan_set_lockdep_class(dev);
367 383
384 vlan->rx_stats = alloc_percpu(struct macvlan_rx_stats);
385 if (!vlan->rx_stats)
386 return -ENOMEM;
387
368 return 0; 388 return 0;
369} 389}
370 390
391static void macvlan_uninit(struct net_device *dev)
392{
393 struct macvlan_dev *vlan = netdev_priv(dev);
394
395 free_percpu(vlan->rx_stats);
396}
397
398static struct net_device_stats *macvlan_dev_get_stats(struct net_device *dev)
399{
400 struct net_device_stats *stats = &dev->stats;
401 struct macvlan_dev *vlan = netdev_priv(dev);
402
403 dev_txq_stats_fold(dev, stats);
404
405 if (vlan->rx_stats) {
406 struct macvlan_rx_stats *p, rx = {0};
407 int i;
408
409 for_each_possible_cpu(i) {
410 p = per_cpu_ptr(vlan->rx_stats, i);
411 rx.rx_packets += p->rx_packets;
412 rx.rx_bytes += p->rx_bytes;
413 rx.rx_errors += p->rx_errors;
414 rx.multicast += p->multicast;
415 }
416 stats->rx_packets = rx.rx_packets;
417 stats->rx_bytes = rx.rx_bytes;
418 stats->rx_errors = rx.rx_errors;
419 stats->rx_dropped = rx.rx_errors;
420 stats->multicast = rx.multicast;
421 }
422 return stats;
423}
424
371static void macvlan_ethtool_get_drvinfo(struct net_device *dev, 425static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
372 struct ethtool_drvinfo *drvinfo) 426 struct ethtool_drvinfo *drvinfo)
373{ 427{
@@ -404,6 +458,7 @@ static const struct ethtool_ops macvlan_ethtool_ops = {
404 458
405static const struct net_device_ops macvlan_netdev_ops = { 459static const struct net_device_ops macvlan_netdev_ops = {
406 .ndo_init = macvlan_init, 460 .ndo_init = macvlan_init,
461 .ndo_uninit = macvlan_uninit,
407 .ndo_open = macvlan_open, 462 .ndo_open = macvlan_open,
408 .ndo_stop = macvlan_stop, 463 .ndo_stop = macvlan_stop,
409 .ndo_start_xmit = macvlan_start_xmit, 464 .ndo_start_xmit = macvlan_start_xmit,
@@ -411,6 +466,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
411 .ndo_change_rx_flags = macvlan_change_rx_flags, 466 .ndo_change_rx_flags = macvlan_change_rx_flags,
412 .ndo_set_mac_address = macvlan_set_mac_address, 467 .ndo_set_mac_address = macvlan_set_mac_address,
413 .ndo_set_multicast_list = macvlan_set_multicast_list, 468 .ndo_set_multicast_list = macvlan_set_multicast_list,
469 .ndo_get_stats = macvlan_dev_get_stats,
414 .ndo_validate_addr = eth_validate_addr, 470 .ndo_validate_addr = eth_validate_addr,
415}; 471};
416 472
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 44558fcb56ac..8ce58c4c7dd3 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -8143,7 +8143,7 @@ static void __devinit niu_vpd_parse_version(struct niu *np)
8143 int i; 8143 int i;
8144 8144
8145 for (i = 0; i < len - 5; i++) { 8145 for (i = 0; i < len - 5; i++) {
8146 if (!strncmp(s + i, "FCode ", 5)) 8146 if (!strncmp(s + i, "FCode ", 6))
8147 break; 8147 break;
8148 } 8148 }
8149 if (i >= len - 5) 8149 if (i >= len - 5)
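The length change fixes an off-by-one: "FCode " is six characters including the trailing space, and comparing only five would also match strings that merely begin with "FCode". A two-line demonstration, with a hypothetical VPD string:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *vpd = "FCodeless firmware";   /* hypothetical VPD string */

        /* length 5 only checks "FCode" and matches the wrong string ... */
        printf("len 5: %d\n", strncmp(vpd, "FCode ", 5) == 0); /* 1: false hit */
        /* ... length 6 includes the trailing space the parser needs */
        printf("len 6: %d\n", strncmp(vpd, "FCode ", 6) == 0); /* 0 */
        return 0;
    }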
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 7e01fbdb87e0..57e09616330a 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -264,7 +264,7 @@ static int fmvj18x_probe(struct pcmcia_device *link)
264 /* Interrupt setup */ 264 /* Interrupt setup */
265 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; 265 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT;
266 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 266 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
267 link->irq.Handler = &fjn_interrupt; 267 link->irq.Handler = fjn_interrupt;
268 link->irq.Instance = dev; 268 link->irq.Instance = dev;
269 269
270 /* General socket configuration */ 270 /* General socket configuration */
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 5ed6339c52bc..b12e69592d18 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -479,7 +479,7 @@ static int nmclan_probe(struct pcmcia_device *link)
479 link->io.IOAddrLines = 5; 479 link->io.IOAddrLines = 5;
480 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 480 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
481 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 481 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
482 link->irq.Handler = &mace_interrupt; 482 link->irq.Handler = mace_interrupt;
483 link->irq.Instance = dev; 483 link->irq.Instance = dev;
484 link->conf.Attributes = CONF_ENABLE_IRQ; 484 link->conf.Attributes = CONF_ENABLE_IRQ;
485 link->conf.IntType = INT_MEMORY_AND_IO; 485 link->conf.IntType = INT_MEMORY_AND_IO;
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 30b1b3326765..c311fa6597f5 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -36,7 +36,7 @@
36 36
37#define PPP_VERSION "2.4.2" 37#define PPP_VERSION "2.4.2"
38 38
39#define OBUFSIZE 256 39#define OBUFSIZE 4096
40 40
41/* Structure for storing local state. */ 41/* Structure for storing local state. */
42struct asyncppp { 42struct asyncppp {
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 9bf2a6be9031..0a56a778af0a 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -184,7 +184,7 @@ static atomic_t ppp_unit_count = ATOMIC_INIT(0);
184static atomic_t channel_count = ATOMIC_INIT(0); 184static atomic_t channel_count = ATOMIC_INIT(0);
185 185
186/* per-net private data for this module */ 186/* per-net private data for this module */
187static int ppp_net_id; 187static int ppp_net_id __read_mostly;
188struct ppp_net { 188struct ppp_net {
189 /* units to ppp mapping */ 189 /* units to ppp mapping */
190 struct idr units_idr; 190 struct idr units_idr;
@@ -1944,8 +1944,15 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1944 } 1944 }
1945 1945
1946 /* Pull completed packets off the queue and receive them. */ 1946 /* Pull completed packets off the queue and receive them. */
1947 while ((skb = ppp_mp_reconstruct(ppp))) 1947 while ((skb = ppp_mp_reconstruct(ppp))) {
1948 ppp_receive_nonmp_frame(ppp, skb); 1948 if (pskb_may_pull(skb, 2))
1949 ppp_receive_nonmp_frame(ppp, skb);
1950 else {
1951 ++ppp->dev->stats.rx_length_errors;
1952 kfree_skb(skb);
1953 ppp_receive_error(ppp);
1954 }
1955 }
1949 1956
1950 return; 1957 return;
1951 1958
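A reconstructed multilink frame can be shorter than the 2-byte PPP protocol field, so the receive path now verifies those bytes are present before parsing and counts a length error otherwise. A toy receive function showing the guard; the plain buffer stands in for the skb and pskb_may_pull():

    #include <stdio.h>
    #include <stdint.h>

    /* a PPP frame needs at least 2 protocol bytes before parsing */
    static int receive_frame(const uint8_t *buf, size_t len,
                             unsigned long *rx_length_errors)
    {
        if (len < 2) {              /* what pskb_may_pull(skb, 2) guards */
            (*rx_length_errors)++;
            return -1;              /* drop instead of reading past the end */
        }
        uint16_t proto = (uint16_t)((buf[0] << 8) | buf[1]);
        printf("proto 0x%04x\n", (unsigned)proto);
        return 0;
    }

    int main(void)
    {
        unsigned long errs = 0;
        uint8_t runt[1] = { 0x00 };
        uint8_t ok[2]   = { 0x00, 0x21 };   /* 0x0021 = IPv4 in PPP */

        receive_frame(runt, sizeof(runt), &errs);
        receive_frame(ok, sizeof(ok), &errs);
        printf("length errors: %lu\n", errs);
        return 0;
    }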
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 60c8d233209f..a1dcba255b06 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -97,7 +97,7 @@ static const struct proto_ops pppoe_ops;
97static struct ppp_channel_ops pppoe_chan_ops; 97static struct ppp_channel_ops pppoe_chan_ops;
98 98
99/* per-net private data for this module */ 99/* per-net private data for this module */
100static int pppoe_net_id; 100static int pppoe_net_id __read_mostly;
101struct pppoe_net { 101struct pppoe_net {
102 /* 102 /*
103 * we could use _single_ hash table for all 103 * we could use _single_ hash table for all
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 849cc9c62c2a..c58b50f8ba3b 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -232,7 +232,7 @@ static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL };
232static const struct proto_ops pppol2tp_ops; 232static const struct proto_ops pppol2tp_ops;
233 233
234/* per-net private data for this module */ 234/* per-net private data for this module */
235static int pppol2tp_net_id; 235static int pppol2tp_net_id __read_mostly;
236struct pppol2tp_net { 236struct pppol2tp_net {
237 struct list_head pppol2tp_tunnel_list; 237 struct list_head pppol2tp_tunnel_list;
238 rwlock_t pppol2tp_tunnel_list_lock; 238 rwlock_t pppol2tp_tunnel_list_lock;
@@ -1537,7 +1537,7 @@ static struct sock *pppol2tp_prepare_tunnel_socket(struct net *net,
1537 * if the tunnel socket goes away. 1537 * if the tunnel socket goes away.
1538 */ 1538 */
1539 tunnel->old_sk_destruct = sk->sk_destruct; 1539 tunnel->old_sk_destruct = sk->sk_destruct;
1540 sk->sk_destruct = &pppol2tp_tunnel_destruct; 1540 sk->sk_destruct = pppol2tp_tunnel_destruct;
1541 1541
1542 tunnel->sock = sk; 1542 tunnel->sock = sk;
1543 sk->sk_allocation = GFP_ATOMIC; 1543 sk->sk_allocation = GFP_ATOMIC;
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 1f59f054452d..862c1aaf3860 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -16,7 +16,7 @@
16 */ 16 */
17#define DRV_NAME "qlge" 17#define DRV_NAME "qlge"
18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
19#define DRV_VERSION "v1.00.00-b3" 19#define DRV_VERSION "v1.00.00.23.00.00-01"
20 20
21#define PFX "qlge: " 21#define PFX "qlge: "
22#define QPRINTK(qdev, nlevel, klevel, fmt, args...) \ 22#define QPRINTK(qdev, nlevel, klevel, fmt, args...) \
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index bd8e164b121c..7692299e7826 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -69,9 +69,9 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69#define MSIX_IRQ 0 69#define MSIX_IRQ 0
70#define MSI_IRQ 1 70#define MSI_IRQ 1
71#define LEG_IRQ 2 71#define LEG_IRQ 2
72static int irq_type = MSIX_IRQ; 72static int qlge_irq_type = MSIX_IRQ;
73module_param(irq_type, int, MSIX_IRQ); 73module_param(qlge_irq_type, int, MSIX_IRQ);
74MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); 74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75 75
76static struct pci_device_id qlge_pci_tbl[] __devinitdata = { 76static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
77 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)}, 77 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
@@ -2870,7 +2870,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
2870 int i, err; 2870 int i, err;
2871 2871
2872 /* Get the MSIX vectors. */ 2872 /* Get the MSIX vectors. */
2873 if (irq_type == MSIX_IRQ) { 2873 if (qlge_irq_type == MSIX_IRQ) {
2874 /* Try to alloc space for the msix struct, 2874 /* Try to alloc space for the msix struct,
2875 * if it fails then go to MSI/legacy. 2875 * if it fails then go to MSI/legacy.
2876 */ 2876 */
@@ -2878,7 +2878,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
2878 sizeof(struct msix_entry), 2878 sizeof(struct msix_entry),
2879 GFP_KERNEL); 2879 GFP_KERNEL);
2880 if (!qdev->msi_x_entry) { 2880 if (!qdev->msi_x_entry) {
2881 irq_type = MSI_IRQ; 2881 qlge_irq_type = MSI_IRQ;
2882 goto msi; 2882 goto msi;
2883 } 2883 }
2884 2884
@@ -2901,7 +2901,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
2901 QPRINTK(qdev, IFUP, WARNING, 2901 QPRINTK(qdev, IFUP, WARNING,
2902 "MSI-X Enable failed, trying MSI.\n"); 2902 "MSI-X Enable failed, trying MSI.\n");
2903 qdev->intr_count = 1; 2903 qdev->intr_count = 1;
2904 irq_type = MSI_IRQ; 2904 qlge_irq_type = MSI_IRQ;
2905 } else if (err == 0) { 2905 } else if (err == 0) {
2906 set_bit(QL_MSIX_ENABLED, &qdev->flags); 2906 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2907 QPRINTK(qdev, IFUP, INFO, 2907 QPRINTK(qdev, IFUP, INFO,
@@ -2912,7 +2912,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
2912 } 2912 }
2913msi: 2913msi:
2914 qdev->intr_count = 1; 2914 qdev->intr_count = 1;
2915 if (irq_type == MSI_IRQ) { 2915 if (qlge_irq_type == MSI_IRQ) {
2916 if (!pci_enable_msi(qdev->pdev)) { 2916 if (!pci_enable_msi(qdev->pdev)) {
2917 set_bit(QL_MSI_ENABLED, &qdev->flags); 2917 set_bit(QL_MSI_ENABLED, &qdev->flags);
2918 QPRINTK(qdev, IFUP, INFO, 2918 QPRINTK(qdev, IFUP, INFO,
@@ -2920,7 +2920,7 @@ msi:
2920 return; 2920 return;
2921 } 2921 }
2922 } 2922 }
2923 irq_type = LEG_IRQ; 2923 qlge_irq_type = LEG_IRQ;
2924 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n"); 2924 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2925} 2925}
2926 2926
@@ -3514,9 +3514,6 @@ int ql_wol(struct ql_adapter *qdev)
3514 } 3514 }
3515 3515
3516 if (qdev->wol) { 3516 if (qdev->wol) {
3517 /* Reroute all packets to Management Interface */
3518 ql_write32(qdev, MGMT_RCV_CFG, (MGMT_RCV_CFG_RM |
3519 (MGMT_RCV_CFG_RM << 16)));
3520 wol |= MB_WOL_MODE_ON; 3517 wol |= MB_WOL_MODE_ON;
3521 status = ql_mb_wol_mode(qdev, wol); 3518 status = ql_mb_wol_mode(qdev, wol);
3522 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n", 3519 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
@@ -3717,6 +3714,10 @@ static int qlge_open(struct net_device *ndev)
3717 int err = 0; 3714 int err = 0;
3718 struct ql_adapter *qdev = netdev_priv(ndev); 3715 struct ql_adapter *qdev = netdev_priv(ndev);
3719 3716
3717 err = ql_adapter_reset(qdev);
3718 if (err)
3719 return err;
3720
3720 err = ql_configure_rings(qdev); 3721 err = ql_configure_rings(qdev);
3721 if (err) 3722 if (err)
3722 return err; 3723 return err;
@@ -3950,9 +3951,6 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
3950 struct sockaddr *addr = p; 3951 struct sockaddr *addr = p;
3951 int status; 3952 int status;
3952 3953
3953 if (netif_running(ndev))
3954 return -EBUSY;
3955
3956 if (!is_valid_ether_addr(addr->sa_data)) 3954 if (!is_valid_ether_addr(addr->sa_data))
3957 return -EADDRNOTAVAIL; 3955 return -EADDRNOTAVAIL;
3958 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 3956 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 7dfcb58b0eb4..0f30ea4e97ec 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -842,7 +842,7 @@ static int r6040_open(struct net_device *dev)
842 int ret; 842 int ret;
843 843
844 /* Request IRQ and Register interrupt handler */ 844 /* Request IRQ and Register interrupt handler */
845 ret = request_irq(dev->irq, &r6040_interrupt, 845 ret = request_irq(dev->irq, r6040_interrupt,
846 IRQF_SHARED, dev->name, dev); 846 IRQF_SHARED, dev->name, dev);
847 if (ret) 847 if (ret)
848 return ret; 848 return ret;
@@ -1085,7 +1085,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1085 int bar = 0; 1085 int bar = 0;
1086 u16 *adrp; 1086 u16 *adrp;
1087 1087
1088 printk(KERN_INFO "%s\n", version); 1088 printk("%s\n", version);
1089 1089
1090 err = pci_enable_device(pdev); 1090 err = pci_enable_device(pdev);
1091 if (err) 1091 if (err)
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 1b0aa4cf89bc..98f6c51b7608 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3243,9 +3243,9 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3243static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, 3243static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
3244 struct net_device *dev) 3244 struct net_device *dev)
3245{ 3245{
3246 unsigned int mtu = dev->mtu; 3246 unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
3247 3247
3248 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE; 3248 tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
3249} 3249}
3250 3250
3251static int rtl8169_open(struct net_device *dev) 3251static int rtl8169_open(struct net_device *dev)
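The old sizing used the MTU alone, so a VLAN-tagged maximum-size frame (MTU plus the 18-byte VLAN Ethernet header plus the 4-byte FCS) could exceed the receive buffer. The corrected arithmetic, with the driver's default buffer size assumed to be 1536 for the demo:

    #include <stdio.h>

    #define VLAN_ETH_HLEN 18   /* 14-byte Ethernet header + 4-byte VLAN tag */
    #define ETH_FCS_LEN    4   /* trailing CRC */
    #define RX_BUF_SIZE 1536   /* default buffer size; value assumed */

    int main(void)
    {
        unsigned mtu = 1500;
        unsigned max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;  /* 1522 */
        unsigned rx_buf_sz = max_frame > RX_BUF_SIZE ? max_frame
                                                     : RX_BUF_SIZE;

        printf("max frame %u, rx buffer %u\n", max_frame, rx_buf_sz);
        return 0;
    }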
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index ddccf5fa56b6..0dd7839322bc 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -3494,6 +3494,7 @@ static void s2io_reset(struct s2io_nic *sp)
3494 3494
3495 /* Restore the PCI state saved during initialization. */ 3495 /* Restore the PCI state saved during initialization. */
3496 pci_restore_state(sp->pdev); 3496 pci_restore_state(sp->pdev);
3497 pci_save_state(sp->pdev);
3497 pci_read_config_word(sp->pdev, 0x2, &val16); 3498 pci_read_config_word(sp->pdev, 0x2, &val16);
3498 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID) 3499 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3499 break; 3500 break;
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 6a9f51d1d9f2..7f01e60d5172 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -986,7 +986,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
986 struct net_device *dev = pdata->dev; 986 struct net_device *dev = pdata->dev;
987 int npackets = 0; 987 int npackets = 0;
988 988
989 while (likely(netif_running(dev)) && (npackets < budget)) { 989 while (npackets < budget) {
990 unsigned int pktlength; 990 unsigned int pktlength;
991 unsigned int pktwords; 991 unsigned int pktwords;
992 struct sk_buff *skb; 992 struct sk_buff *skb;
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index b4909a2dec66..92e2bbe6b49b 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -1161,7 +1161,7 @@ static int smsc9420_mii_probe(struct net_device *dev)
1161 phydev->phy_id); 1161 phydev->phy_id);
1162 1162
1163 phydev = phy_connect(dev, dev_name(&phydev->dev), 1163 phydev = phy_connect(dev, dev_name(&phydev->dev),
1164 &smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII); 1164 smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII);
1165 1165
1166 if (IS_ERR(phydev)) { 1166 if (IS_ERR(phydev)) {
1167 pr_err("%s: Could not attach to PHY\n", dev->name); 1167 pr_err("%s: Could not attach to PHY\n", dev->name);
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 47a4f0947872..6e6db955b4a9 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
71#define DRV_MODULE_VERSION "3.103" 71#define DRV_MODULE_VERSION "3.104"
72#define DRV_MODULE_RELDATE "November 2, 2009" 72#define DRV_MODULE_RELDATE "November 13, 2009"
73 73
74#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -137,6 +137,12 @@
137#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) 137#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
138#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) 138#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
139 139
140#define TG3_RX_STD_BUFF_RING_SIZE \
141 (sizeof(struct ring_info) * TG3_RX_RING_SIZE)
142
143#define TG3_RX_JMB_BUFF_RING_SIZE \
144 (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
145
140/* minimum number of free TX descriptors required to wake up TX process */ 146/* minimum number of free TX descriptors required to wake up TX process */
141#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) 147#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
142 148
@@ -235,6 +241,9 @@ static struct pci_device_id tg3_pci_tbl[] = {
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, 241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)}, 242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, 243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
238 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, 247 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
239 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, 248 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
240 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, 249 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -396,7 +405,7 @@ static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
396 TG3_64BIT_REG_LOW, val); 405 TG3_64BIT_REG_LOW, val);
397 return; 406 return;
398 } 407 }
399 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) { 408 if (off == TG3_RX_STD_PROD_IDX_REG) {
400 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + 409 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
401 TG3_64BIT_REG_LOW, val); 410 TG3_64BIT_REG_LOW, val);
402 return; 411 return;
@@ -2249,7 +2258,7 @@ static void tg3_nvram_unlock(struct tg3 *tp)
2249static void tg3_enable_nvram_access(struct tg3 *tp) 2258static void tg3_enable_nvram_access(struct tg3 *tp)
2250{ 2259{
2251 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 2260 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2252 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { 2261 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2253 u32 nvaccess = tr32(NVRAM_ACCESS); 2262 u32 nvaccess = tr32(NVRAM_ACCESS);
2254 2263
2255 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); 2264 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
@@ -2260,7 +2269,7 @@ static void tg3_enable_nvram_access(struct tg3 *tp)
2260static void tg3_disable_nvram_access(struct tg3 *tp) 2269static void tg3_disable_nvram_access(struct tg3 *tp)
2261{ 2270{
2262 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 2271 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2263 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { 2272 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2264 u32 nvaccess = tr32(NVRAM_ACCESS); 2273 u32 nvaccess = tr32(NVRAM_ACCESS);
2265 2274
2266 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); 2275 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
@@ -4397,6 +4406,17 @@ static void tg3_tx(struct tg3_napi *tnapi)
4397 } 4406 }
4398} 4407}
4399 4408
4409static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4410{
4411 if (!ri->skb)
4412 return;
4413
4414 pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
4415 map_sz, PCI_DMA_FROMDEVICE);
4416 dev_kfree_skb_any(ri->skb);
4417 ri->skb = NULL;
4418}
4419
4400/* Returns size of skb allocated or < 0 on error. 4420/* Returns size of skb allocated or < 0 on error.
4401 * 4421 *
4402 * We only need to fill in the address because the other members 4422 * We only need to fill in the address because the other members
@@ -4408,16 +4428,14 @@ static void tg3_tx(struct tg3_napi *tnapi)
4408 * buffers the cpu only reads the last cacheline of the RX descriptor 4428 * buffers the cpu only reads the last cacheline of the RX descriptor
4409 * (to fetch the error flags, vlan tag, checksum, and opaque cookie). 4429 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4410 */ 4430 */
4411static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key, 4431static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4412 int src_idx, u32 dest_idx_unmasked) 4432 u32 opaque_key, u32 dest_idx_unmasked)
4413{ 4433{
4414 struct tg3 *tp = tnapi->tp;
4415 struct tg3_rx_buffer_desc *desc; 4434 struct tg3_rx_buffer_desc *desc;
4416 struct ring_info *map, *src_map; 4435 struct ring_info *map, *src_map;
4417 struct sk_buff *skb; 4436 struct sk_buff *skb;
4418 dma_addr_t mapping; 4437 dma_addr_t mapping;
4419 int skb_size, dest_idx; 4438 int skb_size, dest_idx;
4420 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4421 4439
4422 src_map = NULL; 4440 src_map = NULL;
4423 switch (opaque_key) { 4441 switch (opaque_key) {
@@ -4425,8 +4443,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4425 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; 4443 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4426 desc = &tpr->rx_std[dest_idx]; 4444 desc = &tpr->rx_std[dest_idx];
4427 map = &tpr->rx_std_buffers[dest_idx]; 4445 map = &tpr->rx_std_buffers[dest_idx];
4428 if (src_idx >= 0)
4429 src_map = &tpr->rx_std_buffers[src_idx];
4430 skb_size = tp->rx_pkt_map_sz; 4446 skb_size = tp->rx_pkt_map_sz;
4431 break; 4447 break;
4432 4448
@@ -4434,8 +4450,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4434 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; 4450 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4435 desc = &tpr->rx_jmb[dest_idx].std; 4451 desc = &tpr->rx_jmb[dest_idx].std;
4436 map = &tpr->rx_jmb_buffers[dest_idx]; 4452 map = &tpr->rx_jmb_buffers[dest_idx];
4437 if (src_idx >= 0)
4438 src_map = &tpr->rx_jmb_buffers[src_idx];
4439 skb_size = TG3_RX_JMB_MAP_SZ; 4453 skb_size = TG3_RX_JMB_MAP_SZ;
4440 break; 4454 break;
4441 4455
@@ -4465,9 +4479,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4465 map->skb = skb; 4479 map->skb = skb;
4466 pci_unmap_addr_set(map, mapping, mapping); 4480 pci_unmap_addr_set(map, mapping, mapping);
4467 4481
4468 if (src_map != NULL)
4469 src_map->skb = NULL;
4470
4471 desc->addr_hi = ((u64)mapping >> 32); 4482 desc->addr_hi = ((u64)mapping >> 32);
4472 desc->addr_lo = ((u64)mapping & 0xffffffff); 4483 desc->addr_lo = ((u64)mapping & 0xffffffff);
4473 4484
@@ -4478,30 +4489,32 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4478 * members of the RX descriptor are invariant. See notes above 4489 * members of the RX descriptor are invariant. See notes above
4479 * tg3_alloc_rx_skb for full details. 4490 * tg3_alloc_rx_skb for full details.
4480 */ 4491 */
4481static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key, 4492static void tg3_recycle_rx(struct tg3_napi *tnapi,
4482 int src_idx, u32 dest_idx_unmasked) 4493 struct tg3_rx_prodring_set *dpr,
4494 u32 opaque_key, int src_idx,
4495 u32 dest_idx_unmasked)
4483{ 4496{
4484 struct tg3 *tp = tnapi->tp; 4497 struct tg3 *tp = tnapi->tp;
4485 struct tg3_rx_buffer_desc *src_desc, *dest_desc; 4498 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4486 struct ring_info *src_map, *dest_map; 4499 struct ring_info *src_map, *dest_map;
4487 int dest_idx; 4500 int dest_idx;
4488 struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; 4501 struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4489 4502
4490 switch (opaque_key) { 4503 switch (opaque_key) {
4491 case RXD_OPAQUE_RING_STD: 4504 case RXD_OPAQUE_RING_STD:
4492 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; 4505 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4493 dest_desc = &tpr->rx_std[dest_idx]; 4506 dest_desc = &dpr->rx_std[dest_idx];
4494 dest_map = &tpr->rx_std_buffers[dest_idx]; 4507 dest_map = &dpr->rx_std_buffers[dest_idx];
4495 src_desc = &tpr->rx_std[src_idx]; 4508 src_desc = &spr->rx_std[src_idx];
4496 src_map = &tpr->rx_std_buffers[src_idx]; 4509 src_map = &spr->rx_std_buffers[src_idx];
4497 break; 4510 break;
4498 4511
4499 case RXD_OPAQUE_RING_JUMBO: 4512 case RXD_OPAQUE_RING_JUMBO:
4500 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; 4513 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4501 dest_desc = &tpr->rx_jmb[dest_idx].std; 4514 dest_desc = &dpr->rx_jmb[dest_idx].std;
4502 dest_map = &tpr->rx_jmb_buffers[dest_idx]; 4515 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4503 src_desc = &tpr->rx_jmb[src_idx].std; 4516 src_desc = &spr->rx_jmb[src_idx].std;
4504 src_map = &tpr->rx_jmb_buffers[src_idx]; 4517 src_map = &spr->rx_jmb_buffers[src_idx];
4505 break; 4518 break;
4506 4519
4507 default: 4520 default:
@@ -4513,7 +4526,6 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
4513 pci_unmap_addr(src_map, mapping)); 4526 pci_unmap_addr(src_map, mapping));
4514 dest_desc->addr_hi = src_desc->addr_hi; 4527 dest_desc->addr_hi = src_desc->addr_hi;
4515 dest_desc->addr_lo = src_desc->addr_lo; 4528 dest_desc->addr_lo = src_desc->addr_lo;
4516
4517 src_map->skb = NULL; 4529 src_map->skb = NULL;
4518} 4530}
4519 4531
@@ -4545,10 +4557,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4545{ 4557{
4546 struct tg3 *tp = tnapi->tp; 4558 struct tg3 *tp = tnapi->tp;
4547 u32 work_mask, rx_std_posted = 0; 4559 u32 work_mask, rx_std_posted = 0;
4560 u32 std_prod_idx, jmb_prod_idx;
4548 u32 sw_idx = tnapi->rx_rcb_ptr; 4561 u32 sw_idx = tnapi->rx_rcb_ptr;
4549 u16 hw_idx; 4562 u16 hw_idx;
4550 int received; 4563 int received;
4551 struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; 4564 struct tg3_rx_prodring_set *tpr = tnapi->prodring;
4552 4565
4553 hw_idx = *(tnapi->rx_rcb_prod_idx); 4566 hw_idx = *(tnapi->rx_rcb_prod_idx);
4554 /* 4567 /*
@@ -4558,7 +4571,10 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4558 rmb(); 4571 rmb();
4559 work_mask = 0; 4572 work_mask = 0;
4560 received = 0; 4573 received = 0;
4574 std_prod_idx = tpr->rx_std_prod_idx;
4575 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4561 while (sw_idx != hw_idx && budget > 0) { 4576 while (sw_idx != hw_idx && budget > 0) {
4577 struct ring_info *ri;
4562 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; 4578 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4563 unsigned int len; 4579 unsigned int len;
4564 struct sk_buff *skb; 4580 struct sk_buff *skb;
@@ -4568,16 +4584,16 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4568 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 4584 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4569 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 4585 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4570 if (opaque_key == RXD_OPAQUE_RING_STD) { 4586 if (opaque_key == RXD_OPAQUE_RING_STD) {
4571 struct ring_info *ri = &tpr->rx_std_buffers[desc_idx]; 4587 ri = &tp->prodring[0].rx_std_buffers[desc_idx];
4572 dma_addr = pci_unmap_addr(ri, mapping); 4588 dma_addr = pci_unmap_addr(ri, mapping);
4573 skb = ri->skb; 4589 skb = ri->skb;
4574 post_ptr = &tpr->rx_std_ptr; 4590 post_ptr = &std_prod_idx;
4575 rx_std_posted++; 4591 rx_std_posted++;
4576 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 4592 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4577 struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx]; 4593 ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
4578 dma_addr = pci_unmap_addr(ri, mapping); 4594 dma_addr = pci_unmap_addr(ri, mapping);
4579 skb = ri->skb; 4595 skb = ri->skb;
4580 post_ptr = &tpr->rx_jmb_ptr; 4596 post_ptr = &jmb_prod_idx;
4581 } else 4597 } else
4582 goto next_pkt_nopost; 4598 goto next_pkt_nopost;
4583 4599
@@ -4586,7 +4602,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4586 if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 4602 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4587 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { 4603 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4588 drop_it: 4604 drop_it:
4589 tg3_recycle_rx(tnapi, opaque_key, 4605 tg3_recycle_rx(tnapi, tpr, opaque_key,
4590 desc_idx, *post_ptr); 4606 desc_idx, *post_ptr);
4591 drop_it_no_recycle: 4607 drop_it_no_recycle:
4592 /* Other statistics kept track of by card. */ 4608 /* Other statistics kept track of by card. */
@@ -4606,11 +4622,13 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4606 ) { 4622 ) {
4607 int skb_size; 4623 int skb_size;
4608 4624
4609 skb_size = tg3_alloc_rx_skb(tnapi, opaque_key, 4625 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4610 desc_idx, *post_ptr); 4626 *post_ptr);
4611 if (skb_size < 0) 4627 if (skb_size < 0)
4612 goto drop_it; 4628 goto drop_it;
4613 4629
4630 ri->skb = NULL;
4631
4614 pci_unmap_single(tp->pdev, dma_addr, skb_size, 4632 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4615 PCI_DMA_FROMDEVICE); 4633 PCI_DMA_FROMDEVICE);
4616 4634
@@ -4618,7 +4636,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4618 } else { 4636 } else {
4619 struct sk_buff *copy_skb; 4637 struct sk_buff *copy_skb;
4620 4638
4621 tg3_recycle_rx(tnapi, opaque_key, 4639 tg3_recycle_rx(tnapi, tpr, opaque_key,
4622 desc_idx, *post_ptr); 4640 desc_idx, *post_ptr);
4623 4641
4624 copy_skb = netdev_alloc_skb(tp->dev, 4642 copy_skb = netdev_alloc_skb(tp->dev,
@@ -4669,9 +4687,7 @@ next_pkt:
4669 4687
4670 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { 4688 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4671 u32 idx = *post_ptr % TG3_RX_RING_SIZE; 4689 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4672 4690 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx);
4673 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4674 TG3_64BIT_REG_LOW, idx);
4675 work_mask &= ~RXD_OPAQUE_RING_STD; 4691 work_mask &= ~RXD_OPAQUE_RING_STD;
4676 rx_std_posted = 0; 4692 rx_std_posted = 0;
4677 } 4693 }
@@ -4691,33 +4707,45 @@ next_pkt_nopost:
4691 tw32_rx_mbox(tnapi->consmbox, sw_idx); 4707 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4692 4708
4693 /* Refill RX ring(s). */ 4709 /* Refill RX ring(s). */
4694 if (work_mask & RXD_OPAQUE_RING_STD) { 4710 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) {
4695 sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE; 4711 if (work_mask & RXD_OPAQUE_RING_STD) {
4696 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, 4712 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4697 sw_idx); 4713 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4698 } 4714 tpr->rx_std_prod_idx);
4699 if (work_mask & RXD_OPAQUE_RING_JUMBO) { 4715 }
4700 sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE; 4716 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4701 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, 4717 tpr->rx_jmb_prod_idx = jmb_prod_idx %
4702 sw_idx); 4718 TG3_RX_JUMBO_RING_SIZE;
4719 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4720 tpr->rx_jmb_prod_idx);
4721 }
4722 mmiowb();
4723 } else if (work_mask) {
4724 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4725 * updated before the producer indices can be updated.
4726 */
4727 smp_wmb();
4728
4729 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4730 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
4731
4732 napi_schedule(&tp->napi[1].napi);
4703 } 4733 }
4704 mmiowb();
4705 4734
4706 return received; 4735 return received;
4707} 4736}
4708 4737
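The refill path above is the producer half of a classic publish pattern: under RSS, a non-reclaiming vector fills in ring_info entries, issues smp_wmb(), publishes the new producer index, and kicks napi[1]; tg3_rx_prodring_xfer() below is the matching consumer, reading the index first and issuing smp_rmb() before touching the entries. A minimal sketch of the pairing, assuming one producer and one consumer per ring and the kernel barrier primitives (names here are illustrative, not the driver's own):

	struct pub_ring {
		struct ring_info buf[512];	/* payload entries */
		u32 prod_idx;			/* published producer index */
	};

	static void publish(struct pub_ring *r, u32 new_prod)
	{
		/* buf[] writes must be visible before the index covering them */
		smp_wmb();
		r->prod_idx = new_prod;
	}

	static u32 snapshot(struct pub_ring *r)
	{
		u32 prod = r->prod_idx;

		/* pairs with smp_wmb(): read the index before the entries */
		smp_rmb();
		return prod;	/* entries up to prod are now safe to read */
	}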
4709static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) 4738static void tg3_poll_link(struct tg3 *tp)
4710{ 4739{
4711 struct tg3 *tp = tnapi->tp;
4712 struct tg3_hw_status *sblk = tnapi->hw_status;
4713
4714 /* handle link change and other phy events */ 4740 /* handle link change and other phy events */
4715 if (!(tp->tg3_flags & 4741 if (!(tp->tg3_flags &
4716 (TG3_FLAG_USE_LINKCHG_REG | 4742 (TG3_FLAG_USE_LINKCHG_REG |
4717 TG3_FLAG_POLL_SERDES))) { 4743 TG3_FLAG_POLL_SERDES))) {
4744 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4745
4718 if (sblk->status & SD_STATUS_LINK_CHG) { 4746 if (sblk->status & SD_STATUS_LINK_CHG) {
4719 sblk->status = SD_STATUS_UPDATED | 4747 sblk->status = SD_STATUS_UPDATED |
4720 (sblk->status & ~SD_STATUS_LINK_CHG); 4748 (sblk->status & ~SD_STATUS_LINK_CHG);
4721 spin_lock(&tp->lock); 4749 spin_lock(&tp->lock);
4722 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 4750 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4723 tw32_f(MAC_STATUS, 4751 tw32_f(MAC_STATUS,
@@ -4731,6 +4759,98 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4731 spin_unlock(&tp->lock); 4759 spin_unlock(&tp->lock);
4732 } 4760 }
4733 } 4761 }
4762}
4763
4764static void tg3_rx_prodring_xfer(struct tg3 *tp,
4765 struct tg3_rx_prodring_set *dpr,
4766 struct tg3_rx_prodring_set *spr)
4767{
4768 u32 si, di, cpycnt, src_prod_idx;
4769 int i;
4770
4771 while (1) {
4772 src_prod_idx = spr->rx_std_prod_idx;
4773
4774 /* Make sure updates to the rx_std_buffers[] entries and the
4775 * standard producer index are seen in the correct order.
4776 */
4777 smp_rmb();
4778
4779 if (spr->rx_std_cons_idx == src_prod_idx)
4780 break;
4781
4782 if (spr->rx_std_cons_idx < src_prod_idx)
4783 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4784 else
4785 cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
4786
4787 cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
4788
4789 si = spr->rx_std_cons_idx;
4790 di = dpr->rx_std_prod_idx;
4791
4792 memcpy(&dpr->rx_std_buffers[di],
4793 &spr->rx_std_buffers[si],
4794 cpycnt * sizeof(struct ring_info));
4795
4796 for (i = 0; i < cpycnt; i++, di++, si++) {
4797 struct tg3_rx_buffer_desc *sbd, *dbd;
4798 sbd = &spr->rx_std[si];
4799 dbd = &dpr->rx_std[di];
4800 dbd->addr_hi = sbd->addr_hi;
4801 dbd->addr_lo = sbd->addr_lo;
4802 }
4803
4804 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
4805 TG3_RX_RING_SIZE;
4806 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
4807 TG3_RX_RING_SIZE;
4808 }
4809
4810 while (1) {
4811 src_prod_idx = spr->rx_jmb_prod_idx;
4812
4813 /* Make sure updates to the rx_jmb_buffers[] entries and
4814 * the jumbo producer index are seen in the correct order.
4815 */
4816 smp_rmb();
4817
4818 if (spr->rx_jmb_cons_idx == src_prod_idx)
4819 break;
4820
4821 if (spr->rx_jmb_cons_idx < src_prod_idx)
4822 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
4823 else
4824 cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
4825
4826 cpycnt = min(cpycnt,
4827 TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
4828
4829 si = spr->rx_jmb_cons_idx;
4830 di = dpr->rx_jmb_prod_idx;
4831
4832 memcpy(&dpr->rx_jmb_buffers[di],
4833 &spr->rx_jmb_buffers[si],
4834 cpycnt * sizeof(struct ring_info));
4835
4836 for (i = 0; i < cpycnt; i++, di++, si++) {
4837 struct tg3_rx_buffer_desc *sbd, *dbd;
4838 sbd = &spr->rx_jmb[si].std;
4839 dbd = &dpr->rx_jmb[di].std;
4840 dbd->addr_hi = sbd->addr_hi;
4841 dbd->addr_lo = sbd->addr_lo;
4842 }
4843
4844 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
4845 TG3_RX_JUMBO_RING_SIZE;
4846 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
4847 TG3_RX_JUMBO_RING_SIZE;
4848 }
4849}
4850
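Each while loop in tg3_rx_prodring_xfer() copies at most one contiguous run per pass: up to the source producer if there is no wrap, otherwise up to the end of the ring, and never past the end of the destination ring; the modulo updates then wrap both indices. As a worked example with a 512-entry ring, cons_idx 510 and prod_idx 5 gives cpycnt 2 on the first pass (entries 510-511) and 5 on the second. A standalone sketch of the same arithmetic, with RING_SIZE standing in for TG3_RX_RING_SIZE:

	#define RING_SIZE 512	/* stand-in for TG3_RX_RING_SIZE */

	/* Entries one memcpy may move before a wrap or the dst limit. */
	static u32 contig_count(u32 src_cons, u32 src_prod, u32 dst_prod)
	{
		u32 cpycnt;

		if (src_cons == src_prod)
			return 0;			/* nothing pending */

		if (src_cons < src_prod)
			cpycnt = src_prod - src_cons;	/* no wrap in source */
		else
			cpycnt = RING_SIZE - src_cons;	/* run to ring end */

		/* never run past the end of the destination ring either */
		if (cpycnt > RING_SIZE - dst_prod)
			cpycnt = RING_SIZE - dst_prod;

		return cpycnt;
	}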
4851static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4852{
4853 struct tg3 *tp = tnapi->tp;
4734 4854
4735 /* run TX completion thread */ 4855 /* run TX completion thread */
4736 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { 4856 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
@@ -4746,6 +4866,74 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4746 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) 4866 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4747 work_done += tg3_rx(tnapi, budget - work_done); 4867 work_done += tg3_rx(tnapi, budget - work_done);
4748 4868
4869 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4870 int i;
4871 u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx;
4872 u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx;
4873
4874 for (i = 2; i < tp->irq_cnt; i++)
4875 tg3_rx_prodring_xfer(tp, tnapi->prodring,
4876 tp->napi[i].prodring);
4877
4878 wmb();
4879
4880 if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) {
4881 u32 mbox = TG3_RX_STD_PROD_IDX_REG;
4882 tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx);
4883 }
4884
4885 if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) {
4886 u32 mbox = TG3_RX_JMB_PROD_IDX_REG;
4887 tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx);
4888 }
4889
4890 mmiowb();
4891 }
4892
4893 return work_done;
4894}
4895
4896static int tg3_poll_msix(struct napi_struct *napi, int budget)
4897{
4898 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4899 struct tg3 *tp = tnapi->tp;
4900 int work_done = 0;
4901 struct tg3_hw_status *sblk = tnapi->hw_status;
4902
4903 while (1) {
4904 work_done = tg3_poll_work(tnapi, work_done, budget);
4905
4906 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4907 goto tx_recovery;
4908
4909 if (unlikely(work_done >= budget))
4910 break;
4911
4912 /* tp->last_tag is used in tg3_restart_ints() below
4913 * to tell the hw how much work has been processed,
4914 * so we must read it before checking for more work.
4915 */
4916 tnapi->last_tag = sblk->status_tag;
4917 tnapi->last_irq_tag = tnapi->last_tag;
4918 rmb();
4919
4920 /* check for RX/TX work to do */
4921 if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
4922 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
4923 napi_complete(napi);
4924 /* Reenable interrupts. */
4925 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
4926 mmiowb();
4927 break;
4928 }
4929 }
4930
4931 return work_done;
4932
4933tx_recovery:
4934 /* work_done is guaranteed to be less than budget. */
4935 napi_complete(napi);
4936 schedule_work(&tp->reset_task);
4749 return work_done; 4937 return work_done;
4750} 4938}
4751 4939
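tg3_poll_msix() leans on the status-tag handshake: the NIC bumps status_tag on every status block update, and writing the last seen tag (shifted into the high byte) back to the interrupt mailbox both re-enables the vector and tells the hardware which update was consumed, so a newer tag immediately re-raises the interrupt. The tag must be snapshotted before the final work check, as the comment in the loop says. A distilled sketch of that ordering, reusing the driver's field names but omitting napi_complete()/mmiowb():

	/* Sketch only: the real exit path lives inline in tg3_poll_msix(). */
	static bool tg3_try_complete(struct tg3_napi *tnapi)
	{
		struct tg3_hw_status *sblk = tnapi->hw_status;

		tnapi->last_tag = sblk->status_tag;	/* snapshot the tag first */
		rmb();					/* ...then check the indices */

		if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
		    *tnapi->rx_rcb_prod_idx != tnapi->rx_rcb_ptr)
			return false;	/* new work raced in; poll again */

		/* ack up to last_tag; a newer tag re-interrupts at once */
		tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
		return true;
	}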
@@ -4757,6 +4945,8 @@ static int tg3_poll(struct napi_struct *napi, int budget)
4757 struct tg3_hw_status *sblk = tnapi->hw_status; 4945 struct tg3_hw_status *sblk = tnapi->hw_status;
4758 4946
4759 while (1) { 4947 while (1) {
4948 tg3_poll_link(tp);
4949
4760 work_done = tg3_poll_work(tnapi, work_done, budget); 4950 work_done = tg3_poll_work(tnapi, work_done, budget);
4761 4951
4762 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) 4952 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
@@ -5119,11 +5309,11 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5119static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32); 5309static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5120 5310
5121/* Workaround 4GB and 40-bit hardware DMA bugs. */ 5311/* Workaround 4GB and 40-bit hardware DMA bugs. */
5122static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, 5312static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5123 u32 last_plus_one, u32 *start, 5313 struct sk_buff *skb, u32 last_plus_one,
5124 u32 base_flags, u32 mss) 5314 u32 *start, u32 base_flags, u32 mss)
5125{ 5315{
5126 struct tg3_napi *tnapi = &tp->napi[0]; 5316 struct tg3 *tp = tnapi->tp;
5127 struct sk_buff *new_skb; 5317 struct sk_buff *new_skb;
5128 dma_addr_t new_addr = 0; 5318 dma_addr_t new_addr = 0;
5129 u32 entry = *start; 5319 u32 entry = *start;
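The workaround exists because some tigon3 revisions mishandle a DMA buffer that crosses a 4 GiB address boundary or reaches past 40 bits of address space; the fix is to bounce the data into a freshly allocated, safely placed skb. Illustrative forms of the two conditions follow (a sketch only; the driver's own tg3_4g_overflow_test()/tg3_40bit_overflow_test() helpers differ in detail and are gated by per-chip flags at their call sites):

	static inline int crosses_4g(dma_addr_t mapping, int len)
	{
		u32 base = lower_32_bits(mapping);

		return base + len < base;	/* low dword wraps a 4 GiB line */
	}

	static inline int needs_over_40bits(dma_addr_t mapping, int len)
	{
		return ((u64)mapping + len) >> 40 != 0;
	}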
@@ -5206,7 +5396,7 @@ static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5206} 5396}
5207 5397
5208/* hard_start_xmit for devices that don't have any bugs and 5398/* hard_start_xmit for devices that don't have any bugs and
5209 * support TG3_FLG2_HW_TSO_2 only. 5399 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5210 */ 5400 */
5211static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, 5401static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5212 struct net_device *dev) 5402 struct net_device *dev)
@@ -5265,7 +5455,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5265 hdrlen = ip_tcp_len + tcp_opt_len; 5455 hdrlen = ip_tcp_len + tcp_opt_len;
5266 } 5456 }
5267 5457
5268 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 5458 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5269 mss |= (hdrlen & 0xc) << 12; 5459 mss |= (hdrlen & 0xc) << 12;
5270 if (hdrlen & 0x10) 5460 if (hdrlen & 0x10)
5271 base_flags |= 0x00000010; 5461 base_flags |= 0x00000010;
@@ -5392,9 +5582,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5392 struct skb_shared_info *sp; 5582 struct skb_shared_info *sp;
5393 int would_hit_hwbug; 5583 int would_hit_hwbug;
5394 dma_addr_t mapping; 5584 dma_addr_t mapping;
5395 struct tg3_napi *tnapi = &tp->napi[0]; 5585 struct tg3_napi *tnapi;
5586 struct netdev_queue *txq;
5396 5587
5397 len = skb_headlen(skb); 5588 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5589 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5590 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
5591 tnapi++;
5398 5592
5399 /* We are running in BH disabled context with netif_tx_lock 5593 /* We are running in BH disabled context with netif_tx_lock
5400 * and TX reclaim runs via tp->napi.poll inside of a software 5594 * and TX reclaim runs via tp->napi.poll inside of a software
@@ -5402,8 +5596,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5402 * no IRQ context deadlocks to worry about either. Rejoice! 5596 * no IRQ context deadlocks to worry about either. Rejoice!
5403 */ 5597 */
5404 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { 5598 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5405 if (!netif_queue_stopped(dev)) { 5599 if (!netif_tx_queue_stopped(txq)) {
5406 netif_stop_queue(dev); 5600 netif_tx_stop_queue(txq);
5407 5601
5408 /* This is a hard error, log it. */ 5602 /* This is a hard error, log it. */
5409 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " 5603 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
@@ -5416,7 +5610,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5416 base_flags = 0; 5610 base_flags = 0;
5417 if (skb->ip_summed == CHECKSUM_PARTIAL) 5611 if (skb->ip_summed == CHECKSUM_PARTIAL)
5418 base_flags |= TXD_FLAG_TCPUDP_CSUM; 5612 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5419 mss = 0; 5613
5420 if ((mss = skb_shinfo(skb)->gso_size) != 0) { 5614 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5421 struct iphdr *iph; 5615 struct iphdr *iph;
5422 u32 tcp_opt_len, ip_tcp_len, hdr_len; 5616 u32 tcp_opt_len, ip_tcp_len, hdr_len;
@@ -5450,7 +5644,12 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5450 IPPROTO_TCP, 5644 IPPROTO_TCP,
5451 0); 5645 0);
5452 5646
5453 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) 5647 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5648 mss |= (hdr_len & 0xc) << 12;
5649 if (hdr_len & 0x10)
5650 base_flags |= 0x00000010;
5651 base_flags |= (hdr_len & 0x3e0) << 5;
5652 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5454 mss |= hdr_len << 9; 5653 mss |= hdr_len << 9;
5455 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) || 5654 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5456 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { 5655 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
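On TG3_FLG2_HW_TSO_3 hardware the TSO header length no longer fits as a single shifted field; the branch above scatters it across the descriptor: hdr_len bits 2-3 land in mss bits 14-15, bit 4 becomes base_flags bit 4, and bits 5-9 become base_flags bits 10-14. A worked sketch of the split, assuming a 40-byte IPv4+TCP header with no options:

	static void encode_tso3_hdrlen(u32 hdr_len, u32 *mss, u32 *base_flags)
	{
		*mss |= (hdr_len & 0xc) << 12;		/* bits 2-3 -> 14-15 */
		if (hdr_len & 0x10)
			*base_flags |= 0x00000010;	/* bit 4 -> flag bit 4 */
		*base_flags |= (hdr_len & 0x3e0) << 5;	/* bits 5-9 -> 10-14 */
	}

	/*
	 * hdr_len = 40 (0b101000):
	 *   mss        |= 0x8000  (40 & 0xc   = 8,  8 << 12)
	 *   base_flags |= 0x0400  (40 & 0x3e0 = 32, 32 << 5)
	 */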
@@ -5475,6 +5674,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5475 (vlan_tx_tag_get(skb) << 16)); 5674 (vlan_tx_tag_get(skb) << 16));
5476#endif 5675#endif
5477 5676
5677 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
5678 !mss && skb->len > ETH_DATA_LEN)
5679 base_flags |= TXD_FLAG_JMB_PKT;
5680
5478 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) { 5681 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5479 dev_kfree_skb(skb); 5682 dev_kfree_skb(skb);
5480 goto out_unlock; 5683 goto out_unlock;
@@ -5488,6 +5691,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5488 5691
5489 would_hit_hwbug = 0; 5692 would_hit_hwbug = 0;
5490 5693
5694 len = skb_headlen(skb);
5695
5491 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8) 5696 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
5492 would_hit_hwbug = 1; 5697 would_hit_hwbug = 1;
5493 5698
@@ -5553,7 +5758,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5553 /* If the workaround fails due to memory/mapping 5758 /* If the workaround fails due to memory/mapping
5554 * failure, silently drop this packet. 5759 * failure, silently drop this packet.
5555 */ 5760 */
5556 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one, 5761 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
5557 &start, base_flags, mss)) 5762 &start, base_flags, mss))
5558 goto out_unlock; 5763 goto out_unlock;
5559 5764
@@ -5561,13 +5766,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5561 } 5766 }
5562 5767
5563 /* Packets are ready, update Tx producer idx local and on card. */ 5768 /* Packets are ready, update Tx producer idx local and on card. */
5564 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry); 5769 tw32_tx_mbox(tnapi->prodmbox, entry);
5565 5770
5566 tnapi->tx_prod = entry; 5771 tnapi->tx_prod = entry;
5567 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { 5772 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5568 netif_stop_queue(dev); 5773 netif_tx_stop_queue(txq);
5569 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) 5774 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5570 netif_wake_queue(tp->dev); 5775 netif_tx_wake_queue(txq);
5571 } 5776 }
5572 5777
5573out_unlock: 5778out_unlock:
@@ -5638,36 +5843,33 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
5638 struct tg3_rx_prodring_set *tpr) 5843 struct tg3_rx_prodring_set *tpr)
5639{ 5844{
5640 int i; 5845 int i;
5641 struct ring_info *rxp;
5642
5643 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5644 rxp = &tpr->rx_std_buffers[i];
5645 5846
5646 if (rxp->skb == NULL) 5847 if (tpr != &tp->prodring[0]) {
5647 continue; 5848 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
5849 i = (i + 1) % TG3_RX_RING_SIZE)
5850 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
5851 tp->rx_pkt_map_sz);
5852
5853 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5854 for (i = tpr->rx_jmb_cons_idx;
5855 i != tpr->rx_jmb_prod_idx;
5856 i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
5857 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
5858 TG3_RX_JMB_MAP_SZ);
5859 }
5860 }
5648 5861
5649 pci_unmap_single(tp->pdev, 5862 return;
5650 pci_unmap_addr(rxp, mapping),
5651 tp->rx_pkt_map_sz,
5652 PCI_DMA_FROMDEVICE);
5653 dev_kfree_skb_any(rxp->skb);
5654 rxp->skb = NULL;
5655 } 5863 }
5656 5864
5657 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 5865 for (i = 0; i < TG3_RX_RING_SIZE; i++)
5658 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { 5866 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
5659 rxp = &tpr->rx_jmb_buffers[i]; 5867 tp->rx_pkt_map_sz);
5660
5661 if (rxp->skb == NULL)
5662 continue;
5663 5868
5664 pci_unmap_single(tp->pdev, 5869 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5665 pci_unmap_addr(rxp, mapping), 5870 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
5666 TG3_RX_JMB_MAP_SZ, 5871 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
5667 PCI_DMA_FROMDEVICE); 5872 TG3_RX_JMB_MAP_SZ);
5668 dev_kfree_skb_any(rxp->skb);
5669 rxp->skb = NULL;
5670 }
5671 } 5873 }
5672} 5874}
5673 5875
@@ -5682,7 +5884,19 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
5682 struct tg3_rx_prodring_set *tpr) 5884 struct tg3_rx_prodring_set *tpr)
5683{ 5885{
5684 u32 i, rx_pkt_dma_sz; 5886 u32 i, rx_pkt_dma_sz;
5685 struct tg3_napi *tnapi = &tp->napi[0]; 5887
5888 tpr->rx_std_cons_idx = 0;
5889 tpr->rx_std_prod_idx = 0;
5890 tpr->rx_jmb_cons_idx = 0;
5891 tpr->rx_jmb_prod_idx = 0;
5892
5893 if (tpr != &tp->prodring[0]) {
5894 memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
5895 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
5896 memset(&tpr->rx_jmb_buffers[0], 0,
5897 TG3_RX_JMB_BUFF_RING_SIZE);
5898 goto done;
5899 }
5686 5900
5687 /* Zero out all descriptors. */ 5901 /* Zero out all descriptors. */
5688 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES); 5902 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
@@ -5709,7 +5923,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
5709 5923
5710 /* Now allocate fresh SKBs for each rx ring. */ 5924 /* Now allocate fresh SKBs for each rx ring. */
5711 for (i = 0; i < tp->rx_pending; i++) { 5925 for (i = 0; i < tp->rx_pending; i++) {
5712 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) { 5926 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
5713 printk(KERN_WARNING PFX 5927 printk(KERN_WARNING PFX
5714 "%s: Using a smaller RX standard ring, " 5928 "%s: Using a smaller RX standard ring, "
5715 "only %d out of %d buffers were allocated " 5929 "only %d out of %d buffers were allocated "
@@ -5740,8 +5954,8 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
5740 } 5954 }
5741 5955
5742 for (i = 0; i < tp->rx_jumbo_pending; i++) { 5956 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5743 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO, 5957 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO,
5744 -1, i) < 0) { 5958 i) < 0) {
5745 printk(KERN_WARNING PFX 5959 printk(KERN_WARNING PFX
5746 "%s: Using a smaller RX jumbo ring, " 5960 "%s: Using a smaller RX jumbo ring, "
5747 "only %d out of %d buffers were " 5961 "only %d out of %d buffers were "
@@ -5785,8 +5999,7 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
5785static int tg3_rx_prodring_init(struct tg3 *tp, 5999static int tg3_rx_prodring_init(struct tg3 *tp,
5786 struct tg3_rx_prodring_set *tpr) 6000 struct tg3_rx_prodring_set *tpr)
5787{ 6001{
5788 tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) * 6002 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
5789 TG3_RX_RING_SIZE, GFP_KERNEL);
5790 if (!tpr->rx_std_buffers) 6003 if (!tpr->rx_std_buffers)
5791 return -ENOMEM; 6004 return -ENOMEM;
5792 6005
@@ -5796,8 +6009,7 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
5796 goto err_out; 6009 goto err_out;
5797 6010
5798 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 6011 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5799 tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) * 6012 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
5800 TG3_RX_JUMBO_RING_SIZE,
5801 GFP_KERNEL); 6013 GFP_KERNEL);
5802 if (!tpr->rx_jmb_buffers) 6014 if (!tpr->rx_jmb_buffers)
5803 goto err_out; 6015 goto err_out;
@@ -5853,9 +6065,10 @@ static void tg3_free_rings(struct tg3 *tp)
5853 6065
5854 dev_kfree_skb_any(skb); 6066 dev_kfree_skb_any(skb);
5855 } 6067 }
5856 }
5857 6068
5858 tg3_rx_prodring_free(tp, &tp->prodring[0]); 6069 if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1)
6070 tg3_rx_prodring_free(tp, &tp->prodring[j]);
6071 }
5859} 6072}
5860 6073
5861/* Initialize tx/rx rings for packet processing. 6074/* Initialize tx/rx rings for packet processing.
@@ -5889,9 +6102,13 @@ static int tg3_init_rings(struct tg3 *tp)
5889 tnapi->rx_rcb_ptr = 0; 6102 tnapi->rx_rcb_ptr = 0;
5890 if (tnapi->rx_rcb) 6103 if (tnapi->rx_rcb)
5891 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 6104 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6105
6106 if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) &&
6107 tg3_rx_prodring_alloc(tp, &tp->prodring[i]))
6108 return -ENOMEM;
5892 } 6109 }
5893 6110
5894 return tg3_rx_prodring_alloc(tp, &tp->prodring[0]); 6111 return 0;
5895} 6112}
5896 6113
5897/* 6114/*
@@ -5935,7 +6152,8 @@ static void tg3_free_consistent(struct tg3 *tp)
5935 tp->hw_stats = NULL; 6152 tp->hw_stats = NULL;
5936 } 6153 }
5937 6154
5938 tg3_rx_prodring_fini(tp, &tp->prodring[0]); 6155 for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++)
6156 tg3_rx_prodring_fini(tp, &tp->prodring[i]);
5939} 6157}
5940 6158
5941/* 6159/*
@@ -5946,8 +6164,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
5946{ 6164{
5947 int i; 6165 int i;
5948 6166
5949 if (tg3_rx_prodring_init(tp, &tp->prodring[0])) 6167 for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) {
5950 return -ENOMEM; 6168 if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
6169 goto err_out;
6170 }
5951 6171
5952 tp->hw_stats = pci_alloc_consistent(tp->pdev, 6172 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5953 sizeof(struct tg3_hw_stats), 6173 sizeof(struct tg3_hw_stats),
@@ -5991,6 +6211,11 @@ static int tg3_alloc_consistent(struct tg3 *tp)
5991 break; 6211 break;
5992 } 6212 }
5993 6213
6214 if (tp->irq_cnt == 1)
6215 tnapi->prodring = &tp->prodring[0];
6216 else if (i)
6217 tnapi->prodring = &tp->prodring[i - 1];
6218
5994 /* 6219 /*
5995 * If multivector RSS is enabled, vector 0 does not handle 6220 * If multivector RSS is enabled, vector 0 does not handle
5996 * rx or tx interrupts. Don't allocate any resources for it. 6221 * rx or tx interrupts. Don't allocate any resources for it.
@@ -7279,9 +7504,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7279 if (err) 7504 if (err)
7280 return err; 7505 return err;
7281 7506
7282 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && 7507 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
7283 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 && 7508 val = tr32(TG3PCI_DMA_RW_CTRL) &
7284 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) { 7509 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7510 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7511 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7512 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7285 /* This value is determined during the probe time DMA 7513 /* This value is determined during the probe time DMA
7286 * engine test, tg3_test_dma. 7514 * engine test, tg3_test_dma.
7287 */ 7515 */
@@ -7404,8 +7632,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7404 ((u64) tpr->rx_std_mapping >> 32)); 7632 ((u64) tpr->rx_std_mapping >> 32));
7405 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 7633 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7406 ((u64) tpr->rx_std_mapping & 0xffffffff)); 7634 ((u64) tpr->rx_std_mapping & 0xffffffff));
7407 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 7635 if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
7408 NIC_SRAM_RX_BUFFER_DESC); 7636 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7637 NIC_SRAM_RX_BUFFER_DESC);
7409 7638
7410 /* Disable the mini ring */ 7639 /* Disable the mini ring */
7411 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7640 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
@@ -7428,8 +7657,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7428 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 7657 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7429 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | 7658 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7430 BDINFO_FLAGS_USE_EXT_RECV); 7659 BDINFO_FLAGS_USE_EXT_RECV);
7431 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 7660 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7432 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 7661 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7662 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7433 } else { 7663 } else {
7434 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 7664 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7435 BDINFO_FLAGS_DISABLED); 7665 BDINFO_FLAGS_DISABLED);
@@ -7445,14 +7675,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7445 7675
7446 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); 7676 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7447 7677
7448 tpr->rx_std_ptr = tp->rx_pending; 7678 tpr->rx_std_prod_idx = tp->rx_pending;
7449 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, 7679 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
7450 tpr->rx_std_ptr);
7451 7680
7452 tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? 7681 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7453 tp->rx_jumbo_pending : 0; 7682 tp->rx_jumbo_pending : 0;
7454 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, 7683 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
7455 tpr->rx_jmb_ptr);
7456 7684
7457 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 7685 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
7458 tw32(STD_REPLENISH_LWM, 32); 7686 tw32(STD_REPLENISH_LWM, 32);
@@ -7515,7 +7743,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7515 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 7743 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7516 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; 7744 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7517 7745
7518 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 7746 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
7747 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7519 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 7748 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7520 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; 7749 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7521 7750
@@ -9505,15 +9734,16 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
9505 return 0; 9734 return 0;
9506 } 9735 }
9507 if ((dev->features & NETIF_F_IPV6_CSUM) && 9736 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9508 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) { 9737 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
9738 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
9509 if (value) { 9739 if (value) {
9510 dev->features |= NETIF_F_TSO6; 9740 dev->features |= NETIF_F_TSO6;
9511 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 9741 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
9742 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9512 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 9743 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9513 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || 9744 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9514 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 9745 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9515 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 9746 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9516 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9517 dev->features |= NETIF_F_TSO_ECN; 9747 dev->features |= NETIF_F_TSO_ECN;
9518 } else 9748 } else
9519 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); 9749 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
@@ -10962,7 +11192,7 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10962 11192
10963 /* NVRAM protection for TPM */ 11193 /* NVRAM protection for TPM */
10964 if (nvcfg1 & (1 << 27)) 11194 if (nvcfg1 & (1 << 27))
10965 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; 11195 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
10966 11196
10967 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 11197 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10968 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: 11198 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
@@ -11003,7 +11233,7 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11003 11233
11004 /* NVRAM protection for TPM */ 11234 /* NVRAM protection for TPM */
11005 if (nvcfg1 & (1 << 27)) { 11235 if (nvcfg1 & (1 << 27)) {
11006 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; 11236 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11007 protect = 1; 11237 protect = 1;
11008 } 11238 }
11009 11239
@@ -11097,7 +11327,7 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11097 11327
11098 /* NVRAM protection for TPM */ 11328 /* NVRAM protection for TPM */
11099 if (nvcfg1 & (1 << 27)) { 11329 if (nvcfg1 & (1 << 27)) {
11100 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; 11330 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11101 protect = 1; 11331 protect = 1;
11102 } 11332 }
11103 11333
@@ -11599,7 +11829,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11599 11829
11600 tg3_enable_nvram_access(tp); 11830 tg3_enable_nvram_access(tp);
11601 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 11831 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11602 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) 11832 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
11603 tw32(NVRAM_WRITE1, 0x406); 11833 tw32(NVRAM_WRITE1, 0x406);
11604 11834
11605 grc_mode = tr32(GRC_MODE); 11835 grc_mode = tr32(GRC_MODE);
@@ -12475,10 +12705,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12475 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { 12705 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12476 u32 prod_id_asic_rev; 12706 u32 prod_id_asic_rev;
12477 12707
12478 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717C || 12708 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
12479 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717S || 12709 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12480 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718C || 12710 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724)
12481 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718S)
12482 pci_read_config_dword(tp->pdev, 12711 pci_read_config_dword(tp->pdev,
12483 TG3PCI_GEN2_PRODID_ASICREV, 12712 TG3PCI_GEN2_PRODID_ASICREV,
12484 &prod_id_asic_rev); 12713 &prod_id_asic_rev);
@@ -12661,6 +12890,29 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12661 tp->dev->features |= NETIF_F_IPV6_CSUM; 12890 tp->dev->features |= NETIF_F_IPV6_CSUM;
12662 } 12891 }
12663 12892
12893 /* Determine TSO capabilities */
12894 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12895 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
12896 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12897 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12898 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12899 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12900 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12901 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
12902 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12903 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12904 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12905 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12906 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
12907 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
12908 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
12909 tp->fw_needed = FIRMWARE_TG3TSO5;
12910 else
12911 tp->fw_needed = FIRMWARE_TG3TSO;
12912 }
12913
12914 tp->irq_max = 1;
12915
12664 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 12916 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12665 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI; 12917 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12666 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || 12918 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
@@ -12672,31 +12924,21 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12672 12924
12673 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 12925 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12674 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 12926 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12675 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12676 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; 12927 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12677 } else {
12678 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12679 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12680 ASIC_REV_5750 &&
12681 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12682 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12683 } 12928 }
12684 }
12685 12929
12686 tp->irq_max = 1; 12930 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
12687 12931 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
12688 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 12932 tp->irq_max = TG3_IRQ_MAX_VECS;
12689 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; 12933 }
12690 tp->irq_max = TG3_IRQ_MAX_VECS;
12691 } 12934 }
12692 12935
12693 if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) { 12936 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12694 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 12937 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12695 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG; 12938 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
12696 else { 12939 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
12697 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG; 12940 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
12698 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; 12941 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
12699 }
12700 } 12942 }
12701 12943
12702 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 12944 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
@@ -13297,6 +13539,11 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13297#endif 13539#endif
13298#endif 13540#endif
13299 13541
13542 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13543 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
13544 goto out;
13545 }
13546
13300 if (!goal) 13547 if (!goal)
13301 goto out; 13548 goto out;
13302 13549
@@ -13491,7 +13738,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
13491{ 13738{
13492 dma_addr_t buf_dma; 13739 dma_addr_t buf_dma;
13493 u32 *buf, saved_dma_rwctrl; 13740 u32 *buf, saved_dma_rwctrl;
13494 int ret; 13741 int ret = 0;
13495 13742
13496 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); 13743 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13497 if (!buf) { 13744 if (!buf) {
@@ -13504,6 +13751,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
13504 13751
13505 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); 13752 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13506 13753
13754 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
13755 goto out;
13756
13507 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 13757 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13508 /* DMA read watermark not used on PCIE */ 13758 /* DMA read watermark not used on PCIE */
13509 tp->dma_rwctrl |= 0x00180000; 13759 tp->dma_rwctrl |= 0x00180000;
@@ -13576,7 +13826,6 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
13576 tg3_switch_clocks(tp); 13826 tg3_switch_clocks(tp);
13577#endif 13827#endif
13578 13828
13579 ret = 0;
13580 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && 13829 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13581 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) 13830 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13582 goto out; 13831 goto out;
@@ -13755,6 +14004,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
13755 case PHY_ID_BCM5756: return "5722/5756"; 14004 case PHY_ID_BCM5756: return "5722/5756";
13756 case PHY_ID_BCM5906: return "5906"; 14005 case PHY_ID_BCM5906: return "5906";
13757 case PHY_ID_BCM5761: return "5761"; 14006 case PHY_ID_BCM5761: return "5761";
14007 case PHY_ID_BCM5717: return "5717";
13758 case PHY_ID_BCM8002: return "8002/serdes"; 14008 case PHY_ID_BCM8002: return "8002/serdes";
13759 case 0: return "serdes"; 14009 case 0: return "serdes";
13760 default: return "unknown"; 14010 default: return "unknown";
@@ -13996,51 +14246,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13996 tp->rx_pending = TG3_DEF_RX_RING_PENDING; 14246 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13997 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 14247 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13998 14248
13999 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14000 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14001 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14002 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
14003 struct tg3_napi *tnapi = &tp->napi[i];
14004
14005 tnapi->tp = tp;
14006 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
14007
14008 tnapi->int_mbox = intmbx;
14009 if (i < 4)
14010 intmbx += 0x8;
14011 else
14012 intmbx += 0x4;
14013
14014 tnapi->consmbox = rcvmbx;
14015 tnapi->prodmbox = sndmbx;
14016
14017 if (i)
14018 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
14019 else
14020 tnapi->coal_now = HOSTCC_MODE_NOW;
14021
14022 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
14023 break;
14024
14025 /*
14026 * If we support MSIX, we'll be using RSS. If we're using
14027 * RSS, the first vector only handles link interrupts and the
14028 * remaining vectors handle rx and tx interrupts. Reuse the
14029 * mailbox values for the next iteration. The values we setup
14030 * above are still useful for the single vectored mode.
14031 */
14032 if (!i)
14033 continue;
14034
14035 rcvmbx += 0x8;
14036
14037 if (sndmbx & 0x4)
14038 sndmbx -= 0x4;
14039 else
14040 sndmbx += 0xc;
14041 }
14042
14043 netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
14044 dev->ethtool_ops = &tg3_ethtool_ops; 14249 dev->ethtool_ops = &tg3_ethtool_ops;
14045 dev->watchdog_timeo = TG3_TX_TIMEOUT; 14250 dev->watchdog_timeo = TG3_TX_TIMEOUT;
14046 dev->irq = pdev->irq; 14251 dev->irq = pdev->irq;
@@ -14052,7 +14257,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14052 goto err_out_iounmap; 14257 goto err_out_iounmap;
14053 } 14258 }
14054 14259
14055 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 14260 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
14261 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
14056 dev->netdev_ops = &tg3_netdev_ops; 14262 dev->netdev_ops = &tg3_netdev_ops;
14057 else 14263 else
14058 dev->netdev_ops = &tg3_netdev_ops_dma_bug; 14264 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
@@ -14099,46 +14305,39 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14099 14305
14100 tg3_init_bufmgr_config(tp); 14306 tg3_init_bufmgr_config(tp);
14101 14307
14102 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) 14308 /* Selectively allow TSO based on operating conditions */
14103 tp->fw_needed = FIRMWARE_TG3; 14309 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
14104 14310 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
14105 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
14106 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 14311 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
14312 else {
14313 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
14314 tp->fw_needed = NULL;
14107 } 14315 }
14108 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 14316
14109 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || 14317 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14110 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 || 14318 tp->fw_needed = FIRMWARE_TG3;
14111 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14112 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
14113 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
14114 } else {
14115 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
14116 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14117 tp->fw_needed = FIRMWARE_TG3TSO5;
14118 else
14119 tp->fw_needed = FIRMWARE_TG3TSO;
14120 }
14121 14319
14122 /* TSO is on by default on chips that support hardware TSO. 14320 /* TSO is on by default on chips that support hardware TSO.
14123 * Firmware TSO on older chips gives lower performance, so it 14321 * Firmware TSO on older chips gives lower performance, so it
14124 * is off by default, but can be enabled using ethtool. 14322 * is off by default, but can be enabled using ethtool.
14125 */ 14323 */
14126 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { 14324 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
14127 if (dev->features & NETIF_F_IP_CSUM) 14325 (dev->features & NETIF_F_IP_CSUM))
14128 dev->features |= NETIF_F_TSO; 14326 dev->features |= NETIF_F_TSO;
14129 if ((dev->features & NETIF_F_IPV6_CSUM) && 14327
14130 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) 14328 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
14329 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
14330 if (dev->features & NETIF_F_IPV6_CSUM)
14131 dev->features |= NETIF_F_TSO6; 14331 dev->features |= NETIF_F_TSO6;
14132 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 14332 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14333 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14133 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 14334 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14134 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || 14335 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14135 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 14336 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14136 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 14337 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14137 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
14138 dev->features |= NETIF_F_TSO_ECN; 14338 dev->features |= NETIF_F_TSO_ECN;
14139 } 14339 }
14140 14340
14141
14142 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && 14341 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
14143 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && 14342 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
14144 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { 14343 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
@@ -14189,6 +14388,53 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14189 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; 14388 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14190 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; 14389 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14191 14390
14391 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14392 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14393 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14394 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
14395 struct tg3_napi *tnapi = &tp->napi[i];
14396
14397 tnapi->tp = tp;
14398 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
14399
14400 tnapi->int_mbox = intmbx;
14401 if (i < 4)
14402 intmbx += 0x8;
14403 else
14404 intmbx += 0x4;
14405
14406 tnapi->consmbox = rcvmbx;
14407 tnapi->prodmbox = sndmbx;
14408
14409 if (i) {
14410 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
14411 netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
14412 } else {
14413 tnapi->coal_now = HOSTCC_MODE_NOW;
14414 netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
14415 }
14416
14417 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
14418 break;
14419
14420 /*
14421 * If we support MSIX, we'll be using RSS. If we're using
14422 * RSS, the first vector only handles link interrupts and the
14423 * remaining vectors handle rx and tx interrupts. Reuse the
14424 * mailbox values for the next iteration. The values we setup
14425 * above are still useful for the single vectored mode.
14426 */
14427 if (!i)
14428 continue;
14429
14430 rcvmbx += 0x8;
14431
14432 if (sndmbx & 0x4)
14433 sndmbx -= 0x4;
14434 else
14435 sndmbx += 0xc;
14436 }
14437
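The mailbox bookkeeping at the tail of the loop is denser than it looks: vector 0 and vector 1 deliberately share the initial rcvmbx/sndmbx values (the "if (!i) continue" skips the adjustment once), since under RSS vector 0 handles only link interrupts. The sndmbx zig-zag then walks the two 32-bit halves of successive 64-bit send mailbox slots. A standalone sketch that prints the offsets the alternation generates (0x100 is an illustrative base, not the real MAILBOX_SNDHOST_PROD_IDX_0):

	#include <stdio.h>

	int main(void)
	{
		unsigned int sndmbx = 0x100 + 0x4;	/* base + TG3_64BIT_REG_LOW */
		int i;

		for (i = 1; i <= 4; i++) {
			printf("vector %d: sndmbx 0x%03x\n", i, sndmbx);
			if (sndmbx & 0x4)
				sndmbx -= 0x4;
			else
				sndmbx += 0xc;
		}
		return 0;	/* prints 0x104, 0x100, 0x10c, 0x108 */
	}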
14192 tg3_init_coal(tp); 14438 tg3_init_coal(tp);
14193 14439
14194 pci_set_drvdata(pdev, dev); 14440 pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index d770da124b85..453a34fb72b9 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -46,10 +46,9 @@
46#define TG3PCI_DEVICE_TIGON3_57788 0x1691 46#define TG3PCI_DEVICE_TIGON3_57788 0x1691
47#define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */ 47#define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */
48#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */ 48#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */
49#define TG3PCI_DEVICE_TIGON3_5717C 0x1655 49#define TG3PCI_DEVICE_TIGON3_5717 0x1655
50#define TG3PCI_DEVICE_TIGON3_5717S 0x1656 50#define TG3PCI_DEVICE_TIGON3_5718 0x1656
51#define TG3PCI_DEVICE_TIGON3_5718C 0x1665 51#define TG3PCI_DEVICE_TIGON3_5724 0x165c
52#define TG3PCI_DEVICE_TIGON3_5718S 0x1666
53/* 0x04 --> 0x64 unused */ 52/* 0x04 --> 0x64 unused */
54#define TG3PCI_MSI_DATA 0x00000064 53#define TG3PCI_MSI_DATA 0x00000064
55/* 0x66 --> 0x68 unused */ 54/* 0x66 --> 0x68 unused */
@@ -103,6 +102,7 @@
103#define CHIPREV_ID_5906_A1 0xc001 102#define CHIPREV_ID_5906_A1 0xc001
104#define CHIPREV_ID_57780_A0 0x57780000 103#define CHIPREV_ID_57780_A0 0x57780000
105#define CHIPREV_ID_57780_A1 0x57780001 104#define CHIPREV_ID_57780_A1 0x57780001
105#define CHIPREV_ID_5717_A0 0x05717000
106#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) 106#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
107#define ASIC_REV_5700 0x07 107#define ASIC_REV_5700 0x07
108#define ASIC_REV_5701 0x00 108#define ASIC_REV_5701 0x00
@@ -141,8 +141,7 @@
141#define METAL_REV_B1 0x01 141#define METAL_REV_B1 0x01
142#define METAL_REV_B2 0x02 142#define METAL_REV_B2 0x02
143#define TG3PCI_DMA_RW_CTRL 0x0000006c 143#define TG3PCI_DMA_RW_CTRL 0x0000006c
144#define DMA_RWCTRL_MIN_DMA 0x000000ff 144#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001
145#define DMA_RWCTRL_MIN_DMA_SHIFT 0
146#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700 145#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700
147#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000 146#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000
148#define DMA_RWCTRL_READ_BNDRY_16 0x00000100 147#define DMA_RWCTRL_READ_BNDRY_16 0x00000100
@@ -242,7 +241,11 @@
242#define MAILBOX_GENERAL_7 0x00000258 /* 64-bit */ 241#define MAILBOX_GENERAL_7 0x00000258 /* 64-bit */
243#define MAILBOX_RELOAD_STAT 0x00000260 /* 64-bit */ 242#define MAILBOX_RELOAD_STAT 0x00000260 /* 64-bit */
244#define MAILBOX_RCV_STD_PROD_IDX 0x00000268 /* 64-bit */ 243#define MAILBOX_RCV_STD_PROD_IDX 0x00000268 /* 64-bit */
244#define TG3_RX_STD_PROD_IDX_REG (MAILBOX_RCV_STD_PROD_IDX + \
245 TG3_64BIT_REG_LOW)
245#define MAILBOX_RCV_JUMBO_PROD_IDX 0x00000270 /* 64-bit */ 246#define MAILBOX_RCV_JUMBO_PROD_IDX 0x00000270 /* 64-bit */
247#define TG3_RX_JMB_PROD_IDX_REG (MAILBOX_RCV_JUMBO_PROD_IDX + \
248 TG3_64BIT_REG_LOW)
246#define MAILBOX_RCV_MINI_PROD_IDX 0x00000278 /* 64-bit */ 249#define MAILBOX_RCV_MINI_PROD_IDX 0x00000278 /* 64-bit */
247#define MAILBOX_RCVRET_CON_IDX_0 0x00000280 /* 64-bit */ 250#define MAILBOX_RCVRET_CON_IDX_0 0x00000280 /* 64-bit */
248#define MAILBOX_RCVRET_CON_IDX_1 0x00000288 /* 64-bit */ 251#define MAILBOX_RCVRET_CON_IDX_1 0x00000288 /* 64-bit */
@@ -2570,8 +2573,10 @@ struct tg3_ethtool_stats {
2570}; 2573};
2571 2574
2572struct tg3_rx_prodring_set { 2575struct tg3_rx_prodring_set {
2573 u32 rx_std_ptr; 2576 u32 rx_std_prod_idx;
2574 u32 rx_jmb_ptr; 2577 u32 rx_std_cons_idx;
2578 u32 rx_jmb_prod_idx;
2579 u32 rx_jmb_cons_idx;
2575 struct tg3_rx_buffer_desc *rx_std; 2580 struct tg3_rx_buffer_desc *rx_std;
2576 struct tg3_ext_rx_buffer_desc *rx_jmb; 2581 struct tg3_ext_rx_buffer_desc *rx_jmb;
2577 struct ring_info *rx_std_buffers; 2582 struct ring_info *rx_std_buffers;
@@ -2599,6 +2604,7 @@ struct tg3_napi {
2599 u32 consmbox; 2604 u32 consmbox;
2600 u32 rx_rcb_ptr; 2605 u32 rx_rcb_ptr;
2601 u16 *rx_rcb_prod_idx; 2606 u16 *rx_rcb_prod_idx;
2607 struct tg3_rx_prodring_set *prodring;
2602 2608
2603 struct tg3_rx_buffer_desc *rx_rcb; 2609 struct tg3_rx_buffer_desc *rx_rcb;
2604 struct tg3_tx_buffer_desc *tx_ring; 2610 struct tg3_tx_buffer_desc *tx_ring;
@@ -2682,7 +2688,7 @@ struct tg3 {
2682 struct vlan_group *vlgrp; 2688 struct vlan_group *vlgrp;
2683#endif 2689#endif
2684 2690
2685 struct tg3_rx_prodring_set prodring[1]; 2691 struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS - 1];
2686 2692
2687 2693
2688 /* begin "everything else" cacheline(s) section */ 2694 /* begin "everything else" cacheline(s) section */
@@ -2753,7 +2759,7 @@ struct tg3 {
2753#define TG3_FLG2_SERDES_PREEMPHASIS 0x00020000 2759#define TG3_FLG2_SERDES_PREEMPHASIS 0x00020000
2754#define TG3_FLG2_5705_PLUS 0x00040000 2760#define TG3_FLG2_5705_PLUS 0x00040000
2755#define TG3_FLG2_5750_PLUS 0x00080000 2761#define TG3_FLG2_5750_PLUS 0x00080000
2756#define TG3_FLG2_PROTECTED_NVRAM 0x00100000 2762#define TG3_FLG2_HW_TSO_3 0x00100000
2757#define TG3_FLG2_USING_MSI 0x00200000 2763#define TG3_FLG2_USING_MSI 0x00200000
2758#define TG3_FLG2_USING_MSIX 0x00400000 2764#define TG3_FLG2_USING_MSIX 0x00400000
2759#define TG3_FLG2_USING_MSI_OR_MSIX (TG3_FLG2_USING_MSI | \ 2765#define TG3_FLG2_USING_MSI_OR_MSIX (TG3_FLG2_USING_MSI | \
@@ -2765,7 +2771,9 @@ struct tg3 {
2765#define TG3_FLG2_ICH_WORKAROUND 0x02000000 2771#define TG3_FLG2_ICH_WORKAROUND 0x02000000
2766#define TG3_FLG2_5780_CLASS 0x04000000 2772#define TG3_FLG2_5780_CLASS 0x04000000
2767#define TG3_FLG2_HW_TSO_2 0x08000000 2773#define TG3_FLG2_HW_TSO_2 0x08000000
2768#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2) 2774#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | \
2775 TG3_FLG2_HW_TSO_2 | \
2776 TG3_FLG2_HW_TSO_3)
2769#define TG3_FLG2_1SHOT_MSI 0x10000000 2777#define TG3_FLG2_1SHOT_MSI 0x10000000
2770#define TG3_FLG2_PHY_JITTER_BUG 0x20000000 2778#define TG3_FLG2_PHY_JITTER_BUG 0x20000000
2771#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000 2779#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000
@@ -2773,6 +2781,7 @@ struct tg3 {
2773 u32 tg3_flags3; 2781 u32 tg3_flags3;
2774#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001 2782#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001
2775#define TG3_FLG3_ENABLE_APE 0x00000002 2783#define TG3_FLG3_ENABLE_APE 0x00000002
2784#define TG3_FLG3_PROTECTED_NVRAM 0x00000004
2776#define TG3_FLG3_5701_DMA_BUG 0x00000008 2785#define TG3_FLG3_5701_DMA_BUG 0x00000008
2777#define TG3_FLG3_USE_PHYLIB 0x00000010 2786#define TG3_FLG3_USE_PHYLIB 0x00000010
2778#define TG3_FLG3_MDIOBUS_INITED 0x00000020 2787#define TG3_FLG3_MDIOBUS_INITED 0x00000020
@@ -2855,6 +2864,7 @@ struct tg3 {
2855#define PHY_ID_BCM5756 0xbc050ed0 2864#define PHY_ID_BCM5756 0xbc050ed0
2856#define PHY_ID_BCM5784 0xbc050fa0 2865#define PHY_ID_BCM5784 0xbc050fa0
2857#define PHY_ID_BCM5761 0xbc050fd0 2866#define PHY_ID_BCM5761 0xbc050fd0
2867#define PHY_ID_BCM5717 0x5c0d8a00
2858#define PHY_ID_BCM5906 0xdc00ac40 2868#define PHY_ID_BCM5906 0xdc00ac40
2859#define PHY_ID_BCM8002 0x60010140 2869#define PHY_ID_BCM8002 0x60010140
2860#define PHY_ID_INVALID 0xffffffff 2870#define PHY_ID_INVALID 0xffffffff
@@ -2896,7 +2906,7 @@ struct tg3 {
2896 (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \ 2906 (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \
2897 (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \ 2907 (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \
2898 (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \ 2908 (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \
2899 (X) == PHY_ID_BCM8002) 2909 (X) == PHY_ID_BCM5717 || (X) == PHY_ID_BCM8002)
2900 2910
2901 struct tg3_hw_stats *hw_stats; 2911 struct tg3_hw_stats *hw_stats;
2902 dma_addr_t stats_mapping; 2912 dma_addr_t stats_mapping;
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 724158966ec1..cf552d1d9629 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -610,9 +610,8 @@ static int xl_open(struct net_device *dev)
610 610
611 u16 switchsettings, switchsettings_eeprom ; 611 u16 switchsettings, switchsettings_eeprom ;
612 612
613 if(request_irq(dev->irq, &xl_interrupt, IRQF_SHARED , "3c359", dev)) { 613 if (request_irq(dev->irq, xl_interrupt, IRQF_SHARED , "3c359", dev))
614 return -EAGAIN; 614 return -EAGAIN;
615 }
616 615
617 /* 616 /*
618 * Read the information from the EEPROM that we need. 617 * Read the information from the EEPROM that we need.
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index d9ec7f0bbd0a..df32025c5132 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -445,9 +445,9 @@ static int olympic_open(struct net_device *dev)
445 445
446 olympic_init(dev); 446 olympic_init(dev);
447 447
448 if(request_irq(dev->irq, &olympic_interrupt, IRQF_SHARED , "olympic", dev)) { 448 if (request_irq(dev->irq, olympic_interrupt, IRQF_SHARED , "olympic",
449 dev))
449 return -EAGAIN; 450 return -EAGAIN;
450 }
451 451
452#if OLYMPIC_DEBUG 452#if OLYMPIC_DEBUG
453 printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM)); 453 printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index d6d345229fe9..4b7541024424 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -2150,7 +2150,7 @@ typhoon_open(struct net_device *dev)
2150 goto out_sleep; 2150 goto out_sleep;
2151 } 2151 }
2152 2152
2153 err = request_irq(dev->irq, &typhoon_interrupt, IRQF_SHARED, 2153 err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
2154 dev->name, dev); 2154 dev->name, dev);
2155 if(err < 0) 2155 if(err < 0)
2156 goto out_sleep; 2156 goto out_sleep;
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 4535e89dfff1..ec94ddf01f56 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1150,7 +1150,7 @@ static int rhine_open(struct net_device *dev)
1150 void __iomem *ioaddr = rp->base; 1150 void __iomem *ioaddr = rp->base;
1151 int rc; 1151 int rc;
1152 1152
1153 rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name, 1153 rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
1154 dev); 1154 dev);
1155 if (rc) 1155 if (rc)
1156 return rc; 1156 return rc;
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 158f411bd555..1e6b395c555f 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2176,7 +2176,7 @@ static int velocity_open(struct net_device *dev)
2176 2176
2177 velocity_init_registers(vptr, VELOCITY_INIT_COLD); 2177 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2178 2178
2179 ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED, 2179 ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
2180 dev->name, dev); 2180 dev->name, dev);
2181 if (ret < 0) { 2181 if (ret < 0) {
2182 /* Power down the chip */ 2182 /* Power down the chip */
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index dc8ee4438a4f..b4889e6c4a57 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -90,23 +90,60 @@ enum {
90 VMXNET3_CMD_GET_CONF_INTR 90 VMXNET3_CMD_GET_CONF_INTR
91}; 91};
92 92
93struct Vmxnet3_TxDesc { 93/*
94 u64 addr; 94 * Little Endian layout of bitfields -
95 * Byte 0 : 7.....len.....0
96 * Byte 1 : rsvd gen 13.len.8
97 * Byte 2 : 5.msscof.0 ext1 dtype
98 * Byte 3 : 13...msscof...6
99 *
100 * Big Endian layout of bitfields -
101 * Byte 0: 13...msscof...6
102 * Byte 1 : 5.msscof.0 ext1 dtype
103 * Byte 2 : rsvd gen 13.len.8
104 * Byte 3 : 7.....len.....0
105 *
106 * Thus, le32_to_cpu on the dword will allow the big endian driver to read
 107 * the bit fields correctly. And cpu_to_le32 will convert bit fields
 108 * written by the big endian driver to the format required by the device.
109 */
95 110
96 u32 len:14; 111struct Vmxnet3_TxDesc {
97 u32 gen:1; /* generation bit */ 112 __le64 addr;
98 u32 rsvd:1; 113
99 u32 dtype:1; /* descriptor type */ 114#ifdef __BIG_ENDIAN_BITFIELD
100 u32 ext1:1; 115 u32 msscof:14; /* MSS, checksum offset, flags */
101 u32 msscof:14; /* MSS, checksum offset, flags */ 116 u32 ext1:1;
102 117 u32 dtype:1; /* descriptor type */
103 u32 hlen:10; /* header len */ 118 u32 rsvd:1;
104 u32 om:2; /* offload mode */ 119 u32 gen:1; /* generation bit */
105 u32 eop:1; /* End Of Packet */ 120 u32 len:14;
106 u32 cq:1; /* completion request */ 121#else
107 u32 ext2:1; 122 u32 len:14;
108 u32 ti:1; /* VLAN Tag Insertion */ 123 u32 gen:1; /* generation bit */
109 u32 tci:16; /* Tag to Insert */ 124 u32 rsvd:1;
125 u32 dtype:1; /* descriptor type */
126 u32 ext1:1;
127 u32 msscof:14; /* MSS, checksum offset, flags */
128#endif /* __BIG_ENDIAN_BITFIELD */
129
130#ifdef __BIG_ENDIAN_BITFIELD
131 u32 tci:16; /* Tag to Insert */
132 u32 ti:1; /* VLAN Tag Insertion */
133 u32 ext2:1;
134 u32 cq:1; /* completion request */
135 u32 eop:1; /* End Of Packet */
136 u32 om:2; /* offload mode */
137 u32 hlen:10; /* header len */
138#else
139 u32 hlen:10; /* header len */
140 u32 om:2; /* offload mode */
141 u32 eop:1; /* End Of Packet */
142 u32 cq:1; /* completion request */
143 u32 ext2:1;
144 u32 ti:1; /* VLAN Tag Insertion */
145 u32 tci:16; /* Tag to Insert */
146#endif /* __BIG_ENDIAN_BITFIELD */
110}; 147};
111 148
112/* TxDesc.OM values */ 149/* TxDesc.OM values */
@@ -118,6 +155,8 @@ struct Vmxnet3_TxDesc {
 #define VMXNET3_TXD_EOP_SHIFT	12
 #define VMXNET3_TXD_CQ_SHIFT	13
 #define VMXNET3_TXD_GEN_SHIFT	14
+#define VMXNET3_TXD_EOP_DWORD_SHIFT 3
+#define VMXNET3_TXD_GEN_DWORD_SHIFT 2
 
 #define VMXNET3_TXD_CQ		(1 << VMXNET3_TXD_CQ_SHIFT)
 #define VMXNET3_TXD_EOP		(1 << VMXNET3_TXD_EOP_SHIFT)
@@ -130,29 +169,40 @@ struct Vmxnet3_TxDataDesc {
 	u8		data[VMXNET3_HDR_COPY_SIZE];
 };
 
+#define VMXNET3_TCD_GEN_SHIFT	31
+#define VMXNET3_TCD_GEN_SIZE	1
+#define VMXNET3_TCD_TXIDX_SHIFT	0
+#define VMXNET3_TCD_TXIDX_SIZE	12
+#define VMXNET3_TCD_GEN_DWORD_SHIFT	3
 
 struct Vmxnet3_TxCompDesc {
 	u32		txdIdx:12;    /* Index of the EOP TxDesc */
 	u32		ext1:20;
 
-	u32		ext2;
-	u32		ext3;
+	__le32		ext2;
+	__le32		ext3;
 
 	u32		rsvd:24;
 	u32		type:7;       /* completion type */
 	u32		gen:1;        /* generation bit */
 };
 
-
 struct Vmxnet3_RxDesc {
-	u64		addr;
+	__le64		addr;
 
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32		gen:1;        /* Generation bit */
+	u32		rsvd:15;
+	u32		dtype:1;      /* Descriptor type */
+	u32		btype:1;      /* Buffer Type */
+	u32		len:14;
+#else
 	u32		len:14;
 	u32		btype:1;      /* Buffer Type */
 	u32		dtype:1;      /* Descriptor type */
 	u32		rsvd:15;
 	u32		gen:1;        /* Generation bit */
-
+#endif
 	u32		ext1;
 };
 
@@ -164,8 +214,17 @@ struct Vmxnet3_RxDesc {
 #define VMXNET3_RXD_BTYPE_SHIFT  14
 #define VMXNET3_RXD_GEN_SHIFT    31
 
-
 struct Vmxnet3_RxCompDesc {
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32		ext2:1;
+	u32		cnc:1;        /* Checksum Not Calculated */
+	u32		rssType:4;    /* RSS hash type used */
+	u32		rqID:10;      /* rx queue/ring ID */
+	u32		sop:1;        /* Start of Packet */
+	u32		eop:1;        /* End of Packet */
+	u32		ext1:2;
+	u32		rxdIdx:12;    /* Index of the RxDesc */
+#else
 	u32		rxdIdx:12;    /* Index of the RxDesc */
 	u32		ext1:2;
 	u32		eop:1;        /* End of Packet */
@@ -174,14 +233,36 @@ struct Vmxnet3_RxCompDesc {
 	u32		rssType:4;    /* RSS hash type used */
 	u32		cnc:1;        /* Checksum Not Calculated */
 	u32		ext2:1;
+#endif  /* __BIG_ENDIAN_BITFIELD */
 
-	u32		rssHash;      /* RSS hash value */
+	__le32		rssHash;      /* RSS hash value */
 
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32		tci:16;       /* Tag stripped */
+	u32		ts:1;         /* Tag is stripped */
+	u32		err:1;        /* Error */
+	u32		len:14;       /* data length */
+#else
 	u32		len:14;       /* data length */
 	u32		err:1;        /* Error */
 	u32		ts:1;         /* Tag is stripped */
 	u32		tci:16;       /* Tag stripped */
+#endif  /* __BIG_ENDIAN_BITFIELD */
+
 
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32		gen:1;        /* generation bit */
+	u32		type:7;       /* completion type */
+	u32		fcs:1;        /* Frame CRC correct */
+	u32		frg:1;        /* IP Fragment */
+	u32		v4:1;         /* IPv4 */
+	u32		v6:1;         /* IPv6 */
+	u32		ipc:1;        /* IP Checksum Correct */
+	u32		tcp:1;        /* TCP packet */
+	u32		udp:1;        /* UDP packet */
+	u32		tuc:1;        /* TCP/UDP Checksum Correct */
+	u32		csum:16;
+#else
 	u32		csum:16;
 	u32		tuc:1;        /* TCP/UDP Checksum Correct */
 	u32		udp:1;        /* UDP packet */
@@ -193,6 +274,7 @@ struct Vmxnet3_RxCompDesc {
 	u32		fcs:1;        /* Frame CRC correct */
 	u32		type:7;       /* completion type */
 	u32		gen:1;        /* generation bit */
+#endif  /* __BIG_ENDIAN_BITFIELD */
 };
 
 /* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */
@@ -206,6 +288,8 @@ struct Vmxnet3_RxCompDesc {
 /* csum OK for TCP/UDP pkts over IP */
 #define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | \
 			     1 << VMXNET3_RCD_IPC_SHIFT)
+#define VMXNET3_TXD_GEN_SIZE 1
+#define VMXNET3_TXD_EOP_SIZE 1
 
 /* value of RxCompDesc.rssType */
 enum {
@@ -219,9 +303,9 @@ enum {
 
 /* a union for accessing all cmd/completion descriptors */
 union Vmxnet3_GenericDesc {
-	u64				qword[2];
-	u32				dword[4];
-	u16				word[8];
+	__le64				qword[2];
+	__le32				dword[4];
+	__le16				word[8];
 	struct Vmxnet3_TxDesc		txd;
 	struct Vmxnet3_RxDesc		rxd;
 	struct Vmxnet3_TxCompDesc	tcd;
@@ -287,18 +371,24 @@ enum {
 
 
 struct Vmxnet3_GOSInfo {
-	u32		gosBits:2;     /* 32-bit or 64-bit? */
-	u32		gosType:4;     /* which guest */
-	u32		gosVer:16;     /* gos version */
-	u32		gosMisc:10;    /* other info about gos */
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32		gosMisc:10;    /* other info about gos */
+	u32		gosVer:16;     /* gos version */
+	u32		gosType:4;     /* which guest */
+	u32		gosBits:2;     /* 32-bit or 64-bit? */
+#else
+	u32		gosBits:2;     /* 32-bit or 64-bit? */
+	u32		gosType:4;     /* which guest */
+	u32		gosVer:16;     /* gos version */
+	u32		gosMisc:10;    /* other info about gos */
+#endif  /* __BIG_ENDIAN_BITFIELD */
 };
 
-
 struct Vmxnet3_DriverInfo {
-	u32		version;
+	__le32		version;
 	struct Vmxnet3_GOSInfo gos;
-	u32		vmxnet3RevSpt;
-	u32		uptVerSpt;
+	__le32		vmxnet3RevSpt;
+	__le32		uptVerSpt;
 };
 
 
@@ -315,42 +405,42 @@ struct Vmxnet3_DriverInfo {
 
 struct Vmxnet3_MiscConf {
 	struct Vmxnet3_DriverInfo driverInfo;
-	u64		uptFeatures;
-	u64		ddPA;         /* driver data PA */
-	u64		queueDescPA;  /* queue descriptor table PA */
-	u32		ddLen;        /* driver data len */
-	u32		queueDescLen; /* queue desc. table len in bytes */
-	u32		mtu;
-	u16		maxNumRxSG;
+	__le64		uptFeatures;
+	__le64		ddPA;         /* driver data PA */
+	__le64		queueDescPA;  /* queue descriptor table PA */
+	__le32		ddLen;        /* driver data len */
+	__le32		queueDescLen; /* queue desc. table len in bytes */
+	__le32		mtu;
+	__le16		maxNumRxSG;
 	u8		numTxQueues;
 	u8		numRxQueues;
-	u32		reserved[4];
+	__le32		reserved[4];
 };
 
 
 struct Vmxnet3_TxQueueConf {
-	u64		txRingBasePA;
-	u64		dataRingBasePA;
-	u64		compRingBasePA;
-	u64		ddPA;         /* driver data */
-	u64		reserved;
-	u32		txRingSize;   /* # of tx desc */
-	u32		dataRingSize; /* # of data desc */
-	u32		compRingSize; /* # of comp desc */
-	u32		ddLen;        /* size of driver data */
+	__le64		txRingBasePA;
+	__le64		dataRingBasePA;
+	__le64		compRingBasePA;
+	__le64		ddPA;         /* driver data */
+	__le64		reserved;
+	__le32		txRingSize;   /* # of tx desc */
+	__le32		dataRingSize; /* # of data desc */
+	__le32		compRingSize; /* # of comp desc */
+	__le32		ddLen;        /* size of driver data */
 	u8		intrIdx;
 	u8		_pad[7];
 };
 
 
 struct Vmxnet3_RxQueueConf {
-	u64		rxRingBasePA[2];
-	u64		compRingBasePA;
-	u64		ddPA;            /* driver data */
-	u64		reserved;
-	u32		rxRingSize[2];   /* # of rx desc */
-	u32		compRingSize;    /* # of rx comp desc */
-	u32		ddLen;           /* size of driver data */
+	__le64		rxRingBasePA[2];
+	__le64		compRingBasePA;
+	__le64		ddPA;            /* driver data */
+	__le64		reserved;
+	__le32		rxRingSize[2];   /* # of rx desc */
+	__le32		compRingSize;    /* # of rx comp desc */
+	__le32		ddLen;           /* size of driver data */
 	u8		intrIdx;
 	u8		_pad[7];
 };
@@ -381,7 +471,7 @@ struct Vmxnet3_IntrConf {
 	u8		eventIntrIdx;
 	u8		modLevels[VMXNET3_MAX_INTRS];	/* moderation level for
 							 * each intr */
-	u32		reserved[3];
+	__le32		reserved[3];
 };
 
 /* one bit per VLAN ID, the size is in the units of u32 */
@@ -391,21 +481,21 @@ struct Vmxnet3_IntrConf {
 struct Vmxnet3_QueueStatus {
 	bool		stopped;
 	u8		_pad[3];
-	u32		error;
+	__le32		error;
 };
 
 
 struct Vmxnet3_TxQueueCtrl {
-	u32		txNumDeferred;
-	u32		txThreshold;
-	u64		reserved;
+	__le32		txNumDeferred;
+	__le32		txThreshold;
+	__le64		reserved;
 };
 
 
 struct Vmxnet3_RxQueueCtrl {
 	bool		updateRxProd;
 	u8		_pad[7];
-	u64		reserved;
+	__le64		reserved;
 };
 
 enum {
@@ -417,11 +507,11 @@ enum {
 };
 
 struct Vmxnet3_RxFilterConf {
-	u32		rxMode;       /* VMXNET3_RXM_xxx */
-	u16		mfTableLen;   /* size of the multicast filter table */
-	u16		_pad1;
-	u64		mfTablePA;    /* PA of the multicast filters table */
-	u32		vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */
+	__le32		rxMode;       /* VMXNET3_RXM_xxx */
+	__le16		mfTableLen;   /* size of the multicast filter table */
+	__le16		_pad1;
+	__le64		mfTablePA;    /* PA of the multicast filters table */
+	__le32		vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */
 };
 
 
@@ -444,7 +534,7 @@ struct Vmxnet3_PM_PktFilter {
 
 
 struct Vmxnet3_PMConf {
-	u16		wakeUpEvents;  /* VMXNET3_PM_WAKEUP_xxx */
+	__le16		wakeUpEvents;  /* VMXNET3_PM_WAKEUP_xxx */
 	u8		numFilters;
 	u8		pad[5];
 	struct Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS];
@@ -452,9 +542,9 @@ struct Vmxnet3_PMConf {
 
 
 struct Vmxnet3_VariableLenConfDesc {
-	u32		confVer;
-	u32		confLen;
-	u64		confPA;
+	__le32		confVer;
+	__le32		confLen;
+	__le64		confPA;
 };
 
 
@@ -491,12 +581,12 @@ struct Vmxnet3_DSDevRead {
 
 /* All structures in DriverShared are padded to multiples of 8 bytes */
 struct Vmxnet3_DriverShared {
-	u32		magic;
+	__le32		magic;
 	/* make devRead start at 64bit boundaries */
-	u32		pad;
+	__le32		pad;
 	struct Vmxnet3_DSDevRead devRead;
-	u32		ecr;
-	u32		reserved[5];
+	__le32		ecr;
+	__le32		reserved[5];
 };
 
 
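
The structural idea running through vmxnet3_defs.h: C makes bitfield order within a storage unit implementation-defined, so each descriptor declares its fields twice, in opposite order, selected by __BIG_ENDIAN_BITFIELD; a single le32_to_cpu/cpu_to_le32 on the containing dword then makes both layouts agree with the little-endian device ABI. A rough, self-contained illustration on a little-endian host; the demo_desc struct and its field widths are invented for the example:

    #include <stdint.h>
    #include <stdio.h>

    /* Invented mini-descriptor: low 14 bits = len, bit 14 = gen. A big
     * endian build would declare the fields in reverse order and run the
     * raw dword through le32_to_cpu() before reading it. */
    struct demo_desc {
        uint32_t len:14;
        uint32_t gen:1;
        uint32_t rsvd:17;
    };

    #define DEMO_GEN_SHIFT 14

    int main(void)
    {
        union {
            struct demo_desc d;
            uint32_t dword;
        } u = { .dword = 0 };

        u.d.len = 1500;
        u.d.gen = 1;

        /* The mask-and-shift view of the dword matches the bitfields. */
        printf("len=%u gen=%u\n",
               u.dword & 0x3fff, (u.dword >> DEMO_GEN_SHIFT) & 1u);
        return 0;
    }
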
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 004353a46af0..a4c97e786ee5 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -24,12 +24,13 @@
 *
 */
 
+#include <net/ip6_checksum.h>
+
 #include "vmxnet3_int.h"
 
 char vmxnet3_driver_name[] = "vmxnet3";
 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
 
-
 /*
  * PCI Device ID Table
  * Last entry must be all 0s
@@ -151,11 +152,10 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter)
 	}
 }
 
-
 static void
 vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 {
-	u32 events = adapter->shared->ecr;
+	u32 events = le32_to_cpu(adapter->shared->ecr);
 	if (!events)
 		return;
 
@@ -173,7 +173,7 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 		if (adapter->tqd_start->status.stopped) {
 			printk(KERN_ERR "%s: tq error 0x%x\n",
 			       adapter->netdev->name,
-			       adapter->tqd_start->status.error);
+			       le32_to_cpu(adapter->tqd_start->status.error));
 		}
 		if (adapter->rqd_start->status.stopped) {
 			printk(KERN_ERR "%s: rq error 0x%x\n",
@@ -185,6 +185,106 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 	}
 }
 
+#ifdef __BIG_ENDIAN_BITFIELD
+/*
+ * The device expects the bitfields in shared structures to be written in
+ * little endian. When the CPU is big endian, the following routines are used
+ * to correctly read from and write to the ABI.
+ * The general technique used here is : double word bitfields are defined in
+ * opposite order for big endian architecture. Then before reading them in
+ * driver the complete double word is translated using le32_to_cpu. Similarly,
+ * after the driver writes into bitfields, cpu_to_le32 is used to translate the
+ * double words into required format.
+ * In order to avoid touching bits in shared structure more than once, temporary
+ * descriptors are used. These are passed as srcDesc to following functions.
+ */
+static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
+				struct Vmxnet3_RxDesc *dstDesc)
+{
+	u32 *src = (u32 *)srcDesc + 2;
+	u32 *dst = (u32 *)dstDesc + 2;
+	dstDesc->addr = le64_to_cpu(srcDesc->addr);
+	*dst = le32_to_cpu(*src);
+	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
+}
+
+static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
+			       struct Vmxnet3_TxDesc *dstDesc)
+{
+	int i;
+	u32 *src = (u32 *)(srcDesc + 1);
+	u32 *dst = (u32 *)(dstDesc + 1);
+
+	/* Working backwards so that the gen bit is set at the end. */
+	for (i = 2; i > 0; i--) {
+		src--;
+		dst--;
+		*dst = cpu_to_le32(*src);
+	}
+}
+
+
+static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
+				struct Vmxnet3_RxCompDesc *dstDesc)
+{
+	int i = 0;
+	u32 *src = (u32 *)srcDesc;
+	u32 *dst = (u32 *)dstDesc;
+	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
+		*dst = le32_to_cpu(*src);
+		src++;
+		dst++;
+	}
+}
+
+
+/* Used to read bitfield values from double words. */
+static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
+{
+	u32 temp = le32_to_cpu(*bitfield);
+	u32 mask = ((1 << size) - 1) << pos;
+	temp &= mask;
+	temp >>= pos;
+	return temp;
+}
+
+
+
+#endif  /* __BIG_ENDIAN_BITFIELD */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+
+#  define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
+			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
+			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
+#  define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
+			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
+			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
+#  define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
+			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
+			VMXNET3_TCD_GEN_SIZE)
+#  define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
+			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
+#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
+			(dstrcd) = (tmp); \
+			vmxnet3_RxCompToCPU((rcd), (tmp)); \
+		} while (0)
+#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
+			(dstrxd) = (tmp); \
+			vmxnet3_RxDescToCPU((rxd), (tmp)); \
+		} while (0)
+
+#else
+
+#  define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
+#  define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
+#  define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
+#  define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
+#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
+#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
+
+#endif /* __BIG_ENDIAN_BITFIELD */
+
 
 static void
 vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
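
The new accessors above boil down to mask-and-shift: the *_DWORD_SHIFT constants select which 32-bit word of a descriptor to read, and the *_SHIFT/*_SIZE pairs locate the field inside it. A user-space sketch of the same extraction, with the le32_to_cpu step omitted since the host here is assumed little endian; the descriptor value is made up:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t get_bitfield32(const uint32_t *bitfield, uint32_t pos,
                                   uint32_t size)
    {
        /* In the driver *bitfield is __le32 and goes through le32_to_cpu()
         * first; on a little-endian host that step is a no-op. */
        uint32_t mask = ((1u << size) - 1) << pos;

        return (*bitfield & mask) >> pos;
    }

    int main(void)
    {
        uint32_t dword = (1u << 31) | 0xabc;  /* gen bit + a 12-bit index */

        printf("gen=%u txdIdx=%u\n",
               get_bitfield32(&dword, 31, 1),   /* TCD_GEN_SHIFT/SIZE */
               get_bitfield32(&dword, 0, 12));  /* TCD_TXIDX_SHIFT/SIZE */
        return 0;
    }
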
@@ -212,7 +312,7 @@ vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
 
 	/* no out of order completion */
 	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
-	BUG_ON(tq->tx_ring.base[eop_idx].txd.eop != 1);
+	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
 
 	skb = tq->buf_info[eop_idx].skb;
 	BUG_ON(skb == NULL);
@@ -246,9 +346,10 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
 	union Vmxnet3_GenericDesc *gdesc;
 
 	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
-	while (gdesc->tcd.gen == tq->comp_ring.gen) {
-		completed += vmxnet3_unmap_pkt(gdesc->tcd.txdIdx, tq,
-					       adapter->pdev, adapter);
+	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
+		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
+					       &gdesc->tcd), tq, adapter->pdev,
+					       adapter);
 
 		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
 		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
@@ -472,9 +573,9 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 		}
 
 		BUG_ON(rbi->dma_addr == 0);
-		gd->rxd.addr = rbi->dma_addr;
-		gd->dword[2] = (ring->gen << VMXNET3_RXD_GEN_SHIFT) | val |
-				rbi->len;
+		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
+		gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
+					   | val | rbi->len);
 
 		num_allocated++;
 		vmxnet3_cmd_ring_adv_next2fill(ring);
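
Filling dword[2] above packs gen, buffer type and length into one little-endian word instead of touching three bitfields separately. A sketch of the packing; the shift values mirror the VMXNET3_RXD_* defines earlier in the patch:

    #include <stdint.h>
    #include <stdio.h>

    #define RXD_BTYPE_SHIFT 14
    #define RXD_GEN_SHIFT   31

    static uint32_t pack_rxd_dword2(uint32_t gen, uint32_t btype, uint32_t len)
    {
        /* The driver wraps this value in cpu_to_le32() before writing it
         * into the shared ring. */
        return (gen << RXD_GEN_SHIFT) | (btype << RXD_BTYPE_SHIFT) | len;
    }

    int main(void)
    {
        printf("dword2=%#x\n", pack_rxd_dword2(1, 1, 2048));
        return 0;
    }
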
@@ -531,10 +632,10 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 
 	/* no need to map the buffer if headers are copied */
 	if (ctx->copy_size) {
-		ctx->sop_txd->txd.addr = tq->data_ring.basePA +
+		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
 					tq->tx_ring.next2fill *
-					sizeof(struct Vmxnet3_TxDataDesc);
-		ctx->sop_txd->dword[2] = dw2 | ctx->copy_size;
+					sizeof(struct Vmxnet3_TxDataDesc));
+		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
 		ctx->sop_txd->dword[3] = 0;
 
 		tbi = tq->buf_info + tq->tx_ring.next2fill;
@@ -542,7 +643,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 
 		dev_dbg(&adapter->netdev->dev,
 			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
-			tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
+			tq->tx_ring.next2fill,
+			le64_to_cpu(ctx->sop_txd->txd.addr),
 			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
 
@@ -570,14 +672,14 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
 		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
 
-		gdesc->txd.addr = tbi->dma_addr;
-		gdesc->dword[2] = dw2 | buf_size;
+		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
+		gdesc->dword[2] = cpu_to_le32(dw2 | buf_size);
 		gdesc->dword[3] = 0;
 
 		dev_dbg(&adapter->netdev->dev,
 			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
-			tq->tx_ring.next2fill, gdesc->txd.addr,
-			gdesc->dword[2], gdesc->dword[3]);
+			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
+			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
 		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
 
@@ -599,14 +701,14 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
 			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
 
-			gdesc->txd.addr = tbi->dma_addr;
-			gdesc->dword[2] = dw2 | frag->size;
+			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
+			gdesc->dword[2] = cpu_to_le32(dw2 | frag->size);
 			gdesc->dword[3] = 0;
 
 			dev_dbg(&adapter->netdev->dev,
 				"txd[%u]: 0x%llu %u %u\n",
-				tq->tx_ring.next2fill, gdesc->txd.addr,
-				gdesc->dword[2], gdesc->dword[3]);
+				tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
+				le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
 			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
 			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
 		}
@@ -751,6 +853,10 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	unsigned long flags;
 	struct vmxnet3_tx_ctx ctx;
 	union Vmxnet3_GenericDesc *gdesc;
+#ifdef __BIG_ENDIAN_BITFIELD
+	/* Use temporary descriptor to avoid touching bits multiple times */
+	union Vmxnet3_GenericDesc tempTxDesc;
+#endif
 
 	/* conservatively estimate # of descriptors to use */
 	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
@@ -827,16 +933,22 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
 
 	/* setup the EOP desc */
-	ctx.eop_txd->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;
+	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
 
 	/* setup the SOP desc */
+#ifdef __BIG_ENDIAN_BITFIELD
+	gdesc = &tempTxDesc;
+	gdesc->dword[2] = ctx.sop_txd->dword[2];
+	gdesc->dword[3] = ctx.sop_txd->dword[3];
+#else
 	gdesc = ctx.sop_txd;
+#endif
 	if (ctx.mss) {
 		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
 		gdesc->txd.om = VMXNET3_OM_TSO;
 		gdesc->txd.msscof = ctx.mss;
-		tq->shared->txNumDeferred += (skb->len - gdesc->txd.hlen +
-					      ctx.mss - 1) / ctx.mss;
+		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
+			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
 	} else {
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
@@ -847,7 +959,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 			gdesc->txd.om = 0;
 			gdesc->txd.msscof = 0;
 		}
-		tq->shared->txNumDeferred++;
+		le32_add_cpu(&tq->shared->txNumDeferred, 1);
 	}
 
 	if (vlan_tx_tag_present(skb)) {
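
Two idioms meet in the TSO branch above: le32_add_cpu() does a read-modify-write on a counter stored little-endian, and (payload + mss - 1) / mss is the usual integer ceiling division for the number of segments a frame will produce. A quick sketch of the arithmetic; the values are arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    /* Ceiling division: mss-sized segments needed for the TCP payload. */
    static uint32_t tso_segments(uint32_t skb_len, uint32_t hlen, uint32_t mss)
    {
        return (skb_len - hlen + mss - 1) / mss;
    }

    int main(void)
    {
        /* 64KB frame, 54-byte headers, 1448-byte MSS -> 46 segments */
        printf("segments=%u\n", tso_segments(65536, 54, 1448));
        return 0;
    }
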
@@ -855,19 +967,27 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 		gdesc->txd.tci = vlan_tx_tag_get(skb);
 	}
 
-	wmb();
-
-	/* finally flips the GEN bit of the SOP desc */
-	gdesc->dword[2] ^= VMXNET3_TXD_GEN;
+	/* finally flips the GEN bit of the SOP desc. */
+	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
+						  VMXNET3_TXD_GEN);
+#ifdef __BIG_ENDIAN_BITFIELD
+	/* Finished updating in bitfields of Tx Desc, so write them in original
+	 * place.
+	 */
+	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
+			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
+	gdesc = ctx.sop_txd;
+#endif
 	dev_dbg(&adapter->netdev->dev,
 		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
 		(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
-		tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
-		gdesc->dword[3]);
+		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
+		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
 
 	spin_unlock_irqrestore(&tq->tx_lock, flags);
 
-	if (tq->shared->txNumDeferred >= tq->shared->txThreshold) {
+	if (le32_to_cpu(tq->shared->txNumDeferred) >=
+					le32_to_cpu(tq->shared->txThreshold)) {
 		tq->shared->txNumDeferred = 0;
 		VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
 				       tq->tx_ring.next2fill);
@@ -889,9 +1009,8 @@ static netdev_tx_t
 vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
-	struct vmxnet3_tx_queue *tq = &adapter->tx_queue;
 
-	return vmxnet3_tq_xmit(skb, tq, adapter, netdev);
+	return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev);
 }
 
 
@@ -902,7 +1021,7 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
 {
 	if (!gdesc->rcd.cnc && adapter->rxcsum) {
 		/* typical case: TCP/UDP over IP and both csums are correct */
-		if ((gdesc->dword[3] & VMXNET3_RCD_CSUM_OK) ==
+		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
 							VMXNET3_RCD_CSUM_OK) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
@@ -957,8 +1076,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 	u32 num_rxd = 0;
 	struct Vmxnet3_RxCompDesc *rcd;
 	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
-
-	rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
+#ifdef __BIG_ENDIAN_BITFIELD
+	struct Vmxnet3_RxDesc rxCmdDesc;
+	struct Vmxnet3_RxCompDesc rxComp;
+#endif
+	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
+			  &rxComp);
 	while (rcd->gen == rq->comp_ring.gen) {
 		struct vmxnet3_rx_buf_info *rbi;
 		struct sk_buff *skb;
@@ -976,11 +1099,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 
 		idx = rcd->rxdIdx;
 		ring_idx = rcd->rqID == rq->qid ? 0 : 1;
-
-		rxd = &rq->rx_ring[ring_idx].base[idx].rxd;
+		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
+				  &rxCmdDesc);
 		rbi = rq->buf_info[ring_idx] + idx;
 
-		BUG_ON(rxd->addr != rbi->dma_addr || rxd->len != rbi->len);
+		BUG_ON(rxd->addr != rbi->dma_addr ||
+		       rxd->len != rbi->len);
 
 		if (unlikely(rcd->eop && rcd->err)) {
 			vmxnet3_rx_error(rq, rcd, ctx, adapter);
@@ -1078,7 +1202,8 @@ rcd_done:
 		}
 
 		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
-		rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
+		vmxnet3_getRxComp(rcd,
+			&rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
 	}
 
 	return num_rxd;
@@ -1094,7 +1219,11 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
 
 	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
 		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
-			rxd = &rq->rx_ring[ring_idx].base[i].rxd;
+#ifdef __BIG_ENDIAN_BITFIELD
+			struct Vmxnet3_RxDesc rxDesc;
+#endif
+			vmxnet3_getRxDesc(rxd,
+				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
 
 			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
 			    rq->buf_info[ring_idx][i].skb) {
@@ -1346,12 +1475,12 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
 		err = request_irq(adapter->intr.msix_entries[0].vector,
 				  vmxnet3_intr, 0, adapter->netdev->name,
 				  adapter->netdev);
-	} else
-#endif
-	if (adapter->intr.type == VMXNET3_IT_MSI) {
+	} else if (adapter->intr.type == VMXNET3_IT_MSI) {
 		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
 				  adapter->netdev->name, adapter->netdev);
-	} else {
+	} else
+#endif
+	{
 		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
 				  IRQF_SHARED, adapter->netdev->name,
 				  adapter->netdev);
@@ -1412,6 +1541,22 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
 }
 
 
+inline void set_flag_le16(__le16 *data, u16 flag)
+{
+	*data = cpu_to_le16(le16_to_cpu(*data) | flag);
+}
+
+inline void set_flag_le64(__le64 *data, u64 flag)
+{
+	*data = cpu_to_le64(le64_to_cpu(*data) | flag);
+}
+
+inline void reset_flag_le64(__le64 *data, u64 flag)
+{
+	*data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
+}
+
+
 static void
 vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 {
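
The helpers added above centralize a swap-modify-swap pattern for flag words that live in device-shared memory in little-endian byte order. A minimal user-space equivalent; htole64/le64toh stand in for the kernel's cpu_to_le64/le64_to_cpu, and the UPT1_F_RXVLAN value is invented for the demo:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    #define UPT1_F_RXVLAN (1ULL << 2)   /* bit position invented for the demo */

    static void set_flag_le64(uint64_t *data, uint64_t flag)
    {
        *data = htole64(le64toh(*data) | flag);
    }

    static void reset_flag_le64(uint64_t *data, uint64_t flag)
    {
        *data = htole64(le64toh(*data) & ~flag);
    }

    int main(void)
    {
        uint64_t features = 0;

        set_flag_le64(&features, UPT1_F_RXVLAN);
        printf("set:   %#llx\n", (unsigned long long)le64toh(features));
        reset_flag_le64(&features, UPT1_F_RXVLAN);
        printf("clear: %#llx\n", (unsigned long long)le64toh(features));
        return 0;
    }
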
@@ -1427,7 +1572,8 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 		adapter->vlan_grp = grp;
 
 		/* update FEATURES to device */
-		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
+		set_flag_le64(&devRead->misc.uptFeatures,
+			      UPT1_F_RXVLAN);
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_UPDATE_FEATURE);
 		/*
@@ -1450,7 +1596,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 		struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
 		adapter->vlan_grp = NULL;
 
-		if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
+		if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) {
 			int i;
 
 			for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
@@ -1463,7 +1609,8 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
 
 			/* update FEATURES to device */
-			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
+			reset_flag_le64(&devRead->misc.uptFeatures,
+					UPT1_F_RXVLAN);
 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 					       VMXNET3_CMD_UPDATE_FEATURE);
 		}
@@ -1565,9 +1712,10 @@ vmxnet3_set_mc(struct net_device *netdev)
 			new_table = vmxnet3_copy_mc(netdev);
 			if (new_table) {
 				new_mode |= VMXNET3_RXM_MCAST;
-				rxConf->mfTableLen = netdev->mc_count *
-						     ETH_ALEN;
-				rxConf->mfTablePA = virt_to_phys(new_table);
+				rxConf->mfTableLen = cpu_to_le16(
+						netdev->mc_count * ETH_ALEN);
+				rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
+						new_table));
 			} else {
 				printk(KERN_INFO "%s: failed to copy mcast list"
 				       ", setting ALL_MULTI\n", netdev->name);
@@ -1582,7 +1730,7 @@ vmxnet3_set_mc(struct net_device *netdev)
 	}
 
 	if (new_mode != rxConf->rxMode) {
-		rxConf->rxMode = new_mode;
+		rxConf->rxMode = cpu_to_le32(new_mode);
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_UPDATE_RX_MODE);
 	}
@@ -1610,63 +1758,69 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 	memset(shared, 0, sizeof(*shared));
 
 	/* driver settings */
-	shared->magic = VMXNET3_REV1_MAGIC;
-	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
+	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
+	devRead->misc.driverInfo.version = cpu_to_le32(
+						VMXNET3_DRIVER_VERSION_NUM);
 	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
 				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
 	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
-	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
-	devRead->misc.driverInfo.uptVerSpt = 1;
+	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
+				*((u32 *)&devRead->misc.driverInfo.gos));
+	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
+	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
 
-	devRead->misc.ddPA = virt_to_phys(adapter);
-	devRead->misc.ddLen = sizeof(struct vmxnet3_adapter);
+	devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
+	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
 
 	/* set up feature flags */
 	if (adapter->rxcsum)
-		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
+		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);
 
 	if (adapter->lro) {
-		devRead->misc.uptFeatures |= UPT1_F_LRO;
-		devRead->misc.maxNumRxSG = 1 + MAX_SKB_FRAGS;
+		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO);
+		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
 	}
 	if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX)
 			&& adapter->vlan_grp) {
-		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
+		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);
 	}
 
-	devRead->misc.mtu = adapter->netdev->mtu;
-	devRead->misc.queueDescPA = adapter->queue_desc_pa;
-	devRead->misc.queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) +
-				sizeof(struct Vmxnet3_RxQueueDesc);
+	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
+	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
+	devRead->misc.queueDescLen = cpu_to_le32(
+				sizeof(struct Vmxnet3_TxQueueDesc) +
+				sizeof(struct Vmxnet3_RxQueueDesc));
 
 	/* tx queue settings */
 	BUG_ON(adapter->tx_queue.tx_ring.base == NULL);
 
 	devRead->misc.numTxQueues = 1;
 	tqc = &adapter->tqd_start->conf;
-	tqc->txRingBasePA = adapter->tx_queue.tx_ring.basePA;
-	tqc->dataRingBasePA = adapter->tx_queue.data_ring.basePA;
-	tqc->compRingBasePA = adapter->tx_queue.comp_ring.basePA;
-	tqc->ddPA = virt_to_phys(adapter->tx_queue.buf_info);
-	tqc->txRingSize = adapter->tx_queue.tx_ring.size;
-	tqc->dataRingSize = adapter->tx_queue.data_ring.size;
-	tqc->compRingSize = adapter->tx_queue.comp_ring.size;
-	tqc->ddLen = sizeof(struct vmxnet3_tx_buf_info) *
-			tqc->txRingSize;
+	tqc->txRingBasePA = cpu_to_le64(adapter->tx_queue.tx_ring.basePA);
+	tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA);
+	tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA);
+	tqc->ddPA = cpu_to_le64(virt_to_phys(
+				adapter->tx_queue.buf_info));
+	tqc->txRingSize = cpu_to_le32(adapter->tx_queue.tx_ring.size);
+	tqc->dataRingSize = cpu_to_le32(adapter->tx_queue.data_ring.size);
+	tqc->compRingSize = cpu_to_le32(adapter->tx_queue.comp_ring.size);
+	tqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) *
+				 tqc->txRingSize);
 	tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx;
 
 	/* rx queue settings */
 	devRead->misc.numRxQueues = 1;
 	rqc = &adapter->rqd_start->conf;
-	rqc->rxRingBasePA[0] = adapter->rx_queue.rx_ring[0].basePA;
-	rqc->rxRingBasePA[1] = adapter->rx_queue.rx_ring[1].basePA;
-	rqc->compRingBasePA = adapter->rx_queue.comp_ring.basePA;
-	rqc->ddPA = virt_to_phys(adapter->rx_queue.buf_info);
-	rqc->rxRingSize[0] = adapter->rx_queue.rx_ring[0].size;
-	rqc->rxRingSize[1] = adapter->rx_queue.rx_ring[1].size;
-	rqc->compRingSize = adapter->rx_queue.comp_ring.size;
-	rqc->ddLen = sizeof(struct vmxnet3_rx_buf_info) *
-			(rqc->rxRingSize[0] + rqc->rxRingSize[1]);
+	rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA);
+	rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA);
+	rqc->compRingBasePA = cpu_to_le64(adapter->rx_queue.comp_ring.basePA);
+	rqc->ddPA = cpu_to_le64(virt_to_phys(
+				adapter->rx_queue.buf_info));
+	rqc->rxRingSize[0] = cpu_to_le32(adapter->rx_queue.rx_ring[0].size);
+	rqc->rxRingSize[1] = cpu_to_le32(adapter->rx_queue.rx_ring[1].size);
+	rqc->compRingSize = cpu_to_le32(adapter->rx_queue.comp_ring.size);
+	rqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) *
+				 (rqc->rxRingSize[0] + rqc->rxRingSize[1]));
 	rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx;
 
 	/* intr settings */
@@ -1715,11 +1869,10 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
 
 	vmxnet3_setup_driver_shared(adapter);
 
-	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL,
-			       VMXNET3_GET_ADDR_LO(adapter->shared_pa));
-	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH,
-			       VMXNET3_GET_ADDR_HI(adapter->shared_pa));
-
+	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
+			       adapter->shared_pa));
+	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
+			       adapter->shared_pa));
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_ACTIVATE_DEV);
 	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
@@ -2425,7 +2578,7 @@ vmxnet3_suspend(struct device *device)
 		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
 		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
 
-		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
+		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
 		i++;
 	}
 
@@ -2467,19 +2620,21 @@ vmxnet3_suspend(struct device *device)
 		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
 		in_dev_put(in_dev);
 
-		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
+		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
 		i++;
 	}
 
 skip_arp:
 	if (adapter->wol & WAKE_MAGIC)
-		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
+		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC);
 
 	pmConf->numFilters = i;
 
-	adapter->shared->devRead.pmConfDesc.confVer = 1;
-	adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
-	adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);
+	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
+	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
+								*pmConf));
+	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
+								 pmConf));
 
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_PMCFG);
@@ -2510,9 +2665,11 @@ vmxnet3_resume(struct device *device)
 	pmConf = adapter->pm_conf;
 	memset(pmConf, 0, sizeof(*pmConf));
 
-	adapter->shared->devRead.pmConfDesc.confVer = 1;
-	adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
-	adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);
+	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
+	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
+								*pmConf));
+	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
+								 pmConf));
 
 	netif_device_attach(netdev);
 	pci_set_power_state(pdev, PCI_D0);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index c2c15e4cafc7..3935c4493fb7 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -50,11 +50,13 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
 	adapter->rxcsum = val;
 	if (netif_running(netdev)) {
 		if (val)
-			adapter->shared->devRead.misc.uptFeatures |=
-							UPT1_F_RXCSUM;
+			set_flag_le64(
+				&adapter->shared->devRead.misc.uptFeatures,
+				UPT1_F_RXCSUM);
 		else
-			adapter->shared->devRead.misc.uptFeatures &=
-							~UPT1_F_RXCSUM;
+			reset_flag_le64(
+				&adapter->shared->devRead.misc.uptFeatures,
+				UPT1_F_RXCSUM);
 
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_UPDATE_FEATURE);
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 445081686d5d..34f392f46fb1 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -330,14 +330,14 @@ struct vmxnet3_adapter {
 };
 
 #define VMXNET3_WRITE_BAR0_REG(adapter, reg, val)  \
-	writel((val), (adapter)->hw_addr0 + (reg))
+	writel(cpu_to_le32(val), (adapter)->hw_addr0 + (reg))
 #define VMXNET3_READ_BAR0_REG(adapter, reg)        \
-	readl((adapter)->hw_addr0 + (reg))
+	le32_to_cpu(readl((adapter)->hw_addr0 + (reg)))
 
 #define VMXNET3_WRITE_BAR1_REG(adapter, reg, val)  \
-	writel((val), (adapter)->hw_addr1 + (reg))
+	writel(cpu_to_le32(val), (adapter)->hw_addr1 + (reg))
 #define VMXNET3_READ_BAR1_REG(adapter, reg)        \
-	readl((adapter)->hw_addr1 + (reg))
+	le32_to_cpu(readl((adapter)->hw_addr1 + (reg)))
 
 #define VMXNET3_WAKE_QUEUE_THRESHOLD(tq)  (5)
 #define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
@@ -353,6 +353,10 @@ struct vmxnet3_adapter {
 #define VMXNET3_MAX_ETH_HDR_SIZE    22
 #define VMXNET3_MAX_SKB_BUF_SIZE    (3*1024)
 
+void set_flag_le16(__le16 *data, u16 flag);
+void set_flag_le64(__le64 *data, u64 flag);
+void reset_flag_le64(__le64 *data, u64 flag);
+
 int
 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
 
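
The BAR accessor macros now perform the endian conversion at the access boundary, so callers always hand over and receive CPU-order values while the register contents stay in the device's little-endian order. A hedged user-space sketch of the wrapper idea; the fake_bar array and register offset are invented, and in the kernel readl/writel operate on ioremapped MMIO addresses:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_bar[16];   /* stands in for an ioremapped BAR */

    /* Convert exactly once, at the access boundary, the way the
     * VMXNET3_*_BAR*_REG macros do. */
    static void reg_write(unsigned int reg, uint32_t val)
    {
        fake_bar[reg / 4] = htole32(val);
    }

    static uint32_t reg_read(unsigned int reg)
    {
        return le32toh(fake_bar[reg / 4]);
    }

    int main(void)
    {
        reg_write(0x20, 0xcafef00d);    /* offset invented for the demo */
        printf("readback: %#x\n", reg_read(0x20));
        return 0;
    }
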
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 81c8aec9df92..63a010252a37 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1127,7 +1127,7 @@ done:
 	init_timer(&dpriv->timer);
 	dpriv->timer.expires = jiffies + 10*HZ;
 	dpriv->timer.data = (unsigned long)dev;
-	dpriv->timer.function = &dscc4_timer;
+	dpriv->timer.function = dscc4_timer;
 	add_timer(&dpriv->timer);
 	netif_carrier_on(dev);
 
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index b80f514877d8..39410016b4ff 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1538,7 +1538,7 @@ static int adm8211_start(struct ieee80211_hw *dev)
 	adm8211_hw_init(dev);
 	adm8211_rf_set_channel(dev, priv->channel);
 
-	retval = request_irq(priv->pdev->irq, &adm8211_interrupt,
+	retval = request_irq(priv->pdev->irq, adm8211_interrupt,
 			     IRQF_SHARED, "adm8211", dev);
 	if (retval) {
 		printk(KERN_ERR "%s: failed to register IRQ handler\n",
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index a9bc8a97c4e1..b7408370cf82 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -6029,7 +6029,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
 	struct ipw2100_priv *priv;
 	struct net_device *dev;
 
-	dev = alloc_ieee80211(sizeof(struct ipw2100_priv), 0);
+	dev = alloc_ieee80211(sizeof(struct ipw2100_priv));
 	if (!dev)
 		return NULL;
 	priv = libipw_priv(dev);
@@ -6342,7 +6342,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
 		sysfs_remove_group(&pci_dev->dev.kobj,
 				   &ipw2100_attribute_group);
 
-		free_ieee80211(dev, 0);
+		free_ieee80211(dev);
 		pci_set_drvdata(pci_dev, NULL);
 	}
 
@@ -6400,7 +6400,7 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
 		if (dev->base_addr)
 			iounmap((void __iomem *)dev->base_addr);
 
-		free_ieee80211(dev, 0);
+		free_ieee80211(dev);
 	}
 
 	pci_release_regions(pci_dev);
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 39808e9378ba..9b398db2d740 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -108,25 +108,6 @@ static int antenna = CFG_SYS_ANTENNA_BOTH;
 static int rtap_iface = 0;     /* def: 0 -- do not create rtap interface */
 #endif
 
-static struct ieee80211_rate ipw2200_rates[] = {
-	{ .bitrate = 10 },
-	{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
-	{ .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
-	{ .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
-	{ .bitrate = 60 },
-	{ .bitrate = 90 },
-	{ .bitrate = 120 },
-	{ .bitrate = 180 },
-	{ .bitrate = 240 },
-	{ .bitrate = 360 },
-	{ .bitrate = 480 },
-	{ .bitrate = 540 }
-};
-
-#define ipw2200_a_rates		(ipw2200_rates + 4)
-#define ipw2200_num_a_rates	8
-#define ipw2200_bg_rates	(ipw2200_rates + 0)
-#define ipw2200_num_bg_rates	12
 
 #ifdef CONFIG_IPW2200_QOS
 static int qos_enable = 0;
@@ -8678,6 +8659,24 @@ static int ipw_sw_reset(struct ipw_priv *priv, int option)
 *
 */
 
+static int ipw_wx_get_name(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	mutex_lock(&priv->mutex);
+	if (priv->status & STATUS_RF_KILL_MASK)
+		strcpy(wrqu->name, "radio off");
+	else if (!(priv->status & STATUS_ASSOCIATED))
+		strcpy(wrqu->name, "unassociated");
+	else
+		snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
+			 ipw_modes[priv->assoc_request.ieee_mode]);
+	IPW_DEBUG_WX("Name: %s\n", wrqu->name);
+	mutex_unlock(&priv->mutex);
+	return 0;
+}
+
 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
 {
 	if (channel == 0) {
@@ -9977,7 +9976,7 @@ static int ipw_wx_sw_reset(struct net_device *dev,
 /* Rebase the WE IOCTLs to zero for the handler array */
 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
 static iw_handler ipw_wx_handlers[] = {
-	IW_IOCTL(SIOCGIWNAME) = (iw_handler) cfg80211_wext_giwname,
+	IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
 	IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
 	IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
 	IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
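
The IW_IOCTL() macro in this table relies on C99 designated array initializers: each SIOCxIW ioctl number is rebased to a zero-based index so the handler array need not start at SIOCSIWCOMMIT. A toy version of the same pattern; the command numbers and handlers are invented:

    #include <stdio.h>

    typedef int (*handler_fn)(void);

    /* Invented command numbers with a non-zero base, like the SIOCxIW range. */
    #define CMD_BASE 0x8B00
    #define CMD_NAME 0x8B01
    #define CMD_FREQ 0x8B04

    #define IW_IOCTL(x) [(x) - CMD_BASE]    /* rebase to a zero-based index */

    static int get_name(void) { return 1; }
    static int get_freq(void) { return 4; }

    static handler_fn handlers[] = {
        IW_IOCTL(CMD_NAME) = get_name,
        IW_IOCTL(CMD_FREQ) = get_freq,
    };

    int main(void)
    {
        printf("%d %d\n", handlers[CMD_NAME - CMD_BASE](),
               handlers[CMD_FREQ - CMD_BASE]());
        return 0;
    }
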
@@ -11422,100 +11421,16 @@ static void ipw_bg_down(struct work_struct *work)
 /* Called by register_netdev() */
 static int ipw_net_init(struct net_device *dev)
 {
-	int i, rc = 0;
 	struct ipw_priv *priv = libipw_priv(dev);
-	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
-	struct wireless_dev *wdev = &priv->ieee->wdev;
 	mutex_lock(&priv->mutex);
 
 	if (ipw_up(priv)) {
-		rc = -EIO;
-		goto out;
-	}
-
-	memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
-
-	/* fill-out priv->ieee->bg_band */
-	if (geo->bg_channels) {
-		struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
-
-		bg_band->band = IEEE80211_BAND_2GHZ;
-		bg_band->n_channels = geo->bg_channels;
-		bg_band->channels =
-			kzalloc(geo->bg_channels *
-				sizeof(struct ieee80211_channel), GFP_KERNEL);
-		/* translate geo->bg to bg_band.channels */
-		for (i = 0; i < geo->bg_channels; i++) {
-			bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
-			bg_band->channels[i].center_freq = geo->bg[i].freq;
-			bg_band->channels[i].hw_value = geo->bg[i].channel;
-			bg_band->channels[i].max_power = geo->bg[i].max_power;
-			if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
-				bg_band->channels[i].flags |=
-					IEEE80211_CHAN_PASSIVE_SCAN;
-			if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
-				bg_band->channels[i].flags |=
-					IEEE80211_CHAN_NO_IBSS;
-			if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
-				bg_band->channels[i].flags |=
-					IEEE80211_CHAN_RADAR;
-			/* No equivalent for LIBIPW_CH_80211H_RULES,
-			   LIBIPW_CH_UNIFORM_SPREADING, or
-			   LIBIPW_CH_B_ONLY... */
-		}
-		/* point at bitrate info */
-		bg_band->bitrates = ipw2200_bg_rates;
-		bg_band->n_bitrates = ipw2200_num_bg_rates;
-
-		wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
-	}
-
-	/* fill-out priv->ieee->a_band */
-	if (geo->a_channels) {
-		struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
-
-		a_band->band = IEEE80211_BAND_5GHZ;
-		a_band->n_channels = geo->a_channels;
-		a_band->channels =
-			kzalloc(geo->a_channels *
-				sizeof(struct ieee80211_channel), GFP_KERNEL);
-		/* translate geo->bg to a_band.channels */
-		for (i = 0; i < geo->a_channels; i++) {
-			a_band->channels[i].band = IEEE80211_BAND_2GHZ;
-			a_band->channels[i].center_freq = geo->a[i].freq;
-			a_band->channels[i].hw_value = geo->a[i].channel;
-			a_band->channels[i].max_power = geo->a[i].max_power;
-			if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
-				a_band->channels[i].flags |=
-					IEEE80211_CHAN_PASSIVE_SCAN;
-			if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
-				a_band->channels[i].flags |=
-					IEEE80211_CHAN_NO_IBSS;
-			if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
-				a_band->channels[i].flags |=
-					IEEE80211_CHAN_RADAR;
-			/* No equivalent for LIBIPW_CH_80211H_RULES,
-			   LIBIPW_CH_UNIFORM_SPREADING, or
-			   LIBIPW_CH_B_ONLY... */
-		}
-		/* point at bitrate info */
-		a_band->bitrates = ipw2200_a_rates;
-		a_band->n_bitrates = ipw2200_num_a_rates;
-
-		wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
-	}
-
-	set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
-
-	/* With that information in place, we can now register the wiphy... */
-	if (wiphy_register(wdev->wiphy)) {
-		rc = -EIO;
-		goto out;
+		mutex_unlock(&priv->mutex);
+		return -EIO;
 	}
 
-out:
 	mutex_unlock(&priv->mutex);
-	return rc;
+	return 0;
 }
 
 /* PCI driver stuff */
@@ -11646,7 +11561,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
11646 if (priv->prom_net_dev) 11561 if (priv->prom_net_dev)
11647 return -EPERM; 11562 return -EPERM;
11648 11563
11649 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv), 1); 11564 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
11650 if (priv->prom_net_dev == NULL) 11565 if (priv->prom_net_dev == NULL)
11651 return -ENOMEM; 11566 return -ENOMEM;
11652 11567
@@ -11665,7 +11580,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
11665 11580
11666 rc = register_netdev(priv->prom_net_dev); 11581 rc = register_netdev(priv->prom_net_dev);
11667 if (rc) { 11582 if (rc) {
11668 free_ieee80211(priv->prom_net_dev, 1); 11583 free_ieee80211(priv->prom_net_dev);
11669 priv->prom_net_dev = NULL; 11584 priv->prom_net_dev = NULL;
11670 return rc; 11585 return rc;
11671 } 11586 }
@@ -11679,7 +11594,7 @@ static void ipw_prom_free(struct ipw_priv *priv)
11679 return; 11594 return;
11680 11595
11681 unregister_netdev(priv->prom_net_dev); 11596 unregister_netdev(priv->prom_net_dev);
11682 free_ieee80211(priv->prom_net_dev, 1); 11597 free_ieee80211(priv->prom_net_dev);
11683 11598
11684 priv->prom_net_dev = NULL; 11599 priv->prom_net_dev = NULL;
11685} 11600}
@@ -11707,7 +11622,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11707 struct ipw_priv *priv; 11622 struct ipw_priv *priv;
11708 int i; 11623 int i;
11709 11624
11710 net_dev = alloc_ieee80211(sizeof(struct ipw_priv), 0); 11625 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
11711 if (net_dev == NULL) { 11626 if (net_dev == NULL) {
11712 err = -ENOMEM; 11627 err = -ENOMEM;
11713 goto out; 11628 goto out;
@@ -11855,7 +11770,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11855 pci_disable_device(pdev); 11770 pci_disable_device(pdev);
11856 pci_set_drvdata(pdev, NULL); 11771 pci_set_drvdata(pdev, NULL);
11857 out_free_ieee80211: 11772 out_free_ieee80211:
11858 free_ieee80211(priv->net_dev, 0); 11773 free_ieee80211(priv->net_dev);
11859 out: 11774 out:
11860 return err; 11775 return err;
11861} 11776}
@@ -11922,7 +11837,7 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11922 pci_release_regions(pdev); 11837 pci_release_regions(pdev);
11923 pci_disable_device(pdev); 11838 pci_disable_device(pdev);
11924 pci_set_drvdata(pdev, NULL); 11839 pci_set_drvdata(pdev, NULL);
11925 free_ieee80211(priv->net_dev, 0); 11840 free_ieee80211(priv->net_dev);
11926 free_firmware(); 11841 free_firmware();
11927} 11842}
11928 11843
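The IW_IOCTL() wrapper in the handler table above relies on C99 designated array initializers: each wireless-extensions handler lands at the index of its ioctl number minus SIOCSIWCOMMIT, the first wext ioctl, so the table stays sparse and unlisted commands stay NULL. A minimal user-space sketch of the same idiom (the command numbers and handler names here are made-up placeholders, not the driver's):

#include <stdio.h>

#define FIRST_CMD   0x8B00                /* stand-in for SIOCSIWCOMMIT */
#define CMD_SLOT(x) [(x) - FIRST_CMD]     /* same trick as IW_IOCTL() */

typedef int (*handler_t)(void);

static int get_name(void) { return puts("get_name"); }
static int set_freq(void) { return puts("set_freq"); }

/* Sparse table: slots for unlisted commands stay NULL. */
static handler_t handlers[] = {
	CMD_SLOT(0x8B01) = get_name,
	CMD_SLOT(0x8B04) = set_freq,
};

int main(void)
{
	unsigned int cmd = 0x8B04;
	handler_t h = handlers[cmd - FIRST_CMD];

	return h ? h() : -1;    /* reject commands without a handler */
}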
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index bf45391172f3..1e334ff6bd52 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -31,7 +31,6 @@
 #include <linux/ieee80211.h>

 #include <net/lib80211.h>
-#include <net/cfg80211.h>

 #define LIBIPW_VERSION "git-1.1.13"

@@ -784,15 +783,12 @@ struct libipw_geo {

 struct libipw_device {
 	struct net_device *dev;
-	struct wireless_dev wdev;
 	struct libipw_security sec;

 	/* Bookkeeping structures */
 	struct libipw_stats ieee_stats;

 	struct libipw_geo geo;
-	struct ieee80211_supported_band bg_band;
-	struct ieee80211_supported_band a_band;

 	/* Probe / Beacon management */
 	struct list_head network_free_list;
@@ -1018,8 +1014,8 @@ static inline int libipw_is_cck_rate(u8 rate)
 }

 /* ieee80211.c */
-extern void free_ieee80211(struct net_device *dev, int monitor);
-extern struct net_device *alloc_ieee80211(int sizeof_priv, int monitor);
+extern void free_ieee80211(struct net_device *dev);
+extern struct net_device *alloc_ieee80211(int sizeof_priv);
 extern int libipw_change_mtu(struct net_device *dev, int new_mtu);

 extern void libipw_networks_age(struct libipw_device *ieee,
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index a0e9f6aed7da..eb2b60834c17 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -62,9 +62,6 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
 MODULE_AUTHOR(DRV_COPYRIGHT);
 MODULE_LICENSE("GPL");

-struct cfg80211_ops libipw_config_ops = { };
-void *libipw_wiphy_privid = &libipw_wiphy_privid;
-
 static int libipw_networks_allocate(struct libipw_device *ieee)
 {
 	if (ieee->networks)
@@ -143,7 +140,7 @@ int libipw_change_mtu(struct net_device *dev, int new_mtu)
 }
 EXPORT_SYMBOL(libipw_change_mtu);

-struct net_device *alloc_ieee80211(int sizeof_priv, int monitor)
+struct net_device *alloc_ieee80211(int sizeof_priv)
 {
 	struct libipw_device *ieee;
 	struct net_device *dev;
@@ -160,31 +157,10 @@ struct net_device *alloc_ieee80211(int sizeof_priv, int monitor)

 	ieee->dev = dev;

-	if (!monitor) {
-		ieee->wdev.wiphy = wiphy_new(&libipw_config_ops, 0);
-		if (!ieee->wdev.wiphy) {
-			LIBIPW_ERROR("Unable to allocate wiphy.\n");
-			goto failed_free_netdev;
-		}
-
-		ieee->dev->ieee80211_ptr = &ieee->wdev;
-		ieee->wdev.iftype = NL80211_IFTYPE_STATION;
-
-		/* Fill-out wiphy structure bits we know...  Not enough info
-		   here to call set_wiphy_dev or set MAC address or channel info
-		   -- have to do that in ->ndo_init... */
-		ieee->wdev.wiphy->privid = libipw_wiphy_privid;
-
-		ieee->wdev.wiphy->max_scan_ssids = 1;
-		ieee->wdev.wiphy->max_scan_ie_len = 0;
-		ieee->wdev.wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION)
-						| BIT(NL80211_IFTYPE_ADHOC);
-	}
-
 	err = libipw_networks_allocate(ieee);
 	if (err) {
 		LIBIPW_ERROR("Unable to allocate beacon storage: %d\n", err);
-		goto failed_free_wiphy;
+		goto failed_free_netdev;
 	}
 	libipw_networks_initialize(ieee);

@@ -217,31 +193,19 @@ struct net_device *alloc_ieee80211(int sizeof_priv, int monitor)

 	return dev;

-failed_free_wiphy:
-	if (!monitor)
-		wiphy_free(ieee->wdev.wiphy);
failed_free_netdev:
 	free_netdev(dev);
failed:
 	return NULL;
 }

-void free_ieee80211(struct net_device *dev, int monitor)
+void free_ieee80211(struct net_device *dev)
 {
 	struct libipw_device *ieee = netdev_priv(dev);

 	lib80211_crypt_info_free(&ieee->crypt_info);

 	libipw_networks_free(ieee);
-
-	/* free cfg80211 resources */
-	if (!monitor) {
-		wiphy_unregister(ieee->wdev.wiphy);
-		kfree(ieee->a_band.channels);
-		kfree(ieee->bg_band.channels);
-		wiphy_free(ieee->wdev.wiphy);
-	}
-
 	free_netdev(dev);
 }

diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index dc81e19674f7..d4b49883b30e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -355,7 +355,7 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,

 	init_timer(&rs_sta->rate_scale_flush);
 	rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
-	rs_sta->rate_scale_flush.function = &iwl3945_bg_rate_scale_flush;
+	rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;

 	for (i = 0; i < IWL_RATE_COUNT_3945; i++)
 		iwl3945_clear_window(&rs_sta->win[i]);
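The one-line changes in this hunk and in the p54pci/rtl8180 hunks below all drop a redundant `&` before a function name. In C a function designator already decays to a pointer to the function, so `fn` and `&fn` yield the same value; the patch only normalizes style. A tiny self-contained illustration:

#include <assert.h>

static void bg_flush(unsigned long data) { (void)data; }

int main(void)
{
	void (*cb)(unsigned long);

	cb = bg_flush;           /* the function name decays to a pointer... */
	assert(cb == &bg_flush); /* ...so the explicit & is redundant */
	cb(0);
	return 0;
}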
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index d348c265e867..a15962a19b2a 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -411,7 +411,7 @@ static int p54p_open(struct ieee80211_hw *dev)
 	int err;

 	init_completion(&priv->boot_comp);
-	err = request_irq(priv->pdev->irq, &p54p_interrupt,
+	err = request_irq(priv->pdev->irq, p54p_interrupt,
 			  IRQF_SHARED, "p54pci", dev);
 	if (err) {
 		dev_err(&priv->pdev->dev, "failed to register IRQ handler\n");
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 17e199546eeb..92af9b96bb7a 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -426,12 +426,16 @@ static const char p54u_romboot_3887[] = "~~~~";
 static int p54u_firmware_reset_3887(struct ieee80211_hw *dev)
 {
 	struct p54u_priv *priv = dev->priv;
-	u8 buf[4];
+	u8 *buf;
 	int ret;

-	memcpy(&buf, p54u_romboot_3887, sizeof(buf));
+	buf = kmalloc(4, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	memcpy(buf, p54u_romboot_3887, 4);
 	ret = p54u_bulk_msg(priv, P54U_PIPE_DATA,
-			    buf, sizeof(buf));
+			    buf, 4);
+	kfree(buf);
 	if (ret)
 		dev_err(&priv->udev->dev, "(p54usb) unable to jump to "
 			"boot ROM (%d)!\n", ret);
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 16429c49139c..a1a3dd15c664 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -548,7 +548,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
 	rtl818x_iowrite32(priv, &priv->map->TNPDA, priv->tx_ring[1].dma);
 	rtl818x_iowrite32(priv, &priv->map->TLPDA, priv->tx_ring[0].dma);

-	ret = request_irq(priv->pdev->irq, &rtl8180_interrupt,
+	ret = request_irq(priv->pdev->irq, rtl8180_interrupt,
 			  IRQF_SHARED, KBUILD_MODNAME, dev);
 	if (ret) {
 		printk(KERN_ERR "%s: failed to register IRQ handler\n",
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 96eddb3b1d08..6cab5a62f99e 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -3,11 +3,11 @@
 #

 ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o
-obj-$(CONFIG_CTCM) += ctcm.o fsm.o cu3088.o
+obj-$(CONFIG_CTCM) += ctcm.o fsm.o
 obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
 obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
-obj-$(CONFIG_LCS) += lcs.o cu3088.o
-obj-$(CONFIG_CLAW) += claw.o cu3088.o
+obj-$(CONFIG_LCS) += lcs.o
+obj-$(CONFIG_CLAW) += claw.o
 qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
 obj-$(CONFIG_QETH) += qeth.o
 qeth_l2-y += qeth_l2_main.o
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index c63babefb698..3c77bfe0764c 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -90,7 +90,6 @@
 #include <linux/timer.h>
 #include <linux/types.h>

-#include "cu3088.h"
 #include "claw.h"

 /*
@@ -258,6 +257,9 @@ static int claw_pm_prepare(struct ccwgroup_device *gdev)
 	return -EPERM;
 }

+/* the root device for claw group devices */
+static struct device *claw_root_dev;
+
 /* ccwgroup table */

 static struct ccwgroup_driver claw_group_driver = {
@@ -272,6 +274,47 @@ static struct ccwgroup_driver claw_group_driver = {
 	.prepare = claw_pm_prepare,
 };

+static struct ccw_device_id claw_ids[] = {
+	{CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, claw_ids);
+
+static struct ccw_driver claw_ccw_driver = {
+	.owner	= THIS_MODULE,
+	.name	= "claw",
+	.ids	= claw_ids,
+	.probe	= ccwgroup_probe_ccwdev,
+	.remove	= ccwgroup_remove_ccwdev,
+};
+
+static ssize_t
+claw_driver_group_store(struct device_driver *ddrv, const char *buf,
+			size_t count)
+{
+	int err;
+	err = ccwgroup_create_from_string(claw_root_dev,
+					  claw_group_driver.driver_id,
+					  &claw_ccw_driver, 3, buf);
+	return err ? err : count;
+}
+
+static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);
+
+static struct attribute *claw_group_attrs[] = {
+	&driver_attr_group.attr,
+	NULL,
+};
+
+static struct attribute_group claw_group_attr_group = {
+	.attrs = claw_group_attrs,
+};
+
+static const struct attribute_group *claw_group_attr_groups[] = {
+	&claw_group_attr_group,
+	NULL,
+};
+
 /*
 *       Key functions
 */
@@ -3326,7 +3369,11 @@ claw_remove_files(struct device *dev)
 static void __exit
 claw_cleanup(void)
 {
-	unregister_cu3088_discipline(&claw_group_driver);
+	driver_remove_file(&claw_group_driver.driver,
+			   &driver_attr_group);
+	ccwgroup_driver_unregister(&claw_group_driver);
+	ccw_driver_unregister(&claw_ccw_driver);
+	root_device_unregister(claw_root_dev);
 	claw_unregister_debug_facility();
 	pr_info("Driver unloaded\n");

@@ -3348,16 +3395,31 @@ claw_init(void)
 	if (ret) {
 		pr_err("Registering with the S/390 debug feature"
 		       " failed with error code %d\n", ret);
-		return ret;
+		goto out_err;
 	}
 	CLAW_DBF_TEXT(2, setup, "init_mod");
-	ret = register_cu3088_discipline(&claw_group_driver);
-	if (ret) {
-		CLAW_DBF_TEXT(2, setup, "init_bad");
-		claw_unregister_debug_facility();
-		pr_err("Registering with the cu3088 device driver failed "
-		       "with error code %d\n", ret);
-	}
+	claw_root_dev = root_device_register("qeth");
+	ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0;
+	if (ret)
+		goto register_err;
+	ret = ccw_driver_register(&claw_ccw_driver);
+	if (ret)
+		goto ccw_err;
+	claw_group_driver.driver.groups = claw_group_attr_groups;
+	ret = ccwgroup_driver_register(&claw_group_driver);
+	if (ret)
+		goto ccwgroup_err;
+	return 0;
+
+ccwgroup_err:
+	ccw_driver_unregister(&claw_ccw_driver);
+ccw_err:
+	root_device_unregister(claw_root_dev);
+register_err:
+	CLAW_DBF_TEXT(2, setup, "init_bad");
+	claw_unregister_debug_facility();
+out_err:
+	pr_err("Initializing the claw device driver failed\n");
 	return ret;
 }

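With the cu3088 layer gone, claw (and ctcm/lcs further down) each follow the same init shape: register a root device, then a ccw_driver, then a ccwgroup_driver whose sysfs "group" attribute is wired up through driver.groups, and unwind in the reverse order when any step fails. The goto ladder is the idiomatic kernel form of that unwind. A condensed self-contained sketch of the shape (step/undo names are generic stand-ins, not the driver's functions):

#include <stdio.h>

static int step_a(void) { return 0; }    /* e.g. root_device_register() */
static int step_b(void) { return 0; }    /* e.g. ccw_driver_register() */
static int step_c(void) { return -1; }   /* e.g. ccwgroup_driver_register() */
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

/* Acquire a, b, c in order; on the first failure, release in reverse. */
static int init_all(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto out_err;
	ret = step_b();
	if (ret)
		goto err_undo_a;
	ret = step_c();
	if (ret)
		goto err_undo_b;
	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
out_err:
	fprintf(stderr, "init failed: %d\n", ret);
	return ret;
}

int main(void)
{
	return init_all() ? 1 : 0;
}

Each label undoes exactly the steps that succeeded before the jump, which is why the labels sit in reverse acquisition order.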
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 005072c420d3..46d59a13db12 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -129,6 +129,18 @@ static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level)
 	} \
 	} while (0)

+/**
+ * Enum for classifying detected devices.
+ */
+enum claw_channel_types {
+	/* Device is not a channel */
+	claw_channel_type_none,
+
+	/* Device is a CLAW channel device */
+	claw_channel_type_claw
+};
+
+
 /*******************************************************
 *       Define Control Blocks                          *
 *                                                      *
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 4ded9ac2c5ef..70eb7f138414 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -44,7 +44,6 @@
 #include <asm/idals.h>

 #include "fsm.h"
-#include "cu3088.h"

 #include "ctcm_dbug.h"
 #include "ctcm_main.h"
diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h
index 2326aba9807a..046d077fabbb 100644
--- a/drivers/s390/net/ctcm_fsms.h
+++ b/drivers/s390/net/ctcm_fsms.h
@@ -39,7 +39,6 @@
 #include <asm/idals.h>

 #include "fsm.h"
-#include "cu3088.h"
 #include "ctcm_main.h"

 /*
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index db054ed1a8cc..e35713dd0504 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -51,12 +51,16 @@

 #include <asm/idals.h>

-#include "cu3088.h"
 #include "ctcm_fsms.h"
 #include "ctcm_main.h"

 /* Some common global variables */

+/**
+ * The root device for ctcm group devices
+ */
+static struct device *ctcm_root_dev;
+
 /*
  * Linked list of all detected channels.
  */
@@ -246,7 +250,7 @@ static void channel_remove(struct channel *ch)
  *
  * returns Pointer to a channel or NULL if no matching channel available.
  */
-static struct channel *channel_get(enum channel_types type,
+static struct channel *channel_get(enum ctcm_channel_types type,
 					char *id, int direction)
 {
 	struct channel *ch = channels;
@@ -1342,7 +1346,7 @@ static int ctcm_probe_device(struct ccwgroup_device *cgdev)
  *
  * returns 0 on success, !0 on error.
  */
-static int add_channel(struct ccw_device *cdev, enum channel_types type,
+static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
 			struct ctcm_priv *priv)
 {
 	struct channel **c = &channels;
@@ -1501,13 +1505,13 @@ free_return:	/* note that all channel pointers are 0 or valid */
 /*
  * Return type of a detected device.
  */
-static enum channel_types get_channel_type(struct ccw_device_id *id)
+static enum ctcm_channel_types get_channel_type(struct ccw_device_id *id)
 {
-	enum channel_types type;
-	type = (enum channel_types)id->driver_info;
+	enum ctcm_channel_types type;
+	type = (enum ctcm_channel_types)id->driver_info;

-	if (type == channel_type_ficon)
-		type = channel_type_escon;
+	if (type == ctcm_channel_type_ficon)
+		type = ctcm_channel_type_escon;

 	return type;
 }
@@ -1525,7 +1529,7 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
 	char read_id[CTCM_ID_SIZE];
 	char write_id[CTCM_ID_SIZE];
 	int direction;
-	enum channel_types type;
+	enum ctcm_channel_types type;
 	struct ctcm_priv *priv;
 	struct net_device *dev;
 	struct ccw_device *cdev0;
@@ -1720,6 +1724,11 @@ static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
 		return 0;
 	netif_device_detach(priv->channel[READ]->netdev);
 	ctcm_close(priv->channel[READ]->netdev);
+	if (!wait_event_timeout(priv->fsm->wait_q,
+	    fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
+		netif_device_attach(priv->channel[READ]->netdev);
+		return -EBUSY;
+	}
 	ccw_device_set_offline(gdev->cdev[1]);
 	ccw_device_set_offline(gdev->cdev[0]);
 	return 0;
@@ -1744,6 +1753,22 @@ err_out:
 	return rc;
 }

+static struct ccw_device_id ctcm_ids[] = {
+	{CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel},
+	{CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon},
+	{CCW_DEVICE(0x3088, 0x1f), .driver_info = ctcm_channel_type_escon},
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, ctcm_ids);
+
+static struct ccw_driver ctcm_ccw_driver = {
+	.owner	= THIS_MODULE,
+	.name	= "ctcm",
+	.ids	= ctcm_ids,
+	.probe	= ccwgroup_probe_ccwdev,
+	.remove	= ccwgroup_remove_ccwdev,
+};
+
 static struct ccwgroup_driver ctcm_group_driver = {
 	.owner       = THIS_MODULE,
 	.name        = CTC_DRIVER_NAME,
@@ -1758,6 +1783,33 @@ static struct ccwgroup_driver ctcm_group_driver = {
 	.restore     = ctcm_pm_resume,
 };

+static ssize_t
+ctcm_driver_group_store(struct device_driver *ddrv, const char *buf,
+				size_t count)
+{
+	int err;
+
+	err = ccwgroup_create_from_string(ctcm_root_dev,
+					ctcm_group_driver.driver_id,
+					&ctcm_ccw_driver, 2, buf);
+	return err ? err : count;
+}
+
+static DRIVER_ATTR(group, 0200, NULL, ctcm_driver_group_store);
+
+static struct attribute *ctcm_group_attrs[] = {
+	&driver_attr_group.attr,
+	NULL,
+};
+
+static struct attribute_group ctcm_group_attr_group = {
+	.attrs = ctcm_group_attrs,
+};
+
+static const struct attribute_group *ctcm_group_attr_groups[] = {
+	&ctcm_group_attr_group,
+	NULL,
+};

 /*
  * Module related routines
@@ -1771,7 +1823,10 @@ static struct ccwgroup_driver ctcm_group_driver = {
  */
 static void __exit ctcm_exit(void)
 {
-	unregister_cu3088_discipline(&ctcm_group_driver);
+	driver_remove_file(&ctcm_group_driver.driver, &driver_attr_group);
+	ccwgroup_driver_unregister(&ctcm_group_driver);
+	ccw_driver_unregister(&ctcm_ccw_driver);
+	root_device_unregister(ctcm_root_dev);
 	ctcm_unregister_dbf_views();
 	pr_info("CTCM driver unloaded\n");
 }
@@ -1797,17 +1852,31 @@ static int __init ctcm_init(void)
 	channels = NULL;

 	ret = ctcm_register_dbf_views();
-	if (ret) {
-		return ret;
-	}
-	ret = register_cu3088_discipline(&ctcm_group_driver);
-	if (ret) {
-		ctcm_unregister_dbf_views();
-		pr_err("%s / register_cu3088_discipline failed, ret = %d\n",
-			__func__, ret);
-		return ret;
-	}
+	if (ret)
+		goto out_err;
+	ctcm_root_dev = root_device_register("ctcm");
+	ret = IS_ERR(ctcm_root_dev) ? PTR_ERR(ctcm_root_dev) : 0;
+	if (ret)
+		goto register_err;
+	ret = ccw_driver_register(&ctcm_ccw_driver);
+	if (ret)
+		goto ccw_err;
+	ctcm_group_driver.driver.groups = ctcm_group_attr_groups;
+	ret = ccwgroup_driver_register(&ctcm_group_driver);
+	if (ret)
+		goto ccwgroup_err;
 	print_banner();
+	return 0;
+
+ccwgroup_err:
+	ccw_driver_unregister(&ctcm_ccw_driver);
+ccw_err:
+	root_device_unregister(ctcm_root_dev);
+register_err:
+	ctcm_unregister_dbf_views();
+out_err:
+	pr_err("%s / Initializing the ctcm device driver failed, ret = %d\n",
+		__func__, ret);
 	return ret;
 }

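The new code in ctcm_pm_suspend() blocks until the device FSM actually reaches DEV_STATE_STOPPED before taking the channels offline, using the wait queue that the fsm.c/fsm.h hunks below add (fsm_newstate() now does a wake_up() on it, and suspend waits with wait_event_timeout()). The same idiom, sketched in portable C with a pthread condition variable standing in for the kernel wait queue; kernel code would use wait_event_timeout()/wake_up() exactly as the diff does:

#include <pthread.h>
#include <stdio.h>
#include <time.h>

/* Analogue of the fsm wait_q: every state change signals waiters. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t state_changed = PTHREAD_COND_INITIALIZER;
static int state;                       /* 0 = running, 1 = stopped */

static void fsm_newstate(int newstate)  /* kernel: fsm_newstate() + wake_up() */
{
	pthread_mutex_lock(&lock);
	state = newstate;
	pthread_cond_broadcast(&state_changed);
	pthread_mutex_unlock(&lock);
}

/* kernel: wait_event_timeout(fsm->wait_q, state == STOPPED, CTCM_TIME_5_SEC) */
static int wait_for_stopped(int timeout_sec)
{
	struct timespec deadline;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_sec;
	pthread_mutex_lock(&lock);
	while (state != 1 && err == 0)
		err = pthread_cond_timedwait(&state_changed, &lock, &deadline);
	pthread_mutex_unlock(&lock);
	return state == 1 ? 0 : -1;     /* -1 plays the role of -EBUSY */
}

int main(void)
{
	fsm_newstate(1);                /* the FSM reaches "stopped"... */
	return wait_for_stopped(5);     /* ...so the waiter returns 0 */
}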
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index d925e732b7d8..d34fa14f44e7 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -16,7 +16,6 @@
 #include <linux/netdevice.h>

 #include "fsm.h"
-#include "cu3088.h"
 #include "ctcm_dbug.h"
 #include "ctcm_mpc.h"

@@ -66,6 +65,23 @@
 		ctcmpc_dumpit(buf, len); \
 	} while (0)

+/**
+ * Enum for classifying detected devices
+ */
+enum ctcm_channel_types {
+	/* Device is not a channel */
+	ctcm_channel_type_none,
+
+	/* Device is a CTC/A */
+	ctcm_channel_type_parallel,
+
+	/* Device is a FICON channel */
+	ctcm_channel_type_ficon,
+
+	/* Device is a ESCON channel */
+	ctcm_channel_type_escon
+};
+
 /*
  * CCW commands, used in this driver.
  */
@@ -121,7 +137,7 @@ struct channel {
 	 * Type of this channel.
 	 * CTC/A or Escon for valid channels.
 	 */
-	enum channel_types type;
+	enum ctcm_channel_types type;
 	/*
 	 * Misc. flags. See CHANNEL_FLAGS_... below
 	 */
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 781e18be7e8f..5978b390153f 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -53,7 +53,6 @@
 #include <linux/moduleparam.h>
 #include <asm/idals.h>

-#include "cu3088.h"
 #include "ctcm_mpc.h"
 #include "ctcm_main.h"
 #include "ctcm_fsms.h"
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 8452bb052d68..738ad26c74a7 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -158,6 +158,15 @@ static ssize_t ctcm_proto_store(struct device *dev,
 	return count;
 }

+const char *ctcm_type[] = {
+	"not a channel",
+	"CTC/A",
+	"FICON channel",
+	"ESCON channel",
+	"unknown channel type",
+	"unsupported channel type",
+};
+
 static ssize_t ctcm_type_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
@@ -168,7 +177,7 @@ static ssize_t ctcm_type_show(struct device *dev,
 		return -ENODEV;

 	return sprintf(buf, "%s\n",
-			cu3088_type[cgdev->cdev[0]->id.driver_info]);
+			ctcm_type[cgdev->cdev[0]->id.driver_info]);
 }

 static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write);
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
deleted file mode 100644
index 48383459e99b..000000000000
--- a/drivers/s390/net/cu3088.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * CTC / LCS ccw_device driver
- *
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Arnd Bergmann <arndb@de.ibm.com>
- *            Cornelia Huck <cornelia.huck@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/err.h>
-
-#include <asm/ccwdev.h>
-#include <asm/ccwgroup.h>
-
-#include "cu3088.h"
-
-const char *cu3088_type[] = {
-	"not a channel",
-	"CTC/A",
-	"ESCON channel",
-	"FICON channel",
-	"OSA LCS card",
-	"CLAW channel device",
-	"unknown channel type",
-	"unsupported channel type",
-};
-
-/* static definitions */
-
-static struct ccw_device_id cu3088_ids[] = {
-	{ CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel },
-	{ CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon },
-	{ CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon },
-	{ CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 },
-	{ CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw },
-	{ /* end of list */ }
-};
-
-static struct ccw_driver cu3088_driver;
-
-static struct device *cu3088_root_dev;
-
-static ssize_t
-group_write(struct device_driver *drv, const char *buf, size_t count)
-{
-	int ret;
-	struct ccwgroup_driver *cdrv;
-
-	cdrv = to_ccwgroupdrv(drv);
-	if (!cdrv)
-		return -EINVAL;
-	ret = ccwgroup_create_from_string(cu3088_root_dev, cdrv->driver_id,
-					  &cu3088_driver, 2, buf);
-
-	return (ret == 0) ? count : ret;
-}
-
-static DRIVER_ATTR(group, 0200, NULL, group_write);
-
-/* Register-unregister for ctc&lcs */
-int
-register_cu3088_discipline(struct ccwgroup_driver *dcp)
-{
-	int rc;
-
-	if (!dcp)
-		return -EINVAL;
-
-	/* Register discipline.*/
-	rc = ccwgroup_driver_register(dcp);
-	if (rc)
-		return rc;
-
-	rc = driver_create_file(&dcp->driver, &driver_attr_group);
-	if (rc)
-		ccwgroup_driver_unregister(dcp);
-
-	return rc;
-
-}
-
-void
-unregister_cu3088_discipline(struct ccwgroup_driver *dcp)
-{
-	if (!dcp)
-		return;
-
-	driver_remove_file(&dcp->driver, &driver_attr_group);
-	ccwgroup_driver_unregister(dcp);
-}
-
-static struct ccw_driver cu3088_driver = {
-	.owner	= THIS_MODULE,
-	.ids	= cu3088_ids,
-	.name	= "cu3088",
-	.probe	= ccwgroup_probe_ccwdev,
-	.remove	= ccwgroup_remove_ccwdev,
-};
-
-/* module setup */
-static int __init
-cu3088_init (void)
-{
-	int rc;
-
-	cu3088_root_dev = root_device_register("cu3088");
-	if (IS_ERR(cu3088_root_dev))
-		return PTR_ERR(cu3088_root_dev);
-	rc = ccw_driver_register(&cu3088_driver);
-	if (rc)
-		root_device_unregister(cu3088_root_dev);
-
-	return rc;
-}
-
-static void __exit
-cu3088_exit (void)
-{
-	ccw_driver_unregister(&cu3088_driver);
-	root_device_unregister(cu3088_root_dev);
-}
-
-MODULE_DEVICE_TABLE(ccw,cu3088_ids);
-MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
-MODULE_LICENSE("GPL");
-
-module_init(cu3088_init);
-module_exit(cu3088_exit);
-
-EXPORT_SYMBOL_GPL(cu3088_type);
-EXPORT_SYMBOL_GPL(register_cu3088_discipline);
-EXPORT_SYMBOL_GPL(unregister_cu3088_discipline);
diff --git a/drivers/s390/net/cu3088.h b/drivers/s390/net/cu3088.h
deleted file mode 100644
index d8558a7105a5..000000000000
--- a/drivers/s390/net/cu3088.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef _CU3088_H
-#define _CU3088_H
-
-/**
- * Enum for classifying detected devices.
- */
-enum channel_types {
-	/* Device is not a channel */
-	channel_type_none,
-
-	/* Device is a CTC/A */
-	channel_type_parallel,
-
-	/* Device is a ESCON channel */
-	channel_type_escon,
-
-	/* Device is a FICON channel */
-	channel_type_ficon,
-
-	/* Device is a OSA2 card */
-	channel_type_osa2,
-
-	/* Device is a CLAW channel device */
-	channel_type_claw,
-
-	/* Device is a channel, but we don't know
-	 * anything about it */
-	channel_type_unknown,
-
-	/* Device is an unsupported model */
-	channel_type_unsupported,
-
-	/* number of type entries */
-	num_channel_types
-};
-
-extern const char *cu3088_type[num_channel_types];
-extern int register_cu3088_discipline(struct ccwgroup_driver *);
-extern void unregister_cu3088_discipline(struct ccwgroup_driver *);
-
-#endif
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index 2c1db8036b7c..cae48cbc5e96 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -27,6 +27,7 @@ init_fsm(char *name, const char **state_names, const char **event_names, int nr_
 		return NULL;
 	}
 	strlcpy(this->name, name, sizeof(this->name));
+	init_waitqueue_head(&this->wait_q);

 	f = kzalloc(sizeof(fsm), order);
 	if (f == NULL) {
diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h
index af679c10f1bd..1e8b235d95b5 100644
--- a/drivers/s390/net/fsm.h
+++ b/drivers/s390/net/fsm.h
@@ -66,6 +66,7 @@ typedef struct fsm_instance_t {
 	char name[16];
 	void *userdata;
 	int userint;
+	wait_queue_head_t wait_q;
 #if FSM_DEBUG_HISTORY
 	int history_index;
 	int history_size;
@@ -197,6 +198,7 @@ fsm_newstate(fsm_instance *fi, int newstate)
 		printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name,
 			fi->f->state_names[newstate]);
 #endif
+	wake_up(&fi->wait_q);
 }

 /**
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 5e46415d3e13..f6cc46dc0501 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -47,7 +47,6 @@
 #include <asm/ccwgroup.h>

 #include "lcs.h"
-#include "cu3088.h"


 #if !defined(CONFIG_NET_ETHERNET) && \
@@ -60,7 +59,11 @@
  */

 static char version[] __initdata = "LCS driver";
-static char debug_buffer[255];
+
+/**
+ * the root device for lcs group devices
+ */
+static struct device *lcs_root_dev;

 /**
  * Some prototypes.
@@ -76,6 +79,7 @@ static int lcs_recovery(void *ptr);
 /**
  * Debug Facility Stuff
  */
+static char debug_buffer[255];
 static debug_info_t *lcs_dbf_setup;
 static debug_info_t *lcs_dbf_trace;

@@ -1968,6 +1972,15 @@ lcs_portno_store (struct device *dev, struct device_attribute *attr, const char

 static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);

+const char *lcs_type[] = {
+	"not a channel",
+	"2216 parallel",
+	"2216 channel",
+	"OSA LCS card",
+	"unknown channel type",
+	"unsupported channel type",
+};
+
 static ssize_t
 lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -1977,7 +1990,7 @@ lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
 	if (!cgdev)
 		return -ENODEV;

-	return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
+	return sprintf(buf, "%s\n", lcs_type[cgdev->cdev[0]->id.driver_info]);
 }

 static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);
@@ -2370,6 +2383,22 @@ static int lcs_restore(struct ccwgroup_device *gdev)
 	return lcs_pm_resume(card);
 }

+static struct ccw_device_id lcs_ids[] = {
+	{CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
+	{CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
+	{CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2},
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, lcs_ids);
+
+static struct ccw_driver lcs_ccw_driver = {
+	.owner	= THIS_MODULE,
+	.name	= "lcs",
+	.ids	= lcs_ids,
+	.probe	= ccwgroup_probe_ccwdev,
+	.remove	= ccwgroup_remove_ccwdev,
+};
+
 /**
  * LCS ccwgroup driver registration
  */
@@ -2389,6 +2418,33 @@ static struct ccwgroup_driver lcs_group_driver = {
 	.restore     = lcs_restore,
 };

+static ssize_t
+lcs_driver_group_store(struct device_driver *ddrv, const char *buf,
+		       size_t count)
+{
+	int err;
+	err = ccwgroup_create_from_string(lcs_root_dev,
+					  lcs_group_driver.driver_id,
+					  &lcs_ccw_driver, 2, buf);
+	return err ? err : count;
+}
+
+static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store);
+
+static struct attribute *lcs_group_attrs[] = {
+	&driver_attr_group.attr,
+	NULL,
+};
+
+static struct attribute_group lcs_group_attr_group = {
+	.attrs = lcs_group_attrs,
+};
+
+static const struct attribute_group *lcs_group_attr_groups[] = {
+	&lcs_group_attr_group,
+	NULL,
+};
+
 /**
  * LCS Module/Kernel initialization function
  */
@@ -2400,17 +2456,30 @@ __init lcs_init_module(void)
 	pr_info("Loading %s\n", version);
 	rc = lcs_register_debug_facility();
 	LCS_DBF_TEXT(0, setup, "lcsinit");
-	if (rc) {
-		pr_err("Initialization failed\n");
-		return rc;
-	}
-
-	rc = register_cu3088_discipline(&lcs_group_driver);
-	if (rc) {
-		pr_err("Initialization failed\n");
-		return rc;
-	}
+	if (rc)
+		goto out_err;
+	lcs_root_dev = root_device_register("lcs");
+	rc = IS_ERR(lcs_root_dev) ? PTR_ERR(lcs_root_dev) : 0;
+	if (rc)
+		goto register_err;
+	rc = ccw_driver_register(&lcs_ccw_driver);
+	if (rc)
+		goto ccw_err;
+	lcs_group_driver.driver.groups = lcs_group_attr_groups;
+	rc = ccwgroup_driver_register(&lcs_group_driver);
+	if (rc)
+		goto ccwgroup_err;
 	return 0;
+
+ccwgroup_err:
+	ccw_driver_unregister(&lcs_ccw_driver);
+ccw_err:
+	root_device_unregister(lcs_root_dev);
+register_err:
+	lcs_unregister_debug_facility();
+out_err:
+	pr_err("Initializing the lcs device driver failed\n");
+	return rc;
 }


@@ -2422,7 +2491,11 @@ __exit lcs_cleanup_module(void)
 {
 	pr_info("Terminating lcs module.\n");
 	LCS_DBF_TEXT(0, trace, "cleanup");
-	unregister_cu3088_discipline(&lcs_group_driver);
+	driver_remove_file(&lcs_group_driver.driver,
+			   &driver_attr_group);
+	ccwgroup_driver_unregister(&lcs_group_driver);
+	ccw_driver_unregister(&lcs_ccw_driver);
+	root_device_unregister(lcs_root_dev);
 	lcs_unregister_debug_facility();
 }

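One detail shared by all three conversions above: the "group" attribute is plugged into the driver through driver.groups before ccwgroup_driver_register() runs, instead of calling driver_create_file() afterwards as the old cu3088 code did. Attributes listed in the groups array are created together with the driver's sysfs directory, so userspace never observes the driver without its attribute. A condensed, self-contained sketch of the shape (the struct fields are reduced to the relevant ones and register_driver() is a stand-in for the real bus call):

#include <stdio.h>
#include <stddef.h>

struct attribute { const char *name; };
struct attribute_group { struct attribute **attrs; };

struct driver {
	const char *name;
	/* created together with the driver's sysfs directory */
	const struct attribute_group **groups;
};

static struct attribute group_attr = { .name = "group" };
static struct attribute *my_attrs[] = { &group_attr, NULL };
static struct attribute_group my_group = { .attrs = my_attrs };
static const struct attribute_group *my_groups[] = { &my_group, NULL };

static struct driver my_driver = { .name = "lcs" };

/* Stand-in for the bus registration call; the real one creates the
 * sysfs directory and every attribute in drv->groups in one step. */
static int register_driver(struct driver *drv)
{
	printf("registered %s with %s attributes\n", drv->name,
	       drv->groups ? "its" : "no");
	return 0;
}

int main(void)
{
	/* wire the attributes up *before* registering, as the diffs do */
	my_driver.groups = my_groups;
	return register_driver(&my_driver);
}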
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index 6d668642af27..8c03392ac833 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -36,6 +36,24 @@ static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
 #define CARD_FROM_DEV(cdev) \
 	(struct lcs_card *) dev_get_drvdata( \
 		&((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev);
+
+/**
+ * Enum for classifying detected devices.
+ */
+enum lcs_channel_types {
+	/* Device is not a channel */
+	lcs_channel_type_none,
+
+	/* Device is a 2216 channel */
+	lcs_channel_type_parallel,
+
+	/* Device is a 2216 channel */
+	lcs_channel_type_2216,
+
+	/* Device is a OSA2 card */
+	lcs_channel_type_osa2
+};
+
 /**
  * CCW commands used in this driver
  */
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index c84eadd3602a..395c04c2b00f 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -741,13 +741,13 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
 	if (single_flag) {
 		if ((skb = skb_dequeue(&conn->commit_queue))) {
 			atomic_dec(&skb->users);
-			dev_kfree_skb_any(skb);
 			if (privptr) {
 				privptr->stats.tx_packets++;
 				privptr->stats.tx_bytes +=
 					(skb->len - NETIUCV_HDRLEN
 					 - NETIUCV_HDRLEN);
 			}
+			dev_kfree_skb_any(skb);
 		}
 	}
 	conn->tx_buff->data = conn->tx_buff->head;
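The netiucv hunk is a use-after-free fix: the skb was freed with dev_kfree_skb_any() and then dereferenced for skb->len while updating the transmit statistics. Moving the free after the last use is the entire fix. A minimal reproduction of the bug shape in plain C, with malloc standing in for the skb:

#include <stdio.h>
#include <stdlib.h>

struct packet { size_t len; };

static size_t tx_bytes;

static void account_and_free(struct packet *pkt)
{
	/* BUGGY order (what the old code did):
	 *     free(pkt);
	 *     tx_bytes += pkt->len;   <- read of freed memory
	 *
	 * Fixed order: consume every field first, free last. */
	tx_bytes += pkt->len;
	free(pkt);
}

int main(void)
{
	struct packet *pkt = malloc(sizeof(*pkt));

	if (!pkt)
		return 1;
	pkt->len = 42;
	account_and_free(pkt);
	printf("tx_bytes=%zu\n", tx_bytes);
	return 0;
}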
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index e8f72d715eba..b232693378cd 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -122,7 +122,6 @@ struct qeth_perf_stats {
 	__u64 outbound_do_qdio_start_time;
 	unsigned int outbound_do_qdio_cnt;
 	unsigned int outbound_do_qdio_time;
-	/* eddp data */
 	unsigned int large_send_bytes;
 	unsigned int large_send_cnt;
 	unsigned int sg_skbs_sent;
@@ -135,6 +134,7 @@ struct qeth_perf_stats {
 	unsigned int sg_frags_rx;
 	unsigned int sg_alloc_page_rx;
 	unsigned int tx_csum;
+	unsigned int tx_lin;
 };

 /* Routing stuff */
@@ -648,6 +648,7 @@ struct qeth_card_options {
 	enum qeth_large_send_types large_send;
 	int performance_stats;
 	int rx_sg_cb;
+	enum qeth_ipa_isolation_modes isolation;
 };

 /*
@@ -776,7 +777,6 @@ static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
 	list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
 }

-struct qeth_eddp_context;
 extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
 extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
 const char *qeth_get_cardname_short(struct qeth_card *);
@@ -836,7 +836,6 @@ void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char);
 struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
 int qeth_mdio_read(struct net_device *, int, int);
 int qeth_snmp_command(struct qeth_card *, char __user *);
-int qeth_set_large_send(struct qeth_card *, enum qeth_large_send_types);
 struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32);
 int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *,
 			unsigned long);
@@ -856,6 +855,7 @@ void qeth_core_get_strings(struct net_device *, u32, u8 *);
 void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
 void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...);
 int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
+int qeth_set_access_ctrl_online(struct qeth_card *card);

 /* exports for OSN */
 int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index edee4dc6430c..d34804d5ece1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -270,41 +270,6 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
270 return qeth_alloc_buffer_pool(card); 270 return qeth_alloc_buffer_pool(card);
271} 271}
272 272
273int qeth_set_large_send(struct qeth_card *card,
274 enum qeth_large_send_types type)
275{
276 int rc = 0;
277
278 if (card->dev == NULL) {
279 card->options.large_send = type;
280 return 0;
281 }
282 if (card->state == CARD_STATE_UP)
283 netif_tx_disable(card->dev);
284 card->options.large_send = type;
285 switch (card->options.large_send) {
286 case QETH_LARGE_SEND_TSO:
287 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
288 card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
289 NETIF_F_HW_CSUM;
290 } else {
291 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
292 NETIF_F_HW_CSUM);
293 card->options.large_send = QETH_LARGE_SEND_NO;
294 rc = -EOPNOTSUPP;
295 }
296 break;
297 default: /* includes QETH_LARGE_SEND_NO */
298 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
299 NETIF_F_HW_CSUM);
300 break;
301 }
302 if (card->state == CARD_STATE_UP)
303 netif_wake_queue(card->dev);
304 return rc;
305}
306EXPORT_SYMBOL_GPL(qeth_set_large_send);
307
308static int qeth_issue_next_read(struct qeth_card *card) 273static int qeth_issue_next_read(struct qeth_card *card)
309{ 274{
310 int rc; 275 int rc;
@@ -1079,6 +1044,7 @@ static void qeth_set_intial_options(struct qeth_card *card)
1079 card->options.add_hhlen = DEFAULT_ADD_HHLEN; 1044 card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1080 card->options.performance_stats = 0; 1045 card->options.performance_stats = 0;
1081 card->options.rx_sg_cb = QETH_RX_SG_CB; 1046 card->options.rx_sg_cb = QETH_RX_SG_CB;
1047 card->options.isolation = ISOLATION_MODE_NONE;
1082} 1048}
1083 1049
1084static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) 1050static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
@@ -3389,6 +3355,156 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
3389} 3355}
3390EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); 3356EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
3391 3357
3358static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
3359 struct qeth_reply *reply, unsigned long data)
3360{
3361 struct qeth_ipa_cmd *cmd;
3362 struct qeth_set_access_ctrl *access_ctrl_req;
3363 int rc;
3364
3365 QETH_DBF_TEXT(TRACE, 4, "setaccb");
3366
3367 cmd = (struct qeth_ipa_cmd *) data;
3368 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
3369 QETH_DBF_TEXT_(SETUP, 2, "setaccb");
3370 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
3371 QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
3372 cmd->data.setadapterparms.hdr.return_code);
3373 switch (cmd->data.setadapterparms.hdr.return_code) {
3374 case SET_ACCESS_CTRL_RC_SUCCESS:
3375 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
3376 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
3377 {
3378 card->options.isolation = access_ctrl_req->subcmd_code;
3379 if (card->options.isolation == ISOLATION_MODE_NONE) {
3380 dev_info(&card->gdev->dev,
3381 "QDIO data connection isolation is deactivated\n");
3382 } else {
3383 dev_info(&card->gdev->dev,
3384 "QDIO data connection isolation is activated\n");
3385 }
3386 QETH_DBF_MESSAGE(3, "OK:SET_ACCESS_CTRL(%s, %d)==%d\n",
3387 card->gdev->dev.kobj.name,
3388 access_ctrl_req->subcmd_code,
3389 cmd->data.setadapterparms.hdr.return_code);
3390 rc = 0;
3391 break;
3392 }
3393 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
3394 {
3395 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
3396 card->gdev->dev.kobj.name,
3397 access_ctrl_req->subcmd_code,
3398 cmd->data.setadapterparms.hdr.return_code);
3399 dev_err(&card->gdev->dev, "Adapter does not "
3400 "support QDIO data connection isolation\n");
3401
3402 /* ensure isolation mode is "none" */
3403 card->options.isolation = ISOLATION_MODE_NONE;
3404 rc = -EOPNOTSUPP;
3405 break;
3406 }
3407 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
3408 {
3409 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
3410 card->gdev->dev.kobj.name,
3411 access_ctrl_req->subcmd_code,
3412 cmd->data.setadapterparms.hdr.return_code);
3413 dev_err(&card->gdev->dev,
3414 "Adapter is dedicated. "
3415 "QDIO data connection isolation not supported\n");
3416
3417 /* ensure isolation mode is "none" */
3418 card->options.isolation = ISOLATION_MODE_NONE;
3419 rc = -EOPNOTSUPP;
3420 break;
3421 }
3422 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
3423 {
3424 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
3425 card->gdev->dev.kobj.name,
3426 access_ctrl_req->subcmd_code,
3427 cmd->data.setadapterparms.hdr.return_code);
3428 dev_err(&card->gdev->dev,
3429 "TSO does not permit QDIO data connection isolation\n");
3430
3431 /* ensure isolation mode is "none" */
3432 card->options.isolation = ISOLATION_MODE_NONE;
3433 rc = -EPERM;
3434 break;
3435 }
3436 default:
3437 {
3438 /* this should never happen */
3439 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d"
3440 "==UNKNOWN\n",
3441 card->gdev->dev.kobj.name,
3442 access_ctrl_req->subcmd_code,
3443 cmd->data.setadapterparms.hdr.return_code);
3444
3445 /* ensure isolation mode is "none" */
3446 card->options.isolation = ISOLATION_MODE_NONE;
3447 rc = 0;
3448 break;
3449 }
3450 }
3451 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
3452 return rc;
3453}
3454
3455static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
3456 enum qeth_ipa_isolation_modes isolation)
3457{
3458 int rc;
3459 struct qeth_cmd_buffer *iob;
3460 struct qeth_ipa_cmd *cmd;
3461 struct qeth_set_access_ctrl *access_ctrl_req;
3462
3463 QETH_DBF_TEXT(TRACE, 4, "setacctl");
3464
3465 QETH_DBF_TEXT_(SETUP, 2, "setacctl");
3466 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
3467
3468 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
3469 sizeof(struct qeth_ipacmd_setadpparms_hdr) +
3470 sizeof(struct qeth_set_access_ctrl));
3471 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3472 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
3473 access_ctrl_req->subcmd_code = isolation;
3474
3475 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
3476 NULL);
3477 QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
3478 return rc;
3479}
3480
3481int qeth_set_access_ctrl_online(struct qeth_card *card)
3482{
3483 int rc = 0;
3484
3485 QETH_DBF_TEXT(TRACE, 4, "setactlo");
3486
3487 if (card->info.type == QETH_CARD_TYPE_OSAE &&
3488 qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
3489 rc = qeth_setadpparms_set_access_ctrl(card,
3490 card->options.isolation);
3491 if (rc) {
3492 QETH_DBF_MESSAGE(3,
3493 "IPA(SET_ACCESS_CTRL,%s,%d) sent failed",
3494 card->gdev->dev.kobj.name,
3495 rc);
3496 }
3497 } else if (card->options.isolation != ISOLATION_MODE_NONE) {
3498 card->options.isolation = ISOLATION_MODE_NONE;
3499
3500 dev_err(&card->gdev->dev, "Adapter does not "
3501 "support QDIO data connection isolation\n");
3502 rc = -EOPNOTSUPP;
3503 }
3504 return rc;
3505}
3506EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
3507
3392void qeth_tx_timeout(struct net_device *dev) 3508void qeth_tx_timeout(struct net_device *dev)
3393{ 3509{
3394 struct qeth_card *card; 3510 struct qeth_card *card;
@@ -3732,30 +3848,36 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
3732int qeth_core_hardsetup_card(struct qeth_card *card) 3848int qeth_core_hardsetup_card(struct qeth_card *card)
3733{ 3849{
3734 struct qdio_ssqd_desc *ssqd; 3850 struct qdio_ssqd_desc *ssqd;
3735 int retries = 3; 3851 int retries = 0;
3736 int mpno = 0; 3852 int mpno = 0;
3737 int rc; 3853 int rc;
3738 3854
3739 QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); 3855 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
3740 atomic_set(&card->force_alloc_skb, 0); 3856 atomic_set(&card->force_alloc_skb, 0);
3741retry: 3857retry:
3742 if (retries < 3) { 3858 if (retries)
3743 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", 3859 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
3744 dev_name(&card->gdev->dev)); 3860 dev_name(&card->gdev->dev));
3745 ccw_device_set_offline(CARD_DDEV(card)); 3861 ccw_device_set_offline(CARD_DDEV(card));
3746 ccw_device_set_offline(CARD_WDEV(card)); 3862 ccw_device_set_offline(CARD_WDEV(card));
3747 ccw_device_set_offline(CARD_RDEV(card)); 3863 ccw_device_set_offline(CARD_RDEV(card));
3748 ccw_device_set_online(CARD_RDEV(card)); 3864 rc = ccw_device_set_online(CARD_RDEV(card));
3749 ccw_device_set_online(CARD_WDEV(card)); 3865 if (rc)
3750 ccw_device_set_online(CARD_DDEV(card)); 3866 goto retriable;
3751 } 3867 rc = ccw_device_set_online(CARD_WDEV(card));
3868 if (rc)
3869 goto retriable;
3870 rc = ccw_device_set_online(CARD_DDEV(card));
3871 if (rc)
3872 goto retriable;
3752 rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); 3873 rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
3874retriable:
3753 if (rc == -ERESTARTSYS) { 3875 if (rc == -ERESTARTSYS) {
3754 QETH_DBF_TEXT(SETUP, 2, "break1"); 3876 QETH_DBF_TEXT(SETUP, 2, "break1");
3755 return rc; 3877 return rc;
3756 } else if (rc) { 3878 } else if (rc) {
3757 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 3879 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3758 if (--retries < 0) 3880 if (++retries > 3)
3759 goto out; 3881 goto out;
3760 else 3882 else
3761 goto retry; 3883 goto retry;
@@ -4303,6 +4425,7 @@ static struct {
4303 {"tx do_QDIO time"}, 4425 {"tx do_QDIO time"},
4304 {"tx do_QDIO count"}, 4426 {"tx do_QDIO count"},
4305 {"tx csum"}, 4427 {"tx csum"},
4428 {"tx lin"},
4306}; 4429};
4307 4430
4308int qeth_core_get_sset_count(struct net_device *dev, int stringset) 4431int qeth_core_get_sset_count(struct net_device *dev, int stringset)
@@ -4360,6 +4483,7 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
 	data[31] = card->perf_stats.outbound_do_qdio_time;
 	data[32] = card->perf_stats.outbound_do_qdio_cnt;
 	data[33] = card->perf_stats.tx_csum;
+	data[34] = card->perf_stats.tx_lin;
 }
 EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
 
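
The reworked hardsetup path above counts attempts up from zero, gives up after three failures, and lets a failed ccw_device_set_online() fall through the new retriable: label into the same retry bookkeeping as qeth_qdio_clear_card(). A standalone sketch of that control flow, with hypothetical try_online()/try_clear() helpers standing in for the channel and QDIO calls:

#include <stdio.h>

/* hypothetical stand-ins for the ccw/qdio operations */
static int try_online(int attempt) { return attempt < 2 ? -1 : 0; }
static int try_clear(void) { return 0; }

static int hardsetup(void)
{
	int retries = 0;
	int rc;
retry:
	if (retries)
		printf("retrying IDX activation, attempt %d\n", retries);
	rc = try_online(retries);
	if (rc)
		goto retriable;
	rc = try_clear();
retriable:
	if (rc) {
		if (++retries > 3)
			return rc;	/* the driver's "goto out" case */
		goto retry;
	}
	return 0;
}

int main(void)
{
	printf("hardsetup: %d\n", hardsetup());
	return 0;
}
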
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index eecb2ee62e85..52c03438dbec 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -234,18 +234,19 @@ enum qeth_ipa_setdelip_flags {
 
 /* SETADAPTER IPA Command: ****************************************************/
 enum qeth_ipa_setadp_cmd {
-	IPA_SETADP_QUERY_COMMANDS_SUPPORTED	= 0x0001,
-	IPA_SETADP_ALTER_MAC_ADDRESS		= 0x0002,
-	IPA_SETADP_ADD_DELETE_GROUP_ADDRESS	= 0x0004,
-	IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR	= 0x0008,
-	IPA_SETADP_SET_ADDRESSING_MODE		= 0x0010,
-	IPA_SETADP_SET_CONFIG_PARMS		= 0x0020,
-	IPA_SETADP_SET_CONFIG_PARMS_EXTENDED	= 0x0040,
-	IPA_SETADP_SET_BROADCAST_MODE		= 0x0080,
-	IPA_SETADP_SEND_OSA_MESSAGE		= 0x0100,
-	IPA_SETADP_SET_SNMP_CONTROL		= 0x0200,
-	IPA_SETADP_QUERY_CARD_INFO		= 0x0400,
-	IPA_SETADP_SET_PROMISC_MODE		= 0x0800,
+	IPA_SETADP_QUERY_COMMANDS_SUPPORTED	= 0x00000001L,
+	IPA_SETADP_ALTER_MAC_ADDRESS		= 0x00000002L,
+	IPA_SETADP_ADD_DELETE_GROUP_ADDRESS	= 0x00000004L,
+	IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR	= 0x00000008L,
+	IPA_SETADP_SET_ADDRESSING_MODE		= 0x00000010L,
+	IPA_SETADP_SET_CONFIG_PARMS		= 0x00000020L,
+	IPA_SETADP_SET_CONFIG_PARMS_EXTENDED	= 0x00000040L,
+	IPA_SETADP_SET_BROADCAST_MODE		= 0x00000080L,
+	IPA_SETADP_SEND_OSA_MESSAGE		= 0x00000100L,
+	IPA_SETADP_SET_SNMP_CONTROL		= 0x00000200L,
+	IPA_SETADP_QUERY_CARD_INFO		= 0x00000400L,
+	IPA_SETADP_SET_PROMISC_MODE		= 0x00000800L,
+	IPA_SETADP_SET_ACCESS_CONTROL		= 0x00010000L,
 };
 enum qeth_ipa_mac_ops {
 	CHANGE_ADDR_READ_MAC		= 0,
@@ -264,6 +265,20 @@ enum qeth_ipa_promisc_modes {
 	SET_PROMISC_MODE_OFF		= 0,
 	SET_PROMISC_MODE_ON		= 1,
 };
+enum qeth_ipa_isolation_modes {
+	ISOLATION_MODE_NONE		= 0x00000000L,
+	ISOLATION_MODE_FWD		= 0x00000001L,
+	ISOLATION_MODE_DROP		= 0x00000002L,
+};
+enum qeth_ipa_set_access_mode_rc {
+	SET_ACCESS_CTRL_RC_SUCCESS		= 0x0000,
+	SET_ACCESS_CTRL_RC_NOT_SUPPORTED	= 0x0004,
+	SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED	= 0x0008,
+	SET_ACCESS_CTRL_RC_ALREADY_ISOLATED	= 0x0010,
+	SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER	= 0x0014,
+	SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF	= 0x0018,
+};
+
 
 /* (SET)DELIP(M) IPA stuff ***************************************************/
 struct qeth_ipacmd_setdelip4 {
@@ -376,6 +391,11 @@ struct qeth_snmp_ureq {
 	struct qeth_snmp_cmd cmd;
 } __attribute__((packed));
 
+/* SET_ACCESS_CONTROL: same format for request and reply */
+struct qeth_set_access_ctrl {
+	__u32 subcmd_code;
+} __attribute__((packed));
+
 struct qeth_ipacmd_setadpparms_hdr {
 	__u32 supp_hw_cmds;
 	__u32 reserved1;
@@ -394,6 +414,7 @@ struct qeth_ipacmd_setadpparms {
 		struct qeth_query_cmds_supp query_cmds_supp;
 		struct qeth_change_addr change_addr;
 		struct qeth_snmp_cmd snmp;
+		struct qeth_set_access_ctrl set_access_ctrl;
 		__u32 mode;
 	} data;
 } __attribute__ ((packed));
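
The sub-command codes are widened to 32-bit values so the new IPA_SETADP_SET_ACCESS_CONTROL bit (0x00010000) fits above the existing twelve; the adapter reports the sub-commands it supports as one bitmask, which qeth_adp_supported() tests before the driver sends SET_ACCESS_CONTROL. A minimal sketch of that flag test against a trimmed copy of the enum:

#include <stdio.h>

/* trimmed copy of the widened qeth_ipa_setadp_cmd values */
enum setadp_cmd {
	SETADP_QUERY_COMMANDS_SUPPORTED = 0x00000001L,
	SETADP_SET_PROMISC_MODE         = 0x00000800L,
	SETADP_SET_ACCESS_CONTROL       = 0x00010000L,
};

/* the adapter advertises supported sub-commands as a single mask */
static int adp_supported(unsigned int supp_mask, enum setadp_cmd cmd)
{
	return (supp_mask & cmd) != 0;
}

int main(void)
{
	unsigned int mask = SETADP_QUERY_COMMANDS_SUPPORTED |
			    SETADP_SET_ACCESS_CONTROL;

	printf("access control: %d\n",
	       adp_supported(mask, SETADP_SET_ACCESS_CONTROL));
	printf("promisc mode:   %d\n",
	       adp_supported(mask, SETADP_SET_PROMISC_MODE));
	return 0;
}
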
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 33505c2a0e3a..9ff2b36fdc43 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -416,7 +416,11 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
 static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
 		qeth_dev_layer2_store);
 
-static ssize_t qeth_dev_large_send_show(struct device *dev,
+#define ATTR_QETH_ISOLATION_NONE ("none")
+#define ATTR_QETH_ISOLATION_FWD ("forward")
+#define ATTR_QETH_ISOLATION_DROP ("drop")
+
+static ssize_t qeth_dev_isolation_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
@@ -424,44 +428,69 @@ static ssize_t qeth_dev_large_send_show(struct device *dev,
 	if (!card)
 		return -EINVAL;
 
-	switch (card->options.large_send) {
-	case QETH_LARGE_SEND_NO:
-		return sprintf(buf, "%s\n", "no");
-	case QETH_LARGE_SEND_TSO:
-		return sprintf(buf, "%s\n", "TSO");
+	switch (card->options.isolation) {
+	case ISOLATION_MODE_NONE:
+		return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE);
+	case ISOLATION_MODE_FWD:
+		return snprintf(buf, 9, "%s\n", ATTR_QETH_ISOLATION_FWD);
+	case ISOLATION_MODE_DROP:
+		return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_DROP);
 	default:
-		return sprintf(buf, "%s\n", "N/A");
+		return snprintf(buf, 5, "%s\n", "N/A");
 	}
 }
 
-static ssize_t qeth_dev_large_send_store(struct device *dev,
+static ssize_t qeth_dev_isolation_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
-	enum qeth_large_send_types type;
+	enum qeth_ipa_isolation_modes isolation;
 	int rc = 0;
-	char *tmp;
+	char *tmp, *curtoken;
+	curtoken = (char *) buf;
 
-	if (!card)
-		return -EINVAL;
-	tmp = strsep((char **) &buf, "\n");
-	if (!strcmp(tmp, "no")) {
-		type = QETH_LARGE_SEND_NO;
-	} else if (!strcmp(tmp, "TSO")) {
-		type = QETH_LARGE_SEND_TSO;
+	if (!card) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* check for unknown, too, in case we do not yet know who we are */
+	if (card->info.type != QETH_CARD_TYPE_OSAE &&
+	    card->info.type != QETH_CARD_TYPE_UNKNOWN) {
+		rc = -EOPNOTSUPP;
+		dev_err(&card->gdev->dev, "Adapter does not "
+			"support QDIO data connection isolation\n");
+		goto out;
+	}
+
+	/* parse input into isolation mode */
+	tmp = strsep(&curtoken, "\n");
+	if (!strcmp(tmp, ATTR_QETH_ISOLATION_NONE)) {
+		isolation = ISOLATION_MODE_NONE;
+	} else if (!strcmp(tmp, ATTR_QETH_ISOLATION_FWD)) {
+		isolation = ISOLATION_MODE_FWD;
+	} else if (!strcmp(tmp, ATTR_QETH_ISOLATION_DROP)) {
+		isolation = ISOLATION_MODE_DROP;
 	} else {
-		return -EINVAL;
+		rc = -EINVAL;
+		goto out;
 	}
-	if (card->options.large_send == type)
-		return count;
-	rc = qeth_set_large_send(card, type);
-	if (rc)
-		return rc;
-	return count;
+	rc = count;
+
+	/* defer IP assist if device is offline (until discipline->set_online)*/
+	card->options.isolation = isolation;
+	if (card->state == CARD_STATE_SOFTSETUP ||
+	    card->state == CARD_STATE_UP) {
+		int ipa_rc = qeth_set_access_ctrl_online(card);
+		if (ipa_rc != 0)
+			rc = ipa_rc;
+	}
out:
+	return rc;
 }
 
-static DEVICE_ATTR(large_send, 0644, qeth_dev_large_send_show,
-		qeth_dev_large_send_store);
+static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show,
+		qeth_dev_isolation_store);
 
 static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
 {
@@ -582,7 +611,7 @@ static struct attribute *qeth_device_attrs[] = {
 	&dev_attr_recover.attr,
 	&dev_attr_performance_stats.attr,
 	&dev_attr_layer2.attr,
-	&dev_attr_large_send.attr,
+	&dev_attr_isolation.attr,
 	NULL,
 };
 
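
The new isolation store handler splits the sysfs buffer at the newline with strsep() and compares the remaining token against the three keyword strings, returning -EINVAL for anything else. A userspace sketch of the same parse, assuming a glibc-style strsep():

#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

/* returns the isolation mode, or -1 for the driver's -EINVAL case */
static int parse_isolation(const char *buf)
{
	char tmp[16], *cur = tmp, *tok;

	snprintf(tmp, sizeof(tmp), "%s", buf);
	tok = strsep(&cur, "\n");	/* strip the trailing newline */
	if (!strcmp(tok, "none"))
		return 0;
	if (!strcmp(tok, "forward"))
		return 1;
	if (!strcmp(tok, "drop"))
		return 2;
	return -1;
}

int main(void)
{
	printf("%d %d %d\n", parse_isolation("none\n"),
	       parse_isolation("drop\n"), parse_isolation("bogus\n"));
	return 0;
}
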
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index b61d5c723c50..0b763396d5d1 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -940,30 +940,17 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 
 	qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
 	recover_flag = card->state;
-	rc = ccw_device_set_online(CARD_RDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		return -EIO;
-	}
-	rc = ccw_device_set_online(CARD_WDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		return -EIO;
-	}
-	rc = ccw_device_set_online(CARD_DDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		return -EIO;
-	}
-
 	rc = qeth_core_hardsetup_card(card);
 	if (rc) {
 		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+		rc = -ENODEV;
 		goto out_remove;
 	}
 
-	if (!card->dev && qeth_l2_setup_netdev(card))
+	if (!card->dev && qeth_l2_setup_netdev(card)) {
+		rc = -ENODEV;
 		goto out_remove;
+	}
 
 	if (card->info.type != QETH_CARD_TYPE_OSN)
 		qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
@@ -983,12 +970,14 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 			card->lan_online = 0;
 			return 0;
 		}
+		rc = -ENODEV;
 		goto out_remove;
 	} else
 		card->lan_online = 1;
 
 	if (card->info.type != QETH_CARD_TYPE_OSN) {
-		qeth_set_large_send(card, card->options.large_send);
+		/* configure isolation level */
+		qeth_set_access_ctrl_online(card);
 		qeth_l2_process_vlans(card, 0);
 	}
 
@@ -997,6 +986,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 	rc = qeth_init_qdio_queues(card);
 	if (rc) {
 		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+		rc = -ENODEV;
 		goto out_remove;
 	}
 	card->state = CARD_STATE_SOFTSETUP;
@@ -1018,6 +1008,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 	/* let user_space know that device is online */
 	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
 	return 0;
+
out_remove:
 	card->use_hard_stop = 1;
 	qeth_l2_stop_card(card, 0);
@@ -1028,7 +1019,7 @@ out_remove:
 		card->state = CARD_STATE_RECOVER;
 	else
 		card->state = CARD_STATE_DOWN;
-	return -ENODEV;
+	return rc;
 }
 
 static int qeth_l2_set_online(struct ccwgroup_device *gdev)
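
The set-online rework above replaces early `return -EIO` exits with one cleanup label: every failure site records its own rc and jumps to out_remove, and the function finally returns that rc instead of a blanket -ENODEV. A minimal sketch of the idiom, with hypothetical step_a()/step_b() stages:

#include <stdio.h>

static int step_a(void) { return 0; }	/* succeeds */
static int step_b(void) { return -5; }	/* fails   */

static int set_online(void)
{
	int rc;

	rc = step_a();
	if (rc)
		goto out_remove;
	rc = step_b();
	if (rc)
		goto out_remove;
	return 0;

out_remove:
	/* undo partial setup here, then propagate the real cause */
	return rc;
}

int main(void)
{
	printf("set_online: %d\n", set_online());
	return 0;
}
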
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 9f143c83bba3..321988fa9f7d 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -60,5 +60,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
 int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
 void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
 			const u8 *);
+int qeth_l3_set_large_send(struct qeth_card *, enum qeth_large_send_types);
+int qeth_l3_set_rx_csum(struct qeth_card *, enum qeth_checksum_types);
 
 #endif /* __QETH_L3_H__ */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 4ca28c16ca83..fd1b6ed3721f 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -41,6 +41,32 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *,
 static int __qeth_l3_set_online(struct ccwgroup_device *, int);
 static int __qeth_l3_set_offline(struct ccwgroup_device *, int);
 
+int qeth_l3_set_large_send(struct qeth_card *card,
+		enum qeth_large_send_types type)
+{
+	int rc = 0;
+
+	card->options.large_send = type;
+	if (card->dev == NULL)
+		return 0;
+
+	if (card->options.large_send == QETH_LARGE_SEND_TSO) {
+		if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
+			card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
+					NETIF_F_HW_CSUM;
+		} else {
+			card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
+					NETIF_F_HW_CSUM);
+			card->options.large_send = QETH_LARGE_SEND_NO;
+			rc = -EOPNOTSUPP;
+		}
+	} else {
+		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
+					NETIF_F_HW_CSUM);
+		card->options.large_send = QETH_LARGE_SEND_NO;
+	}
+	return rc;
+}
 
 static int qeth_l3_isxdigit(char *buf)
 {
@@ -1439,6 +1465,35 @@ static int qeth_l3_send_checksum_command(struct qeth_card *card)
 	return 0;
 }
 
+int qeth_l3_set_rx_csum(struct qeth_card *card,
+	enum qeth_checksum_types csum_type)
+{
+	int rc = 0;
+
+	if (card->options.checksum_type == HW_CHECKSUMMING) {
+		if ((csum_type != HW_CHECKSUMMING) &&
+			(card->state != CARD_STATE_DOWN)) {
+			rc = qeth_l3_send_simple_setassparms(card,
+				IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0);
+			if (rc)
+				return -EIO;
+		}
+	} else {
+		if (csum_type == HW_CHECKSUMMING) {
+			if (card->state != CARD_STATE_DOWN) {
+				if (!qeth_is_supported(card,
+				    IPA_INBOUND_CHECKSUM))
+					return -EPERM;
+				rc = qeth_l3_send_checksum_command(card);
+				if (rc)
+					return -EIO;
+			}
+		}
+	}
+	card->options.checksum_type = csum_type;
+	return rc;
+}
+
 static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
 {
 	int rc = 0;
@@ -1506,6 +1561,8 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card)
 static int qeth_l3_start_ipassists(struct qeth_card *card)
 {
 	QETH_DBF_TEXT(TRACE, 3, "strtipas");
+
+	qeth_set_access_ctrl_online(card);	/* go on*/
 	qeth_l3_start_ipa_arp_processing(card);	/* go on*/
 	qeth_l3_start_ipa_ip_fragmentation(card);	/* go on*/
 	qeth_l3_start_ipa_source_mac(card);	/* go on*/
@@ -2684,6 +2741,24 @@ static void qeth_tx_csum(struct sk_buff *skb)
 	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
 }
 
+static inline int qeth_l3_tso_elements(struct sk_buff *skb)
+{
+	unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
+		tcp_hdr(skb)->doff * 4;
+	int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data);
+	int elements = PFN_UP(tcpd + tcpd_len) - PFN_DOWN(tcpd);
+	elements += skb_shinfo(skb)->nr_frags;
+	return elements;
+}
+
+static inline int qeth_l3_tso_check(struct sk_buff *skb)
+{
+	int len = ((unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4) -
+		(unsigned long)skb->data;
+	return (((unsigned long)skb->data & PAGE_MASK) !=
+		(((unsigned long)skb->data + len) & PAGE_MASK));
+}
+
 static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	int rc;
@@ -2777,16 +2852,21 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* fix hardware limitation: as long as we do not have sbal
 	 * chaining we can not send long frag lists
 	 */
-	if ((large_send == QETH_LARGE_SEND_TSO) &&
-	    ((skb_shinfo(new_skb)->nr_frags + 2) > 16)) {
+	if (large_send == QETH_LARGE_SEND_TSO) {
+		if (qeth_l3_tso_elements(new_skb) + 1 > 16) {
 			if (skb_linearize(new_skb))
 				goto tx_drop;
+			if (card->options.performance_stats)
+				card->perf_stats.tx_lin++;
+		}
 	}
 
 	if ((large_send == QETH_LARGE_SEND_TSO) &&
 	    (cast_type == RTN_UNSPEC)) {
 		hdr = (struct qeth_hdr *)skb_push(new_skb,
 						sizeof(struct qeth_hdr_tso));
+		if (qeth_l3_tso_check(new_skb))
+			QETH_DBF_MESSAGE(2, "tso skb misaligned\n");
 		memset(hdr, 0, sizeof(struct qeth_hdr_tso));
 		qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
 		qeth_tso_fill_header(card, hdr, new_skb);
@@ -2903,46 +2983,28 @@ static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev)
 static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data)
 {
 	struct qeth_card *card = dev->ml_priv;
-	enum qeth_card_states old_state;
 	enum qeth_checksum_types csum_type;
 
-	if ((card->state != CARD_STATE_UP) &&
-	    (card->state != CARD_STATE_DOWN))
-		return -EPERM;
-
 	if (data)
 		csum_type = HW_CHECKSUMMING;
 	else
 		csum_type = SW_CHECKSUMMING;
 
-	if (card->options.checksum_type != csum_type) {
-		old_state = card->state;
-		if (card->state == CARD_STATE_UP)
-			__qeth_l3_set_offline(card->gdev, 1);
-		card->options.checksum_type = csum_type;
-		if (old_state == CARD_STATE_UP)
-			__qeth_l3_set_online(card->gdev, 1);
-	}
-	return 0;
+	return qeth_l3_set_rx_csum(card, csum_type);
 }
 
 static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
 {
 	struct qeth_card *card = dev->ml_priv;
+	int rc = 0;
 
 	if (data) {
-		if (card->options.large_send == QETH_LARGE_SEND_NO) {
-			if (card->info.type == QETH_CARD_TYPE_IQD)
-				return -EPERM;
-			else
-				card->options.large_send = QETH_LARGE_SEND_TSO;
-			dev->features |= NETIF_F_TSO;
-		}
+		rc = qeth_l3_set_large_send(card, QETH_LARGE_SEND_TSO);
 	} else {
 		dev->features &= ~NETIF_F_TSO;
 		card->options.large_send = QETH_LARGE_SEND_NO;
 	}
-	return 0;
+	return rc;
 }
 
 static const struct ethtool_ops qeth_l3_ethtool_ops = {
@@ -3058,6 +3120,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 				NETIF_F_HW_VLAN_RX |
 				NETIF_F_HW_VLAN_FILTER;
 	card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+	card->dev->gso_max_size = 15 * PAGE_SIZE;
 
 	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
 	return register_netdev(card->dev);
@@ -3154,32 +3217,19 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 	qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
 
 	recover_flag = card->state;
-	rc = ccw_device_set_online(CARD_RDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		return -EIO;
-	}
-	rc = ccw_device_set_online(CARD_WDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		return -EIO;
-	}
-	rc = ccw_device_set_online(CARD_DDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		return -EIO;
-	}
-
 	rc = qeth_core_hardsetup_card(card);
 	if (rc) {
 		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+		rc = -ENODEV;
 		goto out_remove;
 	}
 
 	qeth_l3_query_ipassists(card, QETH_PROT_IPV4);
 
-	if (!card->dev && qeth_l3_setup_netdev(card))
+	if (!card->dev && qeth_l3_setup_netdev(card)) {
+		rc = -ENODEV;
 		goto out_remove;
+	}
 
 	card->state = CARD_STATE_HARDSETUP;
 	qeth_print_status_message(card);
@@ -3196,10 +3246,11 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 			card->lan_online = 0;
 			return 0;
 		}
+		rc = -ENODEV;
 		goto out_remove;
 	} else
 		card->lan_online = 1;
-	qeth_set_large_send(card, card->options.large_send);
+	qeth_l3_set_large_send(card, card->options.large_send);
 
 	rc = qeth_l3_setadapter_parms(card);
 	if (rc)
@@ -3218,6 +3269,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 	rc = qeth_init_qdio_queues(card);
 	if (rc) {
 		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+		rc = -ENODEV;
 		goto out_remove;
 	}
 	card->state = CARD_STATE_SOFTSETUP;
@@ -3248,7 +3300,7 @@ out_remove:
 		card->state = CARD_STATE_RECOVER;
 	else
 		card->state = CARD_STATE_DOWN;
-	return -ENODEV;
+	return rc;
 }
 
 static int qeth_l3_set_online(struct ccwgroup_device *gdev)
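
qeth_l3_tso_elements() sizes the linear TCP payload in buffer elements by rounding its end address up and its start address down to page-frame numbers; the difference is the number of pages the payload touches, to which the fragment count is added. The same arithmetic as a standalone sketch, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages */
#define PFN_UP(x)   (((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

/* number of pages spanned by a buffer at addr, len bytes long */
static unsigned long pages_spanned(unsigned long addr, unsigned long len)
{
	return PFN_UP(addr + len) - PFN_DOWN(addr);
}

int main(void)
{
	/* 256 bytes straddling a page boundary span two pages... */
	printf("%lu\n", pages_spanned(0x1f80, 0x100));
	/* ...the same 256 bytes inside one page span only one */
	printf("%lu\n", pages_spanned(0x1000, 0x100));
	return 0;
}
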
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index c144b9924d52..3360b0941aa1 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -293,31 +293,79 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
+	enum qeth_checksum_types csum_type;
 	char *tmp;
+	int rc;
 
 	if (!card)
 		return -EINVAL;
 
-	if ((card->state != CARD_STATE_DOWN) &&
-	    (card->state != CARD_STATE_RECOVER))
-		return -EPERM;
-
 	tmp = strsep((char **) &buf, "\n");
 	if (!strcmp(tmp, "sw_checksumming"))
-		card->options.checksum_type = SW_CHECKSUMMING;
+		csum_type = SW_CHECKSUMMING;
 	else if (!strcmp(tmp, "hw_checksumming"))
-		card->options.checksum_type = HW_CHECKSUMMING;
+		csum_type = HW_CHECKSUMMING;
 	else if (!strcmp(tmp, "no_checksumming"))
-		card->options.checksum_type = NO_CHECKSUMMING;
-	else {
+		csum_type = NO_CHECKSUMMING;
+	else
 		return -EINVAL;
-	}
+
+	rc = qeth_l3_set_rx_csum(card, csum_type);
+	if (rc)
+		return rc;
 	return count;
 }
 
 static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
 			qeth_l3_dev_checksum_store);
 
+static ssize_t qeth_l3_dev_large_send_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	switch (card->options.large_send) {
+	case QETH_LARGE_SEND_NO:
+		return sprintf(buf, "%s\n", "no");
+	case QETH_LARGE_SEND_TSO:
+		return sprintf(buf, "%s\n", "TSO");
+	default:
+		return sprintf(buf, "%s\n", "N/A");
+	}
+}
+
+static ssize_t qeth_l3_dev_large_send_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	enum qeth_large_send_types type;
+	int rc = 0;
+	char *tmp;
+
+	if (!card)
+		return -EINVAL;
+	tmp = strsep((char **) &buf, "\n");
+	if (!strcmp(tmp, "no"))
+		type = QETH_LARGE_SEND_NO;
+	else if (!strcmp(tmp, "TSO"))
+		type = QETH_LARGE_SEND_TSO;
+	else
+		return -EINVAL;
+
+	if (card->options.large_send == type)
+		return count;
+	rc = qeth_l3_set_large_send(card, type);
+	if (rc)
+		return rc;
+	return count;
+}
+
+static DEVICE_ATTR(large_send, 0644, qeth_l3_dev_large_send_show,
+		   qeth_l3_dev_large_send_store);
+
 static struct attribute *qeth_l3_device_attrs[] = {
 	&dev_attr_route4.attr,
 	&dev_attr_route6.attr,
@@ -325,6 +373,7 @@ static struct attribute *qeth_l3_device_attrs[] = {
 	&dev_attr_broadcast_mode.attr,
 	&dev_attr_canonical_macaddr.attr,
 	&dev_attr_checksumming.attr,
+	&dev_attr_large_send.attr,
 	NULL,
 };
 
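
The show side of these attributes prints exactly one token plus a newline, and the snprintf() bound used in the new isolation handler is strlen(token) + 2: one byte for the '\n' and one for the terminating NUL. A small sketch of that bound:

#include <stdio.h>

/* bound = strlen(token) + 2 covers the '\n' and the NUL */
static int show_mode(char *buf, size_t bound, const char *token)
{
	return snprintf(buf, bound, "%s\n", token);
}

int main(void)
{
	char buf[32];
	int n = show_mode(buf, 5, "TSO");	/* "TSO\n" + NUL = 5 bytes */

	printf("%d:%s", n, buf);
	return 0;
}
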
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 580b6004d00e..005e1525ab86 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -136,10 +136,6 @@ extern struct ctl_table ether_table[];
 
 extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
 
-/*
- * Display a 6 byte device address (MAC) in a readable format.
- */
-extern char *print_mac(char *buf, const unsigned char *addr) __deprecated;
 #define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
 #define MAC_BUF_SIZE	18
 #define DECLARE_MAC_BUF(var) char var[MAC_BUF_SIZE]
diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h
index 4c218ee7587a..8687a7dc0632 100644
--- a/include/linux/isdn_ppp.h
+++ b/include/linux/isdn_ppp.h
@@ -157,7 +157,7 @@ typedef struct {
 
 typedef struct {
   int mp_mrru;			/* unused */
-  struct sk_buff_head frags;	/* fragments sl list */
+  struct sk_buff * frags;	/* fragments sl list -- use skb->next */
   long frames;			/* number of frames in the frame list */
   unsigned int seq;		/* last processed packet seq #: any packets
 				 * with smaller seq # will be dropped
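
The MP reassembly queue drops the doubly linked sk_buff_head in favour of a bare head pointer chained through skb->next. A userspace model of that intrusive singly linked list, with a hypothetical struct frag standing in for sk_buff:

#include <stdio.h>
#include <stdlib.h>

/* fragments chained through ->next, newest at the head */
struct frag {
	int seq;
	struct frag *next;
};

static void push(struct frag **list, int seq)
{
	struct frag *f = malloc(sizeof(*f));

	f->seq = seq;
	f->next = *list;
	*list = f;
}

int main(void)
{
	struct frag *frags = NULL, *p;

	push(&frags, 1);
	push(&frags, 2);
	for (p = frags; p; p = p->next)	/* walk via ->next */
		printf("seq %d\n", p->seq);
	while (frags) {
		p = frags;
		frags = p->next;
		free(p);
	}
	return 0;
}
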
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 083b5989cecb..97873e31661c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -63,30 +63,69 @@ struct wireless_dev;
 #define HAVE_FREE_NETDEV		/* free_netdev() */
 #define HAVE_NETDEV_PRIV		/* netdev_priv() */
 
-#define NET_XMIT_SUCCESS	0
-#define NET_XMIT_DROP		1	/* skb dropped			*/
-#define NET_XMIT_CN		2	/* congestion notification	*/
-#define NET_XMIT_POLICED	3	/* skb is shot by police	*/
-#define NET_XMIT_MASK		0xFFFF	/* qdisc flags in net/sch_generic.h */
-
 /* Backlog congestion levels */
 #define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
 #define NET_RX_DROP		1	/* packet dropped */
+
+/*
+ * Transmit return codes: transmit return codes originate from three different
+ * namespaces:
+ *
+ * - qdisc return codes
+ * - driver transmit return codes
+ * - errno values
+ *
+ * Drivers are allowed to return any one of those in their hard_start_xmit()
+ * function. Real network devices commonly used with qdiscs should only return
+ * the driver transmit return codes though - when qdiscs are used, the actual
+ * transmission happens asynchronously, so the value is not propagated to
+ * higher layers. Virtual network devices transmit synchronously, in this case
+ * the driver transmit return codes are consumed by dev_queue_xmit(), all
+ * others are propagated to higher layers.
+ */
+
+/* qdisc ->enqueue() return codes. */
+#define NET_XMIT_SUCCESS	0x00
+#define NET_XMIT_DROP		0x01	/* skb dropped			*/
+#define NET_XMIT_CN		0x02	/* congestion notification	*/
+#define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
+#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */
 
 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
  * indicates that the device will soon be dropping packets, or already drops
  * some packets of the same priority; prompting us to send less aggressively. */
-#define net_xmit_eval(e)	((e) == NET_XMIT_CN? 0 : (e))
+#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
 #define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
 
 /* Driver transmit return codes */
+#define NETDEV_TX_MASK		0xf0
+
 enum netdev_tx {
-	NETDEV_TX_OK = 0,	/* driver took care of packet */
-	NETDEV_TX_BUSY,		/* driver tx path was busy*/
-	NETDEV_TX_LOCKED = -1,	/* driver tx lock was already taken */
+	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
+	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
+	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy*/
+	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
 };
 typedef enum netdev_tx netdev_tx_t;
 
+/*
+ * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
+ * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
+ */
+static inline bool dev_xmit_complete(int rc)
+{
+	/*
+	 * Positive cases with an skb consumed by a driver:
+	 * - successful transmission (rc == NETDEV_TX_OK)
+	 * - error while transmitting (rc < 0)
+	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
+	 */
+	if (likely(rc < NET_XMIT_MASK))
+		return true;
+
+	return false;
+}
+
 #endif
 
 #define MAX_ADDR_LEN	32		/* Largest hardware address length */
@@ -857,7 +896,7 @@ struct net_device {
 	/* device index hash chain */
 	struct hlist_node	index_hlist;
 
-	struct net_device	*link_watch_next;
+	struct list_head	link_watch_list;
 
 	/* register/unregister state machine */
 	enum { NETREG_UNINITIALIZED=0,
@@ -1093,6 +1132,16 @@ static inline struct net_device *next_net_device(struct net_device *dev)
 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 }
 
+static inline struct net_device *next_net_device_rcu(struct net_device *dev)
+{
+	struct list_head *lh;
+	struct net *net;
+
+	net = dev_net(dev);
+	lh = rcu_dereference(dev->dev_list.next);
+	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
+}
+
 static inline struct net_device *first_net_device(struct net *net)
 {
 	return list_empty(&net->dev_base_head) ? NULL :
@@ -1551,6 +1600,7 @@ static inline void dev_hold(struct net_device *dev)
  */
 
 extern void linkwatch_fire_event(struct net_device *dev);
+extern void linkwatch_forget_dev(struct net_device *dev);
 
 /**
  *	netif_carrier_ok - test if carrier present
@@ -1892,6 +1942,7 @@ extern void netdev_features_change(struct net_device *dev);
 extern void		dev_load(struct net *net, const char *name);
 extern void		dev_mcast_init(void);
 extern const struct net_device_stats *dev_get_stats(struct net_device *dev);
+extern void		dev_txq_stats_fold(const struct net_device *dev, struct net_device_stats *stats);
 
 extern int		netdev_max_backlog;
 extern int		weight_p;
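
With qdisc codes confined below NET_XMIT_MASK (0x0f) and driver codes starting at 0x10, one comparison now decides whether an skb was consumed: any negative errno or any value below the mask means the driver or stack took ownership, while NETDEV_TX_BUSY/LOCKED mean the caller must requeue. A sketch mirroring dev_xmit_complete() outside the kernel headers:

#include <stdio.h>
#include <stdbool.h>

#define NET_XMIT_MASK	0x0f	/* qdisc codes live below this */
#define NETDEV_TX_OK	0x00
#define NETDEV_TX_BUSY	0x10	/* driver codes start at 0x10 */

/* rc < NET_XMIT_MASK (including negatives) means skb consumed */
static bool xmit_complete(int rc)
{
	return rc < NET_XMIT_MASK;
}

int main(void)
{
	printf("TX_OK consumed:   %d\n", xmit_complete(NETDEV_TX_OK));
	printf("TX_BUSY consumed: %d\n", xmit_complete(NETDEV_TX_BUSY));
	printf("-EIO consumed:    %d\n", xmit_complete(-5));
	return 0;
}
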
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 29714b8441b1..b0c3671d463c 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -202,6 +202,7 @@ static inline int notifier_to_errno(int ret)
 #define NETDEV_BONDING_OLDTYPE  0x000E
 #define NETDEV_BONDING_NEWTYPE  0x000F
 #define NETDEV_POST_INIT	0x0010
+#define NETDEV_UNREGISTER_PERNET 0x0011
 
 #define SYS_DOWN	0x0001	/* Notify of system down */
 #define SYS_RESTART	SYS_DOWN
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index eeecb8547a2a..32d7d77b4a01 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -81,6 +81,12 @@ enum {
 	TCP_DATA_OFFSET = __cpu_to_be32(0xF0000000)
 };
 
+/*
+ *	TCP general constants
+ */
+#define TCP_MSS_DEFAULT		 536U	/* IPv4 (RFC1122, RFC2581) */
+#define TCP_MSS_DESIRED		1220U	/* IPv6 (tunneled), EDNS0 (RFC3226) */
+
 /* TCP socket options */
 #define TCP_NODELAY		1	/* Turn off Nagle's algorithm. */
 #define TCP_MAXSEG		2	/* Limit MSS */
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 5b698b3b463d..41cbddd25b70 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -92,8 +92,8 @@ static inline struct net *ib_net(struct inet_bind_bucket *ib)
 	return read_pnet(&ib->ib_net);
 }
 
-#define inet_bind_bucket_for_each(tb, node, head) \
-	hlist_for_each_entry(tb, node, head, node)
+#define inet_bind_bucket_for_each(tb, pos, head) \
+	hlist_for_each_entry(tb, pos, head, node)
 
 struct inet_bind_hashbucket {
 	spinlock_t		lock;
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 35ad7b930467..87b1df0d4d8c 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -17,15 +17,15 @@ struct inet_peer {
 	/* group together avl_left,avl_right,v4daddr to speedup lookups */
 	struct inet_peer	*avl_left, *avl_right;
 	__be32			v4daddr;	/* peer's address */
-	__u16			avl_height;
-	__u16			ip_id_count;	/* IP ID for the next packet */
+	__u32			avl_height;
 	struct list_head	unused;
 	__u32			dtime;		/* the time of last use of not
 						 * referenced entries */
 	atomic_t		refcnt;
 	atomic_t		rid;		/* Frag reception counter */
+	atomic_t		ip_id_count;	/* IP ID for the next packet */
 	__u32			tcp_ts;
-	unsigned long		tcp_ts_stamp;
+	__u32			tcp_ts_stamp;
 };
 
 void inet_initpeers(void) __init;
@@ -36,17 +36,11 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create);
 /* can be called from BH context or outside */
 extern void inet_putpeer(struct inet_peer *p);
 
-extern spinlock_t inet_peer_idlock;
 /* can be called with or without local BH being disabled */
 static inline __u16 inet_getid(struct inet_peer *p, int more)
 {
-	__u16 id;
-
-	spin_lock_bh(&inet_peer_idlock);
-	id = p->ip_id_count;
-	p->ip_id_count += 1 + more;
-	spin_unlock_bh(&inet_peer_idlock);
-	return id;
+	more++;
+	return atomic_add_return(more, &p->ip_id_count) - more;
 }
 
 #endif /* _NET_INETPEER_H */
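
The rewritten inet_getid() reserves 1 + more IDs with a single atomic_add_return() and recovers the first ID of the reserved range by subtracting more from the returned (new) counter value, so no spinlock is needed. The C11 sketch below uses atomic_fetch_add(), which returns the old value directly:

#include <stdio.h>
#include <stdatomic.h>

/* reserve 1 + more IDs; the old counter value is the first ID */
static unsigned short getid(atomic_int *ctr, int more)
{
	more++;
	return (unsigned short)atomic_fetch_add(ctr, more);
}

int main(void)
{
	atomic_int ctr = 100;

	printf("%u\n", getid(&ctr, 0));	/* 100; counter is now 101 */
	printf("%u\n", getid(&ctr, 2));	/* 101; counter is now 104 */
	return 0;
}
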
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h
index afa7defceb14..d7b989ca3d63 100644
--- a/include/net/phonet/pn_dev.h
+++ b/include/net/phonet/pn_dev.h
@@ -25,7 +25,7 @@
 
 struct phonet_device_list {
 	struct list_head list;
-	spinlock_t lock;
+	struct mutex lock;
 };
 
 struct phonet_device_list *phonet_device_list(struct net *net);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 6e5f0e0c7967..cd2e18778f81 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1980,7 +1980,7 @@ void sctp_assoc_set_primary(struct sctp_association *,
 void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
 				    struct sctp_transport *);
 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *,
-				     gfp_t);
+				     sctp_scope_t, gfp_t);
 int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *,
 					 struct sctp_cookie*,
 					 gfp_t gfp);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index bf20f88fd033..325bfcf5c934 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -62,9 +62,6 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
 /* Minimal accepted MSS. It is (60+60+8) - (20+20). */
 #define TCP_MIN_MSS		88U
 
-/* Minimal RCV_MSS. */
-#define TCP_MIN_RCVMSS		536U
-
 /* The least MTU to use for probing */
 #define TCP_BASE_MSS		512
 
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 5e18c6ab2c6a..4a310906b3e8 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -39,7 +39,7 @@ void timecounter_init(struct timecounter *tc,
 	tc->cycle_last = cc->read(cc);
 	tc->nsec = start_tstamp;
 }
-EXPORT_SYMBOL(timecounter_init);
+EXPORT_SYMBOL_GPL(timecounter_init);
 
 /**
  * timecounter_read_delta - get nanoseconds since last call of this function
@@ -83,7 +83,7 @@ u64 timecounter_read(struct timecounter *tc)
 
 	return nsec;
 }
-EXPORT_SYMBOL(timecounter_read);
+EXPORT_SYMBOL_GPL(timecounter_read);
 
 u64 timecounter_cyc2time(struct timecounter *tc,
 			 cycle_t cycle_tstamp)
@@ -105,7 +105,7 @@ u64 timecounter_cyc2time(struct timecounter *tc,
 
 	return nsec;
 }
-EXPORT_SYMBOL(timecounter_cyc2time);
+EXPORT_SYMBOL_GPL(timecounter_cyc2time);
 
 /*[Clocksource internal variables]---------
  * curr_clocksource:
diff --git a/kernel/time/timecompare.c b/kernel/time/timecompare.c
index 71e7f1a19156..96ff643a5a59 100644
--- a/kernel/time/timecompare.c
+++ b/kernel/time/timecompare.c
@@ -40,7 +40,7 @@ ktime_t timecompare_transform(struct timecompare *sync,
 
 	return ns_to_ktime(nsec);
 }
-EXPORT_SYMBOL(timecompare_transform);
+EXPORT_SYMBOL_GPL(timecompare_transform);
 
 int timecompare_offset(struct timecompare *sync,
 		       s64 *offset,
@@ -131,7 +131,7 @@ int timecompare_offset(struct timecompare *sync,
 
 	return used;
 }
-EXPORT_SYMBOL(timecompare_offset);
+EXPORT_SYMBOL_GPL(timecompare_offset);
 
 void __timecompare_update(struct timecompare *sync,
 			  u64 source_tstamp)
@@ -188,4 +188,4 @@ void __timecompare_update(struct timecompare *sync,
 		}
 	}
 }
-EXPORT_SYMBOL(__timecompare_update);
+EXPORT_SYMBOL_GPL(__timecompare_update);
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 39f8d0120104..d9cb020029b9 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -41,7 +41,7 @@
 
 /* Global VLAN variables */
 
-int vlan_net_id;
+int vlan_net_id __read_mostly;
 
 /* Our listing of VLAN group(s) */
 static struct hlist_head vlan_group_hash[VLAN_GRP_HASH_SIZE];
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 68f9290e6837..5685296017e9 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -16,6 +16,21 @@ struct vlan_priority_tci_mapping {
 	struct vlan_priority_tci_mapping *next;
 };
 
+
+/**
+ *	struct vlan_rx_stats - VLAN percpu rx stats
+ *	@rx_packets: number of received packets
+ *	@rx_bytes: number of received bytes
+ *	@multicast: number of received multicast packets
+ *	@rx_errors: number of errors
+ */
+struct vlan_rx_stats {
+	unsigned long rx_packets;
+	unsigned long rx_bytes;
+	unsigned long multicast;
+	unsigned long rx_errors;
+};
+
 /**
  *	struct vlan_dev_info - VLAN private device data
  *	@nr_ingress_mappings: number of ingress priority mappings
@@ -29,6 +44,7 @@ struct vlan_priority_tci_mapping {
  *	@dent: proc dir entry
  *	@cnt_inc_headroom_on_tx: statistic - number of skb expansions on TX
  *	@cnt_encap_on_xmit: statistic - number of skb encapsulations on TX
+ *	@vlan_rx_stats: ptr to percpu rx stats
  */
 struct vlan_dev_info {
 	unsigned int nr_ingress_mappings;
@@ -45,6 +61,7 @@ struct vlan_dev_info {
 	struct proc_dir_entry *dent;
 	unsigned long cnt_inc_headroom_on_tx;
 	unsigned long cnt_encap_on_xmit;
+	struct vlan_rx_stats *vlan_rx_stats;
 };
 
 static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
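
Each VLAN device now keeps its rx counters in per-cpu slots: the receive path bumps only the running cpu's slot, without locking, and the stats callback added further down folds all slots into one aggregate on demand. A userspace model of the pattern, with a fixed NR_CPUS array standing in for alloc_percpu():

#include <stdio.h>

#define NR_CPUS 4

struct rx_stats {
	unsigned long rx_packets;
	unsigned long rx_bytes;
};

static struct rx_stats percpu[NR_CPUS];

/* hot path: touch only this cpu's slot, no lock needed */
static void rx_on_cpu(int cpu, unsigned long bytes)
{
	percpu[cpu].rx_packets++;
	percpu[cpu].rx_bytes += bytes;
}

/* reader: sum every possible cpu's slot, like vlan_dev_get_stats() */
static struct rx_stats fold(void)
{
	struct rx_stats sum = { 0, 0 };
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		sum.rx_packets += percpu[i].rx_packets;
		sum.rx_bytes += percpu[i].rx_bytes;
	}
	return sum;
}

int main(void)
{
	struct rx_stats s;

	rx_on_cpu(0, 1500);
	rx_on_cpu(3, 60);
	s = fold();
	printf("%lu packets, %lu bytes\n", s.rx_packets, s.rx_bytes);
	return 0;
}
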
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 8d5ca2ac4f8d..e75a2f3b10af 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -14,7 +14,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 	if (skb_bond_should_drop(skb))
 		goto drop;
 
-	skb->vlan_tci = vlan_tci;
+	__vlan_hwaccel_put_tag(skb, vlan_tci);
 	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
 
 	if (!skb->dev)
@@ -31,7 +31,7 @@ EXPORT_SYMBOL(__vlan_hwaccel_rx);
 int vlan_hwaccel_do_receive(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
-	struct net_device_stats *stats;
+	struct vlan_rx_stats *rx_stats;
 
 	skb->dev = vlan_dev_info(dev)->real_dev;
 	netif_nit_deliver(skb);
@@ -40,15 +40,17 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb)
 	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
 	skb->vlan_tci = 0;
 
-	stats = &dev->stats;
-	stats->rx_packets++;
-	stats->rx_bytes += skb->len;
+	rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats,
+			       smp_processor_id());
+
+	rx_stats->rx_packets++;
+	rx_stats->rx_bytes += skb->len;
 
 	switch (skb->pkt_type) {
 	case PACKET_BROADCAST:
 		break;
 	case PACKET_MULTICAST:
-		stats->multicast++;
+		rx_stats->multicast++;
 		break;
 	case PACKET_OTHERHOST:
 		/* Our lower layer thinks this is not local, let's make sure.
@@ -83,7 +85,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
 	if (skb_bond_should_drop(skb))
 		goto drop;
 
-	skb->vlan_tci = vlan_tci;
+	__vlan_hwaccel_put_tag(skb, vlan_tci);
 	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
 
 	if (!skb->dev)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 790fd55ec318..de0dc6bacbe8 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -140,7 +140,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 		  struct packet_type *ptype, struct net_device *orig_dev)
 {
 	struct vlan_hdr *vhdr;
-	struct net_device_stats *stats;
+	struct vlan_rx_stats *rx_stats;
 	u16 vlan_id;
 	u16 vlan_tci;
 
@@ -163,9 +163,10 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 		goto err_unlock;
 	}
 
-	stats = &skb->dev->stats;
-	stats->rx_packets++;
-	stats->rx_bytes += skb->len;
+	rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats,
+			       smp_processor_id());
+	rx_stats->rx_packets++;
+	rx_stats->rx_bytes += skb->len;
 
 	skb_pull_rcsum(skb, VLAN_HLEN);
 
@@ -180,7 +181,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 		break;
 
 	case PACKET_MULTICAST:
-		stats->multicast++;
+		rx_stats->multicast++;
 		break;
 
 	case PACKET_OTHERHOST:
@@ -200,7 +201,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 
 	skb = vlan_check_reorder_header(skb);
 	if (!skb) {
-		stats->rx_errors++;
+		rx_stats->rx_errors++;
 		goto err_unlock;
 	}
 
@@ -332,7 +333,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 	} else
 		txq->tx_dropped++;
 
-	return NETDEV_TX_OK;
+	return ret;
 }
 
 static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
@@ -358,7 +359,7 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
 	} else
 		txq->tx_dropped++;
 
-	return NETDEV_TX_OK;
+	return ret;
 }
 
 static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
@@ -731,6 +732,11 @@ static int vlan_dev_init(struct net_device *dev)
 		subclass = 1;
 
 	vlan_dev_set_lockdep_class(dev, subclass);
+
+	vlan_dev_info(dev)->vlan_rx_stats = alloc_percpu(struct vlan_rx_stats);
+	if (!vlan_dev_info(dev)->vlan_rx_stats)
+		return -ENOMEM;
+
 	return 0;
 }
 
@@ -740,6 +746,8 @@ static void vlan_dev_uninit(struct net_device *dev)
 	struct vlan_dev_info *vlan = vlan_dev_info(dev);
 	int i;
 
+	free_percpu(vlan->vlan_rx_stats);
+	vlan->vlan_rx_stats = NULL;
 	for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
 		while ((pm = vlan->egress_priority_map[i]) != NULL) {
 			vlan->egress_priority_map[i] = pm->next;
@@ -775,6 +783,31 @@ static u32 vlan_ethtool_get_flags(struct net_device *dev)
 	return dev_ethtool_get_flags(vlan->real_dev);
 }
 
+static struct net_device_stats *vlan_dev_get_stats(struct net_device *dev)
+{
+	struct net_device_stats *stats = &dev->stats;
+
+	dev_txq_stats_fold(dev, stats);
+
+	if (vlan_dev_info(dev)->vlan_rx_stats) {
+		struct vlan_rx_stats *p, rx = {0};
+		int i;
+
+		for_each_possible_cpu(i) {
+			p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
+			rx.rx_packets += p->rx_packets;
+			rx.rx_bytes += p->rx_bytes;
+			rx.rx_errors += p->rx_errors;
+			rx.multicast += p->multicast;
+		}
+		stats->rx_packets = rx.rx_packets;
+		stats->rx_bytes = rx.rx_bytes;
+		stats->rx_errors = rx.rx_errors;
+		stats->multicast = rx.multicast;
+	}
+	return stats;
+}
+
 static const struct ethtool_ops vlan_ethtool_ops = {
 	.get_settings	        = vlan_ethtool_get_settings,
 	.get_drvinfo	        = vlan_ethtool_get_drvinfo,
@@ -797,6 +830,7 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
 	.ndo_do_ioctl		= vlan_dev_ioctl,
 	.ndo_neigh_setup	= vlan_dev_neigh_setup,
+	.ndo_get_stats		= vlan_dev_get_stats,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	.ndo_fcoe_ddp_setup	= vlan_dev_fcoe_ddp_setup,
 	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
@@ -820,6 +854,7 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
 	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
 	.ndo_do_ioctl		= vlan_dev_ioctl,
 	.ndo_neigh_setup	= vlan_dev_neigh_setup,
+	.ndo_get_stats		= vlan_dev_get_stats,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	.ndo_fcoe_ddp_setup	= vlan_dev_fcoe_ddp_setup,
 	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index 4da8892ced5f..2ea40995dced 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -191,8 +191,181 @@ int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 }
 
 #ifdef CONFIG_COMPAT
-int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+/*
+ * FIXME:
+ * The compat_ioctl handling is duplicated, using both these conversion
+ * routines and the compat argument to the actual handlers. Both
+ * versions are somewhat incomplete and should be merged, e.g. by
+ * moving the ioctl number translation into the actual handlers and
+ * killing the conversion code.
+ *
+ * -arnd, November 2009
+ */
+#define ATM_GETLINKRATE32 _IOW('a', ATMIOC_ITF+1, struct compat_atmif_sioc)
+#define ATM_GETNAMES32 _IOW('a', ATMIOC_ITF+3, struct compat_atm_iobuf)
+#define ATM_GETTYPE32 _IOW('a', ATMIOC_ITF+4, struct compat_atmif_sioc)
+#define ATM_GETESI32 _IOW('a', ATMIOC_ITF+5, struct compat_atmif_sioc)
+#define ATM_GETADDR32 _IOW('a', ATMIOC_ITF+6, struct compat_atmif_sioc)
+#define ATM_RSTADDR32 _IOW('a', ATMIOC_ITF+7, struct compat_atmif_sioc)
+#define ATM_ADDADDR32 _IOW('a', ATMIOC_ITF+8, struct compat_atmif_sioc)
+#define ATM_DELADDR32 _IOW('a', ATMIOC_ITF+9, struct compat_atmif_sioc)
+#define ATM_GETCIRANGE32 _IOW('a', ATMIOC_ITF+10, struct compat_atmif_sioc)
+#define ATM_SETCIRANGE32 _IOW('a', ATMIOC_ITF+11, struct compat_atmif_sioc)
+#define ATM_SETESI32 _IOW('a', ATMIOC_ITF+12, struct compat_atmif_sioc)
+#define ATM_SETESIF32 _IOW('a', ATMIOC_ITF+13, struct compat_atmif_sioc)
+#define ATM_GETSTAT32 _IOW('a', ATMIOC_SARCOM+0, struct compat_atmif_sioc)
+#define ATM_GETSTATZ32 _IOW('a', ATMIOC_SARCOM+1, struct compat_atmif_sioc)
+#define ATM_GETLOOP32 _IOW('a', ATMIOC_SARCOM+2, struct compat_atmif_sioc)
+#define ATM_SETLOOP32 _IOW('a', ATMIOC_SARCOM+3, struct compat_atmif_sioc)
+#define ATM_QUERYLOOP32 _IOW('a', ATMIOC_SARCOM+4, struct compat_atmif_sioc)
+
+static struct {
+	unsigned int cmd32;
+	unsigned int cmd;
+} atm_ioctl_map[] = {
+	{ ATM_GETLINKRATE32, ATM_GETLINKRATE },
+	{ ATM_GETNAMES32, ATM_GETNAMES },
+	{ ATM_GETTYPE32, ATM_GETTYPE },
+	{ ATM_GETESI32, ATM_GETESI },
+	{ ATM_GETADDR32, ATM_GETADDR },
+	{ ATM_RSTADDR32, ATM_RSTADDR },
+	{ ATM_ADDADDR32, ATM_ADDADDR },
+	{ ATM_DELADDR32, ATM_DELADDR },
+	{ ATM_GETCIRANGE32, ATM_GETCIRANGE },
+	{ ATM_SETCIRANGE32, ATM_SETCIRANGE },
+	{ ATM_SETESI32, ATM_SETESI },
+	{ ATM_SETESIF32, ATM_SETESIF },
+	{ ATM_GETSTAT32, ATM_GETSTAT },
+	{ ATM_GETSTATZ32, ATM_GETSTATZ },
+	{ ATM_GETLOOP32, ATM_GETLOOP },
+	{ ATM_SETLOOP32, ATM_SETLOOP },
+	{ ATM_QUERYLOOP32, ATM_QUERYLOOP },
+};
+
+#define NR_ATM_IOCTL ARRAY_SIZE(atm_ioctl_map)
+
+static int do_atm_iobuf(struct socket *sock, unsigned int cmd,
+			unsigned long arg)
+{
+	struct atm_iobuf __user *iobuf;
+	struct compat_atm_iobuf __user *iobuf32;
+	u32 data;
+	void __user *datap;
+	int len, err;
+
+	iobuf = compat_alloc_user_space(sizeof(*iobuf));
+	iobuf32 = compat_ptr(arg);
+
+	if (get_user(len, &iobuf32->length) ||
+	    get_user(data, &iobuf32->buffer))
+		return -EFAULT;
+	datap = compat_ptr(data);
+	if (put_user(len, &iobuf->length) ||
+	    put_user(datap, &iobuf->buffer))
+		return -EFAULT;
+
+	err = do_vcc_ioctl(sock, cmd, (unsigned long) iobuf, 0);
+
+	if (!err) {
+		if (copy_in_user(&iobuf32->length, &iobuf->length,
+				 sizeof(int)))
+			err = -EFAULT;
+	}
+
+	return err;
+}
+
+static int do_atmif_sioc(struct socket *sock, unsigned int cmd,
+			 unsigned long arg)
+{
+	struct atmif_sioc __user *sioc;
+	struct compat_atmif_sioc __user *sioc32;
+	u32 data;
+	void __user *datap;
+	int err;
+
+	sioc = compat_alloc_user_space(sizeof(*sioc));
+	sioc32 = compat_ptr(arg);
+
+	if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int))
+	    || get_user(data, &sioc32->arg))
+		return -EFAULT;
+	datap = compat_ptr(data);
+	if (put_user(datap, &sioc->arg))
+		return -EFAULT;
+
+	err = do_vcc_ioctl(sock, cmd, (unsigned long) sioc, 0);
+
+	if (!err) {
+		if (copy_in_user(&sioc32->length, &sioc->length,
+				 sizeof(int)))
+			err = -EFAULT;
+	}
+	return err;
+}
+
+static int do_atm_ioctl(struct socket *sock, unsigned int cmd32,
+			unsigned long arg)
+{
+	int i;
+	unsigned int cmd = 0;
+
+	switch (cmd32) {
+	case SONET_GETSTAT:
+	case SONET_GETSTATZ:
+	case SONET_GETDIAG:
+	case SONET_SETDIAG:
+	case SONET_CLRDIAG:
+	case SONET_SETFRAMING:
+	case SONET_GETFRAMING:
+	case SONET_GETFRSENSE:
+		return do_atmif_sioc(sock, cmd32, arg);
+	}
+
+	for (i = 0; i < NR_ATM_IOCTL; i++) {
+		if (cmd32 == atm_ioctl_map[i].cmd32) {
+			cmd = atm_ioctl_map[i].cmd;
+			break;
+		}
+	}
+	if (i == NR_ATM_IOCTL)
+		return -EINVAL;
+
+	switch (cmd) {
+	case ATM_GETNAMES:
+		return do_atm_iobuf(sock, cmd, arg);
+
+	case ATM_GETLINKRATE:
+	case ATM_GETTYPE:
+	case ATM_GETESI:
+	case ATM_GETADDR:
+	case ATM_RSTADDR:
+	case ATM_ADDADDR:
+	case ATM_DELADDR:
+	case ATM_GETCIRANGE:
346 case ATM_SETCIRANGE:
347 case ATM_SETESI:
348 case ATM_SETESIF:
349 case ATM_GETSTAT:
350 case ATM_GETSTATZ:
351 case ATM_GETLOOP:
352 case ATM_SETLOOP:
353 case ATM_QUERYLOOP:
354 return do_atmif_sioc(sock, cmd, arg);
355 }
356
357 return -EINVAL;
358}
359
360int vcc_compat_ioctl(struct socket *sock, unsigned int cmd,
361 unsigned long arg)
195{ 362{
196 return do_vcc_ioctl(sock, cmd, arg, 1); 363 int ret;
364
365 ret = do_vcc_ioctl(sock, cmd, arg, 1);
366 if (ret != -ENOIOCTLCMD)
367 return ret;
368
369 return do_atm_ioctl(sock, cmd, arg);
197} 370}
198#endif 371#endif
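
The table-driven scheme above pairs each 32-bit ioctl number with its native counterpart so do_atm_ioctl() can translate before dispatching. A minimal userspace sketch of the pattern, with hypothetical command values standing in for the ATM numbers:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical command numbers standing in for the *32 / native pairs. */
enum { CMD_GETADDR32 = 0x1001, CMD_GETADDR = 0x2001,
       CMD_SETADDR32 = 0x1002, CMD_SETADDR = 0x2002 };

static const struct { unsigned int cmd32, cmd; } ioctl_map[] = {
	{ CMD_GETADDR32, CMD_GETADDR },
	{ CMD_SETADDR32, CMD_SETADDR },
};

/* Translate a compat command to its native equivalent, or 0 if unknown. */
static unsigned int translate_cmd(unsigned int cmd32)
{
	size_t i;

	for (i = 0; i < sizeof(ioctl_map) / sizeof(ioctl_map[0]); i++)
		if (ioctl_map[i].cmd32 == cmd32)
			return ioctl_map[i].cmd;
	return 0;
}

int main(void)
{
	printf("%#x -> %#x\n", CMD_GETADDR32, translate_cmd(CMD_GETADDR32));
	printf("%#x -> %#x\n", 0xdead, translate_cmd(0xdead)); /* unknown */
	return 0;
}
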
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index a9750984f772..b7c4224f4e7d 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -211,6 +211,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
211 conn->type = type; 211 conn->type = type;
212 conn->mode = HCI_CM_ACTIVE; 212 conn->mode = HCI_CM_ACTIVE;
213 conn->state = BT_OPEN; 213 conn->state = BT_OPEN;
214 conn->auth_type = HCI_AT_GENERAL_BONDING;
214 215
215 conn->power_save = 1; 216 conn->power_save = 1;
216 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 217 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index ff0233df6246..80d929842f04 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -2206,7 +2206,7 @@ static int l2cap_build_conf_req(struct sock *sk, void *data)
2206{ 2206{
2207 struct l2cap_pinfo *pi = l2cap_pi(sk); 2207 struct l2cap_pinfo *pi = l2cap_pi(sk);
2208 struct l2cap_conf_req *req = data; 2208 struct l2cap_conf_req *req = data;
2209 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM }; 2209 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2210 void *ptr = req->data; 2210 void *ptr = req->data;
2211 2211
2212 BT_DBG("sk %p", sk); 2212 BT_DBG("sk %p", sk);
@@ -2395,6 +2395,10 @@ done:
2395 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; 2395 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2396 2396
2397 pi->conf_state |= L2CAP_CONF_MODE_DONE; 2397 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2398
2399 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2400 sizeof(rfc), (unsigned long) &rfc);
2401
2398 break; 2402 break;
2399 2403
2400 case L2CAP_MODE_STREAMING: 2404 case L2CAP_MODE_STREAMING:
@@ -2402,6 +2406,10 @@ done:
2402 pi->max_pdu_size = rfc.max_pdu_size; 2406 pi->max_pdu_size = rfc.max_pdu_size;
2403 2407
2404 pi->conf_state |= L2CAP_CONF_MODE_DONE; 2408 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2409
2410 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2411 sizeof(rfc), (unsigned long) &rfc);
2412
2405 break; 2413 break;
2406 2414
2407 default: 2415 default:
@@ -2411,9 +2419,6 @@ done:
2411 rfc.mode = pi->mode; 2419 rfc.mode = pi->mode;
2412 } 2420 }
2413 2421
2414 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2415 sizeof(rfc), (unsigned long) &rfc);
2416
2417 if (result == L2CAP_CONF_SUCCESS) 2422 if (result == L2CAP_CONF_SUCCESS)
2418 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE; 2423 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2419 } 2424 }
diff --git a/net/core/dev.c b/net/core/dev.c
index bf629ac08b87..9977288583b8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -79,6 +79,7 @@
79#include <linux/cpu.h> 79#include <linux/cpu.h>
80#include <linux/types.h> 80#include <linux/types.h>
81#include <linux/kernel.h> 81#include <linux/kernel.h>
82#include <linux/hash.h>
82#include <linux/sched.h> 83#include <linux/sched.h>
83#include <linux/mutex.h> 84#include <linux/mutex.h>
84#include <linux/string.h> 85#include <linux/string.h>
@@ -196,7 +197,7 @@ EXPORT_SYMBOL(dev_base_lock);
196static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) 197static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
197{ 198{
198 unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ)); 199 unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
199 return &net->dev_name_head[hash & (NETDEV_HASHENTRIES - 1)]; 200 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
200} 201}
201 202
202static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) 203static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
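
The bucket selection above switches from masking the low bits of full_name_hash() to hash_32(), which multiplies by a golden-ratio constant and keeps the top bits, so every input bit influences the bucket index. A sketch of that idea; the constant matches the 32-bit hash_32() of this era, but treat the exact value as an assumption:

#include <stdio.h>
#include <stdint.h>

#define GOLDEN_RATIO_PRIME_32 0x9e370001U

/* Fold a 32-bit value down to `bits` bits using the high bits of the product. */
static uint32_t hash_32(uint32_t val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_PRIME_32) >> (32 - bits);
}

int main(void)
{
	/* These differ only in their high bits: masking with & 255 would put
	 * all three in bucket 0, while hash_32() spreads them out. */
	printf("%u %u %u\n", hash_32(0x10000000, 8), hash_32(0x20000000, 8),
	       hash_32(0x30000000, 8));
	return 0;
}
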
@@ -892,7 +893,8 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
892 free_page((unsigned long) inuse); 893 free_page((unsigned long) inuse);
893 } 894 }
894 895
895 snprintf(buf, IFNAMSIZ, name, i); 896 if (buf != name)
897 snprintf(buf, IFNAMSIZ, name, i);
896 if (!__dev_get_by_name(net, buf)) 898 if (!__dev_get_by_name(net, buf))
897 return i; 899 return i;
898 900
@@ -932,6 +934,21 @@ int dev_alloc_name(struct net_device *dev, const char *name)
932} 934}
933EXPORT_SYMBOL(dev_alloc_name); 935EXPORT_SYMBOL(dev_alloc_name);
934 936
937static int dev_get_valid_name(struct net *net, const char *name, char *buf,
938 bool fmt)
939{
940 if (!dev_valid_name(name))
941 return -EINVAL;
942
943 if (fmt && strchr(name, '%'))
944 return __dev_alloc_name(net, name, buf);
945 else if (__dev_get_by_name(net, name))
946 return -EEXIST;
947 else if (buf != name)
948 strlcpy(buf, name, IFNAMSIZ);
949
950 return 0;
951}
935 952
936/** 953/**
937 * dev_change_name - change name of a device 954 * dev_change_name - change name of a device
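
dev_get_valid_name() folds three cases into one helper: reject an invalid name, expand a printf-style template containing '%', or accept a literal name only if it is unused. A userspace sketch of the same decision, with a stand-in registry in place of the per-namespace name hash:

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define IFNAMSIZ 16

/* Stand-in registry of names already in use. */
static const char *in_use[] = { "eth0", "eth1" };

static int name_exists(const char *name)
{
	size_t i;
	for (i = 0; i < sizeof(in_use) / sizeof(in_use[0]); i++)
		if (!strcmp(in_use[i], name))
			return 1;
	return 0;
}

/* Pick the first free index for a template such as "eth%d". */
static int alloc_name(const char *fmt, char *buf)
{
	int i;
	for (i = 0; i < 100; i++) {
		snprintf(buf, IFNAMSIZ, fmt, i);
		if (!name_exists(buf))
			return i;
	}
	return -ENFILE;
}

static int get_valid_name(const char *name, char *buf)
{
	if (!*name)
		return -EINVAL;          /* stands in for dev_valid_name() */
	if (strchr(name, '%'))
		return alloc_name(name, buf);
	if (name_exists(name))
		return -EEXIST;
	strncpy(buf, name, IFNAMSIZ - 1);
	buf[IFNAMSIZ - 1] = '\0';
	return 0;
}

int main(void)
{
	char buf[IFNAMSIZ];
	if (get_valid_name("eth%d", buf) >= 0)
		printf("allocated %s\n", buf);            /* -> eth2 */
	printf("literal eth0: %d\n", get_valid_name("eth0", buf)); /* -EEXIST */
	return 0;
}
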
@@ -955,22 +972,14 @@ int dev_change_name(struct net_device *dev, const char *newname)
955 if (dev->flags & IFF_UP) 972 if (dev->flags & IFF_UP)
956 return -EBUSY; 973 return -EBUSY;
957 974
958 if (!dev_valid_name(newname))
959 return -EINVAL;
960
961 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) 975 if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
962 return 0; 976 return 0;
963 977
964 memcpy(oldname, dev->name, IFNAMSIZ); 978 memcpy(oldname, dev->name, IFNAMSIZ);
965 979
966 if (strchr(newname, '%')) { 980 err = dev_get_valid_name(net, newname, dev->name, 1);
967 err = dev_alloc_name(dev, newname); 981 if (err < 0)
968 if (err < 0) 982 return err;
969 return err;
970 } else if (__dev_get_by_name(net, newname))
971 return -EEXIST;
972 else
973 strlcpy(dev->name, newname, IFNAMSIZ);
974 983
975rollback: 984rollback:
976 /* For now only devices in the initial network namespace 985 /* For now only devices in the initial network namespace
@@ -998,14 +1007,15 @@ rollback:
998 ret = notifier_to_errno(ret); 1007 ret = notifier_to_errno(ret);
999 1008
1000 if (ret) { 1009 if (ret) {
1001 if (err) { 1010 /* err >= 0 after dev_alloc_name() or stores the first errno */
1002 printk(KERN_ERR 1011 if (err >= 0) {
1003 "%s: name change rollback failed: %d.\n",
1004 dev->name, ret);
1005 } else {
1006 err = ret; 1012 err = ret;
1007 memcpy(dev->name, oldname, IFNAMSIZ); 1013 memcpy(dev->name, oldname, IFNAMSIZ);
1008 goto rollback; 1014 goto rollback;
1015 } else {
1016 printk(KERN_ERR
1017 "%s: name change rollback failed: %d.\n",
1018 dev->name, ret);
1009 } 1019 }
1010 } 1020 }
1011 1021
@@ -1342,6 +1352,7 @@ rollback:
1342 nb->notifier_call(nb, NETDEV_DOWN, dev); 1352 nb->notifier_call(nb, NETDEV_DOWN, dev);
1343 } 1353 }
1344 nb->notifier_call(nb, NETDEV_UNREGISTER, dev); 1354 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1355 nb->notifier_call(nb, NETDEV_UNREGISTER_PERNET, dev);
1345 } 1356 }
1346 } 1357 }
1347 1358
@@ -1756,7 +1767,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1756 struct netdev_queue *txq) 1767 struct netdev_queue *txq)
1757{ 1768{
1758 const struct net_device_ops *ops = dev->netdev_ops; 1769 const struct net_device_ops *ops = dev->netdev_ops;
1759 int rc; 1770 int rc = NETDEV_TX_OK;
1760 1771
1761 if (likely(!skb->next)) { 1772 if (likely(!skb->next)) {
1762 if (!list_empty(&ptype_all)) 1773 if (!list_empty(&ptype_all))
@@ -1804,6 +1815,8 @@ gso:
1804 nskb->next = NULL; 1815 nskb->next = NULL;
1805 rc = ops->ndo_start_xmit(nskb, dev); 1816 rc = ops->ndo_start_xmit(nskb, dev);
1806 if (unlikely(rc != NETDEV_TX_OK)) { 1817 if (unlikely(rc != NETDEV_TX_OK)) {
1818 if (rc & ~NETDEV_TX_MASK)
1819 goto out_kfree_gso_skb;
1807 nskb->next = skb->next; 1820 nskb->next = skb->next;
1808 skb->next = nskb; 1821 skb->next = nskb;
1809 return rc; 1822 return rc;
@@ -1813,11 +1826,12 @@ gso:
1813 return NETDEV_TX_BUSY; 1826 return NETDEV_TX_BUSY;
1814 } while (skb->next); 1827 } while (skb->next);
1815 1828
1816 skb->destructor = DEV_GSO_CB(skb)->destructor; 1829out_kfree_gso_skb:
1817 1830 if (likely(skb->next == NULL))
1831 skb->destructor = DEV_GSO_CB(skb)->destructor;
1818out_kfree_skb: 1832out_kfree_skb:
1819 kfree_skb(skb); 1833 kfree_skb(skb);
1820 return NETDEV_TX_OK; 1834 return rc;
1821} 1835}
1822 1836
1823static u32 skb_tx_hashrnd; 1837static u32 skb_tx_hashrnd;
@@ -1844,6 +1858,20 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
1844} 1858}
1845EXPORT_SYMBOL(skb_tx_hash); 1859EXPORT_SYMBOL(skb_tx_hash);
1846 1860
1861static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
1862{
1863 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
1864 if (net_ratelimit()) {
1865 WARN(1, "%s selects TX queue %d, but "
1866 "real number of TX queues is %d\n",
1867 dev->name, queue_index,
1868 dev->real_num_tx_queues);
1869 }
1870 return 0;
1871 }
1872 return queue_index;
1873}
1874
1847static struct netdev_queue *dev_pick_tx(struct net_device *dev, 1875static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1848 struct sk_buff *skb) 1876 struct sk_buff *skb)
1849{ 1877{
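
dev_cap_txqueue() guards against a driver's ndo_select_queue() returning an index at or beyond real_num_tx_queues: warn and fall back to queue 0 instead of indexing past the queue array. The guard in isolation, as a sketch:

#include <stdio.h>

/* Clamp a queue index chosen by a callback to the valid range [0, nqueues). */
static unsigned int cap_txqueue(unsigned int index, unsigned int nqueues)
{
	if (index >= nqueues) {
		fprintf(stderr, "selected TX queue %u, but only %u exist\n",
			index, nqueues);
		return 0; /* safe fallback */
	}
	return index;
}

int main(void)
{
	printf("%u\n", cap_txqueue(3, 8));  /* 3: in range */
	printf("%u\n", cap_txqueue(9, 8));  /* 0: clamped, with a warning */
	return 0;
}
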
@@ -1857,6 +1885,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1857 1885
1858 if (ops->ndo_select_queue) { 1886 if (ops->ndo_select_queue) {
1859 queue_index = ops->ndo_select_queue(dev, skb); 1887 queue_index = ops->ndo_select_queue(dev, skb);
1888 queue_index = dev_cap_txqueue(dev, queue_index);
1860 } else { 1889 } else {
1861 queue_index = 0; 1890 queue_index = 0;
1862 if (dev->real_num_tx_queues > 1) 1891 if (dev->real_num_tx_queues > 1)
@@ -2002,8 +2031,8 @@ gso:
2002 HARD_TX_LOCK(dev, txq, cpu); 2031 HARD_TX_LOCK(dev, txq, cpu);
2003 2032
2004 if (!netif_tx_queue_stopped(txq)) { 2033 if (!netif_tx_queue_stopped(txq)) {
2005 rc = NET_XMIT_SUCCESS; 2034 rc = dev_hard_start_xmit(skb, dev, txq);
2006 if (!dev_hard_start_xmit(skb, dev, txq)) { 2035 if (dev_xmit_complete(rc)) {
2007 HARD_TX_UNLOCK(dev, txq); 2036 HARD_TX_UNLOCK(dev, txq);
2008 goto out; 2037 goto out;
2009 } 2038 }
@@ -4701,7 +4730,8 @@ static void net_set_todo(struct net_device *dev)
4701 4730
4702static void rollback_registered_many(struct list_head *head) 4731static void rollback_registered_many(struct list_head *head)
4703{ 4732{
4704 struct net_device *dev; 4733 struct net_device *dev, *aux, *fdev;
4734 LIST_HEAD(pernet_list);
4705 4735
4706 BUG_ON(dev_boot_phase); 4736 BUG_ON(dev_boot_phase);
4707 ASSERT_RTNL(); 4737 ASSERT_RTNL();
@@ -4759,8 +4789,24 @@ static void rollback_registered_many(struct list_head *head)
4759 4789
4760 synchronize_net(); 4790 synchronize_net();
4761 4791
4762 list_for_each_entry(dev, head, unreg_list) 4792 list_for_each_entry_safe(dev, aux, head, unreg_list) {
4793 int new_net = 1;
4794 list_for_each_entry(fdev, &pernet_list, unreg_list) {
4795 if (dev_net(dev) == dev_net(fdev)) {
4796 new_net = 0;
4797 dev_put(dev);
4798 break;
4799 }
4800 }
4801 if (new_net)
4802 list_move(&dev->unreg_list, &pernet_list);
4803 }
4804
4805 list_for_each_entry_safe(dev, aux, &pernet_list, unreg_list) {
4806 call_netdevice_notifiers(NETDEV_UNREGISTER_PERNET, dev);
4807 list_move(&dev->unreg_list, head);
4763 dev_put(dev); 4808 dev_put(dev);
4809 }
4764} 4810}
4765 4811
4766static void rollback_registered(struct net_device *dev) 4812static void rollback_registered(struct net_device *dev)
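
The loop above moves one representative device per network namespace onto pernet_list so NETDEV_UNREGISTER_PERNET is sent once per namespace, not once per device. The grouping idea reduced to a userspace sketch, with integer keys standing in for struct net pointers:

#include <stdio.h>

/* Fire the per-namespace notifier once for each distinct key. */
static void notify_once_per_ns(const int *ns_of_dev, int ndev)
{
	int i, j, seen;

	for (i = 0; i < ndev; i++) {
		seen = 0;
		for (j = 0; j < i; j++)
			if (ns_of_dev[j] == ns_of_dev[i])
				seen = 1;
		if (!seen)
			printf("UNREGISTER_PERNET for ns %d\n", ns_of_dev[i]);
	}
}

int main(void)
{
	int ns[] = { 1, 1, 2, 1, 3, 2 };
	notify_once_per_ns(ns, 6); /* prints ns 1, 2, 3 once each */
	return 0;
}
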
@@ -4845,8 +4891,6 @@ EXPORT_SYMBOL(netdev_fix_features);
4845 4891
4846int register_netdevice(struct net_device *dev) 4892int register_netdevice(struct net_device *dev)
4847{ 4893{
4848 struct hlist_head *head;
4849 struct hlist_node *p;
4850 int ret; 4894 int ret;
4851 struct net *net = dev_net(dev); 4895 struct net *net = dev_net(dev);
4852 4896
@@ -4875,26 +4919,14 @@ int register_netdevice(struct net_device *dev)
4875 } 4919 }
4876 } 4920 }
4877 4921
4878 if (!dev_valid_name(dev->name)) { 4922 ret = dev_get_valid_name(net, dev->name, dev->name, 0);
4879 ret = -EINVAL; 4923 if (ret)
4880 goto err_uninit; 4924 goto err_uninit;
4881 }
4882 4925
4883 dev->ifindex = dev_new_index(net); 4926 dev->ifindex = dev_new_index(net);
4884 if (dev->iflink == -1) 4927 if (dev->iflink == -1)
4885 dev->iflink = dev->ifindex; 4928 dev->iflink = dev->ifindex;
4886 4929
4887 /* Check for existence of name */
4888 head = dev_name_hash(net, dev->name);
4889 hlist_for_each(p, head) {
4890 struct net_device *d
4891 = hlist_entry(p, struct net_device, name_hlist);
4892 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4893 ret = -EEXIST;
4894 goto err_uninit;
4895 }
4896 }
4897
4898 /* Fix illegal checksum combinations */ 4930 /* Fix illegal checksum combinations */
4899 if ((dev->features & NETIF_F_HW_CSUM) && 4931 if ((dev->features & NETIF_F_HW_CSUM) &&
4900 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 4932 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
@@ -5047,6 +5079,8 @@ static void netdev_wait_allrefs(struct net_device *dev)
5047{ 5079{
5048 unsigned long rebroadcast_time, warning_time; 5080 unsigned long rebroadcast_time, warning_time;
5049 5081
5082 linkwatch_forget_dev(dev);
5083
5050 rebroadcast_time = warning_time = jiffies; 5084 rebroadcast_time = warning_time = jiffies;
5051 while (atomic_read(&dev->refcnt) != 0) { 5085 while (atomic_read(&dev->refcnt) != 0) {
5052 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { 5086 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
@@ -5054,6 +5088,8 @@ static void netdev_wait_allrefs(struct net_device *dev)
5054 5088
5055 /* Rebroadcast unregister notification */ 5089 /* Rebroadcast unregister notification */
5056 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 5090 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5091 /* don't resend NETDEV_UNREGISTER_PERNET, _PERNET users
 5092 		/* don't resend NETDEV_UNREGISTER_PERNET, _PERNET users
 5093 		 * should have already handled it the first time */
5057 5093
5058 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 5094 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5059 &dev->state)) { 5095 &dev->state)) {
@@ -5149,6 +5185,32 @@ void netdev_run_todo(void)
5149} 5185}
5150 5186
5151/** 5187/**
5188 * dev_txq_stats_fold - fold tx_queues stats
5189 * @dev: device to get statistics from
5190 * @stats: struct net_device_stats to hold results
5191 */
5192void dev_txq_stats_fold(const struct net_device *dev,
5193 struct net_device_stats *stats)
5194{
5195 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5196 unsigned int i;
5197 struct netdev_queue *txq;
5198
5199 for (i = 0; i < dev->num_tx_queues; i++) {
5200 txq = netdev_get_tx_queue(dev, i);
5201 tx_bytes += txq->tx_bytes;
5202 tx_packets += txq->tx_packets;
5203 tx_dropped += txq->tx_dropped;
5204 }
5205 if (tx_bytes || tx_packets || tx_dropped) {
5206 stats->tx_bytes = tx_bytes;
5207 stats->tx_packets = tx_packets;
5208 stats->tx_dropped = tx_dropped;
5209 }
5210}
5211EXPORT_SYMBOL(dev_txq_stats_fold);
5212
5213/**
5152 * dev_get_stats - get network device statistics 5214 * dev_get_stats - get network device statistics
5153 * @dev: device to get statistics from 5215 * @dev: device to get statistics from
5154 * 5216 *
@@ -5162,25 +5224,9 @@ const struct net_device_stats *dev_get_stats(struct net_device *dev)
5162 5224
5163 if (ops->ndo_get_stats) 5225 if (ops->ndo_get_stats)
5164 return ops->ndo_get_stats(dev); 5226 return ops->ndo_get_stats(dev);
5165 else { 5227
5166 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0; 5228 dev_txq_stats_fold(dev, &dev->stats);
5167 struct net_device_stats *stats = &dev->stats; 5229 return &dev->stats;
5168 unsigned int i;
5169 struct netdev_queue *txq;
5170
5171 for (i = 0; i < dev->num_tx_queues; i++) {
5172 txq = netdev_get_tx_queue(dev, i);
5173 tx_bytes += txq->tx_bytes;
5174 tx_packets += txq->tx_packets;
5175 tx_dropped += txq->tx_dropped;
5176 }
5177 if (tx_bytes || tx_packets || tx_dropped) {
5178 stats->tx_bytes = tx_bytes;
5179 stats->tx_packets = tx_packets;
5180 stats->tx_dropped = tx_dropped;
5181 }
5182 return stats;
5183 }
5184} 5230}
5185EXPORT_SYMBOL(dev_get_stats); 5231EXPORT_SYMBOL(dev_get_stats);
5186 5232
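
dev_txq_stats_fold() sums the per-queue TX counters and stores them only when at least one is nonzero, so a driver that keeps its own totals in dev->stats is not overwritten with zeros. A sketch under that assumption:

#include <stdio.h>

struct q_stats { unsigned long tx_bytes, tx_packets, tx_dropped; };

/* Fold per-queue counters into *agg, but leave *agg alone if all are zero. */
static void txq_stats_fold(const struct q_stats *q, int nq, struct q_stats *agg)
{
	struct q_stats sum = { 0, 0, 0 };
	int i;

	for (i = 0; i < nq; i++) {
		sum.tx_bytes   += q[i].tx_bytes;
		sum.tx_packets += q[i].tx_packets;
		sum.tx_dropped += q[i].tx_dropped;
	}
	if (sum.tx_bytes || sum.tx_packets || sum.tx_dropped)
		*agg = sum;
}

int main(void)
{
	struct q_stats queues[2] = { { 100, 2, 0 }, { 50, 1, 1 } };
	struct q_stats agg = { 0, 0, 0 };

	txq_stats_fold(queues, 2, &agg);
	printf("%lu bytes, %lu packets, %lu dropped\n",
	       agg.tx_bytes, agg.tx_packets, agg.tx_dropped);
	return 0;
}
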
@@ -5261,6 +5307,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5261 5307
5262 INIT_LIST_HEAD(&dev->napi_list); 5308 INIT_LIST_HEAD(&dev->napi_list);
5263 INIT_LIST_HEAD(&dev->unreg_list); 5309 INIT_LIST_HEAD(&dev->unreg_list);
5310 INIT_LIST_HEAD(&dev->link_watch_list);
5264 dev->priv_flags = IFF_XMIT_DST_RELEASE; 5311 dev->priv_flags = IFF_XMIT_DST_RELEASE;
5265 setup(dev); 5312 setup(dev);
5266 strcpy(dev->name, name); 5313 strcpy(dev->name, name);
@@ -5355,6 +5402,10 @@ EXPORT_SYMBOL(unregister_netdevice_queue);
5355 * unregister_netdevice_many - unregister many devices 5402 * unregister_netdevice_many - unregister many devices
5356 * @head: list of devices 5403 * @head: list of devices
5357 * 5404 *
5405 * WARNING: Calling this modifies the given list
5406 * (in rollback_registered_many). It may change the order of the elements
5407 * in the list. However, you can assume it does not add or delete elements
5408 * to/from the list.
5358 */ 5409 */
5359void unregister_netdevice_many(struct list_head *head) 5410void unregister_netdevice_many(struct list_head *head)
5360{ 5411{
@@ -5403,8 +5454,6 @@ EXPORT_SYMBOL(unregister_netdev);
5403 5454
5404int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat) 5455int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5405{ 5456{
5406 char buf[IFNAMSIZ];
5407 const char *destname;
5408 int err; 5457 int err;
5409 5458
5410 ASSERT_RTNL(); 5459 ASSERT_RTNL();
@@ -5437,20 +5486,11 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
5437 * we can use it in the destination network namespace. 5486 * we can use it in the destination network namespace.
5438 */ 5487 */
5439 err = -EEXIST; 5488 err = -EEXIST;
5440 destname = dev->name; 5489 if (__dev_get_by_name(net, dev->name)) {
5441 if (__dev_get_by_name(net, destname)) {
5442 /* We get here if we can't use the current device name */ 5490 /* We get here if we can't use the current device name */
5443 if (!pat) 5491 if (!pat)
5444 goto out; 5492 goto out;
5445 if (!dev_valid_name(pat)) 5493 if (dev_get_valid_name(net, pat, dev->name, 1))
5446 goto out;
5447 if (strchr(pat, '%')) {
5448 if (__dev_alloc_name(net, pat, buf) < 0)
5449 goto out;
5450 destname = buf;
5451 } else
5452 destname = pat;
5453 if (__dev_get_by_name(net, destname))
5454 goto out; 5494 goto out;
5455 } 5495 }
5456 5496
@@ -5474,6 +5514,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
5474 this device. They should clean all the things. 5514 this device. They should clean all the things.
5475 */ 5515 */
5476 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 5516 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5517 call_netdevice_notifiers(NETDEV_UNREGISTER_PERNET, dev);
5477 5518
5478 /* 5519 /*
5479 * Flush the unicast and multicast chains 5520 * Flush the unicast and multicast chains
@@ -5486,10 +5527,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
5486 /* Actually switch the network namespace */ 5527 /* Actually switch the network namespace */
5487 dev_net_set(dev, net); 5528 dev_net_set(dev, net);
5488 5529
5489 /* Assign the new device name */
5490 if (destname != dev->name)
5491 strcpy(dev->name, destname);
5492
5493 /* If there is an ifindex conflict assign a new one */ 5530 /* If there is an ifindex conflict assign a new one */
5494 if (__dev_get_by_index(net, dev->ifindex)) { 5531 if (__dev_get_by_index(net, dev->ifindex)) {
5495 int iflink = (dev->iflink == dev->ifindex); 5532 int iflink = (dev->iflink == dev->ifindex);
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index bf8f7af699d7..5910b555a54a 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -35,7 +35,7 @@ static unsigned long linkwatch_nextevent;
35static void linkwatch_event(struct work_struct *dummy); 35static void linkwatch_event(struct work_struct *dummy);
36static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event); 36static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
37 37
38static struct net_device *lweventlist; 38static LIST_HEAD(lweventlist);
39static DEFINE_SPINLOCK(lweventlist_lock); 39static DEFINE_SPINLOCK(lweventlist_lock);
40 40
41static unsigned char default_operstate(const struct net_device *dev) 41static unsigned char default_operstate(const struct net_device *dev)
@@ -89,8 +89,10 @@ static void linkwatch_add_event(struct net_device *dev)
89 unsigned long flags; 89 unsigned long flags;
90 90
91 spin_lock_irqsave(&lweventlist_lock, flags); 91 spin_lock_irqsave(&lweventlist_lock, flags);
92 dev->link_watch_next = lweventlist; 92 if (list_empty(&dev->link_watch_list)) {
93 lweventlist = dev; 93 list_add_tail(&dev->link_watch_list, &lweventlist);
94 dev_hold(dev);
95 }
94 spin_unlock_irqrestore(&lweventlist_lock, flags); 96 spin_unlock_irqrestore(&lweventlist_lock, flags);
95} 97}
96 98
@@ -133,9 +135,35 @@ static void linkwatch_schedule_work(int urgent)
133} 135}
134 136
135 137
138static void linkwatch_do_dev(struct net_device *dev)
139{
140 /*
141 * Make sure the above read is complete since it can be
142 * rewritten as soon as we clear the bit below.
143 */
144 smp_mb__before_clear_bit();
145
146 /* We are about to handle this device,
147 * so new events can be accepted
148 */
149 clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
150
151 rfc2863_policy(dev);
152 if (dev->flags & IFF_UP) {
153 if (netif_carrier_ok(dev))
154 dev_activate(dev);
155 else
156 dev_deactivate(dev);
157
158 netdev_state_change(dev);
159 }
160 dev_put(dev);
161}
162
136static void __linkwatch_run_queue(int urgent_only) 163static void __linkwatch_run_queue(int urgent_only)
137{ 164{
138 struct net_device *next; 165 struct net_device *dev;
166 LIST_HEAD(wrk);
139 167
140 /* 168 /*
141 * Limit the number of linkwatch events to one 169 * Limit the number of linkwatch events to one
@@ -153,46 +181,40 @@ static void __linkwatch_run_queue(int urgent_only)
153 clear_bit(LW_URGENT, &linkwatch_flags); 181 clear_bit(LW_URGENT, &linkwatch_flags);
154 182
155 spin_lock_irq(&lweventlist_lock); 183 spin_lock_irq(&lweventlist_lock);
156 next = lweventlist; 184 list_splice_init(&lweventlist, &wrk);
157 lweventlist = NULL;
158 spin_unlock_irq(&lweventlist_lock);
159 185
160 while (next) { 186 while (!list_empty(&wrk)) {
161 struct net_device *dev = next;
162 187
163 next = dev->link_watch_next; 188 dev = list_first_entry(&wrk, struct net_device, link_watch_list);
189 list_del_init(&dev->link_watch_list);
164 190
165 if (urgent_only && !linkwatch_urgent_event(dev)) { 191 if (urgent_only && !linkwatch_urgent_event(dev)) {
166 linkwatch_add_event(dev); 192 list_add_tail(&dev->link_watch_list, &lweventlist);
167 continue; 193 continue;
168 } 194 }
169 195 spin_unlock_irq(&lweventlist_lock);
170 /* 196 linkwatch_do_dev(dev);
171 * Make sure the above read is complete since it can be 197 spin_lock_irq(&lweventlist_lock);
172 * rewritten as soon as we clear the bit below.
173 */
174 smp_mb__before_clear_bit();
175
176 /* We are about to handle this device,
177 * so new events can be accepted
178 */
179 clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
180
181 rfc2863_policy(dev);
182 if (dev->flags & IFF_UP) {
183 if (netif_carrier_ok(dev))
184 dev_activate(dev);
185 else
186 dev_deactivate(dev);
187
188 netdev_state_change(dev);
189 }
190
191 dev_put(dev);
192 } 198 }
193 199
194 if (lweventlist) 200 if (!list_empty(&lweventlist))
195 linkwatch_schedule_work(0); 201 linkwatch_schedule_work(0);
202 spin_unlock_irq(&lweventlist_lock);
203}
204
205void linkwatch_forget_dev(struct net_device *dev)
206{
207 unsigned long flags;
208 int clean = 0;
209
210 spin_lock_irqsave(&lweventlist_lock, flags);
211 if (!list_empty(&dev->link_watch_list)) {
212 list_del_init(&dev->link_watch_list);
213 clean = 1;
214 }
215 spin_unlock_irqrestore(&lweventlist_lock, flags);
216 if (clean)
217 linkwatch_do_dev(dev);
196} 218}
197 219
198 220
@@ -216,8 +238,6 @@ void linkwatch_fire_event(struct net_device *dev)
216 bool urgent = linkwatch_urgent_event(dev); 238 bool urgent = linkwatch_urgent_event(dev);
217 239
218 if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { 240 if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
219 dev_hold(dev);
220
221 linkwatch_add_event(dev); 241 linkwatch_add_event(dev);
222 } else if (!urgent) 242 } else if (!urgent)
223 return; 243 return;
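
The rewrite above uses a standard pattern: splice the shared event list onto a private list inside a short critical section, then process entries with the lock dropped so other CPUs can keep queueing. A compact userspace sketch of that pattern (the kernel version also re-queues non-urgent events, which is omitted here):

#include <stdio.h>
#include <stddef.h>
#include <pthread.h>

struct node { int id; struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *event_list;

static void run_queue(void)
{
	struct node *work, *n;

	/* Grab the whole pending list in one short critical section. */
	pthread_mutex_lock(&list_lock);
	work = event_list;
	event_list = NULL;
	pthread_mutex_unlock(&list_lock);

	/* Process outside the lock; new events can be queued meanwhile. */
	for (n = work; n; n = n->next)
		printf("handling event %d\n", n->id);
}

int main(void)
{
	struct node a = { 1, NULL }, b = { 2, &a };

	event_list = &b;
	run_queue(); /* handles 2 then 1 */
	return 0;
}
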
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 80a96166df39..941bac907484 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -493,6 +493,9 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
493{ 493{
494 struct skb_shared_info *shinfo; 494 struct skb_shared_info *shinfo;
495 495
496 if (irqs_disabled())
497 return 0;
498
496 if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) 499 if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
497 return 0; 500 return 0;
498 501
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index b5ef237c8a74..6c916e2b8a84 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -68,7 +68,7 @@ extern struct neigh_table dn_neigh_table;
68 */ 68 */
69__le16 decnet_address = 0; 69__le16 decnet_address = 0;
70 70
71static DEFINE_RWLOCK(dndev_lock); 71static DEFINE_SPINLOCK(dndev_lock);
72static struct net_device *decnet_default_device; 72static struct net_device *decnet_default_device;
73static BLOCKING_NOTIFIER_HEAD(dnaddr_chain); 73static BLOCKING_NOTIFIER_HEAD(dnaddr_chain);
74 74
@@ -557,7 +557,8 @@ rarok:
557struct net_device *dn_dev_get_default(void) 557struct net_device *dn_dev_get_default(void)
558{ 558{
559 struct net_device *dev; 559 struct net_device *dev;
560 read_lock(&dndev_lock); 560
561 spin_lock(&dndev_lock);
561 dev = decnet_default_device; 562 dev = decnet_default_device;
562 if (dev) { 563 if (dev) {
563 if (dev->dn_ptr) 564 if (dev->dn_ptr)
@@ -565,7 +566,8 @@ struct net_device *dn_dev_get_default(void)
565 else 566 else
566 dev = NULL; 567 dev = NULL;
567 } 568 }
568 read_unlock(&dndev_lock); 569 spin_unlock(&dndev_lock);
570
569 return dev; 571 return dev;
570} 572}
571 573
@@ -575,13 +577,15 @@ int dn_dev_set_default(struct net_device *dev, int force)
575 int rv = -EBUSY; 577 int rv = -EBUSY;
576 if (!dev->dn_ptr) 578 if (!dev->dn_ptr)
577 return -ENODEV; 579 return -ENODEV;
578 write_lock(&dndev_lock); 580
581 spin_lock(&dndev_lock);
579 if (force || decnet_default_device == NULL) { 582 if (force || decnet_default_device == NULL) {
580 old = decnet_default_device; 583 old = decnet_default_device;
581 decnet_default_device = dev; 584 decnet_default_device = dev;
582 rv = 0; 585 rv = 0;
583 } 586 }
584 write_unlock(&dndev_lock); 587 spin_unlock(&dndev_lock);
588
585 if (old) 589 if (old)
586 dev_put(old); 590 dev_put(old);
587 return rv; 591 return rv;
@@ -589,13 +593,14 @@ int dn_dev_set_default(struct net_device *dev, int force)
589 593
590static void dn_dev_check_default(struct net_device *dev) 594static void dn_dev_check_default(struct net_device *dev)
591{ 595{
592 write_lock(&dndev_lock); 596 spin_lock(&dndev_lock);
593 if (dev == decnet_default_device) { 597 if (dev == decnet_default_device) {
594 decnet_default_device = NULL; 598 decnet_default_device = NULL;
595 } else { 599 } else {
596 dev = NULL; 600 dev = NULL;
597 } 601 }
598 write_unlock(&dndev_lock); 602 spin_unlock(&dndev_lock);
603
599 if (dev) 604 if (dev)
600 dev_put(dev); 605 dev_put(dev);
601} 606}
@@ -828,13 +833,17 @@ static int dn_dev_get_first(struct net_device *dev, __le16 *addr)
828 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; 833 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
829 struct dn_ifaddr *ifa; 834 struct dn_ifaddr *ifa;
830 int rv = -ENODEV; 835 int rv = -ENODEV;
836
831 if (dn_db == NULL) 837 if (dn_db == NULL)
832 goto out; 838 goto out;
839
840 rtnl_lock();
833 ifa = dn_db->ifa_list; 841 ifa = dn_db->ifa_list;
834 if (ifa != NULL) { 842 if (ifa != NULL) {
835 *addr = ifa->ifa_local; 843 *addr = ifa->ifa_local;
836 rv = 0; 844 rv = 0;
837 } 845 }
846 rtnl_unlock();
838out: 847out:
839 return rv; 848 return rv;
840} 849}
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 5a883affecd3..dd3db88f8f0a 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -393,10 +393,3 @@ ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
393 return ((ssize_t) l); 393 return ((ssize_t) l);
394} 394}
395EXPORT_SYMBOL(sysfs_format_mac); 395EXPORT_SYMBOL(sysfs_format_mac);
396
397char *print_mac(char *buf, const unsigned char *addr)
398{
399 _format_mac_addr(buf, MAC_BUF_SIZE, addr, ETH_ALEN);
400 return buf;
401}
402EXPORT_SYMBOL(print_mac);
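
print_mac() can go because printk gained the %pM extension for formatting MAC addresses in place. For reference, a userspace equivalent of what the helper did:

#include <stdio.h>

/* Format a 6-byte MAC as "aa:bb:cc:dd:ee:ff" into buf (>= 18 bytes). */
static char *format_mac(char *buf, const unsigned char *addr)
{
	snprintf(buf, 18, "%02x:%02x:%02x:%02x:%02x:%02x",
		 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return buf;
}

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };
	char buf[18];

	printf("%s\n", format_mac(buf, mac));
	return 0;
}
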
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c
index 38bac70cca10..268691256a6d 100644
--- a/net/ieee802154/wpan-class.c
+++ b/net/ieee802154/wpan-class.c
@@ -205,7 +205,7 @@ err_nl:
205err: 205err:
206 return rc; 206 return rc;
207} 207}
208module_init(wpan_phy_class_init); 208subsys_initcall(wpan_phy_class_init);
209 209
210static void __exit wpan_phy_class_exit(void) 210static void __exit wpan_phy_class_exit(void)
211{ 211{
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index c2045f9615da..7620382058a0 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1174,39 +1174,54 @@ nla_put_failure:
1174static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) 1174static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1175{ 1175{
1176 struct net *net = sock_net(skb->sk); 1176 struct net *net = sock_net(skb->sk);
1177 int idx, ip_idx; 1177 int h, s_h;
1178 int idx, s_idx;
1179 int ip_idx, s_ip_idx;
1178 struct net_device *dev; 1180 struct net_device *dev;
1179 struct in_device *in_dev; 1181 struct in_device *in_dev;
1180 struct in_ifaddr *ifa; 1182 struct in_ifaddr *ifa;
1181 int s_ip_idx, s_idx = cb->args[0]; 1183 struct hlist_head *head;
1184 struct hlist_node *node;
1182 1185
1183 s_ip_idx = ip_idx = cb->args[1]; 1186 s_h = cb->args[0];
1184 idx = 0; 1187 s_idx = idx = cb->args[1];
1185 for_each_netdev(net, dev) { 1188 s_ip_idx = ip_idx = cb->args[2];
1186 if (idx < s_idx)
1187 goto cont;
1188 if (idx > s_idx)
1189 s_ip_idx = 0;
1190 in_dev = __in_dev_get_rtnl(dev);
1191 if (!in_dev)
1192 goto cont;
1193 1189
1194 for (ifa = in_dev->ifa_list, ip_idx = 0; ifa; 1190 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1195 ifa = ifa->ifa_next, ip_idx++) { 1191 idx = 0;
1196 if (ip_idx < s_ip_idx) 1192 head = &net->dev_index_head[h];
1197 continue; 1193 rcu_read_lock();
1198 if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid, 1194 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
1195 if (idx < s_idx)
1196 goto cont;
1197 if (idx > s_idx)
1198 s_ip_idx = 0;
1199 in_dev = __in_dev_get_rcu(dev);
1200 if (!in_dev)
1201 goto cont;
1202
1203 for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
1204 ifa = ifa->ifa_next, ip_idx++) {
1205 if (ip_idx < s_ip_idx)
1206 continue;
1207 if (inet_fill_ifaddr(skb, ifa,
1208 NETLINK_CB(cb->skb).pid,
1199 cb->nlh->nlmsg_seq, 1209 cb->nlh->nlmsg_seq,
1200 RTM_NEWADDR, NLM_F_MULTI) <= 0) 1210 RTM_NEWADDR, NLM_F_MULTI) <= 0) {
1201 goto done; 1211 rcu_read_unlock();
1202 } 1212 goto done;
1213 }
1214 }
1203cont: 1215cont:
1204 idx++; 1216 idx++;
1217 }
1218 rcu_read_unlock();
1205 } 1219 }
1206 1220
1207done: 1221done:
1208 cb->args[0] = idx; 1222 cb->args[0] = h;
1209 cb->args[1] = ip_idx; 1223 cb->args[1] = idx;
1224 cb->args[2] = ip_idx;
1210 1225
1211 return skb->len; 1226 return skb->len;
1212} 1227}
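
inet_dump_ifaddr() now keeps a three-part cursor in cb->args (hash bucket h, device idx, address ip_idx) so a dump that fills the skb resumes exactly where it stopped on the next call. The resume mechanics in isolation, with stand-in data shapes:

#include <stdio.h>

#define NBUCKETS 4

/* Hypothetical cursor saved between dump calls, like cb->args[0..1]. */
struct cursor { int h, idx; };

/* Emit up to `budget` items, then save the position for the next call. */
static int dump(const int counts[NBUCKETS], struct cursor *c, int budget)
{
	int h, idx, emitted = 0;

	/* Note the reset in the increment clause: only the first bucket
	 * visited resumes mid-way, later buckets start from item 0. */
	for (h = c->h; h < NBUCKETS; h++, c->idx = 0) {
		for (idx = c->idx; idx < counts[h]; idx++) {
			if (emitted == budget) {
				c->h = h;
				c->idx = idx;
				return emitted; /* resume here next time */
			}
			printf("bucket %d item %d\n", h, idx);
			emitted++;
		}
	}
	c->h = NBUCKETS; /* done */
	return emitted;
}

int main(void)
{
	int counts[NBUCKETS] = { 2, 0, 3, 1 };
	struct cursor c = { 0, 0 };

	while (dump(counts, &c, 2) == 2)
		printf("-- skb full, resuming --\n");
	return 0;
}
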
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 816e2180bd60..6c1e56aef1f4 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -895,11 +895,11 @@ static void nl_fib_lookup_exit(struct net *net)
895 net->ipv4.fibnl = NULL; 895 net->ipv4.fibnl = NULL;
896} 896}
897 897
898static void fib_disable_ip(struct net_device *dev, int force) 898static void fib_disable_ip(struct net_device *dev, int force, int delay)
899{ 899{
900 if (fib_sync_down_dev(dev, force)) 900 if (fib_sync_down_dev(dev, force))
901 fib_flush(dev_net(dev)); 901 fib_flush(dev_net(dev));
902 rt_cache_flush(dev_net(dev), 0); 902 rt_cache_flush(dev_net(dev), delay);
903 arp_ifdown(dev); 903 arp_ifdown(dev);
904} 904}
905 905
@@ -922,7 +922,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
922 /* Last address was deleted from this interface. 922 /* Last address was deleted from this interface.
923 Disable IP. 923 Disable IP.
924 */ 924 */
925 fib_disable_ip(dev, 1); 925 fib_disable_ip(dev, 1, 0);
926 } else { 926 } else {
927 rt_cache_flush(dev_net(dev), -1); 927 rt_cache_flush(dev_net(dev), -1);
928 } 928 }
@@ -937,7 +937,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
937 struct in_device *in_dev = __in_dev_get_rtnl(dev); 937 struct in_device *in_dev = __in_dev_get_rtnl(dev);
938 938
939 if (event == NETDEV_UNREGISTER) { 939 if (event == NETDEV_UNREGISTER) {
940 fib_disable_ip(dev, 2); 940 fib_disable_ip(dev, 2, -1);
941 return NOTIFY_DONE; 941 return NOTIFY_DONE;
942 } 942 }
943 943
@@ -955,10 +955,11 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
955 rt_cache_flush(dev_net(dev), -1); 955 rt_cache_flush(dev_net(dev), -1);
956 break; 956 break;
957 case NETDEV_DOWN: 957 case NETDEV_DOWN:
958 fib_disable_ip(dev, 0); 958 fib_disable_ip(dev, 0, 0);
959 break; 959 break;
960 case NETDEV_CHANGEMTU: 960 case NETDEV_CHANGEMTU:
961 case NETDEV_CHANGE: 961 case NETDEV_CHANGE:
962 case NETDEV_UNREGISTER_PERNET:
962 rt_cache_flush(dev_net(dev), 0); 963 rt_cache_flush(dev_net(dev), 0);
963 break; 964 break;
964 } 965 }
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index bd24f6560a49..6110c6d6e613 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2313,7 +2313,8 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
2313 state->in_dev = NULL; 2313 state->in_dev = NULL;
2314 for_each_netdev_rcu(net, state->dev) { 2314 for_each_netdev_rcu(net, state->dev) {
2315 struct in_device *in_dev; 2315 struct in_device *in_dev;
2316 in_dev = in_dev_get(state->dev); 2316
2317 in_dev = __in_dev_get_rcu(state->dev);
2317 if (!in_dev) 2318 if (!in_dev)
2318 continue; 2319 continue;
2319 read_lock(&in_dev->mc_list_lock); 2320 read_lock(&in_dev->mc_list_lock);
@@ -2323,7 +2324,6 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
2323 break; 2324 break;
2324 } 2325 }
2325 read_unlock(&in_dev->mc_list_lock); 2326 read_unlock(&in_dev->mc_list_lock);
2326 in_dev_put(in_dev);
2327 } 2327 }
2328 return im; 2328 return im;
2329} 2329}
@@ -2333,16 +2333,15 @@ static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_li
2333 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); 2333 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2334 im = im->next; 2334 im = im->next;
2335 while (!im) { 2335 while (!im) {
2336 if (likely(state->in_dev != NULL)) { 2336 if (likely(state->in_dev != NULL))
2337 read_unlock(&state->in_dev->mc_list_lock); 2337 read_unlock(&state->in_dev->mc_list_lock);
2338 in_dev_put(state->in_dev); 2338
2339 } 2339 state->dev = next_net_device_rcu(state->dev);
2340 state->dev = next_net_device(state->dev);
2341 if (!state->dev) { 2340 if (!state->dev) {
2342 state->in_dev = NULL; 2341 state->in_dev = NULL;
2343 break; 2342 break;
2344 } 2343 }
2345 state->in_dev = in_dev_get(state->dev); 2344 state->in_dev = __in_dev_get_rcu(state->dev);
2346 if (!state->in_dev) 2345 if (!state->in_dev)
2347 continue; 2346 continue;
2348 read_lock(&state->in_dev->mc_list_lock); 2347 read_lock(&state->in_dev->mc_list_lock);
@@ -2384,7 +2383,6 @@ static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
2384 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); 2383 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2385 if (likely(state->in_dev != NULL)) { 2384 if (likely(state->in_dev != NULL)) {
2386 read_unlock(&state->in_dev->mc_list_lock); 2385 read_unlock(&state->in_dev->mc_list_lock);
2387 in_dev_put(state->in_dev);
2388 state->in_dev = NULL; 2386 state->in_dev = NULL;
2389 } 2387 }
2390 state->dev = NULL; 2388 state->dev = NULL;
@@ -2464,7 +2462,7 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
2464 state->im = NULL; 2462 state->im = NULL;
2465 for_each_netdev_rcu(net, state->dev) { 2463 for_each_netdev_rcu(net, state->dev) {
2466 struct in_device *idev; 2464 struct in_device *idev;
2467 idev = in_dev_get(state->dev); 2465 idev = __in_dev_get_rcu(state->dev);
2468 if (unlikely(idev == NULL)) 2466 if (unlikely(idev == NULL))
2469 continue; 2467 continue;
2470 read_lock(&idev->mc_list_lock); 2468 read_lock(&idev->mc_list_lock);
@@ -2480,7 +2478,6 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
2480 spin_unlock_bh(&im->lock); 2478 spin_unlock_bh(&im->lock);
2481 } 2479 }
2482 read_unlock(&idev->mc_list_lock); 2480 read_unlock(&idev->mc_list_lock);
2483 in_dev_put(idev);
2484 } 2481 }
2485 return psf; 2482 return psf;
2486} 2483}
@@ -2494,16 +2491,15 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
2494 spin_unlock_bh(&state->im->lock); 2491 spin_unlock_bh(&state->im->lock);
2495 state->im = state->im->next; 2492 state->im = state->im->next;
2496 while (!state->im) { 2493 while (!state->im) {
2497 if (likely(state->idev != NULL)) { 2494 if (likely(state->idev != NULL))
2498 read_unlock(&state->idev->mc_list_lock); 2495 read_unlock(&state->idev->mc_list_lock);
2499 in_dev_put(state->idev); 2496
2500 } 2497 state->dev = next_net_device_rcu(state->dev);
2501 state->dev = next_net_device(state->dev);
2502 if (!state->dev) { 2498 if (!state->dev) {
2503 state->idev = NULL; 2499 state->idev = NULL;
2504 goto out; 2500 goto out;
2505 } 2501 }
2506 state->idev = in_dev_get(state->dev); 2502 state->idev = __in_dev_get_rcu(state->dev);
2507 if (!state->idev) 2503 if (!state->idev)
2508 continue; 2504 continue;
2509 read_lock(&state->idev->mc_list_lock); 2505 read_lock(&state->idev->mc_list_lock);
@@ -2555,7 +2551,6 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
2555 } 2551 }
2556 if (likely(state->idev != NULL)) { 2552 if (likely(state->idev != NULL)) {
2557 read_unlock(&state->idev->mc_list_lock); 2553 read_unlock(&state->idev->mc_list_lock);
2558 in_dev_put(state->idev);
2559 state->idev = NULL; 2554 state->idev = NULL;
2560 } 2555 }
2561 state->dev = NULL; 2556 state->dev = NULL;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index b1fbe18feb5a..6bcfe52a9c87 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -67,9 +67,6 @@
67 * ip_id_count: idlock 67 * ip_id_count: idlock
68 */ 68 */
69 69
70/* Exported for inet_getid inline function. */
71DEFINE_SPINLOCK(inet_peer_idlock);
72
73static struct kmem_cache *peer_cachep __read_mostly; 70static struct kmem_cache *peer_cachep __read_mostly;
74 71
75#define node_height(x) x->avl_height 72#define node_height(x) x->avl_height
@@ -390,7 +387,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
390 n->v4daddr = daddr; 387 n->v4daddr = daddr;
391 atomic_set(&n->refcnt, 1); 388 atomic_set(&n->refcnt, 1);
392 atomic_set(&n->rid, 0); 389 atomic_set(&n->rid, 0);
393 n->ip_id_count = secure_ip_id(daddr); 390 atomic_set(&n->ip_id_count, secure_ip_id(daddr));
394 n->tcp_ts_stamp = 0; 391 n->tcp_ts_stamp = 0;
395 392
396 write_lock_bh(&peer_pool_lock); 393 write_lock_bh(&peer_pool_lock);
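
With ip_id_count an atomic_t, the per-peer IP ID can be advanced without taking inet_peer_idlock. The same idea in C11 atomics; this is a sketch of the technique, not the kernel's inet_getid():

#include <stdio.h>
#include <stdatomic.h>

/* Per-peer ID generator: each caller gets a unique, monotonic 16-bit ID. */
static atomic_uint ip_id_count;

static unsigned short get_id(void)
{
	/* fetch_add is atomic, so concurrent callers never see the same ID. */
	return (unsigned short)atomic_fetch_add(&ip_id_count, 1);
}

int main(void)
{
	atomic_init(&ip_id_count, 0xfffe); /* near wraparound on purpose */
	printf("%u %u %u\n", get_id(), get_id(), get_id()); /* 65534 65535 0 */
	return 0;
}
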
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index a7de9e3a8f18..c5f6af5d0f34 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -125,7 +125,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev);
125 125
126#define HASH_SIZE 16 126#define HASH_SIZE 16
127 127
128static int ipgre_net_id; 128static int ipgre_net_id __read_mostly;
129struct ipgre_net { 129struct ipgre_net {
130 struct ip_tunnel *tunnels[4][HASH_SIZE]; 130 struct ip_tunnel *tunnels[4][HASH_SIZE];
131 131
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index c5b1f71c3cd8..7242ffcc44e5 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -119,7 +119,7 @@
119#define HASH_SIZE 16 119#define HASH_SIZE 16
120#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) 120#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
121 121
122static int ipip_net_id; 122static int ipip_net_id __read_mostly;
123struct ipip_net { 123struct ipip_net {
124 struct ip_tunnel *tunnels_r_l[HASH_SIZE]; 124 struct ip_tunnel *tunnels_r_l[HASH_SIZE];
125 struct ip_tunnel *tunnels_r[HASH_SIZE]; 125 struct ip_tunnel *tunnels_r[HASH_SIZE];
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index ef4ee45b928f..54596f73eff5 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -494,8 +494,10 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
494 return -EINVAL; 494 return -EINVAL;
495 } 495 }
496 496
497 if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) 497 if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) {
498 dev_put(dev);
498 return -EADDRNOTAVAIL; 499 return -EADDRNOTAVAIL;
500 }
499 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++; 501 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
500 ip_rt_multicast_event(in_dev); 502 ip_rt_multicast_event(in_dev);
501 503
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ff258b57680b..4284ceef7945 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2852,7 +2852,7 @@ static int rt_fill_info(struct net *net,
2852 error = rt->u.dst.error; 2852 error = rt->u.dst.error;
2853 expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0; 2853 expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
2854 if (rt->peer) { 2854 if (rt->peer) {
2855 id = rt->peer->ip_id_count; 2855 id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
2856 if (rt->peer->tcp_ts_stamp) { 2856 if (rt->peer->tcp_ts_stamp) {
2857 ts = rt->peer->tcp_ts; 2857 ts = rt->peer->tcp_ts;
2858 tsage = get_seconds() - rt->peer->tcp_ts_stamp; 2858 tsage = get_seconds() - rt->peer->tcp_ts_stamp;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e0cfa633680a..524f9760193b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1183,7 +1183,9 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
1183#if TCP_DEBUG 1183#if TCP_DEBUG
1184 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1184 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1185 1185
1186 WARN_ON(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)); 1186 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1187 KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1188 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1187#endif 1189#endif
1188 1190
1189 if (inet_csk_ack_scheduled(sk)) { 1191 if (inet_csk_ack_scheduled(sk)) {
@@ -1430,11 +1432,13 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1430 /* Now that we have two receive queues this 1432 /* Now that we have two receive queues this
1431 * shouldn't happen. 1433 * shouldn't happen.
1432 */ 1434 */
1433 if (before(*seq, TCP_SKB_CB(skb)->seq)) { 1435 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
1434 printk(KERN_INFO "recvmsg bug: copied %X " 1436 KERN_INFO "recvmsg bug: copied %X "
1435 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq); 1437 "seq %X rcvnxt %X fl %X\n", *seq,
1438 TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
1439 flags))
1436 break; 1440 break;
1437 } 1441
1438 offset = *seq - TCP_SKB_CB(skb)->seq; 1442 offset = *seq - TCP_SKB_CB(skb)->seq;
1439 if (tcp_hdr(skb)->syn) 1443 if (tcp_hdr(skb)->syn)
1440 offset--; 1444 offset--;
@@ -1443,8 +1447,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1443 if (tcp_hdr(skb)->fin) 1447 if (tcp_hdr(skb)->fin)
1444 goto found_fin_ok; 1448 goto found_fin_ok;
1445 WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: " 1449 WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
1446 "copied %X seq %X\n", *seq, 1450 "copied %X seq %X rcvnxt %X fl %X\n",
1447 TCP_SKB_CB(skb)->seq); 1451 *seq, TCP_SKB_CB(skb)->seq,
1452 tp->rcv_nxt, flags);
1448 } 1453 }
1449 1454
1450 /* Well, if we have backlog, try to process it now yet. */ 1455 /* Well, if we have backlog, try to process it now yet. */
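
The WARN(cond, fmt, ...) form both prints the sequence numbers that make a one-shot report actionable and yields the condition, so it can drive the branch directly. A userspace macro in the same spirit (the macro name and shape are mine):

#include <stdio.h>

/* Evaluate cond; if true, print a diagnostic and yield 1, else yield 0,
 * so the macro can sit directly inside an if (...). */
#define WARN_FMT(cond, ...) \
	((cond) ? (fprintf(stderr, __VA_ARGS__), 1) : 0)

int main(void)
{
	unsigned copied = 0x10, seq = 0x20;

	if (WARN_FMT(copied < seq, "recvmsg bug: copied %X seq %X\n",
		     copied, seq))
		return 1; /* bail out, as the TCP receive loop does */
	return 0;
}
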
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index be0c5bf7bfca..cc306ac6eb51 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -140,7 +140,7 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
140 * "len" is invariant segment length, including TCP header. 140 * "len" is invariant segment length, including TCP header.
141 */ 141 */
142 len += skb->data - skb_transport_header(skb); 142 len += skb->data - skb_transport_header(skb);
143 if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) || 143 if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
144 /* If PSH is not set, packet should be 144 /* If PSH is not set, packet should be
145 * full sized, provided peer TCP is not badly broken. 145 * full sized, provided peer TCP is not badly broken.
146 * This observation (if it is correct 8)) allows 146 * This observation (if it is correct 8)) allows
@@ -411,7 +411,7 @@ void tcp_initialize_rcv_mss(struct sock *sk)
411 unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); 411 unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
412 412
413 hint = min(hint, tp->rcv_wnd / 2); 413 hint = min(hint, tp->rcv_wnd / 2);
414 hint = min(hint, TCP_MIN_RCVMSS); 414 hint = min(hint, TCP_MSS_DEFAULT);
415 hint = max(hint, TCP_MIN_MSS); 415 hint = max(hint, TCP_MIN_MSS);
416 416
417 inet_csk(sk)->icsk_ack.rcv_mss = hint; 417 inet_csk(sk)->icsk_ack.rcv_mss = hint;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 657ae334f125..df18ce04f41e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -204,7 +204,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
204 * when trying new connection. 204 * when trying new connection.
205 */ 205 */
206 if (peer != NULL && 206 if (peer != NULL &&
207 peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) { 207 (u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
208 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp; 208 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
209 tp->rx_opt.ts_recent = peer->tcp_ts; 209 tp->rx_opt.ts_recent = peer->tcp_ts;
210 } 210 }
@@ -217,7 +217,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
217 if (inet->opt) 217 if (inet->opt)
218 inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen; 218 inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;
219 219
220 tp->rx_opt.mss_clamp = 536; 220 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
221 221
222 /* Socket identity is still unknown (sport may be zero). 222 /* Socket identity is still unknown (sport may be zero).
223 * However we set state to SYN-SENT and not releasing socket 223 * However we set state to SYN-SENT and not releasing socket
@@ -1268,7 +1268,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1268 goto drop_and_free; 1268 goto drop_and_free;
1269 1269
1270 tcp_clear_options(&tmp_opt); 1270 tcp_clear_options(&tmp_opt);
1271 tmp_opt.mss_clamp = 536; 1271 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1272 tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss; 1272 tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss;
1273 1273
1274 tcp_parse_options(skb, &tmp_opt, 0, dst); 1274 tcp_parse_options(skb, &tmp_opt, 0, dst);
@@ -1308,7 +1308,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1308 tcp_death_row.sysctl_tw_recycle && 1308 tcp_death_row.sysctl_tw_recycle &&
1309 (peer = rt_get_peer((struct rtable *)dst)) != NULL && 1309 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1310 peer->v4daddr == saddr) { 1310 peer->v4daddr == saddr) {
1311 if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL && 1311 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1312 (s32)(peer->tcp_ts - req->ts_recent) > 1312 (s32)(peer->tcp_ts - req->ts_recent) >
1313 TCP_PAWS_WINDOW) { 1313 TCP_PAWS_WINDOW) {
1314 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); 1314 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
@@ -1727,9 +1727,9 @@ int tcp_v4_remember_stamp(struct sock *sk)
1727 1727
1728 if (peer) { 1728 if (peer) {
1729 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 || 1729 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
1730 (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() && 1730 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
1731 peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) { 1731 peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
1732 peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp; 1732 peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
1733 peer->tcp_ts = tp->rx_opt.ts_recent; 1733 peer->tcp_ts = tp->rx_opt.ts_recent;
1734 } 1734 }
1735 if (release_it) 1735 if (release_it)
@@ -1748,9 +1748,9 @@ int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
1748 const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); 1748 const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
1749 1749
1750 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 || 1750 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
1751 (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() && 1751 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
1752 peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) { 1752 peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
1753 peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp; 1753 peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
1754 peer->tcp_ts = tcptw->tw_ts_recent; 1754 peer->tcp_ts = tcptw->tw_ts_recent;
1755 } 1755 }
1756 inet_putpeer(peer); 1756 inet_putpeer(peer);
@@ -1815,7 +1815,7 @@ static int tcp_v4_init_sock(struct sock *sk)
1815 */ 1815 */
1816 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 1816 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1817 tp->snd_cwnd_clamp = ~0; 1817 tp->snd_cwnd_clamp = ~0;
1818 tp->mss_cache = 536; 1818 tp->mss_cache = TCP_MSS_DEFAULT;
1819 1819
1820 tp->reordering = sysctl_tcp_reordering; 1820 tp->reordering = sysctl_tcp_reordering;
1821 icsk->icsk_ca_ops = &tcp_init_congestion_ops; 1821 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
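
The timestamp checks are rewritten as (u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL because unsigned subtraction stays correct across a u32 wrap, while the old stamp + TCP_PAWS_MSL >= now form can overflow and misclassify a fresh timestamp. A demonstration near the wrap point:

#include <stdio.h>
#include <stdint.h>

#define PAWS_MSL 60u

/* Wraparound-safe: "have at most PAWS_MSL seconds elapsed since stamp?" */
static int fresh_safe(uint32_t now, uint32_t stamp)
{
	return now - stamp <= PAWS_MSL; /* u32 subtraction wraps correctly */
}

/* Overflow-prone form the patch removes. */
static int fresh_broken(uint32_t now, uint32_t stamp)
{
	return stamp + PAWS_MSL >= now; /* stamp + MSL may wrap past zero */
}

int main(void)
{
	uint32_t stamp = 0xffffffe0u;  /* timestamp taken near the u32 wrap */
	uint32_t now   = 0xfffffff0u;  /* 16 seconds later: still fresh */

	/* safe: 1 (correct), broken: 0 (stamp + MSL wrapped to a small value) */
	printf("safe: %d broken: %d\n",
	       fresh_safe(now, stamp), fresh_broken(now, stamp));
	return 0;
}
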
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index a9d34e224cb6..4be22280e6b3 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -476,7 +476,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
476 if (newtp->af_specific->md5_lookup(sk, newsk)) 476 if (newtp->af_specific->md5_lookup(sk, newsk))
477 newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; 477 newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
478#endif 478#endif
479 if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len) 479 if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
480 newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; 480 newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
481 newtp->rx_opt.mss_clamp = req->mss; 481 newtp->rx_opt.mss_clamp = req->mss;
482 TCP_ECN_openreq_child(newtp, req); 482 TCP_ECN_openreq_child(newtp, req);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 0ab39fedd2dc..522bdc77206c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3481,91 +3481,114 @@ enum addr_type_t
3481 ANYCAST_ADDR, 3481 ANYCAST_ADDR,
3482}; 3482};
3483 3483
3484/* called with rcu_read_lock() */
3485static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
3486 struct netlink_callback *cb, enum addr_type_t type,
3487 int s_ip_idx, int *p_ip_idx)
3488{
3489 struct inet6_ifaddr *ifa;
3490 struct ifmcaddr6 *ifmca;
3491 struct ifacaddr6 *ifaca;
3492 int err = 1;
3493 int ip_idx = *p_ip_idx;
3494
3495 read_lock_bh(&idev->lock);
3496 switch (type) {
3497 case UNICAST_ADDR:
3498 /* unicast address incl. temp addr */
3499 for (ifa = idev->addr_list; ifa;
3500 ifa = ifa->if_next, ip_idx++) {
3501 if (ip_idx < s_ip_idx)
3502 continue;
3503 err = inet6_fill_ifaddr(skb, ifa,
3504 NETLINK_CB(cb->skb).pid,
3505 cb->nlh->nlmsg_seq,
3506 RTM_NEWADDR,
3507 NLM_F_MULTI);
3508 if (err <= 0)
3509 break;
3510 }
3511 break;
3512 case MULTICAST_ADDR:
3513 /* multicast address */
3514 for (ifmca = idev->mc_list; ifmca;
3515 ifmca = ifmca->next, ip_idx++) {
3516 if (ip_idx < s_ip_idx)
3517 continue;
3518 err = inet6_fill_ifmcaddr(skb, ifmca,
3519 NETLINK_CB(cb->skb).pid,
3520 cb->nlh->nlmsg_seq,
3521 RTM_GETMULTICAST,
3522 NLM_F_MULTI);
3523 if (err <= 0)
3524 break;
3525 }
3526 break;
3527 case ANYCAST_ADDR:
3528 /* anycast address */
3529 for (ifaca = idev->ac_list; ifaca;
3530 ifaca = ifaca->aca_next, ip_idx++) {
3531 if (ip_idx < s_ip_idx)
3532 continue;
3533 err = inet6_fill_ifacaddr(skb, ifaca,
3534 NETLINK_CB(cb->skb).pid,
3535 cb->nlh->nlmsg_seq,
3536 RTM_GETANYCAST,
3537 NLM_F_MULTI);
3538 if (err <= 0)
3539 break;
3540 }
3541 break;
3542 default:
3543 break;
3544 }
3545 read_unlock_bh(&idev->lock);
3546 *p_ip_idx = ip_idx;
3547 return err;
3548}
3549
3484static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, 3550static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
3485 enum addr_type_t type) 3551 enum addr_type_t type)
3486{ 3552{
3553 struct net *net = sock_net(skb->sk);
3554 int h, s_h;
3487 int idx, ip_idx; 3555 int idx, ip_idx;
3488 int s_idx, s_ip_idx; 3556 int s_idx, s_ip_idx;
3489 int err = 1;
3490 struct net_device *dev; 3557 struct net_device *dev;
3491 struct inet6_dev *idev = NULL; 3558 struct inet6_dev *idev;
3492 struct inet6_ifaddr *ifa; 3559 struct hlist_head *head;
3493 struct ifmcaddr6 *ifmca; 3560 struct hlist_node *node;
3494 struct ifacaddr6 *ifaca;
3495 struct net *net = sock_net(skb->sk);
3496 3561
3497 s_idx = cb->args[0]; 3562 s_h = cb->args[0];
3498 s_ip_idx = ip_idx = cb->args[1]; 3563 s_idx = idx = cb->args[1];
3564 s_ip_idx = ip_idx = cb->args[2];
3499 3565
3500 idx = 0; 3566 rcu_read_lock();
3501 for_each_netdev(net, dev) { 3567 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
3502 if (idx < s_idx) 3568 idx = 0;
3503 goto cont; 3569 head = &net->dev_index_head[h];
3504 if (idx > s_idx) 3570 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
3505 s_ip_idx = 0; 3571 if (idx < s_idx)
3506 ip_idx = 0; 3572 goto cont;
3507 if ((idev = in6_dev_get(dev)) == NULL) 3573 if (idx > s_idx)
3508 goto cont; 3574 s_ip_idx = 0;
3509 read_lock_bh(&idev->lock); 3575 ip_idx = 0;
3510 switch (type) { 3576 if ((idev = __in6_dev_get(dev)) == NULL)
3511 case UNICAST_ADDR: 3577 goto cont;
3512 /* unicast address incl. temp addr */
3513 for (ifa = idev->addr_list; ifa;
3514 ifa = ifa->if_next, ip_idx++) {
3515 if (ip_idx < s_ip_idx)
3516 continue;
3517 err = inet6_fill_ifaddr(skb, ifa,
3518 NETLINK_CB(cb->skb).pid,
3519 cb->nlh->nlmsg_seq,
3520 RTM_NEWADDR,
3521 NLM_F_MULTI);
3522 if (err <= 0)
3523 break;
3524 }
3525 break;
3526 case MULTICAST_ADDR:
3527 /* multicast address */
3528 for (ifmca = idev->mc_list; ifmca;
3529 ifmca = ifmca->next, ip_idx++) {
3530 if (ip_idx < s_ip_idx)
3531 continue;
3532 err = inet6_fill_ifmcaddr(skb, ifmca,
3533 NETLINK_CB(cb->skb).pid,
3534 cb->nlh->nlmsg_seq,
3535 RTM_GETMULTICAST,
3536 NLM_F_MULTI);
3537 if (err <= 0)
3538 break;
3539 }
3540 break;
3541 case ANYCAST_ADDR:
3542 /* anycast address */
3543 for (ifaca = idev->ac_list; ifaca;
3544 ifaca = ifaca->aca_next, ip_idx++) {
3545 if (ip_idx < s_ip_idx)
3546 continue;
3547 err = inet6_fill_ifacaddr(skb, ifaca,
3548 NETLINK_CB(cb->skb).pid,
3549 cb->nlh->nlmsg_seq,
3550 RTM_GETANYCAST,
3551 NLM_F_MULTI);
3552 if (err <= 0)
3553 break;
3554 }
3555 break;
3556 default:
3557 break;
3558 }
3559 read_unlock_bh(&idev->lock);
3560 in6_dev_put(idev);
3561 3578
3562 if (err <= 0) 3579 if (in6_dump_addrs(idev, skb, cb, type,
3563 break; 3580 s_ip_idx, &ip_idx) <= 0)
3581 goto done;
3564cont: 3582cont:
3565 idx++; 3583 idx++;
3584 }
3566 } 3585 }
3567 cb->args[0] = idx; 3586done:
3568 cb->args[1] = ip_idx; 3587 rcu_read_unlock();
3588 cb->args[0] = h;
3589 cb->args[1] = idx;
3590 cb->args[2] = ip_idx;
3591
3569 return skb->len; 3592 return skb->len;
3570} 3593}
3571 3594
@@ -3830,7 +3853,7 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
3830{ 3853{
3831 struct net *net = sock_net(skb->sk); 3854 struct net *net = sock_net(skb->sk);
3832 int h, s_h; 3855 int h, s_h;
3833 int idx = 0, err, s_idx; 3856 int idx = 0, s_idx;
3834 struct net_device *dev; 3857 struct net_device *dev;
3835 struct inet6_dev *idev; 3858 struct inet6_dev *idev;
3836 struct hlist_head *head; 3859 struct hlist_head *head;
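
The addrconf rewrite above swaps the flat for_each_netdev() walk for the device-index hash table under RCU, and widens the netlink resume cursor from two cb->args[] slots to three. A condensed sketch of the cursor discipline, with names as in the hunk (error paths elided):

	s_h      = cb->args[0];	/* hash bucket to resume at   */
	s_idx    = cb->args[1];	/* device within that bucket  */
	s_ip_idx = cb->args[2];	/* address within that device */

	rcu_read_lock();
	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		hlist_for_each_entry_rcu(dev, node,
					 &net->dev_index_head[h], index_hlist) {
			if (idx < s_idx)
				goto cont;	/* already dumped last time */
			if (idx > s_idx)
				s_ip_idx = 0;	/* fresh device: start at 0 */
			ip_idx = 0;
			idev = __in6_dev_get(dev);
			if (idev && in6_dump_addrs(idev, skb, cb, type,
						   s_ip_idx, &ip_idx) <= 0)
				goto done;	/* skb full: keep cursor    */
	cont:
			idx++;
		}
	}
	done:
		rcu_read_unlock();
		cb->args[0] = h; cb->args[1] = idx; cb->args[2] = ip_idx;

Factoring the three address loops into in6_dump_addrs() is what makes the outer walk short enough to restructure this way.
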
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 2f00ca83f049..f1c74c8ef9de 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -431,9 +431,9 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq)
431 struct net *net = seq_file_net(seq); 431 struct net *net = seq_file_net(seq);
432 432
433 state->idev = NULL; 433 state->idev = NULL;
434 for_each_netdev(net, state->dev) { 434 for_each_netdev_rcu(net, state->dev) {
435 struct inet6_dev *idev; 435 struct inet6_dev *idev;
436 idev = in6_dev_get(state->dev); 436 idev = __in6_dev_get(state->dev);
437 if (!idev) 437 if (!idev)
438 continue; 438 continue;
439 read_lock_bh(&idev->lock); 439 read_lock_bh(&idev->lock);
@@ -443,7 +443,6 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq)
443 break; 443 break;
444 } 444 }
445 read_unlock_bh(&idev->lock); 445 read_unlock_bh(&idev->lock);
446 in6_dev_put(idev);
447 } 446 }
448 return im; 447 return im;
449} 448}
@@ -454,16 +453,15 @@ static struct ifacaddr6 *ac6_get_next(struct seq_file *seq, struct ifacaddr6 *im
454 453
455 im = im->aca_next; 454 im = im->aca_next;
456 while (!im) { 455 while (!im) {
457 if (likely(state->idev != NULL)) { 456 if (likely(state->idev != NULL))
458 read_unlock_bh(&state->idev->lock); 457 read_unlock_bh(&state->idev->lock);
459 in6_dev_put(state->idev); 458
460 } 459 state->dev = next_net_device_rcu(state->dev);
461 state->dev = next_net_device(state->dev);
462 if (!state->dev) { 460 if (!state->dev) {
463 state->idev = NULL; 461 state->idev = NULL;
464 break; 462 break;
465 } 463 }
466 state->idev = in6_dev_get(state->dev); 464 state->idev = __in6_dev_get(state->dev);
467 if (!state->idev) 465 if (!state->idev)
468 continue; 466 continue;
469 read_lock_bh(&state->idev->lock); 467 read_lock_bh(&state->idev->lock);
@@ -482,29 +480,30 @@ static struct ifacaddr6 *ac6_get_idx(struct seq_file *seq, loff_t pos)
482} 480}
483 481
484static void *ac6_seq_start(struct seq_file *seq, loff_t *pos) 482static void *ac6_seq_start(struct seq_file *seq, loff_t *pos)
485 __acquires(dev_base_lock) 483 __acquires(RCU)
486{ 484{
487 read_lock(&dev_base_lock); 485 rcu_read_lock();
488 return ac6_get_idx(seq, *pos); 486 return ac6_get_idx(seq, *pos);
489} 487}
490 488
491static void *ac6_seq_next(struct seq_file *seq, void *v, loff_t *pos) 489static void *ac6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
492{ 490{
493 struct ifacaddr6 *im; 491 struct ifacaddr6 *im = ac6_get_next(seq, v);
494 im = ac6_get_next(seq, v); 492
495 ++*pos; 493 ++*pos;
496 return im; 494 return im;
497} 495}
498 496
499static void ac6_seq_stop(struct seq_file *seq, void *v) 497static void ac6_seq_stop(struct seq_file *seq, void *v)
500 __releases(dev_base_lock) 498 __releases(RCU)
501{ 499{
502 struct ac6_iter_state *state = ac6_seq_private(seq); 500 struct ac6_iter_state *state = ac6_seq_private(seq);
501
503 if (likely(state->idev != NULL)) { 502 if (likely(state->idev != NULL)) {
504 read_unlock_bh(&state->idev->lock); 503 read_unlock_bh(&state->idev->lock);
505 in6_dev_put(state->idev); 504 state->idev = NULL;
506 } 505 }
507 read_unlock(&dev_base_lock); 506 rcu_read_unlock();
508} 507}
509 508
510static int ac6_seq_show(struct seq_file *seq, void *v) 509static int ac6_seq_show(struct seq_file *seq, void *v)
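
This seq_file conversion (mirrored for mcast.c below) follows a single pattern: rcu_read_lock()/rcu_read_unlock() replace dev_base_lock, the refcounting in6_dev_get()/in6_dev_put() pair becomes a plain __in6_dev_get() because RCU now guarantees the idev outlives the read side, and the sparse annotations move from __acquires(dev_base_lock) to __acquires(RCU). The skeleton reduced to its locking, with foo_* as placeholder names (a sketch, not the full iterator):

	static void *foo_seq_start(struct seq_file *seq, loff_t *pos)
		__acquires(RCU)
	{
		rcu_read_lock();	/* pins every dev and idev we visit */
		return foo_get_idx(seq, *pos);
	}

	static void foo_seq_stop(struct seq_file *seq, void *v)
		__releases(RCU)
	{
		/* the per-idev read_lock_bh() is dropped as the walk
		 * advances; only the RCU read side remains here */
		rcu_read_unlock();
	}
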
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 1d614113a4ba..e5c0f6bb8314 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -78,7 +78,7 @@ static void ip6_fb_tnl_dev_init(struct net_device *dev);
78static void ip6_tnl_dev_init(struct net_device *dev); 78static void ip6_tnl_dev_init(struct net_device *dev);
79static void ip6_tnl_dev_setup(struct net_device *dev); 79static void ip6_tnl_dev_setup(struct net_device *dev);
80 80
81static int ip6_tnl_net_id; 81static int ip6_tnl_net_id __read_mostly;
82struct ip6_tnl_net { 82struct ip6_tnl_net {
83 /* the IPv6 tunnel fallback device */ 83 /* the IPv6 tunnel fallback device */
84 struct net_device *fb_tnl_dev; 84 struct net_device *fb_tnl_dev;
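
Several one-liners in this series (here and in sit.c, af_key.c, the conntrack protocol trackers, pn_dev.c) tag per-net id variables __read_mostly. These are written once at registration and read on every packet, so segregating them keeps write-hot data from sharing their cache lines. Roughly, on architectures that define it (sketch per include/linux/cache.h; the exact section name may vary):

	#define __read_mostly  __attribute__((__section__(".data.read_mostly")))

	static int ip6_tnl_net_id __read_mostly;	/* set once in pernet init */
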
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index f9fcf690bd5d..1f9c44442e65 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2375,9 +2375,9 @@ static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
2375 struct net *net = seq_file_net(seq); 2375 struct net *net = seq_file_net(seq);
2376 2376
2377 state->idev = NULL; 2377 state->idev = NULL;
2378 for_each_netdev(net, state->dev) { 2378 for_each_netdev_rcu(net, state->dev) {
2379 struct inet6_dev *idev; 2379 struct inet6_dev *idev;
2380 idev = in6_dev_get(state->dev); 2380 idev = __in6_dev_get(state->dev);
2381 if (!idev) 2381 if (!idev)
2382 continue; 2382 continue;
2383 read_lock_bh(&idev->lock); 2383 read_lock_bh(&idev->lock);
@@ -2387,7 +2387,6 @@ static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
2387 break; 2387 break;
2388 } 2388 }
2389 read_unlock_bh(&idev->lock); 2389 read_unlock_bh(&idev->lock);
2390 in6_dev_put(idev);
2391 } 2390 }
2392 return im; 2391 return im;
2393} 2392}
@@ -2398,16 +2397,15 @@ static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr
2398 2397
2399 im = im->next; 2398 im = im->next;
2400 while (!im) { 2399 while (!im) {
2401 if (likely(state->idev != NULL)) { 2400 if (likely(state->idev != NULL))
2402 read_unlock_bh(&state->idev->lock); 2401 read_unlock_bh(&state->idev->lock);
2403 in6_dev_put(state->idev); 2402
2404 } 2403 state->dev = next_net_device_rcu(state->dev);
2405 state->dev = next_net_device(state->dev);
2406 if (!state->dev) { 2404 if (!state->dev) {
2407 state->idev = NULL; 2405 state->idev = NULL;
2408 break; 2406 break;
2409 } 2407 }
2410 state->idev = in6_dev_get(state->dev); 2408 state->idev = __in6_dev_get(state->dev);
2411 if (!state->idev) 2409 if (!state->idev)
2412 continue; 2410 continue;
2413 read_lock_bh(&state->idev->lock); 2411 read_lock_bh(&state->idev->lock);
@@ -2426,31 +2424,31 @@ static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
2426} 2424}
2427 2425
2428static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos) 2426static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
2429 __acquires(dev_base_lock) 2427 __acquires(RCU)
2430{ 2428{
2431 read_lock(&dev_base_lock); 2429 rcu_read_lock();
2432 return igmp6_mc_get_idx(seq, *pos); 2430 return igmp6_mc_get_idx(seq, *pos);
2433} 2431}
2434 2432
2435static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2433static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2436{ 2434{
2437 struct ifmcaddr6 *im; 2435 struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);
2438 im = igmp6_mc_get_next(seq, v); 2436
2439 ++*pos; 2437 ++*pos;
2440 return im; 2438 return im;
2441} 2439}
2442 2440
2443static void igmp6_mc_seq_stop(struct seq_file *seq, void *v) 2441static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
2444 __releases(dev_base_lock) 2442 __releases(RCU)
2445{ 2443{
2446 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); 2444 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2445
2447 if (likely(state->idev != NULL)) { 2446 if (likely(state->idev != NULL)) {
2448 read_unlock_bh(&state->idev->lock); 2447 read_unlock_bh(&state->idev->lock);
2449 in6_dev_put(state->idev);
2450 state->idev = NULL; 2448 state->idev = NULL;
2451 } 2449 }
2452 state->dev = NULL; 2450 state->dev = NULL;
2453 read_unlock(&dev_base_lock); 2451 rcu_read_unlock();
2454} 2452}
2455 2453
2456static int igmp6_mc_seq_show(struct seq_file *seq, void *v) 2454static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
@@ -2507,9 +2505,9 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
2507 2505
2508 state->idev = NULL; 2506 state->idev = NULL;
2509 state->im = NULL; 2507 state->im = NULL;
2510 for_each_netdev(net, state->dev) { 2508 for_each_netdev_rcu(net, state->dev) {
2511 struct inet6_dev *idev; 2509 struct inet6_dev *idev;
2512 idev = in6_dev_get(state->dev); 2510 idev = __in6_dev_get(state->dev);
2513 if (unlikely(idev == NULL)) 2511 if (unlikely(idev == NULL))
2514 continue; 2512 continue;
2515 read_lock_bh(&idev->lock); 2513 read_lock_bh(&idev->lock);
@@ -2525,7 +2523,6 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
2525 spin_unlock_bh(&im->mca_lock); 2523 spin_unlock_bh(&im->mca_lock);
2526 } 2524 }
2527 read_unlock_bh(&idev->lock); 2525 read_unlock_bh(&idev->lock);
2528 in6_dev_put(idev);
2529 } 2526 }
2530 return psf; 2527 return psf;
2531} 2528}
@@ -2539,16 +2536,15 @@ static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_s
2539 spin_unlock_bh(&state->im->mca_lock); 2536 spin_unlock_bh(&state->im->mca_lock);
2540 state->im = state->im->next; 2537 state->im = state->im->next;
2541 while (!state->im) { 2538 while (!state->im) {
2542 if (likely(state->idev != NULL)) { 2539 if (likely(state->idev != NULL))
2543 read_unlock_bh(&state->idev->lock); 2540 read_unlock_bh(&state->idev->lock);
2544 in6_dev_put(state->idev); 2541
2545 } 2542 state->dev = next_net_device_rcu(state->dev);
2546 state->dev = next_net_device(state->dev);
2547 if (!state->dev) { 2543 if (!state->dev) {
2548 state->idev = NULL; 2544 state->idev = NULL;
2549 goto out; 2545 goto out;
2550 } 2546 }
2551 state->idev = in6_dev_get(state->dev); 2547 state->idev = __in6_dev_get(state->dev);
2552 if (!state->idev) 2548 if (!state->idev)
2553 continue; 2549 continue;
2554 read_lock_bh(&state->idev->lock); 2550 read_lock_bh(&state->idev->lock);
@@ -2573,9 +2569,9 @@ static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
2573} 2569}
2574 2570
2575static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos) 2571static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
2576 __acquires(dev_base_lock) 2572 __acquires(RCU)
2577{ 2573{
2578 read_lock(&dev_base_lock); 2574 rcu_read_lock();
2579 return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; 2575 return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2580} 2576}
2581 2577
@@ -2591,7 +2587,7 @@ static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2591} 2587}
2592 2588
2593static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v) 2589static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
2594 __releases(dev_base_lock) 2590 __releases(RCU)
2595{ 2591{
2596 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); 2592 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2597 if (likely(state->im != NULL)) { 2593 if (likely(state->im != NULL)) {
@@ -2600,11 +2596,10 @@ static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
2600 } 2596 }
2601 if (likely(state->idev != NULL)) { 2597 if (likely(state->idev != NULL)) {
2602 read_unlock_bh(&state->idev->lock); 2598 read_unlock_bh(&state->idev->lock);
2603 in6_dev_put(state->idev);
2604 state->idev = NULL; 2599 state->idev = NULL;
2605 } 2600 }
2606 state->dev = NULL; 2601 state->dev = NULL;
2607 read_unlock(&dev_base_lock); 2602 rcu_read_unlock();
2608} 2603}
2609 2604
2610static int igmp6_mcf_seq_show(struct seq_file *seq, void *v) 2605static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
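
Stepping between devices in these iterators is done with next_net_device_rcu(), the RCU-safe sibling of next_net_device() introduced alongside these conversions. Its shape (sketch, per include/linux/netdevice.h in this series, modulo exact spelling):

	static inline struct net_device *next_net_device_rcu(struct net_device *dev)
	{
		struct net *net = dev_net(dev);
		struct list_head *lh = rcu_dereference(dev->dev_list.next);

		/* the list head marks the end of the per-net device list */
		return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
	}
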
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index b6e145a673ab..d9deaa7753ef 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -66,7 +66,7 @@ static void ipip6_fb_tunnel_init(struct net_device *dev);
66static void ipip6_tunnel_init(struct net_device *dev); 66static void ipip6_tunnel_init(struct net_device *dev);
67static void ipip6_tunnel_setup(struct net_device *dev); 67static void ipip6_tunnel_setup(struct net_device *dev);
68 68
69static int sit_net_id; 69static int sit_net_id __read_mostly;
70struct sit_net { 70struct sit_net {
71 struct ip_tunnel *tunnels_r_l[HASH_SIZE]; 71 struct ip_tunnel *tunnels_r_l[HASH_SIZE];
72 struct ip_tunnel *tunnels_r[HASH_SIZE]; 72 struct ip_tunnel *tunnels_r[HASH_SIZE];
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 696a22f034e8..de709091b26d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1851,7 +1851,7 @@ static int tcp_v6_init_sock(struct sock *sk)
1851 */ 1851 */
1852 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 1852 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1853 tp->snd_cwnd_clamp = ~0; 1853 tp->snd_cwnd_clamp = ~0;
1854 tp->mss_cache = 536; 1854 tp->mss_cache = TCP_MSS_DEFAULT;
1855 1855
1856 tp->reordering = sysctl_tcp_reordering; 1856 tp->reordering = sysctl_tcp_reordering;
1857 1857
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 3973d0e61e56..3b1f5f5f8de7 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1768,7 +1768,6 @@ static void iucv_tasklet_fn(unsigned long ignored)
1768 */ 1768 */
1769static void iucv_work_fn(struct work_struct *work) 1769static void iucv_work_fn(struct work_struct *work)
1770{ 1770{
1771 typedef void iucv_irq_fn(struct iucv_irq_data *);
1772 LIST_HEAD(work_queue); 1771 LIST_HEAD(work_queue);
1773 struct iucv_irq_list *p, *n; 1772 struct iucv_irq_list *p, *n;
1774 1773
@@ -1878,14 +1877,25 @@ int iucv_path_table_empty(void)
1878static int iucv_pm_freeze(struct device *dev) 1877static int iucv_pm_freeze(struct device *dev)
1879{ 1878{
1880 int cpu; 1879 int cpu;
1880 struct iucv_irq_list *p, *n;
1881 int rc = 0; 1881 int rc = 0;
1882 1882
1883#ifdef CONFIG_PM_DEBUG 1883#ifdef CONFIG_PM_DEBUG
1884 printk(KERN_WARNING "iucv_pm_freeze\n"); 1884 printk(KERN_WARNING "iucv_pm_freeze\n");
1885#endif 1885#endif
1886 if (iucv_pm_state != IUCV_PM_FREEZING) {
1887 for_each_cpu_mask_nr(cpu, iucv_irq_cpumask)
1888 smp_call_function_single(cpu, iucv_block_cpu_almost,
1889 NULL, 1);
1890 cancel_work_sync(&iucv_work);
1891 list_for_each_entry_safe(p, n, &iucv_work_queue, list) {
1892 list_del_init(&p->list);
1893 iucv_sever_pathid(p->data.ippathid,
1894 iucv_error_no_listener);
1895 kfree(p);
1896 }
1897 }
1886 iucv_pm_state = IUCV_PM_FREEZING; 1898 iucv_pm_state = IUCV_PM_FREEZING;
1887 for_each_cpu_mask_nr(cpu, iucv_irq_cpumask)
1888 smp_call_function_single(cpu, iucv_block_cpu_almost, NULL, 1);
1889 if (dev->driver && dev->driver->pm && dev->driver->pm->freeze) 1899 if (dev->driver && dev->driver->pm && dev->driver->pm->freeze)
1890 rc = dev->driver->pm->freeze(dev); 1900 rc = dev->driver->pm->freeze(dev);
1891 if (iucv_path_table_empty()) 1901 if (iucv_path_table_empty())
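
The iucv freeze hunk enforces a quiesce-then-drain order on the first freeze pass: silence the interrupt producers on every CPU, wait out the work-queue consumer, then dispose of whatever is still queued by severing each orphaned path. In outline (stop_producers() is shorthand for the smp_call_function_single() loop above, not a real helper):

	stop_producers();		/* iucv_block_cpu_almost on each cpu   */
	cancel_work_sync(&iucv_work);	/* no consumer still walking the list */
	list_for_each_entry_safe(p, n, &iucv_work_queue, list) {
		list_del_init(&p->list);
		iucv_sever_pathid(p->data.ippathid, iucv_error_no_listener);
		kfree(p);		/* nothing can requeue it now */
	}

Any of the three steps out of order would race: a live producer could requeue behind the drain, and a live consumer could be mid-entry when it is freed.
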
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 86b2c22d0918..478c8b32a5fb 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -35,7 +35,7 @@
35#define _X2KEY(x) ((x) == XFRM_INF ? 0 : (x)) 35#define _X2KEY(x) ((x) == XFRM_INF ? 0 : (x))
36#define _KEY2X(x) ((x) == 0 ? XFRM_INF : (x)) 36#define _KEY2X(x) ((x) == 0 ? XFRM_INF : (x))
37 37
38static int pfkey_net_id; 38static int pfkey_net_id __read_mostly;
39struct netns_pfkey { 39struct netns_pfkey {
40 /* List of all pfkey sockets. */ 40 /* List of all pfkey sockets. */
41 struct hlist_head table; 41 struct hlist_head table;
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 1b816a2ea813..80abdf297b36 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -384,7 +384,7 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
384}; 384};
385 385
386/* this module per-net specifics */ 386/* this module per-net specifics */
387static int dccp_net_id; 387static int dccp_net_id __read_mostly;
388struct dccp_net { 388struct dccp_net {
389 int dccp_loose; 389 int dccp_loose;
390 unsigned int dccp_timeout[CT_DCCP_MAX + 1]; 390 unsigned int dccp_timeout[CT_DCCP_MAX + 1];
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index a54a0af0edba..91d0e719d67c 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -43,7 +43,7 @@
43#define GRE_TIMEOUT (30 * HZ) 43#define GRE_TIMEOUT (30 * HZ)
44#define GRE_STREAM_TIMEOUT (180 * HZ) 44#define GRE_STREAM_TIMEOUT (180 * HZ)
45 45
46static int proto_gre_net_id; 46static int proto_gre_net_id __read_mostly;
47struct netns_proto_gre { 47struct netns_proto_gre {
48 rwlock_t keymap_lock; 48 rwlock_t keymap_lock;
49 struct list_head keymap_list; 49 struct list_head keymap_list;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index f30d596dbc58..eff5b0ddc5ca 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -498,7 +498,7 @@ static int netlink_release(struct socket *sock)
498 498
499 skb_queue_purge(&sk->sk_write_queue); 499 skb_queue_purge(&sk->sk_write_queue);
500 500
501 if (nlk->pid && !nlk->subscriptions) { 501 if (nlk->pid) {
502 struct netlink_notify n = { 502 struct netlink_notify n = {
503 .net = sock_net(sk), 503 .net = sock_net(sk),
504 .protocol = sk->sk_protocol, 504 .protocol = sk->sk_protocol,
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index 8d3a55b4a30c..526d0273991a 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -35,7 +35,6 @@
35 35
36/* Transport protocol registration */ 36/* Transport protocol registration */
37static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly; 37static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
38static DEFINE_SPINLOCK(proto_tab_lock);
39 38
40static struct phonet_protocol *phonet_proto_get(int protocol) 39static struct phonet_protocol *phonet_proto_get(int protocol)
41{ 40{
@@ -44,11 +43,11 @@ static struct phonet_protocol *phonet_proto_get(int protocol)
44 if (protocol >= PHONET_NPROTO) 43 if (protocol >= PHONET_NPROTO)
45 return NULL; 44 return NULL;
46 45
47 spin_lock(&proto_tab_lock); 46 rcu_read_lock();
48 pp = proto_tab[protocol]; 47 pp = rcu_dereference(proto_tab[protocol]);
49 if (pp && !try_module_get(pp->prot->owner)) 48 if (pp && !try_module_get(pp->prot->owner))
50 pp = NULL; 49 pp = NULL;
51 spin_unlock(&proto_tab_lock); 50 rcu_read_unlock();
52 51
53 return pp; 52 return pp;
54} 53}
@@ -439,6 +438,8 @@ static struct packet_type phonet_packet_type __read_mostly = {
439 .func = phonet_rcv, 438 .func = phonet_rcv,
440}; 439};
441 440
441static DEFINE_MUTEX(proto_tab_lock);
442
442int __init_or_module phonet_proto_register(int protocol, 443int __init_or_module phonet_proto_register(int protocol,
443 struct phonet_protocol *pp) 444 struct phonet_protocol *pp)
444{ 445{
@@ -451,12 +452,12 @@ int __init_or_module phonet_proto_register(int protocol,
451 if (err) 452 if (err)
452 return err; 453 return err;
453 454
454 spin_lock(&proto_tab_lock); 455 mutex_lock(&proto_tab_lock);
455 if (proto_tab[protocol]) 456 if (proto_tab[protocol])
456 err = -EBUSY; 457 err = -EBUSY;
457 else 458 else
458 proto_tab[protocol] = pp; 459 rcu_assign_pointer(proto_tab[protocol], pp);
459 spin_unlock(&proto_tab_lock); 460 mutex_unlock(&proto_tab_lock);
460 461
461 return err; 462 return err;
462} 463}
@@ -464,10 +465,11 @@ EXPORT_SYMBOL(phonet_proto_register);
464 465
465void phonet_proto_unregister(int protocol, struct phonet_protocol *pp) 466void phonet_proto_unregister(int protocol, struct phonet_protocol *pp)
466{ 467{
467 spin_lock(&proto_tab_lock); 468 mutex_lock(&proto_tab_lock);
468 BUG_ON(proto_tab[protocol] != pp); 469 BUG_ON(proto_tab[protocol] != pp);
469 proto_tab[protocol] = NULL; 470 rcu_assign_pointer(proto_tab[protocol], NULL);
470 spin_unlock(&proto_tab_lock); 471 mutex_unlock(&proto_tab_lock);
472 synchronize_rcu();
471 proto_unregister(pp->prot); 473 proto_unregister(pp->prot);
472} 474}
473EXPORT_SYMBOL(phonet_proto_unregister); 475EXPORT_SYMBOL(phonet_proto_unregister);
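
The phonet protocol table now uses the standard writer-mutex/reader-RCU registry: writers serialize on a mutex and publish with rcu_assign_pointer(), lookups run lock-free under rcu_read_lock(), and unregistration waits a grace period before proto_unregister() so no lookup still holds a pointer into the departing module. Generalized (a sketch; foo_* names and the owner field are placeholders modeled on pp->prot->owner):

	static struct foo *tab[NPROTO];
	static DEFINE_MUTEX(tab_lock);

	struct foo *foo_get(int i)
	{
		struct foo *f;

		rcu_read_lock();
		f = rcu_dereference(tab[i]);
		if (f && !try_module_get(f->owner))
			f = NULL;	/* module already on its way out */
		rcu_read_unlock();
		return f;
	}

	void foo_unregister(int i, struct foo *f)
	{
		mutex_lock(&tab_lock);
		BUG_ON(tab[i] != f);
		rcu_assign_pointer(tab[i], NULL);
		mutex_unlock(&tab_lock);
		synchronize_rcu();	/* readers that saw f have exited */
	}

A mutex suffices on the write side because registration is process context; the old spinlock bought nothing but the inability to sleep.
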
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 6d64fda1afc9..d87388c94b00 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -34,7 +34,7 @@
34#include <net/phonet/pn_dev.h> 34#include <net/phonet/pn_dev.h>
35 35
36struct phonet_routes { 36struct phonet_routes {
37 spinlock_t lock; 37 struct mutex lock;
38 struct net_device *table[64]; 38 struct net_device *table[64];
39}; 39};
40 40
@@ -43,7 +43,7 @@ struct phonet_net {
43 struct phonet_routes routes; 43 struct phonet_routes routes;
44}; 44};
45 45
46int phonet_net_id; 46int phonet_net_id __read_mostly;
47 47
48struct phonet_device_list *phonet_device_list(struct net *net) 48struct phonet_device_list *phonet_device_list(struct net *net)
49{ 49{
@@ -61,7 +61,8 @@ static struct phonet_device *__phonet_device_alloc(struct net_device *dev)
61 pnd->netdev = dev; 61 pnd->netdev = dev;
62 bitmap_zero(pnd->addrs, 64); 62 bitmap_zero(pnd->addrs, 64);
63 63
64 list_add(&pnd->list, &pndevs->list); 64 BUG_ON(!mutex_is_locked(&pndevs->lock));
65 list_add_rcu(&pnd->list, &pndevs->list);
65 return pnd; 66 return pnd;
66} 67}
67 68
@@ -70,6 +71,7 @@ static struct phonet_device *__phonet_get(struct net_device *dev)
70 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); 71 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
71 struct phonet_device *pnd; 72 struct phonet_device *pnd;
72 73
74 BUG_ON(!mutex_is_locked(&pndevs->lock));
73 list_for_each_entry(pnd, &pndevs->list, list) { 75 list_for_each_entry(pnd, &pndevs->list, list) {
74 if (pnd->netdev == dev) 76 if (pnd->netdev == dev)
75 return pnd; 77 return pnd;
@@ -77,6 +79,18 @@ static struct phonet_device *__phonet_get(struct net_device *dev)
77 return NULL; 79 return NULL;
78} 80}
79 81
82static struct phonet_device *__phonet_get_rcu(struct net_device *dev)
83{
84 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
85 struct phonet_device *pnd;
86
87 list_for_each_entry_rcu(pnd, &pndevs->list, list) {
88 if (pnd->netdev == dev)
89 return pnd;
90 }
91 return NULL;
92}
93
80static void phonet_device_destroy(struct net_device *dev) 94static void phonet_device_destroy(struct net_device *dev)
81{ 95{
82 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); 96 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
@@ -84,11 +98,11 @@ static void phonet_device_destroy(struct net_device *dev)
84 98
85 ASSERT_RTNL(); 99 ASSERT_RTNL();
86 100
87 spin_lock_bh(&pndevs->lock); 101 mutex_lock(&pndevs->lock);
88 pnd = __phonet_get(dev); 102 pnd = __phonet_get(dev);
89 if (pnd) 103 if (pnd)
90 list_del(&pnd->list); 104 list_del_rcu(&pnd->list);
91 spin_unlock_bh(&pndevs->lock); 105 mutex_unlock(&pndevs->lock);
92 106
93 if (pnd) { 107 if (pnd) {
94 u8 addr; 108 u8 addr;
@@ -106,8 +120,8 @@ struct net_device *phonet_device_get(struct net *net)
106 struct phonet_device *pnd; 120 struct phonet_device *pnd;
107 struct net_device *dev = NULL; 121 struct net_device *dev = NULL;
108 122
109 spin_lock_bh(&pndevs->lock); 123 rcu_read_lock();
110 list_for_each_entry(pnd, &pndevs->list, list) { 124 list_for_each_entry_rcu(pnd, &pndevs->list, list) {
111 dev = pnd->netdev; 125 dev = pnd->netdev;
112 BUG_ON(!dev); 126 BUG_ON(!dev);
113 127
@@ -118,7 +132,7 @@ struct net_device *phonet_device_get(struct net *net)
118 } 132 }
119 if (dev) 133 if (dev)
120 dev_hold(dev); 134 dev_hold(dev);
121 spin_unlock_bh(&pndevs->lock); 135 rcu_read_unlock();
122 return dev; 136 return dev;
123} 137}
124 138
@@ -128,7 +142,7 @@ int phonet_address_add(struct net_device *dev, u8 addr)
128 struct phonet_device *pnd; 142 struct phonet_device *pnd;
129 int err = 0; 143 int err = 0;
130 144
131 spin_lock_bh(&pndevs->lock); 145 mutex_lock(&pndevs->lock);
132 /* Find or create Phonet-specific device data */ 146 /* Find or create Phonet-specific device data */
133 pnd = __phonet_get(dev); 147 pnd = __phonet_get(dev);
134 if (pnd == NULL) 148 if (pnd == NULL)
@@ -137,7 +151,7 @@ int phonet_address_add(struct net_device *dev, u8 addr)
137 err = -ENOMEM; 151 err = -ENOMEM;
138 else if (test_and_set_bit(addr >> 2, pnd->addrs)) 152 else if (test_and_set_bit(addr >> 2, pnd->addrs))
139 err = -EEXIST; 153 err = -EEXIST;
140 spin_unlock_bh(&pndevs->lock); 154 mutex_unlock(&pndevs->lock);
141 return err; 155 return err;
142} 156}
143 157
@@ -147,27 +161,32 @@ int phonet_address_del(struct net_device *dev, u8 addr)
147 struct phonet_device *pnd; 161 struct phonet_device *pnd;
148 int err = 0; 162 int err = 0;
149 163
150 spin_lock_bh(&pndevs->lock); 164 mutex_lock(&pndevs->lock);
151 pnd = __phonet_get(dev); 165 pnd = __phonet_get(dev);
152 if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) 166 if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) {
153 err = -EADDRNOTAVAIL; 167 err = -EADDRNOTAVAIL;
154 else if (bitmap_empty(pnd->addrs, 64)) { 168 pnd = NULL;
155 list_del(&pnd->list); 169 } else if (bitmap_empty(pnd->addrs, 64))
170 list_del_rcu(&pnd->list);
171 else
172 pnd = NULL;
173 mutex_unlock(&pndevs->lock);
174
175 if (pnd) {
176 synchronize_rcu();
156 kfree(pnd); 177 kfree(pnd);
157 } 178 }
158 spin_unlock_bh(&pndevs->lock);
159 return err; 179 return err;
160} 180}
161 181
162/* Gets a source address toward a destination, through a interface. */ 182/* Gets a source address toward a destination, through a interface. */
163u8 phonet_address_get(struct net_device *dev, u8 daddr) 183u8 phonet_address_get(struct net_device *dev, u8 daddr)
164{ 184{
165 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
166 struct phonet_device *pnd; 185 struct phonet_device *pnd;
167 u8 saddr; 186 u8 saddr;
168 187
169 spin_lock_bh(&pndevs->lock); 188 rcu_read_lock();
170 pnd = __phonet_get(dev); 189 pnd = __phonet_get_rcu(dev);
171 if (pnd) { 190 if (pnd) {
172 BUG_ON(bitmap_empty(pnd->addrs, 64)); 191 BUG_ON(bitmap_empty(pnd->addrs, 64));
173 192
@@ -178,7 +197,7 @@ u8 phonet_address_get(struct net_device *dev, u8 daddr)
178 saddr = find_first_bit(pnd->addrs, 64) << 2; 197 saddr = find_first_bit(pnd->addrs, 64) << 2;
179 } else 198 } else
180 saddr = PN_NO_ADDR; 199 saddr = PN_NO_ADDR;
181 spin_unlock_bh(&pndevs->lock); 200 rcu_read_unlock();
182 201
183 if (saddr == PN_NO_ADDR) { 202 if (saddr == PN_NO_ADDR) {
184 /* Fallback to another device */ 203 /* Fallback to another device */
@@ -200,8 +219,8 @@ int phonet_address_lookup(struct net *net, u8 addr)
200 struct phonet_device *pnd; 219 struct phonet_device *pnd;
201 int err = -EADDRNOTAVAIL; 220 int err = -EADDRNOTAVAIL;
202 221
203 spin_lock_bh(&pndevs->lock); 222 rcu_read_lock();
204 list_for_each_entry(pnd, &pndevs->list, list) { 223 list_for_each_entry_rcu(pnd, &pndevs->list, list) {
205 /* Don't allow unregistering devices! */ 224 /* Don't allow unregistering devices! */
206 if ((pnd->netdev->reg_state != NETREG_REGISTERED) || 225 if ((pnd->netdev->reg_state != NETREG_REGISTERED) ||
207 ((pnd->netdev->flags & IFF_UP)) != IFF_UP) 226 ((pnd->netdev->flags & IFF_UP)) != IFF_UP)
@@ -213,7 +232,7 @@ int phonet_address_lookup(struct net *net, u8 addr)
213 } 232 }
214 } 233 }
215found: 234found:
216 spin_unlock_bh(&pndevs->lock); 235 rcu_read_unlock();
217 return err; 236 return err;
218} 237}
219 238
@@ -248,17 +267,22 @@ static void phonet_route_autodel(struct net_device *dev)
248 267
249 /* Remove left-over Phonet routes */ 268 /* Remove left-over Phonet routes */
250 bitmap_zero(deleted, 64); 269 bitmap_zero(deleted, 64);
251 spin_lock_bh(&pnn->routes.lock); 270 mutex_lock(&pnn->routes.lock);
252 for (i = 0; i < 64; i++) 271 for (i = 0; i < 64; i++)
253 if (dev == pnn->routes.table[i]) { 272 if (dev == pnn->routes.table[i]) {
273 rcu_assign_pointer(pnn->routes.table[i], NULL);
254 set_bit(i, deleted); 274 set_bit(i, deleted);
255 pnn->routes.table[i] = NULL;
256 dev_put(dev);
257 } 275 }
258 spin_unlock_bh(&pnn->routes.lock); 276 mutex_unlock(&pnn->routes.lock);
277
278 if (bitmap_empty(deleted, 64))
279 return; /* short-circuit RCU */
280 synchronize_rcu();
259 for (i = find_first_bit(deleted, 64); i < 64; 281 for (i = find_first_bit(deleted, 64); i < 64;
260 i = find_next_bit(deleted, 64, i + 1)) 282 i = find_next_bit(deleted, 64, i + 1)) {
261 rtm_phonet_notify(RTM_DELROUTE, dev, i); 283 rtm_phonet_notify(RTM_DELROUTE, dev, i);
284 dev_put(dev);
285 }
262} 286}
263 287
264/* notify Phonet of device events */ 288/* notify Phonet of device events */
@@ -299,8 +323,8 @@ static int phonet_init_net(struct net *net)
299 } 323 }
300 324
301 INIT_LIST_HEAD(&pnn->pndevs.list); 325 INIT_LIST_HEAD(&pnn->pndevs.list);
302 spin_lock_init(&pnn->pndevs.lock); 326 mutex_init(&pnn->pndevs.lock);
303 spin_lock_init(&pnn->routes.lock); 327 mutex_init(&pnn->routes.lock);
304 net_assign_generic(net, phonet_net_id, pnn); 328 net_assign_generic(net, phonet_net_id, pnn);
305 return 0; 329 return 0;
306} 330}
@@ -361,13 +385,13 @@ int phonet_route_add(struct net_device *dev, u8 daddr)
361 int err = -EEXIST; 385 int err = -EEXIST;
362 386
363 daddr = daddr >> 2; 387 daddr = daddr >> 2;
364 spin_lock_bh(&routes->lock); 388 mutex_lock(&routes->lock);
365 if (routes->table[daddr] == NULL) { 389 if (routes->table[daddr] == NULL) {
366 routes->table[daddr] = dev; 390 rcu_assign_pointer(routes->table[daddr], dev);
367 dev_hold(dev); 391 dev_hold(dev);
368 err = 0; 392 err = 0;
369 } 393 }
370 spin_unlock_bh(&routes->lock); 394 mutex_unlock(&routes->lock);
371 return err; 395 return err;
372} 396}
373 397
@@ -375,17 +399,20 @@ int phonet_route_del(struct net_device *dev, u8 daddr)
375{ 399{
376 struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id); 400 struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id);
377 struct phonet_routes *routes = &pnn->routes; 401 struct phonet_routes *routes = &pnn->routes;
378 int err = -ENOENT;
379 402
380 daddr = daddr >> 2; 403 daddr = daddr >> 2;
381 spin_lock_bh(&routes->lock); 404 mutex_lock(&routes->lock);
382 if (dev == routes->table[daddr]) { 405 if (dev == routes->table[daddr])
383 routes->table[daddr] = NULL; 406 rcu_assign_pointer(routes->table[daddr], NULL);
384 dev_put(dev); 407 else
385 err = 0; 408 dev = NULL;
386 } 409 mutex_unlock(&routes->lock);
387 spin_unlock_bh(&routes->lock); 410
388 return err; 411 if (!dev)
412 return -ENOENT;
413 synchronize_rcu();
414 dev_put(dev);
415 return 0;
389} 416}
390 417
391struct net_device *phonet_route_get(struct net *net, u8 daddr) 418struct net_device *phonet_route_get(struct net *net, u8 daddr)
@@ -397,9 +424,9 @@ struct net_device *phonet_route_get(struct net *net, u8 daddr)
397 ASSERT_RTNL(); /* no need to hold the device */ 424 ASSERT_RTNL(); /* no need to hold the device */
398 425
399 daddr >>= 2; 426 daddr >>= 2;
400 spin_lock_bh(&routes->lock); 427 rcu_read_lock();
401 dev = routes->table[daddr]; 428 dev = rcu_dereference(routes->table[daddr]);
402 spin_unlock_bh(&routes->lock); 429 rcu_read_unlock();
403 return dev; 430 return dev;
404} 431}
405 432
@@ -409,11 +436,12 @@ struct net_device *phonet_route_output(struct net *net, u8 daddr)
409 struct phonet_routes *routes = &pnn->routes; 436 struct phonet_routes *routes = &pnn->routes;
410 struct net_device *dev; 437 struct net_device *dev;
411 438
412 spin_lock_bh(&routes->lock); 439 daddr >>= 2;
413 dev = routes->table[daddr >> 2]; 440 rcu_read_lock();
441 dev = rcu_dereference(routes->table[daddr]);
414 if (dev) 442 if (dev)
415 dev_hold(dev); 443 dev_hold(dev);
416 spin_unlock_bh(&routes->lock); 444 rcu_read_unlock();
417 445
418 if (!dev) 446 if (!dev)
419 dev = phonet_device_get(net); /* Default route */ 447 dev = phonet_device_get(net); /* Default route */
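
pn_dev.c applies the same split throughout: configuration paths take the new mutex and edit lists and tables through list_*_rcu() and rcu_assign_pointer(), while packet paths read under rcu_read_lock() with no refcounting. The subtlety is teardown ordering; the object is unlinked under the mutex but freed (or dev_put()) only after a grace period, outside the lock:

	/* sketch of the deferred-release ordering used by
	 * phonet_address_del() and phonet_route_del() above */
	mutex_lock(&pndevs->lock);
	list_del_rcu(&pnd->list);	/* readers may still be walking it */
	mutex_unlock(&pndevs->lock);

	synchronize_rcu();		/* every such reader has now exited */
	kfree(pnd);			/* provably unreachable */

phonet_route_autodel() adds one refinement: it skips synchronize_rcu() entirely when the deleted bitmap is empty, since a grace-period wait with nothing to free is pure latency.
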
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index 609e509b369b..2e6c7eb8e76a 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -131,8 +131,8 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
131 int addr_idx = 0, addr_start_idx = cb->args[1]; 131 int addr_idx = 0, addr_start_idx = cb->args[1];
132 132
133 pndevs = phonet_device_list(sock_net(skb->sk)); 133 pndevs = phonet_device_list(sock_net(skb->sk));
134 spin_lock_bh(&pndevs->lock); 134 rcu_read_lock();
135 list_for_each_entry(pnd, &pndevs->list, list) { 135 list_for_each_entry_rcu(pnd, &pndevs->list, list) {
136 u8 addr; 136 u8 addr;
137 137
138 if (dev_idx > dev_start_idx) 138 if (dev_idx > dev_start_idx)
@@ -154,7 +154,7 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
154 } 154 }
155 155
156out: 156out:
157 spin_unlock_bh(&pndevs->lock); 157 rcu_read_unlock();
158 cb->args[0] = dev_idx; 158 cb->args[0] = dev_idx;
159 cb->args[1] = addr_idx; 159 cb->args[1] = addr_idx;
160 160
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index b9aaab4e0354..797479369881 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -65,48 +65,53 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est,
65 struct tc_mirred *parm; 65 struct tc_mirred *parm;
66 struct tcf_mirred *m; 66 struct tcf_mirred *m;
67 struct tcf_common *pc; 67 struct tcf_common *pc;
68 struct net_device *dev = NULL; 68 struct net_device *dev;
69 int ret = 0, err; 69 int ret, ok_push = 0;
70 int ok_push = 0;
71 70
72 if (nla == NULL) 71 if (nla == NULL)
73 return -EINVAL; 72 return -EINVAL;
74 73 ret = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy);
75 err = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy); 74 if (ret < 0)
76 if (err < 0) 75 return ret;
77 return err;
78
79 if (tb[TCA_MIRRED_PARMS] == NULL) 76 if (tb[TCA_MIRRED_PARMS] == NULL)
80 return -EINVAL; 77 return -EINVAL;
81 parm = nla_data(tb[TCA_MIRRED_PARMS]); 78 parm = nla_data(tb[TCA_MIRRED_PARMS]);
82 79 switch (parm->eaction) {
80 case TCA_EGRESS_MIRROR:
81 case TCA_EGRESS_REDIR:
82 break;
83 default:
84 return -EINVAL;
85 }
83 if (parm->ifindex) { 86 if (parm->ifindex) {
84 dev = __dev_get_by_index(&init_net, parm->ifindex); 87 dev = __dev_get_by_index(&init_net, parm->ifindex);
85 if (dev == NULL) 88 if (dev == NULL)
86 return -ENODEV; 89 return -ENODEV;
87 switch (dev->type) { 90 switch (dev->type) {
88 case ARPHRD_TUNNEL: 91 case ARPHRD_TUNNEL:
89 case ARPHRD_TUNNEL6: 92 case ARPHRD_TUNNEL6:
90 case ARPHRD_SIT: 93 case ARPHRD_SIT:
91 case ARPHRD_IPGRE: 94 case ARPHRD_IPGRE:
92 case ARPHRD_VOID: 95 case ARPHRD_VOID:
93 case ARPHRD_NONE: 96 case ARPHRD_NONE:
94 ok_push = 0; 97 ok_push = 0;
95 break; 98 break;
96 default: 99 default:
97 ok_push = 1; 100 ok_push = 1;
98 break; 101 break;
99 } 102 }
103 } else {
104 dev = NULL;
100 } 105 }
101 106
102 pc = tcf_hash_check(parm->index, a, bind, &mirred_hash_info); 107 pc = tcf_hash_check(parm->index, a, bind, &mirred_hash_info);
103 if (!pc) { 108 if (!pc) {
104 if (!parm->ifindex) 109 if (dev == NULL)
105 return -EINVAL; 110 return -EINVAL;
106 pc = tcf_hash_create(parm->index, est, a, sizeof(*m), bind, 111 pc = tcf_hash_create(parm->index, est, a, sizeof(*m), bind,
107 &mirred_idx_gen, &mirred_hash_info); 112 &mirred_idx_gen, &mirred_hash_info);
108 if (IS_ERR(pc)) 113 if (IS_ERR(pc))
109 return PTR_ERR(pc); 114 return PTR_ERR(pc);
110 ret = ACT_P_CREATED; 115 ret = ACT_P_CREATED;
111 } else { 116 } else {
112 if (!ovr) { 117 if (!ovr) {
@@ -119,12 +124,12 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est,
119 spin_lock_bh(&m->tcf_lock); 124 spin_lock_bh(&m->tcf_lock);
120 m->tcf_action = parm->action; 125 m->tcf_action = parm->action;
121 m->tcfm_eaction = parm->eaction; 126 m->tcfm_eaction = parm->eaction;
122 if (parm->ifindex) { 127 if (dev != NULL) {
123 m->tcfm_ifindex = parm->ifindex; 128 m->tcfm_ifindex = parm->ifindex;
124 if (ret != ACT_P_CREATED) 129 if (ret != ACT_P_CREATED)
125 dev_put(m->tcfm_dev); 130 dev_put(m->tcfm_dev);
126 m->tcfm_dev = dev;
127 dev_hold(dev); 131 dev_hold(dev);
132 m->tcfm_dev = dev;
128 m->tcfm_ok_push = ok_push; 133 m->tcfm_ok_push = ok_push;
129 } 134 }
130 spin_unlock_bh(&m->tcf_lock); 135 spin_unlock_bh(&m->tcf_lock);
@@ -148,47 +153,32 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
148{ 153{
149 struct tcf_mirred *m = a->priv; 154 struct tcf_mirred *m = a->priv;
150 struct net_device *dev; 155 struct net_device *dev;
151 struct sk_buff *skb2 = NULL; 156 struct sk_buff *skb2;
152 u32 at = G_TC_AT(skb->tc_verd); 157 u32 at;
158 int retval, err = 1;
153 159
154 spin_lock(&m->tcf_lock); 160 spin_lock(&m->tcf_lock);
155
156 dev = m->tcfm_dev;
157 m->tcf_tm.lastuse = jiffies; 161 m->tcf_tm.lastuse = jiffies;
158 162
159 if (!(dev->flags&IFF_UP) ) { 163 dev = m->tcfm_dev;
164 if (!(dev->flags & IFF_UP)) {
160 if (net_ratelimit()) 165 if (net_ratelimit())
161 printk("mirred to Houston: device %s is gone!\n", 166 printk("mirred to Houston: device %s is gone!\n",
162 dev->name); 167 dev->name);
163bad_mirred: 168 goto out;
164 if (skb2 != NULL)
165 kfree_skb(skb2);
166 m->tcf_qstats.overlimits++;
167 m->tcf_bstats.bytes += qdisc_pkt_len(skb);
168 m->tcf_bstats.packets++;
169 spin_unlock(&m->tcf_lock);
170 /* should we be asking for packet to be dropped?
171 * may make sense for redirect case only
172 */
173 return TC_ACT_SHOT;
174 } 169 }
175 170
176 skb2 = skb_act_clone(skb, GFP_ATOMIC); 171 skb2 = skb_act_clone(skb, GFP_ATOMIC);
177 if (skb2 == NULL) 172 if (skb2 == NULL)
178 goto bad_mirred; 173 goto out;
179 if (m->tcfm_eaction != TCA_EGRESS_MIRROR &&
180 m->tcfm_eaction != TCA_EGRESS_REDIR) {
181 if (net_ratelimit())
182 printk("tcf_mirred unknown action %d\n",
183 m->tcfm_eaction);
184 goto bad_mirred;
185 }
186 174
187 m->tcf_bstats.bytes += qdisc_pkt_len(skb2); 175 m->tcf_bstats.bytes += qdisc_pkt_len(skb2);
188 m->tcf_bstats.packets++; 176 m->tcf_bstats.packets++;
189 if (!(at & AT_EGRESS)) 177 at = G_TC_AT(skb->tc_verd);
178 if (!(at & AT_EGRESS)) {
190 if (m->tcfm_ok_push) 179 if (m->tcfm_ok_push)
191 skb_push(skb2, skb2->dev->hard_header_len); 180 skb_push(skb2, skb2->dev->hard_header_len);
181 }
192 182
193 /* mirror is always swallowed */ 183 /* mirror is always swallowed */
194 if (m->tcfm_eaction != TCA_EGRESS_MIRROR) 184 if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
@@ -197,8 +187,23 @@ bad_mirred:
197 skb2->dev = dev; 187 skb2->dev = dev;
198 skb2->iif = skb->dev->ifindex; 188 skb2->iif = skb->dev->ifindex;
199 dev_queue_xmit(skb2); 189 dev_queue_xmit(skb2);
190 err = 0;
191
192out:
193 if (err) {
194 m->tcf_qstats.overlimits++;
195 m->tcf_bstats.bytes += qdisc_pkt_len(skb);
196 m->tcf_bstats.packets++;
197 /* should we be asking for packet to be dropped?
198 * may make sense for redirect case only
199 */
200 retval = TC_ACT_SHOT;
201 } else {
202 retval = m->tcf_action;
203 }
200 spin_unlock(&m->tcf_lock); 204 spin_unlock(&m->tcf_lock);
201 return m->tcf_action; 205
206 return retval;
202} 207}
203 208
204static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 209static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
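
The act_mirred rewrite moves the eaction sanity check from the per-packet path into tcf_mirred_init(), so an invalid TCA_EGRESS_* value is rejected once at configuration time rather than rediscovered (and rate-limit printed) on every packet; tcf_mirred() then funnels all failures through a single out: label that does the overlimit accounting and returns TC_ACT_SHOT. The control-path check in isolation, as in the hunk:

	switch (parm->eaction) {
	case TCA_EGRESS_MIRROR:
	case TCA_EGRESS_REDIR:
		break;		/* the only values the fast path handles */
	default:
		return -EINVAL;	/* never reaches dev_queue_xmit() */
	}
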
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 4ae6aa562f2b..5173c1e1b19c 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -119,32 +119,26 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
119 spin_unlock(root_lock); 119 spin_unlock(root_lock);
120 120
121 HARD_TX_LOCK(dev, txq, smp_processor_id()); 121 HARD_TX_LOCK(dev, txq, smp_processor_id());
122 if (!netif_tx_queue_stopped(txq) && 122 if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
123 !netif_tx_queue_frozen(txq))
124 ret = dev_hard_start_xmit(skb, dev, txq); 123 ret = dev_hard_start_xmit(skb, dev, txq);
124
125 HARD_TX_UNLOCK(dev, txq); 125 HARD_TX_UNLOCK(dev, txq);
126 126
127 spin_lock(root_lock); 127 spin_lock(root_lock);
128 128
129 switch (ret) { 129 if (dev_xmit_complete(ret)) {
130 case NETDEV_TX_OK: 130 /* Driver sent out skb successfully or skb was consumed */
131 /* Driver sent out skb successfully */
132 ret = qdisc_qlen(q); 131 ret = qdisc_qlen(q);
133 break; 132 } else if (ret == NETDEV_TX_LOCKED) {
134
135 case NETDEV_TX_LOCKED:
136 /* Driver try lock failed */ 133 /* Driver try lock failed */
137 ret = handle_dev_cpu_collision(skb, txq, q); 134 ret = handle_dev_cpu_collision(skb, txq, q);
138 break; 135 } else {
139
140 default:
141 /* Driver returned NETDEV_TX_BUSY - requeue skb */ 136 /* Driver returned NETDEV_TX_BUSY - requeue skb */
142 if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit())) 137 if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
143 printk(KERN_WARNING "BUG %s code %d qlen %d\n", 138 printk(KERN_WARNING "BUG %s code %d qlen %d\n",
144 dev->name, ret, q->q.qlen); 139 dev->name, ret, q->q.qlen);
145 140
146 ret = dev_requeue_skb(skb, q); 141 ret = dev_requeue_skb(skb, q);
147 break;
148 } 142 }
149 143
150 if (ret && (netif_tx_queue_stopped(txq) || 144 if (ret && (netif_tx_queue_stopped(txq) ||
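
sch_direct_xmit() now folds NETDEV_TX_OK together with every other skb-consumed outcome via dev_xmit_complete(), added in this series so drivers can propagate errors through ndo_start_xmit() without the qdisc requeueing an skb it no longer owns. Roughly, per include/linux/netdevice.h in this series (sketch):

	static inline bool dev_xmit_complete(int rc)
	{
		/* the driver consumed the skb when:
		 *  - it was transmitted       (rc == NETDEV_TX_OK)
		 *  - transmission failed      (rc < 0)
		 *  - it was queued elsewhere  (rc & NET_XMIT_MASK)
		 * NETDEV_TX_BUSY and NETDEV_TX_LOCKED sit above the mask,
		 * so the caller still owns the skb and may requeue it.
		 */
		if (likely(rc < NET_XMIT_MASK))
			return true;

		return false;
	}
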
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 8450960df24f..7eed77a39d0d 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1485,15 +1485,13 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len)
1485 * local endpoint and the remote peer. 1485 * local endpoint and the remote peer.
1486 */ 1486 */
1487int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, 1487int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
1488 gfp_t gfp) 1488 sctp_scope_t scope, gfp_t gfp)
1489{ 1489{
1490 sctp_scope_t scope;
1491 int flags; 1490 int flags;
1492 1491
1493 /* Use scoping rules to determine the subset of addresses from 1492 /* Use scoping rules to determine the subset of addresses from
1494 * the endpoint. 1493 * the endpoint.
1495 */ 1494 */
1496 scope = sctp_scope(&asoc->peer.active_path->ipaddr);
1497 flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; 1495 flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
1498 if (asoc->peer.ipv4_address) 1496 if (asoc->peer.ipv4_address)
1499 flags |= SCTP_ADDR4_PEERSUPP; 1497 flags |= SCTP_ADDR4_PEERSUPP;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index c8fae1983dd1..d4df45022ffa 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -384,6 +384,11 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
384 if (!new_asoc) 384 if (!new_asoc)
385 goto nomem; 385 goto nomem;
386 386
387 if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
388 sctp_scope(sctp_source(chunk)),
389 GFP_ATOMIC) < 0)
390 goto nomem_init;
391
387 /* The call, sctp_process_init(), can fail on memory allocation. */ 392 /* The call, sctp_process_init(), can fail on memory allocation. */
388 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 393 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
389 sctp_source(chunk), 394 sctp_source(chunk),
@@ -401,9 +406,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
401 len = ntohs(err_chunk->chunk_hdr->length) - 406 len = ntohs(err_chunk->chunk_hdr->length) -
402 sizeof(sctp_chunkhdr_t); 407 sizeof(sctp_chunkhdr_t);
403 408
404 if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
405 goto nomem_init;
406
407 repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len); 409 repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
408 if (!repl) 410 if (!repl)
409 goto nomem_init; 411 goto nomem_init;
@@ -1452,6 +1454,10 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
1452 if (!new_asoc) 1454 if (!new_asoc)
1453 goto nomem; 1455 goto nomem;
1454 1456
1457 if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
1458 sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0)
1459 goto nomem;
1460
1455 /* In the outbound INIT ACK the endpoint MUST copy its current 1461 /* In the outbound INIT ACK the endpoint MUST copy its current
1456 * Verification Tag and Peers Verification tag into a reserved 1462 * Verification Tag and Peers Verification tag into a reserved
1457 * place (local tie-tag and per tie-tag) within the state cookie. 1463 * place (local tie-tag and per tie-tag) within the state cookie.
@@ -1488,9 +1494,6 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
1488 sizeof(sctp_chunkhdr_t); 1494 sizeof(sctp_chunkhdr_t);
1489 } 1495 }
1490 1496
1491 if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
1492 goto nomem;
1493
1494 repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len); 1497 repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
1495 if (!repl) 1498 if (!repl)
1496 goto nomem; 1499 goto nomem;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 4085db99033d..66b1f02b17ba 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1080,6 +1080,13 @@ static int __sctp_connect(struct sock* sk,
1080 err = -ENOMEM; 1080 err = -ENOMEM;
1081 goto out_free; 1081 goto out_free;
1082 } 1082 }
1083
1084 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
1085 GFP_KERNEL);
1086 if (err < 0) {
1087 goto out_free;
1088 }
1089
1083 } 1090 }
1084 1091
1085 /* Prime the peer's transport structures. */ 1092 /* Prime the peer's transport structures. */
@@ -1095,11 +1102,6 @@ static int __sctp_connect(struct sock* sk,
1095 walk_size += af->sockaddr_len; 1102 walk_size += af->sockaddr_len;
1096 } 1103 }
1097 1104
1098 err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL);
1099 if (err < 0) {
1100 goto out_free;
1101 }
1102
1103 /* In case the user of sctp_connectx() wants an association 1105 /* In case the user of sctp_connectx() wants an association
1104 * id back, assign one now. 1106 * id back, assign one now.
1105 */ 1107 */
@@ -1274,22 +1276,30 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
1274} 1276}
1275 1277
1276/* 1278/*
1277 * New (hopefully final) interface for the API. The option buffer is used 1279 * New (hopefully final) interface for the API.
 1278 * both for the returned association id and the addresses.		 1280 * We use the sctp_getaddrs_old structure so that user-space library
									 1281 * can avoid any unnecessary allocations. The only different part
									 1282 * is that we store the actual length of the address buffer into the
									 1283 * addr_num structure member. That way we can re-use the existing
1284 * code.
1279 */ 1285 */
1280SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len, 1286SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
1281 char __user *optval, 1287 char __user *optval,
1282 int __user *optlen) 1288 int __user *optlen)
1283{ 1289{
1290 struct sctp_getaddrs_old param;
1284 sctp_assoc_t assoc_id = 0; 1291 sctp_assoc_t assoc_id = 0;
1285 int err = 0; 1292 int err = 0;
1286 1293
1287 if (len < sizeof(assoc_id)) 1294 if (len < sizeof(param))
1288 return -EINVAL; 1295 return -EINVAL;
1289 1296
1297 if (copy_from_user(&param, optval, sizeof(param)))
1298 return -EFAULT;
1299
1290 err = __sctp_setsockopt_connectx(sk, 1300 err = __sctp_setsockopt_connectx(sk,
1291 (struct sockaddr __user *)(optval + sizeof(assoc_id)), 1301 (struct sockaddr __user *)param.addrs,
1292 len - sizeof(assoc_id), &assoc_id); 1302 param.addr_num, &assoc_id);
1293 1303
1294 if (err == 0 || err == -EINPROGRESS) { 1304 if (err == 0 || err == -EINPROGRESS) {
1295 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id))) 1305 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
@@ -1689,6 +1699,11 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1689 goto out_unlock; 1699 goto out_unlock;
1690 } 1700 }
1691 asoc = new_asoc; 1701 asoc = new_asoc;
1702 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
1703 if (err < 0) {
1704 err = -ENOMEM;
1705 goto out_free;
1706 }
1692 1707
1693 /* If the SCTP_INIT ancillary data is specified, set all 1708 /* If the SCTP_INIT ancillary data is specified, set all
1694 * the association init values accordingly. 1709 * the association init values accordingly.
@@ -1718,11 +1733,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1718 err = -ENOMEM; 1733 err = -ENOMEM;
1719 goto out_free; 1734 goto out_free;
1720 } 1735 }
1721 err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL);
1722 if (err < 0) {
1723 err = -ENOMEM;
1724 goto out_free;
1725 }
1726 } 1736 }
1727 1737
1728 /* ASSERT: we have a valid association at this point. */ 1738 /* ASSERT: we have a valid association at this point. */
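
The sctp hunks all serve one reordering. sctp_assoc_set_bind_addr_from_ep() used to derive the address scope internally from asoc->peer.active_path, which forced callers to bind only after the peer transports existed; taking the scope as a parameter lets every caller bind before sctp_process_init() runs. On the passive side the scope now comes straight from the arriving chunk:

	/* sketch of the call order the sm_statefuns.c hunks establish */
	if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
					     sctp_scope(sctp_source(chunk)),
					     GFP_ATOMIC) < 0)
		goto nomem_init;	/* bind first...              */
	/* ...only then sctp_process_init() and sctp_make_init_ack() */

The transport.c hunk that follows covers the new window this opens: sk_rcv_saddr is now also initialized while no primary path has been set yet.
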
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index c256e4839316..3b141bb32faf 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -308,7 +308,8 @@ void sctp_transport_route(struct sctp_transport *transport,
308 /* Initialize sk->sk_rcv_saddr, if the transport is the 308 /* Initialize sk->sk_rcv_saddr, if the transport is the
309 * association's active path for getsockname(). 309 * association's active path for getsockname().
310 */ 310 */
311 if (asoc && (transport == asoc->peer.active_path)) 311 if (asoc && (!asoc->peer.primary_path ||
312 (transport == asoc->peer.active_path)))
312 opt->pf->af->to_sk_saddr(&transport->saddr, 313 opt->pf->af->to_sk_saddr(&transport->saddr,
313 asoc->base.sk); 314 asoc->base.sk);
314 } else 315 } else
diff --git a/net/socket.c b/net/socket.c
index befd9f5b1620..402abb39cbfe 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -100,14 +100,6 @@
100#include <linux/if_tun.h> 100#include <linux/if_tun.h>
101#include <linux/ipv6_route.h> 101#include <linux/ipv6_route.h>
102#include <linux/route.h> 102#include <linux/route.h>
103#include <linux/atmdev.h>
104#include <linux/atmarp.h>
105#include <linux/atmsvc.h>
106#include <linux/atmlec.h>
107#include <linux/atmclip.h>
108#include <linux/atmmpc.h>
109#include <linux/atm_tcp.h>
110#include <linux/sonet.h>
111#include <linux/sockios.h> 103#include <linux/sockios.h>
112#include <linux/atalk.h> 104#include <linux/atalk.h>
113 105
@@ -2723,38 +2715,15 @@ static int siocdevprivate_ioctl(struct net *net, unsigned int cmd,
2723static int dev_ifsioc(struct net *net, struct socket *sock, 2715static int dev_ifsioc(struct net *net, struct socket *sock,
2724 unsigned int cmd, struct compat_ifreq __user *uifr32) 2716 unsigned int cmd, struct compat_ifreq __user *uifr32)
2725{ 2717{
2726 struct ifreq ifr; 2718 struct ifreq __user *uifr;
2727 struct compat_ifmap __user *uifmap32;
2728 mm_segment_t old_fs;
2729 int err; 2719 int err;
2730 2720
2731 uifmap32 = &uifr32->ifr_ifru.ifru_map; 2721 uifr = compat_alloc_user_space(sizeof(*uifr));
2732 switch (cmd) { 2722 if (copy_in_user(uifr, uifr32, sizeof(*uifr32)))
2733 case SIOCSIFMAP: 2723 return -EFAULT;
2734 err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name)); 2724
2735 err |= __get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); 2725 err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr);
2736 err |= __get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); 2726
2737 err |= __get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
2738 err |= __get_user(ifr.ifr_map.irq, &uifmap32->irq);
2739 err |= __get_user(ifr.ifr_map.dma, &uifmap32->dma);
2740 err |= __get_user(ifr.ifr_map.port, &uifmap32->port);
2741 if (err)
2742 return -EFAULT;
2743 break;
2744 case SIOCSHWTSTAMP:
2745 if (copy_from_user(&ifr, uifr32, sizeof(*uifr32)))
2746 return -EFAULT;
2747 ifr.ifr_data = compat_ptr(uifr32->ifr_ifru.ifru_data);
2748 break;
2749 default:
2750 if (copy_from_user(&ifr, uifr32, sizeof(*uifr32)))
2751 return -EFAULT;
2752 break;
2753 }
2754 old_fs = get_fs();
2755 set_fs (KERNEL_DS);
2756 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ifr);
2757 set_fs (old_fs);
2758 if (!err) { 2727 if (!err) {
2759 switch (cmd) { 2728 switch (cmd) {
2760 case SIOCGIFFLAGS: 2729 case SIOCGIFFLAGS:
@@ -2771,18 +2740,7 @@ static int dev_ifsioc(struct net *net, struct socket *sock,
2771 case SIOCGIFTXQLEN: 2740 case SIOCGIFTXQLEN:
2772 case SIOCGMIIPHY: 2741 case SIOCGMIIPHY:
2773 case SIOCGMIIREG: 2742 case SIOCGMIIREG:
2774 if (copy_to_user(uifr32, &ifr, sizeof(*uifr32))) 2743 if (copy_in_user(uifr32, uifr, sizeof(*uifr32)))
2775 return -EFAULT;
2776 break;
2777 case SIOCGIFMAP:
2778 err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name));
2779 err |= __put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
2780 err |= __put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
2781 err |= __put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
2782 err |= __put_user(ifr.ifr_map.irq, &uifmap32->irq);
2783 err |= __put_user(ifr.ifr_map.dma, &uifmap32->dma);
2784 err |= __put_user(ifr.ifr_map.port, &uifmap32->port);
2785 if (err)
2786 err = -EFAULT; 2744 err = -EFAULT;
2787 break; 2745 break;
2788 } 2746 }
@@ -2790,6 +2748,65 @@ static int dev_ifsioc(struct net *net, struct socket *sock,
 	return err;
 }
 
+static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
+			struct compat_ifreq __user *uifr32)
+{
+	struct ifreq ifr;
+	struct compat_ifmap __user *uifmap32;
+	mm_segment_t old_fs;
+	int err;
+
+	uifmap32 = &uifr32->ifr_ifru.ifru_map;
+	err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name));
+	err |= __get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
+	err |= __get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
+	err |= __get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
+	err |= __get_user(ifr.ifr_map.irq, &uifmap32->irq);
+	err |= __get_user(ifr.ifr_map.dma, &uifmap32->dma);
+	err |= __get_user(ifr.ifr_map.port, &uifmap32->port);
+	if (err)
+		return -EFAULT;
+
+	old_fs = get_fs();
+	set_fs (KERNEL_DS);
+	err = dev_ioctl(net, cmd, (void __user *)&ifr);
+	set_fs (old_fs);
+
+	if (cmd == SIOCGIFMAP && !err) {
+		err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name));
+		err |= __put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
+		err |= __put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
+		err |= __put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
+		err |= __put_user(ifr.ifr_map.irq, &uifmap32->irq);
+		err |= __put_user(ifr.ifr_map.dma, &uifmap32->dma);
+		err |= __put_user(ifr.ifr_map.port, &uifmap32->port);
+		if (err)
+			err = -EFAULT;
+	}
+	return err;
+}
+
+static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uifr32)
+{
+	void __user *uptr;
+	compat_uptr_t uptr32;
+	struct ifreq __user *uifr;
+
+	uifr = compat_alloc_user_space(sizeof (*uifr));
+	if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
+		return -EFAULT;
+
+	if (get_user(uptr32, &uifr32->ifr_data))
+		return -EFAULT;
+
+	uptr = compat_ptr(uptr32);
+
+	if (put_user(uptr, &uifr->ifr_data))
+		return -EFAULT;
+
+	return dev_ioctl(net, SIOCSHWTSTAMP, uifr);
+}
+
 struct rtentry32 {
 	u32 rt_pad1;
 	struct sockaddr rt_dst;		/* target address */
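
compat_sioc_ifmap() cannot take the bulk-copy shortcut because struct ifmap begins with two unsigned longs, which are 4 bytes for a 32-bit task but 8 bytes natively; each field must be converted individually, which is what the __get_user()/__put_user() chains do. For reference, the two layouts being translated (as defined in include/linux/if.h and include/linux/compat.h of this era):

struct ifmap {				/* native, include/linux/if.h */
	unsigned long mem_start;	/* 8 bytes on a 64-bit kernel */
	unsigned long mem_end;
	unsigned short base_addr;
	unsigned char irq;
	unsigned char dma;
	unsigned char port;
	/* 3 bytes spare */
};

struct compat_ifmap {			/* 32-bit ABI, include/linux/compat.h */
	compat_ulong_t mem_start;	/* always 4 bytes */
	compat_ulong_t mem_end;
	unsigned short base_addr;
	unsigned char irq;
	unsigned char dma;
	unsigned char port;
};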
@@ -2892,173 +2909,6 @@ static int old_bridge_ioctl(compat_ulong_t __user *argp)
 	return -EINVAL;
 }
 
-struct atmif_sioc32 {
-	compat_int_t number;
-	compat_int_t length;
-	compat_caddr_t arg;
-};
-
-struct atm_iobuf32 {
-	compat_int_t length;
-	compat_caddr_t buffer;
-};
-
-#define ATM_GETLINKRATE32 _IOW('a', ATMIOC_ITF+1, struct atmif_sioc32)
-#define ATM_GETNAMES32    _IOW('a', ATMIOC_ITF+3, struct atm_iobuf32)
-#define ATM_GETTYPE32     _IOW('a', ATMIOC_ITF+4, struct atmif_sioc32)
-#define ATM_GETESI32      _IOW('a', ATMIOC_ITF+5, struct atmif_sioc32)
-#define ATM_GETADDR32     _IOW('a', ATMIOC_ITF+6, struct atmif_sioc32)
-#define ATM_RSTADDR32     _IOW('a', ATMIOC_ITF+7, struct atmif_sioc32)
-#define ATM_ADDADDR32     _IOW('a', ATMIOC_ITF+8, struct atmif_sioc32)
-#define ATM_DELADDR32     _IOW('a', ATMIOC_ITF+9, struct atmif_sioc32)
-#define ATM_GETCIRANGE32  _IOW('a', ATMIOC_ITF+10, struct atmif_sioc32)
-#define ATM_SETCIRANGE32  _IOW('a', ATMIOC_ITF+11, struct atmif_sioc32)
-#define ATM_SETESI32      _IOW('a', ATMIOC_ITF+12, struct atmif_sioc32)
-#define ATM_SETESIF32     _IOW('a', ATMIOC_ITF+13, struct atmif_sioc32)
-#define ATM_GETSTAT32     _IOW('a', ATMIOC_SARCOM+0, struct atmif_sioc32)
-#define ATM_GETSTATZ32    _IOW('a', ATMIOC_SARCOM+1, struct atmif_sioc32)
-#define ATM_GETLOOP32     _IOW('a', ATMIOC_SARCOM+2, struct atmif_sioc32)
-#define ATM_SETLOOP32     _IOW('a', ATMIOC_SARCOM+3, struct atmif_sioc32)
-#define ATM_QUERYLOOP32   _IOW('a', ATMIOC_SARCOM+4, struct atmif_sioc32)
-
-static struct {
-	unsigned int cmd32;
-	unsigned int cmd;
-} atm_ioctl_map[] = {
-	{ ATM_GETLINKRATE32, ATM_GETLINKRATE },
-	{ ATM_GETNAMES32,    ATM_GETNAMES },
-	{ ATM_GETTYPE32,     ATM_GETTYPE },
-	{ ATM_GETESI32,      ATM_GETESI },
-	{ ATM_GETADDR32,     ATM_GETADDR },
-	{ ATM_RSTADDR32,     ATM_RSTADDR },
-	{ ATM_ADDADDR32,     ATM_ADDADDR },
-	{ ATM_DELADDR32,     ATM_DELADDR },
-	{ ATM_GETCIRANGE32,  ATM_GETCIRANGE },
-	{ ATM_SETCIRANGE32,  ATM_SETCIRANGE },
-	{ ATM_SETESI32,      ATM_SETESI },
-	{ ATM_SETESIF32,     ATM_SETESIF },
-	{ ATM_GETSTAT32,     ATM_GETSTAT },
-	{ ATM_GETSTATZ32,    ATM_GETSTATZ },
-	{ ATM_GETLOOP32,     ATM_GETLOOP },
-	{ ATM_SETLOOP32,     ATM_SETLOOP },
-	{ ATM_QUERYLOOP32,   ATM_QUERYLOOP }
-};
-
-#define NR_ATM_IOCTL ARRAY_SIZE(atm_ioctl_map)
-
-static int do_atm_iobuf(struct net *net, struct socket *sock,
-			unsigned int cmd, unsigned long arg)
-{
-	struct atm_iobuf __user *iobuf;
-	struct atm_iobuf32 __user *iobuf32;
-	u32 data;
-	void __user *datap;
-	int len, err;
-
-	iobuf = compat_alloc_user_space(sizeof(*iobuf));
-	iobuf32 = compat_ptr(arg);
-
-	if (get_user(len, &iobuf32->length) ||
-	    get_user(data, &iobuf32->buffer))
-		return -EFAULT;
-	datap = compat_ptr(data);
-	if (put_user(len, &iobuf->length) ||
-	    put_user(datap, &iobuf->buffer))
-		return -EFAULT;
-
-	err = sock_do_ioctl(net, sock, cmd, (unsigned long)iobuf);
-
-	if (!err) {
-		if (copy_in_user(&iobuf32->length, &iobuf->length,
-				 sizeof(int)))
-			err = -EFAULT;
-	}
-
-	return err;
-}
-
-static int do_atmif_sioc(struct net *net, struct socket *sock,
-			 unsigned int cmd, unsigned long arg)
-{
-	struct atmif_sioc __user *sioc;
-	struct atmif_sioc32 __user *sioc32;
-	u32 data;
-	void __user *datap;
-	int err;
-
-	sioc = compat_alloc_user_space(sizeof(*sioc));
-	sioc32 = compat_ptr(arg);
-
-	if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int)) ||
-	    get_user(data, &sioc32->arg))
-		return -EFAULT;
-	datap = compat_ptr(data);
-	if (put_user(datap, &sioc->arg))
-		return -EFAULT;
-
-	err = sock_do_ioctl(net, sock, cmd, (unsigned long) sioc);
-
-	if (!err) {
-		if (copy_in_user(&sioc32->length, &sioc->length,
-				 sizeof(int)))
-			err = -EFAULT;
-	}
-	return err;
-}
-
-static int do_atm_ioctl(struct net *net, struct socket *sock,
-			unsigned int cmd32, unsigned long arg)
-{
-	int i;
-	unsigned int cmd = 0;
-
-	switch (cmd32) {
-	case SONET_GETSTAT:
-	case SONET_GETSTATZ:
-	case SONET_GETDIAG:
-	case SONET_SETDIAG:
-	case SONET_CLRDIAG:
-	case SONET_SETFRAMING:
-	case SONET_GETFRAMING:
-	case SONET_GETFRSENSE:
-		return do_atmif_sioc(net, sock, cmd32, arg);
-	}
-
-	for (i = 0; i < NR_ATM_IOCTL; i++) {
-		if (cmd32 == atm_ioctl_map[i].cmd32) {
-			cmd = atm_ioctl_map[i].cmd;
-			break;
-		}
-	}
-	if (i == NR_ATM_IOCTL)
-		return -EINVAL;
-
-	switch (cmd) {
-	case ATM_GETNAMES:
-		return do_atm_iobuf(net, sock, cmd, arg);
-
-	case ATM_GETLINKRATE:
-	case ATM_GETTYPE:
-	case ATM_GETESI:
-	case ATM_GETADDR:
-	case ATM_RSTADDR:
-	case ATM_ADDADDR:
-	case ATM_DELADDR:
-	case ATM_GETCIRANGE:
-	case ATM_SETCIRANGE:
-	case ATM_SETESI:
-	case ATM_SETESIF:
-	case ATM_GETSTAT:
-	case ATM_GETSTATZ:
-	case ATM_GETLOOP:
-	case ATM_SETLOOP:
-	case ATM_QUERYLOOP:
-		return do_atmif_sioc(net, sock, cmd, arg);
-	}
-
-	return -EINVAL;
-}
-
 static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
 				unsigned int cmd, unsigned long arg)
 {
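
The block removed above was the socket layer's private copy of the ATM/SONET compat translation; with this change the generic code stops carrying ATM-specific tables, and the matching dispatch cases disappear from compat_sock_ioctl_trans() below. The reason a cmd32-to-cmd table was needed at all: _IOW() encodes sizeof(argument type) into the command number, so the same logical request gets a different number when its argument struct shrinks in the 32-bit ABI. A sketch of the effect, with hypothetical foo_buf/FOO_GET names:

#include <linux/ioctl.h>
#include <linux/types.h>

struct foo_buf   { int length; void *buffer; };	/* 16 bytes on 64-bit */
struct foo_buf32 { int length; __u32 buffer; };	/* 8 bytes everywhere */

/* same magic, same number, different size field => distinct cmd values */
#define FOO_GET   _IOW('a', 1, struct foo_buf)
#define FOO_GET32 _IOW('a', 1, struct foo_buf32)

/* hence tables like atm_ioctl_map[] pairing each FOO_GET32 with FOO_GET */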
@@ -3081,6 +2931,9 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
 		return ethtool_ioctl(net, argp);
 	case SIOCWANDEV:
 		return compat_siocwandev(net, argp);
+	case SIOCGIFMAP:
+	case SIOCSIFMAP:
+		return compat_sioc_ifmap(net, cmd, argp);
 	case SIOCBONDENSLAVE:
 	case SIOCBONDRELEASE:
 	case SIOCBONDSETHWADDR:
@@ -3095,6 +2948,8 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
 		return do_siocgstamp(net, sock, cmd, argp);
 	case SIOCGSTAMPNS:
 		return do_siocgstampns(net, sock, cmd, argp);
+	case SIOCSHWTSTAMP:
+		return compat_siocshwtstamp(net, argp);
 
 	case FIOSETOWN:
 	case SIOCSPGRP:
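
SIOCSHWTSTAMP needs its own helper for a third reason: ifr_data is a pointer, stored by a 32-bit task as a 32-bit value that must be explicitly widened before the native handler can use it. The core of the added compat_siocshwtstamp(), annotated:

	compat_uptr_t uptr32;
	void __user *uptr;

	/* fetch the 32-bit pointer value the compat task stored */
	if (get_user(uptr32, &uifr32->ifr_data))
		return -EFAULT;

	/* compat_ptr() widens it to a full __user pointer */
	uptr = compat_ptr(uptr32);

	/* write it into the native-layout copy before forwarding */
	if (put_user(uptr, &uifr->ifr_data))
		return -EFAULT;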
@@ -3121,12 +2976,9 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
 	case SIOCADDMULTI:
 	case SIOCDELMULTI:
 	case SIOCGIFINDEX:
-	case SIOCGIFMAP:
-	case SIOCSIFMAP:
 	case SIOCGIFADDR:
 	case SIOCSIFADDR:
 	case SIOCSIFHWBROADCAST:
-	case SIOCSHWTSTAMP:
 	case SIOCDIFADDR:
 	case SIOCGIFBRDADDR:
 	case SIOCSIFBRDADDR:
@@ -3146,49 +2998,6 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
 	case SIOCSMIIREG:
 		return dev_ifsioc(net, sock, cmd, argp);
 
-	case ATM_GETLINKRATE32:
-	case ATM_GETNAMES32:
-	case ATM_GETTYPE32:
-	case ATM_GETESI32:
-	case ATM_GETADDR32:
-	case ATM_RSTADDR32:
-	case ATM_ADDADDR32:
-	case ATM_DELADDR32:
-	case ATM_GETCIRANGE32:
-	case ATM_SETCIRANGE32:
-	case ATM_SETESI32:
-	case ATM_SETESIF32:
-	case ATM_GETSTAT32:
-	case ATM_GETSTATZ32:
-	case ATM_GETLOOP32:
-	case ATM_SETLOOP32:
-	case ATM_QUERYLOOP32:
-	case SONET_GETSTAT:
-	case SONET_GETSTATZ:
-	case SONET_GETDIAG:
-	case SONET_SETDIAG:
-	case SONET_CLRDIAG:
-	case SONET_SETFRAMING:
-	case SONET_GETFRAMING:
-	case SONET_GETFRSENSE:
-		return do_atm_ioctl(net, sock, cmd, arg);
-
-	case ATMSIGD_CTRL:
-	case ATMARPD_CTRL:
-	case ATMLEC_CTRL:
-	case ATMLEC_MCAST:
-	case ATMLEC_DATA:
-	case ATM_SETSC:
-	case SIOCSIFATMTCP:
-	case SIOCMKCLIP:
-	case ATMARP_MKIP:
-	case ATMARP_SETENTRY:
-	case ATMARP_ENCAP:
-	case ATMTCP_CREATE:
-	case ATMTCP_REMOVE:
-	case ATMMPC_CTRL:
-	case ATMMPC_DATA:
-
 	case SIOCSARP:
 	case SIOCGARP:
 	case SIOCDARP: