Diffstat (limited to 'drivers')
 drivers/accessibility/Kconfig                 |  12
 drivers/block/aoe/aoecmd.c                    |  10
 drivers/char/sx.c                             |   9
 drivers/char/vt.c                             |   6
 drivers/infiniband/hw/cxgb3/cxio_hal.c        |  90
 drivers/infiniband/hw/cxgb3/cxio_hal.h        |   8
 drivers/infiniband/hw/cxgb3/cxio_resource.c   |  36
 drivers/infiniband/hw/cxgb3/iwch_mem.c        |  75
 drivers/infiniband/hw/cxgb3/iwch_provider.c   |  68
 drivers/infiniband/hw/cxgb3/iwch_provider.h   |   8
 drivers/infiniband/hw/ehca/ehca_classes.h     |   2
 drivers/infiniband/hw/ehca/ehca_irq.c         |   4
 drivers/infiniband/hw/ehca/ehca_qp.c          |   5
 drivers/infiniband/hw/ipath/ipath_driver.c    | 138
 drivers/infiniband/hw/ipath/ipath_file_ops.c  |  72
 drivers/infiniband/hw/ipath/ipath_iba7220.c   |  26
 drivers/infiniband/hw/ipath/ipath_init_chip.c |  95
 drivers/infiniband/hw/ipath/ipath_intr.c      |  80
 drivers/infiniband/hw/ipath/ipath_kernel.h    |   8
 drivers/infiniband/hw/ipath/ipath_rc.c        |   6
 drivers/infiniband/hw/ipath/ipath_ruc.c       |   7
 drivers/infiniband/hw/ipath/ipath_sdma.c      |  44
 drivers/infiniband/hw/ipath/ipath_verbs.c     |   2
 drivers/input/misc/Kconfig                    |   2
 drivers/md/raid10.c                           |   2
 drivers/media/Makefile                        |   2
 drivers/media/video/cx18/cx18-driver.c        |   2
 drivers/media/video/saa7134/saa7134-video.c   |   2
 drivers/net/3c59x.c                           |  73
 drivers/net/Kconfig                           |  14
 drivers/net/appletalk/cops.c                  |  16
 drivers/net/bonding/bond_main.c               |  24
 drivers/net/bonding/bond_sysfs.c              |  16
 drivers/net/cxgb3/cxgb3_main.c                |   4
 drivers/net/e1000e/defines.h                  |  10
 drivers/net/e1000e/e1000.h                    |   7
 drivers/net/e1000e/ethtool.c                  |  45
 drivers/net/e1000e/hw.h                       |  22
 drivers/net/e1000e/ich8lan.c                  |  83
 drivers/net/e1000e/netdev.c                   | 330
 drivers/net/e1000e/phy.c                      | 278
 drivers/net/eexpress.c                        |  11
 drivers/net/fs_enet/mii-fec.c                 |   3
 drivers/net/gianfar.c                         |   5
 drivers/net/gianfar.h                         |   3
 drivers/net/gianfar_sysfs.c                   |  10
 drivers/net/macvlan.c                         |   2
 drivers/net/mv643xx_eth.c                     | 160
 drivers/net/pcnet32.c                         |  61
 drivers/net/phy/phy.c                         |   2
 drivers/net/tulip/uli526x.c                   |  38
 drivers/net/ucc_geth.c                        | 278
 drivers/net/ucc_geth.h                        |  48
 drivers/net/ucc_geth_ethtool.c                |   6
 drivers/net/ucc_geth_mii.c                    |   4
 drivers/net/usb/asix.c                        |   4
 drivers/rtc/rtc-ds1511.c                      |   4
 drivers/s390/char/tty3270.c                   |  15
 drivers/s390/cio/blacklist.c                  | 323
 drivers/s390/cio/cio.c                        |  39
 drivers/s390/cio/cio.h                        |   2
 drivers/s390/cio/cio_debug.h                  |   6
 drivers/s390/cio/css.c                        |   4
 drivers/s390/cio/device.c                     |  25
 drivers/s390/cio/device_fsm.c                 |  44
 drivers/s390/cio/device_id.c                  |   4
 drivers/s390/cio/device_pgid.c                |  12
 drivers/s390/s390mach.c                       |   3
 drivers/scsi/dpt_i2o.c                        |  78
 drivers/scsi/dpti.h                           |  13
 drivers/serial/serial_core.c                  |   3
 drivers/usb/serial/iuu_phoenix.c              |   6
 72 files changed, 1935 insertions(+), 1024 deletions(-)
diff --git a/drivers/accessibility/Kconfig b/drivers/accessibility/Kconfig
index 1264c4b98094..ef3b65bfdd0a 100644
--- a/drivers/accessibility/Kconfig
+++ b/drivers/accessibility/Kconfig
@@ -1,7 +1,17 @@
 menuconfig ACCESSIBILITY
 	bool "Accessibility support"
 	---help---
-	  Enable a submenu where accessibility items may be enabled.
+	  Accessibility handles all special kinds of hardware devices or
+	  software adapters which help people with disabilities (e.g.
+	  blindness) to use computers.
+
+	  That includes braille devices, speech synthesis, keyboard
+	  remapping, etc.
+
+	  Say Y here to get to see options for accessibility.
+	  This option alone does not add any kernel code.
+
+	  If you say N, all options in this submenu will be skipped and disabled.
 
 	  If unsure, say N.
 
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 8fc429cf82b6..41f818be2f7e 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -755,11 +755,13 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
 {
 	unsigned long n_sect = bio->bi_size >> 9;
 	const int rw = bio_data_dir(bio);
+	struct hd_struct *part;
 
-	all_stat_inc(disk, ios[rw], sector);
-	all_stat_add(disk, ticks[rw], duration, sector);
-	all_stat_add(disk, sectors[rw], n_sect, sector);
-	all_stat_add(disk, io_ticks, duration, sector);
+	part = get_part(disk, sector);
+	all_stat_inc(disk, part, ios[rw], sector);
+	all_stat_add(disk, part, ticks[rw], duration, sector);
+	all_stat_add(disk, part, sectors[rw], n_sect, sector);
+	all_stat_add(disk, part, io_ticks, duration, sector);
 }
 
 void
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index f39f6fd89350..b1a7a8cb65ea 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -970,7 +970,8 @@ static int sx_set_real_termios(void *ptr)
 		sx_write_channel_byte(port, hi_mask, 0x1f);
 		break;
 	default:
-		printk(KERN_INFO "sx: Invalid wordsize: %u\n", CFLAG & CSIZE);
+		printk(KERN_INFO "sx: Invalid wordsize: %u\n",
+			(unsigned int)CFLAG & CSIZE);
 		break;
 	}
 
@@ -997,7 +998,8 @@ static int sx_set_real_termios(void *ptr)
 		set_bit(TTY_HW_COOK_IN, &port->gs.tty->flags);
 	}
 	sx_dprintk(SX_DEBUG_TERMIOS, "iflags: %x(%d) ",
-		port->gs.tty->termios->c_iflag, I_OTHER(port->gs.tty));
+		(unsigned int)port->gs.tty->termios->c_iflag,
+		I_OTHER(port->gs.tty));
 
 /* Tell line discipline whether we will do output cooking.
  * If OPOST is set and no other output flags are set then we can do output
@@ -1010,7 +1012,8 @@ static int sx_set_real_termios(void *ptr)
 		clear_bit(TTY_HW_COOK_OUT, &port->gs.tty->flags);
 	}
 	sx_dprintk(SX_DEBUG_TERMIOS, "oflags: %x(%d)\n",
-		port->gs.tty->termios->c_oflag, O_OTHER(port->gs.tty));
+		(unsigned int)port->gs.tty->termios->c_oflag,
+		O_OTHER(port->gs.tty));
 	/* port->c_dcd = sx_get_CD (port); */
 	func_exit();
 	return 0;
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index e458b08139af..fa1ffbf2c621 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -2742,6 +2742,10 @@ static int con_open(struct tty_struct *tty, struct file *filp)
 		tty->winsize.ws_row = vc_cons[currcons].d->vc_rows;
 		tty->winsize.ws_col = vc_cons[currcons].d->vc_cols;
 	}
+	if (vc->vc_utf)
+		tty->termios->c_iflag |= IUTF8;
+	else
+		tty->termios->c_iflag &= ~IUTF8;
 	release_console_sem();
 	vcs_make_sysfs(tty);
 	return ret;
@@ -2918,6 +2922,8 @@ int __init vty_init(void)
 	console_driver->minor_start = 1;
 	console_driver->type = TTY_DRIVER_TYPE_CONSOLE;
 	console_driver->init_termios = tty_std_termios;
+	if (default_utf8)
+		console_driver->init_termios.c_iflag |= IUTF8;
 	console_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS;
 	tty_set_operations(console_driver, &con_ops);
 	if (tty_register_driver(console_driver))
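The two vt.c hunks above wire the console's UTF-8 mode into the tty's termios: a newly opened VC inherits its vc_utf state, and the driver default tracks the default_utf8 parameter, both via the IUTF8 input flag. Userspace can read that flag back with ordinary termios calls; a minimal check on stdin, assuming a libc that defines IUTF8 (glibc does):

#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	struct termios tio;

	if (tcgetattr(STDIN_FILENO, &tio) != 0) {
		perror("tcgetattr");
		return 1;
	}
	/* IUTF8 is the input-flag bit set by the hunks above */
	printf("IUTF8 is %s\n", (tio.c_iflag & IUTF8) ? "set" : "clear");
	return 0;
}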
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 5fd8506a8657..ebf9d3043f80 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -588,7 +588,7 @@ static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
  * caller aquires the ctrl_qp lock before the call
  */
 static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
-				      u32 len, void *data, int completion)
+				      u32 len, void *data)
 {
 	u32 i, nr_wqe, copy_len;
 	u8 *copy_data;
@@ -624,7 +624,7 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
 		flag = 0;
 		if (i == (nr_wqe - 1)) {
 			/* last WQE */
-			flag = completion ? T3_COMPLETION_FLAG : 0;
+			flag = T3_COMPLETION_FLAG;
 			if (len % 32)
 				utx_len = len / 32 + 1;
 			else
@@ -683,21 +683,20 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
 	return 0;
 }
 
-/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl, and pbl_size
- * OUT: stag index, actual pbl_size, pbl_addr allocated.
+/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl_size and pbl_addr
+ * OUT: stag index
  * TBD: shared memory region support
  */
 static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
 			 u32 *stag, u8 stag_state, u32 pdid,
 			 enum tpt_mem_type type, enum tpt_mem_perm perm,
-			 u32 zbva, u64 to, u32 len, u8 page_size, __be64 *pbl,
-			 u32 *pbl_size, u32 *pbl_addr)
+			 u32 zbva, u64 to, u32 len, u8 page_size,
+			 u32 pbl_size, u32 pbl_addr)
 {
 	int err;
 	struct tpt_entry tpt;
 	u32 stag_idx;
 	u32 wptr;
-	int rereg = (*stag != T3_STAG_UNSET);
 
 	stag_state = stag_state > 0;
 	stag_idx = (*stag) >> 8;
@@ -711,30 +710,8 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
 	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
 	     __func__, stag_state, type, pdid, stag_idx);
 
-	if (reset_tpt_entry)
-		cxio_hal_pblpool_free(rdev_p, *pbl_addr, *pbl_size << 3);
-	else if (!rereg) {
-		*pbl_addr = cxio_hal_pblpool_alloc(rdev_p, *pbl_size << 3);
-		if (!*pbl_addr) {
-			return -ENOMEM;
-		}
-	}
-
 	mutex_lock(&rdev_p->ctrl_qp.lock);
 
-	/* write PBL first if any - update pbl only if pbl list exist */
-	if (pbl) {
-
-		PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
-		     __func__, *pbl_addr, rdev_p->rnic_info.pbl_base,
-		     *pbl_size);
-		err = cxio_hal_ctrl_qp_write_mem(rdev_p,
-				(*pbl_addr >> 5),
-				(*pbl_size << 3), pbl, 0);
-		if (err)
-			goto ret;
-	}
-
 	/* write TPT entry */
 	if (reset_tpt_entry)
 		memset(&tpt, 0, sizeof(tpt));
@@ -749,23 +726,23 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
 				V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
 				V_TPT_PAGE_SIZE(page_size));
 		tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :
-			    cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, *pbl_addr)>>3));
+			    cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
 		tpt.len = cpu_to_be32(len);
 		tpt.va_hi = cpu_to_be32((u32) (to >> 32));
 		tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
 		tpt.rsvd_bind_cnt_or_pstag = 0;
 		tpt.rsvd_pbl_size = reset_tpt_entry ? 0 :
-			    cpu_to_be32(V_TPT_PBL_SIZE((*pbl_size) >> 2));
+			    cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2));
 	}
 	err = cxio_hal_ctrl_qp_write_mem(rdev_p,
 				       stag_idx +
 				       (rdev_p->rnic_info.tpt_base >> 5),
-				       sizeof(tpt), &tpt, 1);
+				       sizeof(tpt), &tpt);
 
 	/* release the stag index to free pool */
 	if (reset_tpt_entry)
 		cxio_hal_put_stag(rdev_p->rscp, stag_idx);
-ret:
+
 	wptr = rdev_p->ctrl_qp.wptr;
 	mutex_unlock(&rdev_p->ctrl_qp.lock);
 	if (!err)
@@ -776,44 +753,67 @@ ret:
 	return err;
 }
 
+int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
+		   u32 pbl_addr, u32 pbl_size)
+{
+	u32 wptr;
+	int err;
+
+	PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
+	     __func__, pbl_addr, rdev_p->rnic_info.pbl_base,
+	     pbl_size);
+
+	mutex_lock(&rdev_p->ctrl_qp.lock);
+	err = cxio_hal_ctrl_qp_write_mem(rdev_p, pbl_addr >> 5, pbl_size << 3,
+					 pbl);
+	wptr = rdev_p->ctrl_qp.wptr;
+	mutex_unlock(&rdev_p->ctrl_qp.lock);
+	if (err)
+		return err;
+
+	if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
+				     SEQ32_GE(rdev_p->ctrl_qp.rptr,
+					      wptr)))
+		return -ERESTARTSYS;
+
+	return 0;
+}
+
 int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
 			   enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
-			   u8 page_size, __be64 *pbl, u32 *pbl_size,
-			   u32 *pbl_addr)
+			   u8 page_size, u32 pbl_size, u32 pbl_addr)
 {
 	*stag = T3_STAG_UNSET;
 	return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
-			     zbva, to, len, page_size, pbl, pbl_size, pbl_addr);
+			     zbva, to, len, page_size, pbl_size, pbl_addr);
 }
 
 int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
 			   enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
-			   u8 page_size, __be64 *pbl, u32 *pbl_size,
-			   u32 *pbl_addr)
+			   u8 page_size, u32 pbl_size, u32 pbl_addr)
 {
 	return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
-			     zbva, to, len, page_size, pbl, pbl_size, pbl_addr);
+			     zbva, to, len, page_size, pbl_size, pbl_addr);
 }
 
 int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size,
 		   u32 pbl_addr)
 {
-	return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL,
-			     &pbl_size, &pbl_addr);
+	return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
+			     pbl_size, pbl_addr);
 }
 
 int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid)
 {
-	u32 pbl_size = 0;
 	*stag = T3_STAG_UNSET;
 	return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0,
-			     NULL, &pbl_size, NULL);
+			     0, 0);
 }
 
 int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag)
 {
-	return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL,
-			     NULL, NULL);
+	return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
+			     0, 0);
 }
 
 int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
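The new cxio_write_pbl() follows the driver's control-QP convention: post the write while holding the mutex, snapshot the write pointer, then sleep until the read pointer reaches it, where SEQ32_GE() is a wraparound-safe 32-bit sequence comparison. A standalone model of that comparison, with a hypothetical seq32_ge() standing in for the driver's macro:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for SEQ32_GE(): true when 'a' is at or past
 * 'b' in 32-bit sequence space, correct across wraparound. */
static int seq32_ge(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

int main(void)
{
	uint32_t wptr = 2;          /* producer has wrapped past 0 */
	uint32_t rptr = 0xfffffffe; /* consumer still below the wrap */

	/* consumer catches up: ...fffe, ffff, 0, 1, 2 */
	while (!seq32_ge(rptr, wptr))
		rptr++;
	printf("caught up at rptr=%u\n", rptr);
	return 0;
}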
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 69ab08ebc680..6e128f6bab05 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -154,14 +154,14 @@ int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq,
 int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq,
 		    struct cxio_ucontext *uctx);
 int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode);
+int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
+		   u32 pbl_addr, u32 pbl_size);
 int cxio_register_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
 			   enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
-			   u8 page_size, __be64 *pbl, u32 *pbl_size,
-			   u32 *pbl_addr);
+			   u8 page_size, u32 pbl_size, u32 pbl_addr);
 int cxio_reregister_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
 			   enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
-			   u8 page_size, __be64 *pbl, u32 *pbl_size,
-			   u32 *pbl_addr);
+			   u8 page_size, u32 pbl_size, u32 pbl_addr);
 int cxio_dereg_mem(struct cxio_rdev *rdev, u32 stag, u32 pbl_size,
 		   u32 pbl_addr);
 int cxio_allocate_window(struct cxio_rdev *rdev, u32 * stag, u32 pdid);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
index 45ed4f25ef78..bd233c087653 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c
@@ -250,7 +250,6 @@ void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
  */
 
 #define MIN_PBL_SHIFT 8			/* 256B == min PBL size (32 entries) */
-#define PBL_CHUNK 2*1024*1024
 
 u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
 {
@@ -267,14 +266,35 @@ void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
 
 int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p)
 {
-	unsigned long i;
+	unsigned pbl_start, pbl_chunk;
+
 	rdev_p->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
-	if (rdev_p->pbl_pool)
-		for (i = rdev_p->rnic_info.pbl_base;
-		     i <= rdev_p->rnic_info.pbl_top - PBL_CHUNK + 1;
-		     i += PBL_CHUNK)
-			gen_pool_add(rdev_p->pbl_pool, i, PBL_CHUNK, -1);
-	return rdev_p->pbl_pool ? 0 : -ENOMEM;
+	if (!rdev_p->pbl_pool)
+		return -ENOMEM;
+
+	pbl_start = rdev_p->rnic_info.pbl_base;
+	pbl_chunk = rdev_p->rnic_info.pbl_top - pbl_start + 1;
+
+	while (pbl_start < rdev_p->rnic_info.pbl_top) {
+		pbl_chunk = min(rdev_p->rnic_info.pbl_top - pbl_start + 1,
+				pbl_chunk);
+		if (gen_pool_add(rdev_p->pbl_pool, pbl_start, pbl_chunk, -1)) {
+			PDBG("%s failed to add PBL chunk (%x/%x)\n",
+			     __func__, pbl_start, pbl_chunk);
+			if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
+				printk(KERN_WARNING MOD "%s: Failed to add all PBL chunks (%x/%x)\n",
+				       __func__, pbl_start, rdev_p->rnic_info.pbl_top - pbl_start);
+				return 0;
+			}
+			pbl_chunk >>= 1;
+		} else {
+			PDBG("%s added PBL chunk (%x/%x)\n",
+			     __func__, pbl_start, pbl_chunk);
+			pbl_start += pbl_chunk;
+		}
+	}
+
+	return 0;
 }
 
 void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p)
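The rewritten cxio_hal_pblpool_create() above drops the fixed 2 MB PBL_CHUNK and instead tries to hand the whole PBL range to the genalloc pool, halving the chunk size on each gen_pool_add() failure until everything is added or the chunk floor is reached. A userspace model of that backoff loop, with a hypothetical try_add() in place of gen_pool_add():

#include <stdio.h>

/* Hypothetical stand-in for gen_pool_add(): pretend chunks larger
 * than 'limit' fail, the way a large allocation might. */
static int try_add(unsigned start, unsigned chunk, unsigned limit)
{
	return chunk > limit ? -1 : 0;
}

int main(void)
{
	unsigned start = 0, top = 1 << 20;	/* region [start, top) */
	unsigned chunk = top - start;		/* first try: one big chunk */
	unsigned limit = 1 << 16;		/* what try_add() accepts */

	while (start < top) {
		if (chunk > top - start)
			chunk = top - start;	/* don't run past the end */
		if (try_add(start, chunk, limit)) {
			chunk >>= 1;		/* halve and retry */
			if (!chunk)
				return 1;	/* nothing fit: give up */
		} else {
			printf("added [%#x, %#x)\n", start, start + chunk);
			start += chunk;
		}
	}
	return 0;
}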
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
index 58c3d61bcd14..ec49a5cbdebb 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c
@@ -35,17 +35,26 @@
 #include <rdma/ib_verbs.h>
 
 #include "cxio_hal.h"
+#include "cxio_resource.h"
 #include "iwch.h"
 #include "iwch_provider.h"
 
-int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
-					struct iwch_mr *mhp,
-					int shift,
-					__be64 *page_list)
+static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
 {
-	u32 stag;
 	u32 mmid;
 
+	mhp->attr.state = 1;
+	mhp->attr.stag = stag;
+	mmid = stag >> 8;
+	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
+	insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
+	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+}
+
+int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
+		      struct iwch_mr *mhp, int shift)
+{
+	u32 stag;
 
 	if (cxio_register_phys_mem(&rhp->rdev,
 				   &stag, mhp->attr.pdid,
@@ -53,28 +62,21 @@ int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
 				   mhp->attr.zbva,
 				   mhp->attr.va_fbo,
 				   mhp->attr.len,
-				   shift-12,
-				   page_list,
-				   &mhp->attr.pbl_size, &mhp->attr.pbl_addr))
+				   shift - 12,
+				   mhp->attr.pbl_size, mhp->attr.pbl_addr))
 		return -ENOMEM;
-	mhp->attr.state = 1;
-	mhp->attr.stag = stag;
-	mmid = stag >> 8;
-	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
-	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+
+	iwch_finish_mem_reg(mhp, stag);
+
 	return 0;
 }
 
 int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
 					struct iwch_mr *mhp,
 					int shift,
-					__be64 *page_list,
 					int npages)
 {
 	u32 stag;
-	u32 mmid;
-
 
 	/* We could support this... */
 	if (npages > mhp->attr.pbl_size)
@@ -87,19 +89,40 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
 				   mhp->attr.zbva,
 				   mhp->attr.va_fbo,
 				   mhp->attr.len,
-				   shift-12,
-				   page_list,
-				   &mhp->attr.pbl_size, &mhp->attr.pbl_addr))
+				   shift - 12,
+				   mhp->attr.pbl_size, mhp->attr.pbl_addr))
 		return -ENOMEM;
-	mhp->attr.state = 1;
-	mhp->attr.stag = stag;
-	mmid = stag >> 8;
-	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
-	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+
+	iwch_finish_mem_reg(mhp, stag);
+
+	return 0;
+}
+
+int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
+{
+	mhp->attr.pbl_addr = cxio_hal_pblpool_alloc(&mhp->rhp->rdev,
+						    npages << 3);
+
+	if (!mhp->attr.pbl_addr)
+		return -ENOMEM;
+
+	mhp->attr.pbl_size = npages;
+
 	return 0;
 }
 
+void iwch_free_pbl(struct iwch_mr *mhp)
+{
+	cxio_hal_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
+			      mhp->attr.pbl_size << 3);
+}
+
+int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset)
+{
+	return cxio_write_pbl(&mhp->rhp->rdev, pages,
+			      mhp->attr.pbl_addr + (offset << 3), npages);
+}
+
 int build_phys_page_list(struct ib_phys_buf *buffer_list,
 			 int num_phys_buf,
 			 u64 *iova_start,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index d07d3a377b5f..8934178a23ee 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -442,6 +442,7 @@ static int iwch_dereg_mr(struct ib_mr *ib_mr)
 	mmid = mhp->attr.stag >> 8;
 	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
 		       mhp->attr.pbl_addr);
+	iwch_free_pbl(mhp);
 	remove_handle(rhp, &rhp->mmidr, mmid);
 	if (mhp->kva)
 		kfree((void *) (unsigned long) mhp->kva);
@@ -475,6 +476,8 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
 	if (!mhp)
 		return ERR_PTR(-ENOMEM);
 
+	mhp->rhp = rhp;
+
 	/* First check that we have enough alignment */
 	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
 		ret = -EINVAL;
@@ -492,7 +495,17 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
 	if (ret)
 		goto err;
 
-	mhp->rhp = rhp;
+	ret = iwch_alloc_pbl(mhp, npages);
+	if (ret) {
+		kfree(page_list);
+		goto err_pbl;
+	}
+
+	ret = iwch_write_pbl(mhp, page_list, npages, 0);
+	kfree(page_list);
+	if (ret)
+		goto err_pbl;
+
 	mhp->attr.pdid = php->pdid;
 	mhp->attr.zbva = 0;
 
@@ -502,12 +515,15 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
 
 	mhp->attr.len = (u32) total_size;
 	mhp->attr.pbl_size = npages;
-	ret = iwch_register_mem(rhp, php, mhp, shift, page_list);
-	kfree(page_list);
-	if (ret) {
-		goto err;
-	}
+	ret = iwch_register_mem(rhp, php, mhp, shift);
+	if (ret)
+		goto err_pbl;
+
 	return &mhp->ibmr;
+
+err_pbl:
+	iwch_free_pbl(mhp);
+
 err:
 	kfree(mhp);
 	return ERR_PTR(ret);
@@ -560,7 +576,7 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
 		return ret;
 	}
 
-	ret = iwch_reregister_mem(rhp, php, &mh, shift, page_list, npages);
+	ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
 	kfree(page_list);
 	if (ret) {
 		return ret;
@@ -602,6 +618,8 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (!mhp)
 		return ERR_PTR(-ENOMEM);
 
+	mhp->rhp = rhp;
+
 	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
 	if (IS_ERR(mhp->umem)) {
 		err = PTR_ERR(mhp->umem);
@@ -615,10 +633,14 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
 		n += chunk->nents;
 
-	pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
+	err = iwch_alloc_pbl(mhp, n);
+	if (err)
+		goto err;
+
+	pages = (__be64 *) __get_free_page(GFP_KERNEL);
 	if (!pages) {
 		err = -ENOMEM;
-		goto err;
+		goto err_pbl;
 	}
 
 	i = n = 0;
@@ -630,25 +652,38 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				pages[i++] = cpu_to_be64(sg_dma_address(
 					&chunk->page_list[j]) +
 					mhp->umem->page_size * k);
+				if (i == PAGE_SIZE / sizeof *pages) {
+					err = iwch_write_pbl(mhp, pages, i, n);
+					if (err)
+						goto pbl_done;
+					n += i;
+					i = 0;
+				}
 		}
 	}
 
-	mhp->rhp = rhp;
+	if (i)
+		err = iwch_write_pbl(mhp, pages, i, n);
+
+pbl_done:
+	free_page((unsigned long) pages);
+	if (err)
+		goto err_pbl;
+
 	mhp->attr.pdid = php->pdid;
 	mhp->attr.zbva = 0;
 	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
 	mhp->attr.va_fbo = virt;
 	mhp->attr.page_size = shift - 12;
 	mhp->attr.len = (u32) length;
-	mhp->attr.pbl_size = i;
-	err = iwch_register_mem(rhp, php, mhp, shift, pages);
-	kfree(pages);
+
+	err = iwch_register_mem(rhp, php, mhp, shift);
 	if (err)
-		goto err;
+		goto err_pbl;
 
 	if (udata && !t3a_device(rhp)) {
 		uresp.pbl_addr = (mhp->attr.pbl_addr -
 				  rhp->rdev.rnic_info.pbl_base) >> 3;
 		PDBG("%s user resp pbl_addr 0x%x\n", __func__,
 		     uresp.pbl_addr);
 
@@ -661,6 +696,9 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 	return &mhp->ibmr;
 
+err_pbl:
+	iwch_free_pbl(mhp);
+
 err:
 	ib_umem_release(mhp->umem);
 	kfree(mhp);
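With the hunks above, iwch_reg_user_mr() no longer kmallocs the whole page list; it streams entries through one scratch page, calling iwch_write_pbl() each time PAGE_SIZE / sizeof *pages entries accumulate and once more for the tail. A compact model of that fill-and-flush loop, with a hypothetical flush() in place of iwch_write_pbl():

#include <stdio.h>

#define BUF_ENTRIES 4	/* stands in for PAGE_SIZE / sizeof *pages */

/* Hypothetical stand-in for iwch_write_pbl(): write 'cnt' buffered
 * entries at destination offset 'off'. */
static void flush(const unsigned long *buf, int cnt, int off)
{
	printf("flush %d entries at offset %d\n", cnt, off);
}

int main(void)
{
	unsigned long buf[BUF_ENTRIES];
	unsigned long v;
	int i = 0, n = 0;	/* i: buffered, n: already flushed */

	for (v = 0; v < 10; v++) {
		buf[i++] = v;
		if (i == BUF_ENTRIES) {	/* scratch buffer full */
			flush(buf, i, n);
			n += i;
			i = 0;
		}
	}
	if (i)			/* flush the partial tail */
		flush(buf, i, n);
	return 0;
}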
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index db5100d27ca2..836163fc5429 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -340,14 +340,14 @@ int iwch_quiesce_qps(struct iwch_cq *chp);
 int iwch_resume_qps(struct iwch_cq *chp);
 void stop_read_rep_timer(struct iwch_qp *qhp);
 int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
-					struct iwch_mr *mhp,
-					int shift,
-					__be64 *page_list);
+		      struct iwch_mr *mhp, int shift);
 int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
 					struct iwch_mr *mhp,
 					int shift,
-					__be64 *page_list,
 					int npages);
+int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
+void iwch_free_pbl(struct iwch_mr *mhp);
+int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
 int build_phys_page_list(struct ib_phys_buf *buffer_list,
 			 int num_phys_buf,
 			 u64 *iova_start,
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 00bab60f6de4..1e9e99a13933 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -192,6 +192,8 @@ struct ehca_qp {
 	int mtu_shift;
 	u32 message_count;
 	u32 packet_count;
+	atomic_t nr_events; /* events seen */
+	wait_queue_head_t wait_completion;
 };
 
 #define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index ca5eb0cb628c..ce1ab0571be3 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -204,6 +204,8 @@ static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
 
 	read_lock(&ehca_qp_idr_lock);
 	qp = idr_find(&ehca_qp_idr, token);
+	if (qp)
+		atomic_inc(&qp->nr_events);
 	read_unlock(&ehca_qp_idr_lock);
 
 	if (!qp)
@@ -223,6 +225,8 @@ static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
 	if (fatal && qp->ext_type == EQPT_SRQBASE)
 		dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);
 
+	if (atomic_dec_and_test(&qp->nr_events))
+		wake_up(&qp->wait_completion);
 	return;
 }
 
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 18fba92fa7ae..3f59587338ea 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -566,6 +566,8 @@ static struct ehca_qp *internal_create_qp(
 		return ERR_PTR(-ENOMEM);
 	}
 
+	atomic_set(&my_qp->nr_events, 0);
+	init_waitqueue_head(&my_qp->wait_completion);
 	spin_lock_init(&my_qp->spinlock_s);
 	spin_lock_init(&my_qp->spinlock_r);
 	my_qp->qp_type = qp_type;
@@ -1934,6 +1936,9 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 	idr_remove(&ehca_qp_idr, my_qp->token);
 	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
 
+	/* now wait until all pending events have completed */
+	wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));
+
 	h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
 	if (h_ret != H_SUCCESS) {
 		ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li "
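The three ehca hunks above close a destroy-vs-event race with a counted drain: the IRQ path takes a reference on the QP while still under the idr lock, drops it after dispatching, and internal_destroy_qp() waits for the count to hit zero once the QP is unhooked from the idr. A sketch of the same pattern with the kernel primitives used here (structure and function names invented for illustration; not buildable standalone):

#include <linux/wait.h>
#include <asm/atomic.h>

struct obj {
	atomic_t nr_events;		/* events currently in flight */
	wait_queue_head_t wait_completion;
};

/* event side: count taken under the lookup lock, dropped when done */
static void event_get(struct obj *o)
{
	atomic_inc(&o->nr_events);
}

static void event_put(struct obj *o)
{
	if (atomic_dec_and_test(&o->nr_events))
		wake_up(&o->wait_completion);
}

/* destroy side: after removing 'o' from the lookup table, wait for
 * any handler that found it earlier to finish */
static void drain_events(struct obj *o)
{
	wait_event(o->wait_completion, !atomic_read(&o->nr_events));
}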
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index acf30c06a0c0..ce7b7c34360e 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -1197,7 +1197,7 @@ void ipath_kreceive(struct ipath_portdata *pd)
 	}
 
 reloop:
-	for (last = 0, i = 1; !last; i++) {
+	for (last = 0, i = 1; !last; i += !last) {
 		hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
 		eflags = ipath_hdrget_err_flags(rhf_addr);
 		etype = ipath_hdrget_rcv_type(rhf_addr);
@@ -1428,6 +1428,40 @@ static void ipath_update_pio_bufs(struct ipath_devdata *dd)
 	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
 }
 
+/*
+ * used to force update of pioavailshadow if we can't get a pio buffer.
+ * Needed primarily due to exitting freeze mode after recovering
+ * from errors.  Done lazily, because it's safer (known to not
+ * be writing pio buffers).
+ */
+static void ipath_reset_availshadow(struct ipath_devdata *dd)
+{
+	int i, im;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipath_pioavail_lock, flags);
+	for (i = 0; i < dd->ipath_pioavregs; i++) {
+		u64 val, oldval;
+		/* deal with 6110 chip bug on high register #s */
+		im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
+			i ^ 1 : i;
+		val = le64_to_cpu(dd->ipath_pioavailregs_dma[im]);
+		/*
+		 * busy out the buffers not in the kernel avail list,
+		 * without changing the generation bits.
+		 */
+		oldval = dd->ipath_pioavailshadow[i];
+		dd->ipath_pioavailshadow[i] = val |
+			((~dd->ipath_pioavailkernel[i] <<
+			INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT) &
+			0xaaaaaaaaaaaaaaaaULL); /* All BUSY bits in qword */
+		if (oldval != dd->ipath_pioavailshadow[i])
+			ipath_dbg("shadow[%d] was %Lx, now %lx\n",
+				i, oldval, dd->ipath_pioavailshadow[i]);
+	}
+	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
+}
+
 /**
  * ipath_setrcvhdrsize - set the receive header size
  * @dd: the infinipath device
@@ -1482,9 +1516,12 @@ static noinline void no_pio_bufs(struct ipath_devdata *dd)
 	 */
 	ipath_stats.sps_nopiobufs++;
 	if (!(++dd->ipath_consec_nopiobuf % 100000)) {
-		ipath_dbg("%u pio sends with no bufavail; dmacopy: "
-			"%llx %llx %llx %llx; shadow: %lx %lx %lx %lx\n",
+		ipath_force_pio_avail_update(dd); /* at start */
+		ipath_dbg("%u tries no piobufavail ts%lx; dmacopy: "
+			"%llx %llx %llx %llx\n"
+			"ipath shadow: %lx %lx %lx %lx\n",
 			dd->ipath_consec_nopiobuf,
+			(unsigned long)get_cycles(),
 			(unsigned long long) le64_to_cpu(dma[0]),
 			(unsigned long long) le64_to_cpu(dma[1]),
 			(unsigned long long) le64_to_cpu(dma[2]),
@@ -1496,14 +1533,17 @@ static noinline void no_pio_bufs(struct ipath_devdata *dd)
 		 */
 		if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
 			(sizeof(shadow[0]) * 4 * 4))
-			ipath_dbg("2nd group: dmacopy: %llx %llx "
-				"%llx %llx; shadow: %lx %lx %lx %lx\n",
+			ipath_dbg("2nd group: dmacopy: "
+				"%llx %llx %llx %llx\n"
+				"ipath shadow: %lx %lx %lx %lx\n",
 				(unsigned long long)le64_to_cpu(dma[4]),
 				(unsigned long long)le64_to_cpu(dma[5]),
 				(unsigned long long)le64_to_cpu(dma[6]),
 				(unsigned long long)le64_to_cpu(dma[7]),
-				shadow[4], shadow[5], shadow[6],
-				shadow[7]);
+				shadow[4], shadow[5], shadow[6], shadow[7]);
+
+		/* at end, so update likely happened */
+		ipath_reset_availshadow(dd);
 	}
 }
 
@@ -1652,19 +1692,46 @@ void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
 			      unsigned len, int avail)
 {
 	unsigned long flags;
-	unsigned end;
+	unsigned end, cnt = 0, next;
 
 	/* There are two bits per send buffer (busy and generation) */
 	start *= 2;
-	len *= 2;
-	end = start + len;
+	end = start + len * 2;
 
-	/* Set or clear the generation bits. */
 	spin_lock_irqsave(&ipath_pioavail_lock, flags);
+	/* Set or clear the busy bit in the shadow. */
 	while (start < end) {
 		if (avail) {
-			__clear_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
-				dd->ipath_pioavailshadow);
+			unsigned long dma;
+			int i, im;
+			/*
+			 * the BUSY bit will never be set, because we disarm
+			 * the user buffers before we hand them back to the
+			 * kernel.  We do have to make sure the generation
+			 * bit is set correctly in shadow, since it could
+			 * have changed many times while allocated to user.
+			 * We can't use the bitmap functions on the full
+			 * dma array because it is always little-endian, so
+			 * we have to flip to host-order first.
+			 * BITS_PER_LONG is slightly wrong, since it's
+			 * always 64 bits per register in chip...
+			 * We only work on 64 bit kernels, so that's OK.
+			 */
+			/* deal with 6110 chip bug on high register #s */
+			i = start / BITS_PER_LONG;
+			im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
+				i ^ 1 : i;
+			__clear_bit(INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT
+				+ start, dd->ipath_pioavailshadow);
+			dma = (unsigned long) le64_to_cpu(
+				dd->ipath_pioavailregs_dma[im]);
+			if (test_bit((INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
+				+ start) % BITS_PER_LONG, &dma))
+				__set_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
+					+ start, dd->ipath_pioavailshadow);
+			else
+				__clear_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
+					+ start, dd->ipath_pioavailshadow);
 			__set_bit(start, dd->ipath_pioavailkernel);
 		} else {
 			__set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
@@ -1673,7 +1740,44 @@ void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
 		}
 		start += 2;
 	}
+
+	if (dd->ipath_pioupd_thresh) {
+		end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
+		next = find_first_bit(dd->ipath_pioavailkernel, end);
+		while (next < end) {
+			cnt++;
+			next = find_next_bit(dd->ipath_pioavailkernel, end,
+					next + 1);
+		}
+	}
 	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
+
+	/*
+	 * When moving buffers from kernel to user, if number assigned to
+	 * the user is less than the pio update threshold, and threshold
+	 * is supported (cnt was computed > 0), drop the update threshold
+	 * so we update at least once per allocated number of buffers.
+	 * In any case, if the kernel buffers are less than the threshold,
+	 * drop the threshold.  We don't bother increasing it, having once
+	 * decreased it, since it would typically just cycle back and forth.
+	 * If we don't decrease below buffers in use, we can wait a long
+	 * time for an update, until some other context uses PIO buffers.
+	 */
+	if (!avail && len < cnt)
+		cnt = len;
+	if (cnt < dd->ipath_pioupd_thresh) {
+		dd->ipath_pioupd_thresh = cnt;
+		ipath_dbg("Decreased pio update threshold to %u\n",
+			dd->ipath_pioupd_thresh);
+		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+		dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
+				<< INFINIPATH_S_UPDTHRESH_SHIFT);
+		dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
+				<< INFINIPATH_S_UPDTHRESH_SHIFT;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+			dd->ipath_sendctrl);
+		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+	}
 }
 
 /**
@@ -1794,8 +1898,8 @@ void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
 
 		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
 		skip_cancel =
-			!test_bit(IPATH_SDMA_DISABLED, statp) &&
-			test_and_set_bit(IPATH_SDMA_ABORTING, statp);
+			test_and_set_bit(IPATH_SDMA_ABORTING, statp)
+			&& !test_bit(IPATH_SDMA_DISABLED, statp);
 		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
 		if (skip_cancel)
 			goto bail;
@@ -1826,6 +1930,9 @@ void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
 	ipath_disarm_piobufs(dd, 0,
 		dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
 
+	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
+		set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
+
 	if (restore_sendctrl) {
 		/* else done by caller later if needed */
 		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
@@ -1845,7 +1952,6 @@ void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
 		/* only wait so long for intr */
 		dd->ipath_sdma_abort_intr_timeout = jiffies + HZ;
 		dd->ipath_sdma_reset_wait = 200;
-		__set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
 		if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
 			tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
 		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 8b1752202e78..3295177c937e 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -173,47 +173,25 @@ static int ipath_get_base_info(struct file *fp,
 		(void *) dd->ipath_statusp -
 		(void *) dd->ipath_pioavailregs_dma;
 	if (!shared) {
-		kinfo->spi_piocnt = dd->ipath_pbufsport;
+		kinfo->spi_piocnt = pd->port_piocnt;
 		kinfo->spi_piobufbase = (u64) pd->port_piobufs;
 		kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
 			dd->ipath_ureg_align * pd->port_port;
 	} else if (master) {
-		kinfo->spi_piocnt = (dd->ipath_pbufsport / subport_cnt) +
-				    (dd->ipath_pbufsport % subport_cnt);
+		kinfo->spi_piocnt = (pd->port_piocnt / subport_cnt) +
+				    (pd->port_piocnt % subport_cnt);
 		/* Master's PIO buffers are after all the slave's */
 		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
 			dd->ipath_palign *
-			(dd->ipath_pbufsport - kinfo->spi_piocnt);
+			(pd->port_piocnt - kinfo->spi_piocnt);
 	} else {
 		unsigned slave = subport_fp(fp) - 1;
 
-		kinfo->spi_piocnt = dd->ipath_pbufsport / subport_cnt;
+		kinfo->spi_piocnt = pd->port_piocnt / subport_cnt;
 		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
 			dd->ipath_palign * kinfo->spi_piocnt * slave;
 	}
 
-	/*
-	 * Set the PIO avail update threshold to no larger
-	 * than the number of buffers per process. Note that
-	 * we decrease it here, but won't ever increase it.
-	 */
-	if (dd->ipath_pioupd_thresh &&
-	    kinfo->spi_piocnt < dd->ipath_pioupd_thresh) {
-		unsigned long flags;
-
-		dd->ipath_pioupd_thresh = kinfo->spi_piocnt;
-		ipath_dbg("Decreased pio update threshold to %u\n",
-			dd->ipath_pioupd_thresh);
-		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-		dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
-			<< INFINIPATH_S_UPDTHRESH_SHIFT);
-		dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
-			<< INFINIPATH_S_UPDTHRESH_SHIFT;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			dd->ipath_sendctrl);
-		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-	}
-
 	if (shared) {
 		kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
 			dd->ipath_ureg_align * pd->port_port;
@@ -1309,19 +1287,19 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
 	ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port;
 	if (!pd->port_subport_cnt) {
 		/* port is not shared */
-		piocnt = dd->ipath_pbufsport;
+		piocnt = pd->port_piocnt;
 		piobufs = pd->port_piobufs;
 	} else if (!subport_fp(fp)) {
 		/* caller is the master */
-		piocnt = (dd->ipath_pbufsport / pd->port_subport_cnt) +
-			 (dd->ipath_pbufsport % pd->port_subport_cnt);
+		piocnt = (pd->port_piocnt / pd->port_subport_cnt) +
+			 (pd->port_piocnt % pd->port_subport_cnt);
 		piobufs = pd->port_piobufs +
-			dd->ipath_palign * (dd->ipath_pbufsport - piocnt);
+			dd->ipath_palign * (pd->port_piocnt - piocnt);
 	} else {
 		unsigned slave = subport_fp(fp) - 1;
 
 		/* caller is a slave */
-		piocnt = dd->ipath_pbufsport / pd->port_subport_cnt;
+		piocnt = pd->port_piocnt / pd->port_subport_cnt;
 		piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave;
 	}
 
@@ -1633,9 +1611,6 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
 		port_fp(fp) = pd;
 		pd->port_pid = current->pid;
 		strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
-		ipath_chg_pioavailkernel(dd,
-			dd->ipath_pbufsport * (pd->port_port - 1),
-			dd->ipath_pbufsport, 0);
 		ipath_stats.sps_ports++;
 		ret = 0;
 	} else
@@ -1938,11 +1913,25 @@ static int ipath_do_user_init(struct file *fp,
 
 	/* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */
 
+	/* some ports may get extra buffers, calculate that here */
+	if (pd->port_port <= dd->ipath_ports_extrabuf)
+		pd->port_piocnt = dd->ipath_pbufsport + 1;
+	else
+		pd->port_piocnt = dd->ipath_pbufsport;
+
 	/* for right now, kernel piobufs are at end, so port 1 is at 0 */
+	if (pd->port_port <= dd->ipath_ports_extrabuf)
+		pd->port_pio_base = (dd->ipath_pbufsport + 1)
+			* (pd->port_port - 1);
+	else
+		pd->port_pio_base = dd->ipath_ports_extrabuf +
+			dd->ipath_pbufsport * (pd->port_port - 1);
 	pd->port_piobufs = dd->ipath_piobufbase +
-		dd->ipath_pbufsport * (pd->port_port - 1) * dd->ipath_palign;
-	ipath_cdbg(VERBOSE, "Set base of piobufs for port %u to 0x%x\n",
-		   pd->port_port, pd->port_piobufs);
+		pd->port_pio_base * dd->ipath_palign;
+	ipath_cdbg(VERBOSE, "piobuf base for port %u is 0x%x, piocnt %u,"
+		" first pio %u\n", pd->port_port, pd->port_piobufs,
+		pd->port_piocnt, pd->port_pio_base);
+	ipath_chg_pioavailkernel(dd, pd->port_pio_base, pd->port_piocnt, 0);
 
 	/*
 	 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
@@ -2107,7 +2096,6 @@ static int ipath_close(struct inode *in, struct file *fp)
 	}
 
 	if (dd->ipath_kregbase) {
-		int i;
 		/* atomically clear receive enable port and intr avail. */
 		clear_bit(dd->ipath_r_portenable_shift + port,
 			&dd->ipath_rcvctrl);
@@ -2136,9 +2124,9 @@ static int ipath_close(struct inode *in, struct file *fp)
 		ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
 			pd->port_port, dd->ipath_dummy_hdrq_phys);
 
-		i = dd->ipath_pbufsport * (port - 1);
-		ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);
-		ipath_chg_pioavailkernel(dd, i, dd->ipath_pbufsport, 1);
+		ipath_disarm_piobufs(dd, pd->port_pio_base, pd->port_piocnt);
+		ipath_chg_pioavailkernel(dd, pd->port_pio_base,
+			pd->port_piocnt, 1);
 
 		dd->ipath_f_clear_tids(dd, pd->port_port);
 
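The user-init hunk above spreads leftover PIO buffers by granting one extra buffer to each of the first ipath_ports_extrabuf ports and computing each port's first buffer index accordingly. A standalone model of that carve-up (variable names invented, values arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned pbufsport = 32;	/* base buffers per port (example) */
	unsigned extrabuf = 2;		/* ports that get one extra (example) */
	unsigned port;

	for (port = 1; port <= 4; port++) {
		unsigned piocnt, pio_base;

		if (port <= extrabuf) {		/* early ports: +1 buffer */
			piocnt = pbufsport + 1;
			pio_base = (pbufsport + 1) * (port - 1);
		} else {			/* later ports: skip past the extras */
			piocnt = pbufsport;
			pio_base = extrabuf + pbufsport * (port - 1);
		}
		printf("port %u: piocnt %u, first buffer %u\n",
		       port, piocnt, pio_base);
	}
	return 0;
}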
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
index e3ec0d1bdf50..8eee7830f042 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba7220.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c
@@ -595,7 +595,7 @@ static void ipath_7220_txe_recover(struct ipath_devdata *dd)
 
 	dev_info(&dd->pcidev->dev,
 		 "Recovering from TXE PIO parity error\n");
-	ipath_disarm_senderrbufs(dd, 1);
+	ipath_disarm_senderrbufs(dd);
 }
 
 
@@ -675,10 +675,8 @@ static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg,
 	ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
 	if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
 		/*
-		 * Parity errors in send memory are recoverable,
-		 * just cancel the send (if indicated in * sendbuffererror),
-		 * count the occurrence, unfreeze (if no other handled
-		 * hardware error bits are set), and continue.
+		 * Parity errors in send memory are recoverable by h/w
+		 * just do housekeeping, exit freeze mode and continue.
 		 */
 		if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
 			       INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
@@ -687,13 +685,6 @@ static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg,
 			hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
 				     INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
 				    << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT);
-			if (!hwerrs) {
-				/* else leave in freeze mode */
-				ipath_write_kreg(dd,
-					dd->ipath_kregs->kr_control,
-					dd->ipath_control);
-				goto bail;
-			}
 		}
 		if (hwerrs) {
 			/*
@@ -723,8 +714,8 @@ static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg,
 			*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
 			dd->ipath_flags &= ~IPATH_INITTED;
 		} else {
-			ipath_dbg("Clearing freezemode on ignored hardware "
-				"error\n");
+			ipath_dbg("Clearing freezemode on ignored or "
+				"recovered hardware error\n");
 			ipath_clear_freeze(dd);
 		}
 	}
@@ -870,8 +861,9 @@ static int ipath_7220_boardname(struct ipath_devdata *dd, char *name,
870 "revision %u.%u!\n", 861 "revision %u.%u!\n",
871 dd->ipath_majrev, dd->ipath_minrev); 862 dd->ipath_majrev, dd->ipath_minrev);
872 ret = 1; 863 ret = 1;
873 } else if (dd->ipath_minrev == 1) { 864 } else if (dd->ipath_minrev == 1 &&
874 /* Rev1 chips are prototypes. Complain, but allow use */ 865 !(dd->ipath_flags & IPATH_INITTED)) {
866 /* Rev1 chips are prototypes. Complain at init, but allow use */
875 ipath_dev_err(dd, "Unsupported hardware " 867 ipath_dev_err(dd, "Unsupported hardware "
876 "revision %u.%u, Contact support@qlogic.com\n", 868 "revision %u.%u, Contact support@qlogic.com\n",
877 dd->ipath_majrev, dd->ipath_minrev); 869 dd->ipath_majrev, dd->ipath_minrev);
@@ -1966,7 +1958,7 @@ static void ipath_7220_config_ports(struct ipath_devdata *dd, ushort cfgports)
1966 dd->ipath_rcvctrl); 1958 dd->ipath_rcvctrl);
1967 dd->ipath_p0_rcvegrcnt = 2048; /* always */ 1959 dd->ipath_p0_rcvegrcnt = 2048; /* always */
1968 if (dd->ipath_flags & IPATH_HAS_SEND_DMA) 1960 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
1969 dd->ipath_pioreserved = 1; /* reserve a buffer */ 1961 dd->ipath_pioreserved = 3; /* kpiobufs used for PIO */
1970} 1962}
1971 1963
1972 1964
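
The handle_hwerrors hunk above drops the early unfreeze and lets the common "Clearing freezemode" path run once the recoverable TXE parity bits have been masked out of hwerrs. A compact sketch of that mask-and-decide pattern; the bit positions are invented, not the chip's real hwerror register layout:

/* Sketch of masking recoverable bits before deciding about freeze. */
#include <stdint.h>
#include <stdio.h>

#define TXE_PARITY_PIOBUF (UINT64_C(1) << 0)   /* illustrative bits */
#define TXE_PARITY_PIOPBC (UINT64_C(1) << 1)

int main(void)
{
        uint64_t hwerrs = TXE_PARITY_PIOBUF;    /* pretend status word */

        /* recoverable send-memory parity bits are housekeeping only */
        hwerrs &= ~(TXE_PARITY_PIOBUF | TXE_PARITY_PIOPBC);

        if (hwerrs)
                puts("unhandled error remains: stay in freeze mode");
        else
                puts("only recovered errors: clear freeze and continue");
        return 0;
}
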
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 27dd89476660..3e5baa43fc82 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -41,7 +41,7 @@
41/* 41/*
42 * min buffers we want to have per port, after driver 42 * min buffers we want to have per port, after driver
43 */ 43 */
44#define IPATH_MIN_USER_PORT_BUFCNT 8 44#define IPATH_MIN_USER_PORT_BUFCNT 7
45 45
46/* 46/*
47 * Number of ports we are configured to use (to allow for more pio 47 * Number of ports we are configured to use (to allow for more pio
@@ -54,13 +54,9 @@ MODULE_PARM_DESC(cfgports, "Set max number of ports to use");
54 54
55/* 55/*
56 * Number of buffers reserved for driver (verbs and layered drivers.) 56 * Number of buffers reserved for driver (verbs and layered drivers.)
57 * Reserved at end of buffer list. Initialized based on 57 * Initialized based on number of PIO buffers if not set via module interface.
58 * number of PIO buffers if not set via module interface.
59 * The problem with this is that it's global, but we'll use different 58 * The problem with this is that it's global, but we'll use different
60 * numbers for different chip types. So the default value is not 59 * numbers for different chip types.
61 * very useful. I've redefined it for the 1.3 release so that it's
62 * zero unless set by the user to something else, in which case we
63 * try to respect it.
64 */ 60 */
65static ushort ipath_kpiobufs; 61static ushort ipath_kpiobufs;
66 62
@@ -546,9 +542,12 @@ static void enable_chip(struct ipath_devdata *dd, int reinit)
546 pioavail = dd->ipath_pioavailregs_dma[i ^ 1]; 542 pioavail = dd->ipath_pioavailregs_dma[i ^ 1];
547 else 543 else
548 pioavail = dd->ipath_pioavailregs_dma[i]; 544 pioavail = dd->ipath_pioavailregs_dma[i];
549 dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail) | 545 /*
550 (~dd->ipath_pioavailkernel[i] << 546 * don't need to worry about ipath_pioavailkernel here
551 INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT); 547 * because we will call ipath_chg_pioavailkernel() later
548 * in initialization, to busy out buffers as needed
549 */
550 dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail);
552 } 551 }
553 /* can get counters, stats, etc. */ 552 /* can get counters, stats, etc. */
554 dd->ipath_flags |= IPATH_PRESENT; 553 dd->ipath_flags |= IPATH_PRESENT;
@@ -708,12 +707,11 @@ static void verify_interrupt(unsigned long opaque)
708int ipath_init_chip(struct ipath_devdata *dd, int reinit) 707int ipath_init_chip(struct ipath_devdata *dd, int reinit)
709{ 708{
710 int ret = 0; 709 int ret = 0;
711 u32 val32, kpiobufs; 710 u32 kpiobufs, defkbufs;
712 u32 piobufs, uports; 711 u32 piobufs, uports;
713 u64 val; 712 u64 val;
714 struct ipath_portdata *pd; 713 struct ipath_portdata *pd;
715 gfp_t gfp_flags = GFP_USER | __GFP_COMP; 714 gfp_t gfp_flags = GFP_USER | __GFP_COMP;
716 unsigned long flags;
717 715
718 ret = init_housekeeping(dd, reinit); 716 ret = init_housekeeping(dd, reinit);
719 if (ret) 717 if (ret)
@@ -753,56 +751,46 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
753 dd->ipath_pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) 751 dd->ipath_pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2)
754 / (sizeof(u64) * BITS_PER_BYTE / 2); 752 / (sizeof(u64) * BITS_PER_BYTE / 2);
755 uports = dd->ipath_cfgports ? dd->ipath_cfgports - 1 : 0; 753 uports = dd->ipath_cfgports ? dd->ipath_cfgports - 1 : 0;
756 if (ipath_kpiobufs == 0) { 754 if (piobufs > 144)
757 /* not set by user (this is default) */ 755 defkbufs = 32 + dd->ipath_pioreserved;
758 if (piobufs > 144)
759 kpiobufs = 32;
760 else
761 kpiobufs = 16;
762 }
763 else 756 else
764 kpiobufs = ipath_kpiobufs; 757 defkbufs = 16 + dd->ipath_pioreserved;
765 758
766 if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) { 759 if (ipath_kpiobufs && (ipath_kpiobufs +
760 (uports * IPATH_MIN_USER_PORT_BUFCNT)) > piobufs) {
767 int i = (int) piobufs - 761 int i = (int) piobufs -
768 (int) (uports * IPATH_MIN_USER_PORT_BUFCNT); 762 (int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
769 if (i < 1) 763 if (i < 1)
770 i = 1; 764 i = 1;
771 dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of " 765 dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of "
772 "%d for kernel leaves too few for %d user ports " 766 "%d for kernel leaves too few for %d user ports "
773 "(%d each); using %u\n", kpiobufs, 767 "(%d each); using %u\n", ipath_kpiobufs,
774 piobufs, uports, IPATH_MIN_USER_PORT_BUFCNT, i); 768 piobufs, uports, IPATH_MIN_USER_PORT_BUFCNT, i);
775 /* 769 /*
776 * shouldn't change ipath_kpiobufs, because could be 770 * shouldn't change ipath_kpiobufs, because could be
777 * different for different devices... 771 * different for different devices...
778 */ 772 */
779 kpiobufs = i; 773 kpiobufs = i;
780 } 774 } else if (ipath_kpiobufs)
775 kpiobufs = ipath_kpiobufs;
776 else
777 kpiobufs = defkbufs;
781 dd->ipath_lastport_piobuf = piobufs - kpiobufs; 778 dd->ipath_lastport_piobuf = piobufs - kpiobufs;
782 dd->ipath_pbufsport = 779 dd->ipath_pbufsport =
783 uports ? dd->ipath_lastport_piobuf / uports : 0; 780 uports ? dd->ipath_lastport_piobuf / uports : 0;
784 val32 = dd->ipath_lastport_piobuf - (dd->ipath_pbufsport * uports); 781 /* if not an even divisor, some user ports get extra buffers */
785 if (val32 > 0) { 782 dd->ipath_ports_extrabuf = dd->ipath_lastport_piobuf -
786 ipath_dbg("allocating %u pbufs/port leaves %u unused, " 783 (dd->ipath_pbufsport * uports);
787 "add to kernel\n", dd->ipath_pbufsport, val32); 784 if (dd->ipath_ports_extrabuf)
788 dd->ipath_lastport_piobuf -= val32; 785 ipath_dbg("%u pbufs/port leaves some unused, add 1 buffer to "
789 kpiobufs += val32; 786 "ports <= %u\n", dd->ipath_pbufsport,
790 ipath_dbg("%u pbufs/port leaves %u unused, add to kernel\n", 787 dd->ipath_ports_extrabuf);
791 dd->ipath_pbufsport, val32);
792 }
793 dd->ipath_lastpioindex = 0; 788 dd->ipath_lastpioindex = 0;
794 dd->ipath_lastpioindexl = dd->ipath_piobcnt2k; 789 dd->ipath_lastpioindexl = dd->ipath_piobcnt2k;
795 ipath_chg_pioavailkernel(dd, 0, piobufs, 1); 790 /* ipath_pioavailshadow initialized earlier */
796 ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u " 791 ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
797 "each for %u user ports\n", kpiobufs, 792 "each for %u user ports\n", kpiobufs,
798 piobufs, dd->ipath_pbufsport, uports); 793 piobufs, dd->ipath_pbufsport, uports);
799 if (dd->ipath_pioupd_thresh) {
800 if (dd->ipath_pbufsport < dd->ipath_pioupd_thresh)
801 dd->ipath_pioupd_thresh = dd->ipath_pbufsport;
802 if (kpiobufs < dd->ipath_pioupd_thresh)
803 dd->ipath_pioupd_thresh = kpiobufs;
804 }
805
806 ret = dd->ipath_f_early_init(dd); 794 ret = dd->ipath_f_early_init(dd);
807 if (ret) { 795 if (ret) {
808 ipath_dev_err(dd, "Early initialization failure\n"); 796 ipath_dev_err(dd, "Early initialization failure\n");
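
The buffer-budget hunk above folds ipath_pioreserved into the default kernel allocation and applies the "leaves too few for user ports" clamp only when ipath_kpiobufs was actually set by the user. The arithmetic in isolation, as a standalone sketch with made-up counts (MIN_USER_PORT_BUFCNT mirrors IPATH_MIN_USER_PORT_BUFCNT):

/* Standalone sketch of the kernel/user PIO buffer budget. */
#include <stdio.h>

#define MIN_USER_PORT_BUFCNT 7

int main(void)
{
        unsigned piobufs = 160, uports = 16, reserved = 3;
        unsigned requested = 64;        /* stands in for ipath_kpiobufs */
        unsigned defkbufs = (piobufs > 144 ? 32 : 16) + reserved;
        unsigned kpiobufs;

        if (requested &&
            requested + uports * MIN_USER_PORT_BUFCNT > piobufs) {
                int i = (int)piobufs - (int)(uports * MIN_USER_PORT_BUFCNT);
                kpiobufs = i < 1 ? 1 : (unsigned)i;   /* clamp the request */
        } else if (requested)
                kpiobufs = requested;
        else
                kpiobufs = defkbufs;

        printf("kernel keeps %u of %u PIO bufs (default would be %u)\n",
               kpiobufs, piobufs, defkbufs);
        return 0;
}

Here the request of 64 would leave fewer than 7 buffers per user port, so it is clamped to 48 (160 minus 16 x 7).
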
@@ -810,13 +798,6 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
810 } 798 }
811 799
812 /* 800 /*
813 * Cancel any possible active sends from early driver load.
814 * Follows early_init because some chips have to initialize
815 * PIO buffers in early_init to avoid false parity errors.
816 */
817 ipath_cancel_sends(dd, 0);
818
819 /*
820 * Early_init sets rcvhdrentsize and rcvhdrsize, so this must be 801 * Early_init sets rcvhdrentsize and rcvhdrsize, so this must be
821 * done after early_init. 802 * done after early_init.
822 */ 803 */
@@ -836,6 +817,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
836 817
837 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr, 818 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr,
838 dd->ipath_pioavailregs_phys); 819 dd->ipath_pioavailregs_phys);
820
839 /* 821 /*
840 * this is to detect s/w errors, which the h/w works around by 822 * this is to detect s/w errors, which the h/w works around by
841 * ignoring the low 6 bits of address, if it wasn't aligned. 823 * ignoring the low 6 bits of address, if it wasn't aligned.
@@ -862,12 +844,6 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
862 ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED); 844 ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
863 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL); 845 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
864 846
865 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
866 dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE;
867 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
868 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
869 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
870
871 /* 847 /*
872 * before error clears, since we expect serdes pll errors during 848 * before error clears, since we expect serdes pll errors during
873 * this, the first time after reset 849 * this, the first time after reset
@@ -940,6 +916,19 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
940 else 916 else
941 enable_chip(dd, reinit); 917 enable_chip(dd, reinit);
942 918
919 /* after enable_chip, so pioavailshadow setup */
920 ipath_chg_pioavailkernel(dd, 0, piobufs, 1);
921
922 /*
923 * Cancel any possible active sends from early driver load.
924 * Follows early_init because some chips have to initialize
925 * PIO buffers in early_init to avoid false parity errors.
926 * After enable and ipath_chg_pioavailkernel so we can safely
927 * enable pioavail updates and PIOENABLE; packets are now
928 * ready to go out.
929 */
930 ipath_cancel_sends(dd, 1);
931
943 if (!reinit) { 932 if (!reinit) {
944 /* 933 /*
945 * Used when we close a port, for DMA already in flight 934 * Used when we close a port, for DMA already in flight
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 1b58f4737c71..26900b3b7a4e 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -38,42 +38,12 @@
38#include "ipath_verbs.h" 38#include "ipath_verbs.h"
39#include "ipath_common.h" 39#include "ipath_common.h"
40 40
41/*
42 * clear (write) a pio buffer, to clear a parity error. This routine
43 * should only be called when in freeze mode, and the buffer should be
44 * canceled afterwards.
45 */
46static void ipath_clrpiobuf(struct ipath_devdata *dd, u32 pnum)
47{
48 u32 __iomem *pbuf;
49 u32 dwcnt; /* dword count to write */
50 if (pnum < dd->ipath_piobcnt2k) {
51 pbuf = (u32 __iomem *) (dd->ipath_pio2kbase + pnum *
52 dd->ipath_palign);
53 dwcnt = dd->ipath_piosize2k >> 2;
54 }
55 else {
56 pbuf = (u32 __iomem *) (dd->ipath_pio4kbase +
57 (pnum - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
58 dwcnt = dd->ipath_piosize4k >> 2;
59 }
60 dev_info(&dd->pcidev->dev,
61 "Rewrite PIO buffer %u, to recover from parity error\n",
62 pnum);
63
64 /* no flush required, since already in freeze */
65 writel(dwcnt + 1, pbuf);
66 while (--dwcnt)
67 writel(0, pbuf++);
68}
69 41
70/* 42/*
71 * Called when we might have an error that is specific to a particular 43 * Called when we might have an error that is specific to a particular
72 * PIO buffer, and may need to cancel that buffer, so it can be re-used. 44 * PIO buffer, and may need to cancel that buffer, so it can be re-used.
73 * If rewrite is true, and bits are set in the sendbufferror registers,
74 * we'll write to the buffer, for error recovery on parity errors.
75 */ 45 */
76void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite) 46void ipath_disarm_senderrbufs(struct ipath_devdata *dd)
77{ 47{
78 u32 piobcnt; 48 u32 piobcnt;
79 unsigned long sbuf[4]; 49 unsigned long sbuf[4];
@@ -109,11 +79,8 @@ void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
109 } 79 }
110 80
111 for (i = 0; i < piobcnt; i++) 81 for (i = 0; i < piobcnt; i++)
112 if (test_bit(i, sbuf)) { 82 if (test_bit(i, sbuf))
113 if (rewrite)
114 ipath_clrpiobuf(dd, i);
115 ipath_disarm_piobufs(dd, i, 1); 83 ipath_disarm_piobufs(dd, i, 1);
116 }
117 /* ignore armlaunch errs for a bit */ 84 /* ignore armlaunch errs for a bit */
118 dd->ipath_lastcancel = jiffies+3; 85 dd->ipath_lastcancel = jiffies+3;
119 } 86 }
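
With the parity-rewrite path gone, ipath_disarm_senderrbufs() reduces to a plain bitmap walk. A self-contained sketch of that walk; test_bit_ul() stands in for the kernel's test_bit(), and printf() for the real ipath_disarm_piobufs() call:

/* Self-contained sketch of scanning the send-buffer error bitmap. */
#include <stdio.h>
#include <string.h>

#define BITS_PER_ULONG (8 * sizeof(unsigned long))

static int test_bit_ul(unsigned n, const unsigned long *map)
{
        return (map[n / BITS_PER_ULONG] >> (n % BITS_PER_ULONG)) & 1;
}

int main(void)
{
        unsigned long sbuf[4];

        memset(sbuf, 0, sizeof(sbuf));
        sbuf[0] = 0x12;         /* pretend buffers 1 and 4 errored */

        for (unsigned i = 0; i < 4 * BITS_PER_ULONG; i++)
                if (test_bit_ul(i, sbuf))
                        printf("disarm PIO buffer %u\n", i);
        return 0;
}
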
@@ -164,7 +131,7 @@ static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
164{ 131{
165 u64 ignore_this_time = 0; 132 u64 ignore_this_time = 0;
166 133
167 ipath_disarm_senderrbufs(dd, 0); 134 ipath_disarm_senderrbufs(dd);
168 if ((errs & E_SUM_LINK_PKTERRS) && 135 if ((errs & E_SUM_LINK_PKTERRS) &&
169 !(dd->ipath_flags & IPATH_LINKACTIVE)) { 136 !(dd->ipath_flags & IPATH_LINKACTIVE)) {
170 /* 137 /*
@@ -909,8 +876,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
909 * processes (causing armlaunch), send errors due to going into freeze mode, 876 * processes (causing armlaunch), send errors due to going into freeze mode,
910 * etc., and try to avoid causing extra interrupts while doing so. 877 * etc., and try to avoid causing extra interrupts while doing so.
911 * Forcibly update the in-memory pioavail register copies after cleanup 878 * Forcibly update the in-memory pioavail register copies after cleanup
912 * because the chip won't do it for anything changing while in freeze mode 879 * because the chip won't do it while in freeze mode (the register values
913 * (we don't want to wait for the next pio buffer state change). 880 * themselves are kept correct).
914 * Make sure that we don't lose any important interrupts by using the chip 881 * Make sure that we don't lose any important interrupts by using the chip
915 * feature that says that writing 0 to a bit in *clear that is set in 882 * feature that says that writing 0 to a bit in *clear that is set in
916 * *status will cause an interrupt to be generated again (if allowed by 883 * *status will cause an interrupt to be generated again (if allowed by
@@ -918,44 +885,23 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
918 */ 885 */
919void ipath_clear_freeze(struct ipath_devdata *dd) 886void ipath_clear_freeze(struct ipath_devdata *dd)
920{ 887{
921 int i, im;
922 u64 val;
923
924 /* disable error interrupts, to avoid confusion */ 888 /* disable error interrupts, to avoid confusion */
925 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL); 889 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
926 890
927 /* also disable interrupts; errormask is sometimes overwritten */ 891 /* also disable interrupts; errormask is sometimes overwritten */
928 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL); 892 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
929 893
930 /* 894 ipath_cancel_sends(dd, 1);
931 * clear all sends, because they have may been 895
932 * completed by usercode while in freeze mode, and 896 /* clear the freeze, and be sure chip saw it */
933 * therefore would not be sent, and eventually
934 * might cause the process to run out of bufs
935 */
936 ipath_cancel_sends(dd, 0);
937 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 897 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
938 dd->ipath_control); 898 dd->ipath_control);
899 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
939 900
940 /* ensure pio avail updates continue */ 901 /* force in-memory update now we are out of freeze */
941 ipath_force_pio_avail_update(dd); 902 ipath_force_pio_avail_update(dd);
942 903
943 /* 904 /*
944 * We just enabled pioavailupdate, so dma copy is almost certainly
945 * not yet right, so read the registers directly. Similar to init
946 */
947 for (i = 0; i < dd->ipath_pioavregs; i++) {
948 /* deal with 6110 chip bug */
949 im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
950 i ^ 1 : i;
951 val = ipath_read_kreg64(dd, (0x1000 / sizeof(u64)) + im);
952 dd->ipath_pioavailregs_dma[i] = cpu_to_le64(val);
953 dd->ipath_pioavailshadow[i] = val |
954 (~dd->ipath_pioavailkernel[i] <<
955 INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT);
956 }
957
958 /*
959 * force new interrupt if any hwerr, error or interrupt bits are 905 * force new interrupt if any hwerr, error or interrupt bits are
960 * still set, and clear "safe" send packet errors related to freeze 906 * still set, and clear "safe" send packet errors related to freeze
961 * and cancelling sends. Re-enable error interrupts before possible 907 * and cancelling sends. Re-enable error interrupts before possible
@@ -1312,10 +1258,8 @@ irqreturn_t ipath_intr(int irq, void *data)
1312 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 1258 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1313 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); 1259 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1314 1260
1315 if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA)) 1261 /* always process; sdma verbs uses PIO for acks and VL15 */
1316 handle_layer_pioavail(dd); 1262 handle_layer_pioavail(dd);
1317 else
1318 ipath_dbg("unexpected BUFAVAIL intr\n");
1319 } 1263 }
1320 1264
1321 ret = IRQ_HANDLED; 1265 ret = IRQ_HANDLED;
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 202337ae90dc..02b24a340599 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -117,6 +117,10 @@ struct ipath_portdata {
117 u16 port_subport_cnt; 117 u16 port_subport_cnt;
118 /* non-zero if port is being shared. */ 118 /* non-zero if port is being shared. */
119 u16 port_subport_id; 119 u16 port_subport_id;
120 /* number of pio bufs for this port (all procs, if shared) */
121 u32 port_piocnt;
122 /* first pio buffer for this port */
123 u32 port_pio_base;
120 /* chip offset of PIO buffers for this port */ 124 /* chip offset of PIO buffers for this port */
121 u32 port_piobufs; 125 u32 port_piobufs;
122 /* how many alloc_pages() chunks in port_rcvegrbuf_pages */ 126 /* how many alloc_pages() chunks in port_rcvegrbuf_pages */
@@ -384,6 +388,8 @@ struct ipath_devdata {
384 u32 ipath_lastrpkts; 388 u32 ipath_lastrpkts;
385 /* pio bufs allocated per port */ 389 /* pio bufs allocated per port */
386 u32 ipath_pbufsport; 390 u32 ipath_pbufsport;
391 /* if remainder on bufs/port, ports < extrabuf get 1 extra */
392 u32 ipath_ports_extrabuf;
387 u32 ipath_pioupd_thresh; /* update threshold, some chips */ 393 u32 ipath_pioupd_thresh; /* update threshold, some chips */
388 /* 394 /*
389 * number of ports configured as max; zero is set to number chip 395 * number of ports configured as max; zero is set to number chip
@@ -1011,7 +1017,7 @@ void ipath_get_eeprom_info(struct ipath_devdata *);
1011int ipath_update_eeprom_log(struct ipath_devdata *dd); 1017int ipath_update_eeprom_log(struct ipath_devdata *dd);
1012void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr); 1018void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
1013u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); 1019u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
1014void ipath_disarm_senderrbufs(struct ipath_devdata *, int); 1020void ipath_disarm_senderrbufs(struct ipath_devdata *);
1015void ipath_force_pio_avail_update(struct ipath_devdata *); 1021void ipath_force_pio_avail_update(struct ipath_devdata *);
1016void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev); 1022void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);
1017 1023
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index c405dfba5531..08b11b567614 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -1746,7 +1746,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1746 qp->r_wrid_valid = 0; 1746 qp->r_wrid_valid = 0;
1747 wc.wr_id = qp->r_wr_id; 1747 wc.wr_id = qp->r_wr_id;
1748 wc.status = IB_WC_SUCCESS; 1748 wc.status = IB_WC_SUCCESS;
1749 wc.opcode = IB_WC_RECV; 1749 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
1750 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
1751 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
1752 else
1753 wc.opcode = IB_WC_RECV;
1750 wc.vendor_err = 0; 1754 wc.vendor_err = 0;
1751 wc.qp = &qp->ibqp; 1755 wc.qp = &qp->ibqp;
1752 wc.src_qp = qp->remote_qpn; 1756 wc.src_qp = qp->remote_qpn;
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index 8ac5c1d82ccd..9e3fe61cbd08 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -481,9 +481,10 @@ done:
481 wake_up(&qp->wait); 481 wake_up(&qp->wait);
482} 482}
483 483
484static void want_buffer(struct ipath_devdata *dd) 484static void want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp)
485{ 485{
486 if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA)) { 486 if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA) ||
487 qp->ibqp.qp_type == IB_QPT_SMI) {
487 unsigned long flags; 488 unsigned long flags;
488 489
489 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); 490 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
@@ -519,7 +520,7 @@ static void ipath_no_bufs_available(struct ipath_qp *qp,
519 spin_lock_irqsave(&dev->pending_lock, flags); 520 spin_lock_irqsave(&dev->pending_lock, flags);
520 list_add_tail(&qp->piowait, &dev->piowait); 521 list_add_tail(&qp->piowait, &dev->piowait);
521 spin_unlock_irqrestore(&dev->pending_lock, flags); 522 spin_unlock_irqrestore(&dev->pending_lock, flags);
522 want_buffer(dev->dd); 523 want_buffer(dev->dd, qp);
523 dev->n_piowait++; 524 dev->n_piowait++;
524} 525}
525 526
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
index 1974df7a9f78..3697449c1ba4 100644
--- a/drivers/infiniband/hw/ipath/ipath_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -308,13 +308,15 @@ static void sdma_abort_task(unsigned long opaque)
308 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); 308 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
309 309
310 /* 310 /*
311 * Don't restart sdma here. Wait until link is up to ACTIVE. 311 * Don't restart sdma here (with the exception
312 * VL15 MADs used to bring the link up use PIO, and multiple 312 * below). Wait until link is up to ACTIVE. VL15 MADs
313 * link transitions otherwise cause the sdma engine to be 313 * used to bring the link up use PIO, and multiple link
314 * transitions otherwise cause the sdma engine to be
314 * stopped and started multiple times. 315 * stopped and started multiple times.
315 * The disable is done here, including the shadow, so the 316 * The disable is done here, including the shadow,
316 * state is kept consistent. 317 * so the state is kept consistent.
317 * See ipath_restart_sdma() for the actual starting of sdma. 318 * See ipath_restart_sdma() for the actual starting
319 * of sdma.
318 */ 320 */
319 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); 321 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
320 dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE; 322 dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
@@ -326,6 +328,13 @@ static void sdma_abort_task(unsigned long opaque)
326 /* make sure I see next message */ 328 /* make sure I see next message */
327 dd->ipath_sdma_abort_jiffies = 0; 329 dd->ipath_sdma_abort_jiffies = 0;
328 330
331 /*
332 * Not everything that takes SDMA offline is a link
333 * status change. If the link was up, restart SDMA.
334 */
335 if (dd->ipath_flags & IPATH_LINKACTIVE)
336 ipath_restart_sdma(dd);
337
329 goto done; 338 goto done;
330 } 339 }
331 340
@@ -427,7 +436,12 @@ int setup_sdma(struct ipath_devdata *dd)
427 goto done; 436 goto done;
428 } 437 }
429 438
430 dd->ipath_sdma_status = 0; 439 /*
440 * Set initial status as if we had been up, then gone down.
441 * This lets initial start on transition to ACTIVE be the
442 * same as restart after link flap.
443 */
444 dd->ipath_sdma_status = IPATH_SDMA_ABORT_ABORTED;
431 dd->ipath_sdma_abort_jiffies = 0; 445 dd->ipath_sdma_abort_jiffies = 0;
432 dd->ipath_sdma_generation = 0; 446 dd->ipath_sdma_generation = 0;
433 dd->ipath_sdma_descq_tail = 0; 447 dd->ipath_sdma_descq_tail = 0;
@@ -449,16 +463,19 @@ int setup_sdma(struct ipath_devdata *dd)
449 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 463 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
450 dd->ipath_sdma_head_phys); 464 dd->ipath_sdma_head_phys);
451 465
452 /* Reserve all the former "kernel" piobufs */ 466 /*
453 n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - dd->ipath_pioreserved; 467 * Reserve all the former "kernel" piobufs, using high number range
454 for (i = dd->ipath_lastport_piobuf; i < n; ++i) { 468 * so we get as many 4K buffers as possible
469 */
470 n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
471 i = dd->ipath_lastport_piobuf + dd->ipath_pioreserved;
472 ipath_chg_pioavailkernel(dd, i, n - i, 0);
473 for (; i < n; ++i) {
455 unsigned word = i / 64; 474 unsigned word = i / 64;
456 unsigned bit = i & 63; 475 unsigned bit = i & 63;
457 BUG_ON(word >= 3); 476 BUG_ON(word >= 3);
458 senddmabufmask[word] |= 1ULL << bit; 477 senddmabufmask[word] |= 1ULL << bit;
459 } 478 }
460 ipath_chg_pioavailkernel(dd, dd->ipath_lastport_piobuf,
461 n - dd->ipath_lastport_piobuf, 0);
462 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 479 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
463 senddmabufmask[0]); 480 senddmabufmask[0]);
464 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 481 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
@@ -615,6 +632,9 @@ void ipath_restart_sdma(struct ipath_devdata *dd)
615 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 632 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
616 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); 633 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
617 634
635 /* notify upper layers */
636 ipath_ib_piobufavail(dd->verbs_dev);
637
618bail: 638bail:
619 return; 639 return;
620} 640}
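
setup_sdma() above now reserves the high end of the buffer range and marks each reserved buffer in a 3x64-bit mask using the same word = i / 64, bit = i & 63 split. The mask construction on its own, with an illustrative buffer range:

/* The reserved-buffer mask construction in isolation. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t mask[3] = { 0, 0, 0 };
        unsigned first = 40, n = 120;   /* pretend lastport+reserved..total */

        for (unsigned i = first; i < n; i++)
                mask[i / 64] |= UINT64_C(1) << (i & 63);

        printf("mask0=%016llx mask1=%016llx mask2=%016llx\n",
               (unsigned long long)mask[0],
               (unsigned long long)mask[1],
               (unsigned long long)mask[2]);
        return 0;
}
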
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index e63927cce5b5..5015cd2e57bd 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -396,7 +396,6 @@ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
396 396
397 wqe = get_swqe_ptr(qp, qp->s_head); 397 wqe = get_swqe_ptr(qp, qp->s_head);
398 wqe->wr = *wr; 398 wqe->wr = *wr;
399 wqe->ssn = qp->s_ssn++;
400 wqe->length = 0; 399 wqe->length = 0;
401 if (wr->num_sge) { 400 if (wr->num_sge) {
402 acc = wr->opcode >= IB_WR_RDMA_READ ? 401 acc = wr->opcode >= IB_WR_RDMA_READ ?
@@ -422,6 +421,7 @@ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
422 goto bail_inval; 421 goto bail_inval;
423 } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu) 422 } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
424 goto bail_inval; 423 goto bail_inval;
424 wqe->ssn = qp->s_ssn++;
425 qp->s_head = next; 425 qp->s_head = next;
426 426
427 ret = 0; 427 ret = 0;
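
Moving wqe->ssn = qp->s_ssn++ below the validation checks means a rejected work request no longer consumes a send sequence number. A toy demonstration of the difference; post_send() and its valid flag are invented:

/* Toy demo: only successful posts should consume a sequence number. */
#include <stdio.h>

static unsigned ssn = 1;

static int post_send(int valid)
{
        unsigned mine;

        if (!valid)
                return -1;      /* reject before touching the counter */
        mine = ssn++;
        printf("posted wqe with ssn %u\n", mine);
        return 0;
}

int main(void)
{
        post_send(0);           /* fails; ssn is still 1 */
        post_send(1);           /* gets ssn 1, with no gap */
        return 0;
}
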
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 92b683411d5a..3ad8bd9f7543 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -14,7 +14,7 @@ if INPUT_MISC
14 14
15config INPUT_PCSPKR 15config INPUT_PCSPKR
16 tristate "PC Speaker support" 16 tristate "PC Speaker support"
17 depends on ALPHA || X86 || MIPS || PPC_PREP || PPC_CHRP || PPC_PSERIES 17 depends on PCSPKR_PLATFORM
18 depends on SND_PCSP=n 18 depends on SND_PCSP=n
19 help 19 help
20 Say Y here if you want the standard PC Speaker to be used for 20 Say Y here if you want the standard PC Speaker to be used for
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5938fa962922..faf3d8912979 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -886,7 +886,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
886 */ 886 */
887 raid10_find_phys(conf, r10_bio); 887 raid10_find_phys(conf, r10_bio);
888 retry_write: 888 retry_write:
889 blocked_rdev = 0; 889 blocked_rdev = NULL;
890 rcu_read_lock(); 890 rcu_read_lock();
891 for (i = 0; i < conf->copies; i++) { 891 for (i = 0; i < conf->copies; i++) {
892 int d = r10_bio->devs[i].devnum; 892 int d = r10_bio->devs[i].devnum;
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 73f742c7e818..cc11c4c0e7e7 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -2,6 +2,8 @@
2# Makefile for the kernel multimedia device drivers. 2# Makefile for the kernel multimedia device drivers.
3# 3#
4 4
5obj-y := common/
6
5obj-$(CONFIG_VIDEO_MEDIA) += common/ 7obj-$(CONFIG_VIDEO_MEDIA) += common/
6 8
7# Since hybrid devices are here, should be compiled if DVB and/or V4L 9# Since hybrid devices are here, should be compiled if DVB and/or V4L
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 8f5ed9b4bf83..3f55d47bc4b9 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -613,7 +613,7 @@ static int __devinit cx18_probe(struct pci_dev *dev,
613 } 613 }
614 614
615 cx = kzalloc(sizeof(struct cx18), GFP_ATOMIC); 615 cx = kzalloc(sizeof(struct cx18), GFP_ATOMIC);
616 if (cx == 0) { 616 if (!cx) {
617 spin_unlock(&cx18_cards_lock); 617 spin_unlock(&cx18_cards_lock);
618 return -ENOMEM; 618 return -ENOMEM;
619 } 619 }
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index a0baf2d0ba7f..48e1a01718ec 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1634,7 +1634,7 @@ static int saa7134_s_fmt_overlay(struct file *file, void *priv,
1634 struct saa7134_fh *fh = priv; 1634 struct saa7134_fh *fh = priv;
1635 struct saa7134_dev *dev = fh->dev; 1635 struct saa7134_dev *dev = fh->dev;
1636 int err; 1636 int err;
1637 unsigned int flags; 1637 unsigned long flags;
1638 1638
1639 if (saa7134_no_overlay > 0) { 1639 if (saa7134_no_overlay > 0) {
1640 printk(KERN_ERR "V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n"); 1640 printk(KERN_ERR "V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n");
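
The saa7134 fix above matters because the IRQ-saving spinlock helpers store processor flags in their second argument, which must be unsigned long. A minimal kernel-context sketch of the idiom; demo_lock and demo() are illustrative:

/* Kernel-context sketch: irqsave flags must be unsigned long. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo(void)
{
        unsigned long flags;    /* not unsigned int */

        spin_lock_irqsave(&demo_lock, flags);
        /* ... touch state shared with interrupt context ... */
        spin_unlock_irqrestore(&demo_lock, flags);
}
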
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 6f8e7d4cf74d..2edda8cc7f99 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -319,7 +319,7 @@ static struct vortex_chip_info {
319 {"3c920B-EMB-WNM (ATI Radeon 9100 IGP)", 319 {"3c920B-EMB-WNM (ATI Radeon 9100 IGP)",
320 PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, }, 320 PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, },
321 {"3c980 Cyclone", 321 {"3c980 Cyclone",
322 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, 322 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
323 323
324 {"3c980C Python-T", 324 {"3c980C Python-T",
325 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, 325 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
@@ -600,7 +600,6 @@ struct vortex_private {
600 struct sk_buff* tx_skbuff[TX_RING_SIZE]; 600 struct sk_buff* tx_skbuff[TX_RING_SIZE];
601 unsigned int cur_rx, cur_tx; /* The next free ring entry */ 601 unsigned int cur_rx, cur_tx; /* The next free ring entry */
602 unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ 602 unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
603 struct net_device_stats stats; /* Generic stats */
604 struct vortex_extra_stats xstats; /* NIC-specific extra stats */ 603 struct vortex_extra_stats xstats; /* NIC-specific extra stats */
605 struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */ 604 struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
606 dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */ 605 dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */
@@ -1875,7 +1874,7 @@ static void vortex_tx_timeout(struct net_device *dev)
1875 1874
1876 issue_and_wait(dev, TxReset); 1875 issue_and_wait(dev, TxReset);
1877 1876
1878 vp->stats.tx_errors++; 1877 dev->stats.tx_errors++;
1879 if (vp->full_bus_master_tx) { 1878 if (vp->full_bus_master_tx) {
1880 printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n", dev->name); 1879 printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n", dev->name);
1881 if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0) 1880 if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0)
@@ -1887,7 +1886,7 @@ static void vortex_tx_timeout(struct net_device *dev)
1887 iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); 1886 iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
1888 iowrite16(DownUnstall, ioaddr + EL3_CMD); 1887 iowrite16(DownUnstall, ioaddr + EL3_CMD);
1889 } else { 1888 } else {
1890 vp->stats.tx_dropped++; 1889 dev->stats.tx_dropped++;
1891 netif_wake_queue(dev); 1890 netif_wake_queue(dev);
1892 } 1891 }
1893 1892
@@ -1928,8 +1927,8 @@ vortex_error(struct net_device *dev, int status)
1928 } 1927 }
1929 dump_tx_ring(dev); 1928 dump_tx_ring(dev);
1930 } 1929 }
1931 if (tx_status & 0x14) vp->stats.tx_fifo_errors++; 1930 if (tx_status & 0x14) dev->stats.tx_fifo_errors++;
1932 if (tx_status & 0x38) vp->stats.tx_aborted_errors++; 1931 if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
1933 if (tx_status & 0x08) vp->xstats.tx_max_collisions++; 1932 if (tx_status & 0x08) vp->xstats.tx_max_collisions++;
1934 iowrite8(0, ioaddr + TxStatus); 1933 iowrite8(0, ioaddr + TxStatus);
1935 if (tx_status & 0x30) { /* txJabber or txUnderrun */ 1934 if (tx_status & 0x30) { /* txJabber or txUnderrun */
@@ -2051,8 +2050,8 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
2051 if (vortex_debug > 2) 2050 if (vortex_debug > 2)
2052 printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n", 2051 printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
2053 dev->name, tx_status); 2052 dev->name, tx_status);
2054 if (tx_status & 0x04) vp->stats.tx_fifo_errors++; 2053 if (tx_status & 0x04) dev->stats.tx_fifo_errors++;
2055 if (tx_status & 0x38) vp->stats.tx_aborted_errors++; 2054 if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
2056 if (tx_status & 0x30) { 2055 if (tx_status & 0x30) {
2057 issue_and_wait(dev, TxReset); 2056 issue_and_wait(dev, TxReset);
2058 } 2057 }
@@ -2350,7 +2349,7 @@ boomerang_interrupt(int irq, void *dev_id)
2350 } else { 2349 } else {
2351 printk(KERN_DEBUG "boomerang_interrupt: no skb!\n"); 2350 printk(KERN_DEBUG "boomerang_interrupt: no skb!\n");
2352 } 2351 }
2353 /* vp->stats.tx_packets++; Counted below. */ 2352 /* dev->stats.tx_packets++; Counted below. */
2354 dirty_tx++; 2353 dirty_tx++;
2355 } 2354 }
2356 vp->dirty_tx = dirty_tx; 2355 vp->dirty_tx = dirty_tx;
@@ -2409,12 +2408,12 @@ static int vortex_rx(struct net_device *dev)
2409 unsigned char rx_error = ioread8(ioaddr + RxErrors); 2408 unsigned char rx_error = ioread8(ioaddr + RxErrors);
2410 if (vortex_debug > 2) 2409 if (vortex_debug > 2)
2411 printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error); 2410 printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
2412 vp->stats.rx_errors++; 2411 dev->stats.rx_errors++;
2413 if (rx_error & 0x01) vp->stats.rx_over_errors++; 2412 if (rx_error & 0x01) dev->stats.rx_over_errors++;
2414 if (rx_error & 0x02) vp->stats.rx_length_errors++; 2413 if (rx_error & 0x02) dev->stats.rx_length_errors++;
2415 if (rx_error & 0x04) vp->stats.rx_frame_errors++; 2414 if (rx_error & 0x04) dev->stats.rx_frame_errors++;
2416 if (rx_error & 0x08) vp->stats.rx_crc_errors++; 2415 if (rx_error & 0x08) dev->stats.rx_crc_errors++;
2417 if (rx_error & 0x10) vp->stats.rx_length_errors++; 2416 if (rx_error & 0x10) dev->stats.rx_length_errors++;
2418 } else { 2417 } else {
2419 /* The packet length: up to 4.5K!. */ 2418 /* The packet length: up to 4.5K!. */
2420 int pkt_len = rx_status & 0x1fff; 2419 int pkt_len = rx_status & 0x1fff;
@@ -2446,7 +2445,7 @@ static int vortex_rx(struct net_device *dev)
2446 skb->protocol = eth_type_trans(skb, dev); 2445 skb->protocol = eth_type_trans(skb, dev);
2447 netif_rx(skb); 2446 netif_rx(skb);
2448 dev->last_rx = jiffies; 2447 dev->last_rx = jiffies;
2449 vp->stats.rx_packets++; 2448 dev->stats.rx_packets++;
2450 /* Wait a limited time to go to next packet. */ 2449 /* Wait a limited time to go to next packet. */
2451 for (i = 200; i >= 0; i--) 2450 for (i = 200; i >= 0; i--)
2452 if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) 2451 if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
@@ -2455,7 +2454,7 @@ static int vortex_rx(struct net_device *dev)
2455 } else if (vortex_debug > 0) 2454 } else if (vortex_debug > 0)
2456 printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of " 2455 printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of "
2457 "size %d.\n", dev->name, pkt_len); 2456 "size %d.\n", dev->name, pkt_len);
2458 vp->stats.rx_dropped++; 2457 dev->stats.rx_dropped++;
2459 } 2458 }
2460 issue_and_wait(dev, RxDiscard); 2459 issue_and_wait(dev, RxDiscard);
2461 } 2460 }
@@ -2482,12 +2481,12 @@ boomerang_rx(struct net_device *dev)
2482 unsigned char rx_error = rx_status >> 16; 2481 unsigned char rx_error = rx_status >> 16;
2483 if (vortex_debug > 2) 2482 if (vortex_debug > 2)
2484 printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error); 2483 printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
2485 vp->stats.rx_errors++; 2484 dev->stats.rx_errors++;
2486 if (rx_error & 0x01) vp->stats.rx_over_errors++; 2485 if (rx_error & 0x01) dev->stats.rx_over_errors++;
2487 if (rx_error & 0x02) vp->stats.rx_length_errors++; 2486 if (rx_error & 0x02) dev->stats.rx_length_errors++;
2488 if (rx_error & 0x04) vp->stats.rx_frame_errors++; 2487 if (rx_error & 0x04) dev->stats.rx_frame_errors++;
2489 if (rx_error & 0x08) vp->stats.rx_crc_errors++; 2488 if (rx_error & 0x08) dev->stats.rx_crc_errors++;
2490 if (rx_error & 0x10) vp->stats.rx_length_errors++; 2489 if (rx_error & 0x10) dev->stats.rx_length_errors++;
2491 } else { 2490 } else {
2492 /* The packet length: up to 4.5K!. */ 2491 /* The packet length: up to 4.5K!. */
2493 int pkt_len = rx_status & 0x1fff; 2492 int pkt_len = rx_status & 0x1fff;
@@ -2529,7 +2528,7 @@ boomerang_rx(struct net_device *dev)
2529 } 2528 }
2530 netif_rx(skb); 2529 netif_rx(skb);
2531 dev->last_rx = jiffies; 2530 dev->last_rx = jiffies;
2532 vp->stats.rx_packets++; 2531 dev->stats.rx_packets++;
2533 } 2532 }
2534 entry = (++vp->cur_rx) % RX_RING_SIZE; 2533 entry = (++vp->cur_rx) % RX_RING_SIZE;
2535 } 2534 }
@@ -2591,7 +2590,7 @@ vortex_down(struct net_device *dev, int final_down)
2591 del_timer_sync(&vp->rx_oom_timer); 2590 del_timer_sync(&vp->rx_oom_timer);
2592 del_timer_sync(&vp->timer); 2591 del_timer_sync(&vp->timer);
2593 2592
2594 /* Turn off statistics ASAP. We update vp->stats below. */ 2593 /* Turn off statistics ASAP. We update dev->stats below. */
2595 iowrite16(StatsDisable, ioaddr + EL3_CMD); 2594 iowrite16(StatsDisable, ioaddr + EL3_CMD);
2596 2595
2597 /* Disable the receiver and transmitter. */ 2596 /* Disable the receiver and transmitter. */
@@ -2728,7 +2727,7 @@ static struct net_device_stats *vortex_get_stats(struct net_device *dev)
2728 update_stats(ioaddr, dev); 2727 update_stats(ioaddr, dev);
2729 spin_unlock_irqrestore (&vp->lock, flags); 2728 spin_unlock_irqrestore (&vp->lock, flags);
2730 } 2729 }
2731 return &vp->stats; 2730 return &dev->stats;
2732} 2731}
2733 2732
2734/* Update statistics. 2733/* Update statistics.
@@ -2748,18 +2747,18 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev)
2748 /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ 2747 /* Unlike the 3c5x9 we need not turn off stats updates while reading. */
2749 /* Switch to the stats window, and read everything. */ 2748 /* Switch to the stats window, and read everything. */
2750 EL3WINDOW(6); 2749 EL3WINDOW(6);
2751 vp->stats.tx_carrier_errors += ioread8(ioaddr + 0); 2750 dev->stats.tx_carrier_errors += ioread8(ioaddr + 0);
2752 vp->stats.tx_heartbeat_errors += ioread8(ioaddr + 1); 2751 dev->stats.tx_heartbeat_errors += ioread8(ioaddr + 1);
2753 vp->stats.tx_window_errors += ioread8(ioaddr + 4); 2752 dev->stats.tx_window_errors += ioread8(ioaddr + 4);
2754 vp->stats.rx_fifo_errors += ioread8(ioaddr + 5); 2753 dev->stats.rx_fifo_errors += ioread8(ioaddr + 5);
2755 vp->stats.tx_packets += ioread8(ioaddr + 6); 2754 dev->stats.tx_packets += ioread8(ioaddr + 6);
2756 vp->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4; 2755 dev->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4;
2757 /* Rx packets */ ioread8(ioaddr + 7); /* Must read to clear */ 2756 /* Rx packets */ ioread8(ioaddr + 7); /* Must read to clear */
2758 /* Don't bother with register 9, an extension of registers 6&7. 2757 /* Don't bother with register 9, an extension of registers 6&7.
2759 If we do use the 6&7 values the atomic update assumption above 2758 If we do use the 6&7 values the atomic update assumption above
2760 is invalid. */ 2759 is invalid. */
2761 vp->stats.rx_bytes += ioread16(ioaddr + 10); 2760 dev->stats.rx_bytes += ioread16(ioaddr + 10);
2762 vp->stats.tx_bytes += ioread16(ioaddr + 12); 2761 dev->stats.tx_bytes += ioread16(ioaddr + 12);
2763 /* Extra stats for get_ethtool_stats() */ 2762 /* Extra stats for get_ethtool_stats() */
2764 vp->xstats.tx_multiple_collisions += ioread8(ioaddr + 2); 2763 vp->xstats.tx_multiple_collisions += ioread8(ioaddr + 2);
2765 vp->xstats.tx_single_collisions += ioread8(ioaddr + 3); 2764 vp->xstats.tx_single_collisions += ioread8(ioaddr + 3);
@@ -2767,14 +2766,14 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev)
2767 EL3WINDOW(4); 2766 EL3WINDOW(4);
2768 vp->xstats.rx_bad_ssd += ioread8(ioaddr + 12); 2767 vp->xstats.rx_bad_ssd += ioread8(ioaddr + 12);
2769 2768
2770 vp->stats.collisions = vp->xstats.tx_multiple_collisions 2769 dev->stats.collisions = vp->xstats.tx_multiple_collisions
2771 + vp->xstats.tx_single_collisions 2770 + vp->xstats.tx_single_collisions
2772 + vp->xstats.tx_max_collisions; 2771 + vp->xstats.tx_max_collisions;
2773 2772
2774 { 2773 {
2775 u8 up = ioread8(ioaddr + 13); 2774 u8 up = ioread8(ioaddr + 13);
2776 vp->stats.rx_bytes += (up & 0x0f) << 16; 2775 dev->stats.rx_bytes += (up & 0x0f) << 16;
2777 vp->stats.tx_bytes += (up & 0xf0) << 12; 2776 dev->stats.tx_bytes += (up & 0xf0) << 12;
2778 } 2777 }
2779 2778
2780 EL3WINDOW(old_window >> 13); 2779 EL3WINDOW(old_window >> 13);
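
The long 3c59x hunk above is a mechanical migration from a driver-private struct net_device_stats to the one embedded in struct net_device. The resulting pattern, reduced to a kernel-context sketch with invented function names:

/* Kernel-context sketch: counters live in the net_device itself. */
#include <linux/netdevice.h>

static void demo_count_rx(struct net_device *dev, unsigned int len)
{
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += len;
}

static struct net_device_stats *demo_get_stats(struct net_device *dev)
{
        return &dev->stats;     /* was: return &priv->stats; */
}
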
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index af46341827f2..d27f54a2df77 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1273,20 +1273,6 @@ config PCNET32
1273 To compile this driver as a module, choose M here. The module 1273 To compile this driver as a module, choose M here. The module
1274 will be called pcnet32. 1274 will be called pcnet32.
1275 1275
1276config PCNET32_NAPI
1277 bool "Use RX polling (NAPI)"
1278 depends on PCNET32
1279 help
1280 NAPI is a new driver API designed to reduce CPU and interrupt load
1281 when the driver is receiving lots of packets from the card. It is
1282 still somewhat experimental and thus not yet enabled by default.
1283
1284 If your estimated Rx load is 10kpps or more, or if the card will be
1285 deployed on potentially unfriendly networks (e.g. in a firewall),
1286 then say Y here.
1287
1288 If in doubt, say N.
1289
1290config AMD8111_ETH 1276config AMD8111_ETH
1291 tristate "AMD 8111 (new PCI lance) support" 1277 tristate "AMD 8111 (new PCI lance) support"
1292 depends on NET_PCI && PCI 1278 depends on NET_PCI && PCI
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 82e9a5bd0dd2..a0b4c8516073 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -499,19 +499,13 @@ static void cops_reset(struct net_device *dev, int sleep)
499 { 499 {
500 outb(0, ioaddr+DAYNA_RESET); /* Assert the reset port */ 500 outb(0, ioaddr+DAYNA_RESET); /* Assert the reset port */
501 inb(ioaddr+DAYNA_RESET); /* Clear the reset */ 501 inb(ioaddr+DAYNA_RESET); /* Clear the reset */
502 if(sleep) 502 if (sleep)
503 { 503 msleep(333);
504 long snap=jiffies; 504 else
505 505 mdelay(333);
506 /* Let card finish initializing, about 1/3 second */
507 while (time_before(jiffies, snap + HZ/3))
508 schedule();
509 }
510 else
511 mdelay(333);
512 } 506 }
507
513 netif_wake_queue(dev); 508 netif_wake_queue(dev);
514 return;
515} 509}
516 510
517static void cops_load (struct net_device *dev) 511static void cops_load (struct net_device *dev)
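
The cops_reset() change above replaces an open-coded jiffies polling loop with msleep(). The general rule it applies, as a kernel-context fragment; demo_settle() is illustrative:

/* Kernel-context fragment: sleep when you can, busy-wait only if you must. */
#include <linux/delay.h>

static void demo_settle(int can_sleep)
{
        if (can_sleep)
                msleep(333);    /* yields the CPU while waiting */
        else
                mdelay(333);    /* busy-wait, for atomic context only */
}
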
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6425603bc379..50a40e433154 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1425,13 +1425,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1425 res = netdev_set_master(slave_dev, bond_dev); 1425 res = netdev_set_master(slave_dev, bond_dev);
1426 if (res) { 1426 if (res) {
1427 dprintk("Error %d calling netdev_set_master\n", res); 1427 dprintk("Error %d calling netdev_set_master\n", res);
1428 goto err_close; 1428 goto err_restore_mac;
1429 } 1429 }
1430 /* open the slave since the application closed it */ 1430 /* open the slave since the application closed it */
1431 res = dev_open(slave_dev); 1431 res = dev_open(slave_dev);
1432 if (res) { 1432 if (res) {
1433 dprintk("Opening slave %s failed\n", slave_dev->name); 1433 dprintk("Opening slave %s failed\n", slave_dev->name);
1434 goto err_restore_mac; 1434 goto err_unset_master;
1435 } 1435 }
1436 1436
1437 new_slave->dev = slave_dev; 1437 new_slave->dev = slave_dev;
@@ -1444,7 +1444,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1444 */ 1444 */
1445 res = bond_alb_init_slave(bond, new_slave); 1445 res = bond_alb_init_slave(bond, new_slave);
1446 if (res) { 1446 if (res) {
1447 goto err_unset_master; 1447 goto err_close;
1448 } 1448 }
1449 } 1449 }
1450 1450
@@ -1619,7 +1619,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1619 1619
1620 res = bond_create_slave_symlinks(bond_dev, slave_dev); 1620 res = bond_create_slave_symlinks(bond_dev, slave_dev);
1621 if (res) 1621 if (res)
1622 goto err_unset_master; 1622 goto err_close;
1623 1623
1624 printk(KERN_INFO DRV_NAME 1624 printk(KERN_INFO DRV_NAME
1625 ": %s: enslaving %s as a%s interface with a%s link.\n", 1625 ": %s: enslaving %s as a%s interface with a%s link.\n",
@@ -1631,12 +1631,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1631 return 0; 1631 return 0;
1632 1632
1633/* Undo stages on error */ 1633/* Undo stages on error */
1634err_unset_master:
1635 netdev_set_master(slave_dev, NULL);
1636
1637err_close: 1634err_close:
1638 dev_close(slave_dev); 1635 dev_close(slave_dev);
1639 1636
1637err_unset_master:
1638 netdev_set_master(slave_dev, NULL);
1639
1640err_restore_mac: 1640err_restore_mac:
1641 if (!bond->params.fail_over_mac) { 1641 if (!bond->params.fail_over_mac) {
1642 memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN); 1642 memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN);
@@ -4936,7 +4936,9 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
4936 if (res < 0) { 4936 if (res < 0) {
4937 rtnl_lock(); 4937 rtnl_lock();
4938 down_write(&bonding_rwsem); 4938 down_write(&bonding_rwsem);
4939 goto out_bond; 4939 bond_deinit(bond_dev);
4940 unregister_netdevice(bond_dev);
4941 goto out_rtnl;
4940 } 4942 }
4941 4943
4942 return 0; 4944 return 0;
@@ -4990,9 +4992,10 @@ err:
4990 destroy_workqueue(bond->wq); 4992 destroy_workqueue(bond->wq);
4991 } 4993 }
4992 4994
4995 bond_destroy_sysfs();
4996
4993 rtnl_lock(); 4997 rtnl_lock();
4994 bond_free_all(); 4998 bond_free_all();
4995 bond_destroy_sysfs();
4996 rtnl_unlock(); 4999 rtnl_unlock();
4997out: 5000out:
4998 return res; 5001 return res;
@@ -5004,9 +5007,10 @@ static void __exit bonding_exit(void)
5004 unregister_netdevice_notifier(&bond_netdev_notifier); 5007 unregister_netdevice_notifier(&bond_netdev_notifier);
5005 unregister_inetaddr_notifier(&bond_inetaddr_notifier); 5008 unregister_inetaddr_notifier(&bond_inetaddr_notifier);
5006 5009
5010 bond_destroy_sysfs();
5011
5007 rtnl_lock(); 5012 rtnl_lock();
5008 bond_free_all(); 5013 bond_free_all();
5009 bond_destroy_sysfs();
5010 rtnl_unlock(); 5014 rtnl_unlock();
5011} 5015}
5012 5016
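
The bond_enslave() relabeling above restores the usual kernel rule that error unwinding runs in exact reverse order of setup: the slave was opened after netdev_set_master(), so it must be closed before the master is unset. A standalone sketch of the idiom; the setup/undo pairs are invented:

/* Standalone sketch: teardown labels in reverse order of setup. */
#include <stdio.h>

static int  setup_master(void) { puts("master set");   return 0;  }
static void undo_master(void)  { puts("master unset"); }
static int  setup_open(void)   { puts("opened");       return 0;  }
static void undo_open(void)    { puts("closed");       }
static int  setup_alb(void)    { puts("alb failed");   return -1; }

int main(void)
{
        if (setup_master())
                goto err;
        if (setup_open())
                goto err_unset_master;
        if (setup_alb())
                goto err_close;         /* unwind from the last success */
        return 0;

err_close:
        undo_open();                    /* opened after set_master, */
err_unset_master:
        undo_master();                  /* so close before unsetting */
err:
        return 1;
}
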
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 979c2d05ff9c..68c41a00d93d 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -146,29 +146,29 @@ static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t
146 ": Unable remove bond %s due to open references.\n", 146 ": Unable remove bond %s due to open references.\n",
147 ifname); 147 ifname);
148 res = -EPERM; 148 res = -EPERM;
149 goto out; 149 goto out_unlock;
150 } 150 }
151 printk(KERN_INFO DRV_NAME 151 printk(KERN_INFO DRV_NAME
152 ": %s is being deleted...\n", 152 ": %s is being deleted...\n",
153 bond->dev->name); 153 bond->dev->name);
154 bond_destroy(bond); 154 bond_destroy(bond);
155 up_write(&bonding_rwsem); 155 goto out_unlock;
156 rtnl_unlock();
157 goto out;
158 } 156 }
159 157
160 printk(KERN_ERR DRV_NAME 158 printk(KERN_ERR DRV_NAME
161 ": unable to delete non-existent bond %s\n", ifname); 159 ": unable to delete non-existent bond %s\n", ifname);
162 res = -ENODEV; 160 res = -ENODEV;
163 up_write(&bonding_rwsem); 161 goto out_unlock;
164 rtnl_unlock();
165 goto out;
166 } 162 }
167 163
168err_no_cmd: 164err_no_cmd:
169 printk(KERN_ERR DRV_NAME 165 printk(KERN_ERR DRV_NAME
170 ": no command found in bonding_masters. Use +ifname or -ifname.\n"); 166 ": no command found in bonding_masters. Use +ifname or -ifname.\n");
171 res = -EPERM; 167 return -EPERM;
168
169out_unlock:
170 up_write(&bonding_rwsem);
171 rtnl_unlock();
172 172
173 /* Always return either count or an error. If you return 0, you'll 173 /* Always return either count or an error. If you return 0, you'll
174 * get called forever, which is bad. 174 * get called forever, which is bad.
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 05e5f59e87fa..ce949d5fae39 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1894,11 +1894,11 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1894 u8 *fw_data; 1894 u8 *fw_data;
1895 struct ch_mem_range t; 1895 struct ch_mem_range t;
1896 1896
1897 if (!capable(CAP_NET_ADMIN)) 1897 if (!capable(CAP_SYS_RAWIO))
1898 return -EPERM; 1898 return -EPERM;
1899 if (copy_from_user(&t, useraddr, sizeof(t))) 1899 if (copy_from_user(&t, useraddr, sizeof(t)))
1900 return -EFAULT; 1900 return -EFAULT;
1901 1901 /* Check t.len sanity? */
1902 fw_data = kmalloc(t.len, GFP_KERNEL); 1902 fw_data = kmalloc(t.len, GFP_KERNEL);
1903 if (!fw_data) 1903 if (!fw_data)
1904 return -ENOMEM; 1904 return -ENOMEM;
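
Answering the "Check t.len sanity?" note above: a user-controlled length should be bounded before it reaches kmalloc(). One hedged way to do it in kernel context; MAX_FW_LEN and copy_fw_from_user() are hypothetical, not cxgb3 code:

/* Hypothetical sketch: bound a user-supplied length before kmalloc(). */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#define MAX_FW_LEN (1024 * 1024)        /* invented cap */

static void *copy_fw_from_user(const void __user *ubuf, u32 len)
{
        void *buf;

        if (len == 0 || len > MAX_FW_LEN)
                return ERR_PTR(-EINVAL);
        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);
        if (copy_from_user(buf, ubuf, len)) {
                kfree(buf);
                return ERR_PTR(-EFAULT);
        }
        return buf;
}
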
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 2a53875cddbf..f823b8ba5785 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -648,6 +648,8 @@
648#define IFE_E_PHY_ID 0x02A80330 648#define IFE_E_PHY_ID 0x02A80330
649#define IFE_PLUS_E_PHY_ID 0x02A80320 649#define IFE_PLUS_E_PHY_ID 0x02A80320
650#define IFE_C_E_PHY_ID 0x02A80310 650#define IFE_C_E_PHY_ID 0x02A80310
651#define BME1000_E_PHY_ID 0x01410CB0
652#define BME1000_E_PHY_ID_R2 0x01410CB1
651 653
652/* M88E1000 Specific Registers */ 654/* M88E1000 Specific Registers */
653#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ 655#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
@@ -701,6 +703,14 @@
701#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 703#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
702#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 704#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
703 705
706/* BME1000 PHY Specific Control Register */
707#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */
708
709
710#define PHY_PAGE_SHIFT 5
711#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
712 ((reg) & MAX_PHY_REG_ADDRESS))
713
704/* 714/*
705 * Bits... 715 * Bits...
706 * 15-5: page 716 * 15-5: page
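
The new PHY_REG() macro above packs a PHY page into bits 15-5 and the register into bits 4-0, matching the "Bits..." comment that follows it. A quick standalone check, assuming MAX_PHY_REG_ADDRESS is 0x1F as the 5-bit register field implies:

/* Standalone check of what PHY_REG() packs. */
#include <stdio.h>

#define MAX_PHY_REG_ADDRESS 0x1F        /* assumption for the demo */
#define PHY_PAGE_SHIFT 5
#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
                            ((reg) & MAX_PHY_REG_ADDRESS))

int main(void)
{
        /* page 769, register 16 -> bits 15-5 = 769, bits 4-0 = 16 */
        printf("PHY_REG(769, 16) = 0x%x\n", PHY_REG(769, 16));
        return 0;
}

This is the addressing used by the e1e_rphy(hw, PHY_REG(769, 16), ...) calls in the loopback hunk further down.
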
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 38bfd0d261fe..d3bc6f8101fa 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -127,7 +127,7 @@ struct e1000_buffer {
127 /* arrays of page information for packet split */ 127 /* arrays of page information for packet split */
128 struct e1000_ps_page *ps_pages; 128 struct e1000_ps_page *ps_pages;
129 }; 129 };
130 130 struct page *page;
131}; 131};
132 132
133struct e1000_ring { 133struct e1000_ring {
@@ -304,6 +304,7 @@ struct e1000_info {
304#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) 304#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5)
305#define FLAG_HAS_SWSM_ON_LOAD (1 << 6) 305#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
306#define FLAG_HAS_JUMBO_FRAMES (1 << 7) 306#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
307#define FLAG_IS_ICH (1 << 9)
307#define FLAG_HAS_SMART_POWER_DOWN (1 << 11) 308#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
308#define FLAG_IS_QUAD_PORT_A (1 << 12) 309#define FLAG_IS_QUAD_PORT_A (1 << 12)
309#define FLAG_IS_QUAD_PORT (1 << 13) 310#define FLAG_IS_QUAD_PORT (1 << 13)
@@ -386,6 +387,7 @@ extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
386 bool state); 387 bool state);
387extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); 388extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
388extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); 389extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
390extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw);
389 391
390extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw); 392extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
391extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); 393extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
@@ -443,6 +445,9 @@ extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
443extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); 445extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
444extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); 446extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
445extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); 447extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
448extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
449extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
450extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
446extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); 451extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
447extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); 452extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
448extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); 453extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index ce045acce63e..a14561f40db0 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -494,8 +494,12 @@ static int e1000_get_eeprom(struct net_device *netdev,
494 for (i = 0; i < last_word - first_word + 1; i++) { 494 for (i = 0; i < last_word - first_word + 1; i++) {
495 ret_val = e1000_read_nvm(hw, first_word + i, 1, 495 ret_val = e1000_read_nvm(hw, first_word + i, 1,
496 &eeprom_buff[i]); 496 &eeprom_buff[i]);
497 if (ret_val) 497 if (ret_val) {
498 /* a read error occurred, throw away the
499 * result */
500 memset(eeprom_buff, 0xff, sizeof(eeprom_buff));
498 break; 501 break;
502 }
499 } 503 }
500 } 504 }
501 505
@@ -803,8 +807,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
803 /* restore previous status */ 807 /* restore previous status */
804 ew32(STATUS, before); 808 ew32(STATUS, before);
805 809
806 if ((mac->type != e1000_ich8lan) && 810 if (!(adapter->flags & FLAG_IS_ICH)) {
807 (mac->type != e1000_ich9lan)) {
808 REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF); 811 REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
809 REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF); 812 REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF);
810 REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF); 813 REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF);
@@ -824,15 +827,13 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
824 827
825 REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); 828 REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);
826 829
827 before = (((mac->type == e1000_ich8lan) || 830 before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE);
828 (mac->type == e1000_ich9lan)) ? 0x06C3B33E : 0x06DFB3FE);
829 REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB); 831 REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB);
830 REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); 832 REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);
831 833
832 REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); 834 REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
833 REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 835 REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
834 if ((mac->type != e1000_ich8lan) && 836 if (!(adapter->flags & FLAG_IS_ICH))
835 (mac->type != e1000_ich9lan))
836 REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); 837 REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
837 REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 838 REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
838 REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); 839 REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
@@ -911,9 +912,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
911 912
912 /* Test each interrupt */ 913 /* Test each interrupt */
913 for (i = 0; i < 10; i++) { 914 for (i = 0; i < 10; i++) {
914 915 if ((adapter->flags & FLAG_IS_ICH) && (i == 8))
915 if (((adapter->hw.mac.type == e1000_ich8lan) ||
916 (adapter->hw.mac.type == e1000_ich9lan)) && i == 8)
917 continue; 916 continue;
918 917
919 /* Interrupt to test */ 918 /* Interrupt to test */
@@ -1184,6 +1183,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1184 struct e1000_hw *hw = &adapter->hw; 1183 struct e1000_hw *hw = &adapter->hw;
1185 u32 ctrl_reg = 0; 1184 u32 ctrl_reg = 0;
1186 u32 stat_reg = 0; 1185 u32 stat_reg = 0;
1186 u16 phy_reg = 0;
1187 1187
1188 hw->mac.autoneg = 0; 1188 hw->mac.autoneg = 0;
1189 1189
@@ -1211,6 +1211,28 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1211 E1000_CTRL_SPD_100 |/* Force Speed to 100 */ 1211 E1000_CTRL_SPD_100 |/* Force Speed to 100 */
1212 E1000_CTRL_FD); /* Force Duplex to FULL */ 1212 E1000_CTRL_FD); /* Force Duplex to FULL */
1213 break; 1213 break;
1214 case e1000_phy_bm:
1215 /* Set Default MAC Interface speed to 1GB */
1216 e1e_rphy(hw, PHY_REG(2, 21), &phy_reg);
1217 phy_reg &= ~0x0007;
1218 phy_reg |= 0x006;
1219 e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
1220 /* Assert SW reset for above settings to take effect */
1221 e1000e_commit_phy(hw);
1222 mdelay(1);
1223 /* Force Full Duplex */
1224 e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
1225 e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C);
1226 /* Set Link Up (in force link) */
1227 e1e_rphy(hw, PHY_REG(776, 16), &phy_reg);
1228 e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040);
1229 /* Force Link */
1230 e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
1231 e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040);
1232 /* Set Early Link Enable */
1233 e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
1234 e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400);
1235 /* fall through */
1214 default: 1236 default:
1215 /* force 1000, set loopback */ 1237 /* force 1000, set loopback */
1216 e1e_wphy(hw, PHY_CONTROL, 0x4140); 1238 e1e_wphy(hw, PHY_CONTROL, 0x4140);
@@ -1224,8 +1246,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1224 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ 1246 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1225 E1000_CTRL_FD); /* Force Duplex to FULL */ 1247 E1000_CTRL_FD); /* Force Duplex to FULL */
1226 1248
1227 if ((adapter->hw.mac.type == e1000_ich8lan) || 1249 if (adapter->flags & FLAG_IS_ICH)
1228 (adapter->hw.mac.type == e1000_ich9lan))
1229 ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ 1250 ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */
1230 } 1251 }
1231 1252
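
The e1000_phy_bm loopback branch above is a run of read-modify-write accesses to paged PHY registers. A hypothetical helper, built on the driver's e1e_rphy()/e1e_wphy() accessors, makes the pattern explicit (not part of the driver; driver types assumed):

/* hypothetical helper -- not in the driver -- built on e1e_rphy()/e1e_wphy() */
static s32 e1e_rmw_phy(struct e1000_hw *hw, u32 reg, u16 clear, u16 set)
{
        u16 val;
        s32 ret_val = e1e_rphy(hw, reg, &val);

        if (ret_val)
                return ret_val;
        return e1e_wphy(hw, reg, (val & ~clear) | set);
}

/* e.g. the "Force Full Duplex" step above:
 *      e1e_rmw_phy(hw, PHY_REG(769, 16), 0, 0x000C); */
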
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index a930e6d9cf02..74f263acb172 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -216,6 +216,21 @@ enum e1e_registers {
216#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ 216#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
217#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ 217#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
218#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ 218#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
219#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
220#define IGP_PAGE_SHIFT 5
221#define PHY_REG_MASK 0x1F
222
223#define BM_WUC_PAGE 800
224#define BM_WUC_ADDRESS_OPCODE 0x11
225#define BM_WUC_DATA_OPCODE 0x12
226#define BM_WUC_ENABLE_PAGE 769
227#define BM_WUC_ENABLE_REG 17
228#define BM_WUC_ENABLE_BIT (1 << 2)
229#define BM_WUC_HOST_WU_BIT (1 << 4)
230
231#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
232#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
233#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
219 234
220#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 235#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
221#define IGP01E1000_PHY_POLARITY_MASK 0x0078 236#define IGP01E1000_PHY_POLARITY_MASK 0x0078
@@ -331,10 +346,16 @@ enum e1e_registers {
331#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 346#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
332#define E1000_DEV_ID_ICH8_IGP_M 0x104D 347#define E1000_DEV_ID_ICH8_IGP_M 0x104D
333#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD 348#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD
349#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5
350#define E1000_DEV_ID_ICH9_IGP_M 0x10BF
351#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB
334#define E1000_DEV_ID_ICH9_IGP_C 0x294C 352#define E1000_DEV_ID_ICH9_IGP_C 0x294C
335#define E1000_DEV_ID_ICH9_IFE 0x10C0 353#define E1000_DEV_ID_ICH9_IFE 0x10C0
336#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 354#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3
337#define E1000_DEV_ID_ICH9_IFE_G 0x10C2 355#define E1000_DEV_ID_ICH9_IFE_G 0x10C2
356#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC
357#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD
358#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
338 359
339#define E1000_FUNC_1 1 360#define E1000_FUNC_1 1
340 361
@@ -378,6 +399,7 @@ enum e1000_phy_type {
378 e1000_phy_gg82563, 399 e1000_phy_gg82563,
379 e1000_phy_igp_3, 400 e1000_phy_igp_3,
380 e1000_phy_ife, 401 e1000_phy_ife,
402 e1000_phy_bm,
381}; 403};
382 404
383enum e1000_bus_width { 405enum e1000_bus_width {
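
The BM_WUC* defines above rely on the driver packing a PHY page and register index into one offset: the page sits above bit 5 (IGP_PAGE_SHIFT) and the register index in the low five bits (PHY_REG_MASK). A standalone sketch, assuming PHY_REG() has that shape, which the new defines and the bm read/write routines both suggest:

#include <stdio.h>

#define IGP_PAGE_SHIFT 5
#define PHY_REG_MASK   0x1F
/* assumed shape of the driver's PHY_REG() macro, per the defines above */
#define PHY_REG(page, reg) (((page) << IGP_PAGE_SHIFT) | ((reg) & PHY_REG_MASK))

int main(void)
{
        unsigned int bm_wuc = PHY_REG(800, 1);  /* BM_WUC */

        printf("offset=0x%x page=%u reg=%u\n", bm_wuc,
               bm_wuc >> IGP_PAGE_SHIFT, bm_wuc & PHY_REG_MASK);
        /* prints: offset=0x6401 page=800 reg=1 */
        return 0;
}
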
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 768485dbb2c6..9e38452a738c 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -38,6 +38,12 @@
38 * 82566DM Gigabit Network Connection 38 * 82566DM Gigabit Network Connection
39 * 82566MC Gigabit Network Connection 39 * 82566MC Gigabit Network Connection
40 * 82566MM Gigabit Network Connection 40 * 82566MM Gigabit Network Connection
41 * 82567LM Gigabit Network Connection
42 * 82567LF Gigabit Network Connection
43 * 82567LM-2 Gigabit Network Connection
44 * 82567LF-2 Gigabit Network Connection
45 * 82567V-2 Gigabit Network Connection
46 * 82562GT-3 10/100 Network Connection
41 */ 47 */
42 48
43#include <linux/netdevice.h> 49#include <linux/netdevice.h>
@@ -198,6 +204,19 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
198 phy->addr = 1; 204 phy->addr = 1;
199 phy->reset_delay_us = 100; 205 phy->reset_delay_us = 100;
200 206
207 /*
208 * We may need to do this twice - once for IGP and if that fails,
209 * we'll set BM func pointers and try again
210 */
211 ret_val = e1000e_determine_phy_address(hw);
212 if (ret_val) {
213 hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
214 hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
215 ret_val = e1000e_determine_phy_address(hw);
216 if (ret_val)
217 return ret_val;
218 }
219
201 phy->id = 0; 220 phy->id = 0;
202 while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && 221 while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
203 (i++ < 100)) { 222 (i++ < 100)) {
@@ -219,6 +238,13 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
219 phy->type = e1000_phy_ife; 238 phy->type = e1000_phy_ife;
220 phy->autoneg_mask = E1000_ALL_NOT_GIG; 239 phy->autoneg_mask = E1000_ALL_NOT_GIG;
221 break; 240 break;
241 case BME1000_E_PHY_ID:
242 phy->type = e1000_phy_bm;
243 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
244 hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
245 hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
246 hw->phy.ops.commit_phy = e1000e_phy_sw_reset;
247 break;
222 default: 248 default:
223 return -E1000_ERR_PHY; 249 return -E1000_ERR_PHY;
224 break; 250 break;
@@ -664,6 +690,7 @@ static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw)
664 return e1000_get_phy_info_ife_ich8lan(hw); 690 return e1000_get_phy_info_ife_ich8lan(hw);
665 break; 691 break;
666 case e1000_phy_igp_3: 692 case e1000_phy_igp_3:
693 case e1000_phy_bm:
667 return e1000e_get_phy_info_igp(hw); 694 return e1000e_get_phy_info_igp(hw);
668 break; 695 break;
669 default: 696 default:
@@ -728,7 +755,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
728 s32 ret_val = 0; 755 s32 ret_val = 0;
729 u16 data; 756 u16 data;
730 757
731 if (phy->type != e1000_phy_igp_3) 758 if (phy->type == e1000_phy_ife)
732 return ret_val; 759 return ret_val;
733 760
734 phy_ctrl = er32(PHY_CTRL); 761 phy_ctrl = er32(PHY_CTRL);
@@ -1918,8 +1945,35 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
1918 ret_val = e1000e_copper_link_setup_igp(hw); 1945 ret_val = e1000e_copper_link_setup_igp(hw);
1919 if (ret_val) 1946 if (ret_val)
1920 return ret_val; 1947 return ret_val;
1948 } else if (hw->phy.type == e1000_phy_bm) {
1949 ret_val = e1000e_copper_link_setup_m88(hw);
1950 if (ret_val)
1951 return ret_val;
1921 } 1952 }
1922 1953
1954 if (hw->phy.type == e1000_phy_ife) {
1955 ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
1956 if (ret_val)
1957 return ret_val;
1958
1959 reg_data &= ~IFE_PMC_AUTO_MDIX;
1960
1961 switch (hw->phy.mdix) {
1962 case 1:
1963 reg_data &= ~IFE_PMC_FORCE_MDIX;
1964 break;
1965 case 2:
1966 reg_data |= IFE_PMC_FORCE_MDIX;
1967 break;
1968 case 0:
1969 default:
1970 reg_data |= IFE_PMC_AUTO_MDIX;
1971 break;
1972 }
1973 ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
1974 if (ret_val)
1975 return ret_val;
1976 }
1923 return e1000e_setup_copper_link(hw); 1977 return e1000e_setup_copper_link(hw);
1924} 1978}
1925 1979
@@ -2127,6 +2181,31 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
2127} 2181}
2128 2182
2129/** 2183/**
2184 * e1000e_disable_gig_wol_ich8lan - disable gig during WoL
2185 * @hw: pointer to the HW structure
2186 *
2187 * During S0 to Sx transition, it is possible the link remains at gig
2188 * instead of negotiating to a lower speed. Before going to Sx, set
2189 * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
2190 * to a lower speed.
2191 *
2192 * Should only be called for ICH9 devices.
2193 **/
2194void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
2195{
2196 u32 phy_ctrl;
2197
2198 if (hw->mac.type == e1000_ich9lan) {
2199 phy_ctrl = er32(PHY_CTRL);
2200 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
2201 E1000_PHY_CTRL_GBE_DISABLE;
2202 ew32(PHY_CTRL, phy_ctrl);
2203 }
2204
2205 return;
2206}
2207
2208/**
2130 * e1000_cleanup_led_ich8lan - Restore the default LED operation 2209 * e1000_cleanup_led_ich8lan - Restore the default LED operation
2131 * @hw: pointer to the HW structure 2210 * @hw: pointer to the HW structure
2132 * 2211 *
@@ -2247,6 +2326,7 @@ static struct e1000_nvm_operations ich8_nvm_ops = {
2247struct e1000_info e1000_ich8_info = { 2326struct e1000_info e1000_ich8_info = {
2248 .mac = e1000_ich8lan, 2327 .mac = e1000_ich8lan,
2249 .flags = FLAG_HAS_WOL 2328 .flags = FLAG_HAS_WOL
2329 | FLAG_IS_ICH
2250 | FLAG_RX_CSUM_ENABLED 2330 | FLAG_RX_CSUM_ENABLED
2251 | FLAG_HAS_CTRLEXT_ON_LOAD 2331 | FLAG_HAS_CTRLEXT_ON_LOAD
2252 | FLAG_HAS_AMT 2332 | FLAG_HAS_AMT
@@ -2262,6 +2342,7 @@ struct e1000_info e1000_ich8_info = {
2262struct e1000_info e1000_ich9_info = { 2342struct e1000_info e1000_ich9_info = {
2263 .mac = e1000_ich9lan, 2343 .mac = e1000_ich9lan,
2264 .flags = FLAG_HAS_JUMBO_FRAMES 2344 .flags = FLAG_HAS_JUMBO_FRAMES
2345 | FLAG_IS_ICH
2265 | FLAG_HAS_WOL 2346 | FLAG_HAS_WOL
2266 | FLAG_RX_CSUM_ENABLED 2347 | FLAG_RX_CSUM_ENABLED
2267 | FLAG_HAS_CTRLEXT_ON_LOAD 2348 | FLAG_HAS_CTRLEXT_ON_LOAD
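
The init path above probes for the PHY twice: once with the default IGP accessors and, on failure, again after swapping in the BM accessors. The shape of that fallback, with hypothetical probe callbacks standing in for e1000e_determine_phy_address() and the read_phy_reg/write_phy_reg function pointers:

/* hypothetical callback type; stands in for the swapped phy.ops pointers */
struct phy_ops {
        int (*probe)(void *hw);
};

static int detect_phy(void *hw, const struct phy_ops *igp,
                      const struct phy_ops *bm)
{
        int ret = igp->probe(hw);       /* first pass with IGP accessors */

        if (ret)
                ret = bm->probe(hw);    /* retry with BM accessors swapped in */
        return ret;                     /* still non-zero: no PHY answered */
}
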
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 8991ab8911e2..8cbb40f3a506 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -43,10 +43,11 @@
43#include <linux/if_vlan.h> 43#include <linux/if_vlan.h>
44#include <linux/cpu.h> 44#include <linux/cpu.h>
45#include <linux/smp.h> 45#include <linux/smp.h>
46#include <linux/pm_qos_params.h>
46 47
47#include "e1000.h" 48#include "e1000.h"
48 49
49#define DRV_VERSION "0.2.1" 50#define DRV_VERSION "0.3.3.3-k2"
50char e1000e_driver_name[] = "e1000e"; 51char e1000e_driver_name[] = "e1000e";
51const char e1000e_driver_version[] = DRV_VERSION; 52const char e1000e_driver_version[] = DRV_VERSION;
52 53
@@ -341,6 +342,89 @@ no_buffers:
341} 342}
342 343
343/** 344/**
345 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
346 * @adapter: address of board private structure
348 * @cleaned_count: number of buffers to allocate this pass
349 **/
350
351static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
352 int cleaned_count)
353{
354 struct net_device *netdev = adapter->netdev;
355 struct pci_dev *pdev = adapter->pdev;
356 struct e1000_rx_desc *rx_desc;
357 struct e1000_ring *rx_ring = adapter->rx_ring;
358 struct e1000_buffer *buffer_info;
359 struct sk_buff *skb;
360 unsigned int i;
361 unsigned int bufsz = 256 -
362 16 /* for skb_reserve */ -
363 NET_IP_ALIGN;
364
365 i = rx_ring->next_to_use;
366 buffer_info = &rx_ring->buffer_info[i];
367
368 while (cleaned_count--) {
369 skb = buffer_info->skb;
370 if (skb) {
371 skb_trim(skb, 0);
372 goto check_page;
373 }
374
375 skb = netdev_alloc_skb(netdev, bufsz);
376 if (unlikely(!skb)) {
377 /* Better luck next round */
378 adapter->alloc_rx_buff_failed++;
379 break;
380 }
381
382 /* Make buffer alignment 2 beyond a 16 byte boundary;
383 * this will result in a 16 byte aligned IP header after
384 * the 14 byte MAC header is removed
385 */
386 skb_reserve(skb, NET_IP_ALIGN);
387
388 buffer_info->skb = skb;
389check_page:
390 /* allocate a new page if necessary */
391 if (!buffer_info->page) {
392 buffer_info->page = alloc_page(GFP_ATOMIC);
393 if (unlikely(!buffer_info->page)) {
394 adapter->alloc_rx_buff_failed++;
395 break;
396 }
397 }
398
399 if (!buffer_info->dma)
400 buffer_info->dma = pci_map_page(pdev,
401 buffer_info->page, 0,
402 PAGE_SIZE,
403 PCI_DMA_FROMDEVICE);
404
405 rx_desc = E1000_RX_DESC(*rx_ring, i);
406 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
407
408 if (unlikely(++i == rx_ring->count))
409 i = 0;
410 buffer_info = &rx_ring->buffer_info[i];
411 }
412
413 if (likely(rx_ring->next_to_use != i)) {
414 rx_ring->next_to_use = i;
415 if (unlikely(i-- == 0))
416 i = (rx_ring->count - 1);
417
418 /* Force memory writes to complete before letting h/w
419 * know there are new descriptors to fetch. (Only
420 * applicable for weak-ordered memory model archs,
421 * such as IA-64). */
422 wmb();
423 writel(i, adapter->hw.hw_addr + rx_ring->tail);
424 }
425}
426
427/**
344 * e1000_clean_rx_irq - Send received data up the network stack; legacy 428 * e1000_clean_rx_irq - Send received data up the network stack; legacy
345 * @adapter: board private structure 429 * @adapter: board private structure
346 * 430 *
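
e1000_alloc_jumbo_rx_buffers() above recycles aggressively: a used skb is reset with skb_trim() rather than reallocated, a page is allocated only if the previous one was handed up the stack, and the DMA mapping is created once per page and reused. Condensed decision ladder (driver context assumed; condensed from the function above):

/* condensed restatement of the allocation path above, driver context assumed */
if (buffer_info->skb)
        skb_trim(buffer_info->skb, 0);          /* recycle skb: reset length */
else
        buffer_info->skb = netdev_alloc_skb(netdev, bufsz);

if (!buffer_info->page)                         /* page was handed up earlier */
        buffer_info->page = alloc_page(GFP_ATOMIC);

if (!buffer_info->dma)                          /* map once, reuse mapping */
        buffer_info->dma = pci_map_page(pdev, buffer_info->page, 0,
                                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
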
@@ -783,6 +867,186 @@ next_desc:
783} 867}
784 868
785/** 869/**
870 * e1000_consume_page - helper function
871 **/
872static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
873 u16 length)
874{
875 bi->page = NULL;
876 skb->len += length;
877 skb->data_len += length;
878 skb->truesize += length;
879}
880
881/**
882 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
883 * @adapter: board private structure
884 *
885 * the return value indicates whether actual cleaning was done; there
886 * is no guarantee that everything was cleaned
887 **/
888
889static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
890 int *work_done, int work_to_do)
891{
892 struct net_device *netdev = adapter->netdev;
893 struct pci_dev *pdev = adapter->pdev;
894 struct e1000_ring *rx_ring = adapter->rx_ring;
895 struct e1000_rx_desc *rx_desc, *next_rxd;
896 struct e1000_buffer *buffer_info, *next_buffer;
897 u32 length;
898 unsigned int i;
899 int cleaned_count = 0;
900 bool cleaned = false;
901 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
902
903 i = rx_ring->next_to_clean;
904 rx_desc = E1000_RX_DESC(*rx_ring, i);
905 buffer_info = &rx_ring->buffer_info[i];
906
907 while (rx_desc->status & E1000_RXD_STAT_DD) {
908 struct sk_buff *skb;
909 u8 status;
910
911 if (*work_done >= work_to_do)
912 break;
913 (*work_done)++;
914
915 status = rx_desc->status;
916 skb = buffer_info->skb;
917 buffer_info->skb = NULL;
918
919 ++i;
920 if (i == rx_ring->count)
921 i = 0;
922 next_rxd = E1000_RX_DESC(*rx_ring, i);
923 prefetch(next_rxd);
924
925 next_buffer = &rx_ring->buffer_info[i];
926
927 cleaned = true;
928 cleaned_count++;
929 pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
930 PCI_DMA_FROMDEVICE);
931 buffer_info->dma = 0;
932
933 length = le16_to_cpu(rx_desc->length);
934
935 /* errors is only valid for DD + EOP descriptors */
936 if (unlikely((status & E1000_RXD_STAT_EOP) &&
937 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
938 /* recycle both page and skb */
939 buffer_info->skb = skb;
940 /* an error means any chain goes out the window
941 * too */
942 if (rx_ring->rx_skb_top)
943 dev_kfree_skb(rx_ring->rx_skb_top);
944 rx_ring->rx_skb_top = NULL;
945 goto next_desc;
946 }
947
948#define rxtop rx_ring->rx_skb_top
949 if (!(status & E1000_RXD_STAT_EOP)) {
950 /* this descriptor is only the beginning (or middle) */
951 if (!rxtop) {
952 /* this is the beginning of a chain */
953 rxtop = skb;
954 skb_fill_page_desc(rxtop, 0, buffer_info->page,
955 0, length);
956 } else {
957 /* this is the middle of a chain */
958 skb_fill_page_desc(rxtop,
959 skb_shinfo(rxtop)->nr_frags,
960 buffer_info->page, 0, length);
961 /* re-use the skb, only consumed the page */
962 buffer_info->skb = skb;
963 }
964 e1000_consume_page(buffer_info, rxtop, length);
965 goto next_desc;
966 } else {
967 if (rxtop) {
968 /* end of the chain */
969 skb_fill_page_desc(rxtop,
970 skb_shinfo(rxtop)->nr_frags,
971 buffer_info->page, 0, length);
972 /* re-use the current skb, we only consumed the
973 * page */
974 buffer_info->skb = skb;
975 skb = rxtop;
976 rxtop = NULL;
977 e1000_consume_page(buffer_info, skb, length);
978 } else {
979 /* no chain, got EOP, this buf is the packet;
980 * copybreak to save the put_page/alloc_page */
981 if (length <= copybreak &&
982 skb_tailroom(skb) >= length) {
983 u8 *vaddr;
984 vaddr = kmap_atomic(buffer_info->page,
985 KM_SKB_DATA_SOFTIRQ);
986 memcpy(skb_tail_pointer(skb), vaddr,
987 length);
988 kunmap_atomic(vaddr,
989 KM_SKB_DATA_SOFTIRQ);
990 /* re-use the page, so don't erase
991 * buffer_info->page */
992 skb_put(skb, length);
993 } else {
994 skb_fill_page_desc(skb, 0,
995 buffer_info->page, 0,
996 length);
997 e1000_consume_page(buffer_info, skb,
998 length);
999 }
1000 }
1001 }
1002
1003 /* Receive Checksum Offload XXX recompute due to CRC strip? */
1004 e1000_rx_checksum(adapter,
1005 (u32)(status) |
1006 ((u32)(rx_desc->errors) << 24),
1007 le16_to_cpu(rx_desc->csum), skb);
1008
1009 /* probably a little skewed due to removing CRC */
1010 total_rx_bytes += skb->len;
1011 total_rx_packets++;
1012
1013 /* eth type trans needs skb->data to point to something */
1014 if (!pskb_may_pull(skb, ETH_HLEN)) {
1015 ndev_err(netdev, "pskb_may_pull failed.\n");
1016 dev_kfree_skb(skb);
1017 goto next_desc;
1018 }
1019
1020 e1000_receive_skb(adapter, netdev, skb, status,
1021 rx_desc->special);
1022
1023next_desc:
1024 rx_desc->status = 0;
1025
1026 /* return some buffers to hardware, one at a time is too slow */
1027 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
1028 adapter->alloc_rx_buf(adapter, cleaned_count);
1029 cleaned_count = 0;
1030 }
1031
1032 /* use prefetched values */
1033 rx_desc = next_rxd;
1034 buffer_info = next_buffer;
1035 }
1036 rx_ring->next_to_clean = i;
1037
1038 cleaned_count = e1000_desc_unused(rx_ring);
1039 if (cleaned_count)
1040 adapter->alloc_rx_buf(adapter, cleaned_count);
1041
1042 adapter->total_rx_bytes += total_rx_bytes;
1043 adapter->total_rx_packets += total_rx_packets;
1044 adapter->net_stats.rx_bytes += total_rx_bytes;
1045 adapter->net_stats.rx_packets += total_rx_packets;
1046 return cleaned;
1047}
1048
1049/**
786 * e1000_clean_rx_ring - Free Rx Buffers per Queue 1050 * e1000_clean_rx_ring - Free Rx Buffers per Queue
787 * @adapter: board private structure 1051 * @adapter: board private structure
788 **/ 1052 **/
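
e1000_clean_jumbo_rx_irq() assembles multi-descriptor frames without copying: each buffer's page becomes an skb fragment, rx_skb_top carries the head skb between descriptors, and only a final buffer shorter than copybreak is copied into the skb head so its page can be reused. The four cases reduce to the sketch below, where add_frag(), deliver() and copy_to_head() stand in for skb_fill_page_desc(), e1000_receive_skb() and the kmap/memcpy path:

/* reduction of the cases above; helper names are stand-ins, not driver API */
if (!eop) {                              /* start or middle of a chain */
        if (!rxtop)
                rxtop = skb;             /* first buffer becomes the head skb */
        add_frag(rxtop, page, length);   /* page attached, nothing copied */
} else if (rxtop) {                      /* last buffer of a chain */
        add_frag(rxtop, page, length);
        deliver(rxtop);
        rxtop = NULL;
} else if (length <= copybreak) {        /* small single-buffer frame */
        copy_to_head(skb, page, length); /* copy, so the page can be reused */
        deliver(skb);
} else {                                 /* large single-buffer frame */
        add_frag(skb, page, length);
        deliver(skb);
}
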
@@ -802,6 +1066,10 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
802 pci_unmap_single(pdev, buffer_info->dma, 1066 pci_unmap_single(pdev, buffer_info->dma,
803 adapter->rx_buffer_len, 1067 adapter->rx_buffer_len,
804 PCI_DMA_FROMDEVICE); 1068 PCI_DMA_FROMDEVICE);
1069 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
1070 pci_unmap_page(pdev, buffer_info->dma,
1071 PAGE_SIZE,
1072 PCI_DMA_FROMDEVICE);
805 else if (adapter->clean_rx == e1000_clean_rx_irq_ps) 1073 else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
806 pci_unmap_single(pdev, buffer_info->dma, 1074 pci_unmap_single(pdev, buffer_info->dma,
807 adapter->rx_ps_bsize0, 1075 adapter->rx_ps_bsize0,
@@ -809,6 +1077,11 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
809 buffer_info->dma = 0; 1077 buffer_info->dma = 0;
810 } 1078 }
811 1079
1080 if (buffer_info->page) {
1081 put_page(buffer_info->page);
1082 buffer_info->page = NULL;
1083 }
1084
812 if (buffer_info->skb) { 1085 if (buffer_info->skb) {
813 dev_kfree_skb(buffer_info->skb); 1086 dev_kfree_skb(buffer_info->skb);
814 buffer_info->skb = NULL; 1087 buffer_info->skb = NULL;
@@ -1755,10 +2028,12 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
1755 * a lot of memory, since we allocate 3 pages at all times 2028 * a lot of memory, since we allocate 3 pages at all times
1756 * per packet. 2029 * per packet.
1757 */ 2030 */
1758 adapter->rx_ps_pages = 0;
1759 pages = PAGE_USE_COUNT(adapter->netdev->mtu); 2031 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
1760 if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) 2032 if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) &&
2033 (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
1761 adapter->rx_ps_pages = pages; 2034 adapter->rx_ps_pages = pages;
2035 else
2036 adapter->rx_ps_pages = 0;
1762 2037
1763 if (adapter->rx_ps_pages) { 2038 if (adapter->rx_ps_pages) {
1764 /* Configure extra packet-split registers */ 2039 /* Configure extra packet-split registers */
@@ -1819,9 +2094,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
1819 sizeof(union e1000_rx_desc_packet_split); 2094 sizeof(union e1000_rx_desc_packet_split);
1820 adapter->clean_rx = e1000_clean_rx_irq_ps; 2095 adapter->clean_rx = e1000_clean_rx_irq_ps;
1821 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; 2096 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2097 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
2098 rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
2099 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
2100 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1822 } else { 2101 } else {
1823 rdlen = rx_ring->count * 2102 rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
1824 sizeof(struct e1000_rx_desc);
1825 adapter->clean_rx = e1000_clean_rx_irq; 2103 adapter->clean_rx = e1000_clean_rx_irq;
1826 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; 2104 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1827 } 2105 }
@@ -1885,8 +2163,21 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
1885 * units), e.g. using jumbo frames when setting to E1000_ERT_2048 2163 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
1886 */ 2164 */
1887 if ((adapter->flags & FLAG_HAS_ERT) && 2165 if ((adapter->flags & FLAG_HAS_ERT) &&
1888 (adapter->netdev->mtu > ETH_DATA_LEN)) 2166 (adapter->netdev->mtu > ETH_DATA_LEN)) {
1889 ew32(ERT, E1000_ERT_2048); 2167 u32 rxdctl = er32(RXDCTL(0));
2168 ew32(RXDCTL(0), rxdctl | 0x3);
2169 ew32(ERT, E1000_ERT_2048 | (1 << 13));
2170 /*
2171 * With jumbo frames and early-receive enabled, excessive
2172 * C4->C2 latencies result in dropped transactions.
2173 */
2174 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
2175 e1000e_driver_name, 55);
2176 } else {
2177 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
2178 e1000e_driver_name,
2179 PM_QOS_DEFAULT_VALUE);
2180 }
1890 2181
1891 /* Enable Receives */ 2182 /* Enable Receives */
1892 ew32(RCTL, rctl); 2183 ew32(RCTL, rctl);
@@ -2155,6 +2446,14 @@ void e1000e_reset(struct e1000_adapter *adapter)
2155 2446
2156 /* Allow time for pending master requests to run */ 2447 /* Allow time for pending master requests to run */
2157 mac->ops.reset_hw(hw); 2448 mac->ops.reset_hw(hw);
2449
2450 /*
2451 * For parts with AMT enabled, let the firmware know
2452 * that the network interface is in control
2453 */
2454 if ((adapter->flags & FLAG_HAS_AMT) && e1000e_check_mng_mode(hw))
2455 e1000_get_hw_control(adapter);
2456
2158 ew32(WUC, 0); 2457 ew32(WUC, 0);
2159 2458
2160 if (mac->ops.init_hw(hw)) 2459 if (mac->ops.init_hw(hw))
@@ -3469,6 +3768,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3469 * means we reserve 2 more, this pushes us to allocate from the next 3768 * means we reserve 2 more, this pushes us to allocate from the next
3470 * larger slab size. 3769 * larger slab size.
3471 * i.e. RXBUFFER_2048 --> size-4096 slab 3770 * i.e. RXBUFFER_2048 --> size-4096 slab
3771 * However with the new *_jumbo_rx* routines, jumbo receives will use
3772 * fragmented skbs
3472 */ 3773 */
3473 3774
3474 if (max_frame <= 256) 3775 if (max_frame <= 256)
@@ -3626,6 +3927,9 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
3626 ew32(CTRL_EXT, ctrl_ext); 3927 ew32(CTRL_EXT, ctrl_ext);
3627 } 3928 }
3628 3929
3930 if (adapter->flags & FLAG_IS_ICH)
3931 e1000e_disable_gig_wol_ich8lan(&adapter->hw);
3932
3629 /* Allow time for pending master requests to run */ 3933 /* Allow time for pending master requests to run */
3630 e1000e_disable_pcie_master(&adapter->hw); 3934 e1000e_disable_pcie_master(&adapter->hw);
3631 3935
@@ -4292,6 +4596,13 @@ static struct pci_device_id e1000_pci_tbl[] = {
4292 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, 4596 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
4293 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, 4597 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
4294 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, 4598 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
4599 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
4600 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
4601 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
4602
4603 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
4604 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
4605 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
4295 4606
4296 { } /* terminate list */ 4607 { } /* terminate list */
4297}; 4608};
@@ -4326,7 +4637,9 @@ static int __init e1000_init_module(void)
4326 printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n", 4637 printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n",
4327 e1000e_driver_name); 4638 e1000e_driver_name);
4328 ret = pci_register_driver(&e1000_driver); 4639 ret = pci_register_driver(&e1000_driver);
4329 4640 pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name,
4641 PM_QOS_DEFAULT_VALUE);
4642
4330 return ret; 4643 return ret;
4331} 4644}
4332module_init(e1000_init_module); 4645module_init(e1000_init_module);
@@ -4340,6 +4653,7 @@ module_init(e1000_init_module);
4340static void __exit e1000_exit_module(void) 4653static void __exit e1000_exit_module(void)
4341{ 4654{
4342 pci_unregister_driver(&e1000_driver); 4655 pci_unregister_driver(&e1000_driver);
4656 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name);
4343} 4657}
4344module_exit(e1000_exit_module); 4658module_exit(e1000_exit_module);
4345 4659
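
The pm_qos calls added above form one lifecycle: register at module load, tighten to 55 usec while jumbo frames plus early receive are active, relax otherwise, unregister at unload. Gathered into a single hypothetical walkthrough function, using the same pre-2.6.31 pm_qos_params.h API the patch itself calls:

#include <linux/pm_qos_params.h>

/* hypothetical walkthrough -- shows ordering, not driver code */
static void e1000e_pm_qos_walkthrough(char *name)
{
        /* module load: register the requirement with no constraint */
        pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, name,
                               PM_QOS_DEFAULT_VALUE);

        /* jumbo MTU + early receive: cap CPU wakeup latency at 55 usec */
        pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, name, 55);

        /* standard MTU again: lift the constraint */
        pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, name,
                                  PM_QOS_DEFAULT_VALUE);

        /* module unload: drop the registration */
        pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, name);
}
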
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index e102332a6bee..b133dcf0e950 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -34,6 +34,9 @@ static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
34static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); 34static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
35static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); 35static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
36static s32 e1000_wait_autoneg(struct e1000_hw *hw); 36static s32 e1000_wait_autoneg(struct e1000_hw *hw);
37static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg);
38static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
39 u16 *data, bool read);
37 40
38/* Cable length tables */ 41/* Cable length tables */
39static const u16 e1000_m88_cable_length_table[] = 42static const u16 e1000_m88_cable_length_table[] =
@@ -465,6 +468,10 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
465 if (phy->disable_polarity_correction == 1) 468 if (phy->disable_polarity_correction == 1)
466 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; 469 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
467 470
471 /* Enable downshift on BM (disabled by default) */
472 if (phy->type == e1000_phy_bm)
473 phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT;
474
468 ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 475 ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
469 if (ret_val) 476 if (ret_val)
470 return ret_val; 477 return ret_val;
@@ -1776,6 +1783,10 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
1776 case IFE_C_E_PHY_ID: 1783 case IFE_C_E_PHY_ID:
1777 phy_type = e1000_phy_ife; 1784 phy_type = e1000_phy_ife;
1778 break; 1785 break;
1786 case BME1000_E_PHY_ID:
1787 case BME1000_E_PHY_ID_R2:
1788 phy_type = e1000_phy_bm;
1789 break;
1779 default: 1790 default:
1780 phy_type = e1000_phy_unknown; 1791 phy_type = e1000_phy_unknown;
1781 break; 1792 break;
@@ -1784,6 +1795,273 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
1784} 1795}
1785 1796
1786/** 1797/**
1798 * e1000e_determine_phy_address - Determines PHY address.
1799 * @hw: pointer to the HW structure
1800 *
1801 * This uses a trial and error method to loop through possible PHY
1802 * addresses. It tests each by reading the PHY ID registers and
1803 * checking for a match.
1804 **/
1805s32 e1000e_determine_phy_address(struct e1000_hw *hw)
1806{
1807 s32 ret_val = -E1000_ERR_PHY_TYPE;
1808 u32 phy_addr = 0;
1809 u32 i = 0;
1810 enum e1000_phy_type phy_type = e1000_phy_unknown;
1811
1812 do {
1813 for (phy_addr = 0; phy_addr < 4; phy_addr++) {
1814 hw->phy.addr = phy_addr;
1815 e1000e_get_phy_id(hw);
1816 phy_type = e1000e_get_phy_type_from_id(hw->phy.id);
1817
1818 /*
1819 * If phy_type is valid, break - we found our
1820 * PHY address
1821 */
1822 if (phy_type != e1000_phy_unknown) {
1823 ret_val = 0;
1824 break;
1825 }
1826 }
1827 i++;
1828 } while ((ret_val != 0) && (i < 100));
1829
1830 return ret_val;
1831}
1832
1833/**
1834 * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address
1835 * @page: page to access
1836 *
1837 * Returns the phy address for the page requested.
1838 **/
1839static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
1840{
1841 u32 phy_addr = 2;
1842
1843 if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31))
1844 phy_addr = 1;
1845
1846 return phy_addr;
1847}
1848
1849/**
1850 * e1000e_write_phy_reg_bm - Write BM PHY register
1851 * @hw: pointer to the HW structure
1852 * @offset: register offset to write to
1853 * @data: data to write at register offset
1854 *
1855 * Acquires semaphore, if necessary, then writes the data to PHY register
1856 * at the offset. Release any acquired semaphores before exiting.
1857 **/
1858s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
1859{
1860 s32 ret_val;
1861 u32 page_select = 0;
1862 u32 page = offset >> IGP_PAGE_SHIFT;
1863 u32 page_shift = 0;
1864
1865 /* Page 800 works differently than the rest so it has its own func */
1866 if (page == BM_WUC_PAGE) {
1867 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
1868 false);
1869 goto out;
1870 }
1871
1872 ret_val = hw->phy.ops.acquire_phy(hw);
1873 if (ret_val)
1874 goto out;
1875
1876 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
1877
1878 if (offset > MAX_PHY_MULTI_PAGE_REG) {
1879 /*
1880 * Page select is register 31 for phy address 1 and 22 for
1881 * phy address 2 and 3. Page select is shifted only for
1882 * phy address 1.
1883 */
1884 if (hw->phy.addr == 1) {
1885 page_shift = IGP_PAGE_SHIFT;
1886 page_select = IGP01E1000_PHY_PAGE_SELECT;
1887 } else {
1888 page_shift = 0;
1889 page_select = BM_PHY_PAGE_SELECT;
1890 }
1891
1892 /* Page is shifted left, PHY expects (page x 32) */
1893 ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
1894 (page << page_shift));
1895 if (ret_val) {
1896 hw->phy.ops.release_phy(hw);
1897 goto out;
1898 }
1899 }
1900
1901 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
1902 data);
1903
1904 hw->phy.ops.release_phy(hw);
1905
1906out:
1907 return ret_val;
1908}
1909
1910/**
1911 * e1000e_read_phy_reg_bm - Read BM PHY register
1912 * @hw: pointer to the HW structure
1913 * @offset: register offset to be read
1914 * @data: pointer to the read data
1915 *
1916 * Acquires semaphore, if necessary, then reads the PHY register at offset
1917 * and stores the retrieved information in data. Release any acquired
1918 * semaphores before exiting.
1919 **/
1920s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
1921{
1922 s32 ret_val;
1923 u32 page_select = 0;
1924 u32 page = offset >> IGP_PAGE_SHIFT;
1925 u32 page_shift = 0;
1926
1927 /* Page 800 works differently than the rest so it has its own func */
1928 if (page == BM_WUC_PAGE) {
1929 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
1930 true);
1931 goto out;
1932 }
1933
1934 ret_val = hw->phy.ops.acquire_phy(hw);
1935 if (ret_val)
1936 goto out;
1937
1938 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
1939
1940 if (offset > MAX_PHY_MULTI_PAGE_REG) {
1941 /*
1942 * Page select is register 31 for phy address 1 and 22 for
1943 * phy address 2 and 3. Page select is shifted only for
1944 * phy address 1.
1945 */
1946 if (hw->phy.addr == 1) {
1947 page_shift = IGP_PAGE_SHIFT;
1948 page_select = IGP01E1000_PHY_PAGE_SELECT;
1949 } else {
1950 page_shift = 0;
1951 page_select = BM_PHY_PAGE_SELECT;
1952 }
1953
1954 /* Page is shifted left, PHY expects (page x 32) */
1955 ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
1956 (page << page_shift));
1957 if (ret_val) {
1958 hw->phy.ops.release_phy(hw);
1959 goto out;
1960 }
1961 }
1962
1963 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
1964 data);
1965 hw->phy.ops.release_phy(hw);
1966
1967out:
1968 return ret_val;
1969}
1970
1971/**
1972 * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
1973 * @hw: pointer to the HW structure
1974 * @offset: register offset to be read or written
1975 * @data: pointer to the data to read or write
1976 * @read: determines if operation is read or write
1977 *
1978 * Acquires semaphore, if necessary, then reads or writes the PHY register
1979 * at offset, storing or retrieving the data. Release any acquired
1980 * semaphores before exiting. Note that the procedure to access the wakeup
1981 * registers is different. It works as such:
1982 * 1) Set page 769, register 17, bit 2 = 1
1983 * 2) Set page to 800 for host (801 if we were manageability)
1984 * 3) Write the address using the address opcode (0x11)
1985 * 4) Read or write the data using the data opcode (0x12)
1986 * 5) Restore 769_17.2 to its original value
1987 **/
1988static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
1989 u16 *data, bool read)
1990{
1991 s32 ret_val;
1992 u16 reg = ((u16)offset) & PHY_REG_MASK;
1993 u16 phy_reg = 0;
1994 u8 phy_acquired = 1;
1995
1996
1997 ret_val = hw->phy.ops.acquire_phy(hw);
1998 if (ret_val) {
1999 phy_acquired = 0;
2000 goto out;
2001 }
2002
2003 /* All operations in this function are phy address 1 */
2004 hw->phy.addr = 1;
2005
2006 /* Set page 769 */
2007 e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
2008 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
2009
2010 ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg);
2011 if (ret_val)
2012 goto out;
2013
2014 /* First clear bit 4 to avoid a power state change */
2015 phy_reg &= ~(BM_WUC_HOST_WU_BIT);
2016 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
2017 if (ret_val)
2018 goto out;
2019
2020 /* Write bit 2 = 1, and clear bit 4 to 769_17 */
2021 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG,
2022 phy_reg | BM_WUC_ENABLE_BIT);
2023 if (ret_val)
2024 goto out;
2025
2026 /* Select page 800 */
2027 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
2028 (BM_WUC_PAGE << IGP_PAGE_SHIFT));
2029
2030 /* Write the page 800 offset value using opcode 0x11 */
2031 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
2032 if (ret_val)
2033 goto out;
2034
2035 if (read) {
2036 /* Read the page 800 value using opcode 0x12 */
2037 ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
2038 data);
2039 } else {
2040 /* Write the page 800 value using opcode 0x12 */
2041 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
2042 *data);
2043 }
2044
2045 if (ret_val)
2046 goto out;
2047
2048 /*
2049 * Restore 769_17.2 to its original value
2050 * Set page 769
2051 */
2052 e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
2053 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
2054
2055 /* Clear 769_17.2 */
2056 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
2057
2058out:
2059 if (phy_acquired == 1)
2060 hw->phy.ops.release_phy(hw);
2061 return ret_val;
2062}
2063
2064/**
1787 * e1000e_commit_phy - Soft PHY reset 2065 * e1000e_commit_phy - Soft PHY reset
1788 * @hw: pointer to the HW structure 2066 * @hw: pointer to the HW structure
1789 * 2067 *
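
The page-800 procedure implemented by e1000_access_phy_wakeup_reg_bm() above condenses to the read sketch below; mdic_write()/mdic_read() stand in for e1000e_write_phy_reg_mdic()/e1000e_read_phy_reg_mdic(), and error checking is elided:

/* stand-in helpers: mdic_write()/mdic_read(); error handling elided */
static int wuc_read_sketch(struct e1000_hw *hw, u16 reg, u16 *data)
{
        u16 wuc_enable;

        mdic_write(hw, IGP01E1000_PHY_PAGE_SELECT, 769 << 5);   /* step 1 */
        mdic_read(hw, 17, &wuc_enable);
        mdic_write(hw, 17, wuc_enable | (1 << 2));              /* 769_17.2 = 1 */
        mdic_write(hw, IGP01E1000_PHY_PAGE_SELECT, 800 << 5);   /* step 2 */
        mdic_write(hw, 0x11, reg);                              /* step 3: address */
        mdic_read(hw, 0x12, data);                              /* step 4: data */
        mdic_write(hw, IGP01E1000_PHY_PAGE_SELECT, 769 << 5);   /* step 5 */
        return mdic_write(hw, 17, wuc_enable);                  /* restore */
}
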
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 2eb82aba4a8b..795c594a4b7c 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -202,7 +202,7 @@ static unsigned short start_code[] = {
202 0x0000,Cmd_MCast, 202 0x0000,Cmd_MCast,
203 0x0076, /* link to next command */ 203 0x0076, /* link to next command */
204#define CONF_NR_MULTICAST 0x44 204#define CONF_NR_MULTICAST 0x44
205 0x0000, /* number of multicast addresses */ 205 0x0000, /* number of bytes in multicast address(es) */
206#define CONF_MULTICAST 0x46 206#define CONF_MULTICAST 0x46
207 0x0000, 0x0000, 0x0000, /* some addresses */ 207 0x0000, 0x0000, 0x0000, /* some addresses */
208 0x0000, 0x0000, 0x0000, 208 0x0000, 0x0000, 0x0000,
@@ -1569,7 +1569,7 @@ static void eexp_hw_init586(struct net_device *dev)
1569 1569
1570static void eexp_setup_filter(struct net_device *dev) 1570static void eexp_setup_filter(struct net_device *dev)
1571{ 1571{
1572 struct dev_mc_list *dmi = dev->mc_list; 1572 struct dev_mc_list *dmi;
1573 unsigned short ioaddr = dev->base_addr; 1573 unsigned short ioaddr = dev->base_addr;
1574 int count = dev->mc_count; 1574 int count = dev->mc_count;
1575 int i; 1575 int i;
@@ -1580,9 +1580,9 @@ static void eexp_setup_filter(struct net_device *dev)
1580 } 1580 }
1581 1581
1582 outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR); 1582 outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR);
1583 outw(count, ioaddr+SHADOW(CONF_NR_MULTICAST)); 1583 outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST));
1584 for (i = 0; i < count; i++) { 1584 for (i = 0, dmi = dev->mc_list; i < count; i++, dmi = dmi->next) {
1585 unsigned short *data = (unsigned short *)dmi->dmi_addr; 1585 unsigned short *data;
1586 if (!dmi) { 1586 if (!dmi) {
1587 printk(KERN_INFO "%s: too few multicast addresses\n", dev->name); 1587 printk(KERN_INFO "%s: too few multicast addresses\n", dev->name);
1588 break; 1588 break;
@@ -1591,6 +1591,7 @@ static void eexp_setup_filter(struct net_device *dev)
1591 printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name); 1591 printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name);
1592 continue; 1592 continue;
1593 } 1593 }
1594 data = (unsigned short *)dmi->dmi_addr;
1594 outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR); 1595 outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR);
1595 outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i))); 1596 outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i)));
1596 outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR); 1597 outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR);
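
The eexpress fix above hands the 82586 the multicast filter length in bytes, six per Ethernet address, and advances the dmi pointer every iteration instead of reading the first entry repeatedly. A standalone sketch of that walk, with a reduced stand-in for dev_mc_list:

#include <stdio.h>

struct mc_entry {                       /* reduced stand-in for dev_mc_list */
        unsigned char addr[6];
        struct mc_entry *next;
};

/* the 82586 wants the filter length in bytes, six per address */
static unsigned int mc_filter_bytes(const struct mc_entry *list)
{
        unsigned int n = 0;
        const struct mc_entry *mc;

        for (mc = list; mc; mc = mc->next)      /* advance every iteration */
                n++;
        return 6 * n;
}

int main(void)
{
        struct mc_entry b = { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 }, NULL };
        struct mc_entry a = { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }, &b };

        printf("%u bytes\n", mc_filter_bytes(&a));      /* prints: 12 bytes */
        return 0;
}
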
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index ba75efc9f5b5..f0014cfbb275 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -194,7 +194,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
194 194
195 ret = of_address_to_resource(ofdev->node, 0, &res); 195 ret = of_address_to_resource(ofdev->node, 0, &res);
196 if (ret) 196 if (ret)
197 return ret; 197 goto out_res;
198 198
199 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); 199 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
200 200
@@ -236,6 +236,7 @@ out_free_irqs:
236 kfree(new_bus->irq); 236 kfree(new_bus->irq);
237out_unmap_regs: 237out_unmap_regs:
238 iounmap(fec->fecp); 238 iounmap(fec->fecp);
239out_res:
239out_fec: 240out_fec:
240 kfree(fec); 241 kfree(fec);
241out_mii: 242out_mii:
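
The mii-fec fix routes an early of_address_to_resource() failure through the unwind labels so the already-allocated fec structure is freed instead of leaked. The generic shape, with a stand-in resource lookup that always fails:

#include <errno.h>
#include <stdlib.h>

static int lookup_resource(void)        /* stand-in that always fails */
{
        return -ENODEV;
}

static int probe_sketch(void)
{
        int ret = -ENOMEM;
        void *fec = malloc(32);

        if (!fec)
                goto out;

        ret = lookup_resource();
        if (ret)
                goto out_res;           /* was "return ret" -- leaked fec */

        free(fec);                      /* a real probe keeps fec on success */
        return 0;

out_res:
        free(fec);
out:
        return ret;
}

int main(void)
{
        return probe_sketch() ? 1 : 0;
}
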
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 587afe7be689..6f22f068d6ee 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -138,6 +138,7 @@ static int gfar_poll(struct napi_struct *napi, int budget);
138static void gfar_netpoll(struct net_device *dev); 138static void gfar_netpoll(struct net_device *dev);
139#endif 139#endif
140int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); 140int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
141static int gfar_clean_tx_ring(struct net_device *dev);
141static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length); 142static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
142static void gfar_vlan_rx_register(struct net_device *netdev, 143static void gfar_vlan_rx_register(struct net_device *netdev,
143 struct vlan_group *grp); 144 struct vlan_group *grp);
@@ -1141,7 +1142,7 @@ static int gfar_close(struct net_device *dev)
1141} 1142}
1142 1143
1143/* Changes the mac address if the controller is not running. */ 1144/* Changes the mac address if the controller is not running. */
1144int gfar_set_mac_address(struct net_device *dev) 1145static int gfar_set_mac_address(struct net_device *dev)
1145{ 1146{
1146 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); 1147 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
1147 1148
@@ -1260,7 +1261,7 @@ static void gfar_timeout(struct net_device *dev)
1260} 1261}
1261 1262
1262/* Interrupt Handler for Transmit complete */ 1263/* Interrupt Handler for Transmit complete */
1263int gfar_clean_tx_ring(struct net_device *dev) 1264static int gfar_clean_tx_ring(struct net_device *dev)
1264{ 1265{
1265 struct txbd8 *bdp; 1266 struct txbd8 *bdp;
1266 struct gfar_private *priv = netdev_priv(dev); 1267 struct gfar_private *priv = netdev_priv(dev);
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index fd487be3993e..27f37c81e52c 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -782,5 +782,8 @@ extern void gfar_halt(struct net_device *dev);
782extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, 782extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
783 int enable, u32 regnum, u32 read); 783 int enable, u32 regnum, u32 read);
784void gfar_init_sysfs(struct net_device *dev); 784void gfar_init_sysfs(struct net_device *dev);
785int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id,
786 int regnum, u16 value);
787int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
785 788
786#endif /* __GIANFAR_H */ 789#endif /* __GIANFAR_H */
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index 230878b94190..5116f68e01b9 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -103,10 +103,10 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
103 103
104 spin_lock_irqsave(&priv->rxlock, flags); 104 spin_lock_irqsave(&priv->rxlock, flags);
105 if (length > priv->rx_buffer_size) 105 if (length > priv->rx_buffer_size)
106 return count; 106 goto out;
107 107
108 if (length == priv->rx_stash_size) 108 if (length == priv->rx_stash_size)
109 return count; 109 goto out;
110 110
111 priv->rx_stash_size = length; 111 priv->rx_stash_size = length;
112 112
@@ -125,6 +125,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
125 125
126 gfar_write(&priv->regs->attr, temp); 126 gfar_write(&priv->regs->attr, temp);
127 127
128out:
128 spin_unlock_irqrestore(&priv->rxlock, flags); 129 spin_unlock_irqrestore(&priv->rxlock, flags);
129 130
130 return count; 131 return count;
@@ -154,10 +155,10 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
154 155
155 spin_lock_irqsave(&priv->rxlock, flags); 156 spin_lock_irqsave(&priv->rxlock, flags);
156 if (index > priv->rx_stash_size) 157 if (index > priv->rx_stash_size)
157 return count; 158 goto out;
158 159
159 if (index == priv->rx_stash_index) 160 if (index == priv->rx_stash_index)
160 return count; 161 goto out;
161 162
162 priv->rx_stash_index = index; 163 priv->rx_stash_index = index;
163 164
@@ -166,6 +167,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
166 temp |= ATTRELI_EI(index); 167 temp |= ATTRELI_EI(index);
167 gfar_write(&priv->regs->attreli, flags); 168 gfar_write(&priv->regs->attreli, flags);
168 169
170out:
169 spin_unlock_irqrestore(&priv->rxlock, flags); 171 spin_unlock_irqrestore(&priv->rxlock, flags);
170 172
171 return count; 173 return count;
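
Both gianfar_sysfs stores above returned with rxlock still held when they bailed out early; the fix funnels every exit through a single unlock. Shape of the corrected store path, using the gfar_private fields from the hunk (sketch only, not the full function):

/* sketch of the fixed store path; gfar_private fields as in the hunk above */
static ssize_t gfar_store_sketch(struct gfar_private *priv,
                                 unsigned int length, size_t count)
{
        unsigned long flags;

        spin_lock_irqsave(&priv->rxlock, flags);
        if (length > priv->rx_buffer_size)
                goto out;               /* was: return count -- lock held */
        if (length == priv->rx_stash_size)
                goto out;               /* no change, but still must unlock */
        priv->rx_stash_size = length;
out:
        spin_unlock_irqrestore(&priv->rxlock, flags);
        return count;                   /* sysfs stores report bytes consumed */
}
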
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 2056cfc624dc..c36a03ae9bfb 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -450,7 +450,7 @@ static void macvlan_dellink(struct net_device *dev)
450 unregister_netdevice(dev); 450 unregister_netdevice(dev);
451 451
452 if (list_empty(&port->vlans)) 452 if (list_empty(&port->vlans))
453 macvlan_port_destroy(dev); 453 macvlan_port_destroy(port->dev);
454} 454}
455 455
456static struct rtnl_link_ops macvlan_link_ops __read_mostly = { 456static struct rtnl_link_ops macvlan_link_ops __read_mostly = {
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 381b36e5f64c..b7915cdcc6a5 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -91,6 +91,11 @@
91 */ 91 */
92#define PHY_ADDR_REG 0x0000 92#define PHY_ADDR_REG 0x0000
93#define SMI_REG 0x0004 93#define SMI_REG 0x0004
94#define WINDOW_BASE(i) (0x0200 + ((i) << 3))
95#define WINDOW_SIZE(i) (0x0204 + ((i) << 3))
96#define WINDOW_REMAP_HIGH(i) (0x0280 + ((i) << 2))
97#define WINDOW_BAR_ENABLE 0x0290
98#define WINDOW_PROTECT(i) (0x0294 + ((i) << 4))
94 99
95/* 100/*
96 * Per-port registers. 101 * Per-port registers.
@@ -507,9 +512,23 @@ struct mv643xx_mib_counters {
507 u32 late_collision; 512 u32 late_collision;
508}; 513};
509 514
515struct mv643xx_shared_private {
516 void __iomem *eth_base;
517
518 /* used to protect SMI_REG, which is shared across ports */
519 spinlock_t phy_lock;
520
521 u32 win_protect;
522
523 unsigned int t_clk;
524};
525
510struct mv643xx_private { 526struct mv643xx_private {
527 struct mv643xx_shared_private *shared;
511 int port_num; /* User Ethernet port number */ 528 int port_num; /* User Ethernet port number */
512 529
530 struct mv643xx_shared_private *shared_smi;
531
513 u32 rx_sram_addr; /* Base address of rx sram area */ 532 u32 rx_sram_addr; /* Base address of rx sram area */
514 u32 rx_sram_size; /* Size of rx sram area */ 533 u32 rx_sram_size; /* Size of rx sram area */
515 u32 tx_sram_addr; /* Base address of tx sram area */ 534 u32 tx_sram_addr; /* Base address of tx sram area */
@@ -614,19 +633,14 @@ static const struct ethtool_ops mv643xx_ethtool_ops;
614static char mv643xx_driver_name[] = "mv643xx_eth"; 633static char mv643xx_driver_name[] = "mv643xx_eth";
615static char mv643xx_driver_version[] = "1.0"; 634static char mv643xx_driver_version[] = "1.0";
616 635
617static void __iomem *mv643xx_eth_base;
618
619/* used to protect SMI_REG, which is shared across ports */
620static DEFINE_SPINLOCK(mv643xx_eth_phy_lock);
621
622static inline u32 rdl(struct mv643xx_private *mp, int offset) 636static inline u32 rdl(struct mv643xx_private *mp, int offset)
623{ 637{
624 return readl(mv643xx_eth_base + offset); 638 return readl(mp->shared->eth_base + offset);
625} 639}
626 640
627static inline void wrl(struct mv643xx_private *mp, int offset, u32 data) 641static inline void wrl(struct mv643xx_private *mp, int offset, u32 data)
628{ 642{
629 writel(data, mv643xx_eth_base + offset); 643 writel(data, mp->shared->eth_base + offset);
630} 644}
631 645
632/* 646/*
@@ -1119,7 +1133,6 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
1119 * 1133 *
1120 * INPUT: 1134 * INPUT:
1121 * struct mv643xx_private *mp Ethernet port 1135 * struct mv643xx_private *mp Ethernet port
1122 * unsigned int t_clk t_clk of the MV-643xx chip in HZ units
1123 * unsigned int delay Delay in usec 1136 * unsigned int delay Delay in usec
1124 * 1137 *
1125 * OUTPUT: 1138 * OUTPUT:
@@ -1130,10 +1143,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
1130 * 1143 *
1131 */ 1144 */
1132static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp, 1145static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp,
1133 unsigned int t_clk, unsigned int delay) 1146 unsigned int delay)
1134{ 1147{
1135 unsigned int port_num = mp->port_num; 1148 unsigned int port_num = mp->port_num;
1136 unsigned int coal = ((t_clk / 1000000) * delay) / 64; 1149 unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
1137 1150
1138 /* Set RX Coalescing mechanism */ 1151 /* Set RX Coalescing mechanism */
1139 wrl(mp, SDMA_CONFIG_REG(port_num), 1152 wrl(mp, SDMA_CONFIG_REG(port_num),
@@ -1158,7 +1171,6 @@ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp,
1158 * 1171 *
1159 * INPUT: 1172 * INPUT:
1160 * struct mv643xx_private *mp Ethernet port 1173 * struct mv643xx_private *mp Ethernet port
1161 * unsigned int t_clk t_clk of the MV-643xx chip in HZ units
1162 * unsigned int delay Delay in uSeconds 1174 * unsigned int delay Delay in uSeconds
1163 * 1175 *
1164 * OUTPUT: 1176 * OUTPUT:
@@ -1169,9 +1181,9 @@ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp,
1169 * 1181 *
1170 */ 1182 */
1171static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp, 1183static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp,
1172 unsigned int t_clk, unsigned int delay) 1184 unsigned int delay)
1173{ 1185{
1174 unsigned int coal = ((t_clk / 1000000) * delay) / 64; 1186 unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
1175 1187
1176 /* Set TX Coalescing mechanism */ 1188 /* Set TX Coalescing mechanism */
1177 wrl(mp, TX_FIFO_URGENT_THRESHOLD_REG(mp->port_num), coal << 4); 1189 wrl(mp, TX_FIFO_URGENT_THRESHOLD_REG(mp->port_num), coal << 4);
@@ -1413,11 +1425,11 @@ static int mv643xx_eth_open(struct net_device *dev)
1413 1425
1414#ifdef MV643XX_COAL 1426#ifdef MV643XX_COAL
1415 mp->rx_int_coal = 1427 mp->rx_int_coal =
1416 eth_port_set_rx_coal(mp, 133000000, MV643XX_RX_COAL); 1428 eth_port_set_rx_coal(mp, MV643XX_RX_COAL);
1417#endif 1429#endif
1418 1430
1419 mp->tx_int_coal = 1431 mp->tx_int_coal =
1420 eth_port_set_tx_coal(mp, 133000000, MV643XX_TX_COAL); 1432 eth_port_set_tx_coal(mp, MV643XX_TX_COAL);
1421 1433
1422 /* Unmask phy and link status changes interrupts */ 1434 /* Unmask phy and link status changes interrupts */
1423 wrl(mp, INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT); 1435 wrl(mp, INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT);
@@ -1827,6 +1839,11 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1827 return -ENODEV; 1839 return -ENODEV;
1828 } 1840 }
1829 1841
1842 if (pd->shared == NULL) {
1843 printk(KERN_ERR "No mv643xx_eth_platform_data->shared\n");
1844 return -ENODEV;
1845 }
1846
1830 dev = alloc_etherdev(sizeof(struct mv643xx_private)); 1847 dev = alloc_etherdev(sizeof(struct mv643xx_private));
1831 if (!dev) 1848 if (!dev)
1832 return -ENOMEM; 1849 return -ENOMEM;
@@ -1877,8 +1894,16 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1877 1894
1878 spin_lock_init(&mp->lock); 1895 spin_lock_init(&mp->lock);
1879 1896
1897 mp->shared = platform_get_drvdata(pd->shared);
1880 port_num = mp->port_num = pd->port_number; 1898 port_num = mp->port_num = pd->port_number;
1881 1899
1900 if (mp->shared->win_protect)
1901 wrl(mp, WINDOW_PROTECT(port_num), mp->shared->win_protect);
1902
1903 mp->shared_smi = mp->shared;
1904 if (pd->shared_smi != NULL)
1905 mp->shared_smi = platform_get_drvdata(pd->shared_smi);
1906
1882 /* set default config values */ 1907 /* set default config values */
1883 eth_port_uc_addr_get(mp, dev->dev_addr); 1908 eth_port_uc_addr_get(mp, dev->dev_addr);
1884 mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE; 1909 mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
@@ -1983,30 +2008,91 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
1983 return 0; 2008 return 0;
1984} 2009}
1985 2010
2011static void mv643xx_eth_conf_mbus_windows(struct mv643xx_shared_private *msp,
2012 struct mbus_dram_target_info *dram)
2013{
2014 void __iomem *base = msp->eth_base;
2015 u32 win_enable;
2016 u32 win_protect;
2017 int i;
2018
2019 for (i = 0; i < 6; i++) {
2020 writel(0, base + WINDOW_BASE(i));
2021 writel(0, base + WINDOW_SIZE(i));
2022 if (i < 4)
2023 writel(0, base + WINDOW_REMAP_HIGH(i));
2024 }
2025
2026 win_enable = 0x3f;
2027 win_protect = 0;
2028
2029 for (i = 0; i < dram->num_cs; i++) {
2030 struct mbus_dram_window *cs = dram->cs + i;
2031
2032 writel((cs->base & 0xffff0000) |
2033 (cs->mbus_attr << 8) |
2034 dram->mbus_dram_target_id, base + WINDOW_BASE(i));
2035 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
2036
2037 win_enable &= ~(1 << i);
2038 win_protect |= 3 << (2 * i);
2039 }
2040
2041 writel(win_enable, base + WINDOW_BAR_ENABLE);
2042 msp->win_protect = win_protect;
2043}
2044
1986static int mv643xx_eth_shared_probe(struct platform_device *pdev) 2045static int mv643xx_eth_shared_probe(struct platform_device *pdev)
1987{ 2046{
1988 static int mv643xx_version_printed = 0; 2047 static int mv643xx_version_printed = 0;
2048 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
2049 struct mv643xx_shared_private *msp;
1989 struct resource *res; 2050 struct resource *res;
2051 int ret;
1990 2052
1991 if (!mv643xx_version_printed++) 2053 if (!mv643xx_version_printed++)
1992 printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n"); 2054 printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");
1993 2055
2056 ret = -EINVAL;
1994 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2057 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1995 if (res == NULL) 2058 if (res == NULL)
1996 return -ENODEV; 2059 goto out;
1997 2060
1998 mv643xx_eth_base = ioremap(res->start, res->end - res->start + 1); 2061 ret = -ENOMEM;
1999 if (mv643xx_eth_base == NULL) 2062 msp = kmalloc(sizeof(*msp), GFP_KERNEL);
2000 return -ENOMEM; 2063 if (msp == NULL)
2064 goto out;
2065 memset(msp, 0, sizeof(*msp));
2066
2067 msp->eth_base = ioremap(res->start, res->end - res->start + 1);
2068 if (msp->eth_base == NULL)
2069 goto out_free;
2070
2071 spin_lock_init(&msp->phy_lock);
2072 msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
2073
2074 platform_set_drvdata(pdev, msp);
2075
2076 /*
2077 * (Re-)program MBUS remapping windows if we are asked to.
2078 */
2079 if (pd != NULL && pd->dram != NULL)
2080 mv643xx_eth_conf_mbus_windows(msp, pd->dram);
2001 2081
2002 return 0; 2082 return 0;
2003 2083
2084out_free:
2085 kfree(msp);
2086out:
2087 return ret;
2004} 2088}
2005 2089
2006static int mv643xx_eth_shared_remove(struct platform_device *pdev) 2090static int mv643xx_eth_shared_remove(struct platform_device *pdev)
2007{ 2091{
2008 iounmap(mv643xx_eth_base); 2092 struct mv643xx_shared_private *msp = platform_get_drvdata(pdev);
2009 mv643xx_eth_base = NULL; 2093
2094 iounmap(msp->eth_base);
2095 kfree(msp);
2010 2096
2011 return 0; 2097 return 0;
2012} 2098}
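
A worked example of the register encoding mv643xx_eth_conf_mbus_windows() applies above, with illustrative chip-select values:

	struct mbus_dram_window cs = {
		.base		= 0x00000000,	/* 256 MiB DRAM window at 0 */
		.size		= 0x10000000,
		.mbus_attr	= 0x0e,		/* sample CS attribute */
	};
	u32 target   = 0;	/* sample dram->mbus_dram_target_id */
	u32 base_reg = (cs.base & 0xffff0000) | (cs.mbus_attr << 8) | target;
	u32 size_reg = (cs.size - 1) & 0xffff0000;	/* == 0x0fff0000 */
	/* for window 0: win_enable bit 0 is cleared (window active) and
	 * win_protect bits 1:0 are set to 3 (read/write access) */
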
@@ -2906,15 +2992,16 @@ static void eth_port_reset(struct mv643xx_private *mp)
2906static void eth_port_read_smi_reg(struct mv643xx_private *mp, 2992static void eth_port_read_smi_reg(struct mv643xx_private *mp,
2907 unsigned int phy_reg, unsigned int *value) 2993 unsigned int phy_reg, unsigned int *value)
2908{ 2994{
2995 void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG;
2909 int phy_addr = ethernet_phy_get(mp); 2996 int phy_addr = ethernet_phy_get(mp);
2910 unsigned long flags; 2997 unsigned long flags;
2911 int i; 2998 int i;
2912 2999
2913 /* the SMI register is a shared resource */ 3000 /* the SMI register is a shared resource */
2914 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); 3001 spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);
2915 3002
2916 /* wait for the SMI register to become available */ 3003 /* wait for the SMI register to become available */
2917 for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) { 3004 for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) {
2918 if (i == PHY_WAIT_ITERATIONS) { 3005 if (i == PHY_WAIT_ITERATIONS) {
2919 printk("%s: PHY busy timeout\n", mp->dev->name); 3006 printk("%s: PHY busy timeout\n", mp->dev->name);
2920 goto out; 3007 goto out;
@@ -2922,11 +3009,11 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp,
2922 udelay(PHY_WAIT_MICRO_SECONDS); 3009 udelay(PHY_WAIT_MICRO_SECONDS);
2923 } 3010 }
2924 3011
2925 wrl(mp, SMI_REG, 3012 writel((phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ,
2926 (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ); 3013 smi_reg);
2927 3014
2928 /* now wait for the data to be valid */ 3015 /* now wait for the data to be valid */
2929 for (i = 0; !(rdl(mp, SMI_REG) & ETH_SMI_READ_VALID); i++) { 3016 for (i = 0; !(readl(smi_reg) & ETH_SMI_READ_VALID); i++) {
2930 if (i == PHY_WAIT_ITERATIONS) { 3017 if (i == PHY_WAIT_ITERATIONS) {
2931 printk("%s: PHY read timeout\n", mp->dev->name); 3018 printk("%s: PHY read timeout\n", mp->dev->name);
2932 goto out; 3019 goto out;
@@ -2934,9 +3021,9 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp,
2934 udelay(PHY_WAIT_MICRO_SECONDS); 3021 udelay(PHY_WAIT_MICRO_SECONDS);
2935 } 3022 }
2936 3023
2937 *value = rdl(mp, SMI_REG) & 0xffff; 3024 *value = readl(smi_reg) & 0xffff;
2938out: 3025out:
2939 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); 3026 spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
2940} 3027}
2941 3028
2942/* 3029/*
@@ -2962,17 +3049,16 @@ out:
2962static void eth_port_write_smi_reg(struct mv643xx_private *mp, 3049static void eth_port_write_smi_reg(struct mv643xx_private *mp,
2963 unsigned int phy_reg, unsigned int value) 3050 unsigned int phy_reg, unsigned int value)
2964{ 3051{
2965 int phy_addr; 3052 void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG;
2966 int i; 3053 int phy_addr = ethernet_phy_get(mp);
2967 unsigned long flags; 3054 unsigned long flags;
2968 3055 int i;
2969 phy_addr = ethernet_phy_get(mp);
2970 3056
2971 /* the SMI register is a shared resource */ 3057 /* the SMI register is a shared resource */
2972 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); 3058 spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);
2973 3059
2974 /* wait for the SMI register to become available */ 3060 /* wait for the SMI register to become available */
2975 for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) { 3061 for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) {
2976 if (i == PHY_WAIT_ITERATIONS) { 3062 if (i == PHY_WAIT_ITERATIONS) {
2977 printk("%s: PHY busy timeout\n", mp->dev->name); 3063 printk("%s: PHY busy timeout\n", mp->dev->name);
2978 goto out; 3064 goto out;
@@ -2980,10 +3066,10 @@ static void eth_port_write_smi_reg(struct mv643xx_private *mp,
2980 udelay(PHY_WAIT_MICRO_SECONDS); 3066 udelay(PHY_WAIT_MICRO_SECONDS);
2981 } 3067 }
2982 3068
2983 wrl(mp, SMI_REG, (phy_addr << 16) | (phy_reg << 21) | 3069 writel((phy_addr << 16) | (phy_reg << 21) |
2984 ETH_SMI_OPCODE_WRITE | (value & 0xffff)); 3070 ETH_SMI_OPCODE_WRITE | (value & 0xffff), smi_reg);
2985out: 3071out:
2986 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); 3072 spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
2987} 3073}
2988 3074
2989/* 3075/*
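
Both SMI paths above now poll the shared SMI register with readl() under the per-shared-block phy_lock instead of the old global lock. The bounded busy-wait they duplicate could be factored as below; smi_wait() is a hypothetical helper, not part of this patch:

	static int smi_wait(void __iomem *smi_reg, u32 done_mask, int set)
	{
		int i;

		for (i = 0; i < PHY_WAIT_ITERATIONS; i++) {
			u32 val = readl(smi_reg);

			if (set ? (val & done_mask) : !(val & done_mask))
				return 0;
			udelay(PHY_WAIT_MICRO_SECONDS);
		}
		return -ETIMEDOUT;	/* mirrors the printk+goto above */
	}
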
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 4eb322e5273d..a1c454dbc164 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -22,12 +22,8 @@
22 *************************************************************************/ 22 *************************************************************************/
23 23
24#define DRV_NAME "pcnet32" 24#define DRV_NAME "pcnet32"
25#ifdef CONFIG_PCNET32_NAPI 25#define DRV_VERSION "1.35"
26#define DRV_VERSION "1.34-NAPI" 26#define DRV_RELDATE "21.Apr.2008"
27#else
28#define DRV_VERSION "1.34"
29#endif
30#define DRV_RELDATE "14.Aug.2007"
31#define PFX DRV_NAME ": " 27#define PFX DRV_NAME ": "
32 28
33static const char *const version = 29static const char *const version =
@@ -445,30 +441,24 @@ static struct pcnet32_access pcnet32_dwio = {
445 441
446static void pcnet32_netif_stop(struct net_device *dev) 442static void pcnet32_netif_stop(struct net_device *dev)
447{ 443{
448#ifdef CONFIG_PCNET32_NAPI
449 struct pcnet32_private *lp = netdev_priv(dev); 444 struct pcnet32_private *lp = netdev_priv(dev);
450#endif 445
451 dev->trans_start = jiffies; 446 dev->trans_start = jiffies;
452#ifdef CONFIG_PCNET32_NAPI
453 napi_disable(&lp->napi); 447 napi_disable(&lp->napi);
454#endif
455 netif_tx_disable(dev); 448 netif_tx_disable(dev);
456} 449}
457 450
458static void pcnet32_netif_start(struct net_device *dev) 451static void pcnet32_netif_start(struct net_device *dev)
459{ 452{
460#ifdef CONFIG_PCNET32_NAPI
461 struct pcnet32_private *lp = netdev_priv(dev); 453 struct pcnet32_private *lp = netdev_priv(dev);
462 ulong ioaddr = dev->base_addr; 454 ulong ioaddr = dev->base_addr;
463 u16 val; 455 u16 val;
464#endif 456
465 netif_wake_queue(dev); 457 netif_wake_queue(dev);
466#ifdef CONFIG_PCNET32_NAPI
467 val = lp->a.read_csr(ioaddr, CSR3); 458 val = lp->a.read_csr(ioaddr, CSR3);
468 val &= 0x00ff; 459 val &= 0x00ff;
469 lp->a.write_csr(ioaddr, CSR3, val); 460 lp->a.write_csr(ioaddr, CSR3, val);
470 napi_enable(&lp->napi); 461 napi_enable(&lp->napi);
471#endif
472} 462}
473 463
474/* 464/*
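
With the CONFIG_PCNET32_NAPI conditionals removed, NAPI is always compiled in and the stop/start helpers reduce to an unconditional pairing. Usage sketch, restating what the two functions above now guarantee:

	pcnet32_netif_stop(dev);   /* napi_disable() + netif_tx_disable() */
	/* ... chip quiesced: safe to reset rings or run a selftest ... */
	pcnet32_netif_start(dev);  /* wake queue, unmask CSR3, napi_enable() */
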
@@ -911,11 +901,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
911 rc = 1; /* default to fail */ 901 rc = 1; /* default to fail */
912 902
913 if (netif_running(dev)) 903 if (netif_running(dev))
914#ifdef CONFIG_PCNET32_NAPI
915 pcnet32_netif_stop(dev); 904 pcnet32_netif_stop(dev);
916#else
917 pcnet32_close(dev);
918#endif
919 905
920 spin_lock_irqsave(&lp->lock, flags); 906 spin_lock_irqsave(&lp->lock, flags);
921 lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ 907 lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
@@ -1046,7 +1032,6 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
1046 x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ 1032 x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
1047 a->write_bcr(ioaddr, 32, (x & ~0x0002)); 1033 a->write_bcr(ioaddr, 32, (x & ~0x0002));
1048 1034
1049#ifdef CONFIG_PCNET32_NAPI
1050 if (netif_running(dev)) { 1035 if (netif_running(dev)) {
1051 pcnet32_netif_start(dev); 1036 pcnet32_netif_start(dev);
1052 pcnet32_restart(dev, CSR0_NORMAL); 1037 pcnet32_restart(dev, CSR0_NORMAL);
@@ -1055,16 +1040,6 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
1055 lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ 1040 lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
1056 } 1041 }
1057 spin_unlock_irqrestore(&lp->lock, flags); 1042 spin_unlock_irqrestore(&lp->lock, flags);
1058#else
1059 if (netif_running(dev)) {
1060 spin_unlock_irqrestore(&lp->lock, flags);
1061 pcnet32_open(dev);
1062 } else {
1063 pcnet32_purge_rx_ring(dev);
1064 lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
1065 spin_unlock_irqrestore(&lp->lock, flags);
1066 }
1067#endif
1068 1043
1069 return (rc); 1044 return (rc);
1070} /* end pcnet32_loopback_test */ 1045} /* end pcnet32_loopback_test */
@@ -1270,11 +1245,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
1270 } 1245 }
1271 dev->stats.rx_bytes += skb->len; 1246 dev->stats.rx_bytes += skb->len;
1272 skb->protocol = eth_type_trans(skb, dev); 1247 skb->protocol = eth_type_trans(skb, dev);
1273#ifdef CONFIG_PCNET32_NAPI
1274 netif_receive_skb(skb); 1248 netif_receive_skb(skb);
1275#else
1276 netif_rx(skb);
1277#endif
1278 dev->last_rx = jiffies; 1249 dev->last_rx = jiffies;
1279 dev->stats.rx_packets++; 1250 dev->stats.rx_packets++;
1280 return; 1251 return;
@@ -1403,7 +1374,6 @@ static int pcnet32_tx(struct net_device *dev)
1403 return must_restart; 1374 return must_restart;
1404} 1375}
1405 1376
1406#ifdef CONFIG_PCNET32_NAPI
1407static int pcnet32_poll(struct napi_struct *napi, int budget) 1377static int pcnet32_poll(struct napi_struct *napi, int budget)
1408{ 1378{
1409 struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi); 1379 struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi);
@@ -1442,7 +1412,6 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
1442 } 1412 }
1443 return work_done; 1413 return work_done;
1444} 1414}
1445#endif
1446 1415
1447#define PCNET32_REGS_PER_PHY 32 1416#define PCNET32_REGS_PER_PHY 32
1448#define PCNET32_MAX_PHYS 32 1417#define PCNET32_MAX_PHYS 32
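
pcnet32_poll() is likewise built unconditionally now. A hedged sketch of the NAPI poll contract it follows, using the 2.6.25-era calls this driver already relies on (the interrupt re-enable step is paraphrased, not copied from the driver):

	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct pcnet32_private *lp =
			container_of(napi, struct pcnet32_private, napi);
		int work_done = pcnet32_rx(lp->dev, budget);

		if (work_done < budget) {
			/* ring drained: leave polled mode and unmask
			 * the chip interrupts (CSR3 in this driver) */
			netif_rx_complete(lp->dev, napi);
		}
		return work_done;
	}
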
@@ -1864,9 +1833,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1864 /* napi.weight is used in both the napi and non-napi cases */ 1833 /* napi.weight is used in both the napi and non-napi cases */
1865 lp->napi.weight = lp->rx_ring_size / 2; 1834 lp->napi.weight = lp->rx_ring_size / 2;
1866 1835
1867#ifdef CONFIG_PCNET32_NAPI
1868 netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2); 1836 netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2);
1869#endif
1870 1837
1871 if (fdx && !(lp->options & PCNET32_PORT_ASEL) && 1838 if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
1872 ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) 1839 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
@@ -2297,9 +2264,7 @@ static int pcnet32_open(struct net_device *dev)
2297 goto err_free_ring; 2264 goto err_free_ring;
2298 } 2265 }
2299 2266
2300#ifdef CONFIG_PCNET32_NAPI
2301 napi_enable(&lp->napi); 2267 napi_enable(&lp->napi);
2302#endif
2303 2268
2304 /* Re-initialize the PCNET32, and start it when done. */ 2269 /* Re-initialize the PCNET32, and start it when done. */
2305 lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); 2270 lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
@@ -2623,7 +2588,6 @@ pcnet32_interrupt(int irq, void *dev_id)
2623 dev->name, csr0); 2588 dev->name, csr0);
2624 /* unlike for the lance, there is no restart needed */ 2589 /* unlike for the lance, there is no restart needed */
2625 } 2590 }
2626#ifdef CONFIG_PCNET32_NAPI
2627 if (netif_rx_schedule_prep(dev, &lp->napi)) { 2591 if (netif_rx_schedule_prep(dev, &lp->napi)) {
2628 u16 val; 2592 u16 val;
2629 /* set interrupt masks */ 2593 /* set interrupt masks */
@@ -2634,24 +2598,9 @@ pcnet32_interrupt(int irq, void *dev_id)
2634 __netif_rx_schedule(dev, &lp->napi); 2598 __netif_rx_schedule(dev, &lp->napi);
2635 break; 2599 break;
2636 } 2600 }
2637#else
2638 pcnet32_rx(dev, lp->napi.weight);
2639 if (pcnet32_tx(dev)) {
2640 /* reset the chip to clear the error condition, then restart */
2641 lp->a.reset(ioaddr);
2642 lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
2643 pcnet32_restart(dev, CSR0_START);
2644 netif_wake_queue(dev);
2645 }
2646#endif
2647 csr0 = lp->a.read_csr(ioaddr, CSR0); 2601 csr0 = lp->a.read_csr(ioaddr, CSR0);
2648 } 2602 }
2649 2603
2650#ifndef CONFIG_PCNET32_NAPI
2651 /* Set interrupt enable. */
2652 lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
2653#endif
2654
2655 if (netif_msg_intr(lp)) 2604 if (netif_msg_intr(lp))
2656 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n", 2605 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
2657 dev->name, lp->a.read_csr(ioaddr, CSR0)); 2606 dev->name, lp->a.read_csr(ioaddr, CSR0));
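
The interrupt handler keeps only the prep/schedule idiom: netif_rx_schedule_prep() claims polling exactly once, CSR3 masks further device interrupts, then __netif_rx_schedule() raises the softirq. Condensed below; CSR3_POLL_MASK_BITS is a placeholder for the mask value this excerpt elides:

	if (netif_rx_schedule_prep(dev, &lp->napi)) {
		u16 val = lp->a.read_csr(ioaddr, CSR3);

		val |= CSR3_POLL_MASK_BITS;	/* hypothetical name */
		lp->a.write_csr(ioaddr, CSR3, val);
		__netif_rx_schedule(dev, &lp->napi);
	}
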
@@ -2670,9 +2619,7 @@ static int pcnet32_close(struct net_device *dev)
2670 del_timer_sync(&lp->watchdog_timer); 2619 del_timer_sync(&lp->watchdog_timer);
2671 2620
2672 netif_stop_queue(dev); 2621 netif_stop_queue(dev);
2673#ifdef CONFIG_PCNET32_NAPI
2674 napi_disable(&lp->napi); 2622 napi_disable(&lp->napi);
2675#endif
2676 2623
2677 spin_lock_irqsave(&lp->lock, flags); 2624 spin_lock_irqsave(&lp->lock, flags);
2678 2625
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3c18bb594957..45cc2914d347 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -547,7 +547,7 @@ static void phy_force_reduction(struct phy_device *phydev)
547 * Must not be called from interrupt context, or while the 547 * Must not be called from interrupt context, or while the
548 * phydev->lock is held. 548 * phydev->lock is held.
549 */ 549 */
550void phy_error(struct phy_device *phydev) 550static void phy_error(struct phy_device *phydev)
551{ 551{
552 mutex_lock(&phydev->lock); 552 mutex_lock(&phydev->lock);
553 phydev->state = PHY_HALTED; 553 phydev->state = PHY_HALTED;
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index a59c1f224aa8..2511ca7a12aa 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -434,10 +434,6 @@ static int uli526x_open(struct net_device *dev)
434 434
435 ULI526X_DBUG(0, "uli526x_open", 0); 435 ULI526X_DBUG(0, "uli526x_open", 0);
436 436
437 ret = request_irq(dev->irq, &uli526x_interrupt, IRQF_SHARED, dev->name, dev);
438 if (ret)
439 return ret;
440
441 /* system variable init */ 437 /* system variable init */
442 db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set; 438 db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set;
443 db->tx_packet_cnt = 0; 439 db->tx_packet_cnt = 0;
@@ -456,6 +452,10 @@ static int uli526x_open(struct net_device *dev)
456 /* Initialize ULI526X board */ 452 /* Initialize ULI526X board */
457 uli526x_init(dev); 453 uli526x_init(dev);
458 454
455 ret = request_irq(dev->irq, &uli526x_interrupt, IRQF_SHARED, dev->name, dev);
456 if (ret)
457 return ret;
458
459 /* Active System Interface */ 459 /* Active System Interface */
460 netif_wake_queue(dev); 460 netif_wake_queue(dev);
461 461
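
Requesting the IRQ only after uli526x_init() closes a window where a shared interrupt line could run the handler against a half-initialized board. The open() ordering this hunk establishes, condensed:

	uli526x_init(dev);				/* program the chip first */
	ret = request_irq(dev->irq, &uli526x_interrupt,	/* then expose the handler */
			  IRQF_SHARED, dev->name, dev);
	if (ret)
		return ret;
	netif_wake_queue(dev);				/* finally open the queue */
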
@@ -1368,6 +1368,12 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1368 * This setup frame initialize ULI526X address filter mode 1368 * This setup frame initialize ULI526X address filter mode
1369 */ 1369 */
1370 1370
1371#ifdef __BIG_ENDIAN
1372#define FLT_SHIFT 16
1373#else
1374#define FLT_SHIFT 0
1375#endif
1376
1371static void send_filter_frame(struct net_device *dev, int mc_cnt) 1377static void send_filter_frame(struct net_device *dev, int mc_cnt)
1372{ 1378{
1373 struct uli526x_board_info *db = netdev_priv(dev); 1379 struct uli526x_board_info *db = netdev_priv(dev);
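
FLT_SHIFT makes the setup frame endian-neutral: each 16-bit address halfword is stored into a 32-bit entry, and the chip consumes the first two bytes of that entry in memory. A worked example for the first two MAC bytes a0,a1:

	u16 half  = addrptr[0];		/* LE reads a1<<8|a0, BE reads a0<<8|a1 */
	u32 entry = half << FLT_SHIFT;	/* FLT_SHIFT: 0 on LE, 16 on BE */
	/* LE store of 0x0000a1a0 lays out bytes a0 a1 00 00;
	 * BE store of 0xa0a10000 lays out bytes a0 a1 00 00 --
	 * identical DMA buffer contents either way */
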
@@ -1384,27 +1390,27 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
1384 1390
1385 /* Node address */ 1391 /* Node address */
1386 addrptr = (u16 *) dev->dev_addr; 1392 addrptr = (u16 *) dev->dev_addr;
1387 *suptr++ = addrptr[0]; 1393 *suptr++ = addrptr[0] << FLT_SHIFT;
1388 *suptr++ = addrptr[1]; 1394 *suptr++ = addrptr[1] << FLT_SHIFT;
1389 *suptr++ = addrptr[2]; 1395 *suptr++ = addrptr[2] << FLT_SHIFT;
1390 1396
1391 /* broadcast address */ 1397 /* broadcast address */
1392 *suptr++ = 0xffff; 1398 *suptr++ = 0xffff << FLT_SHIFT;
1393 *suptr++ = 0xffff; 1399 *suptr++ = 0xffff << FLT_SHIFT;
1394 *suptr++ = 0xffff; 1400 *suptr++ = 0xffff << FLT_SHIFT;
1395 1401
1396 /* fit the multicast address */ 1402 /* fit the multicast address */
1397 for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) { 1403 for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
1398 addrptr = (u16 *) mcptr->dmi_addr; 1404 addrptr = (u16 *) mcptr->dmi_addr;
1399 *suptr++ = addrptr[0]; 1405 *suptr++ = addrptr[0] << FLT_SHIFT;
1400 *suptr++ = addrptr[1]; 1406 *suptr++ = addrptr[1] << FLT_SHIFT;
1401 *suptr++ = addrptr[2]; 1407 *suptr++ = addrptr[2] << FLT_SHIFT;
1402 } 1408 }
1403 1409
1404 for (; i<14; i++) { 1410 for (; i<14; i++) {
1405 *suptr++ = 0xffff; 1411 *suptr++ = 0xffff << FLT_SHIFT;
1406 *suptr++ = 0xffff; 1412 *suptr++ = 0xffff << FLT_SHIFT;
1407 *suptr++ = 0xffff; 1413 *suptr++ = 0xffff << FLT_SHIFT;
1408 } 1414 }
1409 1415
1410 /* prepare the setup frame */ 1416 /* prepare the setup frame */
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 281ce3d39532..ca0bdac07a78 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -62,7 +62,6 @@
62#endif /* UGETH_VERBOSE_DEBUG */ 62#endif /* UGETH_VERBOSE_DEBUG */
63#define UGETH_MSG_DEFAULT (NETIF_MSG_IFUP << 1 ) - 1 63#define UGETH_MSG_DEFAULT (NETIF_MSG_IFUP << 1 ) - 1
64 64
65void uec_set_ethtool_ops(struct net_device *netdev);
66 65
67static DEFINE_SPINLOCK(ugeth_lock); 66static DEFINE_SPINLOCK(ugeth_lock);
68 67
@@ -216,7 +215,8 @@ static struct list_head *dequeue(struct list_head *lh)
216 } 215 }
217} 216}
218 217
219static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 *bd) 218static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
219 u8 __iomem *bd)
220{ 220{
221 struct sk_buff *skb = NULL; 221 struct sk_buff *skb = NULL;
222 222
@@ -236,21 +236,22 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 *bd)
236 236
237 skb->dev = ugeth->dev; 237 skb->dev = ugeth->dev;
238 238
239 out_be32(&((struct qe_bd *)bd)->buf, 239 out_be32(&((struct qe_bd __iomem *)bd)->buf,
240 dma_map_single(NULL, 240 dma_map_single(NULL,
241 skb->data, 241 skb->data,
242 ugeth->ug_info->uf_info.max_rx_buf_length + 242 ugeth->ug_info->uf_info.max_rx_buf_length +
243 UCC_GETH_RX_DATA_BUF_ALIGNMENT, 243 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
244 DMA_FROM_DEVICE)); 244 DMA_FROM_DEVICE));
245 245
246 out_be32((u32 *)bd, (R_E | R_I | (in_be32((u32 *)bd) & R_W))); 246 out_be32((u32 __iomem *)bd,
247 (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W)));
247 248
248 return skb; 249 return skb;
249} 250}
250 251
251static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ) 252static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
252{ 253{
253 u8 *bd; 254 u8 __iomem *bd;
254 u32 bd_status; 255 u32 bd_status;
255 struct sk_buff *skb; 256 struct sk_buff *skb;
256 int i; 257 int i;
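
The pervasive change in this file is volatile pointers becoming __iomem ones, so every descriptor and parameter-RAM access goes through the PowerPC accessors (in_be32/out_be32, in_8/out_8, memset_io), which both order the accesses and fix the byte order. The BD handoff pattern used above, condensed (dma_addr is illustrative):

	u8 __iomem *bd = ugeth->rxBd[rxQ];		/* current descriptor */
	u32 status = in_be32((u32 __iomem *)bd);	/* status+length word */

	out_be32(&((struct qe_bd __iomem *)bd)->buf, dma_addr);
	/* give the BD to hardware last, preserving only the Wrap bit */
	out_be32((u32 __iomem *)bd, R_E | R_I | (status & R_W));
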
@@ -259,7 +260,7 @@ static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
259 i = 0; 260 i = 0;
260 261
261 do { 262 do {
262 bd_status = in_be32((u32*)bd); 263 bd_status = in_be32((u32 __iomem *)bd);
263 skb = get_new_skb(ugeth, bd); 264 skb = get_new_skb(ugeth, bd);
264 265
265 if (!skb) /* If can not allocate data buffer, 266 if (!skb) /* If can not allocate data buffer,
@@ -277,7 +278,7 @@ static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
277} 278}
278 279
279static int fill_init_enet_entries(struct ucc_geth_private *ugeth, 280static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
280 volatile u32 *p_start, 281 u32 *p_start,
281 u8 num_entries, 282 u8 num_entries,
282 u32 thread_size, 283 u32 thread_size,
283 u32 thread_alignment, 284 u32 thread_alignment,
@@ -316,7 +317,7 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
316} 317}
317 318
318static int return_init_enet_entries(struct ucc_geth_private *ugeth, 319static int return_init_enet_entries(struct ucc_geth_private *ugeth,
319 volatile u32 *p_start, 320 u32 *p_start,
320 u8 num_entries, 321 u8 num_entries,
321 enum qe_risc_allocation risc, 322 enum qe_risc_allocation risc,
322 int skip_page_for_first_entry) 323 int skip_page_for_first_entry)
@@ -326,21 +327,22 @@ static int return_init_enet_entries(struct ucc_geth_private *ugeth,
326 int snum; 327 int snum;
327 328
328 for (i = 0; i < num_entries; i++) { 329 for (i = 0; i < num_entries; i++) {
330 u32 val = *p_start;
331
329 /* Check that this entry was actually valid -- 332 /* Check that this entry was actually valid --
330 needed in case failed in allocations */ 333 needed in case failed in allocations */
331 if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) { 334 if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
332 snum = 335 snum =
333 (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >> 336 (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
334 ENET_INIT_PARAM_SNUM_SHIFT; 337 ENET_INIT_PARAM_SNUM_SHIFT;
335 qe_put_snum((u8) snum); 338 qe_put_snum((u8) snum);
336 if (!((i == 0) && skip_page_for_first_entry)) { 339 if (!((i == 0) && skip_page_for_first_entry)) {
337 /* First entry of Rx does not have page */ 340 /* First entry of Rx does not have page */
338 init_enet_offset = 341 init_enet_offset =
339 (in_be32(p_start) & 342 (val & ENET_INIT_PARAM_PTR_MASK);
340 ENET_INIT_PARAM_PTR_MASK);
341 qe_muram_free(init_enet_offset); 343 qe_muram_free(init_enet_offset);
342 } 344 }
343 *(p_start++) = 0; /* Just for cosmetics */ 345 *p_start++ = 0;
344 } 346 }
345 } 347 }
346 348
@@ -349,7 +351,7 @@ static int return_init_enet_entries(struct ucc_geth_private *ugeth,
349 351
350#ifdef DEBUG 352#ifdef DEBUG
351static int dump_init_enet_entries(struct ucc_geth_private *ugeth, 353static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
352 volatile u32 *p_start, 354 u32 __iomem *p_start,
353 u8 num_entries, 355 u8 num_entries,
354 u32 thread_size, 356 u32 thread_size,
355 enum qe_risc_allocation risc, 357 enum qe_risc_allocation risc,
@@ -360,11 +362,13 @@ static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
360 int snum; 362 int snum;
361 363
362 for (i = 0; i < num_entries; i++) { 364 for (i = 0; i < num_entries; i++) {
365 u32 val = in_be32(p_start);
366
363 /* Check that this entry was actually valid -- 367 /* Check that this entry was actually valid --
364 needed in case failed in allocations */ 368 needed in case failed in allocations */
365 if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) { 369 if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
366 snum = 370 snum =
367 (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >> 371 (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
368 ENET_INIT_PARAM_SNUM_SHIFT; 372 ENET_INIT_PARAM_SNUM_SHIFT;
369 qe_put_snum((u8) snum); 373 qe_put_snum((u8) snum);
370 if (!((i == 0) && skip_page_for_first_entry)) { 374 if (!((i == 0) && skip_page_for_first_entry)) {
@@ -440,7 +444,7 @@ static int hw_add_addr_in_paddr(struct ucc_geth_private *ugeth,
440 444
441static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) 445static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
442{ 446{
443 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 447 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
444 448
445 if (!(paddr_num < NUM_OF_PADDRS)) { 449 if (!(paddr_num < NUM_OF_PADDRS)) {
446 ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__); 450
@@ -448,7 +452,7 @@ static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
448 } 452 }
449 453
450 p_82xx_addr_filt = 454 p_82xx_addr_filt =
451 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> 455 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
452 addressfiltering; 456 addressfiltering;
453 457
454 /* Writing address ff.ff.ff.ff.ff.ff disables address 458 /* Writing address ff.ff.ff.ff.ff.ff disables address
@@ -463,11 +467,11 @@ static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
463static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth, 467static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
464 u8 *p_enet_addr) 468 u8 *p_enet_addr)
465{ 469{
466 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 470 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
467 u32 cecr_subblock; 471 u32 cecr_subblock;
468 472
469 p_82xx_addr_filt = 473 p_82xx_addr_filt =
470 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> 474 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
471 addressfiltering; 475 addressfiltering;
472 476
473 cecr_subblock = 477 cecr_subblock =
@@ -487,7 +491,7 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
487static void magic_packet_detection_enable(struct ucc_geth_private *ugeth) 491static void magic_packet_detection_enable(struct ucc_geth_private *ugeth)
488{ 492{
489 struct ucc_fast_private *uccf; 493 struct ucc_fast_private *uccf;
490 struct ucc_geth *ug_regs; 494 struct ucc_geth __iomem *ug_regs;
491 u32 maccfg2, uccm; 495 u32 maccfg2, uccm;
492 496
493 uccf = ugeth->uccf; 497 uccf = ugeth->uccf;
@@ -507,7 +511,7 @@ static void magic_packet_detection_enable(struct ucc_geth_private *ugeth)
507static void magic_packet_detection_disable(struct ucc_geth_private *ugeth) 511static void magic_packet_detection_disable(struct ucc_geth_private *ugeth)
508{ 512{
509 struct ucc_fast_private *uccf; 513 struct ucc_fast_private *uccf;
510 struct ucc_geth *ug_regs; 514 struct ucc_geth __iomem *ug_regs;
511 u32 maccfg2, uccm; 515 u32 maccfg2, uccm;
512 516
513 uccf = ugeth->uccf; 517 uccf = ugeth->uccf;
@@ -538,13 +542,13 @@ static void get_statistics(struct ucc_geth_private *ugeth,
538 rx_firmware_statistics, 542 rx_firmware_statistics,
539 struct ucc_geth_hardware_statistics *hardware_statistics) 543 struct ucc_geth_hardware_statistics *hardware_statistics)
540{ 544{
541 struct ucc_fast *uf_regs; 545 struct ucc_fast __iomem *uf_regs;
542 struct ucc_geth *ug_regs; 546 struct ucc_geth __iomem *ug_regs;
543 struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram; 547 struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
544 struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram; 548 struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
545 549
546 ug_regs = ugeth->ug_regs; 550 ug_regs = ugeth->ug_regs;
547 uf_regs = (struct ucc_fast *) ug_regs; 551 uf_regs = (struct ucc_fast __iomem *) ug_regs;
548 p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram; 552 p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
549 p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram; 553 p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
550 554
@@ -1132,9 +1136,9 @@ static void dump_regs(struct ucc_geth_private *ugeth)
1132} 1136}
1133#endif /* DEBUG */ 1137#endif /* DEBUG */
1134 1138
1135static void init_default_reg_vals(volatile u32 *upsmr_register, 1139static void init_default_reg_vals(u32 __iomem *upsmr_register,
1136 volatile u32 *maccfg1_register, 1140 u32 __iomem *maccfg1_register,
1137 volatile u32 *maccfg2_register) 1141 u32 __iomem *maccfg2_register)
1138{ 1142{
1139 out_be32(upsmr_register, UCC_GETH_UPSMR_INIT); 1143 out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
1140 out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT); 1144 out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
@@ -1148,7 +1152,7 @@ static int init_half_duplex_params(int alt_beb,
1148 u8 alt_beb_truncation, 1152 u8 alt_beb_truncation,
1149 u8 max_retransmissions, 1153 u8 max_retransmissions,
1150 u8 collision_window, 1154 u8 collision_window,
1151 volatile u32 *hafdup_register) 1155 u32 __iomem *hafdup_register)
1152{ 1156{
1153 u32 value = 0; 1157 u32 value = 0;
1154 1158
@@ -1180,7 +1184,7 @@ static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
1180 u8 non_btb_ipg, 1184 u8 non_btb_ipg,
1181 u8 min_ifg, 1185 u8 min_ifg,
1182 u8 btb_ipg, 1186 u8 btb_ipg,
1183 volatile u32 *ipgifg_register) 1187 u32 __iomem *ipgifg_register)
1184{ 1188{
1185 u32 value = 0; 1189 u32 value = 0;
1186 1190
@@ -1215,9 +1219,9 @@ int init_flow_control_params(u32 automatic_flow_control_mode,
1215 int tx_flow_control_enable, 1219 int tx_flow_control_enable,
1216 u16 pause_period, 1220 u16 pause_period,
1217 u16 extension_field, 1221 u16 extension_field,
1218 volatile u32 *upsmr_register, 1222 u32 __iomem *upsmr_register,
1219 volatile u32 *uempr_register, 1223 u32 __iomem *uempr_register,
1220 volatile u32 *maccfg1_register) 1224 u32 __iomem *maccfg1_register)
1221{ 1225{
1222 u32 value = 0; 1226 u32 value = 0;
1223 1227
@@ -1243,8 +1247,8 @@ int init_flow_control_params(u32 automatic_flow_control_mode,
1243 1247
1244static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, 1248static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
1245 int auto_zero_hardware_statistics, 1249 int auto_zero_hardware_statistics,
1246 volatile u32 *upsmr_register, 1250 u32 __iomem *upsmr_register,
1247 volatile u16 *uescr_register) 1251 u16 __iomem *uescr_register)
1248{ 1252{
1249 u32 upsmr_value = 0; 1253 u32 upsmr_value = 0;
1250 u16 uescr_value = 0; 1254 u16 uescr_value = 0;
@@ -1270,12 +1274,12 @@ static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
1270static int init_firmware_statistics_gathering_mode(int 1274static int init_firmware_statistics_gathering_mode(int
1271 enable_tx_firmware_statistics, 1275 enable_tx_firmware_statistics,
1272 int enable_rx_firmware_statistics, 1276 int enable_rx_firmware_statistics,
1273 volatile u32 *tx_rmon_base_ptr, 1277 u32 __iomem *tx_rmon_base_ptr,
1274 u32 tx_firmware_statistics_structure_address, 1278 u32 tx_firmware_statistics_structure_address,
1275 volatile u32 *rx_rmon_base_ptr, 1279 u32 __iomem *rx_rmon_base_ptr,
1276 u32 rx_firmware_statistics_structure_address, 1280 u32 rx_firmware_statistics_structure_address,
1277 volatile u16 *temoder_register, 1281 u16 __iomem *temoder_register,
1278 volatile u32 *remoder_register) 1282 u32 __iomem *remoder_register)
1279{ 1283{
1280 /* Note: this function does not check if */ 1284 /* Note: this function does not check if */
1281 /* the parameters it receives are NULL */ 1285 /* the parameters it receives are NULL */
@@ -1307,8 +1311,8 @@ static int init_mac_station_addr_regs(u8 address_byte_0,
1307 u8 address_byte_3, 1311 u8 address_byte_3,
1308 u8 address_byte_4, 1312 u8 address_byte_4,
1309 u8 address_byte_5, 1313 u8 address_byte_5,
1310 volatile u32 *macstnaddr1_register, 1314 u32 __iomem *macstnaddr1_register,
1311 volatile u32 *macstnaddr2_register) 1315 u32 __iomem *macstnaddr2_register)
1312{ 1316{
1313 u32 value = 0; 1317 u32 value = 0;
1314 1318
@@ -1344,7 +1348,7 @@ static int init_mac_station_addr_regs(u8 address_byte_0,
1344} 1348}
1345 1349
1346static int init_check_frame_length_mode(int length_check, 1350static int init_check_frame_length_mode(int length_check,
1347 volatile u32 *maccfg2_register) 1351 u32 __iomem *maccfg2_register)
1348{ 1352{
1349 u32 value = 0; 1353 u32 value = 0;
1350 1354
@@ -1360,7 +1364,7 @@ static int init_check_frame_length_mode(int length_check,
1360} 1364}
1361 1365
1362static int init_preamble_length(u8 preamble_length, 1366static int init_preamble_length(u8 preamble_length,
1363 volatile u32 *maccfg2_register) 1367 u32 __iomem *maccfg2_register)
1364{ 1368{
1365 u32 value = 0; 1369 u32 value = 0;
1366 1370
@@ -1376,7 +1380,7 @@ static int init_preamble_length(u8 preamble_length,
1376 1380
1377static int init_rx_parameters(int reject_broadcast, 1381static int init_rx_parameters(int reject_broadcast,
1378 int receive_short_frames, 1382 int receive_short_frames,
1379 int promiscuous, volatile u32 *upsmr_register) 1383 int promiscuous, u32 __iomem *upsmr_register)
1380{ 1384{
1381 u32 value = 0; 1385 u32 value = 0;
1382 1386
@@ -1403,7 +1407,7 @@ static int init_rx_parameters(int reject_broadcast,
1403} 1407}
1404 1408
1405static int init_max_rx_buff_len(u16 max_rx_buf_len, 1409static int init_max_rx_buff_len(u16 max_rx_buf_len,
1406 volatile u16 *mrblr_register) 1410 u16 __iomem *mrblr_register)
1407{ 1411{
1408 /* max_rx_buf_len value must be a multiple of 128 */ 1412 /* max_rx_buf_len value must be a multiple of 128 */
1409 if ((max_rx_buf_len == 0) 1413 if ((max_rx_buf_len == 0)
@@ -1415,8 +1419,8 @@ static int init_max_rx_buff_len(u16 max_rx_buf_len,
1415} 1419}
1416 1420
1417static int init_min_frame_len(u16 min_frame_length, 1421static int init_min_frame_len(u16 min_frame_length,
1418 volatile u16 *minflr_register, 1422 u16 __iomem *minflr_register,
1419 volatile u16 *mrblr_register) 1423 u16 __iomem *mrblr_register)
1420{ 1424{
1421 u16 mrblr_value = 0; 1425 u16 mrblr_value = 0;
1422 1426
@@ -1431,8 +1435,8 @@ static int init_min_frame_len(u16 min_frame_length,
1431static int adjust_enet_interface(struct ucc_geth_private *ugeth) 1435static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1432{ 1436{
1433 struct ucc_geth_info *ug_info; 1437 struct ucc_geth_info *ug_info;
1434 struct ucc_geth *ug_regs; 1438 struct ucc_geth __iomem *ug_regs;
1435 struct ucc_fast *uf_regs; 1439 struct ucc_fast __iomem *uf_regs;
1436 int ret_val; 1440 int ret_val;
1437 u32 upsmr, maccfg2, tbiBaseAddress; 1441 u32 upsmr, maccfg2, tbiBaseAddress;
1438 u16 value; 1442 u16 value;
@@ -1517,8 +1521,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1517static void adjust_link(struct net_device *dev) 1521static void adjust_link(struct net_device *dev)
1518{ 1522{
1519 struct ucc_geth_private *ugeth = netdev_priv(dev); 1523 struct ucc_geth_private *ugeth = netdev_priv(dev);
1520 struct ucc_geth *ug_regs; 1524 struct ucc_geth __iomem *ug_regs;
1521 struct ucc_fast *uf_regs; 1525 struct ucc_fast __iomem *uf_regs;
1522 struct phy_device *phydev = ugeth->phydev; 1526 struct phy_device *phydev = ugeth->phydev;
1523 unsigned long flags; 1527 unsigned long flags;
1524 int new_state = 0; 1528 int new_state = 0;
@@ -1678,9 +1682,9 @@ static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth)
1678 uccf = ugeth->uccf; 1682 uccf = ugeth->uccf;
1679 1683
1680 /* Clear acknowledge bit */ 1684 /* Clear acknowledge bit */
1681 temp = ugeth->p_rx_glbl_pram->rxgstpack; 1685 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
1682 temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX; 1686 temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
1683 ugeth->p_rx_glbl_pram->rxgstpack = temp; 1687 out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp);
1684 1688
1685 /* Keep issuing command and checking acknowledge bit until 1689 /* Keep issuing command and checking acknowledge bit until
1686 it is asserted, according to spec */ 1690 it is asserted, according to spec */
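
The graceful-stop handshake now does its read-modify-write of the acknowledge byte through in_8()/out_8(). Condensed, with cecr_subblock as computed earlier in the function:

	u8 ack = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);

	out_8(&ugeth->p_rx_glbl_pram->rxgstpack,
	      ack & ~GRACEFUL_STOP_ACKNOWLEDGE_RX);	/* clear stale ack */
	do {
		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
			     QE_CR_PROTOCOL_ETHERNET, 0);
		ack = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
	} while (!(ack & GRACEFUL_STOP_ACKNOWLEDGE_RX));	/* ucode sets it */
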
@@ -1692,7 +1696,7 @@ static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth)
1692 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, 1696 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
1693 QE_CR_PROTOCOL_ETHERNET, 0); 1697 QE_CR_PROTOCOL_ETHERNET, 0);
1694 1698
1695 temp = ugeth->p_rx_glbl_pram->rxgstpack; 1699 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
1696 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX)); 1700 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX));
1697 1701
1698 uccf->stopped_rx = 1; 1702 uccf->stopped_rx = 1;
@@ -1991,19 +1995,20 @@ static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *
1991 enum enet_addr_type 1995 enum enet_addr_type
1992 enet_addr_type) 1996 enet_addr_type)
1993{ 1997{
1994 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 1998 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
1995 struct ucc_fast_private *uccf; 1999 struct ucc_fast_private *uccf;
1996 enum comm_dir comm_dir; 2000 enum comm_dir comm_dir;
1997 struct list_head *p_lh; 2001 struct list_head *p_lh;
1998 u16 i, num; 2002 u16 i, num;
1999 u32 *addr_h, *addr_l; 2003 u32 __iomem *addr_h;
2004 u32 __iomem *addr_l;
2000 u8 *p_counter; 2005 u8 *p_counter;
2001 2006
2002 uccf = ugeth->uccf; 2007 uccf = ugeth->uccf;
2003 2008
2004 p_82xx_addr_filt = 2009 p_82xx_addr_filt =
2005 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> 2010 (struct ucc_geth_82xx_address_filtering_pram __iomem *)
2006 addressfiltering; 2011 ugeth->p_rx_glbl_pram->addressfiltering;
2007 2012
2008 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { 2013 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
2009 addr_h = &(p_82xx_addr_filt->gaddr_h); 2014 addr_h = &(p_82xx_addr_filt->gaddr_h);
@@ -2079,7 +2084,7 @@ static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *uge
2079static void ucc_geth_memclean(struct ucc_geth_private *ugeth) 2084static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
2080{ 2085{
2081 u16 i, j; 2086 u16 i, j;
2082 u8 *bd; 2087 u8 __iomem *bd;
2083 2088
2084 if (!ugeth) 2089 if (!ugeth)
2085 return; 2090 return;
@@ -2154,8 +2159,8 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
2154 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { 2159 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
2155 if (ugeth->tx_skbuff[i][j]) { 2160 if (ugeth->tx_skbuff[i][j]) {
2156 dma_unmap_single(NULL, 2161 dma_unmap_single(NULL,
2157 ((struct qe_bd *)bd)->buf, 2162 in_be32(&((struct qe_bd __iomem *)bd)->buf),
2158 (in_be32((u32 *)bd) & 2163 (in_be32((u32 __iomem *)bd) &
2159 BD_LENGTH_MASK), 2164 BD_LENGTH_MASK),
2160 DMA_TO_DEVICE); 2165 DMA_TO_DEVICE);
2161 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); 2166 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
@@ -2182,7 +2187,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
2182 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { 2187 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
2183 if (ugeth->rx_skbuff[i][j]) { 2188 if (ugeth->rx_skbuff[i][j]) {
2184 dma_unmap_single(NULL, 2189 dma_unmap_single(NULL,
2185 ((struct qe_bd *)bd)->buf, 2190 in_be32(&((struct qe_bd __iomem *)bd)->buf),
2186 ugeth->ug_info-> 2191 ugeth->ug_info->
2187 uf_info.max_rx_buf_length + 2192 uf_info.max_rx_buf_length +
2188 UCC_GETH_RX_DATA_BUF_ALIGNMENT, 2193 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
@@ -2218,8 +2223,8 @@ static void ucc_geth_set_multi(struct net_device *dev)
2218{ 2223{
2219 struct ucc_geth_private *ugeth; 2224 struct ucc_geth_private *ugeth;
2220 struct dev_mc_list *dmi; 2225 struct dev_mc_list *dmi;
2221 struct ucc_fast *uf_regs; 2226 struct ucc_fast __iomem *uf_regs;
2222 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 2227 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
2223 int i; 2228 int i;
2224 2229
2225 ugeth = netdev_priv(dev); 2230 ugeth = netdev_priv(dev);
@@ -2228,14 +2233,14 @@ static void ucc_geth_set_multi(struct net_device *dev)
2228 2233
2229 if (dev->flags & IFF_PROMISC) { 2234 if (dev->flags & IFF_PROMISC) {
2230 2235
2231 uf_regs->upsmr |= UPSMR_PRO; 2236 out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr) | UPSMR_PRO);
2232 2237
2233 } else { 2238 } else {
2234 2239
2235 uf_regs->upsmr &= ~UPSMR_PRO; 2240 out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr)&~UPSMR_PRO);
2236 2241
2237 p_82xx_addr_filt = 2242 p_82xx_addr_filt =
2238 (struct ucc_geth_82xx_address_filtering_pram *) ugeth-> 2243 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
2239 p_rx_glbl_pram->addressfiltering; 2244 p_rx_glbl_pram->addressfiltering;
2240 2245
2241 if (dev->flags & IFF_ALLMULTI) { 2246 if (dev->flags & IFF_ALLMULTI) {
@@ -2270,7 +2275,7 @@ static void ucc_geth_set_multi(struct net_device *dev)
2270 2275
2271static void ucc_geth_stop(struct ucc_geth_private *ugeth) 2276static void ucc_geth_stop(struct ucc_geth_private *ugeth)
2272{ 2277{
2273 struct ucc_geth *ug_regs = ugeth->ug_regs; 2278 struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
2274 struct phy_device *phydev = ugeth->phydev; 2279 struct phy_device *phydev = ugeth->phydev;
2275 u32 tempval; 2280 u32 tempval;
2276 2281
@@ -2419,20 +2424,20 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2419 return -ENOMEM; 2424 return -ENOMEM;
2420 } 2425 }
2421 2426
2422 ugeth->ug_regs = (struct ucc_geth *) ioremap(uf_info->regs, sizeof(struct ucc_geth)); 2427 ugeth->ug_regs = (struct ucc_geth __iomem *) ioremap(uf_info->regs, sizeof(struct ucc_geth));
2423 2428
2424 return 0; 2429 return 0;
2425} 2430}
2426 2431
2427static int ucc_geth_startup(struct ucc_geth_private *ugeth) 2432static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2428{ 2433{
2429 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 2434 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
2430 struct ucc_geth_init_pram *p_init_enet_pram; 2435 struct ucc_geth_init_pram __iomem *p_init_enet_pram;
2431 struct ucc_fast_private *uccf; 2436 struct ucc_fast_private *uccf;
2432 struct ucc_geth_info *ug_info; 2437 struct ucc_geth_info *ug_info;
2433 struct ucc_fast_info *uf_info; 2438 struct ucc_fast_info *uf_info;
2434 struct ucc_fast *uf_regs; 2439 struct ucc_fast __iomem *uf_regs;
2435 struct ucc_geth *ug_regs; 2440 struct ucc_geth __iomem *ug_regs;
2436 int ret_val = -EINVAL; 2441 int ret_val = -EINVAL;
2437 u32 remoder = UCC_GETH_REMODER_INIT; 2442 u32 remoder = UCC_GETH_REMODER_INIT;
2438 u32 init_enet_pram_offset, cecr_subblock, command, maccfg1; 2443 u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
@@ -2440,7 +2445,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2440 u16 temoder = UCC_GETH_TEMODER_INIT; 2445 u16 temoder = UCC_GETH_TEMODER_INIT;
2441 u16 test; 2446 u16 test;
2442 u8 function_code = 0; 2447 u8 function_code = 0;
2443 u8 *bd, *endOfRing; 2448 u8 __iomem *bd;
2449 u8 __iomem *endOfRing;
2444 u8 numThreadsRxNumerical, numThreadsTxNumerical; 2450 u8 numThreadsRxNumerical, numThreadsTxNumerical;
2445 2451
2446 ugeth_vdbg("%s: IN", __FUNCTION__); 2452 ugeth_vdbg("%s: IN", __FUNCTION__);
@@ -2602,11 +2608,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2602 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) 2608 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
2603 align = UCC_GETH_TX_BD_RING_ALIGNMENT; 2609 align = UCC_GETH_TX_BD_RING_ALIGNMENT;
2604 ugeth->tx_bd_ring_offset[j] = 2610 ugeth->tx_bd_ring_offset[j] =
2605 kmalloc((u32) (length + align), GFP_KERNEL); 2611 (u32) kmalloc((u32) (length + align), GFP_KERNEL);
2606 2612
2607 if (ugeth->tx_bd_ring_offset[j] != 0) 2613 if (ugeth->tx_bd_ring_offset[j] != 0)
2608 ugeth->p_tx_bd_ring[j] = 2614 ugeth->p_tx_bd_ring[j] =
2609 (void*)((ugeth->tx_bd_ring_offset[j] + 2615 (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
2610 align) & ~(align - 1)); 2616 align) & ~(align - 1));
2611 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { 2617 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2612 ugeth->tx_bd_ring_offset[j] = 2618 ugeth->tx_bd_ring_offset[j] =
@@ -2614,7 +2620,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2614 UCC_GETH_TX_BD_RING_ALIGNMENT); 2620 UCC_GETH_TX_BD_RING_ALIGNMENT);
2615 if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j])) 2621 if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
2616 ugeth->p_tx_bd_ring[j] = 2622 ugeth->p_tx_bd_ring[j] =
2617 (u8 *) qe_muram_addr(ugeth-> 2623 (u8 __iomem *) qe_muram_addr(ugeth->
2618 tx_bd_ring_offset[j]); 2624 tx_bd_ring_offset[j]);
2619 } 2625 }
2620 if (!ugeth->p_tx_bd_ring[j]) { 2626 if (!ugeth->p_tx_bd_ring[j]) {
@@ -2626,8 +2632,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2626 return -ENOMEM; 2632 return -ENOMEM;
2627 } 2633 }
2628 /* Zero unused end of bd ring, according to spec */ 2634 /* Zero unused end of bd ring, according to spec */
2629 memset(ugeth->p_tx_bd_ring[j] + 2635 memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
2630 ug_info->bdRingLenTx[j] * sizeof(struct qe_bd), 0, 2636 ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
2631 length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)); 2637 length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
2632 } 2638 }
2633 2639
@@ -2639,10 +2645,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2639 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) 2645 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
2640 align = UCC_GETH_RX_BD_RING_ALIGNMENT; 2646 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
2641 ugeth->rx_bd_ring_offset[j] = 2647 ugeth->rx_bd_ring_offset[j] =
2642 kmalloc((u32) (length + align), GFP_KERNEL); 2648 (u32) kmalloc((u32) (length + align), GFP_KERNEL);
2643 if (ugeth->rx_bd_ring_offset[j] != 0) 2649 if (ugeth->rx_bd_ring_offset[j] != 0)
2644 ugeth->p_rx_bd_ring[j] = 2650 ugeth->p_rx_bd_ring[j] =
2645 (void*)((ugeth->rx_bd_ring_offset[j] + 2651 (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
2646 align) & ~(align - 1)); 2652 align) & ~(align - 1));
2647 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { 2653 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2648 ugeth->rx_bd_ring_offset[j] = 2654 ugeth->rx_bd_ring_offset[j] =
@@ -2650,7 +2656,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2650 UCC_GETH_RX_BD_RING_ALIGNMENT); 2656 UCC_GETH_RX_BD_RING_ALIGNMENT);
2651 if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j])) 2657 if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
2652 ugeth->p_rx_bd_ring[j] = 2658 ugeth->p_rx_bd_ring[j] =
2653 (u8 *) qe_muram_addr(ugeth-> 2659 (u8 __iomem *) qe_muram_addr(ugeth->
2654 rx_bd_ring_offset[j]); 2660 rx_bd_ring_offset[j]);
2655 } 2661 }
2656 if (!ugeth->p_rx_bd_ring[j]) { 2662 if (!ugeth->p_rx_bd_ring[j]) {
@@ -2685,14 +2691,14 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2685 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; 2691 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
2686 for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { 2692 for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
2687 /* clear bd buffer */ 2693 /* clear bd buffer */
2688 out_be32(&((struct qe_bd *)bd)->buf, 0); 2694 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
2689 /* set bd status and length */ 2695 /* set bd status and length */
2690 out_be32((u32 *)bd, 0); 2696 out_be32((u32 __iomem *)bd, 0);
2691 bd += sizeof(struct qe_bd); 2697 bd += sizeof(struct qe_bd);
2692 } 2698 }
2693 bd -= sizeof(struct qe_bd); 2699 bd -= sizeof(struct qe_bd);
2694 /* set bd status and length */ 2700 /* set bd status and length */
2695 out_be32((u32 *)bd, T_W); /* for last BD set Wrap bit */ 2701 out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
2696 } 2702 }
2697 2703
2698 /* Init Rx bds */ 2704 /* Init Rx bds */
@@ -2717,14 +2723,14 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2717 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; 2723 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
2718 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { 2724 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
2719 /* set bd status and length */ 2725 /* set bd status and length */
2720 out_be32((u32 *)bd, R_I); 2726 out_be32((u32 __iomem *)bd, R_I);
2721 /* clear bd buffer */ 2727 /* clear bd buffer */
2722 out_be32(&((struct qe_bd *)bd)->buf, 0); 2728 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
2723 bd += sizeof(struct qe_bd); 2729 bd += sizeof(struct qe_bd);
2724 } 2730 }
2725 bd -= sizeof(struct qe_bd); 2731 bd -= sizeof(struct qe_bd);
2726 /* set bd status and length */ 2732 /* set bd status and length */
2727 out_be32((u32 *)bd, R_W); /* for last BD set Wrap bit */ 2733 out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
2728 } 2734 }
2729 2735
2730 /* 2736 /*
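
These ring loops cast u8 pointers to struct qe_bd because a QE buffer descriptor is two big-endian words. The layout the casts assume, as defined in the QE support headers of this era (condensed here for reference):

	struct qe_bd {
		__be16 status;	/* out_be32((u32 __iomem *)bd, ...) above */
		__be16 length;	/*  writes status and length as one word  */
		__be32 buf;	/* DMA address of the data buffer */
	} __attribute__((packed));
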
@@ -2744,10 +2750,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2744 return -ENOMEM; 2750 return -ENOMEM;
2745 } 2751 }
2746 ugeth->p_tx_glbl_pram = 2752 ugeth->p_tx_glbl_pram =
2747 (struct ucc_geth_tx_global_pram *) qe_muram_addr(ugeth-> 2753 (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth->
2748 tx_glbl_pram_offset); 2754 tx_glbl_pram_offset);
2749 /* Zero out p_tx_glbl_pram */ 2755 /* Zero out p_tx_glbl_pram */
2750 memset(ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram)); 2756 memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
2751 2757
2752 /* Fill global PRAM */ 2758 /* Fill global PRAM */
2753 2759
@@ -2768,7 +2774,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2768 } 2774 }
2769 2775
2770 ugeth->p_thread_data_tx = 2776 ugeth->p_thread_data_tx =
2771 (struct ucc_geth_thread_data_tx *) qe_muram_addr(ugeth-> 2777 (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth->
2772 thread_dat_tx_offset); 2778 thread_dat_tx_offset);
2773 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); 2779 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
2774 2780
@@ -2779,7 +2785,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2779 2785
2780 /* iphoffset */ 2786 /* iphoffset */
2781 for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) 2787 for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
2782 ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i]; 2788 out_8(&ugeth->p_tx_glbl_pram->iphoffset[i],
2789 ug_info->iphoffset[i]);
2783 2790
2784 /* SQPTR */ 2791 /* SQPTR */
2785 /* Size varies with number of Tx queues */ 2792 /* Size varies with number of Tx queues */
@@ -2797,7 +2804,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2797 } 2804 }
2798 2805
2799 ugeth->p_send_q_mem_reg = 2806 ugeth->p_send_q_mem_reg =
2800 (struct ucc_geth_send_queue_mem_region *) qe_muram_addr(ugeth-> 2807 (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth->
2801 send_q_mem_reg_offset); 2808 send_q_mem_reg_offset);
2802 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); 2809 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
2803 2810
@@ -2841,25 +2848,26 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2841 } 2848 }
2842 2849
2843 ugeth->p_scheduler = 2850 ugeth->p_scheduler =
2844 (struct ucc_geth_scheduler *) qe_muram_addr(ugeth-> 2851 (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth->
2845 scheduler_offset); 2852 scheduler_offset);
2846 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, 2853 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
2847 ugeth->scheduler_offset); 2854 ugeth->scheduler_offset);
2848 /* Zero out p_scheduler */ 2855 /* Zero out p_scheduler */
2849 memset(ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler)); 2856 memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
2850 2857
2851 /* Set values in scheduler */ 2858 /* Set values in scheduler */
2852 out_be32(&ugeth->p_scheduler->mblinterval, 2859 out_be32(&ugeth->p_scheduler->mblinterval,
2853 ug_info->mblinterval); 2860 ug_info->mblinterval);
2854 out_be16(&ugeth->p_scheduler->nortsrbytetime, 2861 out_be16(&ugeth->p_scheduler->nortsrbytetime,
2855 ug_info->nortsrbytetime); 2862 ug_info->nortsrbytetime);
2856 ugeth->p_scheduler->fracsiz = ug_info->fracsiz; 2863 out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz);
2857 ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq; 2864 out_8(&ugeth->p_scheduler->strictpriorityq,
2858 ugeth->p_scheduler->txasap = ug_info->txasap; 2865 ug_info->strictpriorityq);
2859 ugeth->p_scheduler->extrabw = ug_info->extrabw; 2866 out_8(&ugeth->p_scheduler->txasap, ug_info->txasap);
2867 out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw);
2860 for (i = 0; i < NUM_TX_QUEUES; i++) 2868 for (i = 0; i < NUM_TX_QUEUES; i++)
2861 ugeth->p_scheduler->weightfactor[i] = 2869 out_8(&ugeth->p_scheduler->weightfactor[i],
2862 ug_info->weightfactor[i]; 2870 ug_info->weightfactor[i]);
2863 2871
2864 /* Set pointers to cpucount registers in scheduler */ 2872 /* Set pointers to cpucount registers in scheduler */
2865 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); 2873 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
@@ -2890,10 +2898,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2890 return -ENOMEM; 2898 return -ENOMEM;
2891 } 2899 }
2892 ugeth->p_tx_fw_statistics_pram = 2900 ugeth->p_tx_fw_statistics_pram =
2893 (struct ucc_geth_tx_firmware_statistics_pram *) 2901 (struct ucc_geth_tx_firmware_statistics_pram __iomem *)
2894 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); 2902 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
2895 /* Zero out p_tx_fw_statistics_pram */ 2903 /* Zero out p_tx_fw_statistics_pram */
2896 memset(ugeth->p_tx_fw_statistics_pram, 2904 memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram,
2897 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram)); 2905 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
2898 } 2906 }
2899 2907
@@ -2930,10 +2938,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2930 return -ENOMEM; 2938 return -ENOMEM;
2931 } 2939 }
2932 ugeth->p_rx_glbl_pram = 2940 ugeth->p_rx_glbl_pram =
2933 (struct ucc_geth_rx_global_pram *) qe_muram_addr(ugeth-> 2941 (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth->
2934 rx_glbl_pram_offset); 2942 rx_glbl_pram_offset);
2935 /* Zero out p_rx_glbl_pram */ 2943 /* Zero out p_rx_glbl_pram */
2936 memset(ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram)); 2944 memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
2937 2945
2938 /* Fill global PRAM */ 2946 /* Fill global PRAM */
2939 2947
@@ -2953,7 +2961,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2953 } 2961 }
2954 2962
2955 ugeth->p_thread_data_rx = 2963 ugeth->p_thread_data_rx =
2956 (struct ucc_geth_thread_data_rx *) qe_muram_addr(ugeth-> 2964 (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth->
2957 thread_dat_rx_offset); 2965 thread_dat_rx_offset);
2958 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); 2966 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
2959 2967
@@ -2976,10 +2984,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2976 return -ENOMEM; 2984 return -ENOMEM;
2977 } 2985 }
2978 ugeth->p_rx_fw_statistics_pram = 2986 ugeth->p_rx_fw_statistics_pram =
2979 (struct ucc_geth_rx_firmware_statistics_pram *) 2987 (struct ucc_geth_rx_firmware_statistics_pram __iomem *)
2980 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); 2988 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
2981 /* Zero out p_rx_fw_statistics_pram */ 2989 /* Zero out p_rx_fw_statistics_pram */
2982 memset(ugeth->p_rx_fw_statistics_pram, 0, 2990 memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0,
2983 sizeof(struct ucc_geth_rx_firmware_statistics_pram)); 2991 sizeof(struct ucc_geth_rx_firmware_statistics_pram));
2984 } 2992 }
2985 2993
@@ -3000,7 +3008,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3000 } 3008 }
3001 3009
3002 ugeth->p_rx_irq_coalescing_tbl = 3010 ugeth->p_rx_irq_coalescing_tbl =
3003 (struct ucc_geth_rx_interrupt_coalescing_table *) 3011 (struct ucc_geth_rx_interrupt_coalescing_table __iomem *)
3004 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); 3012 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
3005 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, 3013 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
3006 ugeth->rx_irq_coalescing_tbl_offset); 3014 ugeth->rx_irq_coalescing_tbl_offset);
@@ -3069,11 +3077,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3069 } 3077 }
3070 3078
3071 ugeth->p_rx_bd_qs_tbl = 3079 ugeth->p_rx_bd_qs_tbl =
3072 (struct ucc_geth_rx_bd_queues_entry *) qe_muram_addr(ugeth-> 3080 (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
3073 rx_bd_qs_tbl_offset); 3081 rx_bd_qs_tbl_offset);
3074 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); 3082 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
3075 /* Zero out p_rx_bd_qs_tbl */ 3083 /* Zero out p_rx_bd_qs_tbl */
3076 memset(ugeth->p_rx_bd_qs_tbl, 3084 memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl,
3077 0, 3085 0,
3078 ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) + 3086 ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
3079 sizeof(struct ucc_geth_rx_prefetched_bds))); 3087 sizeof(struct ucc_geth_rx_prefetched_bds)));
@@ -3133,7 +3141,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3133 &ugeth->p_rx_glbl_pram->remoder); 3141 &ugeth->p_rx_glbl_pram->remoder);
3134 3142
3135 /* function code register */ 3143 /* function code register */
3136 ugeth->p_rx_glbl_pram->rstate = function_code; 3144 out_8(&ugeth->p_rx_glbl_pram->rstate, function_code);
3137 3145
3138 /* initialize extended filtering */ 3146 /* initialize extended filtering */
3139 if (ug_info->rxExtendedFiltering) { 3147 if (ug_info->rxExtendedFiltering) {
@@ -3160,7 +3168,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3160 } 3168 }
3161 3169
3162 ugeth->p_exf_glbl_param = 3170 ugeth->p_exf_glbl_param =
3163 (struct ucc_geth_exf_global_pram *) qe_muram_addr(ugeth-> 3171 (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth->
3164 exf_glbl_param_offset); 3172 exf_glbl_param_offset);
3165 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, 3173 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
3166 ugeth->exf_glbl_param_offset); 3174 ugeth->exf_glbl_param_offset);
@@ -3175,7 +3183,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3175 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j); 3183 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
3176 3184
3177 p_82xx_addr_filt = 3185 p_82xx_addr_filt =
3178 (struct ucc_geth_82xx_address_filtering_pram *) ugeth-> 3186 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
3179 p_rx_glbl_pram->addressfiltering; 3187 p_rx_glbl_pram->addressfiltering;
3180 3188
3181 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, 3189 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
@@ -3307,17 +3315,21 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3307 return -ENOMEM; 3315 return -ENOMEM;
3308 } 3316 }
3309 p_init_enet_pram = 3317 p_init_enet_pram =
3310 (struct ucc_geth_init_pram *) qe_muram_addr(init_enet_pram_offset); 3318 (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset);
3311 3319
3312 /* Copy shadow InitEnet command parameter structure into PRAM */ 3320 /* Copy shadow InitEnet command parameter structure into PRAM */
3313 p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1; 3321 out_8(&p_init_enet_pram->resinit1,
3314 p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2; 3322 ugeth->p_init_enet_param_shadow->resinit1);
3315 p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3; 3323 out_8(&p_init_enet_pram->resinit2,
3316 p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4; 3324 ugeth->p_init_enet_param_shadow->resinit2);
3325 out_8(&p_init_enet_pram->resinit3,
3326 ugeth->p_init_enet_param_shadow->resinit3);
3327 out_8(&p_init_enet_pram->resinit4,
3328 ugeth->p_init_enet_param_shadow->resinit4);
3317 out_be16(&p_init_enet_pram->resinit5, 3329 out_be16(&p_init_enet_pram->resinit5,
3318 ugeth->p_init_enet_param_shadow->resinit5); 3330 ugeth->p_init_enet_param_shadow->resinit5);
3319 p_init_enet_pram->largestexternallookupkeysize = 3331 out_8(&p_init_enet_pram->largestexternallookupkeysize,
3320 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize; 3332 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize);
3321 out_be32(&p_init_enet_pram->rgftgfrxglobal, 3333 out_be32(&p_init_enet_pram->rgftgfrxglobal,
3322 ugeth->p_init_enet_param_shadow->rgftgfrxglobal); 3334 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
3323 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++) 3335 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
@@ -3371,7 +3383,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3371#ifdef CONFIG_UGETH_TX_ON_DEMAND 3383#ifdef CONFIG_UGETH_TX_ON_DEMAND
3372 struct ucc_fast_private *uccf; 3384 struct ucc_fast_private *uccf;
3373#endif 3385#endif
3374 u8 *bd; /* BD pointer */ 3386 u8 __iomem *bd; /* BD pointer */
3375 u32 bd_status; 3387 u32 bd_status;
3376 u8 txQ = 0; 3388 u8 txQ = 0;
3377 3389
@@ -3383,7 +3395,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3383 3395
3384 /* Start from the next BD that should be filled */ 3396 /* Start from the next BD that should be filled */
3385 bd = ugeth->txBd[txQ]; 3397 bd = ugeth->txBd[txQ];
3386 bd_status = in_be32((u32 *)bd); 3398 bd_status = in_be32((u32 __iomem *)bd);
3387 /* Save the skb pointer so we can free it later */ 3399 /* Save the skb pointer so we can free it later */
3388 ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; 3400 ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
3389 3401
@@ -3393,7 +3405,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3393 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); 3405 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3394 3406
3395 /* set up the buffer descriptor */ 3407 /* set up the buffer descriptor */
3396 out_be32(&((struct qe_bd *)bd)->buf, 3408 out_be32(&((struct qe_bd __iomem *)bd)->buf,
3397 dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE)); 3409 dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));
3398 3410
3399 /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ 3411 /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
@@ -3401,7 +3413,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3401 bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; 3413 bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
3402 3414
3403 /* set bd status and length */ 3415 /* set bd status and length */
3404 out_be32((u32 *)bd, bd_status); 3416 out_be32((u32 __iomem *)bd, bd_status);
3405 3417
3406 dev->trans_start = jiffies; 3418 dev->trans_start = jiffies;
3407 3419
@@ -3441,7 +3453,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3441static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) 3453static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
3442{ 3454{
3443 struct sk_buff *skb; 3455 struct sk_buff *skb;
3444 u8 *bd; 3456 u8 __iomem *bd;
3445 u16 length, howmany = 0; 3457 u16 length, howmany = 0;
3446 u32 bd_status; 3458 u32 bd_status;
3447 u8 *bdBuffer; 3459 u8 *bdBuffer;
@@ -3454,11 +3466,11 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
3454 /* collect received buffers */ 3466 /* collect received buffers */
3455 bd = ugeth->rxBd[rxQ]; 3467 bd = ugeth->rxBd[rxQ];
3456 3468
3457 bd_status = in_be32((u32 *)bd); 3469 bd_status = in_be32((u32 __iomem *)bd);
3458 3470
3459 /* while there are received buffers and BD is full (~R_E) */ 3471 /* while there are received buffers and BD is full (~R_E) */
3460 while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { 3472 while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
3461 bdBuffer = (u8 *) in_be32(&((struct qe_bd *)bd)->buf); 3473 bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf);
3462 length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); 3474 length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
3463 skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; 3475 skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
3464 3476
@@ -3516,7 +3528,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
3516 else 3528 else
3517 bd += sizeof(struct qe_bd); 3529 bd += sizeof(struct qe_bd);
3518 3530
3519 bd_status = in_be32((u32 *)bd); 3531 bd_status = in_be32((u32 __iomem *)bd);
3520 } 3532 }
3521 3533
3522 ugeth->rxBd[rxQ] = bd; 3534 ugeth->rxBd[rxQ] = bd;
@@ -3527,11 +3539,11 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3527{ 3539{
3528 /* Start from the next BD that should be filled */ 3540 /* Start from the next BD that should be filled */
3529 struct ucc_geth_private *ugeth = netdev_priv(dev); 3541 struct ucc_geth_private *ugeth = netdev_priv(dev);
3530 u8 *bd; /* BD pointer */ 3542 u8 __iomem *bd; /* BD pointer */
3531 u32 bd_status; 3543 u32 bd_status;
3532 3544
3533 bd = ugeth->confBd[txQ]; 3545 bd = ugeth->confBd[txQ];
3534 bd_status = in_be32((u32 *)bd); 3546 bd_status = in_be32((u32 __iomem *)bd);
3535 3547
3536 /* Normal processing. */ 3548 /* Normal processing. */
3537 while ((bd_status & T_R) == 0) { 3549 while ((bd_status & T_R) == 0) {
@@ -3561,7 +3573,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3561 bd += sizeof(struct qe_bd); 3573 bd += sizeof(struct qe_bd);
3562 else 3574 else
3563 bd = ugeth->p_tx_bd_ring[txQ]; 3575 bd = ugeth->p_tx_bd_ring[txQ];
3564 bd_status = in_be32((u32 *)bd); 3576 bd_status = in_be32((u32 __iomem *)bd);
3565 } 3577 }
3566 ugeth->confBd[txQ] = bd; 3578 ugeth->confBd[txQ] = bd;
3567 return 0; 3579 return 0;
@@ -3910,7 +3922,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3910 return -EINVAL; 3922 return -EINVAL;
3911 } 3923 }
3912 } else { 3924 } else {
3913 prop = of_get_property(np, "rx-clock", NULL); 3925 prop = of_get_property(np, "tx-clock", NULL);
3914 if (!prop) { 3926 if (!prop) {
3915 printk(KERN_ERR 3927 printk(KERN_ERR
3916 "ucc_geth: mising tx-clock-name property\n"); 3928 "ucc_geth: mising tx-clock-name property\n");
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 9f8b7580a3a4..abc0e2242634 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -700,8 +700,8 @@ struct ucc_geth_82xx_address_filtering_pram {
700 u32 iaddr_l; /* individual address filter, low */ 700 u32 iaddr_l; /* individual address filter, low */
701 u32 gaddr_h; /* group address filter, high */ 701 u32 gaddr_h; /* group address filter, high */
702 u32 gaddr_l; /* group address filter, low */ 702 u32 gaddr_l; /* group address filter, low */
703 struct ucc_geth_82xx_enet_address taddr; 703 struct ucc_geth_82xx_enet_address __iomem taddr;
704 struct ucc_geth_82xx_enet_address paddr[NUM_OF_PADDRS]; 704 struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS];
705 u8 res0[0x40 - 0x38]; 705 u8 res0[0x40 - 0x38];
706} __attribute__ ((packed)); 706} __attribute__ ((packed));
707 707
@@ -1186,40 +1186,40 @@ struct ucc_geth_private {
1186 struct ucc_fast_private *uccf; 1186 struct ucc_fast_private *uccf;
1187 struct net_device *dev; 1187 struct net_device *dev;
1188 struct napi_struct napi; 1188 struct napi_struct napi;
1189 struct ucc_geth *ug_regs; 1189 struct ucc_geth __iomem *ug_regs;
1190 struct ucc_geth_init_pram *p_init_enet_param_shadow; 1190 struct ucc_geth_init_pram *p_init_enet_param_shadow;
1191 struct ucc_geth_exf_global_pram *p_exf_glbl_param; 1191 struct ucc_geth_exf_global_pram __iomem *p_exf_glbl_param;
1192 u32 exf_glbl_param_offset; 1192 u32 exf_glbl_param_offset;
1193 struct ucc_geth_rx_global_pram *p_rx_glbl_pram; 1193 struct ucc_geth_rx_global_pram __iomem *p_rx_glbl_pram;
1194 u32 rx_glbl_pram_offset; 1194 u32 rx_glbl_pram_offset;
1195 struct ucc_geth_tx_global_pram *p_tx_glbl_pram; 1195 struct ucc_geth_tx_global_pram __iomem *p_tx_glbl_pram;
1196 u32 tx_glbl_pram_offset; 1196 u32 tx_glbl_pram_offset;
1197 struct ucc_geth_send_queue_mem_region *p_send_q_mem_reg; 1197 struct ucc_geth_send_queue_mem_region __iomem *p_send_q_mem_reg;
1198 u32 send_q_mem_reg_offset; 1198 u32 send_q_mem_reg_offset;
1199 struct ucc_geth_thread_data_tx *p_thread_data_tx; 1199 struct ucc_geth_thread_data_tx __iomem *p_thread_data_tx;
1200 u32 thread_dat_tx_offset; 1200 u32 thread_dat_tx_offset;
1201 struct ucc_geth_thread_data_rx *p_thread_data_rx; 1201 struct ucc_geth_thread_data_rx __iomem *p_thread_data_rx;
1202 u32 thread_dat_rx_offset; 1202 u32 thread_dat_rx_offset;
1203 struct ucc_geth_scheduler *p_scheduler; 1203 struct ucc_geth_scheduler __iomem *p_scheduler;
1204 u32 scheduler_offset; 1204 u32 scheduler_offset;
1205 struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram; 1205 struct ucc_geth_tx_firmware_statistics_pram __iomem *p_tx_fw_statistics_pram;
1206 u32 tx_fw_statistics_pram_offset; 1206 u32 tx_fw_statistics_pram_offset;
1207 struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram; 1207 struct ucc_geth_rx_firmware_statistics_pram __iomem *p_rx_fw_statistics_pram;
1208 u32 rx_fw_statistics_pram_offset; 1208 u32 rx_fw_statistics_pram_offset;
1209 struct ucc_geth_rx_interrupt_coalescing_table *p_rx_irq_coalescing_tbl; 1209 struct ucc_geth_rx_interrupt_coalescing_table __iomem *p_rx_irq_coalescing_tbl;
1210 u32 rx_irq_coalescing_tbl_offset; 1210 u32 rx_irq_coalescing_tbl_offset;
1211 struct ucc_geth_rx_bd_queues_entry *p_rx_bd_qs_tbl; 1211 struct ucc_geth_rx_bd_queues_entry __iomem *p_rx_bd_qs_tbl;
1212 u32 rx_bd_qs_tbl_offset; 1212 u32 rx_bd_qs_tbl_offset;
1213 u8 *p_tx_bd_ring[NUM_TX_QUEUES]; 1213 u8 __iomem *p_tx_bd_ring[NUM_TX_QUEUES];
1214 u32 tx_bd_ring_offset[NUM_TX_QUEUES]; 1214 u32 tx_bd_ring_offset[NUM_TX_QUEUES];
1215 u8 *p_rx_bd_ring[NUM_RX_QUEUES]; 1215 u8 __iomem *p_rx_bd_ring[NUM_RX_QUEUES];
1216 u32 rx_bd_ring_offset[NUM_RX_QUEUES]; 1216 u32 rx_bd_ring_offset[NUM_RX_QUEUES];
1217 u8 *confBd[NUM_TX_QUEUES]; 1217 u8 __iomem *confBd[NUM_TX_QUEUES];
1218 u8 *txBd[NUM_TX_QUEUES]; 1218 u8 __iomem *txBd[NUM_TX_QUEUES];
1219 u8 *rxBd[NUM_RX_QUEUES]; 1219 u8 __iomem *rxBd[NUM_RX_QUEUES];
1220 int badFrame[NUM_RX_QUEUES]; 1220 int badFrame[NUM_RX_QUEUES];
1221 u16 cpucount[NUM_TX_QUEUES]; 1221 u16 cpucount[NUM_TX_QUEUES];
1222 volatile u16 *p_cpucount[NUM_TX_QUEUES]; 1222 u16 __iomem *p_cpucount[NUM_TX_QUEUES];
1223 int indAddrRegUsed[NUM_OF_PADDRS]; 1223 int indAddrRegUsed[NUM_OF_PADDRS];
1224 u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */ 1224 u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */
1225 u8 numGroupAddrInHash; 1225 u8 numGroupAddrInHash;
@@ -1251,4 +1251,12 @@ struct ucc_geth_private {
1251 int oldlink; 1251 int oldlink;
1252}; 1252};
1253 1253
1254void uec_set_ethtool_ops(struct net_device *netdev);
1255int init_flow_control_params(u32 automatic_flow_control_mode,
1256 int rx_flow_control_enable, int tx_flow_control_enable,
1257 u16 pause_period, u16 extension_field,
1258 u32 __iomem *upsmr_register, u32 __iomem *uempr_register,
1259 u32 __iomem *maccfg1_register);
1260
1261
1254#endif /* __UCC_GETH_H__ */ 1262#endif /* __UCC_GETH_H__ */
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index 9a9622c13e2b..299b7f176950 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -108,12 +108,6 @@ static char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
108#define UEC_TX_FW_STATS_LEN ARRAY_SIZE(tx_fw_stat_gstrings) 108#define UEC_TX_FW_STATS_LEN ARRAY_SIZE(tx_fw_stat_gstrings)
109#define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings) 109#define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings)
110 110
111extern int init_flow_control_params(u32 automatic_flow_control_mode,
112 int rx_flow_control_enable,
113 int tx_flow_control_enable, u16 pause_period,
114 u16 extension_field, volatile u32 *upsmr_register,
115 volatile u32 *uempr_register, volatile u32 *maccfg1_register);
116
117static int 111static int
118uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 112uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
119{ 113{
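
The extern block deleted from ucc_geth_ethtool.c above had already drifted from the real definition (volatile u32 * versus u32 __iomem *); with the prototype for init_flow_control_params() now living once in ucc_geth.h, every caller and the definition are checked against the same declaration. The general shape, with hypothetical names:

    /* shared.h - the single authoritative prototype */
    int demo_flow_init(u32 __iomem *reg);

    /* user.c */
    #include "shared.h"   /* no local 'extern' copy that can go stale */
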
diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c
index 2af490781005..940474736922 100644
--- a/drivers/net/ucc_geth_mii.c
+++ b/drivers/net/ucc_geth_mii.c
@@ -104,7 +104,7 @@ int uec_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
104} 104}
105 105
106/* Reset the MIIM registers, and wait for the bus to free */ 106/* Reset the MIIM registers, and wait for the bus to free */
107int uec_mdio_reset(struct mii_bus *bus) 107static int uec_mdio_reset(struct mii_bus *bus)
108{ 108{
109 struct ucc_mii_mng __iomem *regs = (void __iomem *)bus->priv; 109 struct ucc_mii_mng __iomem *regs = (void __iomem *)bus->priv;
110 unsigned int timeout = PHY_INIT_TIMEOUT; 110 unsigned int timeout = PHY_INIT_TIMEOUT;
@@ -240,7 +240,7 @@ reg_map_fail:
240 return err; 240 return err;
241} 241}
242 242
243int uec_mdio_remove(struct of_device *ofdev) 243static int uec_mdio_remove(struct of_device *ofdev)
244{ 244{
245 struct device *device = &ofdev->dev; 245 struct device *device = &ofdev->dev;
246 struct mii_bus *bus = dev_get_drvdata(device); 246 struct mii_bus *bus = dev_get_drvdata(device);
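
uec_mdio_reset() and uec_mdio_remove() are only reached through the mii_bus and driver structures, so the hunks above give them static linkage and keep them out of the global symbol table. A sketch of the idiom, with the bus trimmed to a minimum:

    #include <linux/phy.h>

    static int demo_mdio_reset(struct mii_bus *bus)   /* file-local */
    {
            return 0;
    }

    static struct mii_bus demo_bus = {
            .name  = "demo",
            .reset = demo_mdio_reset,   /* reachable via the ops struct,
                                         * no external linkage needed */
    };
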
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 6f245cfb6624..dc6f097062df 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1381,6 +1381,10 @@ static const struct usb_device_id products [] = {
1381 USB_DEVICE (0x0411, 0x003d), 1381 USB_DEVICE (0x0411, 0x003d),
1382 .driver_info = (unsigned long) &ax8817x_info, 1382 .driver_info = (unsigned long) &ax8817x_info,
1383}, { 1383}, {
1384 // Buffalo LUA-U2-GT 10/100/1000
1385 USB_DEVICE (0x0411, 0x006e),
1386 .driver_info = (unsigned long) &ax88178_info,
1387}, {
1384 // Sitecom LN-029 "USB 2.0 10/100 Ethernet adapter" 1388 // Sitecom LN-029 "USB 2.0 10/100 Ethernet adapter"
1385 USB_DEVICE (0x6189, 0x182d), 1389 USB_DEVICE (0x6189, 0x182d),
1386 .driver_info = (unsigned long) &ax8817x_info, 1390 .driver_info = (unsigned long) &ax8817x_info,
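
Supporting new hardware in asix.c is a pure table addition: a usb_device_id entry keyed on vendor/product, whose driver_info points at the descriptor for the matching chip (the gigabit AX88178 for this Buffalo adapter). Sketched standalone; ax88178_info is the driver's own descriptor, declared extern here only so the fragment is self-contained (inside asix.c it is a static table):

    #include <linux/module.h>
    #include <linux/usb.h>
    #include <linux/usb/usbnet.h>

    extern const struct driver_info ax88178_info;   /* provided by asix.c */

    static const struct usb_device_id demo_products[] = {
            {
                    USB_DEVICE(0x0411, 0x006e),     /* Buffalo LUA-U2-GT */
                    .driver_info = (unsigned long)&ax88178_info,
            },
            { }                                     /* terminating entry */
    };
    MODULE_DEVICE_TABLE(usb, demo_products);
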
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index a83a40b3ebaa..0f0d27d1c4ca 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -184,7 +184,7 @@ ds1511_wdog_disable(void)
184static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm) 184static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
185{ 185{
186 u8 mon, day, dow, hrs, min, sec, yrs, cen; 186 u8 mon, day, dow, hrs, min, sec, yrs, cen;
187 unsigned int flags; 187 unsigned long flags;
188 188
189 /* 189 /*
190 * won't have to change this for a while 190 * won't have to change this for a while
@@ -247,7 +247,7 @@ static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
247static int ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm) 247static int ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
248{ 248{
249 unsigned int century; 249 unsigned int century;
250 unsigned int flags; 250 unsigned long flags;
251 251
252 spin_lock_irqsave(&ds1511_lock, flags); 252 spin_lock_irqsave(&ds1511_lock, flags);
253 rtc_disable_update(); 253 rtc_disable_update();
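
Both rtc-ds1511 fixes are the same bug: the flags argument of spin_lock_irqsave() must be unsigned long, because it holds architecture-word-sized interrupt state; unsigned int truncates it on 64-bit machines. The correct pattern:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    static void demo_critical(void)
    {
            unsigned long flags;   /* never unsigned int */

            spin_lock_irqsave(&demo_lock, flags);
            /* ... critical section ... */
            spin_unlock_irqrestore(&demo_lock, flags);
    }
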
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index c1f2adefad41..5043150019ac 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -965,8 +965,7 @@ tty3270_write_room(struct tty_struct *tty)
965 * Insert character into the screen at the current position with the 965 * Insert character into the screen at the current position with the
966 * current color and highlight. This function does NOT do cursor movement. 966 * current color and highlight. This function does NOT do cursor movement.
967 */ 967 */
968static int 968static void tty3270_put_character(struct tty3270 *tp, char ch)
969tty3270_put_character(struct tty3270 *tp, char ch)
970{ 969{
971 struct tty3270_line *line; 970 struct tty3270_line *line;
972 struct tty3270_cell *cell; 971 struct tty3270_cell *cell;
@@ -986,7 +985,6 @@ tty3270_put_character(struct tty3270 *tp, char ch)
986 cell->character = tp->view.ascebc[(unsigned int) ch]; 985 cell->character = tp->view.ascebc[(unsigned int) ch];
987 cell->highlight = tp->highlight; 986 cell->highlight = tp->highlight;
988 cell->f_color = tp->f_color; 987 cell->f_color = tp->f_color;
989 return 1;
990} 988}
991 989
992/* 990/*
@@ -1612,16 +1610,15 @@ tty3270_write(struct tty_struct * tty,
1612/* 1610/*
 1613 * Put single characters to the tty's character buffer 1611
1614 */ 1612 */
1615static void 1613static int tty3270_put_char(struct tty_struct *tty, unsigned char ch)
1616tty3270_put_char(struct tty_struct *tty, unsigned char ch)
1617{ 1614{
1618 struct tty3270 *tp; 1615 struct tty3270 *tp;
1619 1616
1620 tp = tty->driver_data; 1617 tp = tty->driver_data;
1621 if (!tp) 1618 if (!tp || tp->char_count >= TTY3270_CHAR_BUF_SIZE)
1622 return; 1619 return 0;
1623 if (tp->char_count < TTY3270_CHAR_BUF_SIZE) 1620 tp->char_buf[tp->char_count++] = ch;
1624 tp->char_buf[tp->char_count++] = ch; 1621 return 1;
1625} 1622}
1626 1623
1627/* 1624/*
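
The tty3270 hunks track a tty-layer interface change: put_char now returns int, 1 when the character was buffered and 0 when it had to be dropped, so callers can detect a full buffer; the screen-level helper that can never fail becomes void instead. A sketch of the new contract against a hypothetical buffer:

    #include <linux/tty.h>

    #define DEMO_BUF_SIZE 256

    struct demo_port {
            unsigned char buf[DEMO_BUF_SIZE];
            int count;
    };

    static int demo_put_char(struct tty_struct *tty, unsigned char ch)
    {
            struct demo_port *dp = tty->driver_data;

            if (!dp || dp->count >= DEMO_BUF_SIZE)
                    return 0;                /* nothing consumed */
            dp->buf[dp->count++] = ch;
            return 1;                        /* one character consumed */
    }
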
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 40ef948fcb3a..9c21b8f43f9b 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -19,6 +19,7 @@
19 19
20#include <asm/cio.h> 20#include <asm/cio.h>
21#include <asm/uaccess.h> 21#include <asm/uaccess.h>
22#include <asm/cio.h>
22 23
23#include "blacklist.h" 24#include "blacklist.h"
24#include "cio.h" 25#include "cio.h"
@@ -43,164 +44,169 @@ typedef enum {add, free} range_action;
43 * Function: blacklist_range 44 * Function: blacklist_range
44 * (Un-)blacklist the devices from-to 45 * (Un-)blacklist the devices from-to
45 */ 46 */
46static void 47static int blacklist_range(range_action action, unsigned int from_ssid,
47blacklist_range (range_action action, unsigned int from, unsigned int to, 48 unsigned int to_ssid, unsigned int from,
48 unsigned int ssid) 49 unsigned int to, int msgtrigger)
49{ 50{
50 if (!to) 51 if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) {
51 to = from; 52 if (msgtrigger)
52 53 printk(KERN_WARNING "cio: Invalid cio_ignore range "
53 if (from > to || to > __MAX_SUBCHANNEL || ssid > __MAX_SSID) { 54 "0.%x.%04x-0.%x.%04x\n", from_ssid, from,
54 printk (KERN_WARNING "cio: Invalid blacklist range " 55 to_ssid, to);
55 "0.%x.%04x to 0.%x.%04x, skipping\n", 56 return 1;
56 ssid, from, ssid, to);
57 return;
58 } 57 }
59 for (; from <= to; from++) { 58
59 while ((from_ssid < to_ssid) || ((from_ssid == to_ssid) &&
60 (from <= to))) {
60 if (action == add) 61 if (action == add)
61 set_bit (from, bl_dev[ssid]); 62 set_bit(from, bl_dev[from_ssid]);
62 else 63 else
63 clear_bit (from, bl_dev[ssid]); 64 clear_bit(from, bl_dev[from_ssid]);
65 from++;
66 if (from > __MAX_SUBCHANNEL) {
67 from_ssid++;
68 from = 0;
69 }
64 } 70 }
71
72 return 0;
65} 73}
66 74
67/* 75static int pure_hex(char **cp, unsigned int *val, int min_digit,
68 * Function: blacklist_busid 76 int max_digit, int max_val)
69 * Get devno/busid from given string.
70 * Shamelessly grabbed from dasd_devmap.c.
71 */
72static int
73blacklist_busid(char **str, int *id0, int *ssid, int *devno)
74{ 77{
75 int val, old_style; 78 int diff;
76 char *sav; 79 unsigned int value;
77 80
78 sav = *str; 81 diff = 0;
82 *val = 0;
79 83
80 /* check for leading '0x' */ 84 while (isxdigit(**cp) && (diff <= max_digit)) {
81 old_style = 0; 85
82 if ((*str)[0] == '0' && (*str)[1] == 'x') { 86 if (isdigit(**cp))
83 *str += 2; 87 value = **cp - '0';
84 old_style = 1; 88 else
85 } 89 value = tolower(**cp) - 'a' + 10;
86 if (!isxdigit((*str)[0])) /* We require at least one hex digit */ 90 *val = *val * 16 + value;
87 goto confused; 91 (*cp)++;
88 val = simple_strtoul(*str, str, 16); 92 diff++;
89 if (old_style || (*str)[0] != '.') {
90 *id0 = *ssid = 0;
91 if (val < 0 || val > 0xffff)
92 goto confused;
93 *devno = val;
94 if ((*str)[0] != ',' && (*str)[0] != '-' &&
95 (*str)[0] != '\n' && (*str)[0] != '\0')
96 goto confused;
97 return 0;
98 } 93 }
99 /* New style x.y.z busid */ 94
100 if (val < 0 || val > 0xff) 95 if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
101 goto confused; 96 return 1;
102 *id0 = val; 97
103 (*str)++;
104 if (!isxdigit((*str)[0])) /* We require at least one hex digit */
105 goto confused;
106 val = simple_strtoul(*str, str, 16);
107 if (val < 0 || val > 0xff || (*str)++[0] != '.')
108 goto confused;
109 *ssid = val;
110 if (!isxdigit((*str)[0])) /* We require at least one hex digit */
111 goto confused;
112 val = simple_strtoul(*str, str, 16);
113 if (val < 0 || val > 0xffff)
114 goto confused;
115 *devno = val;
116 if ((*str)[0] != ',' && (*str)[0] != '-' &&
117 (*str)[0] != '\n' && (*str)[0] != '\0')
118 goto confused;
119 return 0; 98 return 0;
120confused:
121 strsep(str, ",\n");
122 printk(KERN_WARNING "cio: Invalid cio_ignore parameter '%s'\n", sav);
123 return 1;
124} 99}
125 100
126static int 101static int parse_busid(char *str, int *cssid, int *ssid, int *devno,
127blacklist_parse_parameters (char *str, range_action action) 102 int msgtrigger)
128{ 103{
129 int from, to, from_id0, to_id0, from_ssid, to_ssid; 104 char *str_work;
130 105 int val, rc, ret;
131 while (*str != 0 && *str != '\n') { 106
132 range_action ra = action; 107 rc = 1;
133 while(*str == ',') 108
134 str++; 109 if (*str == '\0')
135 if (*str == '!') { 110 goto out;
136 ra = !action; 111
137 ++str; 112 /* old style */
113 str_work = str;
114 val = simple_strtoul(str, &str_work, 16);
115
116 if (*str_work == '\0') {
117 if (val <= __MAX_SUBCHANNEL) {
118 *devno = val;
119 *ssid = 0;
120 *cssid = 0;
121 rc = 0;
138 } 122 }
123 goto out;
124 }
139 125
140 /* 126 /* new style */
141 * Since we have to parse the proc commands and the 127 str_work = str;
142 * kernel arguments we have to check four cases 128 ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
143 */ 129 if (ret || (str_work[0] != '.'))
144 if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 || 130 goto out;
145 strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) { 131 str_work++;
146 int j; 132 ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
147 133 if (ret || (str_work[0] != '.'))
148 str += 3; 134 goto out;
149 for (j=0; j <= __MAX_SSID; j++) 135 str_work++;
150 blacklist_range(ra, 0, __MAX_SUBCHANNEL, j); 136 ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
151 } else { 137 if (ret || (str_work[0] != '\0'))
152 int rc; 138 goto out;
139
140 rc = 0;
141out:
142 if (rc && msgtrigger)
143 printk(KERN_WARNING "cio: Invalid cio_ignore device '%s'\n",
144 str);
145
146 return rc;
147}
153 148
154 rc = blacklist_busid(&str, &from_id0, 149static int blacklist_parse_parameters(char *str, range_action action,
155 &from_ssid, &from); 150 int msgtrigger)
156 if (rc) 151{
157 continue; 152 int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
158 to = from; 153 int rc, totalrc;
159 to_id0 = from_id0; 154 char *parm;
160 to_ssid = from_ssid; 155 range_action ra;
161 if (*str == '-') { 156
162 str++; 157 totalrc = 0;
163 rc = blacklist_busid(&str, &to_id0, 158
164 &to_ssid, &to); 159 while ((parm = strsep(&str, ","))) {
165 if (rc) 160 rc = 0;
166 continue; 161 ra = action;
167 } 162 if (*parm == '!') {
168 if (*str == '-') { 163 if (ra == add)
169 printk(KERN_WARNING "cio: invalid cio_ignore " 164 ra = free;
170 "parameter '%s'\n", 165 else
171 strsep(&str, ",\n")); 166 ra = add;
172 continue; 167 parm++;
173 } 168 }
174 if ((from_id0 != to_id0) || 169 if (strcmp(parm, "all") == 0) {
175 (from_ssid != to_ssid)) { 170 from_cssid = 0;
176 printk(KERN_WARNING "cio: invalid cio_ignore " 171 from_ssid = 0;
177 "range %x.%x.%04x-%x.%x.%04x\n", 172 from = 0;
178 from_id0, from_ssid, from, 173 to_cssid = __MAX_CSSID;
179 to_id0, to_ssid, to); 174 to_ssid = __MAX_SSID;
180 continue; 175 to = __MAX_SUBCHANNEL;
176 } else {
177 rc = parse_busid(strsep(&parm, "-"), &from_cssid,
178 &from_ssid, &from, msgtrigger);
179 if (!rc) {
180 if (parm != NULL)
181 rc = parse_busid(parm, &to_cssid,
182 &to_ssid, &to,
183 msgtrigger);
184 else {
185 to_cssid = from_cssid;
186 to_ssid = from_ssid;
187 to = from;
188 }
181 } 189 }
182 blacklist_range (ra, from, to, to_ssid);
183 } 190 }
191 if (!rc) {
192 rc = blacklist_range(ra, from_ssid, to_ssid, from, to,
193 msgtrigger);
194 if (rc)
195 totalrc = 1;
196 } else
197 totalrc = 1;
184 } 198 }
185 return 1; 199
200 return totalrc;
186} 201}
187 202
188/* Parsing the commandline for blacklist parameters, e.g. to blacklist
189 * bus ids 0.0.1234, 0.0.1235 and 0.0.1236, you could use any of:
190 * - cio_ignore=1234-1236
191 * - cio_ignore=0x1234-0x1235,1236
192 * - cio_ignore=0x1234,1235-1236
193 * - cio_ignore=1236 cio_ignore=1234-0x1236
194 * - cio_ignore=1234 cio_ignore=1236 cio_ignore=0x1235
195 * - cio_ignore=0.0.1234-0.0.1236
196 * - cio_ignore=0.0.1234,0x1235,1236
197 * - ...
198 */
199static int __init 203static int __init
200blacklist_setup (char *str) 204blacklist_setup (char *str)
201{ 205{
202 CIO_MSG_EVENT(6, "Reading blacklist parameters\n"); 206 CIO_MSG_EVENT(6, "Reading blacklist parameters\n");
203 return blacklist_parse_parameters (str, add); 207 if (blacklist_parse_parameters(str, add, 1))
208 return 0;
209 return 1;
204} 210}
205 211
206__setup ("cio_ignore=", blacklist_setup); 212__setup ("cio_ignore=", blacklist_setup);
@@ -224,27 +230,23 @@ is_blacklisted (int ssid, int devno)
224 * Function: blacklist_parse_proc_parameters 230 * Function: blacklist_parse_proc_parameters
225 * parse the stuff which is piped to /proc/cio_ignore 231 * parse the stuff which is piped to /proc/cio_ignore
226 */ 232 */
227static void 233static int blacklist_parse_proc_parameters(char *buf)
228blacklist_parse_proc_parameters (char *buf)
229{ 234{
230 if (strncmp (buf, "free ", 5) == 0) { 235 int rc;
231 blacklist_parse_parameters (buf + 5, free); 236 char *parm;
232 } else if (strncmp (buf, "add ", 4) == 0) { 237
233 /* 238 parm = strsep(&buf, " ");
234 * We don't need to check for known devices since 239
235 * css_probe_device will handle this correctly. 240 if (strcmp("free", parm) == 0)
236 */ 241 rc = blacklist_parse_parameters(buf, free, 0);
237 blacklist_parse_parameters (buf + 4, add); 242 else if (strcmp("add", parm) == 0)
238 } else { 243 rc = blacklist_parse_parameters(buf, add, 0);
239 printk (KERN_WARNING "cio: cio_ignore: Parse error; \n" 244 else
240 KERN_WARNING "try using 'free all|<devno-range>," 245 return 1;
241 "<devno-range>,...'\n"
242 KERN_WARNING "or 'add <devno-range>,"
243 "<devno-range>,...'\n");
244 return;
245 }
246 246
247 css_schedule_reprobe(); 247 css_schedule_reprobe();
248
249 return rc;
248} 250}
249 251
250/* Iterator struct for all devices. */ 252/* Iterator struct for all devices. */
@@ -328,6 +330,8 @@ cio_ignore_write(struct file *file, const char __user *user_buf,
328 size_t user_len, loff_t *offset) 330 size_t user_len, loff_t *offset)
329{ 331{
330 char *buf; 332 char *buf;
333 size_t i;
334 ssize_t rc, ret;
331 335
332 if (*offset) 336 if (*offset)
333 return -EINVAL; 337 return -EINVAL;
@@ -336,16 +340,27 @@ cio_ignore_write(struct file *file, const char __user *user_buf,
336 buf = vmalloc (user_len + 1); /* maybe better use the stack? */ 340 buf = vmalloc (user_len + 1); /* maybe better use the stack? */
337 if (buf == NULL) 341 if (buf == NULL)
338 return -ENOMEM; 342 return -ENOMEM;
343 memset(buf, 0, user_len + 1);
344
339 if (strncpy_from_user (buf, user_buf, user_len) < 0) { 345 if (strncpy_from_user (buf, user_buf, user_len) < 0) {
340 vfree (buf); 346 rc = -EFAULT;
341 return -EFAULT; 347 goto out_free;
342 } 348 }
343 buf[user_len] = '\0';
344 349
345 blacklist_parse_proc_parameters (buf); 350 i = user_len - 1;
351 while ((i >= 0) && (isspace(buf[i]) || (buf[i] == 0))) {
352 buf[i] = '\0';
353 i--;
354 }
355 ret = blacklist_parse_proc_parameters(buf);
356 if (ret)
357 rc = -EINVAL;
358 else
359 rc = user_len;
346 360
361out_free:
347 vfree (buf); 362 vfree (buf);
348 return user_len; 363 return rc;
349} 364}
350 365
351static const struct seq_operations cio_ignore_proc_seq_ops = { 366static const struct seq_operations cio_ignore_proc_seq_ops = {
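
One detail in cio_ignore_write() above deserves care: the backwards trim of trailing whitespace indexes with a size_t, and since size_t is unsigned the (i >= 0) test can never go false, so an all-whitespace buffer would wrap past the start. A sketch of the same trim with a signed index, assuming the intent is to strip trailing whitespace and NUL padding:

    #include <linux/ctype.h>
    #include <linux/types.h>

    static void trim_tail(char *buf, size_t len)
    {
            ssize_t i;   /* signed, so the loop can stop at -1 */

            for (i = len - 1; i >= 0; i--) {
                    if (!isspace(buf[i]) && buf[i] != '\0')
                            break;
                    buf[i] = '\0';
            }
    }
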
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 08a578161306..82c6a2d45128 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -39,23 +39,6 @@ debug_info_t *cio_debug_msg_id;
39debug_info_t *cio_debug_trace_id; 39debug_info_t *cio_debug_trace_id;
40debug_info_t *cio_debug_crw_id; 40debug_info_t *cio_debug_crw_id;
41 41
42int cio_show_msg;
43
44static int __init
45cio_setup (char *parm)
46{
47 if (!strcmp (parm, "yes"))
48 cio_show_msg = 1;
49 else if (!strcmp (parm, "no"))
50 cio_show_msg = 0;
51 else
52 printk(KERN_ERR "cio: cio_setup: "
53 "invalid cio_msg parameter '%s'", parm);
54 return 1;
55}
56
57__setup ("cio_msg=", cio_setup);
58
59/* 42/*
60 * Function: cio_debug_init 43 * Function: cio_debug_init
61 * Initializes three debug logs for common I/O: 44 * Initializes three debug logs for common I/O:
@@ -166,7 +149,7 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
166 149
167 stsch (sch->schid, &sch->schib); 150 stsch (sch->schid, &sch->schib);
168 151
169 CIO_MSG_EVENT(0, "cio_start: 'not oper' status for " 152 CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
170 "subchannel 0.%x.%04x!\n", sch->schid.ssid, 153 "subchannel 0.%x.%04x!\n", sch->schid.ssid,
171 sch->schid.sch_no); 154 sch->schid.sch_no);
172 sprintf(dbf_text, "no%s", sch->dev.bus_id); 155 sprintf(dbf_text, "no%s", sch->dev.bus_id);
@@ -567,10 +550,9 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
567 * ... just being curious we check for non I/O subchannels 550 * ... just being curious we check for non I/O subchannels
568 */ 551 */
569 if (sch->st != 0) { 552 if (sch->st != 0) {
570 CIO_DEBUG(KERN_INFO, 0, 553 CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports "
571 "Subchannel 0.%x.%04x reports " 554 "non-I/O subchannel type %04X\n",
572 "non-I/O subchannel type %04X\n", 555 sch->schid.ssid, sch->schid.sch_no, sch->st);
573 sch->schid.ssid, sch->schid.sch_no, sch->st);
574 /* We stop here for non-io subchannels. */ 556 /* We stop here for non-io subchannels. */
575 err = sch->st; 557 err = sch->st;
576 goto out; 558 goto out;
@@ -588,7 +570,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
588 * This device must not be known to Linux. So we simply 570 * This device must not be known to Linux. So we simply
589 * say that there is no device and return ENODEV. 571 * say that there is no device and return ENODEV.
590 */ 572 */
591 CIO_MSG_EVENT(4, "Blacklisted device detected " 573 CIO_MSG_EVENT(6, "Blacklisted device detected "
592 "at devno %04X, subchannel set %x\n", 574 "at devno %04X, subchannel set %x\n",
593 sch->schib.pmcw.dev, sch->schid.ssid); 575 sch->schib.pmcw.dev, sch->schid.ssid);
594 err = -ENODEV; 576 err = -ENODEV;
@@ -601,12 +583,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
601 sch->lpm = sch->schib.pmcw.pam & sch->opm; 583 sch->lpm = sch->schib.pmcw.pam & sch->opm;
602 sch->isc = 3; 584 sch->isc = 3;
603 585
604 CIO_DEBUG(KERN_INFO, 0, 586 CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X "
605 "Detected device %04x on subchannel 0.%x.%04X" 587 "- PIM = %02X, PAM = %02X, POM = %02X\n",
606 " - PIM = %02X, PAM = %02X, POM = %02X\n", 588 sch->schib.pmcw.dev, sch->schid.ssid,
607 sch->schib.pmcw.dev, sch->schid.ssid, 589 sch->schid.sch_no, sch->schib.pmcw.pim,
608 sch->schid.sch_no, sch->schib.pmcw.pim, 590 sch->schib.pmcw.pam, sch->schib.pmcw.pom);
609 sch->schib.pmcw.pam, sch->schib.pmcw.pom);
610 591
611 /* 592 /*
612 * We now have to initially ... 593 * We now have to initially ...
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 3c75412904dc..6e933aebe013 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -118,6 +118,4 @@ extern void *cio_get_console_priv(void);
118#define cio_get_console_priv() NULL 118#define cio_get_console_priv() NULL
119#endif 119#endif
120 120
121extern int cio_show_msg;
122
123#endif 121#endif
diff --git a/drivers/s390/cio/cio_debug.h b/drivers/s390/cio/cio_debug.h
index d7429ef6c666..e64e8278c42e 100644
--- a/drivers/s390/cio/cio_debug.h
+++ b/drivers/s390/cio/cio_debug.h
@@ -31,10 +31,4 @@ static inline void CIO_HEX_EVENT(int level, void *data, int length)
31 } 31 }
32} 32}
33 33
34#define CIO_DEBUG(printk_level, event_level, msg...) do { \
35 if (cio_show_msg) \
36 printk(printk_level "cio: " msg); \
37 CIO_MSG_EVENT(event_level, msg); \
38 } while (0)
39
40#endif 34#endif
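
With the cio_msg= switch and cio_show_msg gone, the CIO_DEBUG macro's printk half has no users, and messages flow only into the s390 debug feature at an explicit level (0 = most important, larger = chattier), which is what the many level adjustments in the surrounding hunks tune. The surviving shape of such a macro is roughly:

    /* sketch only; cio_debug_msg_id is the cio debug log handle */
    #define DEMO_MSG_EVENT(level, fmt...) \
            debug_sprintf_event(cio_debug_msg_id, level, fmt)
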
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 595e327d2f76..a76956512b2d 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -570,7 +570,7 @@ static void reprobe_all(struct work_struct *unused)
570{ 570{
571 int ret; 571 int ret;
572 572
573 CIO_MSG_EVENT(2, "reprobe start\n"); 573 CIO_MSG_EVENT(4, "reprobe start\n");
574 574
575 need_reprobe = 0; 575 need_reprobe = 0;
576 /* Make sure initial subchannel scan is done. */ 576 /* Make sure initial subchannel scan is done. */
@@ -578,7 +578,7 @@ static void reprobe_all(struct work_struct *unused)
578 atomic_read(&ccw_device_init_count) == 0); 578 atomic_read(&ccw_device_init_count) == 0);
579 ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL); 579 ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
580 580
581 CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret, 581 CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
582 need_reprobe); 582 need_reprobe);
583} 583}
584 584
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index abfd601d237a..e22813db74a2 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -341,7 +341,7 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
341 rc = device_schedule_callback(&cdev->dev, 341 rc = device_schedule_callback(&cdev->dev,
342 ccw_device_remove_orphan_cb); 342 ccw_device_remove_orphan_cb);
343 if (rc) 343 if (rc)
344 CIO_MSG_EVENT(2, "Couldn't unregister orphan " 344 CIO_MSG_EVENT(0, "Couldn't unregister orphan "
345 "0.%x.%04x\n", 345 "0.%x.%04x\n",
346 cdev->private->dev_id.ssid, 346 cdev->private->dev_id.ssid,
347 cdev->private->dev_id.devno); 347 cdev->private->dev_id.devno);
@@ -351,7 +351,7 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
351 rc = device_schedule_callback(cdev->dev.parent, 351 rc = device_schedule_callback(cdev->dev.parent,
352 ccw_device_remove_sch_cb); 352 ccw_device_remove_sch_cb);
353 if (rc) 353 if (rc)
354 CIO_MSG_EVENT(2, "Couldn't unregister disconnected device " 354 CIO_MSG_EVENT(0, "Couldn't unregister disconnected device "
355 "0.%x.%04x\n", 355 "0.%x.%04x\n",
356 cdev->private->dev_id.ssid, 356 cdev->private->dev_id.ssid,
357 cdev->private->dev_id.devno); 357 cdev->private->dev_id.devno);
@@ -397,7 +397,7 @@ int ccw_device_set_offline(struct ccw_device *cdev)
397 if (ret == 0) 397 if (ret == 0)
398 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); 398 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
399 else { 399 else {
400 CIO_MSG_EVENT(2, "ccw_device_offline returned %d, " 400 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
401 "device 0.%x.%04x\n", 401 "device 0.%x.%04x\n",
402 ret, cdev->private->dev_id.ssid, 402 ret, cdev->private->dev_id.ssid,
403 cdev->private->dev_id.devno); 403 cdev->private->dev_id.devno);
@@ -433,7 +433,7 @@ int ccw_device_set_online(struct ccw_device *cdev)
433 if (ret == 0) 433 if (ret == 0)
434 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); 434 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
435 else { 435 else {
436 CIO_MSG_EVENT(2, "ccw_device_online returned %d, " 436 CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
437 "device 0.%x.%04x\n", 437 "device 0.%x.%04x\n",
438 ret, cdev->private->dev_id.ssid, 438 ret, cdev->private->dev_id.ssid,
439 cdev->private->dev_id.devno); 439 cdev->private->dev_id.devno);
@@ -451,7 +451,7 @@ int ccw_device_set_online(struct ccw_device *cdev)
451 if (ret == 0) 451 if (ret == 0)
452 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); 452 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
453 else 453 else
454 CIO_MSG_EVENT(2, "ccw_device_offline returned %d, " 454 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
455 "device 0.%x.%04x\n", 455 "device 0.%x.%04x\n",
456 ret, cdev->private->dev_id.ssid, 456 ret, cdev->private->dev_id.ssid,
457 cdev->private->dev_id.devno); 457 cdev->private->dev_id.devno);
@@ -803,7 +803,7 @@ static void sch_attach_disconnected_device(struct subchannel *sch,
803 other_sch = to_subchannel(get_device(cdev->dev.parent)); 803 other_sch = to_subchannel(get_device(cdev->dev.parent));
804 ret = device_move(&cdev->dev, &sch->dev); 804 ret = device_move(&cdev->dev, &sch->dev);
805 if (ret) { 805 if (ret) {
806 CIO_MSG_EVENT(2, "Moving disconnected device 0.%x.%04x failed " 806 CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed "
807 "(ret=%d)!\n", cdev->private->dev_id.ssid, 807 "(ret=%d)!\n", cdev->private->dev_id.ssid,
808 cdev->private->dev_id.devno, ret); 808 cdev->private->dev_id.devno, ret);
809 put_device(&other_sch->dev); 809 put_device(&other_sch->dev);
@@ -933,7 +933,7 @@ io_subchannel_register(struct work_struct *work)
933 ret = device_reprobe(&cdev->dev); 933 ret = device_reprobe(&cdev->dev);
934 if (ret) 934 if (ret)
935 /* We can't do much here. */ 935 /* We can't do much here. */
936 CIO_MSG_EVENT(2, "device_reprobe() returned" 936 CIO_MSG_EVENT(0, "device_reprobe() returned"
937 " %d for 0.%x.%04x\n", ret, 937 " %d for 0.%x.%04x\n", ret,
938 cdev->private->dev_id.ssid, 938 cdev->private->dev_id.ssid,
939 cdev->private->dev_id.devno); 939 cdev->private->dev_id.devno);
@@ -1086,7 +1086,7 @@ static void ccw_device_move_to_sch(struct work_struct *work)
1086 rc = device_move(&cdev->dev, &sch->dev); 1086 rc = device_move(&cdev->dev, &sch->dev);
1087 mutex_unlock(&sch->reg_mutex); 1087 mutex_unlock(&sch->reg_mutex);
1088 if (rc) { 1088 if (rc) {
1089 CIO_MSG_EVENT(2, "Moving device 0.%x.%04x to subchannel " 1089 CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to subchannel "
1090 "0.%x.%04x failed (ret=%d)!\n", 1090 "0.%x.%04x failed (ret=%d)!\n",
1091 cdev->private->dev_id.ssid, 1091 cdev->private->dev_id.ssid,
1092 cdev->private->dev_id.devno, sch->schid.ssid, 1092 cdev->private->dev_id.devno, sch->schid.ssid,
@@ -1446,8 +1446,7 @@ ccw_device_remove (struct device *dev)
1446 wait_event(cdev->private->wait_q, 1446 wait_event(cdev->private->wait_q,
1447 dev_fsm_final_state(cdev)); 1447 dev_fsm_final_state(cdev));
1448 else 1448 else
1449 //FIXME: we can't fail! 1449 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
1450 CIO_MSG_EVENT(2, "ccw_device_offline returned %d, "
1451 "device 0.%x.%04x\n", 1450 "device 0.%x.%04x\n",
1452 ret, cdev->private->dev_id.ssid, 1451 ret, cdev->private->dev_id.ssid,
1453 cdev->private->dev_id.devno); 1452 cdev->private->dev_id.devno);
@@ -1524,7 +1523,7 @@ static int recovery_check(struct device *dev, void *data)
1524 spin_lock_irq(cdev->ccwlock); 1523 spin_lock_irq(cdev->ccwlock);
1525 switch (cdev->private->state) { 1524 switch (cdev->private->state) {
1526 case DEV_STATE_DISCONNECTED: 1525 case DEV_STATE_DISCONNECTED:
1527 CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", 1526 CIO_MSG_EVENT(4, "recovery: trigger 0.%x.%04x\n",
1528 cdev->private->dev_id.ssid, 1527 cdev->private->dev_id.ssid,
1529 cdev->private->dev_id.devno); 1528 cdev->private->dev_id.devno);
1530 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 1529 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
@@ -1554,7 +1553,7 @@ static void recovery_work_func(struct work_struct *unused)
1554 } 1553 }
1555 spin_unlock_irq(&recovery_lock); 1554 spin_unlock_irq(&recovery_lock);
1556 } else 1555 } else
1557 CIO_MSG_EVENT(2, "recovery: end\n"); 1556 CIO_MSG_EVENT(4, "recovery: end\n");
1558} 1557}
1559 1558
1560static DECLARE_WORK(recovery_work, recovery_work_func); 1559static DECLARE_WORK(recovery_work, recovery_work_func);
@@ -1572,7 +1571,7 @@ void ccw_device_schedule_recovery(void)
1572{ 1571{
1573 unsigned long flags; 1572 unsigned long flags;
1574 1573
1575 CIO_MSG_EVENT(2, "recovery: schedule\n"); 1574 CIO_MSG_EVENT(4, "recovery: schedule\n");
1576 spin_lock_irqsave(&recovery_lock, flags); 1575 spin_lock_irqsave(&recovery_lock, flags);
1577 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { 1576 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1578 recovery_phase = 0; 1577 recovery_phase = 0;
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 99403b0a97a7..e268d5a77c12 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -322,10 +322,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
322 same_dev = 0; /* Keep the compiler quiet... */ 322 same_dev = 0; /* Keep the compiler quiet... */
323 switch (state) { 323 switch (state) {
324 case DEV_STATE_NOT_OPER: 324 case DEV_STATE_NOT_OPER:
325 CIO_DEBUG(KERN_WARNING, 2, 325 CIO_MSG_EVENT(2, "SenseID : unknown device %04x on "
326 "SenseID : unknown device %04x on subchannel " 326 "subchannel 0.%x.%04x\n",
327 "0.%x.%04x\n", cdev->private->dev_id.devno, 327 cdev->private->dev_id.devno,
328 sch->schid.ssid, sch->schid.sch_no); 328 sch->schid.ssid, sch->schid.sch_no);
329 break; 329 break;
330 case DEV_STATE_OFFLINE: 330 case DEV_STATE_OFFLINE:
331 if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) { 331 if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
@@ -348,20 +348,19 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
348 return; 348 return;
349 } 349 }
350 /* Issue device info message. */ 350 /* Issue device info message. */
351 CIO_DEBUG(KERN_INFO, 2, 351 CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
352 "SenseID : device 0.%x.%04x reports: " 352 "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
353 "CU Type/Mod = %04X/%02X, Dev Type/Mod = " 353 "%04X/%02X\n",
354 "%04X/%02X\n", 354 cdev->private->dev_id.ssid,
355 cdev->private->dev_id.ssid, 355 cdev->private->dev_id.devno,
356 cdev->private->dev_id.devno, 356 cdev->id.cu_type, cdev->id.cu_model,
357 cdev->id.cu_type, cdev->id.cu_model, 357 cdev->id.dev_type, cdev->id.dev_model);
358 cdev->id.dev_type, cdev->id.dev_model);
359 break; 358 break;
360 case DEV_STATE_BOXED: 359 case DEV_STATE_BOXED:
361 CIO_DEBUG(KERN_WARNING, 2, 360 CIO_MSG_EVENT(0, "SenseID : boxed device %04x on "
362 "SenseID : boxed device %04x on subchannel " 361 " subchannel 0.%x.%04x\n",
363 "0.%x.%04x\n", cdev->private->dev_id.devno, 362 cdev->private->dev_id.devno,
364 sch->schid.ssid, sch->schid.sch_no); 363 sch->schid.ssid, sch->schid.sch_no);
365 break; 364 break;
366 } 365 }
367 cdev->private->state = state; 366 cdev->private->state = state;
@@ -443,9 +442,8 @@ ccw_device_done(struct ccw_device *cdev, int state)
443 442
444 443
445 if (state == DEV_STATE_BOXED) 444 if (state == DEV_STATE_BOXED)
446 CIO_DEBUG(KERN_WARNING, 2, 445 CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
447 "Boxed device %04x on subchannel %04x\n", 446 cdev->private->dev_id.devno, sch->schid.sch_no);
448 cdev->private->dev_id.devno, sch->schid.sch_no);
449 447
450 if (cdev->private->flags.donotify) { 448 if (cdev->private->flags.donotify) {
451 cdev->private->flags.donotify = 0; 449 cdev->private->flags.donotify = 0;
@@ -900,7 +898,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
900 /* Basic sense hasn't started. Try again. */ 898 /* Basic sense hasn't started. Try again. */
901 ccw_device_do_sense(cdev, irb); 899 ccw_device_do_sense(cdev, irb);
902 else { 900 else {
903 CIO_MSG_EVENT(2, "Huh? 0.%x.%04x: unsolicited " 901 CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
904 "interrupt during w4sense...\n", 902 "interrupt during w4sense...\n",
905 cdev->private->dev_id.ssid, 903 cdev->private->dev_id.ssid,
906 cdev->private->dev_id.devno); 904 cdev->private->dev_id.devno);
@@ -1169,8 +1167,10 @@ ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
1169static void 1167static void
1170ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event) 1168ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
1171{ 1169{
1172 CIO_MSG_EVENT(0, "dev_jumptable[%i][%i] == NULL\n", 1170 CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device "
1173 cdev->private->state, dev_event); 1171 "0.%x.%04x\n", cdev->private->state, dev_event,
1172 cdev->private->dev_id.ssid,
1173 cdev->private->dev_id.devno);
1174 BUG(); 1174 BUG();
1175} 1175}
1176 1176
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index dc4d87f77f6c..cba7020517ed 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -214,7 +214,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
214 * sense id information. So, for intervention required, 214 * sense id information. So, for intervention required,
215 * we use the "whack it until it talks" strategy... 215 * we use the "whack it until it talks" strategy...
216 */ 216 */
217 CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel " 217 CIO_MSG_EVENT(0, "SenseID : device %04x on Subchannel "
218 "0.%x.%04x reports cmd reject\n", 218 "0.%x.%04x reports cmd reject\n",
219 cdev->private->dev_id.devno, sch->schid.ssid, 219 cdev->private->dev_id.devno, sch->schid.ssid,
220 sch->schid.sch_no); 220 sch->schid.sch_no);
@@ -239,7 +239,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
239 239
240 lpm = to_io_private(sch)->orb.lpm; 240 lpm = to_io_private(sch)->orb.lpm;
241 if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) 241 if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
242 CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x " 242 CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x "
243 "on subchannel 0.%x.%04x is " 243 "on subchannel 0.%x.%04x is "
244 "'not operational'\n", lpm, 244 "'not operational'\n", lpm,
245 cdev->private->dev_id.devno, 245 cdev->private->dev_id.devno,
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index c52449a1f9fc..ba559053402e 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -79,7 +79,7 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev)
79 /* ret is 0, -EBUSY, -EACCES or -ENODEV */ 79 /* ret is 0, -EBUSY, -EACCES or -ENODEV */
80 if (ret != -EACCES) 80 if (ret != -EACCES)
81 return ret; 81 return ret;
82 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel " 82 CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel "
83 "0.%x.%04x, lpm %02X, became 'not " 83 "0.%x.%04x, lpm %02X, became 'not "
84 "operational'\n", 84 "operational'\n",
85 cdev->private->dev_id.devno, 85 cdev->private->dev_id.devno,
@@ -159,7 +159,7 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
159 u8 lpm; 159 u8 lpm;
160 160
161 lpm = to_io_private(sch)->orb.lpm; 161 lpm = to_io_private(sch)->orb.lpm;
162 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x," 162 CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x,"
163 " lpm %02X, became 'not operational'\n", 163 " lpm %02X, became 'not operational'\n",
164 cdev->private->dev_id.devno, sch->schid.ssid, 164 cdev->private->dev_id.devno, sch->schid.ssid,
165 sch->schid.sch_no, lpm); 165 sch->schid.sch_no, lpm);
@@ -275,7 +275,7 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
275 return ret; 275 return ret;
276 } 276 }
277 /* PGID command failed on this path. */ 277 /* PGID command failed on this path. */
278 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " 278 CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel "
279 "0.%x.%04x, lpm %02X, became 'not operational'\n", 279 "0.%x.%04x, lpm %02X, became 'not operational'\n",
280 cdev->private->dev_id.devno, sch->schid.ssid, 280 cdev->private->dev_id.devno, sch->schid.ssid,
281 sch->schid.sch_no, cdev->private->imask); 281 sch->schid.sch_no, cdev->private->imask);
@@ -317,7 +317,7 @@ static int __ccw_device_do_nop(struct ccw_device *cdev)
317 return ret; 317 return ret;
318 } 318 }
319 /* nop command failed on this path. */ 319 /* nop command failed on this path. */
320 CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel " 320 CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel "
321 "0.%x.%04x, lpm %02X, became 'not operational'\n", 321 "0.%x.%04x, lpm %02X, became 'not operational'\n",
322 cdev->private->dev_id.devno, sch->schid.ssid, 322 cdev->private->dev_id.devno, sch->schid.ssid,
323 sch->schid.sch_no, cdev->private->imask); 323 sch->schid.sch_no, cdev->private->imask);
@@ -362,7 +362,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
362 return -EAGAIN; 362 return -EAGAIN;
363 } 363 }
364 if (irb->scsw.cc == 3) { 364 if (irb->scsw.cc == 3) {
365 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel 0.%x.%04x," 365 CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x,"
366 " lpm %02X, became 'not operational'\n", 366 " lpm %02X, became 'not operational'\n",
367 cdev->private->dev_id.devno, sch->schid.ssid, 367 cdev->private->dev_id.devno, sch->schid.ssid,
368 sch->schid.sch_no, cdev->private->imask); 368 sch->schid.sch_no, cdev->private->imask);
@@ -391,7 +391,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev)
391 return -ETIME; 391 return -ETIME;
392 } 392 }
393 if (irb->scsw.cc == 3) { 393 if (irb->scsw.cc == 3) {
394 CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel 0.%x.%04x," 394 CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x,"
395 " lpm %02X, became 'not operational'\n", 395 " lpm %02X, became 'not operational'\n",
396 cdev->private->dev_id.devno, sch->schid.ssid, 396 cdev->private->dev_id.devno, sch->schid.ssid,
397 sch->schid.sch_no, cdev->private->imask); 397 sch->schid.sch_no, cdev->private->imask);
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 4d4b54277c43..5080f343ad74 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -48,10 +48,11 @@ s390_collect_crw_info(void *param)
48 int ccode; 48 int ccode;
49 struct semaphore *sem; 49 struct semaphore *sem;
50 unsigned int chain; 50 unsigned int chain;
51 int ignore;
51 52
52 sem = (struct semaphore *)param; 53 sem = (struct semaphore *)param;
53repeat: 54repeat:
54 down_interruptible(sem); 55 ignore = down_interruptible(sem);
55 chain = 0; 56 chain = 0;
56 while (1) { 57 while (1) {
57 if (unlikely(chain > 1)) { 58 if (unlikely(chain > 1)) {
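
The s390mach change quiets a __must_check warning: down_interruptible() returns 0 once the semaphore is taken and -EINTR when a signal interrupts the wait, and this kthread genuinely does not care, so the result is parked in a variable. Where the result does matter, the usual shape is:

    #include <linux/errno.h>
    #include <linux/semaphore.h>   /* <asm/semaphore.h> on kernels of this era */

    static int demo_wait(struct semaphore *sem)
    {
            if (down_interruptible(sem))
                    return -ERESTARTSYS;   /* interrupted by a signal */
            /* ... work under the semaphore ... */
            up(sem);
            return 0;
    }
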
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 0fb5bf4c43ac..8508816f303d 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -1967,45 +1967,6 @@ cleanup:
1967 return rcode; 1967 return rcode;
1968} 1968}
1969 1969
1970
1971/*
1972 * This routine returns information about the system. This does not effect
1973 * any logic and if the info is wrong - it doesn't matter.
1974 */
1975
1976/* Get all the info we can not get from kernel services */
1977static int adpt_system_info(void __user *buffer)
1978{
1979 sysInfo_S si;
1980
1981 memset(&si, 0, sizeof(si));
1982
1983 si.osType = OS_LINUX;
1984 si.osMajorVersion = 0;
1985 si.osMinorVersion = 0;
1986 si.osRevision = 0;
1987 si.busType = SI_PCI_BUS;
1988 si.processorFamily = DPTI_sig.dsProcessorFamily;
1989
1990#if defined __i386__
1991 adpt_i386_info(&si);
1992#elif defined (__ia64__)
1993 adpt_ia64_info(&si);
1994#elif defined(__sparc__)
1995 adpt_sparc_info(&si);
1996#elif defined (__alpha__)
1997 adpt_alpha_info(&si);
1998#else
1999 si.processorType = 0xff ;
2000#endif
2001 if(copy_to_user(buffer, &si, sizeof(si))){
2002 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
2003 return -EFAULT;
2004 }
2005
2006 return 0;
2007}
2008
2009#if defined __ia64__ 1970#if defined __ia64__
2010static void adpt_ia64_info(sysInfo_S* si) 1971static void adpt_ia64_info(sysInfo_S* si)
2011{ 1972{
@@ -2016,7 +1977,6 @@ static void adpt_ia64_info(sysInfo_S* si)
2016} 1977}
2017#endif 1978#endif
2018 1979
2019
2020#if defined __sparc__ 1980#if defined __sparc__
2021static void adpt_sparc_info(sysInfo_S* si) 1981static void adpt_sparc_info(sysInfo_S* si)
2022{ 1982{
@@ -2026,7 +1986,6 @@ static void adpt_sparc_info(sysInfo_S* si)
2026 si->processorType = PROC_ULTRASPARC; 1986 si->processorType = PROC_ULTRASPARC;
2027} 1987}
2028#endif 1988#endif
2029
2030#if defined __alpha__ 1989#if defined __alpha__
2031static void adpt_alpha_info(sysInfo_S* si) 1990static void adpt_alpha_info(sysInfo_S* si)
2032{ 1991{
@@ -2038,7 +1997,6 @@ static void adpt_alpha_info(sysInfo_S* si)
2038#endif 1997#endif
2039 1998
2040#if defined __i386__ 1999#if defined __i386__
2041
2042static void adpt_i386_info(sysInfo_S* si) 2000static void adpt_i386_info(sysInfo_S* si)
2043{ 2001{
2044 // This is all the info we need for now 2002 // This is all the info we need for now
@@ -2059,9 +2017,45 @@ static void adpt_i386_info(sysInfo_S* si)
2059 break; 2017 break;
2060 } 2018 }
2061} 2019}
2020#endif
2021
2022/*
 2023 * This routine returns information about the system. This does not affect
2024 * any logic and if the info is wrong - it doesn't matter.
2025 */
2062 2026
2027/* Get all the info we can not get from kernel services */
2028static int adpt_system_info(void __user *buffer)
2029{
2030 sysInfo_S si;
2031
2032 memset(&si, 0, sizeof(si));
2033
2034 si.osType = OS_LINUX;
2035 si.osMajorVersion = 0;
2036 si.osMinorVersion = 0;
2037 si.osRevision = 0;
2038 si.busType = SI_PCI_BUS;
2039 si.processorFamily = DPTI_sig.dsProcessorFamily;
2040
2041#if defined __i386__
2042 adpt_i386_info(&si);
2043#elif defined (__ia64__)
2044 adpt_ia64_info(&si);
2045#elif defined(__sparc__)
2046 adpt_sparc_info(&si);
2047#elif defined (__alpha__)
2048 adpt_alpha_info(&si);
2049#else
2050 si.processorType = 0xff ;
2063#endif 2051#endif
2052 if (copy_to_user(buffer, &si, sizeof(si))){
2053 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
2054 return -EFAULT;
2055 }
2064 2056
2057 return 0;
2058}
2065 2059
2066static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, 2060static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
2067 ulong arg) 2061 ulong arg)
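
The dpt_i2o.c hunks are a pure code move: adpt_system_info() is deleted from its old position above the arch-specific helpers and re-added, unchanged apart from an `if (` spacing tweak, after them. With the helpers (adpt_i386_info() and friends) now defined before their only caller, the forward declarations in dpti.h become unnecessary, which is what the next hunk removes. A minimal sketch of the principle, with hypothetical names:

/*
 * Illustration only: a static function defined before its caller
 * needs no separate prototype, so reordering definitions lets the
 * header declarations be dropped.
 */

/* Before the move: the caller precedes the helper, prototype required. */
static void helper_before(void);	/* forward declaration */

static void caller_before(void)
{
	helper_before();
}

static void helper_before(void)
{
}

/* After the move: helper first, no prototype needed. */
static void helper_after(void)
{
}

static void caller_after(void)
{
	helper_after();
}
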
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
index 924cd5a51676..337746d46043 100644
--- a/drivers/scsi/dpti.h
+++ b/drivers/scsi/dpti.h
@@ -316,19 +316,6 @@ static int adpt_close(struct inode *inode, struct file *file);
 static void adpt_delay(int millisec);
 #endif
 
-#if defined __ia64__
-static void adpt_ia64_info(sysInfo_S* si);
-#endif
-#if defined __sparc__
-static void adpt_sparc_info(sysInfo_S* si);
-#endif
-#if defined __alpha__
-static void adpt_sparc_info(sysInfo_S* si);
-#endif
-#if defined __i386__
-static void adpt_i386_info(sysInfo_S* si);
-#endif
-
 #define PRINT_BUFFER_SIZE 512
 
 #define HBA_FLAGS_DBG_FLAGS_MASK 0xffff0000	// Mask for debug flags
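
Note that the removed dpti.h block also carried an apparent copy-paste slip: the __alpha__ branch declared adpt_sparc_info() a second time instead of adpt_alpha_info(). Because dpt_i2o.c now defines each helper before adpt_system_info() uses it, the faulty declarations can simply be dropped rather than fixed.
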
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 1e2b9d826f69..eab032733790 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -556,7 +556,7 @@ static int uart_chars_in_buffer(struct tty_struct *tty)
 static void uart_flush_buffer(struct tty_struct *tty)
 {
 	struct uart_state *state = tty->driver_data;
-	struct uart_port *port = state->port;
+	struct uart_port *port;
 	unsigned long flags;
 
 	/*
@@ -568,6 +568,7 @@ static void uart_flush_buffer(struct tty_struct *tty)
 		return;
 	}
 
+	port = state->port;
 	pr_debug("uart_flush_buffer(%d) called\n", tty->index);
 
 	spin_lock_irqsave(&port->lock, flags);
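
The serial_core.c pair of hunks fixes an ordering bug rather than a style issue: the old code dereferenced state in the declaration initializer (`port = state->port`) before the validity check performed by the elided lines around the `return;`, so a NULL state would oops before the guard could fire. Splitting the declaration from the assignment moves the dereference after the check. A minimal sketch of the bug class, with hypothetical types (example_state, example_flush):

/* Sketch only; the types and function here are made up. */
struct example_port { int dummy; };
struct example_state { struct example_port *port; };

static void example_flush(struct example_state *state)
{
	struct example_port *port;	/* no initializer: no early dereference */

	if (!state)			/* the guard runs first */
		return;

	port = state->port;		/* dereference only after the guard */
	(void)port;
}
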
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 8a217648b250..a01e987c7d32 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -643,7 +643,7 @@ static void read_buf_callback(struct urb *urb)
 static int iuu_bulk_write(struct usb_serial_port *port)
 {
 	struct iuu_private *priv = usb_get_serial_port_data(port);
-	unsigned int flags;
+	unsigned long flags;
 	int result;
 	int i;
 	char *buf_ptr = port->write_urb->transfer_buffer;
@@ -694,7 +694,7 @@ static void iuu_uart_read_callback(struct urb *urb)
 {
 	struct usb_serial_port *port = urb->context;
 	struct iuu_private *priv = usb_get_serial_port_data(port);
-	unsigned int flags;
+	unsigned long flags;
 	int status;
 	int error = 0;
 	int len = 0;
@@ -759,7 +759,7 @@ static int iuu_uart_write(struct usb_serial_port *port, const u8 *buf,
 	int count)
 {
 	struct iuu_private *priv = usb_get_serial_port_data(port);
-	unsigned int flags;
+	unsigned long flags;
 	dbg("%s - enter", __func__);
 
 	if (count > 256)
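
All three iuu_phoenix.c hunks make the same one-word fix: spin_lock_irqsave() saves the processor's interrupt state into the variable passed as its second argument, and the kernel API defines that variable as unsigned long. An unsigned int compiles on 32-bit targets, but on 64-bit architectures the saved state may no longer fit, and the typecheck() in the kernel's irqflags macros warns about the mismatch, since the macro takes the variable itself rather than a pointer. A minimal sketch of correct usage, with a hypothetical lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock for the sketch */

static void example_critical_section(void)
{
	unsigned long flags;	/* must be unsigned long, never unsigned int */

	spin_lock_irqsave(&example_lock, flags);
	/* ... code that must run with local interrupts disabled ... */
	spin_unlock_irqrestore(&example_lock, flags);
}
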