Diffstat (limited to 'drivers')
-rw-r--r--drivers/acorn/char/Makefile1
-rw-r--r--drivers/acorn/char/i2c.c368
-rw-r--r--drivers/acorn/char/pcf8583.c284
-rw-r--r--drivers/acorn/char/pcf8583.h41
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/ata/Kconfig9
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/ahci.c70
-rw-r--r--drivers/ata/ata_generic.c10
-rw-r--r--drivers/ata/ata_piix.c8
-rw-r--r--drivers/ata/libata-acpi.c88
-rw-r--r--drivers/ata/libata-core.c285
-rw-r--r--drivers/ata/libata-eh.c249
-rw-r--r--drivers/ata/libata-scsi.c46
-rw-r--r--drivers/ata/libata-sff.c8
-rw-r--r--drivers/ata/libata.h13
-rw-r--r--drivers/ata/pata_ali.c8
-rw-r--r--drivers/ata/pata_amd.c12
-rw-r--r--drivers/ata/pata_atiixp.c4
-rw-r--r--drivers/ata/pata_cmd64x.c8
-rw-r--r--drivers/ata/pata_cs5520.c35
-rw-r--r--drivers/ata/pata_cs5530.c8
-rw-r--r--drivers/ata/pata_cs5535.c4
-rw-r--r--drivers/ata/pata_cypress.c4
-rw-r--r--drivers/ata/pata_efar.c4
-rw-r--r--drivers/ata/pata_hpt366.c9
-rw-r--r--drivers/ata/pata_hpt37x.c2
-rw-r--r--drivers/ata/pata_hpt3x3.c6
-rw-r--r--drivers/ata/pata_isapnp.c3
-rw-r--r--drivers/ata/pata_it8213.c4
-rw-r--r--drivers/ata/pata_it821x.c10
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c7
-rw-r--r--drivers/ata/pata_jmicron.c57
-rw-r--r--drivers/ata/pata_legacy.c22
-rw-r--r--drivers/ata/pata_marvell.c4
-rw-r--r--drivers/ata/pata_mpc52xx.c4
-rw-r--r--drivers/ata/pata_mpiix.c4
-rw-r--r--drivers/ata/pata_netcell.c4
-rw-r--r--drivers/ata/pata_ns87410.c4
-rw-r--r--drivers/ata/pata_oldpiix.c9
-rw-r--r--drivers/ata/pata_opti.c6
-rw-r--r--drivers/ata/pata_optidma.c6
-rw-r--r--drivers/ata/pata_pcmcia.c9
-rw-r--r--drivers/ata/pata_pdc2027x.c2
-rw-r--r--drivers/ata/pata_pdc202xx_old.c26
-rw-r--r--drivers/ata/pata_platform.c2
-rw-r--r--drivers/ata/pata_qdi.c14
-rw-r--r--drivers/ata/pata_radisys.c4
-rw-r--r--drivers/ata/pata_rz1000.c7
-rw-r--r--drivers/ata/pata_sc1200.c7
-rw-r--r--drivers/ata/pata_scc.c1230
-rw-r--r--drivers/ata/pata_serverworks.c8
-rw-r--r--drivers/ata/pata_sil680.c22
-rw-r--r--drivers/ata/pata_sis.c13
-rw-r--r--drivers/ata/pata_sl82c105.c42
-rw-r--r--drivers/ata/pata_triflex.c4
-rw-r--r--drivers/ata/pata_via.c8
-rw-r--r--drivers/ata/pata_winbond.c2
-rw-r--r--drivers/ata/pdc_adma.c4
-rw-r--r--drivers/ata/sata_inic162x.c7
-rw-r--r--drivers/ata/sata_mv.c39
-rw-r--r--drivers/ata/sata_nv.c242
-rw-r--r--drivers/ata/sata_promise.c32
-rw-r--r--drivers/ata/sata_qstor.c4
-rw-r--r--drivers/ata/sata_sil.c14
-rw-r--r--drivers/ata/sata_sil24.c7
-rw-r--r--drivers/ata/sata_sis.c5
-rw-r--r--drivers/ata/sata_svw.c2
-rw-r--r--drivers/ata/sata_sx4.c16
-rw-r--r--drivers/ata/sata_uli.c2
-rw-r--r--drivers/ata/sata_via.c30
-rw-r--r--drivers/ata/sata_vsc.c125
-rw-r--r--drivers/ata/sis.h5
-rw-r--r--drivers/block/Kconfig16
-rw-r--r--drivers/block/aoe/aoecmd.c12
-rw-r--r--drivers/block/aoe/aoenet.c5
-rw-r--r--drivers/block/cciss.c49
-rw-r--r--drivers/block/umem.c5
-rw-r--r--drivers/cdrom/viocd.c27
-rw-r--r--drivers/char/Kconfig15
-rw-r--r--drivers/char/agp/hp-agp.c2
-rw-r--r--drivers/char/agp/i460-agp.c2
-rw-r--r--drivers/char/agp/parisc-agp.c2
-rw-r--r--drivers/char/agp/sgi-agp.c2
-rw-r--r--drivers/char/agp/uninorth-agp.c4
-rw-r--r--drivers/char/cyclades.c1
-rw-r--r--drivers/char/ds1286.c9
-rw-r--r--drivers/char/epca.c17
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c5
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c3
-rw-r--r--drivers/char/tty_io.c14
-rw-r--r--drivers/clocksource/acpi_pm.c5
-rw-r--r--drivers/clocksource/cyclone.c2
-rw-r--r--drivers/connector/connector.c22
-rw-r--r--drivers/cpufreq/cpufreq.c4
-rw-r--r--drivers/crypto/geode-aes.c3
-rw-r--r--drivers/hid/hid-core.c7
-rw-r--r--drivers/hid/hid-debug.c1
-rw-r--r--drivers/hid/hid-input.c37
-rw-r--r--drivers/i2c/busses/Kconfig10
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-acorn.c97
-rw-r--r--drivers/ide/Kconfig10
-rw-r--r--drivers/ide/cris/ide-cris.c2
-rw-r--r--drivers/ide/ide-disk.c2
-rw-r--r--drivers/ide/ide-iops.c2
-rw-r--r--drivers/ide/ide-lib.c18
-rw-r--r--drivers/ide/ide-probe.c2
-rw-r--r--drivers/ide/ide.c39
-rw-r--r--drivers/ide/legacy/ali14xx.c11
-rw-r--r--drivers/ide/legacy/dtc2278.c14
-rw-r--r--drivers/ide/legacy/ht6560b.c10
-rw-r--r--drivers/ide/legacy/ide-cs.c7
-rw-r--r--drivers/ide/legacy/qd65xx.c14
-rw-r--r--drivers/ide/legacy/umc8672.c15
-rw-r--r--drivers/ide/mips/au1xxx-ide.c2
-rw-r--r--drivers/ide/pci/alim15x3.c35
-rw-r--r--drivers/ide/pci/cmd640.c1
-rw-r--r--drivers/ide/pci/cmd64x.c108
-rw-r--r--drivers/ide/pci/delkin_cb.c2
-rw-r--r--drivers/ide/pci/generic.c2
-rw-r--r--drivers/ide/pci/opti621.c3
-rw-r--r--drivers/ide/pci/piix.c49
-rw-r--r--drivers/ide/pci/rz1000.c2
-rw-r--r--drivers/ide/pci/siimage.c5
-rw-r--r--drivers/ide/pci/slc90e66.c38
-rw-r--r--drivers/ide/ppc/pmac.c33
-rw-r--r--drivers/ide/ppc/scc_pata.c27
-rw-r--r--drivers/input/keyboard/Kconfig10
-rw-r--r--drivers/input/keyboard/gpio_keys.c4
-rw-r--r--drivers/input/serio/i8042.c10
-rw-r--r--drivers/isdn/capi/Kconfig16
-rw-r--r--drivers/isdn/capi/capidrv.c28
-rw-r--r--drivers/isdn/capi/capiutil.c254
-rw-r--r--drivers/isdn/capi/kcapi.c77
-rw-r--r--drivers/isdn/gigaset/Makefile9
-rw-r--r--drivers/isdn/gigaset/asyncdata.c5
-rw-r--r--drivers/kvm/kvm.h13
-rw-r--r--drivers/kvm/kvm_main.c776
-rw-r--r--drivers/kvm/kvm_svm.h3
-rw-r--r--drivers/kvm/mmu.c36
-rw-r--r--drivers/kvm/paging_tmpl.h18
-rw-r--r--drivers/kvm/svm.c42
-rw-r--r--drivers/kvm/vmx.c33
-rw-r--r--drivers/md/md.c140
-rw-r--r--drivers/md/raid10.c38
-rw-r--r--drivers/md/raid5.c161
-rw-r--r--drivers/md/raid6mmx.c16
-rw-r--r--drivers/md/raid6sse1.c17
-rw-r--r--drivers/md/raid6sse2.c22
-rw-r--r--drivers/md/raid6x86.h218
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c4
-rw-r--r--drivers/media/dvb/dvb-usb/digitv.c2
-rw-r--r--drivers/media/video/cafe_ccic.c4
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c6
-rw-r--r--drivers/media/video/cx25840/cx25840-firmware.c2
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c14
-rw-r--r--drivers/media/video/cx88/cx88-video.c4
-rw-r--r--drivers/media/video/cx88/cx88.h1
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c13
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.h7
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c4
-rw-r--r--drivers/media/video/saa7115.c2
-rw-r--r--drivers/media/video/saa7127.c2
-rw-r--r--drivers/media/video/tvp5150.c2
-rw-r--r--drivers/media/video/upd64031a.c2
-rw-r--r--drivers/media/video/upd64083.c2
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c4
-rw-r--r--drivers/media/video/v4l2-common.c43
-rw-r--r--drivers/media/video/videodev.c36
-rw-r--r--drivers/mmc/mmc.c83
-rw-r--r--drivers/mmc/sdhci.c39
-rw-r--r--drivers/net/3c59x.c8
-rw-r--r--drivers/net/8139cp.c3
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/acenic.c5
-rw-r--r--drivers/net/amd8111e.c3
-rw-r--r--drivers/net/atl1/atl1_main.c5
-rw-r--r--drivers/net/bnx2.c4
-rw-r--r--drivers/net/bonding/bond_main.c100
-rw-r--r--drivers/net/chelsio/cxgb2.c3
-rw-r--r--drivers/net/chelsio/sge.c1
-rw-r--r--drivers/net/cxgb3/adapter.h11
-rw-r--r--drivers/net/cxgb3/cxgb3_ioctl.h33
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c69
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c6
-rw-r--r--drivers/net/cxgb3/sge.c390
-rw-r--r--drivers/net/cxgb3/t3_hw.c6
-rw-r--r--drivers/net/cxgb3/version.h4
-rw-r--r--drivers/net/de600.c6
-rw-r--r--drivers/net/e1000/e1000_main.c13
-rw-r--r--drivers/net/ehea/ehea_main.c3
-rw-r--r--drivers/net/forcedeth.c20
-rw-r--r--drivers/net/gianfar.c3
-rw-r--r--drivers/net/ixgb/ixgb_main.c5
-rw-r--r--drivers/net/mv643xx_eth.c59
-rw-r--r--drivers/net/mv643xx_eth.h15
-rw-r--r--drivers/net/myri10ge/myri10ge.c49
-rw-r--r--drivers/net/natsemi.c26
-rw-r--r--drivers/net/netxen/netxen_nic.h4
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c144
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c14
-rw-r--r--drivers/net/netxen/netxen_nic_init.c28
-rw-r--r--drivers/net/netxen/netxen_nic_main.c19
-rw-r--r--drivers/net/netxen/netxen_nic_phan_reg.h3
-rw-r--r--drivers/net/ni52.c2
-rw-r--r--drivers/net/ns83820.c3
-rw-r--r--drivers/net/pcnet32.c4
-rw-r--r--drivers/net/pppoe.c53
-rwxr-xr-xdrivers/net/qla3xxx.c492
-rwxr-xr-xdrivers/net/qla3xxx.h11
-rw-r--r--drivers/net/r8169.c3
-rw-r--r--drivers/net/s2io-regs.h1
-rw-r--r--drivers/net/s2io.c302
-rw-r--r--drivers/net/s2io.h10
-rw-r--r--drivers/net/sgiseeq.c11
-rw-r--r--drivers/net/sis900.c10
-rw-r--r--drivers/net/skfp/cfm.c2
-rw-r--r--drivers/net/skge.c47
-rw-r--r--drivers/net/skge.h3
-rw-r--r--drivers/net/sky2.c3
-rw-r--r--drivers/net/spider_net.c554
-rw-r--r--drivers/net/spider_net.h34
-rw-r--r--drivers/net/starfire.c5
-rw-r--r--drivers/net/sun3_82586.c2
-rw-r--r--drivers/net/sungem_phy.c389
-rw-r--r--drivers/net/sungem_phy.h10
-rw-r--r--drivers/net/tc35815.c23
-rw-r--r--drivers/net/tg3.c3
-rw-r--r--drivers/net/tulip/de2104x.c8
-rw-r--r--drivers/net/tulip/dmfe.c204
-rw-r--r--drivers/net/tulip/tulip_core.c2
-rw-r--r--drivers/net/typhoon.c3
-rw-r--r--drivers/net/ucc_geth.c21
-rw-r--r--drivers/net/wan/cosa.c1
-rw-r--r--drivers/net/wan/hdlc.c33
-rw-r--r--drivers/net/wan/hdlc_cisco.c3
-rw-r--r--drivers/net/wan/hdlc_fr.c3
-rw-r--r--drivers/net/wan/hdlc_ppp.c2
-rw-r--r--drivers/net/wan/hdlc_raw.c3
-rw-r--r--drivers/net/wan/hdlc_x25.c2
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_main.c5
-rw-r--r--drivers/net/wireless/wl3501_cs.c1
-rw-r--r--drivers/pci/msi.c146
-rw-r--r--drivers/pci/pci.c34
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/probe.c45
-rw-r--r--drivers/pci/quirks.c87
-rw-r--r--drivers/rtc/Kconfig8
-rw-r--r--drivers/rtc/class.c14
-rw-r--r--drivers/rtc/interface.c3
-rw-r--r--drivers/rtc/rtc-pcf8583.c29
-rw-r--r--drivers/s390/block/dasd_eer.c1
-rw-r--r--drivers/s390/char/tape_std.c5
-rw-r--r--drivers/s390/cio/device_fsm.c117
-rw-r--r--drivers/s390/net/qeth_main.c25
-rw-r--r--drivers/scsi/arm/cumana_2.c4
-rw-r--r--drivers/scsi/arm/eesox.c4
-rw-r--r--drivers/scsi/arm/fas216.c9
-rw-r--r--drivers/scsi/arm/powertec.c5
-rw-r--r--drivers/scsi/arm/scsi.h2
-rw-r--r--drivers/serial/dz.c7
-rw-r--r--drivers/serial/mcfserial.c44
-rw-r--r--drivers/serial/sn_console.c52
-rw-r--r--drivers/usb/input/hid-core.c56
-rw-r--r--drivers/usb/storage/usb.c4
-rw-r--r--drivers/video/Kconfig19
-rw-r--r--drivers/video/aty/aty128fb.c12
-rw-r--r--drivers/video/aty/atyfb.h3
-rw-r--r--drivers/video/aty/atyfb_base.c13
-rw-r--r--drivers/video/aty/mach64_ct.c3
-rw-r--r--drivers/video/aty/radeon_base.c13
-rw-r--r--drivers/video/nvidia/nv_backlight.c9
-rw-r--r--drivers/video/nvidia/nvidia.c12
-rw-r--r--drivers/video/riva/fbdev.c12
-rw-r--r--drivers/video/sm501fb.c16
276 files changed, 6831 insertions, 4034 deletions
diff --git a/drivers/acorn/char/Makefile b/drivers/acorn/char/Makefile
index 2fa9a8bf48..d006c9f168 100644
--- a/drivers/acorn/char/Makefile
+++ b/drivers/acorn/char/Makefile
@@ -2,5 +2,4 @@
 # Makefile for the acorn character device drivers.
 #
 
-obj-$(CONFIG_ARCH_ACORN) += i2c.o pcf8583.o
 obj-$(CONFIG_L7200_KEYB) += defkeymap-l7200.o keyb_l7200.o
diff --git a/drivers/acorn/char/i2c.c b/drivers/acorn/char/i2c.c
deleted file mode 100644
index d276fd14d6..0000000000
--- a/drivers/acorn/char/i2c.c
+++ /dev/null
@@ -1,368 +0,0 @@
1/*
2 * linux/drivers/acorn/char/i2c.c
3 *
4 * Copyright (C) 2000 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * ARM IOC/IOMD i2c driver.
11 *
12 * On Acorn machines, the following i2c devices are on the bus:
13 * - PCF8583 real time clock & static RAM
14 */
15#include <linux/capability.h>
16#include <linux/init.h>
17#include <linux/time.h>
18#include <linux/miscdevice.h>
19#include <linux/rtc.h>
20#include <linux/i2c.h>
21#include <linux/i2c-algo-bit.h>
22#include <linux/fs.h>
23
24#include <asm/hardware.h>
25#include <asm/io.h>
26#include <asm/hardware/ioc.h>
27#include <asm/system.h>
28#include <asm/uaccess.h>
29
30#include "pcf8583.h"
31
32extern int (*set_rtc)(void);
33
34static struct i2c_client *rtc_client;
35static const unsigned char days_in_mon[] =
36 { 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
37
38#define CMOS_CHECKSUM (63)
39
40/*
41 * Acorn machines store the year in the static RAM at
42 * location 128.
43 */
44#define CMOS_YEAR (64 + 128)
45
46static inline int rtc_command(int cmd, void *data)
47{
48 int ret = -EIO;
49
50 if (rtc_client)
51 ret = rtc_client->driver->command(rtc_client, cmd, data);
52
53 return ret;
54}
55
56/*
57 * Update the century + year bytes in the CMOS RAM, ensuring
58 * that the check byte is correctly adjusted for the change.
59 */
60static int rtc_update_year(unsigned int new_year)
61{
62 unsigned char yr[2], chk;
63 struct mem cmos_year = { CMOS_YEAR, sizeof(yr), yr };
64 struct mem cmos_check = { CMOS_CHECKSUM, 1, &chk };
65 int ret;
66
67 ret = rtc_command(MEM_READ, &cmos_check);
68 if (ret)
69 goto out;
70 ret = rtc_command(MEM_READ, &cmos_year);
71 if (ret)
72 goto out;
73
74 chk -= yr[1] + yr[0];
75
76 yr[1] = new_year / 100;
77 yr[0] = new_year % 100;
78
79 chk += yr[1] + yr[0];
80
81 ret = rtc_command(MEM_WRITE, &cmos_year);
82 if (ret == 0)
83 ret = rtc_command(MEM_WRITE, &cmos_check);
84 out:
85 return ret;
86}
87
88/*
89 * Read the current RTC time and date, and update xtime.
90 */
91static void get_rtc_time(struct rtc_tm *rtctm, unsigned int *year)
92{
93 unsigned char ctrl, yr[2];
94 struct mem rtcmem = { CMOS_YEAR, sizeof(yr), yr };
95 int real_year, year_offset;
96
97 /*
98 * Ensure that the RTC is running.
99 */
100 rtc_command(RTC_GETCTRL, &ctrl);
101 if (ctrl & 0xc0) {
102 unsigned char new_ctrl = ctrl & ~0xc0;
103
104 printk(KERN_WARNING "RTC: resetting control %02x -> %02x\n",
105 ctrl, new_ctrl);
106
107 rtc_command(RTC_SETCTRL, &new_ctrl);
108 }
109
110 if (rtc_command(RTC_GETDATETIME, rtctm) ||
111 rtc_command(MEM_READ, &rtcmem))
112 return;
113
114 real_year = yr[0];
115
116 /*
117 * The RTC year holds the LSB two bits of the current
118 * year, which should reflect the LSB two bits of the
119 * CMOS copy of the year. Any difference indicates
120 * that we have to correct the CMOS version.
121 */
122 year_offset = rtctm->year_off - (real_year & 3);
123 if (year_offset < 0)
124 /*
125 * RTC year wrapped. Adjust it appropriately.
126 */
127 year_offset += 4;
128
129 *year = real_year + year_offset + yr[1] * 100;
130}
131
132static int set_rtc_time(struct rtc_tm *rtctm, unsigned int year)
133{
134 unsigned char leap;
135 int ret;
136
137 leap = (!(year % 4) && (year % 100)) || !(year % 400);
138
139 if (rtctm->mon > 12 || rtctm->mon == 0 || rtctm->mday == 0)
140 return -EINVAL;
141
142 if (rtctm->mday > (days_in_mon[rtctm->mon] + (rtctm->mon == 2 && leap)))
143 return -EINVAL;
144
145 if (rtctm->hours >= 24 || rtctm->mins >= 60 || rtctm->secs >= 60)
146 return -EINVAL;
147
148 /*
149 * The RTC's own 2-bit year must reflect the least
150 * significant two bits of the CMOS year.
151 */
152 rtctm->year_off = (year % 100) & 3;
153
154 ret = rtc_command(RTC_SETDATETIME, rtctm);
155 if (ret == 0)
156 ret = rtc_update_year(year);
157
158 return ret;
159}
160
161/*
162 * Set the RTC time only. Note that
163 * we do not touch the date.
164 */
165static int k_set_rtc_time(void)
166{
167 struct rtc_tm new_rtctm, old_rtctm;
168 unsigned long nowtime = xtime.tv_sec;
169
170 if (rtc_command(RTC_GETDATETIME, &old_rtctm))
171 return 0;
172
173 new_rtctm.cs = xtime.tv_nsec / 10000000;
174 new_rtctm.secs = nowtime % 60; nowtime /= 60;
175 new_rtctm.mins = nowtime % 60; nowtime /= 60;
176 new_rtctm.hours = nowtime % 24;
177
178 /*
179 * avoid writing when we're going to change the day
180 * of the month. We will retry in the next minute.
181 * This basically means that if the RTC must not drift
182 * by more than 1 minute in 11 minutes.
183 *
184 * [ rtc: 1/1/2000 23:58:00, real 2/1/2000 00:01:00,
185 * rtc gets set to 1/1/2000 00:01:00 ]
186 */
187 if ((old_rtctm.hours == 23 && old_rtctm.mins == 59) ||
188 (new_rtctm.hours == 23 && new_rtctm.mins == 59))
189 return 1;
190
191 return rtc_command(RTC_SETTIME, &new_rtctm);
192}
193
194static int rtc_ioctl(struct inode *inode, struct file *file,
195 unsigned int cmd, unsigned long arg)
196{
197 unsigned int year;
198 struct rtc_time rtctm;
199 struct rtc_tm rtc_raw;
200
201 switch (cmd) {
202 case RTC_ALM_READ:
203 case RTC_ALM_SET:
204 break;
205
206 case RTC_RD_TIME:
207 memset(&rtctm, 0, sizeof(struct rtc_time));
208 get_rtc_time(&rtc_raw, &year);
209 rtctm.tm_sec = rtc_raw.secs;
210 rtctm.tm_min = rtc_raw.mins;
211 rtctm.tm_hour = rtc_raw.hours;
212 rtctm.tm_mday = rtc_raw.mday;
213 rtctm.tm_mon = rtc_raw.mon - 1; /* month starts at 0 */
214 rtctm.tm_year = year - 1900; /* starts at 1900 */
215 return copy_to_user((void *)arg, &rtctm, sizeof(rtctm))
216 ? -EFAULT : 0;
217
218 case RTC_SET_TIME:
219 if (!capable(CAP_SYS_TIME))
220 return -EACCES;
221
222 if (copy_from_user(&rtctm, (void *)arg, sizeof(rtctm)))
223 return -EFAULT;
224 rtc_raw.secs = rtctm.tm_sec;
225 rtc_raw.mins = rtctm.tm_min;
226 rtc_raw.hours = rtctm.tm_hour;
227 rtc_raw.mday = rtctm.tm_mday;
228 rtc_raw.mon = rtctm.tm_mon + 1;
229 year = rtctm.tm_year + 1900;
230 return set_rtc_time(&rtc_raw, year);
231 break;
232
233 case RTC_EPOCH_READ:
234 return put_user(1900, (unsigned long *)arg);
235
236 }
237 return -EINVAL;
238}
239
240static const struct file_operations rtc_fops = {
241 .ioctl = rtc_ioctl,
242};
243
244static struct miscdevice rtc_dev = {
245 .minor = RTC_MINOR,
246 .name = "rtc",
247 .fops = &rtc_fops,
248};
249
250/* IOC / IOMD i2c driver */
251
252#define FORCE_ONES 0xdc
253#define SCL 0x02
254#define SDA 0x01
255
256/*
257 * We must preserve all non-i2c output bits in IOC_CONTROL.
258 * Note also that we need to preserve the value of SCL and
259 * SDA outputs as well (which may be different from the
260 * values read back from IOC_CONTROL).
261 */
262static u_int force_ones;
263
264static void ioc_setscl(void *data, int state)
265{
266 u_int ioc_control = ioc_readb(IOC_CONTROL) & ~(SCL | SDA);
267 u_int ones = force_ones;
268
269 if (state)
270 ones |= SCL;
271 else
272 ones &= ~SCL;
273
274 force_ones = ones;
275
276 ioc_writeb(ioc_control | ones, IOC_CONTROL);
277}
278
279static void ioc_setsda(void *data, int state)
280{
281 u_int ioc_control = ioc_readb(IOC_CONTROL) & ~(SCL | SDA);
282 u_int ones = force_ones;
283
284 if (state)
285 ones |= SDA;
286 else
287 ones &= ~SDA;
288
289 force_ones = ones;
290
291 ioc_writeb(ioc_control | ones, IOC_CONTROL);
292}
293
294static int ioc_getscl(void *data)
295{
296 return (ioc_readb(IOC_CONTROL) & SCL) != 0;
297}
298
299static int ioc_getsda(void *data)
300{
301 return (ioc_readb(IOC_CONTROL) & SDA) != 0;
302}
303
304static struct i2c_algo_bit_data ioc_data = {
305 .setsda = ioc_setsda,
306 .setscl = ioc_setscl,
307 .getsda = ioc_getsda,
308 .getscl = ioc_getscl,
309 .udelay = 80,
310 .timeout = 100
311};
312
313static int ioc_client_reg(struct i2c_client *client)
314{
315 if (client->driver->id == I2C_DRIVERID_PCF8583 &&
316 client->addr == 0x50) {
317 struct rtc_tm rtctm;
318 unsigned int year;
319 struct timespec tv;
320
321 rtc_client = client;
322 get_rtc_time(&rtctm, &year);
323
324 tv.tv_nsec = rtctm.cs * 10000000;
325 tv.tv_sec = mktime(year, rtctm.mon, rtctm.mday,
326 rtctm.hours, rtctm.mins, rtctm.secs);
327 do_settimeofday(&tv);
328 set_rtc = k_set_rtc_time;
329 }
330
331 return 0;
332}
333
334static int ioc_client_unreg(struct i2c_client *client)
335{
336 if (client == rtc_client) {
337 set_rtc = NULL;
338 rtc_client = NULL;
339 }
340
341 return 0;
342}
343
344static struct i2c_adapter ioc_ops = {
345 .id = I2C_HW_B_IOC,
346 .algo_data = &ioc_data,
347 .client_register = ioc_client_reg,
348 .client_unregister = ioc_client_unreg,
349};
350
351static int __init i2c_ioc_init(void)
352{
353 int ret;
354
355 force_ones = FORCE_ONES | SCL | SDA;
356
357 ret = i2c_bit_add_bus(&ioc_ops);
358
359 if (ret >= 0){
360 ret = misc_register(&rtc_dev);
361 if(ret < 0)
362 i2c_del_adapter(&ioc_ops);
363 }
364
365 return ret;
366}
367
368__initcall(i2c_ioc_init);
diff --git a/drivers/acorn/char/pcf8583.c b/drivers/acorn/char/pcf8583.c
deleted file mode 100644
index 9b49f316ae..0000000000
--- a/drivers/acorn/char/pcf8583.c
+++ /dev/null
@@ -1,284 +0,0 @@
1/*
2 * linux/drivers/acorn/char/pcf8583.c
3 *
4 * Copyright (C) 2000 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Driver for PCF8583 RTC & RAM chip
11 */
12#include <linux/module.h>
13#include <linux/i2c.h>
14#include <linux/slab.h>
15#include <linux/string.h>
16#include <linux/mc146818rtc.h>
17#include <linux/init.h>
18#include <linux/errno.h>
19#include <linux/bcd.h>
20
21#include "pcf8583.h"
22
23static struct i2c_driver pcf8583_driver;
24
25static unsigned short ignore[] = { I2C_CLIENT_END };
26static unsigned short normal_addr[] = { 0x50, I2C_CLIENT_END };
27static unsigned short *forces[] = { NULL };
28
29static struct i2c_client_address_data addr_data = {
30 .normal_i2c = normal_addr,
31 .probe = ignore,
32 .ignore = ignore,
33 .forces = forces,
34};
35
36#define set_ctrl(x, v) i2c_set_clientdata(x, (void *)(unsigned int)(v))
37#define get_ctrl(x) ((unsigned int)i2c_get_clientdata(x))
38
39static int
40pcf8583_attach(struct i2c_adapter *adap, int addr, int kind)
41{
42 struct i2c_client *c;
43 unsigned char buf[1], ad[1] = { 0 };
44 struct i2c_msg msgs[2] = {
45 {
46 .addr = addr,
47 .flags = 0,
48 .len = 1,
49 .buf = ad,
50 }, {
51 .addr = addr,
52 .flags = I2C_M_RD,
53 .len = 1,
54 .buf = buf,
55 }
56 };
57
58 c = kmalloc(sizeof(*c), GFP_KERNEL);
59 if (!c)
60 return -ENOMEM;
61
62 memset(c, 0, sizeof(*c));
63 c->addr = addr;
64 c->adapter = adap;
65 c->driver = &pcf8583_driver;
66
67 if (i2c_transfer(c->adapter, msgs, 2) == 2)
68 set_ctrl(c, buf[0]);
69
70 return i2c_attach_client(c);
71}
72
73static int
74pcf8583_probe(struct i2c_adapter *adap)
75{
76 return i2c_probe(adap, &addr_data, pcf8583_attach);
77}
78
79static int
80pcf8583_detach(struct i2c_client *client)
81{
82 i2c_detach_client(client);
83 kfree(client);
84 return 0;
85}
86
87static int
88pcf8583_get_datetime(struct i2c_client *client, struct rtc_tm *dt)
89{
90 unsigned char buf[8], addr[1] = { 1 };
91 struct i2c_msg msgs[2] = {
92 {
93 .addr = client->addr,
94 .flags = 0,
95 .len = 1,
96 .buf = addr,
97 }, {
98 .addr = client->addr,
99 .flags = I2C_M_RD,
100 .len = 6,
101 .buf = buf,
102 }
103 };
104 int ret = -EIO;
105
106 memset(buf, 0, sizeof(buf));
107
108 ret = i2c_transfer(client->adapter, msgs, 2);
109 if (ret == 2) {
110 dt->year_off = buf[4] >> 6;
111 dt->wday = buf[5] >> 5;
112
113 buf[4] &= 0x3f;
114 buf[5] &= 0x1f;
115
116 dt->cs = BCD_TO_BIN(buf[0]);
117 dt->secs = BCD_TO_BIN(buf[1]);
118 dt->mins = BCD_TO_BIN(buf[2]);
119 dt->hours = BCD_TO_BIN(buf[3]);
120 dt->mday = BCD_TO_BIN(buf[4]);
121 dt->mon = BCD_TO_BIN(buf[5]);
122
123 ret = 0;
124 }
125
126 return ret;
127}
128
129static int
130pcf8583_set_datetime(struct i2c_client *client, struct rtc_tm *dt, int datetoo)
131{
132 unsigned char buf[8];
133 int ret, len = 6;
134
135 buf[0] = 0;
136 buf[1] = get_ctrl(client) | 0x80;
137 buf[2] = BIN_TO_BCD(dt->cs);
138 buf[3] = BIN_TO_BCD(dt->secs);
139 buf[4] = BIN_TO_BCD(dt->mins);
140 buf[5] = BIN_TO_BCD(dt->hours);
141
142 if (datetoo) {
143 len = 8;
144 buf[6] = BIN_TO_BCD(dt->mday) | (dt->year_off << 6);
145 buf[7] = BIN_TO_BCD(dt->mon) | (dt->wday << 5);
146 }
147
148 ret = i2c_master_send(client, (char *)buf, len);
149 if (ret == len)
150 ret = 0;
151
152 buf[1] = get_ctrl(client);
153 i2c_master_send(client, (char *)buf, 2);
154
155 return ret;
156}
157
158static int
159pcf8583_get_ctrl(struct i2c_client *client, unsigned char *ctrl)
160{
161 *ctrl = get_ctrl(client);
162 return 0;
163}
164
165static int
166pcf8583_set_ctrl(struct i2c_client *client, unsigned char *ctrl)
167{
168 unsigned char buf[2];
169
170 buf[0] = 0;
171 buf[1] = *ctrl;
172 set_ctrl(client, *ctrl);
173
174 return i2c_master_send(client, (char *)buf, 2);
175}
176
177static int
178pcf8583_read_mem(struct i2c_client *client, struct mem *mem)
179{
180 unsigned char addr[1];
181 struct i2c_msg msgs[2] = {
182 {
183 .addr = client->addr,
184 .flags = 0,
185 .len = 1,
186 .buf = addr,
187 }, {
188 .addr = client->addr,
189 .flags = I2C_M_RD,
190 .len = mem->nr,
191 .buf = mem->data,
192 }
193 };
194
195 if (mem->loc < 8)
196 return -EINVAL;
197
198 addr[0] = mem->loc;
199
200 return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO;
201}
202
203static int
204pcf8583_write_mem(struct i2c_client *client, struct mem *mem)
205{
206 unsigned char addr[1];
207 struct i2c_msg msgs[2] = {
208 {
209 .addr = client->addr,
210 .flags = 0,
211 .len = 1,
212 .buf = addr,
213 }, {
214 .addr = client->addr,
215 .flags = I2C_M_NOSTART,
216 .len = mem->nr,
217 .buf = mem->data,
218 }
219 };
220
221 if (mem->loc < 8)
222 return -EINVAL;
223
224 addr[0] = mem->loc;
225
226 return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO;
227}
228
229static int
230pcf8583_command(struct i2c_client *client, unsigned int cmd, void *arg)
231{
232 switch (cmd) {
233 case RTC_GETDATETIME:
234 return pcf8583_get_datetime(client, arg);
235
236 case RTC_SETTIME:
237 return pcf8583_set_datetime(client, arg, 0);
238
239 case RTC_SETDATETIME:
240 return pcf8583_set_datetime(client, arg, 1);
241
242 case RTC_GETCTRL:
243 return pcf8583_get_ctrl(client, arg);
244
245 case RTC_SETCTRL:
246 return pcf8583_set_ctrl(client, arg);
247
248 case MEM_READ:
249 return pcf8583_read_mem(client, arg);
250
251 case MEM_WRITE:
252 return pcf8583_write_mem(client, arg);
253
254 default:
255 return -EINVAL;
256 }
257}
258
259static struct i2c_driver pcf8583_driver = {
260 .driver = {
261 .name = "PCF8583",
262 },
263 .id = I2C_DRIVERID_PCF8583,
264 .attach_adapter = pcf8583_probe,
265 .detach_client = pcf8583_detach,
266 .command = pcf8583_command
267};
268
269static __init int pcf8583_init(void)
270{
271 return i2c_add_driver(&pcf8583_driver);
272}
273
274static __exit void pcf8583_exit(void)
275{
276 i2c_del_driver(&pcf8583_driver);
277}
278
279module_init(pcf8583_init);
280module_exit(pcf8583_exit);
281
282MODULE_AUTHOR("Russell King");
283MODULE_DESCRIPTION("PCF8583 I2C RTC driver");
284MODULE_LICENSE("GPL");
diff --git a/drivers/acorn/char/pcf8583.h b/drivers/acorn/char/pcf8583.h
deleted file mode 100644
index 847f7fdb87..0000000000
--- a/drivers/acorn/char/pcf8583.h
+++ /dev/null
@@ -1,41 +0,0 @@
1/*
2 * linux/drivers/acorn/char/pcf8583.h
3 *
4 * Copyright (C) 2000 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10struct rtc_tm {
11 unsigned char cs;
12 unsigned char secs;
13 unsigned char mins;
14 unsigned char hours;
15 unsigned char mday;
16 unsigned char mon;
17 unsigned char year_off;
18 unsigned char wday;
19};
20
21struct mem {
22 unsigned int loc;
23 unsigned int nr;
24 unsigned char *data;
25};
26
27#define RTC_GETDATETIME 0
28#define RTC_SETTIME 1
29#define RTC_SETDATETIME 2
30#define RTC_GETCTRL 3
31#define RTC_SETCTRL 4
32#define MEM_READ 5
33#define MEM_WRITE 6
34
35#define CTRL_STOP 0x80
36#define CTRL_HOLD 0x40
37#define CTRL_32KHZ 0x00
38#define CTRL_MASK 0x08
39#define CTRL_ALARMEN 0x04
40#define CTRL_ALARM 0x02
41#define CTRL_TIMER 0x01
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index e942ffe8b5..7c49e103cf 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -149,6 +149,7 @@ config ACPI_DOCK
 config ACPI_BAY
 	tristate "Removable Drive Bay (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
+	depends on ACPI_DOCK
 	help
 	  This driver adds support for ACPI controlled removable drive
 	  bays such as the IBM ultrabay or the Dell Module Bay.
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 4af0a4bb57..d16b5b0c8b 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -562,6 +562,15 @@ config PATA_IXP4XX_CF
 
 	  If unsure, say N.
 
+config PATA_SCC
+	tristate "Toshiba's Cell Reference Set IDE support"
+	depends on PCI && PPC_IBM_CELL_BLADE
+	help
+	  This option enables support for the built-in IDE controller on
+	  Toshiba Cell Reference Board.
+
+	  If unsure, say N.
+
 endif
 endmenu
 
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 74298afbba..13d7397e00 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
 obj-$(CONFIG_PATA_SIS)		+= pata_sis.o
 obj-$(CONFIG_PATA_TRIFLEX)	+= pata_triflex.o
 obj-$(CONFIG_PATA_IXP4XX_CF)	+= pata_ixp4xx_cf.o
+obj-$(CONFIG_PATA_SCC)		+= pata_scc.o
 obj-$(CONFIG_PATA_PLATFORM)	+= pata_platform.o
 # Should be last but one libata driver
 obj-$(CONFIG_ATA_GENERIC)	+= ata_generic.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 6a3543e062..dc7b562259 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -46,7 +46,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"ahci"
-#define DRV_VERSION	"2.0"
+#define DRV_VERSION	"2.1"
 
 
 enum {
@@ -198,9 +198,9 @@ struct ahci_port_priv {
 	void *rx_fis;
 	dma_addr_t rx_fis_dma;
 	/* for NCQ spurious interrupt analysis */
-	int ncq_saw_spurious_sdb_cnt;
 	unsigned int ncq_saw_d2h:1;
 	unsigned int ncq_saw_dmas:1;
+	unsigned int ncq_saw_sdb:1;
 };
 
 static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
@@ -219,10 +219,12 @@ static void ahci_thaw(struct ata_port *ap);
 static void ahci_error_handler(struct ata_port *ap);
 static void ahci_vt8251_error_handler(struct ata_port *ap);
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
+#ifdef CONFIG_PM
 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
 static int ahci_port_resume(struct ata_port *ap);
 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
 static int ahci_pci_device_resume(struct pci_dev *pdev);
+#endif
 
 static struct scsi_host_template ahci_sht = {
 	.module = THIS_MODULE,
@@ -241,8 +243,10 @@ static struct scsi_host_template ahci_sht = {
 	.slave_configure = ata_scsi_slave_config,
 	.slave_destroy = ata_scsi_slave_destroy,
 	.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
 	.suspend = ata_scsi_device_suspend,
 	.resume = ata_scsi_device_resume,
+#endif
 };
 
 static const struct ata_port_operations ahci_ops = {
@@ -271,8 +275,10 @@ static const struct ata_port_operations ahci_ops = {
 	.error_handler = ahci_error_handler,
 	.post_internal_cmd = ahci_post_internal_cmd,
 
+#ifdef CONFIG_PM
 	.port_suspend = ahci_port_suspend,
 	.port_resume = ahci_port_resume,
+#endif
 
 	.port_start = ahci_port_start,
 	.port_stop = ahci_port_stop,
@@ -304,8 +310,10 @@ static const struct ata_port_operations ahci_vt8251_ops = {
 	.error_handler = ahci_vt8251_error_handler,
 	.post_internal_cmd = ahci_post_internal_cmd,
 
+#ifdef CONFIG_PM
 	.port_suspend = ahci_port_suspend,
 	.port_resume = ahci_port_resume,
+#endif
 
 	.port_start = ahci_port_start,
 	.port_stop = ahci_port_stop,
@@ -381,16 +389,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x2929), board_ahci_pi }, /* ICH9M */
 	{ PCI_VDEVICE(INTEL, 0x292a), board_ahci_pi }, /* ICH9M */
 	{ PCI_VDEVICE(INTEL, 0x292b), board_ahci_pi }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x292c), board_ahci_pi }, /* ICH9M */
 	{ PCI_VDEVICE(INTEL, 0x292f), board_ahci_pi }, /* ICH9M */
 	{ PCI_VDEVICE(INTEL, 0x294d), board_ahci_pi }, /* ICH9 */
 	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci_pi }, /* ICH9M */
 
-	/* JMicron */
-	{ PCI_VDEVICE(JMICRON, 0x2360), board_ahci_ign_iferr }, /* JMB360 */
-	{ PCI_VDEVICE(JMICRON, 0x2361), board_ahci_ign_iferr }, /* JMB361 */
-	{ PCI_VDEVICE(JMICRON, 0x2363), board_ahci_ign_iferr }, /* JMB363 */
-	{ PCI_VDEVICE(JMICRON, 0x2365), board_ahci_ign_iferr }, /* JMB365 */
-	{ PCI_VDEVICE(JMICRON, 0x2366), board_ahci_ign_iferr }, /* JMB366 */
+	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
+	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
 
 	/* ATI */
 	{ PCI_VDEVICE(ATI, 0x4380), board_ahci }, /* ATI SB600 non-raid */
@@ -439,8 +445,10 @@ static struct pci_driver ahci_pci_driver = {
 	.id_table = ahci_pci_tbl,
 	.probe = ahci_init_one,
 	.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
 	.suspend = ahci_pci_device_suspend,
 	.resume = ahci_pci_device_resume,
+#endif
 };
 
 
@@ -580,6 +588,7 @@ static void ahci_power_up(void __iomem *port_mmio, u32 cap)
 	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
 }
 
+#ifdef CONFIG_PM
 static void ahci_power_down(void __iomem *port_mmio, u32 cap)
 {
 	u32 cmd, scontrol;
@@ -597,6 +606,7 @@ static void ahci_power_down(void __iomem *port_mmio, u32 cap)
 	cmd &= ~PORT_CMD_SPIN_UP;
 	writel(cmd, port_mmio + PORT_CMD);
 }
+#endif
 
 static void ahci_init_port(void __iomem *port_mmio, u32 cap,
 			   dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
@@ -1160,23 +1170,32 @@ static void ahci_host_intr(struct ata_port *ap)
 		known_irq = 1;
 	}
 
-	if (status & PORT_IRQ_SDB_FIS &&
-		   pp->ncq_saw_spurious_sdb_cnt < 10) {
-		/* SDB FIS containing spurious completions might be
-		 * dangerous, we need to know more about them. Print
-		 * more of it.
-		 */
+	if (status & PORT_IRQ_SDB_FIS) {
 		const __le32 *f = pp->rx_fis + RX_FIS_SDB;
 
-		ata_port_printk(ap, KERN_INFO, "Spurious SDB FIS during NCQ "
-				"issue=0x%x SAct=0x%x FIS=%08x:%08x%s\n",
+		if (le32_to_cpu(f[1])) {
+			/* SDB FIS containing spurious completions
+			 * might be dangerous, whine and fail commands
+			 * with HSM violation. EH will turn off NCQ
+			 * after several such failures.
+			 */
+			ata_ehi_push_desc(ehi,
+				"spurious completions during NCQ "
+				"issue=0x%x SAct=0x%x FIS=%08x:%08x",
 				readl(port_mmio + PORT_CMD_ISSUE),
 				readl(port_mmio + PORT_SCR_ACT),
-				le32_to_cpu(f[0]), le32_to_cpu(f[1]),
-				pp->ncq_saw_spurious_sdb_cnt < 10 ?
-				"" : ", shutting up");
-
-		pp->ncq_saw_spurious_sdb_cnt++;
+				le32_to_cpu(f[0]), le32_to_cpu(f[1]));
+			ehi->err_mask |= AC_ERR_HSM;
+			ehi->action |= ATA_EH_SOFTRESET;
+			ata_port_freeze(ap);
+		} else {
+			if (!pp->ncq_saw_sdb)
+				ata_port_printk(ap, KERN_INFO,
+					"spurious SDB FIS %08x:%08x during NCQ, "
+					"this message won't be printed again\n",
+					le32_to_cpu(f[0]), le32_to_cpu(f[1]));
+			pp->ncq_saw_sdb = 1;
+		}
 		known_irq = 1;
 	}
 
@@ -1329,6 +1348,7 @@ static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
 	}
 }
 
+#ifdef CONFIG_PM
 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
 {
 	struct ahci_host_priv *hpriv = ap->host->private_data;
@@ -1407,6 +1427,7 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
 
 	return 0;
 }
+#endif
 
 static int ahci_port_start(struct ata_port *ap)
 {
@@ -1665,13 +1686,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
-	if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
-		/* Function 1 is the PATA controller except on the 368, where
-		   we are not AHCI anyway */
-		if (PCI_FUNC(pdev->devfn))
-			return -ENODEV;
-	}
-
 	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index be66ea08da..d8e79882b8 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -26,7 +26,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "ata_generic"
-#define DRV_VERSION "0.2.10"
+#define DRV_VERSION "0.2.11"
 
 /*
  * A generic parallel ATA driver using libata
@@ -90,10 +90,10 @@ static int generic_set_mode(struct ata_port *ap, struct ata_device **unused)
 	/* We do need the right mode information for DMA or PIO
 	   and this comes from the current configuration flags */
 	if (dma_enabled & (1 << (5 + i))) {
-			dev->xfer_mode = XFER_MW_DMA_0;
-			dev->xfer_shift = ATA_SHIFT_MWDMA;
+			ata_id_to_dma_mode(dev, XFER_MW_DMA_0);
 			dev->flags &= ~ATA_DFLAG_PIO;
 		} else {
+			ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
 			dev->xfer_mode = XFER_PIO_0;
 			dev->xfer_shift = ATA_SHIFT_PIO;
 			dev->flags |= ATA_DFLAG_PIO;
@@ -119,8 +119,10 @@ static struct scsi_host_template generic_sht = {
 	.slave_configure = ata_scsi_slave_config,
 	.slave_destroy = ata_scsi_slave_destroy,
 	.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
 	.resume = ata_scsi_device_resume,
 	.suspend = ata_scsi_device_suspend,
+#endif
 };
 
 static struct ata_port_operations generic_port_ops = {
@@ -230,8 +232,10 @@ static struct pci_driver ata_generic_pci_driver = {
 	.id_table = ata_generic,
 	.probe = ata_generic_init_one,
 	.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
 	.suspend = ata_pci_device_suspend,
 	.resume = ata_pci_device_resume,
+#endif
 };
 
 static int __init ata_generic_init(void)
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 4d716c7347..dc42ba1b46 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -93,7 +93,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"ata_piix"
-#define DRV_VERSION	"2.00ac7"
+#define DRV_VERSION	"2.10"
 
 enum {
 	PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
@@ -169,8 +169,6 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	/* Intel PIIX4 for the 430TX/440BX/MX chipset: UDMA 33 */
 	/* Also PIIX4E (fn3 rev 2) and PIIX4M (fn3 rev 3) */
 	{ 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
-	{ 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
-	{ 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
 	/* Intel PIIX4 */
 	{ 0x8086, 0x7199, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
 	/* Intel PIIX4 */
@@ -255,8 +253,10 @@ static struct pci_driver piix_pci_driver = {
 	.id_table = piix_pci_tbl,
 	.probe = piix_init_one,
 	.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
 	.suspend = ata_pci_device_suspend,
 	.resume = ata_pci_device_resume,
+#endif
 };
 
 static struct scsi_host_template piix_sht = {
@@ -275,8 +275,10 @@ static struct scsi_host_template piix_sht = {
 	.slave_configure = ata_scsi_slave_config,
 	.slave_destroy = ata_scsi_slave_destroy,
 	.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
 	.resume = ata_scsi_device_resume,
 	.suspend = ata_scsi_device_suspend,
+#endif
 };
 
 static const struct ata_port_operations piix_pata_ops = {
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index b4e8be5d29..019d8ffdde 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -34,6 +34,13 @@ struct taskfile_array {
 	u8 tfa[REGS_PER_GTF]; /* regs. 0x1f1 - 0x1f7 */
 };
 
+/*
+ * Helper - belongs in the PCI layer somewhere eventually
+ */
+static int is_pci_dev(struct device *dev)
+{
+	return (dev->bus == &pci_bus_type);
+}
 
 /**
  * sata_get_dev_handle - finds acpi_handle and PCI device.function
@@ -53,6 +60,9 @@ static int sata_get_dev_handle(struct device *dev, acpi_handle *handle,
 	struct pci_dev *pci_dev;
 	acpi_integer addr;
 
+	if (!is_pci_dev(dev))
+		return -ENODEV;
+
 	pci_dev = to_pci_dev(dev); /* NOTE: PCI-specific */
 	/* Please refer to the ACPI spec for the syntax of _ADR. */
 	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
@@ -84,7 +94,12 @@ static int pata_get_dev_handle(struct device *dev, acpi_handle *handle,
 	acpi_status status;
 	struct acpi_device_info *dinfo = NULL;
 	int ret = -ENODEV;
-	struct pci_dev *pdev = to_pci_dev(dev);
+	struct pci_dev *pdev;
+
+	if (!is_pci_dev(dev))
+		return -ENODEV;
+
+	pdev = to_pci_dev(dev);
 
 	bus = pdev->bus->number;
 	devnum = PCI_SLOT(pdev->devfn);
@@ -294,9 +309,8 @@ static int do_drive_get_GTF(struct ata_port *ap, int ix,
 		return 0;
 
 	if (ata_msg_probe(ap))
-		ata_dev_printk(atadev, KERN_DEBUG,
-			"%s: ENTER: ap->id: %d, port#: %d\n",
-			__FUNCTION__, ap->id, ap->port_no);
+		ata_dev_printk(atadev, KERN_DEBUG, "%s: ENTER: port#: %d\n",
+			__FUNCTION__, ap->port_no);
 
 	if (!ata_dev_enabled(atadev) || (ap->flags & ATA_FLAG_DISABLED)) {
 		if (ata_msg_probe(ap))
@@ -456,6 +470,9 @@ static void taskfile_load_raw(struct ata_port *ap,
 			      struct ata_device *atadev,
 			      const struct taskfile_array *gtf)
 {
+	struct ata_taskfile tf;
+	unsigned int err;
+
 	if (ata_msg_probe(ap))
 		ata_dev_printk(atadev, KERN_DEBUG, "%s: (0x1f1-1f7): hex: "
 			"%02x %02x %02x %02x %02x %02x %02x\n",
@@ -468,35 +485,25 @@ static void taskfile_load_raw(struct ata_port *ap,
 	    && (gtf->tfa[6] == 0))
 		return;
 
-	if (ap->ops->qc_issue) {
-		struct ata_taskfile tf;
-		unsigned int err;
-
-		ata_tf_init(atadev, &tf);
-
-		/* convert gtf to tf */
-		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; /* TBD */
-		tf.protocol = atadev->class == ATA_DEV_ATAPI ?
-			ATA_PROT_ATAPI_NODATA : ATA_PROT_NODATA;
-		tf.feature = gtf->tfa[0];	/* 0x1f1 */
-		tf.nsect = gtf->tfa[1];		/* 0x1f2 */
-		tf.lbal = gtf->tfa[2];		/* 0x1f3 */
-		tf.lbam = gtf->tfa[3];		/* 0x1f4 */
-		tf.lbah = gtf->tfa[4];		/* 0x1f5 */
-		tf.device = gtf->tfa[5];	/* 0x1f6 */
-		tf.command = gtf->tfa[6];	/* 0x1f7 */
-
-		err = ata_exec_internal(atadev, &tf, NULL, DMA_NONE, NULL, 0);
-		if (err && ata_msg_probe(ap))
-			ata_dev_printk(atadev, KERN_ERR,
-				"%s: ata_exec_internal failed: %u\n",
-				__FUNCTION__, err);
-	} else
-		if (ata_msg_warn(ap))
-			ata_dev_printk(atadev, KERN_WARNING,
-				"%s: SATA driver is missing qc_issue function"
-				" entry points\n",
-				__FUNCTION__);
+	ata_tf_init(atadev, &tf);
+
+	/* convert gtf to tf */
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; /* TBD */
+	tf.protocol = atadev->class == ATA_DEV_ATAPI ?
+		ATA_PROT_ATAPI_NODATA : ATA_PROT_NODATA;
+	tf.feature = gtf->tfa[0];	/* 0x1f1 */
+	tf.nsect = gtf->tfa[1];		/* 0x1f2 */
+	tf.lbal = gtf->tfa[2];		/* 0x1f3 */
+	tf.lbam = gtf->tfa[3];		/* 0x1f4 */
+	tf.lbah = gtf->tfa[4];		/* 0x1f5 */
+	tf.device = gtf->tfa[5];	/* 0x1f6 */
+	tf.command = gtf->tfa[6];	/* 0x1f7 */
+
+	err = ata_exec_internal(atadev, &tf, NULL, DMA_NONE, NULL, 0);
+	if (err && ata_msg_probe(ap))
+		ata_dev_printk(atadev, KERN_ERR,
+			"%s: ata_exec_internal failed: %u\n",
+			__FUNCTION__, err);
 }
 
 /**
@@ -521,9 +528,8 @@ static int do_drive_set_taskfiles(struct ata_port *ap,
 	struct taskfile_array *gtf;
 
 	if (ata_msg_probe(ap))
-		ata_dev_printk(atadev, KERN_DEBUG,
-			"%s: ENTER: ap->id: %d, port#: %d\n",
-			__FUNCTION__, ap->id, ap->port_no);
+		ata_dev_printk(atadev, KERN_DEBUG, "%s: ENTER: port#: %d\n",
+			__FUNCTION__, ap->port_no);
 
 	if (noacpi || !(ap->cbl == ATA_CBL_SATA))
 		return 0;
@@ -627,9 +633,8 @@ int ata_acpi_push_id(struct ata_port *ap, unsigned int ix)
 		return 0;
 
 	if (ata_msg_probe(ap))
-		ata_dev_printk(atadev, KERN_DEBUG,
-			"%s: ap->id: %d, ix = %d, port#: %d\n",
-			__FUNCTION__, ap->id, ix, ap->port_no);
+		ata_dev_printk(atadev, KERN_DEBUG, "%s: ix = %d, port#: %d\n",
+			__FUNCTION__, ix, ap->port_no);
 
 	/* Don't continue if not a SATA device. */
 	if (!(ap->cbl == ATA_CBL_SATA)) {
@@ -685,9 +690,8 @@ int ata_acpi_push_id(struct ata_port *ap, unsigned int ix)
 	if (err < 0) {
 		if (ata_msg_probe(ap))
 			ata_dev_printk(atadev, KERN_DEBUG,
-				"ata%u(%u): %s _SDD error: status = 0x%x\n",
-				ap->id, ap->device->devno,
-				__FUNCTION__, status);
+				"%s _SDD error: status = 0x%x\n",
+				__FUNCTION__, status);
 	}
 
 	/* always return success */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e900c5edef..dc362fa01c 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -59,7 +59,7 @@
 
 #include "libata.h"
 
-#define DRV_VERSION	"2.10"	/* must be exactly four chars */
+#define DRV_VERSION	"2.20"	/* must be exactly four chars */
 
 
 /* debounce timing parameters in msecs { interval, duration, timeout } */
@@ -72,7 +72,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
 static void ata_dev_xfermask(struct ata_device *dev);
 
-static unsigned int ata_unique_id = 1;
+static unsigned int ata_print_id = 1;
 static struct workqueue_struct *ata_wq;
 
 struct workqueue_struct *ata_aux_wq;
@@ -315,9 +315,7 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	tf->flags |= tf_flags;
 
-	if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
-			   ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ &&
-	    likely(tag != ATA_TAG_INTERNAL)) {
+	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
 		/* yay, NCQ */
 		if (!lba_48_ok(block, n_block))
 			return -ERANGE;
@@ -600,6 +598,8 @@ void ata_dev_disable(struct ata_device *dev)
 {
 	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
 		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
+		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
+					     ATA_DNXFER_QUIET);
 		dev->class++;
 	}
 }
@@ -708,7 +708,7 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
  *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
  */
 
-static unsigned int
+unsigned int
 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
 {
 	struct ata_taskfile tf;
@@ -824,6 +824,48 @@ static u64 ata_id_n_sectors(const u16 *id)
 }
 
 /**
+ *	ata_id_to_dma_mode - Identify DMA mode from id block
+ *	@dev: device to identify
+ *	@mode: mode to assume if we cannot tell
+ *
+ *	Set up the timing values for the device based upon the identify
+ *	reported values for the DMA mode. This function is used by drivers
+ *	which rely upon firmware configured modes, but wish to report the
+ *	mode correctly when possible.
+ *
+ *	In addition we emit similarly formatted messages to the default
+ *	ata_dev_set_mode handler, in order to provide consistency of
+ *	presentation.
+ */
+
+void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
+{
+	unsigned int mask;
+	u8 mode;
+
+	/* Pack the DMA modes */
+	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
+	if (dev->id[53] & 0x04)
+		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
+
+	/* Select the mode in use */
+	mode = ata_xfer_mask2mode(mask);
+
+	if (mode != 0) {
+		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
+			       ata_mode_string(mask));
+	} else {
+		/* SWDMA perhaps ? */
+		mode = unknown;
+		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
+	}
+
+	/* Configure the device reporting */
+	dev->xfer_mode = mode;
+	dev->xfer_shift = ata_xfer_mode2shift(mode);
+}
+
+/**
  *	ata_noop_dev_select - Select device 0/1 on ATA bus
  *	@ap: ATA channel to manipulate
  *	@device: ATA device (numbered from zero) to select
@@ -891,8 +933,8 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
 		    unsigned int wait, unsigned int can_sleep)
 {
 	if (ata_msg_probe(ap))
-		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
-				"device %u, wait %u\n", ap->id, device, wait);
+		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
+				"device %u, wait %u\n", device, wait);
 
 	if (wait)
 		ata_wait_idle(ap);
@@ -1392,8 +1434,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
 	int rc;
 
 	if (ata_msg_ctl(ap))
-		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
-			       __FUNCTION__, ap->id, dev->devno);
+		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
 
 	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 
@@ -1430,7 +1471,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
 	if (err_mask) {
 		if (err_mask & AC_ERR_NODEV_HINT) {
 			DPRINTK("ata%u.%d: NODEV after polling detection\n",
-				ap->id, dev->devno);
+				ap->print_id, dev->devno);
 			return -ENOENT;
 		}
 
@@ -1558,15 +1599,13 @@ int ata_dev_configure(struct ata_device *dev)
1558 int rc; 1599 int rc;
1559 1600
1560 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 1601 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1561 ata_dev_printk(dev, KERN_INFO, 1602 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1562 "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n", 1603 __FUNCTION__);
1563 __FUNCTION__, ap->id, dev->devno);
1564 return 0; 1604 return 0;
1565 } 1605 }
1566 1606
1567 if (ata_msg_probe(ap)) 1607 if (ata_msg_probe(ap))
1568 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n", 1608 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1569 __FUNCTION__, ap->id, dev->devno);
1570 1609
1571 /* set _SDD */ 1610 /* set _SDD */
1572 rc = ata_acpi_push_id(ap, dev->devno); 1611 rc = ata_acpi_push_id(ap, dev->devno);
@@ -1610,8 +1649,9 @@ int ata_dev_configure(struct ata_device *dev)
1610 if (dev->class == ATA_DEV_ATA) { 1649 if (dev->class == ATA_DEV_ATA) {
1611 if (ata_id_is_cfa(id)) { 1650 if (ata_id_is_cfa(id)) {
1612 if (id[162] & 1) /* CPRM may make this media unusable */ 1651 if (id[162] & 1) /* CPRM may make this media unusable */
1613 ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessable.\n", 1652 ata_dev_printk(dev, KERN_WARNING,
1614 ap->id, dev->devno); 1653 "supports DRM functions and may "
 1654 "not be fully accessible.\n");
1615 snprintf(revbuf, 7, "CFA"); 1655 snprintf(revbuf, 7, "CFA");
1616 } 1656 }
1617 else 1657 else
@@ -1679,7 +1719,7 @@ int ata_dev_configure(struct ata_device *dev)
1679 "%s: %s, %s, max %s\n", 1719 "%s: %s, %s, max %s\n",
1680 revbuf, modelbuf, fwrevbuf, 1720 revbuf, modelbuf, fwrevbuf,
1681 ata_mode_string(xfer_mask)); 1721 ata_mode_string(xfer_mask));
1682 ata_dev_printk(dev, KERN_INFO, 1722 ata_dev_printk(dev, KERN_INFO,
1683 "%Lu sectors, multi %u, CHS %u/%u/%u\n", 1723 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
1684 (unsigned long long)dev->n_sectors, 1724 (unsigned long long)dev->n_sectors,
1685 dev->multi_count, dev->cylinders, 1725 dev->multi_count, dev->cylinders,
@@ -1778,7 +1818,7 @@ int ata_bus_probe(struct ata_port *ap)
1778{ 1818{
1779 unsigned int classes[ATA_MAX_DEVICES]; 1819 unsigned int classes[ATA_MAX_DEVICES];
1780 int tries[ATA_MAX_DEVICES]; 1820 int tries[ATA_MAX_DEVICES];
1781 int i, rc, down_xfermask; 1821 int i, rc;
1782 struct ata_device *dev; 1822 struct ata_device *dev;
1783 1823
1784 ata_port_probe(ap); 1824 ata_port_probe(ap);
@@ -1787,8 +1827,6 @@ int ata_bus_probe(struct ata_port *ap)
1787 tries[i] = ATA_PROBE_MAX_TRIES; 1827 tries[i] = ATA_PROBE_MAX_TRIES;
1788 1828
1789 retry: 1829 retry:
1790 down_xfermask = 0;
1791
1792 /* reset and determine device classes */ 1830 /* reset and determine device classes */
1793 ap->ops->phy_reset(ap); 1831 ap->ops->phy_reset(ap);
1794 1832
@@ -1812,8 +1850,11 @@ int ata_bus_probe(struct ata_port *ap)
1812 for (i = 0; i < ATA_MAX_DEVICES; i++) 1850 for (i = 0; i < ATA_MAX_DEVICES; i++)
1813 ap->device[i].pio_mode = XFER_PIO_0; 1851 ap->device[i].pio_mode = XFER_PIO_0;
1814 1852
1815 /* read IDENTIFY page and configure devices */ 1853 /* read IDENTIFY page and configure devices. We have to do the identify
1816 for (i = 0; i < ATA_MAX_DEVICES; i++) { 1854 specific sequence bass-ackwards so that PDIAG- is released by
1855 the slave device */
1856
1857 for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
1817 dev = &ap->device[i]; 1858 dev = &ap->device[i];
1818 1859
1819 if (tries[i]) 1860 if (tries[i])
@@ -1826,6 +1867,15 @@ int ata_bus_probe(struct ata_port *ap)
1826 dev->id); 1867 dev->id);
1827 if (rc) 1868 if (rc)
1828 goto fail; 1869 goto fail;
1870 }
1871
1872 /* After the identify sequence we can now set up the devices. We do
1873 this in the normal order so that the user doesn't get confused */
1874
1875 for(i = 0; i < ATA_MAX_DEVICES; i++) {
1876 dev = &ap->device[i];
1877 if (!ata_dev_enabled(dev))
1878 continue;
1829 1879
1830 ap->eh_context.i.flags |= ATA_EHI_PRINTINFO; 1880 ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
1831 rc = ata_dev_configure(dev); 1881 rc = ata_dev_configure(dev);
@@ -1836,10 +1886,8 @@ int ata_bus_probe(struct ata_port *ap)
1836 1886
1837 /* configure transfer mode */ 1887 /* configure transfer mode */
1838 rc = ata_set_mode(ap, &dev); 1888 rc = ata_set_mode(ap, &dev);
1839 if (rc) { 1889 if (rc)
1840 down_xfermask = 1;
1841 goto fail; 1890 goto fail;
1842 }
1843 1891
1844 for (i = 0; i < ATA_MAX_DEVICES; i++) 1892 for (i = 0; i < ATA_MAX_DEVICES; i++)
1845 if (ata_dev_enabled(&ap->device[i])) 1893 if (ata_dev_enabled(&ap->device[i]))
@@ -1851,25 +1899,29 @@ int ata_bus_probe(struct ata_port *ap)
1851 return -ENODEV; 1899 return -ENODEV;
1852 1900
1853 fail: 1901 fail:
1902 tries[dev->devno]--;
1903
1854 switch (rc) { 1904 switch (rc) {
1855 case -EINVAL: 1905 case -EINVAL:
1856 case -ENODEV: 1906 /* eeek, something went very wrong, give up */
1857 tries[dev->devno] = 0; 1907 tries[dev->devno] = 0;
1858 break; 1908 break;
1909
1910 case -ENODEV:
1911 /* give it just one more chance */
1912 tries[dev->devno] = min(tries[dev->devno], 1);
1859 case -EIO: 1913 case -EIO:
1860 sata_down_spd_limit(ap); 1914 if (tries[dev->devno] == 1) {
1861 /* fall through */ 1915 /* This is the last chance, better to slow
1862 default: 1916 * down than lose it.
1863 tries[dev->devno]--; 1917 */
1864 if (down_xfermask && 1918 sata_down_spd_limit(ap);
1865 ata_down_xfermask_limit(dev, tries[dev->devno] == 1)) 1919 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
1866 tries[dev->devno] = 0; 1920 }
1867 } 1921 }
1868 1922
1869 if (!tries[dev->devno]) { 1923 if (!tries[dev->devno])
1870 ata_down_xfermask_limit(dev, 1);
1871 ata_dev_disable(dev); 1924 ata_dev_disable(dev);
1872 }
1873 1925
1874 goto retry; 1926 goto retry;
1875} 1927}
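The reworked failure path above charges one try per failure and treats the errno as policy: -EINVAL gives up, -ENODEV allows a single last attempt, and on the final attempt the link speed and transfer mode are lowered rather than losing the device. A stand-alone model of that policy (the helper is illustrative, not a libata interface):

#include <errno.h>
#include <stdio.h>

#define ATA_PROBE_MAX_TRIES 3	/* as used by ata_bus_probe() */

/* Returns how many tries remain after a probe failure with error 'rc'. */
static int remaining_tries(int rc, int tries)
{
	tries--;			/* every failure burns one try */

	switch (rc) {
	case -EINVAL:			/* something went very wrong: give up */
		return 0;
	case -ENODEV:			/* flaky/missing device: one last chance */
		if (tries > 1)
			tries = 1;
		/* fall through */
	case -EIO:
	default:
		/* on the last remaining try the real code also slows the
		 * SATA link and limits the transfer mode to PIO */
		return tries;
	}
}

int main(void)
{
	int tries = ATA_PROBE_MAX_TRIES;

	tries = remaining_tries(-EIO, tries);		/* 3 -> 2 */
	tries = remaining_tries(-ENODEV, tries);	/* 2 -> 1, last chance */
	printf("tries left: %d\n", tries);
	return 0;
}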
@@ -2300,7 +2352,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2300/** 2352/**
2301 * ata_down_xfermask_limit - adjust dev xfer masks downward 2353 * ata_down_xfermask_limit - adjust dev xfer masks downward
2302 * @dev: Device to adjust xfer masks 2354 * @dev: Device to adjust xfer masks
2303 * @force_pio0: Force PIO0 2355 * @sel: ATA_DNXFER_* selector
2304 * 2356 *
2305 * Adjust xfer masks of @dev downward. Note that this function 2357 * Adjust xfer masks of @dev downward. Note that this function
2306 * does not apply the change. Invoking ata_set_mode() afterwards 2358 * does not apply the change. Invoking ata_set_mode() afterwards
@@ -2312,37 +2364,78 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2312 * RETURNS: 2364 * RETURNS:
2313 * 0 on success, negative errno on failure 2365 * 0 on success, negative errno on failure
2314 */ 2366 */
2315int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0) 2367int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2316{ 2368{
2317 unsigned long xfer_mask; 2369 char buf[32];
2318 int highbit; 2370 unsigned int orig_mask, xfer_mask;
2371 unsigned int pio_mask, mwdma_mask, udma_mask;
2372 int quiet, highbit;
2319 2373
2320 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask, 2374 quiet = !!(sel & ATA_DNXFER_QUIET);
2321 dev->udma_mask); 2375 sel &= ~ATA_DNXFER_QUIET;
2322 2376
2323 if (!xfer_mask) 2377 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2324 goto fail; 2378 dev->mwdma_mask,
2325 /* don't gear down to MWDMA from UDMA, go directly to PIO */ 2379 dev->udma_mask);
2326 if (xfer_mask & ATA_MASK_UDMA) 2380 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2327 xfer_mask &= ~ATA_MASK_MWDMA;
2328 2381
2329 highbit = fls(xfer_mask) - 1; 2382 switch (sel) {
2330 xfer_mask &= ~(1 << highbit); 2383 case ATA_DNXFER_PIO:
2331 if (force_pio0) 2384 highbit = fls(pio_mask) - 1;
2332 xfer_mask &= 1 << ATA_SHIFT_PIO; 2385 pio_mask &= ~(1 << highbit);
2333 if (!xfer_mask) 2386 break;
2334 goto fail; 2387
2388 case ATA_DNXFER_DMA:
2389 if (udma_mask) {
2390 highbit = fls(udma_mask) - 1;
2391 udma_mask &= ~(1 << highbit);
2392 if (!udma_mask)
2393 return -ENOENT;
2394 } else if (mwdma_mask) {
2395 highbit = fls(mwdma_mask) - 1;
2396 mwdma_mask &= ~(1 << highbit);
2397 if (!mwdma_mask)
2398 return -ENOENT;
2399 }
2400 break;
2401
2402 case ATA_DNXFER_40C:
2403 udma_mask &= ATA_UDMA_MASK_40C;
2404 break;
2405
2406 case ATA_DNXFER_FORCE_PIO0:
2407 pio_mask &= 1;
2408 case ATA_DNXFER_FORCE_PIO:
2409 mwdma_mask = 0;
2410 udma_mask = 0;
2411 break;
2412
2413 default:
2414 BUG();
2415 }
2416
2417 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2418
2419 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2420 return -ENOENT;
2421
2422 if (!quiet) {
2423 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2424 snprintf(buf, sizeof(buf), "%s:%s",
2425 ata_mode_string(xfer_mask),
2426 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2427 else
2428 snprintf(buf, sizeof(buf), "%s",
2429 ata_mode_string(xfer_mask));
2430
2431 ata_dev_printk(dev, KERN_WARNING,
2432 "limiting speed to %s\n", buf);
2433 }
2335 2434
2336 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 2435 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2337 &dev->udma_mask); 2436 &dev->udma_mask);
2338 2437
2339 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2340 ata_mode_string(xfer_mask));
2341
2342 return 0; 2438 return 0;
2343
2344 fail:
2345 return -EINVAL;
2346} 2439}
2347 2440
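A user-space model of the selector-based downgrade introduced above; the ATA_DNXFER_* meanings follow the enum added to libata.h later in this patch, while fls32() and the 40-wire UDMA cap value are stand-ins, not kernel constants.

#include <stdio.h>

enum { DNXFER_PIO, DNXFER_DMA, DNXFER_40C, DNXFER_FORCE_PIO, DNXFER_FORCE_PIO0 };

#define UDMA_MASK_40C 0x07u	/* stand-in: UDMA0-2 only on a 40-wire cable */

struct masks { unsigned int pio, mwdma, udma; };

static int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* Apply one downgrade step; returns -1 when no usable PIO mode is left
 * (the real helper also fails with -ENOENT when nothing changed). */
static int down_xfermask(struct masks *m, int sel)
{
	switch (sel) {
	case DNXFER_PIO:		/* drop the fastest remaining PIO mode */
		if (m->pio)
			m->pio &= ~(1u << (fls32(m->pio) - 1));
		break;
	case DNXFER_DMA:		/* drop the fastest UDMA mode, else MWDMA */
		if (m->udma)
			m->udma &= ~(1u << (fls32(m->udma) - 1));
		else if (m->mwdma)
			m->mwdma &= ~(1u << (fls32(m->mwdma) - 1));
		break;
	case DNXFER_40C:		/* cap UDMA at what a 40-wire cable allows */
		m->udma &= UDMA_MASK_40C;
		break;
	case DNXFER_FORCE_PIO0:		/* keep only PIO0 ... */
		m->pio &= 1;
		/* fall through */
	case DNXFER_FORCE_PIO:		/* ... and forbid DMA entirely */
		m->mwdma = 0;
		m->udma = 0;
		break;
	}
	return m->pio ? 0 : -1;
}

int main(void)
{
	struct masks m = { .pio = 0x1f, .mwdma = 0x07, .udma = 0x3f };

	down_xfermask(&m, DNXFER_DMA);	/* the top UDMA mode is dropped first */
	printf("pio %#x mwdma %#x udma %#x\n", m.pio, m.mwdma, m.udma);
	return 0;
}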
2348static int ata_dev_set_mode(struct ata_device *dev) 2441static int ata_dev_set_mode(struct ata_device *dev)
@@ -2475,12 +2568,11 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2475 * host channels are not permitted to do so. 2568 * host channels are not permitted to do so.
2476 */ 2569 */
2477 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) 2570 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2478 ap->host->simplex_claimed = 1; 2571 ap->host->simplex_claimed = ap;
2479 2572
2480 /* step5: chip specific finalisation */ 2573 /* step5: chip specific finalisation */
2481 if (ap->ops->post_set_mode) 2574 if (ap->ops->post_set_mode)
2482 ap->ops->post_set_mode(ap); 2575 ap->ops->post_set_mode(ap);
2483
2484 out: 2576 out:
2485 if (rc) 2577 if (rc)
2486 *r_failed_dev = dev; 2578 *r_failed_dev = dev;
@@ -2609,7 +2701,7 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
2609{ 2701{
2610 struct ata_ioports *ioaddr = &ap->ioaddr; 2702 struct ata_ioports *ioaddr = &ap->ioaddr;
2611 2703
2612 DPRINTK("ata%u: bus reset via SRST\n", ap->id); 2704 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
2613 2705
2614 /* software reset. causes dev0 to be selected */ 2706 /* software reset. causes dev0 to be selected */
2615 iowrite8(ap->ctl, ioaddr->ctl_addr); 2707 iowrite8(ap->ctl, ioaddr->ctl_addr);
@@ -2669,7 +2761,7 @@ void ata_bus_reset(struct ata_port *ap)
2669 u8 err; 2761 u8 err;
2670 unsigned int dev0, dev1 = 0, devmask = 0; 2762 unsigned int dev0, dev1 = 0, devmask = 0;
2671 2763
2672 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no); 2764 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
2673 2765
2674 /* determine if device 0/1 are present */ 2766 /* determine if device 0/1 are present */
2675 if (ap->flags & ATA_FLAG_SATA_RESET) 2767 if (ap->flags & ATA_FLAG_SATA_RESET)
@@ -3256,7 +3348,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
3256 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, 3348 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3257 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, 3349 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3258 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, 3350 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3259 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3260 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, 3351 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3261 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 3352 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3262 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA }, 3353 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
@@ -3266,6 +3357,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
3266 /* Devices where NCQ should be avoided */ 3357 /* Devices where NCQ should be avoided */
3267 /* NCQ is slow */ 3358 /* NCQ is slow */
3268 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 3359 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3360 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3361 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
3269 3362
3270 /* Devices with NCQ limits */ 3363 /* Devices with NCQ limits */
3271 3364
@@ -3362,7 +3455,7 @@ static void ata_dev_xfermask(struct ata_device *dev)
3362 "device is on DMA blacklist, disabling DMA\n"); 3455 "device is on DMA blacklist, disabling DMA\n");
3363 } 3456 }
3364 3457
3365 if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) { 3458 if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed != ap) {
3366 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 3459 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3367 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by " 3460 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3368 "other device, disabling DMA\n"); 3461 "other device, disabling DMA\n");
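A toy model of the simplex ownership rule after this change: the host now remembers which port claimed the shared DMA engine, and only that port keeps its DMA masks. The structures below are mock-ups, not the libata ones.

#include <stdio.h>

struct port;
struct host { int simplex; struct port *simplex_claimed; };
struct port { struct host *host; const char *name; };

static int port_may_use_dma(struct port *ap)
{
	struct host *h = ap->host;

	/* mirrors the check above: on a simplex host, only the claimer
	 * may keep MWDMA/UDMA in its transfer mask */
	return !h->simplex || h->simplex_claimed == ap;
}

int main(void)
{
	struct host h = { .simplex = 1 };
	struct port p0 = { .host = &h, .name = "ata1" };
	struct port p1 = { .host = &h, .name = "ata2" };

	h.simplex_claimed = &p0;	/* first port to set a DMA mode wins */
	printf("%s dma: %d\n", p0.name, port_may_use_dma(&p0));	/* 1 */
	printf("%s dma: %d\n", p1.name, port_may_use_dma(&p1));	/* 0 */
	return 0;
}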
@@ -3739,7 +3832,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
3739 struct scatterlist *lsg = &sg[qc->n_elem - 1]; 3832 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3740 int n_elem, pre_n_elem, dir, trim_sg = 0; 3833 int n_elem, pre_n_elem, dir, trim_sg = 0;
3741 3834
3742 VPRINTK("ENTER, ata%u\n", ap->id); 3835 VPRINTK("ENTER, ata%u\n", ap->print_id);
3743 WARN_ON(!(qc->flags & ATA_QCFLAG_SG)); 3836 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3744 3837
3745 /* we must lengthen transfers to end on a 32-bit boundary */ 3838 /* we must lengthen transfers to end on a 32-bit boundary */
@@ -4140,7 +4233,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4140 if (do_write != i_write) 4233 if (do_write != i_write)
4141 goto err_out; 4234 goto err_out;
4142 4235
4143 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes); 4236 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
4144 4237
4145 __atapi_pio_bytes(qc, bytes); 4238 __atapi_pio_bytes(qc, bytes);
4146 4239
@@ -4257,7 +4350,7 @@ int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4257 4350
4258fsm_start: 4351fsm_start:
4259 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", 4352 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4260 ap->id, qc->tf.protocol, ap->hsm_task_state, status); 4353 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
4261 4354
4262 switch (ap->hsm_task_state) { 4355 switch (ap->hsm_task_state) {
4263 case HSM_ST_FIRST: 4356 case HSM_ST_FIRST:
@@ -4290,8 +4383,8 @@ fsm_start:
4290 * let the EH abort the command or reset the device. 4383 * let the EH abort the command or reset the device.
4291 */ 4384 */
4292 if (unlikely(status & (ATA_ERR | ATA_DF))) { 4385 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4293 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n", 4386 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
4294 ap->id, status); 4387 "error, dev_stat 0x%X\n", status);
4295 qc->err_mask |= AC_ERR_HSM; 4388 qc->err_mask |= AC_ERR_HSM;
4296 ap->hsm_task_state = HSM_ST_ERR; 4389 ap->hsm_task_state = HSM_ST_ERR;
4297 goto fsm_start; 4390 goto fsm_start;
@@ -4348,8 +4441,9 @@ fsm_start:
4348 * let the EH abort the command or reset the device. 4441 * let the EH abort the command or reset the device.
4349 */ 4442 */
4350 if (unlikely(status & (ATA_ERR | ATA_DF))) { 4443 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4351 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n", 4444 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
4352 ap->id, status); 4445 "device error, dev_stat 0x%X\n",
4446 status);
4353 qc->err_mask |= AC_ERR_HSM; 4447 qc->err_mask |= AC_ERR_HSM;
4354 ap->hsm_task_state = HSM_ST_ERR; 4448 ap->hsm_task_state = HSM_ST_ERR;
4355 goto fsm_start; 4449 goto fsm_start;
@@ -4435,7 +4529,7 @@ fsm_start:
4435 4529
4436 /* no more data to transfer */ 4530 /* no more data to transfer */
4437 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n", 4531 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4438 ap->id, qc->dev->devno, status); 4532 ap->print_id, qc->dev->devno, status);
4439 4533
4440 WARN_ON(qc->err_mask); 4534 WARN_ON(qc->err_mask);
4441 4535
@@ -4977,7 +5071,7 @@ inline unsigned int ata_host_intr (struct ata_port *ap,
4977 u8 status, host_stat = 0; 5071 u8 status, host_stat = 0;
4978 5072
4979 VPRINTK("ata%u: protocol %d task_state %d\n", 5073 VPRINTK("ata%u: protocol %d task_state %d\n",
4980 ap->id, qc->tf.protocol, ap->hsm_task_state); 5074 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
4981 5075
4982 /* Check whether we are expecting interrupt in this state */ 5076 /* Check whether we are expecting interrupt in this state */
4983 switch (ap->hsm_task_state) { 5077 switch (ap->hsm_task_state) {
@@ -4998,7 +5092,8 @@ inline unsigned int ata_host_intr (struct ata_port *ap,
4998 qc->tf.protocol == ATA_PROT_ATAPI_DMA) { 5092 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4999 /* check status of DMA engine */ 5093 /* check status of DMA engine */
5000 host_stat = ap->ops->bmdma_status(ap); 5094 host_stat = ap->ops->bmdma_status(ap);
5001 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat); 5095 VPRINTK("ata%u: host_stat 0x%X\n",
5096 ap->print_id, host_stat);
5002 5097
5003 /* if it's not our irq... */ 5098 /* if it's not our irq... */
5004 if (!(host_stat & ATA_DMA_INTR)) 5099 if (!(host_stat & ATA_DMA_INTR))
@@ -5259,6 +5354,7 @@ int ata_flush_cache(struct ata_device *dev)
5259 return 0; 5354 return 0;
5260} 5355}
5261 5356
5357#ifdef CONFIG_PM
5262static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, 5358static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5263 unsigned int action, unsigned int ehi_flags, 5359 unsigned int action, unsigned int ehi_flags,
5264 int wait) 5360 int wait)
@@ -5374,6 +5470,7 @@ void ata_host_resume(struct ata_host *host)
5374 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); 5470 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5375 host->dev->power.power_state = PMSG_ON; 5471 host->dev->power.power_state = PMSG_ON;
5376} 5472}
5473#endif
5377 5474
5378/** 5475/**
5379 * ata_port_start - Set port up for dma. 5476 * ata_port_start - Set port up for dma.
@@ -5457,7 +5554,7 @@ void ata_port_init(struct ata_port *ap, struct ata_host *host,
5457 5554
5458 ap->lock = &host->lock; 5555 ap->lock = &host->lock;
5459 ap->flags = ATA_FLAG_DISABLED; 5556 ap->flags = ATA_FLAG_DISABLED;
5460 ap->id = ata_unique_id++; 5557 ap->print_id = ata_print_id++;
5461 ap->ctl = ATA_DEVCTL_OBS; 5558 ap->ctl = ATA_DEVCTL_OBS;
5462 ap->host = host; 5559 ap->host = host;
5463 ap->dev = ent->dev; 5560 ap->dev = ent->dev;
@@ -5528,7 +5625,7 @@ static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
5528{ 5625{
5529 ap->scsi_host = shost; 5626 ap->scsi_host = shost;
5530 5627
5531 shost->unique_id = ap->id; 5628 shost->unique_id = ap->print_id;
5532 shost->max_id = 16; 5629 shost->max_id = 16;
5533 shost->max_lun = 1; 5630 shost->max_lun = 1;
5534 shost->max_channel = 1; 5631 shost->max_channel = 1;
@@ -5598,6 +5695,8 @@ static void ata_host_release(struct device *gendev, void *res)
5598 5695
5599 if (host->ops->host_stop) 5696 if (host->ops->host_stop)
5600 host->ops->host_stop(host); 5697 host->ops->host_stop(host);
5698
5699 dev_set_drvdata(gendev, NULL);
5601} 5700}
5602 5701
5603/** 5702/**
@@ -5792,9 +5891,9 @@ int ata_device_add(const struct ata_probe_ent *ent)
5792 /* wait for EH to finish */ 5891 /* wait for EH to finish */
5793 ata_port_wait_eh(ap); 5892 ata_port_wait_eh(ap);
5794 } else { 5893 } else {
5795 DPRINTK("ata%u: bus probe begin\n", ap->id); 5894 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5796 rc = ata_bus_probe(ap); 5895 rc = ata_bus_probe(ap);
5797 DPRINTK("ata%u: bus probe end\n", ap->id); 5896 DPRINTK("ata%u: bus probe end\n", ap->print_id);
5798 5897
5799 if (rc) { 5898 if (rc) {
5800 /* FIXME: do something useful here? 5899 /* FIXME: do something useful here?
@@ -5820,7 +5919,6 @@ int ata_device_add(const struct ata_probe_ent *ent)
5820 5919
5821 err_out: 5920 err_out:
5822 devres_release_group(dev, ata_device_add); 5921 devres_release_group(dev, ata_device_add);
5823 dev_set_drvdata(dev, NULL);
5824 VPRINTK("EXIT, returning %d\n", rc); 5922 VPRINTK("EXIT, returning %d\n", rc);
5825 return 0; 5923 return 0;
5826} 5924}
@@ -5905,11 +6003,7 @@ ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
5905{ 6003{
5906 struct ata_probe_ent *probe_ent; 6004 struct ata_probe_ent *probe_ent;
5907 6005
5908 /* XXX - the following if can go away once all LLDs are managed */ 6006 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
5909 if (!list_empty(&dev->devres_head))
5910 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
5911 else
5912 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
5913 if (!probe_ent) { 6007 if (!probe_ent) {
5914 printk(KERN_ERR DRV_NAME "(%s): out of memory\n", 6008 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
5915 kobject_name(&(dev->kobj))); 6009 kobject_name(&(dev->kobj)));
@@ -6012,14 +6106,14 @@ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6012 return (tmp == bits->val) ? 1 : 0; 6106 return (tmp == bits->val) ? 1 : 0;
6013} 6107}
6014 6108
6109#ifdef CONFIG_PM
6015void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) 6110void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6016{ 6111{
6017 pci_save_state(pdev); 6112 pci_save_state(pdev);
6113 pci_disable_device(pdev);
6018 6114
6019 if (mesg.event == PM_EVENT_SUSPEND) { 6115 if (mesg.event == PM_EVENT_SUSPEND)
6020 pci_disable_device(pdev);
6021 pci_set_power_state(pdev, PCI_D3hot); 6116 pci_set_power_state(pdev, PCI_D3hot);
6022 }
6023} 6117}
6024 6118
6025int ata_pci_device_do_resume(struct pci_dev *pdev) 6119int ata_pci_device_do_resume(struct pci_dev *pdev)
@@ -6064,6 +6158,8 @@ int ata_pci_device_resume(struct pci_dev *pdev)
6064 ata_host_resume(host); 6158 ata_host_resume(host);
6065 return rc; 6159 return rc;
6066} 6160}
6161#endif /* CONFIG_PM */
6162
6067#endif /* CONFIG_PCI */ 6163#endif /* CONFIG_PCI */
6068 6164
6069 6165
@@ -6241,6 +6337,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6241EXPORT_SYMBOL_GPL(ata_bmdma_error_handler); 6337EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6242EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); 6338EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
6243EXPORT_SYMBOL_GPL(ata_port_probe); 6339EXPORT_SYMBOL_GPL(ata_port_probe);
6340EXPORT_SYMBOL_GPL(ata_dev_disable);
6244EXPORT_SYMBOL_GPL(sata_set_spd); 6341EXPORT_SYMBOL_GPL(sata_set_spd);
6245EXPORT_SYMBOL_GPL(sata_phy_debounce); 6342EXPORT_SYMBOL_GPL(sata_phy_debounce);
6246EXPORT_SYMBOL_GPL(sata_phy_resume); 6343EXPORT_SYMBOL_GPL(sata_phy_resume);
@@ -6271,10 +6368,13 @@ EXPORT_SYMBOL_GPL(sata_scr_write);
6271EXPORT_SYMBOL_GPL(sata_scr_write_flush); 6368EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6272EXPORT_SYMBOL_GPL(ata_port_online); 6369EXPORT_SYMBOL_GPL(ata_port_online);
6273EXPORT_SYMBOL_GPL(ata_port_offline); 6370EXPORT_SYMBOL_GPL(ata_port_offline);
6371#ifdef CONFIG_PM
6274EXPORT_SYMBOL_GPL(ata_host_suspend); 6372EXPORT_SYMBOL_GPL(ata_host_suspend);
6275EXPORT_SYMBOL_GPL(ata_host_resume); 6373EXPORT_SYMBOL_GPL(ata_host_resume);
6374#endif /* CONFIG_PM */
6276EXPORT_SYMBOL_GPL(ata_id_string); 6375EXPORT_SYMBOL_GPL(ata_id_string);
6277EXPORT_SYMBOL_GPL(ata_id_c_string); 6376EXPORT_SYMBOL_GPL(ata_id_c_string);
6377EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
6278EXPORT_SYMBOL_GPL(ata_device_blacklisted); 6378EXPORT_SYMBOL_GPL(ata_device_blacklisted);
6279EXPORT_SYMBOL_GPL(ata_scsi_simulate); 6379EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6280 6380
@@ -6287,16 +6387,20 @@ EXPORT_SYMBOL_GPL(pci_test_config_bits);
6287EXPORT_SYMBOL_GPL(ata_pci_init_native_mode); 6387EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
6288EXPORT_SYMBOL_GPL(ata_pci_init_one); 6388EXPORT_SYMBOL_GPL(ata_pci_init_one);
6289EXPORT_SYMBOL_GPL(ata_pci_remove_one); 6389EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6390#ifdef CONFIG_PM
6290EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); 6391EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6291EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); 6392EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6292EXPORT_SYMBOL_GPL(ata_pci_device_suspend); 6393EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6293EXPORT_SYMBOL_GPL(ata_pci_device_resume); 6394EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6395#endif /* CONFIG_PM */
6294EXPORT_SYMBOL_GPL(ata_pci_default_filter); 6396EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6295EXPORT_SYMBOL_GPL(ata_pci_clear_simplex); 6397EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
6296#endif /* CONFIG_PCI */ 6398#endif /* CONFIG_PCI */
6297 6399
6400#ifdef CONFIG_PM
6298EXPORT_SYMBOL_GPL(ata_scsi_device_suspend); 6401EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
6299EXPORT_SYMBOL_GPL(ata_scsi_device_resume); 6402EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
6403#endif /* CONFIG_PM */
6300 6404
6301EXPORT_SYMBOL_GPL(ata_eng_timeout); 6405EXPORT_SYMBOL_GPL(ata_eng_timeout);
6302EXPORT_SYMBOL_GPL(ata_port_schedule_eh); 6406EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
@@ -6311,3 +6415,4 @@ EXPORT_SYMBOL_GPL(ata_irq_on);
6311EXPORT_SYMBOL_GPL(ata_dummy_irq_on); 6415EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
6312EXPORT_SYMBOL_GPL(ata_irq_ack); 6416EXPORT_SYMBOL_GPL(ata_irq_ack);
6313EXPORT_SYMBOL_GPL(ata_dummy_irq_ack); 6417EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
6418EXPORT_SYMBOL_GPL(ata_dev_try_classify);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 52c85af7fe..7349c3dbf7 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -44,10 +44,41 @@
44 44
45#include "libata.h" 45#include "libata.h"
46 46
47enum {
48 ATA_EH_SPDN_NCQ_OFF = (1 << 0),
49 ATA_EH_SPDN_SPEED_DOWN = (1 << 1),
50 ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2),
51};
52
47static void __ata_port_freeze(struct ata_port *ap); 53static void __ata_port_freeze(struct ata_port *ap);
48static void ata_eh_finish(struct ata_port *ap); 54static void ata_eh_finish(struct ata_port *ap);
55#ifdef CONFIG_PM
49static void ata_eh_handle_port_suspend(struct ata_port *ap); 56static void ata_eh_handle_port_suspend(struct ata_port *ap);
50static void ata_eh_handle_port_resume(struct ata_port *ap); 57static void ata_eh_handle_port_resume(struct ata_port *ap);
58static int ata_eh_suspend(struct ata_port *ap,
59 struct ata_device **r_failed_dev);
60static void ata_eh_prep_resume(struct ata_port *ap);
61static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev);
62#else /* CONFIG_PM */
63static void ata_eh_handle_port_suspend(struct ata_port *ap)
64{ }
65
66static void ata_eh_handle_port_resume(struct ata_port *ap)
67{ }
68
69static int ata_eh_suspend(struct ata_port *ap, struct ata_device **r_failed_dev)
70{
71 return 0;
72}
73
74static void ata_eh_prep_resume(struct ata_port *ap)
75{ }
76
77static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev)
78{
79 return 0;
80}
81#endif /* CONFIG_PM */
51 82
52static void ata_ering_record(struct ata_ering *ering, int is_io, 83static void ata_ering_record(struct ata_ering *ering, int is_io,
53 unsigned int err_mask) 84 unsigned int err_mask)
@@ -65,12 +96,9 @@ static void ata_ering_record(struct ata_ering *ering, int is_io,
65 ent->timestamp = get_jiffies_64(); 96 ent->timestamp = get_jiffies_64();
66} 97}
67 98
68static struct ata_ering_entry * ata_ering_top(struct ata_ering *ering) 99static void ata_ering_clear(struct ata_ering *ering)
69{ 100{
70 struct ata_ering_entry *ent = &ering->ring[ering->cursor]; 101 memset(ering, 0, sizeof(*ering));
71 if (!ent->err_mask)
72 return NULL;
73 return ent;
74} 102}
75 103
76static int ata_ering_map(struct ata_ering *ering, 104static int ata_ering_map(struct ata_ering *ering,
@@ -585,7 +613,7 @@ static void __ata_port_freeze(struct ata_port *ap)
585 613
586 ap->pflags |= ATA_PFLAG_FROZEN; 614 ap->pflags |= ATA_PFLAG_FROZEN;
587 615
588 DPRINTK("ata%u port frozen\n", ap->id); 616 DPRINTK("ata%u port frozen\n", ap->print_id);
589} 617}
590 618
591/** 619/**
@@ -658,7 +686,7 @@ void ata_eh_thaw_port(struct ata_port *ap)
658 686
659 spin_unlock_irqrestore(ap->lock, flags); 687 spin_unlock_irqrestore(ap->lock, flags);
660 688
661 DPRINTK("ata%u port thawed\n", ap->id); 689 DPRINTK("ata%u port thawed\n", ap->print_id);
662} 690}
663 691
664static void ata_eh_scsidone(struct scsi_cmnd *scmd) 692static void ata_eh_scsidone(struct scsi_cmnd *scmd)
@@ -1159,87 +1187,99 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1159 return action; 1187 return action;
1160} 1188}
1161 1189
1162static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent) 1190static int ata_eh_categorize_error(int is_io, unsigned int err_mask)
1163{ 1191{
1164 if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT)) 1192 if (err_mask & AC_ERR_ATA_BUS)
1165 return 1; 1193 return 1;
1166 1194
1167 if (ent->is_io) { 1195 if (err_mask & AC_ERR_TIMEOUT)
1168 if (ent->err_mask & AC_ERR_HSM) 1196 return 2;
1169 return 1; 1197
1170 if ((ent->err_mask & 1198 if (is_io) {
1171 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) 1199 if (err_mask & AC_ERR_HSM)
1172 return 2; 1200 return 2;
1201 if ((err_mask &
1202 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1203 return 3;
1173 } 1204 }
1174 1205
1175 return 0; 1206 return 0;
1176} 1207}
1177 1208
1178struct speed_down_needed_arg { 1209struct speed_down_verdict_arg {
1179 u64 since; 1210 u64 since;
1180 int nr_errors[3]; 1211 int nr_errors[4];
1181}; 1212};
1182 1213
1183static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg) 1214static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1184{ 1215{
1185 struct speed_down_needed_arg *arg = void_arg; 1216 struct speed_down_verdict_arg *arg = void_arg;
1217 int cat = ata_eh_categorize_error(ent->is_io, ent->err_mask);
1186 1218
1187 if (ent->timestamp < arg->since) 1219 if (ent->timestamp < arg->since)
1188 return -1; 1220 return -1;
1189 1221
1190 arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++; 1222 arg->nr_errors[cat]++;
1191 return 0; 1223 return 0;
1192} 1224}
1193 1225
1194/** 1226/**
1195 * ata_eh_speed_down_needed - Determine wheter speed down is necessary 1227 * ata_eh_speed_down_verdict - Determine speed down verdict
1196 * @dev: Device of interest 1228 * @dev: Device of interest
1197 * 1229 *
1198 * This function examines error ring of @dev and determines 1230 * This function examines error ring of @dev and determines
1199 * whether speed down is necessary. Speed down is necessary if 1231 * whether NCQ needs to be turned off, transfer speed should be
1200 * there have been more than 3 of Cat-1 errors or 10 of Cat-2 1232 * stepped down, or falling back to PIO is necessary.
1201 * errors during last 15 minutes.
1202 * 1233 *
1203 * Cat-1 errors are ATA_BUS, TIMEOUT for any command and HSM 1234 * Cat-1 is ATA_BUS error for any command.
1204 * violation for known supported commands.
1205 * 1235 *
1206 * Cat-2 errors are unclassified DEV error for known supported 1236 * Cat-2 is TIMEOUT for any command or HSM violation for known
1237 * supported commands.
1238 *
 1239 * Cat-3 is unclassified DEV error for known supported
1207 * command. 1240 * command.
1208 * 1241 *
1242 * NCQ needs to be turned off if there have been more than 3
1243 * Cat-2 + Cat-3 errors during last 10 minutes.
1244 *
1245 * Speed down is necessary if there have been more than 3 Cat-1 +
1246 * Cat-2 errors or 10 Cat-3 errors during last 10 minutes.
1247 *
1248 * Falling back to PIO mode is necessary if there have been more
1249 * than 10 Cat-1 + Cat-2 + Cat-3 errors during last 5 minutes.
1250 *
1209 * LOCKING: 1251 * LOCKING:
1210 * Inherited from caller. 1252 * Inherited from caller.
1211 * 1253 *
1212 * RETURNS: 1254 * RETURNS:
1213 * 1 if speed down is necessary, 0 otherwise 1255 * OR of ATA_EH_SPDN_* flags.
1214 */ 1256 */
1215static int ata_eh_speed_down_needed(struct ata_device *dev) 1257static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1216{ 1258{
1217 const u64 interval = 15LLU * 60 * HZ; 1259 const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
1218 static const int err_limits[3] = { -1, 3, 10 }; 1260 u64 j64 = get_jiffies_64();
1219 struct speed_down_needed_arg arg; 1261 struct speed_down_verdict_arg arg;
1220 struct ata_ering_entry *ent; 1262 unsigned int verdict = 0;
1221 int err_cat;
1222 u64 j64;
1223 1263
1224 ent = ata_ering_top(&dev->ering); 1264 /* scan past 10 mins of error history */
1225 if (!ent) 1265 memset(&arg, 0, sizeof(arg));
1226 return 0; 1266 arg.since = j64 - min(j64, j10mins);
1267 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1227 1268
1228 err_cat = ata_eh_categorize_ering_entry(ent); 1269 if (arg.nr_errors[2] + arg.nr_errors[3] > 3)
1229 if (err_cat == 0) 1270 verdict |= ATA_EH_SPDN_NCQ_OFF;
1230 return 0; 1271 if (arg.nr_errors[1] + arg.nr_errors[2] > 3 || arg.nr_errors[3] > 10)
1272 verdict |= ATA_EH_SPDN_SPEED_DOWN;
1231 1273
 1274 /* scan past 5 mins of error history */
1232 memset(&arg, 0, sizeof(arg)); 1275 memset(&arg, 0, sizeof(arg));
1276 arg.since = j64 - min(j64, j5mins);
1277 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1233 1278
1234 j64 = get_jiffies_64(); 1279 if (arg.nr_errors[1] + arg.nr_errors[2] + arg.nr_errors[3] > 10)
1235 if (j64 >= interval) 1280 verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
1236 arg.since = j64 - interval;
1237 else
1238 arg.since = 0;
1239
1240 ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);
1241 1281
1242 return arg.nr_errors[err_cat] > err_limits[err_cat]; 1282 return verdict;
1243} 1283}
1244 1284
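The verdict logic can be summarised outside the kernel: Cat-1 is an ATA_BUS error, Cat-2 a TIMEOUT or HSM violation, Cat-3 an unclassified DEV error, counted over 10- and 5-minute windows. A sketch using the same thresholds as the helper above, with the ring-buffer walk replaced by plain arrays:

#include <stdio.h>

enum { SPDN_NCQ_OFF = 1 << 0, SPDN_SPEED_DOWN = 1 << 1, SPDN_FALLBACK_TO_PIO = 1 << 2 };

/* nr10[cat] = errors of category cat in the last 10 minutes,
 * nr5[cat]  = errors of category cat in the last 5 minutes. */
static unsigned int speed_down_verdict(const int nr10[4], const int nr5[4])
{
	unsigned int verdict = 0;

	if (nr10[2] + nr10[3] > 3)
		verdict |= SPDN_NCQ_OFF;
	if (nr10[1] + nr10[2] > 3 || nr10[3] > 10)
		verdict |= SPDN_SPEED_DOWN;
	if (nr5[1] + nr5[2] + nr5[3] > 10)
		verdict |= SPDN_FALLBACK_TO_PIO;

	return verdict;
}

int main(void)
{
	int last10[4] = { 0, 1, 4, 2 };	/* one Cat-1, four Cat-2, two Cat-3 */
	int last5[4]  = { 0, 0, 2, 1 };

	printf("verdict: %#x\n", speed_down_verdict(last10, last5));
	return 0;
}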
1245/** 1285/**
@@ -1257,31 +1297,80 @@ static int ata_eh_speed_down_needed(struct ata_device *dev)
1257 * Kernel thread context (may sleep). 1297 * Kernel thread context (may sleep).
1258 * 1298 *
1259 * RETURNS: 1299 * RETURNS:
1260 * 0 on success, -errno otherwise 1300 * Determined recovery action.
1261 */ 1301 */
1262static int ata_eh_speed_down(struct ata_device *dev, int is_io, 1302static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
1263 unsigned int err_mask) 1303 unsigned int err_mask)
1264{ 1304{
1265 if (!err_mask) 1305 unsigned int verdict;
1306 unsigned int action = 0;
1307
1308 /* don't bother if Cat-0 error */
1309 if (ata_eh_categorize_error(is_io, err_mask) == 0)
1266 return 0; 1310 return 0;
1267 1311
1268 /* record error and determine whether speed down is necessary */ 1312 /* record error and determine whether speed down is necessary */
1269 ata_ering_record(&dev->ering, is_io, err_mask); 1313 ata_ering_record(&dev->ering, is_io, err_mask);
1314 verdict = ata_eh_speed_down_verdict(dev);
1270 1315
1271 if (!ata_eh_speed_down_needed(dev)) 1316 /* turn off NCQ? */
1272 return 0; 1317 if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
1318 (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
1319 ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
1320 dev->flags |= ATA_DFLAG_NCQ_OFF;
1321 ata_dev_printk(dev, KERN_WARNING,
1322 "NCQ disabled due to excessive errors\n");
1323 goto done;
1324 }
1273 1325
1274 /* speed down SATA link speed if possible */ 1326 /* speed down? */
1275 if (sata_down_spd_limit(dev->ap) == 0) 1327 if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
1276 return ATA_EH_HARDRESET; 1328 /* speed down SATA link speed if possible */
1329 if (sata_down_spd_limit(dev->ap) == 0) {
1330 action |= ATA_EH_HARDRESET;
1331 goto done;
1332 }
1277 1333
1278 /* lower transfer mode */ 1334 /* lower transfer mode */
1279 if (ata_down_xfermask_limit(dev, 0) == 0) 1335 if (dev->spdn_cnt < 2) {
1280 return ATA_EH_SOFTRESET; 1336 static const int dma_dnxfer_sel[] =
1337 { ATA_DNXFER_DMA, ATA_DNXFER_40C };
1338 static const int pio_dnxfer_sel[] =
1339 { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
1340 int sel;
1341
1342 if (dev->xfer_shift != ATA_SHIFT_PIO)
1343 sel = dma_dnxfer_sel[dev->spdn_cnt];
1344 else
1345 sel = pio_dnxfer_sel[dev->spdn_cnt];
1346
1347 dev->spdn_cnt++;
1348
1349 if (ata_down_xfermask_limit(dev, sel) == 0) {
1350 action |= ATA_EH_SOFTRESET;
1351 goto done;
1352 }
1353 }
1354 }
1355
1356 /* Fall back to PIO? Slowing down to PIO is meaningless for
1357 * SATA. Consider it only for PATA.
1358 */
1359 if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
1360 (dev->ap->cbl != ATA_CBL_SATA) &&
1361 (dev->xfer_shift != ATA_SHIFT_PIO)) {
1362 if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
1363 dev->spdn_cnt = 0;
1364 action |= ATA_EH_SOFTRESET;
1365 goto done;
1366 }
1367 }
1281 1368
1282 ata_dev_printk(dev, KERN_ERR,
1283 "speed down requested but no transfer mode left\n");
1284 return 0; 1369 return 0;
1370 done:
1371 /* device has been slowed down, blow error history */
1372 ata_ering_clear(&dev->ering);
1373 return action;
1285} 1374}
1286 1375
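The spdn_cnt ladder above tries at most two downgrades before the PIO fallback is even considered. A small illustration of the selector sequence; only the ATA_DNXFER_* names come from the patch, the rest is mock code.

#include <stdio.h>

enum { DNXFER_PIO, DNXFER_DMA, DNXFER_40C, DNXFER_FORCE_PIO, DNXFER_FORCE_PIO0 };

static const char *name[] = { "PIO", "DMA", "40C", "FORCE_PIO", "FORCE_PIO0" };

/* Pick the downgrade selector for a given attempt, mirroring the tables in
 * the hunk above: DMA devices step DMA -> 40C, PIO devices PIO -> PIO0. */
static int pick_sel(int attempt, int running_pio)
{
	static const int dma_sel[] = { DNXFER_DMA, DNXFER_40C };
	static const int pio_sel[] = { DNXFER_PIO, DNXFER_FORCE_PIO0 };

	return running_pio ? pio_sel[attempt] : dma_sel[attempt];
}

int main(void)
{
	int spdn_cnt;

	for (spdn_cnt = 0; spdn_cnt < 2; spdn_cnt++)
		printf("attempt %d: ATA_DNXFER_%s\n",
		       spdn_cnt, name[pick_sel(spdn_cnt, 0)]);

	/* only after both attempts, and only on PATA, does the
	 * FALLBACK_TO_PIO verdict force ATA_DNXFER_FORCE_PIO */
	printf("attempt 2+: ATA_DNXFER_FORCE_PIO (PATA only)\n");
	return 0;
}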
1287/** 1376/**
@@ -1726,6 +1815,7 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1726 return rc; 1815 return rc;
1727} 1816}
1728 1817
1818#ifdef CONFIG_PM
1729/** 1819/**
1730 * ata_eh_suspend - handle suspend EH action 1820 * ata_eh_suspend - handle suspend EH action
1731 * @ap: target host port 1821 * @ap: target host port
@@ -1883,6 +1973,7 @@ static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev)
1883 DPRINTK("EXIT\n"); 1973 DPRINTK("EXIT\n");
1884 return 0; 1974 return 0;
1885} 1975}
1976#endif /* CONFIG_PM */
1886 1977
1887static int ata_port_nr_enabled(struct ata_port *ap) 1978static int ata_port_nr_enabled(struct ata_port *ap)
1888{ 1979{
@@ -1964,7 +2055,7 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
1964{ 2055{
1965 struct ata_eh_context *ehc = &ap->eh_context; 2056 struct ata_eh_context *ehc = &ap->eh_context;
1966 struct ata_device *dev; 2057 struct ata_device *dev;
1967 int down_xfermask, i, rc; 2058 int i, rc;
1968 2059
1969 DPRINTK("ENTER\n"); 2060 DPRINTK("ENTER\n");
1970 2061
@@ -1993,7 +2084,6 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
1993 } 2084 }
1994 2085
1995 retry: 2086 retry:
1996 down_xfermask = 0;
1997 rc = 0; 2087 rc = 0;
1998 2088
1999 /* if UNLOADING, finish immediately */ 2089 /* if UNLOADING, finish immediately */
@@ -2038,10 +2128,8 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
2038 /* configure transfer mode if necessary */ 2128 /* configure transfer mode if necessary */
2039 if (ehc->i.flags & ATA_EHI_SETMODE) { 2129 if (ehc->i.flags & ATA_EHI_SETMODE) {
2040 rc = ata_set_mode(ap, &dev); 2130 rc = ata_set_mode(ap, &dev);
2041 if (rc) { 2131 if (rc)
2042 down_xfermask = 1;
2043 goto dev_fail; 2132 goto dev_fail;
2044 }
2045 ehc->i.flags &= ~ATA_EHI_SETMODE; 2133 ehc->i.flags &= ~ATA_EHI_SETMODE;
2046 } 2134 }
2047 2135
@@ -2053,20 +2141,27 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
2053 goto out; 2141 goto out;
2054 2142
2055 dev_fail: 2143 dev_fail:
2144 ehc->tries[dev->devno]--;
2145
2056 switch (rc) { 2146 switch (rc) {
2057 case -ENODEV:
2058 /* device missing, schedule probing */
2059 ehc->i.probe_mask |= (1 << dev->devno);
2060 case -EINVAL: 2147 case -EINVAL:
2148 /* eeek, something went very wrong, give up */
2061 ehc->tries[dev->devno] = 0; 2149 ehc->tries[dev->devno] = 0;
2062 break; 2150 break;
2151
2152 case -ENODEV:
2153 /* device missing or wrong IDENTIFY data, schedule probing */
2154 ehc->i.probe_mask |= (1 << dev->devno);
2155 /* give it just one more chance */
2156 ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
2063 case -EIO: 2157 case -EIO:
2064 sata_down_spd_limit(ap); 2158 if (ehc->tries[dev->devno] == 1) {
2065 default: 2159 /* This is the last chance, better to slow
2066 ehc->tries[dev->devno]--; 2160 * down than lose it.
2067 if (down_xfermask && 2161 */
2068 ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1)) 2162 sata_down_spd_limit(ap);
2069 ehc->tries[dev->devno] = 0; 2163 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2164 }
2070 } 2165 }
2071 2166
2072 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { 2167 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
@@ -2181,6 +2276,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
2181 ata_eh_finish(ap); 2276 ata_eh_finish(ap);
2182} 2277}
2183 2278
2279#ifdef CONFIG_PM
2184/** 2280/**
2185 * ata_eh_handle_port_suspend - perform port suspend operation 2281 * ata_eh_handle_port_suspend - perform port suspend operation
2186 * @ap: port to suspend 2282 * @ap: port to suspend
@@ -2296,3 +2392,4 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
2296 } 2392 }
2297 spin_unlock_irqrestore(ap->lock, flags); 2393 spin_unlock_irqrestore(ap->lock, flags);
2298} 2394}
2395#endif /* CONFIG_PM */
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 0009818a43..6cc817a102 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -333,6 +333,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
333 scsi_cmd[8] = args[3]; 333 scsi_cmd[8] = args[3];
334 scsi_cmd[10] = args[4]; 334 scsi_cmd[10] = args[4];
335 scsi_cmd[12] = args[5]; 335 scsi_cmd[12] = args[5];
336 scsi_cmd[13] = args[6] & 0x0f;
336 scsi_cmd[14] = args[0]; 337 scsi_cmd[14] = args[0];
337 338
338 /* Good values for timeout and retries? Values below 339 /* Good values for timeout and retries? Values below
@@ -509,6 +510,7 @@ static void ata_dump_status(unsigned id, struct ata_taskfile *tf)
509 } 510 }
510} 511}
511 512
513#ifdef CONFIG_PM
512/** 514/**
513 * ata_scsi_device_suspend - suspend ATA device associated with sdev 515 * ata_scsi_device_suspend - suspend ATA device associated with sdev
514 * @sdev: the SCSI device to suspend 516 * @sdev: the SCSI device to suspend
@@ -633,6 +635,7 @@ int ata_scsi_device_resume(struct scsi_device *sdev)
633 sdev->sdev_gendev.power.power_state = PMSG_ON; 635 sdev->sdev_gendev.power.power_state = PMSG_ON;
634 return 0; 636 return 0;
635} 637}
638#endif /* CONFIG_PM */
636 639
637/** 640/**
638 * ata_to_sense_error - convert ATA error to SCSI error 641 * ata_to_sense_error - convert ATA error to SCSI error
@@ -781,7 +784,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
781 */ 784 */
782 if (qc->err_mask || 785 if (qc->err_mask ||
783 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { 786 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
784 ata_to_sense_error(qc->ap->id, tf->command, tf->feature, 787 ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
785 &sb[1], &sb[2], &sb[3], verbose); 788 &sb[1], &sb[2], &sb[3], verbose);
786 sb[1] &= 0x0f; 789 sb[1] &= 0x0f;
787 } 790 }
@@ -854,7 +857,7 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
854 */ 857 */
855 if (qc->err_mask || 858 if (qc->err_mask ||
856 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { 859 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
857 ata_to_sense_error(qc->ap->id, tf->command, tf->feature, 860 ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
858 &sb[1], &sb[2], &sb[3], verbose); 861 &sb[1], &sb[2], &sb[3], verbose);
859 sb[1] &= 0x0f; 862 sb[1] &= 0x0f;
860 } 863 }
@@ -986,29 +989,32 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
986 struct ata_port *ap = ata_shost_to_port(sdev->host); 989 struct ata_port *ap = ata_shost_to_port(sdev->host);
987 struct ata_device *dev; 990 struct ata_device *dev;
988 unsigned long flags; 991 unsigned long flags;
989 int max_depth;
990 992
991 if (queue_depth < 1) 993 if (queue_depth < 1 || queue_depth == sdev->queue_depth)
992 return sdev->queue_depth; 994 return sdev->queue_depth;
993 995
994 dev = ata_scsi_find_dev(ap, sdev); 996 dev = ata_scsi_find_dev(ap, sdev);
995 if (!dev || !ata_dev_enabled(dev)) 997 if (!dev || !ata_dev_enabled(dev))
996 return sdev->queue_depth; 998 return sdev->queue_depth;
997 999
998 max_depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id)); 1000 /* NCQ enabled? */
999 max_depth = min(ATA_MAX_QUEUE - 1, max_depth);
1000 if (queue_depth > max_depth)
1001 queue_depth = max_depth;
1002
1003 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
1004
1005 spin_lock_irqsave(ap->lock, flags); 1001 spin_lock_irqsave(ap->lock, flags);
1006 if (queue_depth > 1) 1002 dev->flags &= ~ATA_DFLAG_NCQ_OFF;
1007 dev->flags &= ~ATA_DFLAG_NCQ_OFF; 1003 if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
1008 else
1009 dev->flags |= ATA_DFLAG_NCQ_OFF; 1004 dev->flags |= ATA_DFLAG_NCQ_OFF;
1005 queue_depth = 1;
1006 }
1010 spin_unlock_irqrestore(ap->lock, flags); 1007 spin_unlock_irqrestore(ap->lock, flags);
1011 1008
1009 /* limit and apply queue depth */
1010 queue_depth = min(queue_depth, sdev->host->can_queue);
1011 queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
1012 queue_depth = min(queue_depth, ATA_MAX_QUEUE - 1);
1013
1014 if (sdev->queue_depth == queue_depth)
1015 return -EINVAL;
1016
1017 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
1012 return queue_depth; 1018 return queue_depth;
1013} 1019}
1014 1020
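A stand-alone model of the reworked depth selection: the NCQ state decides whether more than one command may be queued at all, then the request is clamped to the host, device and libata limits. The limit values below are examples only.

#include <stdio.h>

#define ATA_MAX_QUEUE 32	/* stand-in for the libata constant */

static int min3i(int a, int b, int c)
{
	return a < b ? (a < c ? a : c) : (b < c ? b : c);
}

static int choose_queue_depth(int requested, int ncq_enabled,
			      int host_can_queue, int id_queue_depth)
{
	if (requested == 1 || !ncq_enabled)
		requested = 1;			/* NCQ off: single command only */

	requested = min3i(requested, host_can_queue, id_queue_depth);
	if (requested > ATA_MAX_QUEUE - 1)
		requested = ATA_MAX_QUEUE - 1;
	return requested;
}

int main(void)
{
	printf("depth = %d\n", choose_queue_depth(64, 1, 31, 32));	/* -> 31 */
	printf("depth = %d\n", choose_queue_depth(8, 0, 31, 32));	/* -> 1  */
	return 0;
}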
@@ -1469,7 +1475,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1469 } 1475 }
1470 1476
1471 if (need_sense && !ap->ops->error_handler) 1477 if (need_sense && !ap->ops->error_handler)
1472 ata_dump_status(ap->id, &qc->result_tf); 1478 ata_dump_status(ap->print_id, &qc->result_tf);
1473 1479
1474 qc->scsidone(cmd); 1480 qc->scsidone(cmd);
1475 1481
@@ -1495,11 +1501,9 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1495static int ata_scmd_need_defer(struct ata_device *dev, int is_io) 1501static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
1496{ 1502{
1497 struct ata_port *ap = dev->ap; 1503 struct ata_port *ap = dev->ap;
1504 int is_ncq = is_io && ata_ncq_enabled(dev);
1498 1505
1499 if (!(dev->flags & ATA_DFLAG_NCQ)) 1506 if (is_ncq) {
1500 return 0;
1501
1502 if (is_io) {
1503 if (!ata_tag_valid(ap->active_tag)) 1507 if (!ata_tag_valid(ap->active_tag))
1504 return 0; 1508 return 0;
1505 } else { 1509 } else {
@@ -2774,7 +2778,7 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
2774 u8 *scsicmd = cmd->cmnd; 2778 u8 *scsicmd = cmd->cmnd;
2775 2779
2776 DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", 2780 DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
2777 ap->id, 2781 ap->print_id,
2778 scsidev->channel, scsidev->id, scsidev->lun, 2782 scsidev->channel, scsidev->id, scsidev->lun,
2779 scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3], 2783 scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3],
2780 scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7], 2784 scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7],
@@ -3234,7 +3238,7 @@ struct ata_port *ata_sas_port_alloc(struct ata_host *host,
3234 3238
3235 ata_port_init(ap, host, ent, 0); 3239 ata_port_init(ap, host, ent, 0);
3236 ap->lock = shost->host_lock; 3240 ap->lock = shost->host_lock;
3237 kfree(ent); 3241 devm_kfree(host->dev, ent);
3238 return ap; 3242 return ap;
3239} 3243}
3240EXPORT_SYMBOL_GPL(ata_sas_port_alloc); 3244EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 16bc3e35bd..2ffcca063d 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -175,7 +175,7 @@ void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
175 */ 175 */
176void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) 176void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
177{ 177{
178 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); 178 DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
179 179
180 iowrite8(tf->command, ap->ioaddr.command_addr); 180 iowrite8(tf->command, ap->ioaddr.command_addr);
181 ata_pause(ap); 181 ata_pause(ap);
@@ -521,7 +521,7 @@ void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
521static int ata_resources_present(struct pci_dev *pdev, int port) 521static int ata_resources_present(struct pci_dev *pdev, int port)
522{ 522{
523 int i; 523 int i;
524 524
525 /* Check the PCI resources for this channel are enabled */ 525 /* Check the PCI resources for this channel are enabled */
526 port = port * 2; 526 port = port * 2;
527 for (i = 0; i < 2; i ++) { 527 for (i = 0; i < 2; i ++) {
@@ -531,7 +531,7 @@ static int ata_resources_present(struct pci_dev *pdev, int port)
531 } 531 }
532 return 1; 532 return 1;
533} 533}
534 534
535/** 535/**
536 * ata_pci_init_native_mode - Initialize native-mode driver 536 * ata_pci_init_native_mode - Initialize native-mode driver
537 * @pdev: pci device to be initialized 537 * @pdev: pci device to be initialized
@@ -576,7 +576,7 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
576 576
577 probe_ent->irq = pdev->irq; 577 probe_ent->irq = pdev->irq;
578 probe_ent->irq_flags = IRQF_SHARED; 578 probe_ent->irq_flags = IRQF_SHARED;
579 579
580 /* Discard disabled ports. Some controllers show their 580 /* Discard disabled ports. Some controllers show their
581 unused channels this way */ 581 unused channels this way */
582 if (ata_resources_present(pdev, 0) == 0) 582 if (ata_resources_present(pdev, 0) == 0)
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 0ad7781d72..c42671493e 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -41,6 +41,15 @@ struct ata_scsi_args {
41enum { 41enum {
42 /* flags for ata_dev_read_id() */ 42 /* flags for ata_dev_read_id() */
43 ATA_READID_POSTRESET = (1 << 0), /* reading ID after reset */ 43 ATA_READID_POSTRESET = (1 << 0), /* reading ID after reset */
44
45 /* selector for ata_down_xfermask_limit() */
46 ATA_DNXFER_PIO = 0, /* speed down PIO */
47 ATA_DNXFER_DMA = 1, /* speed down DMA */
48 ATA_DNXFER_40C = 2, /* apply 40c cable limit */
49 ATA_DNXFER_FORCE_PIO = 3, /* force PIO */
50 ATA_DNXFER_FORCE_PIO0 = 4, /* force PIO0 */
51
52 ATA_DNXFER_QUIET = (1 << 31),
44}; 53};
45 54
46extern struct workqueue_struct *ata_aux_wq; 55extern struct workqueue_struct *ata_aux_wq;
@@ -69,7 +78,7 @@ extern int ata_dev_revalidate(struct ata_device *dev, unsigned int flags);
69extern int ata_dev_configure(struct ata_device *dev); 78extern int ata_dev_configure(struct ata_device *dev);
70extern int sata_down_spd_limit(struct ata_port *ap); 79extern int sata_down_spd_limit(struct ata_port *ap);
71extern int sata_set_spd_needed(struct ata_port *ap); 80extern int sata_set_spd_needed(struct ata_port *ap);
72extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0); 81extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
73extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev); 82extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
74extern void ata_sg_clean(struct ata_queued_cmd *qc); 83extern void ata_sg_clean(struct ata_queued_cmd *qc);
75extern void ata_qc_free(struct ata_queued_cmd *qc); 84extern void ata_qc_free(struct ata_queued_cmd *qc);
@@ -150,7 +159,5 @@ extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
150/* libata-sff.c */ 159/* libata-sff.c */
151extern u8 ata_irq_on(struct ata_port *ap); 160extern u8 ata_irq_on(struct ata_port *ap);
152 161
153/* pata_sis.c */
154extern struct ata_port_info sis_info133;
155 162
156#endif /* __LIBATA_H__ */ 163#endif /* __LIBATA_H__ */
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index ab44d18850..11ea552a58 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -34,7 +34,7 @@
34#include <linux/dmi.h> 34#include <linux/dmi.h>
35 35
36#define DRV_NAME "pata_ali" 36#define DRV_NAME "pata_ali"
37#define DRV_VERSION "0.7.2" 37#define DRV_VERSION "0.7.3"
38 38
39/* 39/*
40 * Cable special cases 40 * Cable special cases
@@ -345,8 +345,10 @@ static struct scsi_host_template ali_sht = {
345 .slave_configure = ata_scsi_slave_config, 345 .slave_configure = ata_scsi_slave_config,
346 .slave_destroy = ata_scsi_slave_destroy, 346 .slave_destroy = ata_scsi_slave_destroy,
347 .bios_param = ata_std_bios_param, 347 .bios_param = ata_std_bios_param,
348#ifdef CONFIG_PM
348 .resume = ata_scsi_device_resume, 349 .resume = ata_scsi_device_resume,
349 .suspend = ata_scsi_device_suspend, 350 .suspend = ata_scsi_device_suspend,
351#endif
350}; 352};
351 353
352/* 354/*
@@ -667,11 +669,13 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
667 return ata_pci_init_one(pdev, port_info, 2); 669 return ata_pci_init_one(pdev, port_info, 2);
668} 670}
669 671
672#ifdef CONFIG_PM
670static int ali_reinit_one(struct pci_dev *pdev) 673static int ali_reinit_one(struct pci_dev *pdev)
671{ 674{
672 ali_init_chipset(pdev); 675 ali_init_chipset(pdev);
673 return ata_pci_device_resume(pdev); 676 return ata_pci_device_resume(pdev);
674} 677}
678#endif
675 679
676static const struct pci_device_id ali[] = { 680static const struct pci_device_id ali[] = {
677 { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), }, 681 { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), },
@@ -685,8 +689,10 @@ static struct pci_driver ali_pci_driver = {
685 .id_table = ali, 689 .id_table = ali,
686 .probe = ali_init_one, 690 .probe = ali_init_one,
687 .remove = ata_pci_remove_one, 691 .remove = ata_pci_remove_one,
692#ifdef CONFIG_PM
688 .suspend = ata_pci_device_suspend, 693 .suspend = ata_pci_device_suspend,
689 .resume = ali_reinit_one, 694 .resume = ali_reinit_one,
695#endif
690}; 696};
691 697
692static int __init ali_init(void) 698static int __init ali_init(void)
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 619e44b040..1838176290 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -25,7 +25,7 @@
25#include <linux/libata.h> 25#include <linux/libata.h>
26 26
27#define DRV_NAME "pata_amd" 27#define DRV_NAME "pata_amd"
28#define DRV_VERSION "0.2.7" 28#define DRV_VERSION "0.2.8"
29 29
30/** 30/**
31 * timing_setup - shared timing computation and load 31 * timing_setup - shared timing computation and load
@@ -128,7 +128,7 @@ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offse
128 128
129static int amd_pre_reset(struct ata_port *ap) 129static int amd_pre_reset(struct ata_port *ap)
130{ 130{
131 static const u32 bitmask[2] = {0x03, 0xC0}; 131 static const u32 bitmask[2] = {0x03, 0x0C};
132 static const struct pci_bits amd_enable_bits[] = { 132 static const struct pci_bits amd_enable_bits[] = {
133 { 0x40, 1, 0x02, 0x02 }, 133 { 0x40, 1, 0x02, 0x02 },
134 { 0x40, 1, 0x01, 0x01 } 134 { 0x40, 1, 0x01, 0x01 }
@@ -247,7 +247,7 @@ static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
247 */ 247 */
248 248
249static int nv_pre_reset(struct ata_port *ap) { 249static int nv_pre_reset(struct ata_port *ap) {
250 static const u8 bitmask[2] = {0x03, 0xC0}; 250 static const u8 bitmask[2] = {0x03, 0x0C};
251 static const struct pci_bits nv_enable_bits[] = { 251 static const struct pci_bits nv_enable_bits[] = {
252 { 0x50, 1, 0x02, 0x02 }, 252 { 0x50, 1, 0x02, 0x02 },
253 { 0x50, 1, 0x01, 0x01 } 253 { 0x50, 1, 0x01, 0x01 }
@@ -334,8 +334,10 @@ static struct scsi_host_template amd_sht = {
334 .slave_configure = ata_scsi_slave_config, 334 .slave_configure = ata_scsi_slave_config,
335 .slave_destroy = ata_scsi_slave_destroy, 335 .slave_destroy = ata_scsi_slave_destroy,
336 .bios_param = ata_std_bios_param, 336 .bios_param = ata_std_bios_param,
337#ifdef CONFIG_PM
337 .resume = ata_scsi_device_resume, 338 .resume = ata_scsi_device_resume,
338 .suspend = ata_scsi_device_suspend, 339 .suspend = ata_scsi_device_suspend,
340#endif
339}; 341};
340 342
341static struct ata_port_operations amd33_port_ops = { 343static struct ata_port_operations amd33_port_ops = {
@@ -663,6 +665,7 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
663 return ata_pci_init_one(pdev, port_info, 2); 665 return ata_pci_init_one(pdev, port_info, 2);
664} 666}
665 667
668#ifdef CONFIG_PM
666static int amd_reinit_one(struct pci_dev *pdev) 669static int amd_reinit_one(struct pci_dev *pdev)
667{ 670{
668 if (pdev->vendor == PCI_VENDOR_ID_AMD) { 671 if (pdev->vendor == PCI_VENDOR_ID_AMD) {
@@ -679,6 +682,7 @@ static int amd_reinit_one(struct pci_dev *pdev)
679 } 682 }
680 return ata_pci_device_resume(pdev); 683 return ata_pci_device_resume(pdev);
681} 684}
685#endif
682 686
683static const struct pci_device_id amd[] = { 687static const struct pci_device_id amd[] = {
684 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 }, 688 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 },
@@ -708,8 +712,10 @@ static struct pci_driver amd_pci_driver = {
708 .id_table = amd, 712 .id_table = amd,
709 .probe = amd_init_one, 713 .probe = amd_init_one,
710 .remove = ata_pci_remove_one, 714 .remove = ata_pci_remove_one,
715#ifdef CONFIG_PM
711 .suspend = ata_pci_device_suspend, 716 .suspend = ata_pci_device_suspend,
712 .resume = amd_reinit_one, 717 .resume = amd_reinit_one,
718#endif
713}; 719};
714 720
715static int __init amd_init(void) 721static int __init amd_init(void)
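The recurring change in this and the following PATA driver diffs is mechanical: the suspend/resume plumbing is now compiled only when CONFIG_PM is set, in three places per driver (the SCSI host template, any chip-specific reinit helper, and the pci_driver hooks). A minimal sketch of the resulting shape, using a hypothetical "foo" driver; the libata helper names are the real ones used in the patch, everything else is illustrative and not part of the patch:

/* Sketch only: how the CONFIG_PM guards line up in a typical pata_* driver
 * after this series. */
static struct scsi_host_template foo_sht = {
	.module		= THIS_MODULE,
	/* ... the usual queueing/SG fields ... */
#ifdef CONFIG_PM
	.suspend	= ata_scsi_device_suspend,
	.resume		= ata_scsi_device_resume,
#endif
};

#ifdef CONFIG_PM
/* Chip-specific resume: reprogram the controller, then let libata restart it. */
static int foo_reinit_one(struct pci_dev *pdev)
{
	foo_init_chipset(pdev);			/* hypothetical re-setup helper */
	return ata_pci_device_resume(pdev);
}
#endif

static struct pci_driver foo_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= foo_ids,
	.probe		= foo_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= foo_reinit_one,	/* or ata_pci_device_resume directly */
#endif
};

Drivers with no chip state to restore simply point .resume at ata_pci_device_resume, as pata_atiixp and most of the drivers below do.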
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index c3eb40c91c..51d9923be0 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -224,8 +224,10 @@ static struct scsi_host_template atiixp_sht = {
224 .slave_configure = ata_scsi_slave_config, 224 .slave_configure = ata_scsi_slave_config,
225 .slave_destroy = ata_scsi_slave_destroy, 225 .slave_destroy = ata_scsi_slave_destroy,
226 .bios_param = ata_std_bios_param, 226 .bios_param = ata_std_bios_param,
227#ifdef CONFIG_PM
227 .resume = ata_scsi_device_resume, 228 .resume = ata_scsi_device_resume,
228 .suspend = ata_scsi_device_suspend, 229 .suspend = ata_scsi_device_suspend,
230#endif
229}; 231};
230 232
231static struct ata_port_operations atiixp_port_ops = { 233static struct ata_port_operations atiixp_port_ops = {
@@ -290,8 +292,10 @@ static struct pci_driver atiixp_pci_driver = {
290 .id_table = atiixp, 292 .id_table = atiixp,
291 .probe = atiixp_init_one, 293 .probe = atiixp_init_one,
292 .remove = ata_pci_remove_one, 294 .remove = ata_pci_remove_one,
295#ifdef CONFIG_PM
293 .resume = ata_pci_device_resume, 296 .resume = ata_pci_device_resume,
294 .suspend = ata_pci_device_suspend, 297 .suspend = ata_pci_device_suspend,
298#endif
295}; 299};
296 300
297static int __init atiixp_init(void) 301static int __init atiixp_init(void)
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index da098282b5..5b13bdd1ed 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * pata_cmd64x.c - ATI PATA for new ATA layer 2 * pata_cmd64x.c - CMD64x PATA for new ATA layer
3 * (C) 2005 Red Hat Inc 3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com> 4 * Alan Cox <alan@redhat.com>
5 * 5 *
@@ -285,8 +285,10 @@ static struct scsi_host_template cmd64x_sht = {
285 .slave_configure = ata_scsi_slave_config, 285 .slave_configure = ata_scsi_slave_config,
286 .slave_destroy = ata_scsi_slave_destroy, 286 .slave_destroy = ata_scsi_slave_destroy,
287 .bios_param = ata_std_bios_param, 287 .bios_param = ata_std_bios_param,
288#ifdef CONFIG_PM
288 .resume = ata_scsi_device_resume, 289 .resume = ata_scsi_device_resume,
289 .suspend = ata_scsi_device_suspend, 290 .suspend = ata_scsi_device_suspend,
291#endif
290}; 292};
291 293
292static struct ata_port_operations cmd64x_port_ops = { 294static struct ata_port_operations cmd64x_port_ops = {
@@ -479,6 +481,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
479 return ata_pci_init_one(pdev, port_info, 2); 481 return ata_pci_init_one(pdev, port_info, 2);
480} 482}
481 483
484#ifdef CONFIG_PM
482static int cmd64x_reinit_one(struct pci_dev *pdev) 485static int cmd64x_reinit_one(struct pci_dev *pdev)
483{ 486{
484 u8 mrdmode; 487 u8 mrdmode;
@@ -492,6 +495,7 @@ static int cmd64x_reinit_one(struct pci_dev *pdev)
492#endif 495#endif
493 return ata_pci_device_resume(pdev); 496 return ata_pci_device_resume(pdev);
494} 497}
498#endif
495 499
496static const struct pci_device_id cmd64x[] = { 500static const struct pci_device_id cmd64x[] = {
497 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_643), 0 }, 501 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_643), 0 },
@@ -507,8 +511,10 @@ static struct pci_driver cmd64x_pci_driver = {
507 .id_table = cmd64x, 511 .id_table = cmd64x,
508 .probe = cmd64x_init_one, 512 .probe = cmd64x_init_one,
509 .remove = ata_pci_remove_one, 513 .remove = ata_pci_remove_one,
514#ifdef CONFIG_PM
510 .suspend = ata_pci_device_suspend, 515 .suspend = ata_pci_device_suspend,
511 .resume = cmd64x_reinit_one, 516 .resume = cmd64x_reinit_one,
517#endif
512}; 518};
513 519
514static int __init cmd64x_init(void) 520static int __init cmd64x_init(void)
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 1ce8fcfd78..7ef834250a 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -41,7 +41,7 @@
41#include <linux/libata.h> 41#include <linux/libata.h>
42 42
43#define DRV_NAME "pata_cs5520" 43#define DRV_NAME "pata_cs5520"
44#define DRV_VERSION "0.6.3" 44#define DRV_VERSION "0.6.4"
45 45
46struct pio_clocks 46struct pio_clocks
47{ 47{
@@ -167,8 +167,10 @@ static struct scsi_host_template cs5520_sht = {
167 .slave_configure = ata_scsi_slave_config, 167 .slave_configure = ata_scsi_slave_config,
168 .slave_destroy = ata_scsi_slave_destroy, 168 .slave_destroy = ata_scsi_slave_destroy,
169 .bios_param = ata_std_bios_param, 169 .bios_param = ata_std_bios_param,
170#ifdef CONFIG_PM
170 .resume = ata_scsi_device_resume, 171 .resume = ata_scsi_device_resume,
171 .suspend = ata_scsi_device_suspend, 172 .suspend = ata_scsi_device_suspend,
173#endif
172}; 174};
173 175
174static struct ata_port_operations cs5520_port_ops = { 176static struct ata_port_operations cs5520_port_ops = {
@@ -306,9 +308,9 @@ static void __devexit cs5520_remove_one(struct pci_dev *pdev)
306 struct ata_host *host = dev_get_drvdata(dev); 308 struct ata_host *host = dev_get_drvdata(dev);
307 309
308 ata_host_detach(host); 310 ata_host_detach(host);
309 dev_set_drvdata(dev, NULL);
310} 311}
311 312
313#ifdef CONFIG_PM
312/** 314/**
313 * cs5520_reinit_one - device resume 315 * cs5520_reinit_one - device resume
314 * @pdev: PCI device 316 * @pdev: PCI device
@@ -325,6 +327,31 @@ static int cs5520_reinit_one(struct pci_dev *pdev)
325 pci_write_config_byte(pdev, 0x60, pcicfg | 0x40); 327 pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
326 return ata_pci_device_resume(pdev); 328 return ata_pci_device_resume(pdev);
327} 329}
330
331/**
332 * cs5520_pci_device_suspend - device suspend
333 * @pdev: PCI device
334 *
335 * We have to cut and waste bits from the standard method because
336 * the 5520 is a bit odd and not just a pure ATA device. As a result
337 * we must not disable it. The needed code is short and this avoids
338 * chip specific mess in the core code.
339 */
340
341static int cs5520_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
342{
343 struct ata_host *host = dev_get_drvdata(&pdev->dev);
344 int rc = 0;
345
346 rc = ata_host_suspend(host, mesg);
347 if (rc)
348 return rc;
349
350 pci_save_state(pdev);
351 return 0;
352}
353#endif /* CONFIG_PM */
354
328/* For now keep DMA off. We can set it for all but A rev CS5510 once the 355/* For now keep DMA off. We can set it for all but A rev CS5510 once the
329 core ATA code can handle it */ 356 core ATA code can handle it */
330 357
@@ -340,8 +367,10 @@ static struct pci_driver cs5520_pci_driver = {
340 .id_table = pata_cs5520, 367 .id_table = pata_cs5520,
341 .probe = cs5520_init_one, 368 .probe = cs5520_init_one,
342 .remove = cs5520_remove_one, 369 .remove = cs5520_remove_one,
343 .suspend = ata_pci_device_suspend, 370#ifdef CONFIG_PM
371 .suspend = cs5520_pci_device_suspend,
344 .resume = cs5520_reinit_one, 372 .resume = cs5520_reinit_one,
373#endif
345}; 374};
346 375
347static int __init cs5520_init(void) 376static int __init cs5520_init(void)
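pata_cs5520 cannot use the stock PCI suspend helper because the 5520 function must stay enabled across suspend, so the patch adds a local method that quiesces the ports and saves config space but deliberately skips the power-down step. A commented restatement of that path as a sketch; the hedged assumption is that the generic ata_pci_device_suspend would otherwise disable the function:

/* Sketch: a suspend path that leaves the PCI function enabled. */
static int foo_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_host_suspend(host, mesg);	/* flush and stop all ports */
	if (rc)
		return rc;

	pci_save_state(pdev);			/* config space only */
	/* no pci_disable_device() / pci_set_power_state() here, on purpose */
	return 0;
}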
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index 3d7b7d87ec..db63e80e60 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -35,7 +35,7 @@
35#include <linux/dmi.h> 35#include <linux/dmi.h>
36 36
37#define DRV_NAME "pata_cs5530" 37#define DRV_NAME "pata_cs5530"
38#define DRV_VERSION "0.7.1" 38#define DRV_VERSION "0.7.2"
39 39
40static void __iomem *cs5530_port_base(struct ata_port *ap) 40static void __iomem *cs5530_port_base(struct ata_port *ap)
41{ 41{
@@ -188,8 +188,10 @@ static struct scsi_host_template cs5530_sht = {
188 .slave_configure = ata_scsi_slave_config, 188 .slave_configure = ata_scsi_slave_config,
189 .slave_destroy = ata_scsi_slave_destroy, 189 .slave_destroy = ata_scsi_slave_destroy,
190 .bios_param = ata_std_bios_param, 190 .bios_param = ata_std_bios_param,
191#ifdef CONFIG_PM
191 .resume = ata_scsi_device_resume, 192 .resume = ata_scsi_device_resume,
192 .suspend = ata_scsi_device_suspend, 193 .suspend = ata_scsi_device_suspend,
194#endif
193}; 195};
194 196
195static struct ata_port_operations cs5530_port_ops = { 197static struct ata_port_operations cs5530_port_ops = {
@@ -376,6 +378,7 @@ static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
376 return ata_pci_init_one(pdev, port_info, 2); 378 return ata_pci_init_one(pdev, port_info, 2);
377} 379}
378 380
381#ifdef CONFIG_PM
379static int cs5530_reinit_one(struct pci_dev *pdev) 382static int cs5530_reinit_one(struct pci_dev *pdev)
380{ 383{
381 /* If we fail on resume we are doomed */ 384 /* If we fail on resume we are doomed */
@@ -383,6 +386,7 @@ static int cs5530_reinit_one(struct pci_dev *pdev)
383 BUG(); 386 BUG();
384 return ata_pci_device_resume(pdev); 387 return ata_pci_device_resume(pdev);
385} 388}
389#endif /* CONFIG_PM */
386 390
387static const struct pci_device_id cs5530[] = { 391static const struct pci_device_id cs5530[] = {
388 { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), }, 392 { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), },
@@ -395,8 +399,10 @@ static struct pci_driver cs5530_pci_driver = {
395 .id_table = cs5530, 399 .id_table = cs5530,
396 .probe = cs5530_init_one, 400 .probe = cs5530_init_one,
397 .remove = ata_pci_remove_one, 401 .remove = ata_pci_remove_one,
402#ifdef CONFIG_PM
398 .suspend = ata_pci_device_suspend, 403 .suspend = ata_pci_device_suspend,
399 .resume = cs5530_reinit_one, 404 .resume = cs5530_reinit_one,
405#endif
400}; 406};
401 407
402static int __init cs5530_init(void) 408static int __init cs5530_init(void)
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index 17bc693cc5..1572e5c903 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -185,8 +185,10 @@ static struct scsi_host_template cs5535_sht = {
185 .slave_configure = ata_scsi_slave_config, 185 .slave_configure = ata_scsi_slave_config,
186 .slave_destroy = ata_scsi_slave_destroy, 186 .slave_destroy = ata_scsi_slave_destroy,
187 .bios_param = ata_std_bios_param, 187 .bios_param = ata_std_bios_param,
188#ifdef CONFIG_PM
188 .resume = ata_scsi_device_resume, 189 .resume = ata_scsi_device_resume,
189 .suspend = ata_scsi_device_suspend, 190 .suspend = ata_scsi_device_suspend,
191#endif
190}; 192};
191 193
192static struct ata_port_operations cs5535_port_ops = { 194static struct ata_port_operations cs5535_port_ops = {
@@ -270,8 +272,10 @@ static struct pci_driver cs5535_pci_driver = {
270 .id_table = cs5535, 272 .id_table = cs5535,
271 .probe = cs5535_init_one, 273 .probe = cs5535_init_one,
272 .remove = ata_pci_remove_one, 274 .remove = ata_pci_remove_one,
275#ifdef CONFIG_PM
273 .suspend = ata_pci_device_suspend, 276 .suspend = ata_pci_device_suspend,
274 .resume = ata_pci_device_resume, 277 .resume = ata_pci_device_resume,
278#endif
275}; 279};
276 280
277static int __init cs5535_init(void) 281static int __init cs5535_init(void)
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index 63f48f0876..f69dde5f70 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -136,8 +136,10 @@ static struct scsi_host_template cy82c693_sht = {
136 .slave_configure = ata_scsi_slave_config, 136 .slave_configure = ata_scsi_slave_config,
137 .slave_destroy = ata_scsi_slave_destroy, 137 .slave_destroy = ata_scsi_slave_destroy,
138 .bios_param = ata_std_bios_param, 138 .bios_param = ata_std_bios_param,
139#ifdef CONFIG_PM
139 .resume = ata_scsi_device_resume, 140 .resume = ata_scsi_device_resume,
140 .suspend = ata_scsi_device_suspend, 141 .suspend = ata_scsi_device_suspend,
142#endif
141}; 143};
142 144
143static struct ata_port_operations cy82c693_port_ops = { 145static struct ata_port_operations cy82c693_port_ops = {
@@ -206,8 +208,10 @@ static struct pci_driver cy82c693_pci_driver = {
206 .id_table = cy82c693, 208 .id_table = cy82c693,
207 .probe = cy82c693_init_one, 209 .probe = cy82c693_init_one,
208 .remove = ata_pci_remove_one, 210 .remove = ata_pci_remove_one,
211#ifdef CONFIG_PM
209 .suspend = ata_pci_device_suspend, 212 .suspend = ata_pci_device_suspend,
210 .resume = ata_pci_device_resume, 213 .resume = ata_pci_device_resume,
214#endif
211}; 215};
212 216
213static int __init cy82c693_init(void) 217static int __init cy82c693_init(void)
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index c19b6a8a7d..dac7a6554f 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -234,8 +234,10 @@ static struct scsi_host_template efar_sht = {
234 .slave_configure = ata_scsi_slave_config, 234 .slave_configure = ata_scsi_slave_config,
235 .slave_destroy = ata_scsi_slave_destroy, 235 .slave_destroy = ata_scsi_slave_destroy,
236 .bios_param = ata_std_bios_param, 236 .bios_param = ata_std_bios_param,
237#ifdef CONFIG_PM
237 .resume = ata_scsi_device_resume, 238 .resume = ata_scsi_device_resume,
238 .suspend = ata_scsi_device_suspend, 239 .suspend = ata_scsi_device_suspend,
240#endif
239}; 241};
240 242
241static const struct ata_port_operations efar_ops = { 243static const struct ata_port_operations efar_ops = {
@@ -317,8 +319,10 @@ static struct pci_driver efar_pci_driver = {
317 .id_table = efar_pci_tbl, 319 .id_table = efar_pci_tbl,
318 .probe = efar_init_one, 320 .probe = efar_init_one,
319 .remove = ata_pci_remove_one, 321 .remove = ata_pci_remove_one,
322#ifdef CONFIG_PM
320 .suspend = ata_pci_device_suspend, 323 .suspend = ata_pci_device_suspend,
321 .resume = ata_pci_device_resume, 324 .resume = ata_pci_device_resume,
325#endif
322}; 326};
323 327
324static int __init efar_init(void) 328static int __init efar_init(void)
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 27d724b5ee..baf35f8760 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -27,7 +27,7 @@
27#include <linux/libata.h> 27#include <linux/libata.h>
28 28
29#define DRV_NAME "pata_hpt366" 29#define DRV_NAME "pata_hpt366"
30#define DRV_VERSION "0.5.3" 30#define DRV_VERSION "0.6.0"
31 31
32struct hpt_clock { 32struct hpt_clock {
33 u8 xfer_speed; 33 u8 xfer_speed;
@@ -328,8 +328,10 @@ static struct scsi_host_template hpt36x_sht = {
328 .slave_configure = ata_scsi_slave_config, 328 .slave_configure = ata_scsi_slave_config,
329 .slave_destroy = ata_scsi_slave_destroy, 329 .slave_destroy = ata_scsi_slave_destroy,
330 .bios_param = ata_std_bios_param, 330 .bios_param = ata_std_bios_param,
331#ifdef CONFIG_PM
331 .resume = ata_scsi_device_resume, 332 .resume = ata_scsi_device_resume,
332 .suspend = ata_scsi_device_suspend, 333 .suspend = ata_scsi_device_suspend,
334#endif
333}; 335};
334 336
335/* 337/*
@@ -457,12 +459,13 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
457 return ata_pci_init_one(dev, port_info, 2); 459 return ata_pci_init_one(dev, port_info, 2);
458} 460}
459 461
462#ifdef CONFIG_PM
460static int hpt36x_reinit_one(struct pci_dev *dev) 463static int hpt36x_reinit_one(struct pci_dev *dev)
461{ 464{
462 hpt36x_init_chipset(dev); 465 hpt36x_init_chipset(dev);
463 return ata_pci_device_resume(dev); 466 return ata_pci_device_resume(dev);
464} 467}
465 468#endif
466 469
467static const struct pci_device_id hpt36x[] = { 470static const struct pci_device_id hpt36x[] = {
468 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), }, 471 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
@@ -474,8 +477,10 @@ static struct pci_driver hpt36x_pci_driver = {
474 .id_table = hpt36x, 477 .id_table = hpt36x,
475 .probe = hpt36x_init_one, 478 .probe = hpt36x_init_one,
476 .remove = ata_pci_remove_one, 479 .remove = ata_pci_remove_one,
480#ifdef CONFIG_PM
477 .suspend = ata_pci_device_suspend, 481 .suspend = ata_pci_device_suspend,
478 .resume = hpt36x_reinit_one, 482 .resume = hpt36x_reinit_one,
483#endif
479}; 484};
480 485
481static int __init hpt36x_init(void) 486static int __init hpt36x_init(void)
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 4ffc392052..f331eeeafa 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -25,7 +25,7 @@
25#include <linux/libata.h> 25#include <linux/libata.h>
26 26
27#define DRV_NAME "pata_hpt37x" 27#define DRV_NAME "pata_hpt37x"
28#define DRV_VERSION "0.5.2" 28#define DRV_VERSION "0.6.0"
29 29
30struct hpt_clock { 30struct hpt_clock {
31 u8 xfer_speed; 31 u8 xfer_speed;
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index 483ce7c12c..813485c852 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -119,8 +119,10 @@ static struct scsi_host_template hpt3x3_sht = {
119 .slave_configure = ata_scsi_slave_config, 119 .slave_configure = ata_scsi_slave_config,
120 .slave_destroy = ata_scsi_slave_destroy, 120 .slave_destroy = ata_scsi_slave_destroy,
121 .bios_param = ata_std_bios_param, 121 .bios_param = ata_std_bios_param,
122#ifdef CONFIG_PM
122 .resume = ata_scsi_device_resume, 123 .resume = ata_scsi_device_resume,
123 .suspend = ata_scsi_device_suspend, 124 .suspend = ata_scsi_device_suspend,
125#endif
124}; 126};
125 127
126static struct ata_port_operations hpt3x3_port_ops = { 128static struct ata_port_operations hpt3x3_port_ops = {
@@ -206,11 +208,13 @@ static int hpt3x3_init_one(struct pci_dev *dev, const struct pci_device_id *id)
206 return ata_pci_init_one(dev, port_info, 2); 208 return ata_pci_init_one(dev, port_info, 2);
207} 209}
208 210
211#ifdef CONFIG_PM
209static int hpt3x3_reinit_one(struct pci_dev *dev) 212static int hpt3x3_reinit_one(struct pci_dev *dev)
210{ 213{
211 hpt3x3_init_chipset(dev); 214 hpt3x3_init_chipset(dev);
212 return ata_pci_device_resume(dev); 215 return ata_pci_device_resume(dev);
213} 216}
217#endif
214 218
215static const struct pci_device_id hpt3x3[] = { 219static const struct pci_device_id hpt3x3[] = {
216 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT343), }, 220 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT343), },
@@ -223,8 +227,10 @@ static struct pci_driver hpt3x3_pci_driver = {
223 .id_table = hpt3x3, 227 .id_table = hpt3x3,
224 .probe = hpt3x3_init_one, 228 .probe = hpt3x3_init_one,
225 .remove = ata_pci_remove_one, 229 .remove = ata_pci_remove_one,
230#ifdef CONFIG_PM
226 .suspend = ata_pci_device_suspend, 231 .suspend = ata_pci_device_suspend,
227 .resume = hpt3x3_reinit_one, 232 .resume = hpt3x3_reinit_one,
233#endif
228}; 234};
229 235
230static int __init hpt3x3_init(void) 236static int __init hpt3x3_init(void)
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
index 1bf5ec18b2..1a61cc8917 100644
--- a/drivers/ata/pata_isapnp.c
+++ b/drivers/ata/pata_isapnp.c
@@ -17,7 +17,7 @@
17#include <linux/libata.h> 17#include <linux/libata.h>
18 18
19#define DRV_NAME "pata_isapnp" 19#define DRV_NAME "pata_isapnp"
20#define DRV_VERSION "0.1.5" 20#define DRV_VERSION "0.2.0"
21 21
22static struct scsi_host_template isapnp_sht = { 22static struct scsi_host_template isapnp_sht = {
23 .module = THIS_MODULE, 23 .module = THIS_MODULE,
@@ -128,7 +128,6 @@ static void isapnp_remove_one(struct pnp_dev *idev)
128 struct ata_host *host = dev_get_drvdata(dev); 128 struct ata_host *host = dev_get_drvdata(dev);
129 129
130 ata_host_detach(host); 130 ata_host_detach(host);
131 dev_set_drvdata(dev, NULL);
132} 131}
133 132
134static struct pnp_device_id isapnp_devices[] = { 133static struct pnp_device_id isapnp_devices[] = {
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index 7eac869dfc..ea73470155 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -246,8 +246,10 @@ static struct scsi_host_template it8213_sht = {
246 .dma_boundary = ATA_DMA_BOUNDARY, 246 .dma_boundary = ATA_DMA_BOUNDARY,
247 .slave_configure = ata_scsi_slave_config, 247 .slave_configure = ata_scsi_slave_config,
248 .bios_param = ata_std_bios_param, 248 .bios_param = ata_std_bios_param,
249#ifdef CONFIG_PM
249 .resume = ata_scsi_device_resume, 250 .resume = ata_scsi_device_resume,
250 .suspend = ata_scsi_device_suspend, 251 .suspend = ata_scsi_device_suspend,
252#endif
251}; 253};
252 254
253static const struct ata_port_operations it8213_ops = { 255static const struct ata_port_operations it8213_ops = {
@@ -330,8 +332,10 @@ static struct pci_driver it8213_pci_driver = {
330 .id_table = it8213_pci_tbl, 332 .id_table = it8213_pci_tbl,
331 .probe = it8213_init_one, 333 .probe = it8213_init_one,
332 .remove = ata_pci_remove_one, 334 .remove = ata_pci_remove_one,
335#ifdef CONFIG_PM
333 .suspend = ata_pci_device_suspend, 336 .suspend = ata_pci_device_suspend,
334 .resume = ata_pci_device_resume, 337 .resume = ata_pci_device_resume,
338#endif
335}; 339};
336 340
337static int __init it8213_init(void) 341static int __init it8213_init(void)
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 73394c75be..35ecb2ba06 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -80,7 +80,7 @@
80 80
81 81
82#define DRV_NAME "pata_it821x" 82#define DRV_NAME "pata_it821x"
83#define DRV_VERSION "0.3.3" 83#define DRV_VERSION "0.3.4"
84 84
85struct it821x_dev 85struct it821x_dev
86{ 86{
@@ -503,10 +503,12 @@ static int it821x_smart_set_mode(struct ata_port *ap, struct ata_device **unused
503 /* We do need the right mode information for DMA or PIO 503 /* We do need the right mode information for DMA or PIO
504 and this comes from the current configuration flags */ 504 and this comes from the current configuration flags */
505 if (dma_enabled & (1 << (5 + i))) { 505 if (dma_enabled & (1 << (5 + i))) {
506 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
506 dev->xfer_mode = XFER_MW_DMA_0; 507 dev->xfer_mode = XFER_MW_DMA_0;
507 dev->xfer_shift = ATA_SHIFT_MWDMA; 508 dev->xfer_shift = ATA_SHIFT_MWDMA;
508 dev->flags &= ~ATA_DFLAG_PIO; 509 dev->flags &= ~ATA_DFLAG_PIO;
509 } else { 510 } else {
511 ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
510 dev->xfer_mode = XFER_PIO_0; 512 dev->xfer_mode = XFER_PIO_0;
511 dev->xfer_shift = ATA_SHIFT_PIO; 513 dev->xfer_shift = ATA_SHIFT_PIO;
512 dev->flags |= ATA_DFLAG_PIO; 514 dev->flags |= ATA_DFLAG_PIO;
@@ -644,8 +646,10 @@ static struct scsi_host_template it821x_sht = {
644 .slave_configure = ata_scsi_slave_config, 646 .slave_configure = ata_scsi_slave_config,
645 .slave_destroy = ata_scsi_slave_destroy, 647 .slave_destroy = ata_scsi_slave_destroy,
646 .bios_param = ata_std_bios_param, 648 .bios_param = ata_std_bios_param,
649#ifdef CONFIG_PM
647 .resume = ata_scsi_device_resume, 650 .resume = ata_scsi_device_resume,
648 .suspend = ata_scsi_device_suspend, 651 .suspend = ata_scsi_device_suspend,
652#endif
649}; 653};
650 654
651static struct ata_port_operations it821x_smart_port_ops = { 655static struct ata_port_operations it821x_smart_port_ops = {
@@ -778,6 +782,7 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
778 return ata_pci_init_one(pdev, port_info, 2); 782 return ata_pci_init_one(pdev, port_info, 2);
779} 783}
780 784
785#ifdef CONFIG_PM
781static int it821x_reinit_one(struct pci_dev *pdev) 786static int it821x_reinit_one(struct pci_dev *pdev)
782{ 787{
783 /* Resume - turn raid back off if need be */ 788 /* Resume - turn raid back off if need be */
@@ -785,6 +790,7 @@ static int it821x_reinit_one(struct pci_dev *pdev)
785 it821x_disable_raid(pdev); 790 it821x_disable_raid(pdev);
786 return ata_pci_device_resume(pdev); 791 return ata_pci_device_resume(pdev);
787} 792}
793#endif
788 794
789static const struct pci_device_id it821x[] = { 795static const struct pci_device_id it821x[] = {
790 { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), }, 796 { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), },
@@ -798,8 +804,10 @@ static struct pci_driver it821x_pci_driver = {
798 .id_table = it821x, 804 .id_table = it821x,
799 .probe = it821x_init_one, 805 .probe = it821x_init_one,
800 .remove = ata_pci_remove_one, 806 .remove = ata_pci_remove_one,
807#ifdef CONFIG_PM
801 .suspend = ata_pci_device_suspend, 808 .suspend = ata_pci_device_suspend,
802 .resume = it821x_reinit_one, 809 .resume = it821x_reinit_one,
810#endif
803}; 811};
804 812
805static int __init it821x_init(void) 813static int __init it821x_init(void)
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 3222ac7b94..9a0523b5c9 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -23,15 +23,16 @@
23#include <scsi/scsi_host.h> 23#include <scsi/scsi_host.h>
24 24
25#define DRV_NAME "pata_ixp4xx_cf" 25#define DRV_NAME "pata_ixp4xx_cf"
26#define DRV_VERSION "0.1.1ac1" 26#define DRV_VERSION "0.1.2"
27 27
28static int ixp4xx_set_mode(struct ata_port *ap, struct ata_device *adev) 28static int ixp4xx_set_mode(struct ata_port *ap, struct ata_device **error)
29{ 29{
30 int i; 30 int i;
31 31
32 for (i = 0; i < ATA_MAX_DEVICES; i++) { 32 for (i = 0; i < ATA_MAX_DEVICES; i++) {
33 struct ata_device *dev = &ap->device[i]; 33 struct ata_device *dev = &ap->device[i];
34 if (ata_dev_enabled(dev)) { 34 if (ata_dev_ready(dev)) {
35 ata_dev_printk(dev, KERN_INFO, "configured for PIO0\n");
35 dev->pio_mode = XFER_PIO_0; 36 dev->pio_mode = XFER_PIO_0;
36 dev->xfer_mode = XFER_PIO_0; 37 dev->xfer_mode = XFER_PIO_0;
37 dev->xfer_shift = ATA_SHIFT_PIO; 38 dev->xfer_shift = ATA_SHIFT_PIO;
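pata_ixp4xx_cf is converted to the newer ->set_mode() signature, which takes a pointer used to report a failed device back to libata, and switches the device check to ata_dev_ready(). Restated as a sketch with a hypothetical driver name; this path never fails, so the second argument is left untouched:

static int foo_set_mode(struct ata_port *ap, struct ata_device **error)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_ready(dev))
			continue;

		ata_dev_printk(dev, KERN_INFO, "configured for PIO0\n");
		dev->pio_mode	= XFER_PIO_0;
		dev->xfer_mode	= XFER_PIO_0;
		dev->xfer_shift	= ATA_SHIFT_PIO;
		dev->flags     |= ATA_DFLAG_PIO;
	}
	return 0;
}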
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 7a635dd326..43763c99ea 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -137,6 +137,10 @@ static struct scsi_host_template jmicron_sht = {
137 .slave_destroy = ata_scsi_slave_destroy, 137 .slave_destroy = ata_scsi_slave_destroy,
138 /* Use standard CHS mapping rules */ 138 /* Use standard CHS mapping rules */
139 .bios_param = ata_std_bios_param, 139 .bios_param = ata_std_bios_param,
140#ifdef CONFIG_PM
141 .suspend = ata_scsi_device_suspend,
142 .resume = ata_scsi_device_resume,
143#endif
140}; 144};
141 145
142static const struct ata_port_operations jmicron_ops = { 146static const struct ata_port_operations jmicron_ops = {
@@ -202,49 +206,20 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
202 }; 206 };
203 struct ata_port_info *port_info[2] = { &info, &info }; 207 struct ata_port_info *port_info[2] = { &info, &info };
204 208
205 u32 reg;
206
207 /* PATA controller is fn 1, AHCI is fn 0 */
208 if (id->driver_data != 368 && PCI_FUNC(pdev->devfn) != 1)
209 return -ENODEV;
210
211 /* The 365/66 have two PATA channels, redirect the second */
212 if (id->driver_data == 365 || id->driver_data == 366) {
213 pci_read_config_dword(pdev, 0x80, &reg);
214 reg |= (1 << 24); /* IDE1 to PATA IDE secondary */
215 pci_write_config_dword(pdev, 0x80, reg);
216 }
217
218 return ata_pci_init_one(pdev, port_info, 2); 209 return ata_pci_init_one(pdev, port_info, 2);
219} 210}
220 211
221static int jmicron_reinit_one(struct pci_dev *pdev)
222{
223 u32 reg;
224
225 switch(pdev->device) {
226 case PCI_DEVICE_ID_JMICRON_JMB368:
227 break;
228 case PCI_DEVICE_ID_JMICRON_JMB365:
229 case PCI_DEVICE_ID_JMICRON_JMB366:
230 /* Restore mapping or disks swap and boy does it get ugly */
231 pci_read_config_dword(pdev, 0x80, &reg);
232 reg |= (1 << 24); /* IDE1 to PATA IDE secondary */
233 pci_write_config_dword(pdev, 0x80, reg);
234 /* Fall through */
235 default:
236 /* Make sure AHCI is turned back on */
237 pci_write_config_byte(pdev, 0x41, 0xa1);
238 }
239 return ata_pci_device_resume(pdev);
240}
241
242static const struct pci_device_id jmicron_pci_tbl[] = { 212static const struct pci_device_id jmicron_pci_tbl[] = {
243 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB361), 361}, 213 { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361,
244 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB363), 363}, 214 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 361 },
245 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB365), 365}, 215 { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363,
246 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB366), 366}, 216 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 363 },
247 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB368), 368}, 217 { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365,
218 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 365 },
219 { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366,
220 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 366 },
221 { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368,
222 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 368 },
248 223
249 { } /* terminate list */ 224 { } /* terminate list */
250}; 225};
@@ -254,8 +229,10 @@ static struct pci_driver jmicron_pci_driver = {
254 .id_table = jmicron_pci_tbl, 229 .id_table = jmicron_pci_tbl,
255 .probe = jmicron_init_one, 230 .probe = jmicron_init_one,
256 .remove = ata_pci_remove_one, 231 .remove = ata_pci_remove_one,
232#ifdef CONFIG_PM
257 .suspend = ata_pci_device_suspend, 233 .suspend = ata_pci_device_suspend,
258 .resume = jmicron_reinit_one, 234 .resume = ata_pci_device_resume,
235#endif
259}; 236};
260 237
261static int __init jmicron_init(void) 238static int __init jmicron_init(void)
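The jmicron rework drops the hand-rolled function checks and register fix-ups from probe and resume and lets the PCI core do the filtering instead: each ID table entry now also requires the IDE storage class, so the AHCI function of the same chip (a different PCI class) never reaches this driver. One entry written out long-hand as a sketch, using the JMB361 values from the patch:

static const struct pci_device_id foo_ids[] = {
	{ .vendor	= PCI_VENDOR_ID_JMICRON,
	  .device	= PCI_DEVICE_ID_JMICRON_JMB361,
	  .subvendor	= PCI_ANY_ID,
	  .subdevice	= PCI_ANY_ID,
	  .class	= PCI_CLASS_STORAGE_IDE << 8,	/* base class + subclass */
	  .class_mask	= 0xffff00,			/* ignore the prog-if byte */
	  .driver_data	= 361 },
	{ }	/* terminate list */
};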
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 98c1fee4b3..86fbcd6a74 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -64,12 +64,12 @@
64#include <linux/platform_device.h> 64#include <linux/platform_device.h>
65 65
66#define DRV_NAME "pata_legacy" 66#define DRV_NAME "pata_legacy"
67#define DRV_VERSION "0.5.3" 67#define DRV_VERSION "0.5.4"
68 68
69#define NR_HOST 6 69#define NR_HOST 6
70 70
71static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 }; 71static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
72static int legacy_irq[NR_HOST] = { 15, 14, 11, 10, 8, 12 }; 72static int legacy_irq[NR_HOST] = { 14, 15, 11, 10, 8, 12 };
73 73
74struct legacy_data { 74struct legacy_data {
75 unsigned long timing; 75 unsigned long timing;
@@ -186,7 +186,10 @@ static struct ata_port_operations legacy_port_ops = {
186 .exec_command = ata_exec_command, 186 .exec_command = ata_exec_command,
187 .dev_select = ata_std_dev_select, 187 .dev_select = ata_std_dev_select,
188 188
189 .freeze = ata_bmdma_freeze,
190 .thaw = ata_bmdma_thaw,
189 .error_handler = ata_bmdma_error_handler, 191 .error_handler = ata_bmdma_error_handler,
192 .post_internal_cmd = ata_bmdma_post_internal_cmd,
190 193
191 .qc_prep = ata_qc_prep, 194 .qc_prep = ata_qc_prep,
192 .qc_issue = ata_qc_issue_prot, 195 .qc_issue = ata_qc_issue_prot,
@@ -298,7 +301,10 @@ static struct ata_port_operations pdc20230_port_ops = {
298 .exec_command = ata_exec_command, 301 .exec_command = ata_exec_command,
299 .dev_select = ata_std_dev_select, 302 .dev_select = ata_std_dev_select,
300 303
304 .freeze = ata_bmdma_freeze,
305 .thaw = ata_bmdma_thaw,
301 .error_handler = ata_bmdma_error_handler, 306 .error_handler = ata_bmdma_error_handler,
307 .post_internal_cmd = ata_bmdma_post_internal_cmd,
302 308
303 .qc_prep = ata_qc_prep, 309 .qc_prep = ata_qc_prep,
304 .qc_issue = ata_qc_issue_prot, 310 .qc_issue = ata_qc_issue_prot,
@@ -350,7 +356,10 @@ static struct ata_port_operations ht6560a_port_ops = {
350 .exec_command = ata_exec_command, 356 .exec_command = ata_exec_command,
351 .dev_select = ata_std_dev_select, 357 .dev_select = ata_std_dev_select,
352 358
359 .freeze = ata_bmdma_freeze,
360 .thaw = ata_bmdma_thaw,
353 .error_handler = ata_bmdma_error_handler, 361 .error_handler = ata_bmdma_error_handler,
362 .post_internal_cmd = ata_bmdma_post_internal_cmd,
354 363
355 .qc_prep = ata_qc_prep, 364 .qc_prep = ata_qc_prep,
356 .qc_issue = ata_qc_issue_prot, 365 .qc_issue = ata_qc_issue_prot,
@@ -413,7 +422,10 @@ static struct ata_port_operations ht6560b_port_ops = {
413 .exec_command = ata_exec_command, 422 .exec_command = ata_exec_command,
414 .dev_select = ata_std_dev_select, 423 .dev_select = ata_std_dev_select,
415 424
425 .freeze = ata_bmdma_freeze,
426 .thaw = ata_bmdma_thaw,
416 .error_handler = ata_bmdma_error_handler, 427 .error_handler = ata_bmdma_error_handler,
428 .post_internal_cmd = ata_bmdma_post_internal_cmd,
417 429
418 .qc_prep = ata_qc_prep, 430 .qc_prep = ata_qc_prep,
419 .qc_issue = ata_qc_issue_prot, 431 .qc_issue = ata_qc_issue_prot,
@@ -531,7 +543,10 @@ static struct ata_port_operations opti82c611a_port_ops = {
531 .exec_command = ata_exec_command, 543 .exec_command = ata_exec_command,
532 .dev_select = ata_std_dev_select, 544 .dev_select = ata_std_dev_select,
533 545
546 .freeze = ata_bmdma_freeze,
547 .thaw = ata_bmdma_thaw,
534 .error_handler = ata_bmdma_error_handler, 548 .error_handler = ata_bmdma_error_handler,
549 .post_internal_cmd = ata_bmdma_post_internal_cmd,
535 550
536 .qc_prep = ata_qc_prep, 551 .qc_prep = ata_qc_prep,
537 .qc_issue = ata_qc_issue_prot, 552 .qc_issue = ata_qc_issue_prot,
@@ -661,7 +676,10 @@ static struct ata_port_operations opti82c46x_port_ops = {
661 .exec_command = ata_exec_command, 676 .exec_command = ata_exec_command,
662 .dev_select = ata_std_dev_select, 677 .dev_select = ata_std_dev_select,
663 678
679 .freeze = ata_bmdma_freeze,
680 .thaw = ata_bmdma_thaw,
664 .error_handler = ata_bmdma_error_handler, 681 .error_handler = ata_bmdma_error_handler,
682 .post_internal_cmd = ata_bmdma_post_internal_cmd,
665 683
666 .qc_prep = ata_qc_prep, 684 .qc_prep = ata_qc_prep,
667 .qc_issue = opti82c46x_qc_issue_prot, 685 .qc_issue = opti82c46x_qc_issue_prot,
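Besides the swap of the first two default IRQs (the primary port at 0x1f0 now gets IRQ 14 and the secondary at 0x170 gets IRQ 15), every legacy port_ops block picks up the remaining new-EH hooks next to the error handler it already had. The slots being filled, using the stock BMDMA helpers, with the other fields elided (sketch):

static struct ata_port_operations foo_port_ops = {
	/* ... taskfile and device-select hooks ... */
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	/* ... qc_prep, qc_issue, data_xfer, irq and port hooks ... */
};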
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 13a70ac6f1..6dd7c4ef3e 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -103,8 +103,10 @@ static struct scsi_host_template marvell_sht = {
103 .slave_destroy = ata_scsi_slave_destroy, 103 .slave_destroy = ata_scsi_slave_destroy,
104 /* Use standard CHS mapping rules */ 104 /* Use standard CHS mapping rules */
105 .bios_param = ata_std_bios_param, 105 .bios_param = ata_std_bios_param,
106#ifdef CONFIG_PM
106 .resume = ata_scsi_device_resume, 107 .resume = ata_scsi_device_resume,
107 .suspend = ata_scsi_device_suspend, 108 .suspend = ata_scsi_device_suspend,
109#endif
108}; 110};
109 111
110static const struct ata_port_operations marvell_ops = { 112static const struct ata_port_operations marvell_ops = {
@@ -199,8 +201,10 @@ static struct pci_driver marvell_pci_driver = {
199 .id_table = marvell_pci_tbl, 201 .id_table = marvell_pci_tbl,
200 .probe = marvell_init_one, 202 .probe = marvell_init_one,
201 .remove = ata_pci_remove_one, 203 .remove = ata_pci_remove_one,
204#ifdef CONFIG_PM
202 .suspend = ata_pci_device_suspend, 205 .suspend = ata_pci_device_suspend,
203 .resume = ata_pci_device_resume, 206 .resume = ata_pci_device_resume,
207#endif
204}; 208};
205 209
206static int __init marvell_init(void) 210static int __init marvell_init(void)
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 29e1809e5e..f5d88729ca 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -280,6 +280,10 @@ static struct scsi_host_template mpc52xx_ata_sht = {
280 .dma_boundary = ATA_DMA_BOUNDARY, 280 .dma_boundary = ATA_DMA_BOUNDARY,
281 .slave_configure = ata_scsi_slave_config, 281 .slave_configure = ata_scsi_slave_config,
282 .bios_param = ata_std_bios_param, 282 .bios_param = ata_std_bios_param,
283#ifdef CONFIG_PM
284 .suspend = ata_scsi_device_suspend,
285 .resume = ata_scsi_device_resume,
286#endif
283}; 287};
284 288
285static struct ata_port_operations mpc52xx_ata_port_ops = { 289static struct ata_port_operations mpc52xx_ata_port_ops = {
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index f2e7115f7a..4abe45ac19 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -165,8 +165,10 @@ static struct scsi_host_template mpiix_sht = {
165 .slave_configure = ata_scsi_slave_config, 165 .slave_configure = ata_scsi_slave_config,
166 .slave_destroy = ata_scsi_slave_destroy, 166 .slave_destroy = ata_scsi_slave_destroy,
167 .bios_param = ata_std_bios_param, 167 .bios_param = ata_std_bios_param,
168#ifdef CONFIG_PM
168 .resume = ata_scsi_device_resume, 169 .resume = ata_scsi_device_resume,
169 .suspend = ata_scsi_device_suspend, 170 .suspend = ata_scsi_device_suspend,
171#endif
170}; 172};
171 173
172static struct ata_port_operations mpiix_port_ops = { 174static struct ata_port_operations mpiix_port_ops = {
@@ -270,8 +272,10 @@ static struct pci_driver mpiix_pci_driver = {
270 .id_table = mpiix, 272 .id_table = mpiix,
271 .probe = mpiix_init_one, 273 .probe = mpiix_init_one,
272 .remove = ata_pci_remove_one, 274 .remove = ata_pci_remove_one,
275#ifdef CONFIG_PM
273 .suspend = ata_pci_device_suspend, 276 .suspend = ata_pci_device_suspend,
274 .resume = ata_pci_device_resume, 277 .resume = ata_pci_device_resume,
278#endif
275}; 279};
276 280
277static int __init mpiix_init(void) 281static int __init mpiix_init(void)
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index e8393e19be..38f99b38a5 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -63,8 +63,10 @@ static struct scsi_host_template netcell_sht = {
63 .slave_destroy = ata_scsi_slave_destroy, 63 .slave_destroy = ata_scsi_slave_destroy,
64 /* Use standard CHS mapping rules */ 64 /* Use standard CHS mapping rules */
65 .bios_param = ata_std_bios_param, 65 .bios_param = ata_std_bios_param,
66#ifdef CONFIG_PM
66 .resume = ata_scsi_device_resume, 67 .resume = ata_scsi_device_resume,
67 .suspend = ata_scsi_device_suspend, 68 .suspend = ata_scsi_device_suspend,
69#endif
68}; 70};
69 71
70static const struct ata_port_operations netcell_ops = { 72static const struct ata_port_operations netcell_ops = {
@@ -153,8 +155,10 @@ static struct pci_driver netcell_pci_driver = {
153 .id_table = netcell_pci_tbl, 155 .id_table = netcell_pci_tbl,
154 .probe = netcell_init_one, 156 .probe = netcell_init_one,
155 .remove = ata_pci_remove_one, 157 .remove = ata_pci_remove_one,
158#ifdef CONFIG_PM
156 .suspend = ata_pci_device_suspend, 159 .suspend = ata_pci_device_suspend,
157 .resume = ata_pci_device_resume, 160 .resume = ata_pci_device_resume,
161#endif
158}; 162};
159 163
160static int __init netcell_init(void) 164static int __init netcell_init(void)
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index 3d1fa487c4..9944a28daa 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -157,8 +157,10 @@ static struct scsi_host_template ns87410_sht = {
157 .slave_configure = ata_scsi_slave_config, 157 .slave_configure = ata_scsi_slave_config,
158 .slave_destroy = ata_scsi_slave_destroy, 158 .slave_destroy = ata_scsi_slave_destroy,
159 .bios_param = ata_std_bios_param, 159 .bios_param = ata_std_bios_param,
160#ifdef CONFIG_PM
160 .resume = ata_scsi_device_resume, 161 .resume = ata_scsi_device_resume,
161 .suspend = ata_scsi_device_suspend, 162 .suspend = ata_scsi_device_suspend,
163#endif
162}; 164};
163 165
164static struct ata_port_operations ns87410_port_ops = { 166static struct ata_port_operations ns87410_port_ops = {
@@ -212,8 +214,10 @@ static struct pci_driver ns87410_pci_driver = {
212 .id_table = ns87410, 214 .id_table = ns87410,
213 .probe = ns87410_init_one, 215 .probe = ns87410_init_one,
214 .remove = ata_pci_remove_one, 216 .remove = ata_pci_remove_one,
217#ifdef CONFIG_PM
215 .suspend = ata_pci_device_suspend, 218 .suspend = ata_pci_device_suspend,
216 .resume = ata_pci_device_resume, 219 .resume = ata_pci_device_resume,
220#endif
217}; 221};
218 222
219static int __init ns87410_init(void) 223static int __init ns87410_init(void)
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index 45215aa05e..da68cd19ef 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -25,7 +25,7 @@
25#include <linux/ata.h> 25#include <linux/ata.h>
26 26
27#define DRV_NAME "pata_oldpiix" 27#define DRV_NAME "pata_oldpiix"
28#define DRV_VERSION "0.5.3" 28#define DRV_VERSION "0.5.4"
29 29
30/** 30/**
31 * oldpiix_pre_reset - probe begin 31 * oldpiix_pre_reset - probe begin
@@ -209,10 +209,9 @@ static unsigned int oldpiix_qc_issue_prot(struct ata_queued_cmd *qc)
209 struct ata_device *adev = qc->dev; 209 struct ata_device *adev = qc->dev;
210 210
211 if (adev != ap->private_data) { 211 if (adev != ap->private_data) {
212 oldpiix_set_piomode(ap, adev);
212 if (adev->dma_mode) 213 if (adev->dma_mode)
213 oldpiix_set_dmamode(ap, adev); 214 oldpiix_set_dmamode(ap, adev);
214 else if (adev->pio_mode)
215 oldpiix_set_piomode(ap, adev);
216 } 215 }
217 return ata_qc_issue_prot(qc); 216 return ata_qc_issue_prot(qc);
218} 217}
@@ -234,8 +233,10 @@ static struct scsi_host_template oldpiix_sht = {
234 .slave_configure = ata_scsi_slave_config, 233 .slave_configure = ata_scsi_slave_config,
235 .slave_destroy = ata_scsi_slave_destroy, 234 .slave_destroy = ata_scsi_slave_destroy,
236 .bios_param = ata_std_bios_param, 235 .bios_param = ata_std_bios_param,
236#ifdef CONFIG_PM
237 .resume = ata_scsi_device_resume, 237 .resume = ata_scsi_device_resume,
238 .suspend = ata_scsi_device_suspend, 238 .suspend = ata_scsi_device_suspend,
239#endif
239}; 240};
240 241
241static const struct ata_port_operations oldpiix_pata_ops = { 242static const struct ata_port_operations oldpiix_pata_ops = {
@@ -317,8 +318,10 @@ static struct pci_driver oldpiix_pci_driver = {
317 .id_table = oldpiix_pci_tbl, 318 .id_table = oldpiix_pci_tbl,
318 .probe = oldpiix_init_one, 319 .probe = oldpiix_init_one,
319 .remove = ata_pci_remove_one, 320 .remove = ata_pci_remove_one,
321#ifdef CONFIG_PM
320 .suspend = ata_pci_device_suspend, 322 .suspend = ata_pci_device_suspend,
321 .resume = ata_pci_device_resume, 323 .resume = ata_pci_device_resume,
324#endif
322}; 325};
323 326
324static int __init oldpiix_init(void) 327static int __init oldpiix_init(void)
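The oldpiix change makes the per-command reconfiguration on a device switch unconditional: PIO timings are always reloaded, with DMA timings layered on top when the device has a DMA mode, rather than choosing one or the other. As a sketch with hypothetical timing setters, mirroring the patched logic:

static unsigned int foo_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;

	/* Channel timing is shared, so reload it whenever the active device
	 * changes; ap->private_data is used to remember the device last
	 * programmed (as in the diff above). */
	if (adev != ap->private_data) {
		foo_set_piomode(ap, adev);
		if (adev->dma_mode)
			foo_set_dmamode(ap, adev);
	}
	return ata_qc_issue_prot(qc);
}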
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index da1aa148b3..3fd3a35c22 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -34,7 +34,7 @@
34#include <linux/libata.h> 34#include <linux/libata.h>
35 35
36#define DRV_NAME "pata_opti" 36#define DRV_NAME "pata_opti"
37#define DRV_VERSION "0.2.7" 37#define DRV_VERSION "0.2.8"
38 38
39enum { 39enum {
40 READ_REG = 0, /* index of Read cycle timing register */ 40 READ_REG = 0, /* index of Read cycle timing register */
@@ -179,8 +179,10 @@ static struct scsi_host_template opti_sht = {
179 .slave_configure = ata_scsi_slave_config, 179 .slave_configure = ata_scsi_slave_config,
180 .slave_destroy = ata_scsi_slave_destroy, 180 .slave_destroy = ata_scsi_slave_destroy,
181 .bios_param = ata_std_bios_param, 181 .bios_param = ata_std_bios_param,
182#ifdef CONFIG_PM
182 .resume = ata_scsi_device_resume, 183 .resume = ata_scsi_device_resume,
183 .suspend = ata_scsi_device_suspend, 184 .suspend = ata_scsi_device_suspend,
185#endif
184}; 186};
185 187
186static struct ata_port_operations opti_port_ops = { 188static struct ata_port_operations opti_port_ops = {
@@ -244,8 +246,10 @@ static struct pci_driver opti_pci_driver = {
244 .id_table = opti, 246 .id_table = opti,
245 .probe = opti_init_one, 247 .probe = opti_init_one,
246 .remove = ata_pci_remove_one, 248 .remove = ata_pci_remove_one,
249#ifdef CONFIG_PM
247 .suspend = ata_pci_device_suspend, 250 .suspend = ata_pci_device_suspend,
248 .resume = ata_pci_device_resume, 251 .resume = ata_pci_device_resume,
252#endif
249}; 253};
250 254
251static int __init opti_init(void) 255static int __init opti_init(void)
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index d80b36e209..9764907e8a 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -33,7 +33,7 @@
33#include <linux/libata.h> 33#include <linux/libata.h>
34 34
35#define DRV_NAME "pata_optidma" 35#define DRV_NAME "pata_optidma"
36#define DRV_VERSION "0.2.3" 36#define DRV_VERSION "0.2.4"
37 37
38enum { 38enum {
39 READ_REG = 0, /* index of Read cycle timing register */ 39 READ_REG = 0, /* index of Read cycle timing register */
@@ -360,8 +360,10 @@ static struct scsi_host_template optidma_sht = {
360 .slave_configure = ata_scsi_slave_config, 360 .slave_configure = ata_scsi_slave_config,
361 .slave_destroy = ata_scsi_slave_destroy, 361 .slave_destroy = ata_scsi_slave_destroy,
362 .bios_param = ata_std_bios_param, 362 .bios_param = ata_std_bios_param,
363#ifdef CONFIG_PM
363 .resume = ata_scsi_device_resume, 364 .resume = ata_scsi_device_resume,
364 .suspend = ata_scsi_device_suspend, 365 .suspend = ata_scsi_device_suspend,
366#endif
365}; 367};
366 368
367static struct ata_port_operations optidma_port_ops = { 369static struct ata_port_operations optidma_port_ops = {
@@ -524,8 +526,10 @@ static struct pci_driver optidma_pci_driver = {
524 .id_table = optidma, 526 .id_table = optidma,
525 .probe = optidma_init_one, 527 .probe = optidma_init_one,
526 .remove = ata_pci_remove_one, 528 .remove = ata_pci_remove_one,
529#ifdef CONFIG_PM
527 .suspend = ata_pci_device_suspend, 530 .suspend = ata_pci_device_suspend,
528 .resume = ata_pci_device_resume, 531 .resume = ata_pci_device_resume,
532#endif
529}; 533};
530 534
531static int __init optidma_init(void) 535static int __init optidma_init(void)
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 8928a6dfac..103720f873 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -42,7 +42,7 @@
42 42
43 43
44#define DRV_NAME "pata_pcmcia" 44#define DRV_NAME "pata_pcmcia"
45#define DRV_VERSION "0.2.11" 45#define DRV_VERSION "0.3.0"
46 46
47/* 47/*
48 * Private data structure to glue stuff together 48 * Private data structure to glue stuff together
@@ -319,14 +319,17 @@ static void pcmcia_remove_one(struct pcmcia_device *pdev)
319static struct pcmcia_device_id pcmcia_devices[] = { 319static struct pcmcia_device_id pcmcia_devices[] = {
320 PCMCIA_DEVICE_FUNC_ID(4), 320 PCMCIA_DEVICE_FUNC_ID(4),
321 PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */ 321 PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */
322 PCMCIA_DEVICE_MANF_CARD(0x000a, 0x0000), /* I-O Data CFA */
323 PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001), /* Mitsubishi CFA */
322 PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704), 324 PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
323 PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), 325 PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), /* SanDisk CFA */
324 PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */ 326 PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */
325 PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d), 327 PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
326 PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */ 328 PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */
327 PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */ 329 PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */
328 PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001), 330 PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
329 PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar */ 331 PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100), /* Viking CFA */
332 PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar, Viking CFA */
330 PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0), 333 PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0),
331 PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74), 334 PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74),
332 PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9), 335 PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 61537873d2..93bcdadb7b 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -35,7 +35,7 @@
35#include <linux/libata.h> 35#include <linux/libata.h>
36 36
37#define DRV_NAME "pata_pdc2027x" 37#define DRV_NAME "pata_pdc2027x"
38#define DRV_VERSION "0.74-ac5" 38#define DRV_VERSION "0.8"
39#undef PDC_DEBUG 39#undef PDC_DEBUG
40 40
41#ifdef PDC_DEBUG 41#ifdef PDC_DEBUG
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index 6dd63413a5..acdc52cbe3 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -2,13 +2,14 @@
2 * pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer 2 * pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer
3 * (C) 2005 Red Hat Inc 3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com> 4 * Alan Cox <alan@redhat.com>
5 * (C) 2007 Bartlomiej Zolnierkiewicz
5 * 6 *
6 * Based in part on linux/drivers/ide/pci/pdc202xx_old.c 7 * Based in part on linux/drivers/ide/pci/pdc202xx_old.c
7 * 8 *
8 * First cut with LBA48/ATAPI 9 * First cut with LBA48/ATAPI
9 * 10 *
10 * TODO: 11 * TODO:
11 * Channel interlock/reset on both required ? 12 * Channel interlock/reset on both required
12 */ 13 */
13 14
14#include <linux/kernel.h> 15#include <linux/kernel.h>
@@ -21,7 +22,7 @@
21#include <linux/libata.h> 22#include <linux/libata.h>
22 23
23#define DRV_NAME "pata_pdc202xx_old" 24#define DRV_NAME "pata_pdc202xx_old"
24#define DRV_VERSION "0.2.3" 25#define DRV_VERSION "0.4.0"
25 26
26/** 27/**
27 * pdc2024x_pre_reset - probe begin 28 * pdc2024x_pre_reset - probe begin
@@ -76,7 +77,7 @@ static void pdc2026x_error_handler(struct ata_port *ap)
76static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio) 77static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
77{ 78{
78 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 79 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
79 int port = 0x60 + 4 * ap->port_no + 2 * adev->devno; 80 int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
80 static u16 pio_timing[5] = { 81 static u16 pio_timing[5] = {
81 0x0913, 0x050C , 0x0308, 0x0206, 0x0104 82 0x0913, 0x050C , 0x0308, 0x0206, 0x0104
82 }; 83 };
@@ -85,7 +86,7 @@ static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *a
85 pci_read_config_byte(pdev, port, &r_ap); 86 pci_read_config_byte(pdev, port, &r_ap);
86 pci_read_config_byte(pdev, port + 1, &r_bp); 87 pci_read_config_byte(pdev, port + 1, &r_bp);
87 r_ap &= ~0x3F; /* Preserve ERRDY_EN, SYNC_IN */ 88 r_ap &= ~0x3F; /* Preserve ERRDY_EN, SYNC_IN */
88 r_bp &= ~0x07; 89 r_bp &= ~0x1F;
89 r_ap |= (pio_timing[pio] >> 8); 90 r_ap |= (pio_timing[pio] >> 8);
90 r_bp |= (pio_timing[pio] & 0xFF); 91 r_bp |= (pio_timing[pio] & 0xFF);
91 92
@@ -123,7 +124,7 @@ static void pdc202xx_set_piomode(struct ata_port *ap, struct ata_device *adev)
123static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev) 124static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
124{ 125{
125 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 126 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
126 int port = 0x60 + 4 * ap->port_no + 2 * adev->devno; 127 int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
127 static u8 udma_timing[6][2] = { 128 static u8 udma_timing[6][2] = {
128 { 0x60, 0x03 }, /* 33 Mhz Clock */ 129 { 0x60, 0x03 }, /* 33 Mhz Clock */
129 { 0x40, 0x02 }, 130 { 0x40, 0x02 },
@@ -132,12 +133,17 @@ static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
132 { 0x20, 0x01 }, 133 { 0x20, 0x01 },
133 { 0x20, 0x01 } 134 { 0x20, 0x01 }
134 }; 135 };
136 static u8 mdma_timing[3][2] = {
137 { 0x60, 0x03 },
138 { 0x60, 0x04 },
139 { 0xe0, 0x0f },
140 };
135 u8 r_bp, r_cp; 141 u8 r_bp, r_cp;
136 142
137 pci_read_config_byte(pdev, port + 1, &r_bp); 143 pci_read_config_byte(pdev, port + 1, &r_bp);
138 pci_read_config_byte(pdev, port + 2, &r_cp); 144 pci_read_config_byte(pdev, port + 2, &r_cp);
139 145
140 r_bp &= ~0xF0; 146 r_bp &= ~0xE0;
141 r_cp &= ~0x0F; 147 r_cp &= ~0x0F;
142 148
143 if (adev->dma_mode >= XFER_UDMA_0) { 149 if (adev->dma_mode >= XFER_UDMA_0) {
@@ -147,8 +153,8 @@ static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
147 153
148 } else { 154 } else {
149 int speed = adev->dma_mode - XFER_MW_DMA_0; 155 int speed = adev->dma_mode - XFER_MW_DMA_0;
150 r_bp |= 0x60; 156 r_bp |= mdma_timing[speed][0];
151 r_cp |= (5 - speed); 157 r_cp |= mdma_timing[speed][1];
152 } 158 }
153 pci_write_config_byte(pdev, port + 1, r_bp); 159 pci_write_config_byte(pdev, port + 1, r_bp);
154 pci_write_config_byte(pdev, port + 2, r_cp); 160 pci_write_config_byte(pdev, port + 2, r_cp);
@@ -267,8 +273,10 @@ static struct scsi_host_template pdc202xx_sht = {
267 .slave_configure = ata_scsi_slave_config, 273 .slave_configure = ata_scsi_slave_config,
268 .slave_destroy = ata_scsi_slave_destroy, 274 .slave_destroy = ata_scsi_slave_destroy,
269 .bios_param = ata_std_bios_param, 275 .bios_param = ata_std_bios_param,
276#ifdef CONFIG_PM
270 .resume = ata_scsi_device_resume, 277 .resume = ata_scsi_device_resume,
271 .suspend = ata_scsi_device_suspend, 278 .suspend = ata_scsi_device_suspend,
279#endif
272}; 280};
273 281
274static struct ata_port_operations pdc2024x_port_ops = { 282static struct ata_port_operations pdc2024x_port_ops = {
@@ -399,8 +407,10 @@ static struct pci_driver pdc202xx_pci_driver = {
399 .id_table = pdc202xx, 407 .id_table = pdc202xx,
400 .probe = pdc202xx_init_one, 408 .probe = pdc202xx_init_one,
401 .remove = ata_pci_remove_one, 409 .remove = ata_pci_remove_one,
410#ifdef CONFIG_PM
402 .suspend = ata_pci_device_suspend, 411 .suspend = ata_pci_device_suspend,
403 .resume = ata_pci_device_resume, 412 .resume = ata_pci_device_resume,
413#endif
404}; 414};
405 415
406static int __init pdc202xx_init(void) 416static int __init pdc202xx_init(void)
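The pdc202xx_old rework corrects the config-space addressing of the timing registers (eight bytes per channel, four per device, starting at 0x60), adjusts the register masks to match, and adds a per-mode MWDMA timing table in place of the old fixed 0x60 for register B and 5 - speed for register C. The addressing, pulled out as a sketch (hypothetical helper name; the arithmetic is the patch's):

/* Offset of the timing byte group for a given drive, as implied by the
 * corrected arithmetic in the patch. */
static int foo_timing_base(const struct ata_port *ap,
			   const struct ata_device *adev)
{
	return 0x60 + 8 * ap->port_no + 4 * adev->devno;
}

The MWDMA path then indexes the new table with adev->dma_mode - XFER_MW_DMA_0 and ORs the two bytes into registers B and C.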
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 479a326114..4b82a5435a 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -42,6 +42,7 @@ static int pata_platform_set_mode(struct ata_port *ap, struct ata_device **unuse
42 dev->pio_mode = dev->xfer_mode = XFER_PIO_0; 42 dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
43 dev->xfer_shift = ATA_SHIFT_PIO; 43 dev->xfer_shift = ATA_SHIFT_PIO;
44 dev->flags |= ATA_DFLAG_PIO; 44 dev->flags |= ATA_DFLAG_PIO;
45 ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
45 } 46 }
46 } 47 }
47 return 0; 48 return 0;
@@ -227,7 +228,6 @@ static int __devexit pata_platform_remove(struct platform_device *pdev)
227 struct ata_host *host = dev_get_drvdata(dev); 228 struct ata_host *host = dev_get_drvdata(dev);
228 229
229 ata_host_detach(host); 230 ata_host_detach(host);
230 dev_set_drvdata(dev, NULL);
231 231
232 return 0; 232 return 0;
233} 233}
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
index 4362141976..c3810012f3 100644
--- a/drivers/ata/pata_qdi.c
+++ b/drivers/ata/pata_qdi.c
@@ -26,7 +26,7 @@
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27 27
28#define DRV_NAME "pata_qdi" 28#define DRV_NAME "pata_qdi"
29#define DRV_VERSION "0.2.4" 29#define DRV_VERSION "0.3.0"
30 30
31#define NR_HOST 4 /* Two 6580s */ 31#define NR_HOST 4 /* Two 6580s */
32 32
@@ -363,7 +363,8 @@ static __init int qdi_init(void)
363 release_region(port, 2); 363 release_region(port, 2);
364 continue; 364 continue;
365 } 365 }
366 ct += qdi_init_one(port, 6500, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04); 366 if (qdi_init_one(port, 6500, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04) == 0)
367 ct++;
367 } 368 }
368 if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) { 369 if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) {
369 /* QD6580: dual channel */ 370 /* QD6580: dual channel */
@@ -375,11 +376,14 @@ static __init int qdi_init(void)
375 res = inb(port + 3); 376 res = inb(port + 3);
376 if (res & 1) { 377 if (res & 1) {
377 /* Single channel mode */ 378 /* Single channel mode */
378 ct += qdi_init_one(port, 6580, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04); 379 if (qdi_init_one(port, 6580, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04) == 0)
380 ct++;
379 } else { 381 } else {
380 /* Dual channel mode */ 382 /* Dual channel mode */
381 ct += qdi_init_one(port, 6580, 0x1F0, 14, r & 0x04); 383 if (qdi_init_one(port, 6580, 0x1F0, 14, r & 0x04) == 0)
382 ct += qdi_init_one(port + 2, 6580, 0x170, 15, r & 0x04); 384 ct++;
385 if (qdi_init_one(port + 2, 6580, 0x170, 15, r & 0x04) == 0)
386 ct++;
383 } 387 }
384 } 388 }
385 } 389 }
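qdi_init_one() here evidently returns 0 on success and a negative errno on failure (its body is outside this excerpt), so adding its return value to the probe count no longer works; the patch counts a port only when the call returns 0. The corrected pattern in isolation (sketch):

int ret = qdi_init_one(port, 6580, 0x1F0, 14, r & 0x04);
if (ret == 0)		/* only a successful probe counts */
	ct++;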
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index 0d1e571ef6..9a9132c9e3 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -228,8 +228,10 @@ static struct scsi_host_template radisys_sht = {
228 .slave_configure = ata_scsi_slave_config, 228 .slave_configure = ata_scsi_slave_config,
229 .slave_destroy = ata_scsi_slave_destroy, 229 .slave_destroy = ata_scsi_slave_destroy,
230 .bios_param = ata_std_bios_param, 230 .bios_param = ata_std_bios_param,
231#ifdef CONFIG_PM
231 .resume = ata_scsi_device_resume, 232 .resume = ata_scsi_device_resume,
232 .suspend = ata_scsi_device_suspend, 233 .suspend = ata_scsi_device_suspend,
234#endif
233}; 235};
234 236
235static const struct ata_port_operations radisys_pata_ops = { 237static const struct ata_port_operations radisys_pata_ops = {
@@ -312,8 +314,10 @@ static struct pci_driver radisys_pci_driver = {
312 .id_table = radisys_pci_tbl, 314 .id_table = radisys_pci_tbl,
313 .probe = radisys_init_one, 315 .probe = radisys_init_one,
314 .remove = ata_pci_remove_one, 316 .remove = ata_pci_remove_one,
317#ifdef CONFIG_PM
315 .suspend = ata_pci_device_suspend, 318 .suspend = ata_pci_device_suspend,
316 .resume = ata_pci_device_resume, 319 .resume = ata_pci_device_resume,
320#endif
317}; 321};
318 322
319static int __init radisys_init(void) 323static int __init radisys_init(void)
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index 71a2bac09e..f522daa2a6 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -71,6 +71,7 @@ static int rz1000_set_mode(struct ata_port *ap, struct ata_device **unused)
71 dev->xfer_mode = XFER_PIO_0; 71 dev->xfer_mode = XFER_PIO_0;
72 dev->xfer_shift = ATA_SHIFT_PIO; 72 dev->xfer_shift = ATA_SHIFT_PIO;
73 dev->flags |= ATA_DFLAG_PIO; 73 dev->flags |= ATA_DFLAG_PIO;
74 ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
74 } 75 }
75 } 76 }
76 return 0; 77 return 0;
@@ -93,8 +94,10 @@ static struct scsi_host_template rz1000_sht = {
93 .slave_configure = ata_scsi_slave_config, 94 .slave_configure = ata_scsi_slave_config,
94 .slave_destroy = ata_scsi_slave_destroy, 95 .slave_destroy = ata_scsi_slave_destroy,
95 .bios_param = ata_std_bios_param, 96 .bios_param = ata_std_bios_param,
97#ifdef CONFIG_PM
96 .resume = ata_scsi_device_resume, 98 .resume = ata_scsi_device_resume,
97 .suspend = ata_scsi_device_suspend, 99 .suspend = ata_scsi_device_suspend,
100#endif
98}; 101};
99 102
100static struct ata_port_operations rz1000_port_ops = { 103static struct ata_port_operations rz1000_port_ops = {
@@ -177,6 +180,7 @@ static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *en
177 return -ENODEV; 180 return -ENODEV;
178} 181}
179 182
183#ifdef CONFIG_PM
180static int rz1000_reinit_one(struct pci_dev *pdev) 184static int rz1000_reinit_one(struct pci_dev *pdev)
181{ 185{
182 /* If this fails on resume (which is a "cant happen" case), we 186 /* If this fails on resume (which is a "cant happen" case), we
@@ -185,6 +189,7 @@ static int rz1000_reinit_one(struct pci_dev *pdev)
185 panic("rz1000 fifo"); 189 panic("rz1000 fifo");
186 return ata_pci_device_resume(pdev); 190 return ata_pci_device_resume(pdev);
187} 191}
192#endif
188 193
189static const struct pci_device_id pata_rz1000[] = { 194static const struct pci_device_id pata_rz1000[] = {
190 { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), }, 195 { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), },
@@ -198,8 +203,10 @@ static struct pci_driver rz1000_pci_driver = {
198 .id_table = pata_rz1000, 203 .id_table = pata_rz1000,
199 .probe = rz1000_init_one, 204 .probe = rz1000_init_one,
200 .remove = ata_pci_remove_one, 205 .remove = ata_pci_remove_one,
206#ifdef CONFIG_PM
201 .suspend = ata_pci_device_suspend, 207 .suspend = ata_pci_device_suspend,
202 .resume = rz1000_reinit_one, 208 .resume = rz1000_reinit_one,
209#endif
203}; 210};
204 211
205static int __init rz1000_init(void) 212static int __init rz1000_init(void)
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index 58e42fbd14..93b3ed0f9e 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -194,8 +194,10 @@ static struct scsi_host_template sc1200_sht = {
194 .slave_configure = ata_scsi_slave_config, 194 .slave_configure = ata_scsi_slave_config,
195 .slave_destroy = ata_scsi_slave_destroy, 195 .slave_destroy = ata_scsi_slave_destroy,
196 .bios_param = ata_std_bios_param, 196 .bios_param = ata_std_bios_param,
197#ifdef CONFIG_PM
197 .resume = ata_scsi_device_resume, 198 .resume = ata_scsi_device_resume,
198 .suspend = ata_scsi_device_suspend, 199 .suspend = ata_scsi_device_suspend,
200#endif
199}; 201};
200 202
201static struct ata_port_operations sc1200_port_ops = { 203static struct ata_port_operations sc1200_port_ops = {
@@ -210,7 +212,10 @@ static struct ata_port_operations sc1200_port_ops = {
210 .exec_command = ata_exec_command, 212 .exec_command = ata_exec_command,
211 .dev_select = ata_std_dev_select, 213 .dev_select = ata_std_dev_select,
212 214
215 .freeze = ata_bmdma_freeze,
216 .thaw = ata_bmdma_thaw,
213 .error_handler = ata_bmdma_error_handler, 217 .error_handler = ata_bmdma_error_handler,
218 .post_internal_cmd = ata_bmdma_post_internal_cmd,
214 219
215 .bmdma_setup = ata_bmdma_setup, 220 .bmdma_setup = ata_bmdma_setup,
216 .bmdma_start = ata_bmdma_start, 221 .bmdma_start = ata_bmdma_start,
@@ -266,8 +271,10 @@ static struct pci_driver sc1200_pci_driver = {
266 .id_table = sc1200, 271 .id_table = sc1200,
267 .probe = sc1200_init_one, 272 .probe = sc1200_init_one,
268 .remove = ata_pci_remove_one, 273 .remove = ata_pci_remove_one,
274#ifdef CONFIG_PM
269 .suspend = ata_pci_device_suspend, 275 .suspend = ata_pci_device_suspend,
270 .resume = ata_pci_device_resume, 276 .resume = ata_pci_device_resume,
277#endif
271}; 278};
272 279
273static int __init sc1200_init(void) 280static int __init sc1200_init(void)
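A recurring change in this series is wrapping the suspend/resume members of struct pci_driver (and of struct scsi_host_template) in #ifdef CONFIG_PM so the drivers still build when power management is compiled out. A minimal sketch of the resulting shape, using a made-up pata_foo driver; the libata callbacks named are the ones the hunks above wire in, while foo_pci_tbl and foo_init_one are hypothetical:

#ifdef CONFIG_PM
static int foo_reinit_one(struct pci_dev *pdev)
{
	/* redo any chip setup lost across suspend, then let libata resume the host */
	return ata_pci_device_resume(pdev);
}
#endif

static struct pci_driver foo_pci_driver = {
	.name		= "pata_foo",
	.id_table	= foo_pci_tbl,		/* hypothetical ID table */
	.probe		= foo_init_one,		/* hypothetical probe routine */
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= foo_reinit_one,
#endif
};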
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
new file mode 100644
index 0000000000..f3ed141fdc
--- /dev/null
+++ b/drivers/ata/pata_scc.c
@@ -0,0 +1,1230 @@
1/*
2 * Support for IDE interfaces on Celleb platform
3 *
4 * (C) Copyright 2006 TOSHIBA CORPORATION
5 *
6 * This code is based on drivers/ata/ata_piix.c:
7 * Copyright 2003-2005 Red Hat Inc
8 * Copyright 2003-2005 Jeff Garzik
9 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
10 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
11 * Copyright (C) 2003 Red Hat Inc <alan@redhat.com>
12 *
13 * and drivers/ata/ahci.c:
14 * Copyright 2004-2005 Red Hat, Inc.
15 *
16 * and drivers/ata/libata-core.c:
17 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
18 * Copyright 2003-2004 Jeff Garzik
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License along
31 * with this program; if not, write to the Free Software Foundation, Inc.,
32 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/blkdev.h>
40#include <linux/delay.h>
41#include <linux/device.h>
42#include <scsi/scsi_host.h>
43#include <linux/libata.h>
44
45#define DRV_NAME "pata_scc"
46#define DRV_VERSION "0.1"
47
48#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4
49
50/* PCI BARs */
51#define SCC_CTRL_BAR 0
52#define SCC_BMID_BAR 1
53
54/* offset of CTRL registers */
55#define SCC_CTL_PIOSHT 0x000
56#define SCC_CTL_PIOCT 0x004
57#define SCC_CTL_MDMACT 0x008
58#define SCC_CTL_MCRCST 0x00C
59#define SCC_CTL_SDMACT 0x010
60#define SCC_CTL_SCRCST 0x014
61#define SCC_CTL_UDENVT 0x018
62#define SCC_CTL_TDVHSEL 0x020
63#define SCC_CTL_MODEREG 0x024
64#define SCC_CTL_ECMODE 0xF00
65#define SCC_CTL_MAEA0 0xF50
66#define SCC_CTL_MAEC0 0xF54
67#define SCC_CTL_CCKCTRL 0xFF0
68
69/* offset of BMID registers */
70#define SCC_DMA_CMD 0x000
71#define SCC_DMA_STATUS 0x004
72#define SCC_DMA_TABLE_OFS 0x008
73#define SCC_DMA_INTMASK 0x010
74#define SCC_DMA_INTST 0x014
75#define SCC_DMA_PTERADD 0x018
76#define SCC_REG_CMD_ADDR 0x020
77#define SCC_REG_DATA 0x000
78#define SCC_REG_ERR 0x004
79#define SCC_REG_FEATURE 0x004
80#define SCC_REG_NSECT 0x008
81#define SCC_REG_LBAL 0x00C
82#define SCC_REG_LBAM 0x010
83#define SCC_REG_LBAH 0x014
84#define SCC_REG_DEVICE 0x018
85#define SCC_REG_STATUS 0x01C
86#define SCC_REG_CMD 0x01C
87#define SCC_REG_ALTSTATUS 0x020
88
89/* register value */
90#define TDVHSEL_MASTER 0x00000001
91#define TDVHSEL_SLAVE 0x00000004
92
93#define MODE_JCUSFEN 0x00000080
94
95#define ECMODE_VALUE 0x01
96
97#define CCKCTRL_ATARESET 0x00040000
98#define CCKCTRL_BUFCNT 0x00020000
99#define CCKCTRL_CRST 0x00010000
100#define CCKCTRL_OCLKEN 0x00000100
101#define CCKCTRL_ATACLKOEN 0x00000002
102#define CCKCTRL_LCLKEN 0x00000001
103
104#define QCHCD_IOS_SS 0x00000001
105
106#define QCHSD_STPDIAG 0x00020000
107
108#define INTMASK_MSK 0xD1000012
109#define INTSTS_SERROR 0x80000000
110#define INTSTS_PRERR 0x40000000
111#define INTSTS_RERR 0x10000000
112#define INTSTS_ICERR 0x01000000
113#define INTSTS_BMSINT 0x00000010
114#define INTSTS_BMHE 0x00000008
115#define INTSTS_IOIRQS 0x00000004
116#define INTSTS_INTRQ 0x00000002
117#define INTSTS_ACTEINT 0x00000001
118
119
120/* PIO transfer mode table */
121/* JCHST */
122static const unsigned long JCHSTtbl[2][7] = {
123 {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00}, /* 100MHz */
124 {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00} /* 133MHz */
125};
126
127/* JCHHT */
128static const unsigned long JCHHTtbl[2][7] = {
129 {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00}, /* 100MHz */
130 {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00} /* 133MHz */
131};
132
133/* JCHCT */
134static const unsigned long JCHCTtbl[2][7] = {
135 {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00}, /* 100MHz */
136 {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00} /* 133MHz */
137};
138
139/* DMA transfer mode table */
140/* JCHDCTM/JCHDCTS */
141static const unsigned long JCHDCTxtbl[2][7] = {
142 {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00}, /* 100MHz */
143 {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00} /* 133MHz */
144};
145
146/* JCSTWTM/JCSTWTS */
147static const unsigned long JCSTWTxtbl[2][7] = {
148 {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00}, /* 100MHz */
149 {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
150};
151
152/* JCTSS */
153static const unsigned long JCTSStbl[2][7] = {
154 {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00}, /* 100MHz */
155 {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05} /* 133MHz */
156};
157
158/* JCENVT */
159static const unsigned long JCENVTtbl[2][7] = {
160 {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00}, /* 100MHz */
161 {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
162};
163
164/* JCACTSELS/JCACTSELM */
165static const unsigned long JCACTSELtbl[2][7] = {
166 {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00}, /* 100MHz */
167 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} /* 133MHz */
168};
169
170static const struct pci_device_id scc_pci_tbl[] = {
171 {PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA,
172 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
173 { } /* terminate list */
174};
175
176/**
177 * scc_set_piomode - Initialize host controller PATA PIO timings
178 * @ap: Port whose timings we are configuring
 179 * @adev: Device whose PIO timings we are configuring
180 *
181 * Set PIO mode for device.
182 *
183 * LOCKING:
184 * None (inherited from caller).
185 */
186
187static void scc_set_piomode (struct ata_port *ap, struct ata_device *adev)
188{
189 unsigned int pio = adev->pio_mode - XFER_PIO_0;
190 void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
191 void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
192 void __iomem *piosht_port = ctrl_base + SCC_CTL_PIOSHT;
193 void __iomem *pioct_port = ctrl_base + SCC_CTL_PIOCT;
194 unsigned long reg;
195 int offset;
196
197 reg = in_be32(cckctrl_port);
198 if (reg & CCKCTRL_ATACLKOEN)
199 offset = 1; /* 133MHz */
200 else
201 offset = 0; /* 100MHz */
202
203 reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
204 out_be32(piosht_port, reg);
205 reg = JCHCTtbl[offset][pio];
206 out_be32(pioct_port, reg);
207}
208
209/**
210 * scc_set_dmamode - Initialize host controller PATA DMA timings
211 * @ap: Port whose timings we are configuring
 212 * @adev: Device whose DMA timings we are configuring
213 * @udma: udma mode, 0 - 6
214 *
215 * Set UDMA mode for device.
216 *
217 * LOCKING:
218 * None (inherited from caller).
219 */
220
221static void scc_set_dmamode (struct ata_port *ap, struct ata_device *adev)
222{
223 unsigned int udma = adev->dma_mode;
224 unsigned int is_slave = (adev->devno != 0);
225 u8 speed = udma;
226 void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
227 void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
228 void __iomem *mdmact_port = ctrl_base + SCC_CTL_MDMACT;
229 void __iomem *mcrcst_port = ctrl_base + SCC_CTL_MCRCST;
230 void __iomem *sdmact_port = ctrl_base + SCC_CTL_SDMACT;
231 void __iomem *scrcst_port = ctrl_base + SCC_CTL_SCRCST;
232 void __iomem *udenvt_port = ctrl_base + SCC_CTL_UDENVT;
233 void __iomem *tdvhsel_port = ctrl_base + SCC_CTL_TDVHSEL;
234 int offset, idx;
235
236 if (in_be32(cckctrl_port) & CCKCTRL_ATACLKOEN)
237 offset = 1; /* 133MHz */
238 else
239 offset = 0; /* 100MHz */
240
241 if (speed >= XFER_UDMA_0)
242 idx = speed - XFER_UDMA_0;
243 else
244 return;
245
246 if (is_slave) {
247 out_be32(sdmact_port, JCHDCTxtbl[offset][idx]);
248 out_be32(scrcst_port, JCSTWTxtbl[offset][idx]);
249 out_be32(tdvhsel_port,
250 (in_be32(tdvhsel_port) & ~TDVHSEL_SLAVE) | (JCACTSELtbl[offset][idx] << 2));
251 } else {
252 out_be32(mdmact_port, JCHDCTxtbl[offset][idx]);
253 out_be32(mcrcst_port, JCSTWTxtbl[offset][idx]);
254 out_be32(tdvhsel_port,
255 (in_be32(tdvhsel_port) & ~TDVHSEL_MASTER) | JCACTSELtbl[offset][idx]);
256 }
257 out_be32(udenvt_port,
258 JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]);
259}
260
261/**
262 * scc_tf_load - send taskfile registers to host controller
263 * @ap: Port to which output is sent
264 * @tf: ATA taskfile register set
265 *
266 * Note: Original code is ata_tf_load().
267 */
268
269static void scc_tf_load (struct ata_port *ap, const struct ata_taskfile *tf)
270{
271 struct ata_ioports *ioaddr = &ap->ioaddr;
272 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
273
274 if (tf->ctl != ap->last_ctl) {
275 out_be32(ioaddr->ctl_addr, tf->ctl);
276 ap->last_ctl = tf->ctl;
277 ata_wait_idle(ap);
278 }
279
280 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
281 out_be32(ioaddr->feature_addr, tf->hob_feature);
282 out_be32(ioaddr->nsect_addr, tf->hob_nsect);
283 out_be32(ioaddr->lbal_addr, tf->hob_lbal);
284 out_be32(ioaddr->lbam_addr, tf->hob_lbam);
285 out_be32(ioaddr->lbah_addr, tf->hob_lbah);
286 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
287 tf->hob_feature,
288 tf->hob_nsect,
289 tf->hob_lbal,
290 tf->hob_lbam,
291 tf->hob_lbah);
292 }
293
294 if (is_addr) {
295 out_be32(ioaddr->feature_addr, tf->feature);
296 out_be32(ioaddr->nsect_addr, tf->nsect);
297 out_be32(ioaddr->lbal_addr, tf->lbal);
298 out_be32(ioaddr->lbam_addr, tf->lbam);
299 out_be32(ioaddr->lbah_addr, tf->lbah);
300 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
301 tf->feature,
302 tf->nsect,
303 tf->lbal,
304 tf->lbam,
305 tf->lbah);
306 }
307
308 if (tf->flags & ATA_TFLAG_DEVICE) {
309 out_be32(ioaddr->device_addr, tf->device);
310 VPRINTK("device 0x%X\n", tf->device);
311 }
312
313 ata_wait_idle(ap);
314}
315
316/**
317 * scc_check_status - Read device status reg & clear interrupt
318 * @ap: port where the device is
319 *
320 * Note: Original code is ata_check_status().
321 */
322
323static u8 scc_check_status (struct ata_port *ap)
324{
325 return in_be32(ap->ioaddr.status_addr);
326}
327
328/**
329 * scc_tf_read - input device's ATA taskfile shadow registers
330 * @ap: Port from which input is read
331 * @tf: ATA taskfile register set for storing input
332 *
333 * Note: Original code is ata_tf_read().
334 */
335
336static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf)
337{
338 struct ata_ioports *ioaddr = &ap->ioaddr;
339
340 tf->command = scc_check_status(ap);
341 tf->feature = in_be32(ioaddr->error_addr);
342 tf->nsect = in_be32(ioaddr->nsect_addr);
343 tf->lbal = in_be32(ioaddr->lbal_addr);
344 tf->lbam = in_be32(ioaddr->lbam_addr);
345 tf->lbah = in_be32(ioaddr->lbah_addr);
346 tf->device = in_be32(ioaddr->device_addr);
347
348 if (tf->flags & ATA_TFLAG_LBA48) {
349 out_be32(ioaddr->ctl_addr, tf->ctl | ATA_HOB);
350 tf->hob_feature = in_be32(ioaddr->error_addr);
351 tf->hob_nsect = in_be32(ioaddr->nsect_addr);
352 tf->hob_lbal = in_be32(ioaddr->lbal_addr);
353 tf->hob_lbam = in_be32(ioaddr->lbam_addr);
354 tf->hob_lbah = in_be32(ioaddr->lbah_addr);
355 }
356}
357
358/**
359 * scc_exec_command - issue ATA command to host controller
360 * @ap: port to which command is being issued
361 * @tf: ATA taskfile register set
362 *
363 * Note: Original code is ata_exec_command().
364 */
365
366static void scc_exec_command (struct ata_port *ap,
367 const struct ata_taskfile *tf)
368{
369 DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
370
371 out_be32(ap->ioaddr.command_addr, tf->command);
372 ata_pause(ap);
373}
374
375/**
376 * scc_check_altstatus - Read device alternate status reg
377 * @ap: port where the device is
378 */
379
380static u8 scc_check_altstatus (struct ata_port *ap)
381{
382 return in_be32(ap->ioaddr.altstatus_addr);
383}
384
385/**
386 * scc_std_dev_select - Select device 0/1 on ATA bus
387 * @ap: ATA channel to manipulate
388 * @device: ATA device (numbered from zero) to select
389 *
390 * Note: Original code is ata_std_dev_select().
391 */
392
393static void scc_std_dev_select (struct ata_port *ap, unsigned int device)
394{
395 u8 tmp;
396
397 if (device == 0)
398 tmp = ATA_DEVICE_OBS;
399 else
400 tmp = ATA_DEVICE_OBS | ATA_DEV1;
401
402 out_be32(ap->ioaddr.device_addr, tmp);
403 ata_pause(ap);
404}
405
406/**
407 * scc_bmdma_setup - Set up PCI IDE BMDMA transaction
408 * @qc: Info associated with this ATA transaction.
409 *
410 * Note: Original code is ata_bmdma_setup().
411 */
412
413static void scc_bmdma_setup (struct ata_queued_cmd *qc)
414{
415 struct ata_port *ap = qc->ap;
416 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
417 u8 dmactl;
418 void __iomem *mmio = ap->ioaddr.bmdma_addr;
419
420 /* load PRD table addr */
421 out_be32(mmio + SCC_DMA_TABLE_OFS, ap->prd_dma);
422
423 /* specify data direction, triple-check start bit is clear */
424 dmactl = in_be32(mmio + SCC_DMA_CMD);
425 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
426 if (!rw)
427 dmactl |= ATA_DMA_WR;
428 out_be32(mmio + SCC_DMA_CMD, dmactl);
429
430 /* issue r/w command */
431 ap->ops->exec_command(ap, &qc->tf);
432}
433
434/**
435 * scc_bmdma_start - Start a PCI IDE BMDMA transaction
436 * @qc: Info associated with this ATA transaction.
437 *
438 * Note: Original code is ata_bmdma_start().
439 */
440
441static void scc_bmdma_start (struct ata_queued_cmd *qc)
442{
443 struct ata_port *ap = qc->ap;
444 u8 dmactl;
445 void __iomem *mmio = ap->ioaddr.bmdma_addr;
446
447 /* start host DMA transaction */
448 dmactl = in_be32(mmio + SCC_DMA_CMD);
449 out_be32(mmio + SCC_DMA_CMD, dmactl | ATA_DMA_START);
450}
451
452/**
453 * scc_devchk - PATA device presence detection
454 * @ap: ATA channel to examine
455 * @device: Device to examine (starting at zero)
456 *
457 * Note: Original code is ata_devchk().
458 */
459
460static unsigned int scc_devchk (struct ata_port *ap,
461 unsigned int device)
462{
463 struct ata_ioports *ioaddr = &ap->ioaddr;
464 u8 nsect, lbal;
465
466 ap->ops->dev_select(ap, device);
467
468 out_be32(ioaddr->nsect_addr, 0x55);
469 out_be32(ioaddr->lbal_addr, 0xaa);
470
471 out_be32(ioaddr->nsect_addr, 0xaa);
472 out_be32(ioaddr->lbal_addr, 0x55);
473
474 out_be32(ioaddr->nsect_addr, 0x55);
475 out_be32(ioaddr->lbal_addr, 0xaa);
476
477 nsect = in_be32(ioaddr->nsect_addr);
478 lbal = in_be32(ioaddr->lbal_addr);
479
480 if ((nsect == 0x55) && (lbal == 0xaa))
481 return 1; /* we found a device */
482
483 return 0; /* nothing found */
484}
485
486/**
487 * scc_bus_post_reset - PATA device post reset
488 *
489 * Note: Original code is ata_bus_post_reset().
490 */
491
492static void scc_bus_post_reset (struct ata_port *ap, unsigned int devmask)
493{
494 struct ata_ioports *ioaddr = &ap->ioaddr;
495 unsigned int dev0 = devmask & (1 << 0);
496 unsigned int dev1 = devmask & (1 << 1);
497 unsigned long timeout;
498
499 /* if device 0 was found in ata_devchk, wait for its
500 * BSY bit to clear
501 */
502 if (dev0)
503 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
504
505 /* if device 1 was found in ata_devchk, wait for
506 * register access, then wait for BSY to clear
507 */
508 timeout = jiffies + ATA_TMOUT_BOOT;
509 while (dev1) {
510 u8 nsect, lbal;
511
512 ap->ops->dev_select(ap, 1);
513 nsect = in_be32(ioaddr->nsect_addr);
514 lbal = in_be32(ioaddr->lbal_addr);
515 if ((nsect == 1) && (lbal == 1))
516 break;
517 if (time_after(jiffies, timeout)) {
518 dev1 = 0;
519 break;
520 }
521 msleep(50); /* give drive a breather */
522 }
523 if (dev1)
524 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
525
526 /* is all this really necessary? */
527 ap->ops->dev_select(ap, 0);
528 if (dev1)
529 ap->ops->dev_select(ap, 1);
530 if (dev0)
531 ap->ops->dev_select(ap, 0);
532}
533
534/**
535 * scc_bus_softreset - PATA device software reset
536 *
537 * Note: Original code is ata_bus_softreset().
538 */
539
540static unsigned int scc_bus_softreset (struct ata_port *ap,
541 unsigned int devmask)
542{
543 struct ata_ioports *ioaddr = &ap->ioaddr;
544
545 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
546
547 /* software reset. causes dev0 to be selected */
548 out_be32(ioaddr->ctl_addr, ap->ctl);
549 udelay(20);
550 out_be32(ioaddr->ctl_addr, ap->ctl | ATA_SRST);
551 udelay(20);
552 out_be32(ioaddr->ctl_addr, ap->ctl);
553
554 /* spec mandates ">= 2ms" before checking status.
555 * We wait 150ms, because that was the magic delay used for
556 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
557 * between when the ATA command register is written, and then
558 * status is checked. Because waiting for "a while" before
559 * checking status is fine, post SRST, we perform this magic
560 * delay here as well.
561 *
562 * Old drivers/ide uses the 2mS rule and then waits for ready
563 */
564 msleep(150);
565
566 /* Before we perform post reset processing we want to see if
567 * the bus shows 0xFF because the odd clown forgets the D7
568 * pulldown resistor.
569 */
570 if (scc_check_status(ap) == 0xFF)
571 return 0;
572
573 scc_bus_post_reset(ap, devmask);
574
575 return 0;
576}
577
578/**
579 * scc_std_softreset - reset host port via ATA SRST
580 * @ap: port to reset
581 * @classes: resulting classes of attached devices
582 *
583 * Note: Original code is ata_std_softreset().
584 */
585
586static int scc_std_softreset (struct ata_port *ap, unsigned int *classes)
587{
588 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
589 unsigned int devmask = 0, err_mask;
590 u8 err;
591
592 DPRINTK("ENTER\n");
593
594 if (ata_port_offline(ap)) {
595 classes[0] = ATA_DEV_NONE;
596 goto out;
597 }
598
599 /* determine if device 0/1 are present */
600 if (scc_devchk(ap, 0))
601 devmask |= (1 << 0);
602 if (slave_possible && scc_devchk(ap, 1))
603 devmask |= (1 << 1);
604
605 /* select device 0 again */
606 ap->ops->dev_select(ap, 0);
607
608 /* issue bus reset */
609 DPRINTK("about to softreset, devmask=%x\n", devmask);
610 err_mask = scc_bus_softreset(ap, devmask);
611 if (err_mask) {
612 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
613 err_mask);
614 return -EIO;
615 }
616
617 /* determine by signature whether we have ATA or ATAPI devices */
618 classes[0] = ata_dev_try_classify(ap, 0, &err);
619 if (slave_possible && err != 0x81)
620 classes[1] = ata_dev_try_classify(ap, 1, &err);
621
622 out:
623 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
624 return 0;
625}
626
627/**
628 * scc_bmdma_stop - Stop PCI IDE BMDMA transfer
629 * @qc: Command we are ending DMA for
630 */
631
632static void scc_bmdma_stop (struct ata_queued_cmd *qc)
633{
634 struct ata_port *ap = qc->ap;
635 void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
636 void __iomem *bmid_base = ap->host->iomap[SCC_BMID_BAR];
637 u32 reg;
638
639 while (1) {
640 reg = in_be32(bmid_base + SCC_DMA_INTST);
641
642 if (reg & INTSTS_SERROR) {
643 printk(KERN_WARNING "%s: SERROR\n", DRV_NAME);
644 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_SERROR|INTSTS_BMSINT);
645 out_be32(bmid_base + SCC_DMA_CMD,
646 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
647 continue;
648 }
649
650 if (reg & INTSTS_PRERR) {
651 u32 maea0, maec0;
652 maea0 = in_be32(ctrl_base + SCC_CTL_MAEA0);
653 maec0 = in_be32(ctrl_base + SCC_CTL_MAEC0);
654 printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", DRV_NAME, maea0, maec0);
655 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_PRERR|INTSTS_BMSINT);
656 out_be32(bmid_base + SCC_DMA_CMD,
657 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
658 continue;
659 }
660
661 if (reg & INTSTS_RERR) {
662 printk(KERN_WARNING "%s: Response Error\n", DRV_NAME);
663 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_RERR|INTSTS_BMSINT);
664 out_be32(bmid_base + SCC_DMA_CMD,
665 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
666 continue;
667 }
668
669 if (reg & INTSTS_ICERR) {
670 out_be32(bmid_base + SCC_DMA_CMD,
671 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
672 printk(KERN_WARNING "%s: Illegal Configuration\n", DRV_NAME);
673 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ICERR|INTSTS_BMSINT);
674 continue;
675 }
676
677 if (reg & INTSTS_BMSINT) {
678 unsigned int classes;
679 printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME);
680 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT);
681 /* TBD: SW reset */
682 scc_std_softreset(ap, &classes);
683 continue;
684 }
685
686 if (reg & INTSTS_BMHE) {
687 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMHE);
688 continue;
689 }
690
691 if (reg & INTSTS_ACTEINT) {
692 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ACTEINT);
693 continue;
694 }
695
696 if (reg & INTSTS_IOIRQS) {
697 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_IOIRQS);
698 continue;
699 }
700 break;
701 }
702
703 /* clear start/stop bit */
704 out_be32(bmid_base + SCC_DMA_CMD,
705 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
706
707 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
708 ata_altstatus(ap); /* dummy read */
709}
710
711/**
712 * scc_bmdma_status - Read PCI IDE BMDMA status
713 * @ap: Port associated with this ATA transaction.
714 */
715
716static u8 scc_bmdma_status (struct ata_port *ap)
717{
718 u8 host_stat;
719 void __iomem *mmio = ap->ioaddr.bmdma_addr;
720
721 host_stat = in_be32(mmio + SCC_DMA_STATUS);
722
723 /* Workaround for PTERADD: emulate DMA_INTR when
724 * - IDE_STATUS[ERR] = 1
725 * - INT_STATUS[INTRQ] = 1
726 * - DMA_STATUS[IORACTA] = 1
727 */
728 if (!(host_stat & ATA_DMA_INTR)) {
729 u32 int_status = in_be32(mmio + SCC_DMA_INTST);
730 if (ata_altstatus(ap) & ATA_ERR &&
731 int_status & INTSTS_INTRQ &&
732 host_stat & ATA_DMA_ACTIVE)
733 host_stat |= ATA_DMA_INTR;
734 }
735
736 return host_stat;
737}
738
739/**
740 * scc_data_xfer - Transfer data by PIO
741 * @adev: device for this I/O
742 * @buf: data buffer
743 * @buflen: buffer length
744 * @write_data: read/write
745 *
746 * Note: Original code is ata_data_xfer().
747 */
748
749static void scc_data_xfer (struct ata_device *adev, unsigned char *buf,
750 unsigned int buflen, int write_data)
751{
752 struct ata_port *ap = adev->ap;
753 unsigned int words = buflen >> 1;
754 unsigned int i;
755 u16 *buf16 = (u16 *) buf;
756 void __iomem *mmio = ap->ioaddr.data_addr;
757
758 /* Transfer multiple of 2 bytes */
759 if (write_data) {
760 for (i = 0; i < words; i++)
761 out_be32(mmio, cpu_to_le16(buf16[i]));
762 } else {
763 for (i = 0; i < words; i++)
764 buf16[i] = le16_to_cpu(in_be32(mmio));
765 }
766
767 /* Transfer trailing 1 byte, if any. */
768 if (unlikely(buflen & 0x01)) {
769 u16 align_buf[1] = { 0 };
770 unsigned char *trailing_buf = buf + buflen - 1;
771
772 if (write_data) {
773 memcpy(align_buf, trailing_buf, 1);
774 out_be32(mmio, cpu_to_le16(align_buf[0]));
775 } else {
776 align_buf[0] = le16_to_cpu(in_be32(mmio));
777 memcpy(trailing_buf, align_buf, 1);
778 }
779 }
780}
781
782/**
783 * scc_irq_on - Enable interrupts on a port.
784 * @ap: Port on which interrupts are enabled.
785 *
786 * Note: Original code is ata_irq_on().
787 */
788
789static u8 scc_irq_on (struct ata_port *ap)
790{
791 struct ata_ioports *ioaddr = &ap->ioaddr;
792 u8 tmp;
793
794 ap->ctl &= ~ATA_NIEN;
795 ap->last_ctl = ap->ctl;
796
797 out_be32(ioaddr->ctl_addr, ap->ctl);
798 tmp = ata_wait_idle(ap);
799
800 ap->ops->irq_clear(ap);
801
802 return tmp;
803}
804
805/**
806 * scc_irq_ack - Acknowledge a device interrupt.
807 * @ap: Port on which interrupts are enabled.
808 *
809 * Note: Original code is ata_irq_ack().
810 */
811
812static u8 scc_irq_ack (struct ata_port *ap, unsigned int chk_drq)
813{
814 unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
815 u8 host_stat, post_stat, status;
816
817 status = ata_busy_wait(ap, bits, 1000);
818 if (status & bits)
819 if (ata_msg_err(ap))
820 printk(KERN_ERR "abnormal status 0x%X\n", status);
821
822 /* get controller status; clear intr, err bits */
823 host_stat = in_be32(ap->ioaddr.bmdma_addr + SCC_DMA_STATUS);
824 out_be32(ap->ioaddr.bmdma_addr + SCC_DMA_STATUS,
825 host_stat | ATA_DMA_INTR | ATA_DMA_ERR);
826
827 post_stat = in_be32(ap->ioaddr.bmdma_addr + SCC_DMA_STATUS);
828
829 if (ata_msg_intr(ap))
830 printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
831 __FUNCTION__,
832 host_stat, post_stat, status);
833
834 return status;
835}
836
837/**
838 * scc_bmdma_freeze - Freeze BMDMA controller port
839 * @ap: port to freeze
840 *
841 * Note: Original code is ata_bmdma_freeze().
842 */
843
844static void scc_bmdma_freeze (struct ata_port *ap)
845{
846 struct ata_ioports *ioaddr = &ap->ioaddr;
847
848 ap->ctl |= ATA_NIEN;
849 ap->last_ctl = ap->ctl;
850
851 out_be32(ioaddr->ctl_addr, ap->ctl);
852
853 /* Under certain circumstances, some controllers raise IRQ on
854 * ATA_NIEN manipulation. Also, many controllers fail to mask
855 * previously pending IRQ on ATA_NIEN assertion. Clear it.
856 */
857 ata_chk_status(ap);
858
859 ap->ops->irq_clear(ap);
860}
861
862/**
863 * scc_pata_prereset - prepare for reset
864 * @ap: ATA port to be reset
865 */
866
867static int scc_pata_prereset (struct ata_port *ap)
868{
869 ap->cbl = ATA_CBL_PATA80;
870 return ata_std_prereset(ap);
871}
872
873/**
874 * scc_std_postreset - standard postreset callback
875 * @ap: the target ata_port
876 * @classes: classes of attached devices
877 *
878 * Note: Original code is ata_std_postreset().
879 */
880
881static void scc_std_postreset (struct ata_port *ap, unsigned int *classes)
882{
883 DPRINTK("ENTER\n");
884
885 /* re-enable interrupts */
886 if (!ap->ops->error_handler)
887 ap->ops->irq_on(ap);
888
889 /* is double-select really necessary? */
890 if (classes[0] != ATA_DEV_NONE)
891 ap->ops->dev_select(ap, 1);
892 if (classes[1] != ATA_DEV_NONE)
893 ap->ops->dev_select(ap, 0);
894
895 /* bail out if no device is present */
896 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
897 DPRINTK("EXIT, no device\n");
898 return;
899 }
900
901 /* set up device control */
902 if (ap->ioaddr.ctl_addr)
903 out_be32(ap->ioaddr.ctl_addr, ap->ctl);
904
905 DPRINTK("EXIT\n");
906}
907
908/**
909 * scc_error_handler - Stock error handler for BMDMA controller
910 * @ap: port to handle error for
911 */
912
913static void scc_error_handler (struct ata_port *ap)
914{
915 ata_bmdma_drive_eh(ap, scc_pata_prereset, scc_std_softreset, NULL,
916 scc_std_postreset);
917}
918
919/**
920 * scc_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
921 * @ap: Port associated with this ATA transaction.
922 *
923 * Note: Original code is ata_bmdma_irq_clear().
924 */
925
926static void scc_bmdma_irq_clear (struct ata_port *ap)
927{
928 void __iomem *mmio = ap->ioaddr.bmdma_addr;
929
930 if (!mmio)
931 return;
932
933 out_be32(mmio + SCC_DMA_STATUS, in_be32(mmio + SCC_DMA_STATUS));
934}
935
936/**
937 * scc_port_start - Set port up for dma.
938 * @ap: Port to initialize
939 *
940 * Allocate space for PRD table using ata_port_start().
941 * Set PRD table address for PTERADD. (PRD Transfer End Read)
942 */
943
944static int scc_port_start (struct ata_port *ap)
945{
946 void __iomem *mmio = ap->ioaddr.bmdma_addr;
947 int rc;
948
949 rc = ata_port_start(ap);
950 if (rc)
951 return rc;
952
953 out_be32(mmio + SCC_DMA_PTERADD, ap->prd_dma);
954 return 0;
955}
956
957/**
958 * scc_port_stop - Undo scc_port_start()
959 * @ap: Port to shut down
960 *
961 * Reset PTERADD.
962 */
963
964static void scc_port_stop (struct ata_port *ap)
965{
966 void __iomem *mmio = ap->ioaddr.bmdma_addr;
967
968 out_be32(mmio + SCC_DMA_PTERADD, 0);
969}
970
971static struct scsi_host_template scc_sht = {
972 .module = THIS_MODULE,
973 .name = DRV_NAME,
974 .ioctl = ata_scsi_ioctl,
975 .queuecommand = ata_scsi_queuecmd,
976 .can_queue = ATA_DEF_QUEUE,
977 .this_id = ATA_SHT_THIS_ID,
978 .sg_tablesize = LIBATA_MAX_PRD,
979 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
980 .emulated = ATA_SHT_EMULATED,
981 .use_clustering = ATA_SHT_USE_CLUSTERING,
982 .proc_name = DRV_NAME,
983 .dma_boundary = ATA_DMA_BOUNDARY,
984 .slave_configure = ata_scsi_slave_config,
985 .slave_destroy = ata_scsi_slave_destroy,
986 .bios_param = ata_std_bios_param,
987#ifdef CONFIG_PM
988 .resume = ata_scsi_device_resume,
989 .suspend = ata_scsi_device_suspend,
990#endif
991};
992
993static const struct ata_port_operations scc_pata_ops = {
994 .port_disable = ata_port_disable,
995 .set_piomode = scc_set_piomode,
996 .set_dmamode = scc_set_dmamode,
997 .mode_filter = ata_pci_default_filter,
998
999 .tf_load = scc_tf_load,
1000 .tf_read = scc_tf_read,
1001 .exec_command = scc_exec_command,
1002 .check_status = scc_check_status,
1003 .check_altstatus = scc_check_altstatus,
1004 .dev_select = scc_std_dev_select,
1005
1006 .bmdma_setup = scc_bmdma_setup,
1007 .bmdma_start = scc_bmdma_start,
1008 .bmdma_stop = scc_bmdma_stop,
1009 .bmdma_status = scc_bmdma_status,
1010 .data_xfer = scc_data_xfer,
1011
1012 .qc_prep = ata_qc_prep,
1013 .qc_issue = ata_qc_issue_prot,
1014
1015 .freeze = scc_bmdma_freeze,
1016 .error_handler = scc_error_handler,
1017 .post_internal_cmd = scc_bmdma_stop,
1018
1019 .irq_handler = ata_interrupt,
1020 .irq_clear = scc_bmdma_irq_clear,
1021 .irq_on = scc_irq_on,
1022 .irq_ack = scc_irq_ack,
1023
1024 .port_start = scc_port_start,
1025 .port_stop = scc_port_stop,
1026};
1027
1028static struct ata_port_info scc_port_info[] = {
1029 {
1030 .sht = &scc_sht,
1031 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY,
1032 .pio_mask = 0x1f, /* pio0-4 */
1033 .mwdma_mask = 0x00,
1034 .udma_mask = ATA_UDMA6,
1035 .port_ops = &scc_pata_ops,
1036 },
1037};
1038
1039/**
1040 * scc_reset_controller - initialize SCC PATA controller.
1041 */
1042
1043static int scc_reset_controller(struct ata_probe_ent *probe_ent)
1044{
1045 void __iomem *ctrl_base = probe_ent->iomap[SCC_CTRL_BAR];
1046 void __iomem *bmid_base = probe_ent->iomap[SCC_BMID_BAR];
1047 void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
1048 void __iomem *mode_port = ctrl_base + SCC_CTL_MODEREG;
1049 void __iomem *ecmode_port = ctrl_base + SCC_CTL_ECMODE;
1050 void __iomem *intmask_port = bmid_base + SCC_DMA_INTMASK;
1051 void __iomem *dmastatus_port = bmid_base + SCC_DMA_STATUS;
1052 u32 reg = 0;
1053
1054 out_be32(cckctrl_port, reg);
1055 reg |= CCKCTRL_ATACLKOEN;
1056 out_be32(cckctrl_port, reg);
1057 reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
1058 out_be32(cckctrl_port, reg);
1059 reg |= CCKCTRL_CRST;
1060 out_be32(cckctrl_port, reg);
1061
1062 for (;;) {
1063 reg = in_be32(cckctrl_port);
1064 if (reg & CCKCTRL_CRST)
1065 break;
1066 udelay(5000);
1067 }
1068
1069 reg |= CCKCTRL_ATARESET;
1070 out_be32(cckctrl_port, reg);
1071 out_be32(ecmode_port, ECMODE_VALUE);
1072 out_be32(mode_port, MODE_JCUSFEN);
1073 out_be32(intmask_port, INTMASK_MSK);
1074
1075 if (in_be32(dmastatus_port) & QCHSD_STPDIAG) {
1076 printk(KERN_WARNING "%s: failed to detect 80c cable. (PDIAG# is high)\n", DRV_NAME);
1077 return -EIO;
1078 }
1079
1080 return 0;
1081}
1082
1083/**
1084 * scc_setup_ports - initialize ioaddr with SCC PATA port offsets.
1085 * @ioaddr: IO address structure to be initialized
1086 * @base: base address of BMID region
1087 */
1088
1089static void scc_setup_ports (struct ata_ioports *ioaddr, void __iomem *base)
1090{
1091 ioaddr->cmd_addr = base + SCC_REG_CMD_ADDR;
1092 ioaddr->altstatus_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS;
1093 ioaddr->ctl_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS;
1094 ioaddr->bmdma_addr = base;
1095 ioaddr->data_addr = ioaddr->cmd_addr + SCC_REG_DATA;
1096 ioaddr->error_addr = ioaddr->cmd_addr + SCC_REG_ERR;
1097 ioaddr->feature_addr = ioaddr->cmd_addr + SCC_REG_FEATURE;
1098 ioaddr->nsect_addr = ioaddr->cmd_addr + SCC_REG_NSECT;
1099 ioaddr->lbal_addr = ioaddr->cmd_addr + SCC_REG_LBAL;
1100 ioaddr->lbam_addr = ioaddr->cmd_addr + SCC_REG_LBAM;
1101 ioaddr->lbah_addr = ioaddr->cmd_addr + SCC_REG_LBAH;
1102 ioaddr->device_addr = ioaddr->cmd_addr + SCC_REG_DEVICE;
1103 ioaddr->status_addr = ioaddr->cmd_addr + SCC_REG_STATUS;
1104 ioaddr->command_addr = ioaddr->cmd_addr + SCC_REG_CMD;
1105}
1106
1107static int scc_host_init(struct ata_probe_ent *probe_ent)
1108{
1109 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1110 int rc;
1111
1112 rc = scc_reset_controller(probe_ent);
1113 if (rc)
1114 return rc;
1115
1116 probe_ent->n_ports = 1;
1117
1118 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1119 if (rc)
1120 return rc;
1121 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1122 if (rc)
1123 return rc;
1124
1125 scc_setup_ports(&probe_ent->port[0], probe_ent->iomap[SCC_BMID_BAR]);
1126
1127 pci_set_master(pdev);
1128
1129 return 0;
1130}
1131
1132/**
1133 * scc_init_one - Register SCC PATA device with kernel services
1134 * @pdev: PCI device to register
1135 * @ent: Entry in scc_pci_tbl matching with @pdev
1136 *
1137 * LOCKING:
1138 * Inherited from PCI layer (may sleep).
1139 *
1140 * RETURNS:
1141 * Zero on success, or -ERRNO value.
1142 */
1143
1144static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1145{
1146 static int printed_version;
1147 unsigned int board_idx = (unsigned int) ent->driver_data;
1148 struct device *dev = &pdev->dev;
1149 struct ata_probe_ent *probe_ent;
1150 int rc;
1151
1152 if (!printed_version++)
1153 dev_printk(KERN_DEBUG, &pdev->dev,
1154 "version " DRV_VERSION "\n");
1155
1156 rc = pcim_enable_device(pdev);
1157 if (rc)
1158 return rc;
1159
1160 rc = pcim_iomap_regions(pdev, (1 << SCC_CTRL_BAR) | (1 << SCC_BMID_BAR), DRV_NAME);
1161 if (rc == -EBUSY)
1162 pcim_pin_device(pdev);
1163 if (rc)
1164 return rc;
1165
1166 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
1167 if (!probe_ent)
1168 return -ENOMEM;
1169
1170 probe_ent->dev = dev;
1171 INIT_LIST_HEAD(&probe_ent->node);
1172
1173 probe_ent->sht = scc_port_info[board_idx].sht;
1174 probe_ent->port_flags = scc_port_info[board_idx].flags;
1175 probe_ent->pio_mask = scc_port_info[board_idx].pio_mask;
1176 probe_ent->udma_mask = scc_port_info[board_idx].udma_mask;
1177 probe_ent->port_ops = scc_port_info[board_idx].port_ops;
1178
1179 probe_ent->irq = pdev->irq;
1180 probe_ent->irq_flags = IRQF_SHARED;
1181 probe_ent->iomap = pcim_iomap_table(pdev);
1182
1183 rc = scc_host_init(probe_ent);
1184 if (rc)
1185 return rc;
1186
1187 if (!ata_device_add(probe_ent))
1188 return -ENODEV;
1189
1190 devm_kfree(dev, probe_ent);
1191 return 0;
1192}
1193
1194static struct pci_driver scc_pci_driver = {
1195 .name = DRV_NAME,
1196 .id_table = scc_pci_tbl,
1197 .probe = scc_init_one,
1198 .remove = ata_pci_remove_one,
1199#ifdef CONFIG_PM
1200 .suspend = ata_pci_device_suspend,
1201 .resume = ata_pci_device_resume,
1202#endif
1203};
1204
1205static int __init scc_init (void)
1206{
1207 int rc;
1208
1209 DPRINTK("pci_register_driver\n");
1210 rc = pci_register_driver(&scc_pci_driver);
1211 if (rc)
1212 return rc;
1213
1214 DPRINTK("done\n");
1215 return 0;
1216}
1217
1218static void __exit scc_exit (void)
1219{
1220 pci_unregister_driver(&scc_pci_driver);
1221}
1222
1223module_init(scc_init);
1224module_exit(scc_exit);
1225
1226MODULE_AUTHOR("Toshiba corp");
1227MODULE_DESCRIPTION("SCSI low-level driver for Toshiba SCC PATA controller");
1228MODULE_LICENSE("GPL");
1229MODULE_DEVICE_TABLE(pci, scc_pci_tbl);
1230MODULE_VERSION(DRV_VERSION);
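In the new pata_scc.c above, scc_set_piomode() selects the 100 MHz or 133 MHz row of the timing tables from the CCKCTRL_ATACLKOEN bit and packs the JCHST and JCHHT entries into one 32-bit PIOSHT value. A stand-alone sketch of just that lookup and packing, with the table rows copied from the file (the register layout is taken from the driver, not independently verified):

#include <stdio.h>

static const unsigned long JCHSTtbl[2][7] = {
	{0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00},	/* 100MHz */
	{0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00}	/* 133MHz */
};
static const unsigned long JCHHTtbl[2][7] = {
	{0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00},	/* 100MHz */
	{0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00}	/* 133MHz */
};

int main(void)
{
	int ataclkoen = 1;			/* pretend CCKCTRL_ATACLKOEN is set */
	int offset = ataclkoen ? 1 : 0;		/* 1 selects the 133MHz row */
	int pio = 4;				/* adev->pio_mode - XFER_PIO_0 */
	unsigned long piosht = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];

	printf("SCC_CTL_PIOSHT for PIO%d: 0x%08lx\n", pio, piosht);
	return 0;
}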
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index ad5b43fef3..598eef810a 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -41,7 +41,7 @@
41#include <linux/libata.h> 41#include <linux/libata.h>
42 42
43#define DRV_NAME "pata_serverworks" 43#define DRV_NAME "pata_serverworks"
44#define DRV_VERSION "0.3.9" 44#define DRV_VERSION "0.4.0"
45 45
46#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */ 46#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
47#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */ 47#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
@@ -319,8 +319,10 @@ static struct scsi_host_template serverworks_sht = {
319 .slave_configure = ata_scsi_slave_config, 319 .slave_configure = ata_scsi_slave_config,
320 .slave_destroy = ata_scsi_slave_destroy, 320 .slave_destroy = ata_scsi_slave_destroy,
321 .bios_param = ata_std_bios_param, 321 .bios_param = ata_std_bios_param,
322#ifdef CONFIG_PM
322 .resume = ata_scsi_device_resume, 323 .resume = ata_scsi_device_resume,
323 .suspend = ata_scsi_device_suspend, 324 .suspend = ata_scsi_device_suspend,
325#endif
324}; 326};
325 327
326static struct ata_port_operations serverworks_osb4_port_ops = { 328static struct ata_port_operations serverworks_osb4_port_ops = {
@@ -548,6 +550,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
548 return ata_pci_init_one(pdev, port_info, ports); 550 return ata_pci_init_one(pdev, port_info, ports);
549} 551}
550 552
553#ifdef CONFIG_PM
551static int serverworks_reinit_one(struct pci_dev *pdev) 554static int serverworks_reinit_one(struct pci_dev *pdev)
552{ 555{
553 /* Force master latency timer to 64 PCI clocks */ 556 /* Force master latency timer to 64 PCI clocks */
@@ -571,6 +574,7 @@ static int serverworks_reinit_one(struct pci_dev *pdev)
571 } 574 }
572 return ata_pci_device_resume(pdev); 575 return ata_pci_device_resume(pdev);
573} 576}
577#endif
574 578
575static const struct pci_device_id serverworks[] = { 579static const struct pci_device_id serverworks[] = {
576 { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0}, 580 { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0},
@@ -587,8 +591,10 @@ static struct pci_driver serverworks_pci_driver = {
587 .id_table = serverworks, 591 .id_table = serverworks,
588 .probe = serverworks_init_one, 592 .probe = serverworks_init_one,
589 .remove = ata_pci_remove_one, 593 .remove = ata_pci_remove_one,
594#ifdef CONFIG_PM
590 .suspend = ata_pci_device_suspend, 595 .suspend = ata_pci_device_suspend,
591 .resume = serverworks_reinit_one, 596 .resume = serverworks_reinit_one,
597#endif
592}; 598};
593 599
594static int __init serverworks_init(void) 600static int __init serverworks_init(void)
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index ed79fabe02..dab2889a55 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -33,7 +33,7 @@
33#include <linux/libata.h> 33#include <linux/libata.h>
34 34
35#define DRV_NAME "pata_sil680" 35#define DRV_NAME "pata_sil680"
36#define DRV_VERSION "0.4.1" 36#define DRV_VERSION "0.4.5"
37 37
38/** 38/**
39 * sil680_selreg - return register base 39 * sil680_selreg - return register base
@@ -139,10 +139,13 @@ static void sil680_set_piomode(struct ata_port *ap, struct ata_device *adev)
139 139
140 unsigned long tfaddr = sil680_selreg(ap, 0x02); 140 unsigned long tfaddr = sil680_selreg(ap, 0x02);
141 unsigned long addr = sil680_seldev(ap, adev, 0x04); 141 unsigned long addr = sil680_seldev(ap, adev, 0x04);
142 unsigned long addr_mask = 0x80 + 4 * ap->port_no;
142 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 143 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
143 int pio = adev->pio_mode - XFER_PIO_0; 144 int pio = adev->pio_mode - XFER_PIO_0;
144 int lowest_pio = pio; 145 int lowest_pio = pio;
146 int port_shift = 4 * adev->devno;
145 u16 reg; 147 u16 reg;
148 u8 mode;
146 149
147 struct ata_device *pair = ata_dev_pair(adev); 150 struct ata_device *pair = ata_dev_pair(adev);
148 151
@@ -153,10 +156,17 @@ static void sil680_set_piomode(struct ata_port *ap, struct ata_device *adev)
153 pci_write_config_word(pdev, tfaddr, speed_t[lowest_pio]); 156 pci_write_config_word(pdev, tfaddr, speed_t[lowest_pio]);
154 157
155 pci_read_config_word(pdev, tfaddr-2, &reg); 158 pci_read_config_word(pdev, tfaddr-2, &reg);
159 pci_read_config_byte(pdev, addr_mask, &mode);
160
156 reg &= ~0x0200; /* Clear IORDY */ 161 reg &= ~0x0200; /* Clear IORDY */
157 if (ata_pio_need_iordy(adev)) 162 mode &= ~(3 << port_shift); /* Clear IORDY and DMA bits */
163
164 if (ata_pio_need_iordy(adev)) {
158 reg |= 0x0200; /* Enable IORDY */ 165 reg |= 0x0200; /* Enable IORDY */
166 mode |= 1 << port_shift;
167 }
159 pci_write_config_word(pdev, tfaddr-2, reg); 168 pci_write_config_word(pdev, tfaddr-2, reg);
169 pci_write_config_byte(pdev, addr_mask, mode);
160} 170}
161 171
162/** 172/**
@@ -226,6 +236,10 @@ static struct scsi_host_template sil680_sht = {
226 .slave_configure = ata_scsi_slave_config, 236 .slave_configure = ata_scsi_slave_config,
227 .slave_destroy = ata_scsi_slave_destroy, 237 .slave_destroy = ata_scsi_slave_destroy,
228 .bios_param = ata_std_bios_param, 238 .bios_param = ata_std_bios_param,
239#ifdef CONFIG_PM
240 .suspend = ata_scsi_device_suspend,
241 .resume = ata_scsi_device_resume,
242#endif
229}; 243};
230 244
231static struct ata_port_operations sil680_port_ops = { 245static struct ata_port_operations sil680_port_ops = {
@@ -367,11 +381,13 @@ static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
367 return ata_pci_init_one(pdev, port_info, 2); 381 return ata_pci_init_one(pdev, port_info, 2);
368} 382}
369 383
384#ifdef CONFIG_PM
370static int sil680_reinit_one(struct pci_dev *pdev) 385static int sil680_reinit_one(struct pci_dev *pdev)
371{ 386{
372 sil680_init_chip(pdev); 387 sil680_init_chip(pdev);
373 return ata_pci_device_resume(pdev); 388 return ata_pci_device_resume(pdev);
374} 389}
390#endif
375 391
376static const struct pci_device_id sil680[] = { 392static const struct pci_device_id sil680[] = {
377 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), }, 393 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), },
@@ -384,8 +400,10 @@ static struct pci_driver sil680_pci_driver = {
384 .id_table = sil680, 400 .id_table = sil680,
385 .probe = sil680_init_one, 401 .probe = sil680_init_one,
386 .remove = ata_pci_remove_one, 402 .remove = ata_pci_remove_one,
403#ifdef CONFIG_PM
387 .suspend = ata_pci_device_suspend, 404 .suspend = ata_pci_device_suspend,
388 .resume = sil680_reinit_one, 405 .resume = sil680_reinit_one,
406#endif
389}; 407};
390 408
391static int __init sil680_init(void) 409static int __init sil680_init(void)
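The sil680_set_piomode() change adds a second write: besides the IORDY bit in the task-file timing word, it now updates a mode byte at PCI config offset 0x80 + 4 * port_no, clearing two bits per device and setting the low bit back when IORDY is wanted. A small sketch of just that bit arithmetic (the register layout is as the hunk uses it, not re-checked against the SiI680 documentation):

#include <stdio.h>

int main(void)
{
	unsigned char mode = 0xFF;	/* pretend value read from config space */
	int devno = 1;			/* slave device on this channel */
	int port_shift = 4 * devno;	/* each device owns a pair of bits */
	int need_iordy = 1;

	mode &= ~(3 << port_shift);	/* clear the IORDY and DMA bits for this device */
	if (need_iordy)
		mode |= 1 << port_shift;	/* re-enable IORDY only */

	printf("mode byte: 0x%02x\n", mode);	/* prints 0xdf for these inputs */
	return 0;
}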
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 560103d55b..f482078659 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -32,11 +32,10 @@
32#include <scsi/scsi_host.h> 32#include <scsi/scsi_host.h>
33#include <linux/libata.h> 33#include <linux/libata.h>
34#include <linux/ata.h> 34#include <linux/ata.h>
35#include "libata.h" 35#include "sis.h"
36 36
37#undef DRV_NAME /* already defined in libata.h, for libata-core */
38#define DRV_NAME "pata_sis" 37#define DRV_NAME "pata_sis"
39#define DRV_VERSION "0.4.5" 38#define DRV_VERSION "0.5.0"
40 39
41struct sis_chipset { 40struct sis_chipset {
42 u16 device; /* PCI host ID */ 41 u16 device; /* PCI host ID */
@@ -151,7 +150,7 @@ static int sis_66_pre_reset(struct ata_port *ap)
151 150
152 if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) { 151 if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) {
153 ata_port_disable(ap); 152 ata_port_disable(ap);
154 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id); 153 ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
155 return 0; 154 return 0;
156 } 155 }
157 /* Older chips keep cable detect in bits 4/5 of reg 0x48 */ 156 /* Older chips keep cable detect in bits 4/5 of reg 0x48 */
@@ -197,7 +196,7 @@ static int sis_old_pre_reset(struct ata_port *ap)
197 196
198 if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) { 197 if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) {
199 ata_port_disable(ap); 198 ata_port_disable(ap);
200 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id); 199 ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
201 return 0; 200 return 0;
202 } 201 }
203 ap->cbl = ATA_CBL_PATA40; 202 ap->cbl = ATA_CBL_PATA40;
@@ -576,8 +575,10 @@ static struct scsi_host_template sis_sht = {
576 .slave_configure = ata_scsi_slave_config, 575 .slave_configure = ata_scsi_slave_config,
577 .slave_destroy = ata_scsi_slave_destroy, 576 .slave_destroy = ata_scsi_slave_destroy,
578 .bios_param = ata_std_bios_param, 577 .bios_param = ata_std_bios_param,
578#ifdef CONFIG_PM
579 .resume = ata_scsi_device_resume, 579 .resume = ata_scsi_device_resume,
580 .suspend = ata_scsi_device_suspend, 580 .suspend = ata_scsi_device_suspend,
581#endif
581}; 582};
582 583
583static const struct ata_port_operations sis_133_ops = { 584static const struct ata_port_operations sis_133_ops = {
@@ -1033,8 +1034,10 @@ static struct pci_driver sis_pci_driver = {
1033 .id_table = sis_pci_tbl, 1034 .id_table = sis_pci_tbl,
1034 .probe = sis_init_one, 1035 .probe = sis_init_one,
1035 .remove = ata_pci_remove_one, 1036 .remove = ata_pci_remove_one,
1037#ifdef CONFIG_PM
1036 .suspend = ata_pci_device_suspend, 1038 .suspend = ata_pci_device_suspend,
1037 .resume = ata_pci_device_resume, 1039 .resume = ata_pci_device_resume,
1040#endif
1038}; 1041};
1039 1042
1040static int __init sis_init(void) 1043static int __init sis_init(void)
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 96e890fd64..b681441cfc 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -7,6 +7,13 @@
7 * SL82C105/Winbond 553 IDE driver 7 * SL82C105/Winbond 553 IDE driver
8 * 8 *
9 * and in part on the documentation and errata sheet 9 * and in part on the documentation and errata sheet
10 *
11 *
12 * Note: The controller like many controllers has shared timings for
13 * PIO and DMA. We thus flip to the DMA timings in dma_start and flip back
14 * in the dma_stop function. Thus we actually don't need a set_dmamode
15 * method as the PIO method is always called and will set the right PIO
16 * timing parameters.
10 */ 17 */
11 18
12#include <linux/kernel.h> 19#include <linux/kernel.h>
@@ -19,7 +26,7 @@
19#include <linux/libata.h> 26#include <linux/libata.h>
20 27
21#define DRV_NAME "pata_sl82c105" 28#define DRV_NAME "pata_sl82c105"
22#define DRV_VERSION "0.2.3" 29#define DRV_VERSION "0.3.0"
23 30
24enum { 31enum {
25 /* 32 /*
@@ -126,33 +133,6 @@ static void sl82c105_configure_dmamode(struct ata_port *ap, struct ata_device *a
126} 133}
127 134
128/** 135/**
129 * sl82c105_set_dmamode - set initial DMA mode data
130 * @ap: ATA interface
131 * @adev: ATA device
132 *
133 * Called to do the DMA mode setup. This replaces the PIO timings
134 * for the device in question. Set appropriate PIO timings not DMA
135 * timings at this point.
136 */
137
138static void sl82c105_set_dmamode(struct ata_port *ap, struct ata_device *adev)
139{
140 switch(adev->dma_mode) {
141 case XFER_MW_DMA_0:
142 sl82c105_configure_piomode(ap, adev, 0);
143 break;
144 case XFER_MW_DMA_1:
145 sl82c105_configure_piomode(ap, adev, 3);
146 break;
147 case XFER_MW_DMA_2:
148 sl82c105_configure_piomode(ap, adev, 4);
149 break;
150 default:
151 BUG();
152 }
153}
154
155/**
156 * sl82c105_reset_engine - Reset the DMA engine 136 * sl82c105_reset_engine - Reset the DMA engine
157 * @ap: ATA interface 137 * @ap: ATA interface
158 * 138 *
@@ -222,7 +202,7 @@ static void sl82c105_bmdma_stop(struct ata_queued_cmd *qc)
222 202
223 /* This will redo the initial setup of the DMA device to matching 203 /* This will redo the initial setup of the DMA device to matching
224 PIO timings */ 204 PIO timings */
225 sl82c105_set_dmamode(ap, qc->dev); 205 sl82c105_set_piomode(ap, qc->dev);
226} 206}
227 207
228static struct scsi_host_template sl82c105_sht = { 208static struct scsi_host_template sl82c105_sht = {
@@ -246,7 +226,6 @@ static struct scsi_host_template sl82c105_sht = {
246static struct ata_port_operations sl82c105_port_ops = { 226static struct ata_port_operations sl82c105_port_ops = {
247 .port_disable = ata_port_disable, 227 .port_disable = ata_port_disable,
248 .set_piomode = sl82c105_set_piomode, 228 .set_piomode = sl82c105_set_piomode,
249 .set_dmamode = sl82c105_set_dmamode,
250 .mode_filter = ata_pci_default_filter, 229 .mode_filter = ata_pci_default_filter,
251 230
252 .tf_load = ata_tf_load, 231 .tf_load = ata_tf_load,
@@ -255,7 +234,10 @@ static struct ata_port_operations sl82c105_port_ops = {
255 .exec_command = ata_exec_command, 234 .exec_command = ata_exec_command,
256 .dev_select = ata_std_dev_select, 235 .dev_select = ata_std_dev_select,
257 236
237 .freeze = ata_bmdma_freeze,
238 .thaw = ata_bmdma_thaw,
258 .error_handler = sl82c105_error_handler, 239 .error_handler = sl82c105_error_handler,
240 .post_internal_cmd = ata_bmdma_post_internal_cmd,
259 241
260 .bmdma_setup = ata_bmdma_setup, 242 .bmdma_setup = ata_bmdma_setup,
261 .bmdma_start = sl82c105_bmdma_start, 243 .bmdma_start = sl82c105_bmdma_start,
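The new header comment in pata_sl82c105.c explains why .set_dmamode can go: the chip shares one timing register per device, so the driver loads MWDMA timings when a DMA transfer starts and restores the PIO timings when it stops. A hedged sketch of that flip; sl82c105_configure_dmamode() and sl82c105_set_piomode() are assumed to program the shared register as in the file, the foo_ hook names are placeholders, and the engine-reset and delay steps of the real functions are omitted:

static void foo_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* shared timing register: switch to MWDMA timings for this transfer */
	sl82c105_configure_dmamode(ap, qc->dev);
	ata_bmdma_start(qc);
}

static void foo_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	ata_bmdma_stop(qc);

	/* back to PIO timings so ordinary task-file cycles keep working */
	sl82c105_set_piomode(ap, qc->dev);
}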
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 453ab90b72..71418f2a0c 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -193,8 +193,10 @@ static struct scsi_host_template triflex_sht = {
193 .slave_configure = ata_scsi_slave_config, 193 .slave_configure = ata_scsi_slave_config,
194 .slave_destroy = ata_scsi_slave_destroy, 194 .slave_destroy = ata_scsi_slave_destroy,
195 .bios_param = ata_std_bios_param, 195 .bios_param = ata_std_bios_param,
196#ifdef CONFIG_PM
196 .resume = ata_scsi_device_resume, 197 .resume = ata_scsi_device_resume,
197 .suspend = ata_scsi_device_suspend, 198 .suspend = ata_scsi_device_suspend,
199#endif
198}; 200};
199 201
200static struct ata_port_operations triflex_port_ops = { 202static struct ata_port_operations triflex_port_ops = {
@@ -260,8 +262,10 @@ static struct pci_driver triflex_pci_driver = {
260 .id_table = triflex, 262 .id_table = triflex,
261 .probe = triflex_init_one, 263 .probe = triflex_init_one,
262 .remove = ata_pci_remove_one, 264 .remove = ata_pci_remove_one,
265#ifdef CONFIG_PM
263 .suspend = ata_pci_device_suspend, 266 .suspend = ata_pci_device_suspend,
264 .resume = ata_pci_device_resume, 267 .resume = ata_pci_device_resume,
268#endif
265}; 269};
266 270
267static int __init triflex_init(void) 271static int __init triflex_init(void)
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 220fcd6c54..946ade0e1f 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -170,7 +170,7 @@ static int via_pre_reset(struct ata_port *ap)
170 ap->cbl = ATA_CBL_PATA40; 170 ap->cbl = ATA_CBL_PATA40;
171 else 171 else
172 ap->cbl = ATA_CBL_PATA_UNK; 172 ap->cbl = ATA_CBL_PATA_UNK;
173 173
174 174
175 return ata_std_prereset(ap); 175 return ata_std_prereset(ap);
176} 176}
@@ -305,8 +305,10 @@ static struct scsi_host_template via_sht = {
305 .slave_configure = ata_scsi_slave_config, 305 .slave_configure = ata_scsi_slave_config,
306 .slave_destroy = ata_scsi_slave_destroy, 306 .slave_destroy = ata_scsi_slave_destroy,
307 .bios_param = ata_std_bios_param, 307 .bios_param = ata_std_bios_param,
308#ifdef CONFIG_PM
308 .resume = ata_scsi_device_resume, 309 .resume = ata_scsi_device_resume,
309 .suspend = ata_scsi_device_suspend, 310 .suspend = ata_scsi_device_suspend,
311#endif
310}; 312};
311 313
312static struct ata_port_operations via_port_ops = { 314static struct ata_port_operations via_port_ops = {
@@ -560,6 +562,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
560 return ata_pci_init_one(pdev, port_info, 2); 562 return ata_pci_init_one(pdev, port_info, 2);
561} 563}
562 564
565#ifdef CONFIG_PM
563/** 566/**
564 * via_reinit_one - reinit after resume 567 * via_reinit_one - reinit after resume
565 * @pdev: PCI device 568 * @pdev: PCI device
@@ -592,6 +595,7 @@ static int via_reinit_one(struct pci_dev *pdev)
592 } 595 }
593 return ata_pci_device_resume(pdev); 596 return ata_pci_device_resume(pdev);
594} 597}
598#endif
595 599
596static const struct pci_device_id via[] = { 600static const struct pci_device_id via[] = {
597 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), }, 601 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), },
@@ -607,8 +611,10 @@ static struct pci_driver via_pci_driver = {
607 .id_table = via, 611 .id_table = via,
608 .probe = via_init_one, 612 .probe = via_init_one,
609 .remove = ata_pci_remove_one, 613 .remove = ata_pci_remove_one,
614#ifdef CONFIG_PM
610 .suspend = ata_pci_device_suspend, 615 .suspend = ata_pci_device_suspend,
611 .resume = via_reinit_one, 616 .resume = via_reinit_one,
617#endif
612}; 618};
613 619
614static int __init via_init(void) 620static int __init via_init(void)
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
index 0888b4f19f..6c111035fc 100644
--- a/drivers/ata/pata_winbond.c
+++ b/drivers/ata/pata_winbond.c
@@ -17,7 +17,7 @@
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18 18
19#define DRV_NAME "pata_winbond" 19#define DRV_NAME "pata_winbond"
20#define DRV_VERSION "0.0.1" 20#define DRV_VERSION "0.0.2"
21 21
22#define NR_HOST 4 /* Two winbond controllers, two channels each */ 22#define NR_HOST 4 /* Two winbond controllers, two channels each */
23 23
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 857ac23217..5dd3ca8b5f 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -44,7 +44,7 @@
44#include <linux/libata.h> 44#include <linux/libata.h>
45 45
46#define DRV_NAME "pdc_adma" 46#define DRV_NAME "pdc_adma"
47#define DRV_VERSION "0.04" 47#define DRV_VERSION "0.05"
48 48
49/* macro to calculate base address for ATA regs */ 49/* macro to calculate base address for ATA regs */
50#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40)) 50#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40))
@@ -498,7 +498,7 @@ static inline unsigned int adma_intr_mmio(struct ata_host *host)
498 if ((status & ATA_BUSY)) 498 if ((status & ATA_BUSY))
499 continue; 499 continue;
500 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", 500 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
501 ap->id, qc->tf.protocol, status); 501 ap->print_id, qc->tf.protocol, status);
502 502
503 /* complete taskfile transaction */ 503 /* complete taskfile transaction */
504 pp->state = adma_state_idle; 504 pp->state = adma_state_idle;
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 31b636fac9..3193a603d1 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -135,8 +135,10 @@ static struct scsi_host_template inic_sht = {
135 .slave_configure = inic_slave_config, 135 .slave_configure = inic_slave_config,
136 .slave_destroy = ata_scsi_slave_destroy, 136 .slave_destroy = ata_scsi_slave_destroy,
137 .bios_param = ata_std_bios_param, 137 .bios_param = ata_std_bios_param,
138#ifdef CONFIG_PM
138 .suspend = ata_scsi_device_suspend, 139 .suspend = ata_scsi_device_suspend,
139 .resume = ata_scsi_device_resume, 140 .resume = ata_scsi_device_resume,
141#endif
140}; 142};
141 143
142static const int scr_map[] = { 144static const int scr_map[] = {
@@ -632,6 +634,7 @@ static int init_controller(void __iomem *mmio_base, u16 hctl)
632 return 0; 634 return 0;
633} 635}
634 636
637#ifdef CONFIG_PM
635static int inic_pci_device_resume(struct pci_dev *pdev) 638static int inic_pci_device_resume(struct pci_dev *pdev)
636{ 639{
637 struct ata_host *host = dev_get_drvdata(&pdev->dev); 640 struct ata_host *host = dev_get_drvdata(&pdev->dev);
@@ -642,7 +645,6 @@ static int inic_pci_device_resume(struct pci_dev *pdev)
642 ata_pci_device_do_resume(pdev); 645 ata_pci_device_do_resume(pdev);
643 646
644 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { 647 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
645 printk("XXX\n");
646 rc = init_controller(mmio_base, hpriv->cached_hctl); 648 rc = init_controller(mmio_base, hpriv->cached_hctl);
647 if (rc) 649 if (rc)
648 return rc; 650 return rc;
@@ -652,6 +654,7 @@ static int inic_pci_device_resume(struct pci_dev *pdev)
652 654
653 return 0; 655 return 0;
654} 656}
657#endif
655 658
656static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 659static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
657{ 660{
@@ -755,8 +758,10 @@ static const struct pci_device_id inic_pci_tbl[] = {
755static struct pci_driver inic_pci_driver = { 758static struct pci_driver inic_pci_driver = {
756 .name = DRV_NAME, 759 .name = DRV_NAME,
757 .id_table = inic_pci_tbl, 760 .id_table = inic_pci_tbl,
761#ifdef CONFIG_PM
758 .suspend = ata_pci_device_suspend, 762 .suspend = ata_pci_device_suspend,
759 .resume = inic_pci_device_resume, 763 .resume = inic_pci_device_resume,
764#endif
760 .probe = inic_init_one, 765 .probe = inic_init_one,
761 .remove = ata_pci_remove_one, 766 .remove = ata_pci_remove_one,
762}; 767};
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index d689df52ea..a65ba636aa 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -35,7 +35,7 @@
35#include <linux/libata.h> 35#include <linux/libata.h>
36 36
37#define DRV_NAME "sata_mv" 37#define DRV_NAME "sata_mv"
38#define DRV_VERSION "0.7" 38#define DRV_VERSION "0.8"
39 39
40enum { 40enum {
41 /* BAR's are enumerated in terms of pci_resource_start() terms */ 41 /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -137,14 +137,19 @@ enum {
137 PCI_ERR = (1 << 18), 137 PCI_ERR = (1 << 18),
138 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */ 138 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
139 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */ 139 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
140 PORTS_0_3_COAL_DONE = (1 << 8),
141 PORTS_4_7_COAL_DONE = (1 << 17),
140 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */ 142 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
141 GPIO_INT = (1 << 22), 143 GPIO_INT = (1 << 22),
142 SELF_INT = (1 << 23), 144 SELF_INT = (1 << 23),
143 TWSI_INT = (1 << 24), 145 TWSI_INT = (1 << 24),
144 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */ 146 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
147 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
145 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE | 148 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
146 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT | 149 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
147 HC_MAIN_RSVD), 150 HC_MAIN_RSVD),
151 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
152 HC_MAIN_RSVD_5),
148 153
149 /* SATAHC registers */ 154 /* SATAHC registers */
150 HC_CFG_OFS = 0, 155 HC_CFG_OFS = 0,
@@ -814,23 +819,27 @@ static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
814 u32 cfg = readl(port_mmio + EDMA_CFG_OFS); 819 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
815 820
816 /* set up non-NCQ EDMA configuration */ 821 /* set up non-NCQ EDMA configuration */
817 cfg &= ~0x1f; /* clear queue depth */
818 cfg &= ~EDMA_CFG_NCQ; /* clear NCQ mode */
819 cfg &= ~(1 << 9); /* disable equeue */ 822 cfg &= ~(1 << 9); /* disable equeue */
820 823
821 if (IS_GEN_I(hpriv)) 824 if (IS_GEN_I(hpriv)) {
825 cfg &= ~0x1f; /* clear queue depth */
822 cfg |= (1 << 8); /* enab config burst size mask */ 826 cfg |= (1 << 8); /* enab config burst size mask */
827 }
823 828
824 else if (IS_GEN_II(hpriv)) 829 else if (IS_GEN_II(hpriv)) {
830 cfg &= ~0x1f; /* clear queue depth */
825 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; 831 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
832 cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
833 }
826 834
827 else if (IS_GEN_IIE(hpriv)) { 835 else if (IS_GEN_IIE(hpriv)) {
828 cfg |= (1 << 23); /* dis RX PM port mask */ 836 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
829 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */ 837 cfg |= (1 << 22); /* enab 4-entry host queue cache */
830 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */ 838 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
831 cfg |= (1 << 18); /* enab early completion */ 839 cfg |= (1 << 18); /* enab early completion */
832 cfg |= (1 << 17); /* enab host q cache */ 840 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
833 cfg |= (1 << 22); /* enab cutthrough */ 841 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
842 cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
834 } 843 }
835 844
836 writelfl(cfg, port_mmio + EDMA_CFG_OFS); 845 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
@@ -1276,7 +1285,7 @@ static void mv_err_intr(struct ata_port *ap, int reset_allowed)
1276 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 1285 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1277 } 1286 }
1278 DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x " 1287 DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
1279 "SERR: 0x%08x\n", ap->id, edma_err_cause, serr); 1288 "SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);
1280 1289
1281 /* Clear EDMA now that SERR cleanup done */ 1290 /* Clear EDMA now that SERR cleanup done */
1282 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1291 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
@@ -2052,7 +2061,7 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2052 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS; 2061 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2053 2062
2054 /* unused: */ 2063 /* unused: */
2055 port->cmd_addr = port->bmdma_addr = port->scr_addr = 0; 2064 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2056 2065
2057 /* Clear any currently outstanding port interrupt conditions */ 2066 /* Clear any currently outstanding port interrupt conditions */
2058 serr_ofs = mv_scr_offset(SCR_ERROR); 2067 serr_ofs = mv_scr_offset(SCR_ERROR);
@@ -2240,7 +2249,11 @@ static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
2240 2249
2241 /* and unmask interrupt generation for host regs */ 2250 /* and unmask interrupt generation for host regs */
2242 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS); 2251 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2243 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS); 2252
2253 if (IS_50XX(hpriv))
2254 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2255 else
2256 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2244 2257
2245 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x " 2258 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2246 "PCI int cause/mask=0x%08x/0x%08x\n", 2259 "PCI int cause/mask=0x%08x/0x%08x\n",
@@ -2347,7 +2360,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2347 return rc; 2360 return rc;
2348 2361
2349 /* Enable interrupts */ 2362 /* Enable interrupts */
2350 if (msi && !pci_enable_msi(pdev)) 2363 if (msi && pci_enable_msi(pdev))
2351 pci_intx(pdev, 1); 2364 pci_intx(pdev, 1);
2352 2365
2353 mv_dump_pci_cfg(pdev, 0x68); 2366 mv_dump_pci_cfg(pdev, 0x68);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index ab92f208da..388d07fab5 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -219,6 +219,7 @@ struct nv_adma_port_priv {
219 void __iomem * gen_block; 219 void __iomem * gen_block;
220 void __iomem * notifier_clear_block; 220 void __iomem * notifier_clear_block;
221 u8 flags; 221 u8 flags;
222 int last_issue_ncq;
222}; 223};
223 224
224struct nv_host_priv { 225struct nv_host_priv {
@@ -229,7 +230,9 @@ struct nv_host_priv {
229 230
230static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 231static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
231static void nv_remove_one (struct pci_dev *pdev); 232static void nv_remove_one (struct pci_dev *pdev);
233#ifdef CONFIG_PM
232static int nv_pci_device_resume(struct pci_dev *pdev); 234static int nv_pci_device_resume(struct pci_dev *pdev);
235#endif
233static void nv_ck804_host_stop(struct ata_host *host); 236static void nv_ck804_host_stop(struct ata_host *host);
234static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance); 237static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
235static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance); 238static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
@@ -250,14 +253,13 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
250static void nv_adma_irq_clear(struct ata_port *ap); 253static void nv_adma_irq_clear(struct ata_port *ap);
251static int nv_adma_port_start(struct ata_port *ap); 254static int nv_adma_port_start(struct ata_port *ap);
252static void nv_adma_port_stop(struct ata_port *ap); 255static void nv_adma_port_stop(struct ata_port *ap);
256#ifdef CONFIG_PM
253static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg); 257static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
254static int nv_adma_port_resume(struct ata_port *ap); 258static int nv_adma_port_resume(struct ata_port *ap);
259#endif
255static void nv_adma_error_handler(struct ata_port *ap); 260static void nv_adma_error_handler(struct ata_port *ap);
256static void nv_adma_host_stop(struct ata_host *host); 261static void nv_adma_host_stop(struct ata_host *host);
257static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc); 262static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
258static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
259static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
260static u8 nv_adma_bmdma_status(struct ata_port *ap);
261 263
262enum nv_host_type 264enum nv_host_type
263{ 265{
@@ -297,8 +299,10 @@ static struct pci_driver nv_pci_driver = {
297 .name = DRV_NAME, 299 .name = DRV_NAME,
298 .id_table = nv_pci_tbl, 300 .id_table = nv_pci_tbl,
299 .probe = nv_init_one, 301 .probe = nv_init_one,
302#ifdef CONFIG_PM
300 .suspend = ata_pci_device_suspend, 303 .suspend = ata_pci_device_suspend,
301 .resume = nv_pci_device_resume, 304 .resume = nv_pci_device_resume,
305#endif
302 .remove = nv_remove_one, 306 .remove = nv_remove_one,
303}; 307};
304 308
@@ -318,8 +322,10 @@ static struct scsi_host_template nv_sht = {
318 .slave_configure = ata_scsi_slave_config, 322 .slave_configure = ata_scsi_slave_config,
319 .slave_destroy = ata_scsi_slave_destroy, 323 .slave_destroy = ata_scsi_slave_destroy,
320 .bios_param = ata_std_bios_param, 324 .bios_param = ata_std_bios_param,
325#ifdef CONFIG_PM
321 .suspend = ata_scsi_device_suspend, 326 .suspend = ata_scsi_device_suspend,
322 .resume = ata_scsi_device_resume, 327 .resume = ata_scsi_device_resume,
328#endif
323}; 329};
324 330
325static struct scsi_host_template nv_adma_sht = { 331static struct scsi_host_template nv_adma_sht = {
@@ -338,8 +344,10 @@ static struct scsi_host_template nv_adma_sht = {
338 .slave_configure = nv_adma_slave_config, 344 .slave_configure = nv_adma_slave_config,
339 .slave_destroy = ata_scsi_slave_destroy, 345 .slave_destroy = ata_scsi_slave_destroy,
340 .bios_param = ata_std_bios_param, 346 .bios_param = ata_std_bios_param,
347#ifdef CONFIG_PM
341 .suspend = ata_scsi_device_suspend, 348 .suspend = ata_scsi_device_suspend,
342 .resume = ata_scsi_device_resume, 349 .resume = ata_scsi_device_resume,
350#endif
343}; 351};
344 352
345static const struct ata_port_operations nv_generic_ops = { 353static const struct ata_port_operations nv_generic_ops = {
@@ -432,16 +440,16 @@ static const struct ata_port_operations nv_adma_ops = {
432 .exec_command = ata_exec_command, 440 .exec_command = ata_exec_command,
433 .check_status = ata_check_status, 441 .check_status = ata_check_status,
434 .dev_select = ata_std_dev_select, 442 .dev_select = ata_std_dev_select,
435 .bmdma_setup = nv_adma_bmdma_setup, 443 .bmdma_setup = ata_bmdma_setup,
436 .bmdma_start = nv_adma_bmdma_start, 444 .bmdma_start = ata_bmdma_start,
437 .bmdma_stop = nv_adma_bmdma_stop, 445 .bmdma_stop = ata_bmdma_stop,
438 .bmdma_status = nv_adma_bmdma_status, 446 .bmdma_status = ata_bmdma_status,
439 .qc_prep = nv_adma_qc_prep, 447 .qc_prep = nv_adma_qc_prep,
440 .qc_issue = nv_adma_qc_issue, 448 .qc_issue = nv_adma_qc_issue,
441 .freeze = nv_ck804_freeze, 449 .freeze = nv_ck804_freeze,
442 .thaw = nv_ck804_thaw, 450 .thaw = nv_ck804_thaw,
443 .error_handler = nv_adma_error_handler, 451 .error_handler = nv_adma_error_handler,
444 .post_internal_cmd = nv_adma_bmdma_stop, 452 .post_internal_cmd = nv_adma_post_internal_cmd,
445 .data_xfer = ata_data_xfer, 453 .data_xfer = ata_data_xfer,
446 .irq_handler = nv_adma_interrupt, 454 .irq_handler = nv_adma_interrupt,
447 .irq_clear = nv_adma_irq_clear, 455 .irq_clear = nv_adma_irq_clear,
@@ -451,8 +459,10 @@ static const struct ata_port_operations nv_adma_ops = {
451 .scr_write = nv_scr_write, 459 .scr_write = nv_scr_write,
452 .port_start = nv_adma_port_start, 460 .port_start = nv_adma_port_start,
453 .port_stop = nv_adma_port_stop, 461 .port_stop = nv_adma_port_stop,
462#ifdef CONFIG_PM
454 .port_suspend = nv_adma_port_suspend, 463 .port_suspend = nv_adma_port_suspend,
455 .port_resume = nv_adma_port_resume, 464 .port_resume = nv_adma_port_resume,
465#endif
456 .host_stop = nv_adma_host_stop, 466 .host_stop = nv_adma_host_stop,
457}; 467};
458 468
@@ -661,30 +671,31 @@ static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
661{ 671{
662 unsigned int idx = 0; 672 unsigned int idx = 0;
663 673
664 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB); 674 if(tf->flags & ATA_TFLAG_ISADDR) {
665 675 if (tf->flags & ATA_TFLAG_LBA48) {
666 if ((tf->flags & ATA_TFLAG_LBA48) == 0) { 676 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
667 cpb[idx++] = cpu_to_le16(IGN); 677 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
668 cpb[idx++] = cpu_to_le16(IGN); 678 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
669 cpb[idx++] = cpu_to_le16(IGN); 679 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
670 cpb[idx++] = cpu_to_le16(IGN); 680 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
671 cpb[idx++] = cpu_to_le16(IGN); 681 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
672 } 682 } else
673 else { 683 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
674 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature); 684
675 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect); 685 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
676 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal); 686 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
677 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam); 687 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
678 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah); 688 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
679 } 689 }
680 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature); 690
681 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect); 691 if(tf->flags & ATA_TFLAG_DEVICE)
682 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal); 692 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
683 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
684 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
685 693
686 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND); 694 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
687 695
696 while(idx < 12)
697 cpb[idx++] = cpu_to_le16(IGN);
698
688 return idx; 699 return idx;
689} 700}
690 701
@@ -741,6 +752,17 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
741 DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num, 752 DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
742 qc->err_mask); 753 qc->err_mask);
743 ata_qc_complete(qc); 754 ata_qc_complete(qc);
755 } else {
756 struct ata_eh_info *ehi = &ap->eh_info;
757 /* Notifier bits set without a command may indicate the drive
758 is misbehaving. Raise host state machine violation on this
759 condition. */
760 ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
761 cpb_num);
762 ehi->err_mask |= AC_ERR_HSM;
763 ehi->action |= ATA_EH_SOFTRESET;
764 ata_port_freeze(ap);
765 return 1;
744 } 766 }
745 } 767 }
746 return 0; 768 return 0;
@@ -852,22 +874,14 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
852 874
853 if (status & (NV_ADMA_STAT_DONE | 875 if (status & (NV_ADMA_STAT_DONE |
854 NV_ADMA_STAT_CPBERR)) { 876 NV_ADMA_STAT_CPBERR)) {
877 u32 check_commands = notifier | notifier_error;
878 int pos, error = 0;
855 /** Check CPBs for completed commands */ 879 /** Check CPBs for completed commands */
856 880 while ((pos = ffs(check_commands)) && !error) {
857 if (ata_tag_valid(ap->active_tag)) { 881 pos--;
858 /* Non-NCQ command */ 882 error = nv_adma_check_cpb(ap, pos,
859 nv_adma_check_cpb(ap, ap->active_tag, 883 notifier_error & (1 << pos) );
860 notifier_error & (1 << ap->active_tag)); 884 check_commands &= ~(1 << pos );
861 } else {
862 int pos, error = 0;
863 u32 active = ap->sactive;
864
865 while ((pos = ffs(active)) && !error) {
866 pos--;
867 error = nv_adma_check_cpb(ap, pos,
868 notifier_error & (1 << pos) );
869 active &= ~(1 << pos );
870 }
871 } 885 }
872 } 886 }
873 } 887 }
@@ -905,73 +919,12 @@ static void nv_adma_irq_clear(struct ata_port *ap)
905 iowrite8(ioread8(dma_stat_addr), dma_stat_addr); 919 iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
906} 920}
907 921
908static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc) 922static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
909{
910 struct ata_port *ap = qc->ap;
911 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
912 struct nv_adma_port_priv *pp = ap->private_data;
913 u8 dmactl;
914
915 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
916 WARN_ON(1);
917 return;
918 }
919
920 /* load PRD table addr. */
921 iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
922
923 /* specify data direction, triple-check start bit is clear */
924 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
925 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
926 if (!rw)
927 dmactl |= ATA_DMA_WR;
928
929 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
930
931 /* issue r/w command */
932 ata_exec_command(ap, &qc->tf);
933}
934
935static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
936{
937 struct ata_port *ap = qc->ap;
938 struct nv_adma_port_priv *pp = ap->private_data;
939 u8 dmactl;
940
941 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
942 WARN_ON(1);
943 return;
944 }
945
946 /* start host DMA transaction */
947 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
948 iowrite8(dmactl | ATA_DMA_START,
949 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
950}
951
952static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
953{
954 struct ata_port *ap = qc->ap;
955 struct nv_adma_port_priv *pp = ap->private_data;
956
957 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
958 return;
959
960 /* clear start/stop bit */
961 iowrite8(ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
962 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
963
964 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
965 ata_altstatus(ap); /* dummy read */
966}
967
968static u8 nv_adma_bmdma_status(struct ata_port *ap)
969{ 923{
970 struct nv_adma_port_priv *pp = ap->private_data; 924 struct nv_adma_port_priv *pp = qc->ap->private_data;
971
972 WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));
973 925
974 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 926 if(pp->flags & NV_ADMA_PORT_REGISTER_MODE)
927 ata_bmdma_post_internal_cmd(qc);
975} 928}
976 929
977static int nv_adma_port_start(struct ata_port *ap) 930static int nv_adma_port_start(struct ata_port *ap)
@@ -1040,14 +993,15 @@ static int nv_adma_port_start(struct ata_port *ap)
1040 993
1041 /* clear GO for register mode, enable interrupt */ 994 /* clear GO for register mode, enable interrupt */
1042 tmp = readw(mmio + NV_ADMA_CTL); 995 tmp = readw(mmio + NV_ADMA_CTL);
1043 writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL); 996 writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
997 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1044 998
1045 tmp = readw(mmio + NV_ADMA_CTL); 999 tmp = readw(mmio + NV_ADMA_CTL);
1046 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); 1000 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1047 readl( mmio + NV_ADMA_CTL ); /* flush posted write */ 1001 readw( mmio + NV_ADMA_CTL ); /* flush posted write */
1048 udelay(1); 1002 udelay(1);
1049 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); 1003 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1050 readl( mmio + NV_ADMA_CTL ); /* flush posted write */ 1004 readw( mmio + NV_ADMA_CTL ); /* flush posted write */
1051 1005
1052 return 0; 1006 return 0;
1053} 1007}
@@ -1061,6 +1015,7 @@ static void nv_adma_port_stop(struct ata_port *ap)
1061 writew(0, mmio + NV_ADMA_CTL); 1015 writew(0, mmio + NV_ADMA_CTL);
1062} 1016}
1063 1017
1018#ifdef CONFIG_PM
1064static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg) 1019static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1065{ 1020{
1066 struct nv_adma_port_priv *pp = ap->private_data; 1021 struct nv_adma_port_priv *pp = ap->private_data;
@@ -1099,17 +1054,19 @@ static int nv_adma_port_resume(struct ata_port *ap)
1099 1054
1100 /* clear GO for register mode, enable interrupt */ 1055 /* clear GO for register mode, enable interrupt */
1101 tmp = readw(mmio + NV_ADMA_CTL); 1056 tmp = readw(mmio + NV_ADMA_CTL);
1102 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL); 1057 writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1058 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1103 1059
1104 tmp = readw(mmio + NV_ADMA_CTL); 1060 tmp = readw(mmio + NV_ADMA_CTL);
1105 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); 1061 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1106 readl( mmio + NV_ADMA_CTL ); /* flush posted write */ 1062 readw( mmio + NV_ADMA_CTL ); /* flush posted write */
1107 udelay(1); 1063 udelay(1);
1108 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); 1064 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1109 readl( mmio + NV_ADMA_CTL ); /* flush posted write */ 1065 readw( mmio + NV_ADMA_CTL ); /* flush posted write */
1110 1066
1111 return 0; 1067 return 0;
1112} 1068}
1069#endif
1113 1070
1114static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port) 1071static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
1115{ 1072{
@@ -1163,11 +1120,7 @@ static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1163 int idx, 1120 int idx,
1164 struct nv_adma_prd *aprd) 1121 struct nv_adma_prd *aprd)
1165{ 1122{
1166 u8 flags; 1123 u8 flags = 0;
1167
1168 memset(aprd, 0, sizeof(struct nv_adma_prd));
1169
1170 flags = 0;
1171 if (qc->tf.flags & ATA_TFLAG_WRITE) 1124 if (qc->tf.flags & ATA_TFLAG_WRITE)
1172 flags |= NV_APRD_WRITE; 1125 flags |= NV_APRD_WRITE;
1173 if (idx == qc->n_elem - 1) 1126 if (idx == qc->n_elem - 1)
@@ -1178,6 +1131,7 @@ static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1178 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg))); 1131 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1179 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */ 1132 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1180 aprd->flags = flags; 1133 aprd->flags = flags;
1134 aprd->packet_len = 0;
1181} 1135}
1182 1136
1183static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb) 1137static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
@@ -1198,6 +1152,8 @@ static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1198 } 1152 }
1199 if (idx > 5) 1153 if (idx > 5)
1200 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag))); 1154 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1155 else
1156 cpb->next_aprd = cpu_to_le64(0);
1201} 1157}
1202 1158
1203static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc) 1159static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
@@ -1230,7 +1186,10 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1230 return; 1186 return;
1231 } 1187 }
1232 1188
1233 memset(cpb, 0, sizeof(struct nv_adma_cpb)); 1189 cpb->resp_flags = NV_CPB_RESP_DONE;
1190 wmb();
1191 cpb->ctl_flags = 0;
1192 wmb();
1234 1193
1235 cpb->len = 3; 1194 cpb->len = 3;
1236 cpb->tag = qc->tag; 1195 cpb->tag = qc->tag;
@@ -1254,12 +1213,15 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1254 finished filling in all of the contents */ 1213 finished filling in all of the contents */
1255 wmb(); 1214 wmb();
1256 cpb->ctl_flags = ctl_flags; 1215 cpb->ctl_flags = ctl_flags;
1216 wmb();
1217 cpb->resp_flags = 0;
1257} 1218}
1258 1219
1259static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) 1220static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1260{ 1221{
1261 struct nv_adma_port_priv *pp = qc->ap->private_data; 1222 struct nv_adma_port_priv *pp = qc->ap->private_data;
1262 void __iomem *mmio = pp->ctl_block; 1223 void __iomem *mmio = pp->ctl_block;
1224 int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1263 1225
1264 VPRINTK("ENTER\n"); 1226 VPRINTK("ENTER\n");
1265 1227
@@ -1274,6 +1236,14 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1274 /* write append register, command tag in lower 8 bits 1236 /* write append register, command tag in lower 8 bits
1275 and (number of cpbs to append -1) in top 8 bits */ 1237 and (number of cpbs to append -1) in top 8 bits */
1276 wmb(); 1238 wmb();
1239
1240 if(curr_ncq != pp->last_issue_ncq) {
1241 /* Seems to need some delay before switching between NCQ and non-NCQ
1242 commands, else we get command timeouts and such. */
1243 udelay(20);
1244 pp->last_issue_ncq = curr_ncq;
1245 }
1246
1277 writew(qc->tag, mmio + NV_ADMA_APPEND); 1247 writew(qc->tag, mmio + NV_ADMA_APPEND);
1278 1248
1279 DPRINTK("Issued tag %u\n",qc->tag); 1249 DPRINTK("Issued tag %u\n",qc->tag);
@@ -1447,6 +1417,30 @@ static void nv_adma_error_handler(struct ata_port *ap)
1447 int i; 1417 int i;
1448 u16 tmp; 1418 u16 tmp;
1449 1419
1420 if(ata_tag_valid(ap->active_tag) || ap->sactive) {
1421 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1422 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1423 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1424 u32 status = readw(mmio + NV_ADMA_STAT);
1425 u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1426 u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1427
1428 ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
1429 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1430 "next cpb count 0x%X next cpb idx 0x%x\n",
1431 notifier, notifier_error, gen_ctl, status,
1432 cpb_count, next_cpb_idx);
1433
1434 for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
1435 struct nv_adma_cpb *cpb = &pp->cpb[i];
1436 if( (ata_tag_valid(ap->active_tag) && i == ap->active_tag) ||
1437 ap->sactive & (1 << i) )
1438 ata_port_printk(ap, KERN_ERR,
1439 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1440 i, cpb->ctl_flags, cpb->resp_flags);
1441 }
1442 }
1443
1450 /* Push us back into port register mode for error handling. */ 1444 /* Push us back into port register mode for error handling. */
1451 nv_adma_register_mode(ap); 1445 nv_adma_register_mode(ap);
1452 1446
@@ -1460,10 +1454,10 @@ static void nv_adma_error_handler(struct ata_port *ap)
1460 /* Reset channel */ 1454 /* Reset channel */
1461 tmp = readw(mmio + NV_ADMA_CTL); 1455 tmp = readw(mmio + NV_ADMA_CTL);
1462 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); 1456 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1463 readl( mmio + NV_ADMA_CTL ); /* flush posted write */ 1457 readw( mmio + NV_ADMA_CTL ); /* flush posted write */
1464 udelay(1); 1458 udelay(1);
1465 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); 1459 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1466 readl( mmio + NV_ADMA_CTL ); /* flush posted write */ 1460 readw( mmio + NV_ADMA_CTL ); /* flush posted write */
1467 } 1461 }
1468 1462
1469 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, 1463 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
@@ -1575,6 +1569,7 @@ static void nv_remove_one (struct pci_dev *pdev)
1575 kfree(hpriv); 1569 kfree(hpriv);
1576} 1570}
1577 1571
1572#ifdef CONFIG_PM
1578static int nv_pci_device_resume(struct pci_dev *pdev) 1573static int nv_pci_device_resume(struct pci_dev *pdev)
1579{ 1574{
1580 struct ata_host *host = dev_get_drvdata(&pdev->dev); 1575 struct ata_host *host = dev_get_drvdata(&pdev->dev);
@@ -1622,6 +1617,7 @@ static int nv_pci_device_resume(struct pci_dev *pdev)
1622 1617
1623 return 0; 1618 return 0;
1624} 1619}
1620#endif
1625 1621
1626static void nv_ck804_host_stop(struct ata_host *host) 1622static void nv_ck804_host_stop(struct ata_host *host)
1627{ 1623{
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index cf9ed8c393..2339813ce9 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -45,7 +45,7 @@
45#include "sata_promise.h" 45#include "sata_promise.h"
46 46
47#define DRV_NAME "sata_promise" 47#define DRV_NAME "sata_promise"
48#define DRV_VERSION "1.05" 48#define DRV_VERSION "2.00"
49 49
50 50
51enum { 51enum {
@@ -218,6 +218,7 @@ static const struct ata_port_operations pdc_pata_ops = {
218 .freeze = pdc_freeze, 218 .freeze = pdc_freeze,
219 .thaw = pdc_thaw, 219 .thaw = pdc_thaw,
220 .error_handler = pdc_error_handler, 220 .error_handler = pdc_error_handler,
221 .post_internal_cmd = pdc_post_internal_cmd,
221 .data_xfer = ata_data_xfer, 222 .data_xfer = ata_data_xfer,
222 .irq_handler = pdc_interrupt, 223 .irq_handler = pdc_interrupt,
223 .irq_clear = pdc_irq_clear, 224 .irq_clear = pdc_irq_clear,
@@ -776,7 +777,8 @@ static int pdc_old_check_atapi_dma(struct ata_queued_cmd *qc)
776 return pdc_check_atapi_dma(qc); 777 return pdc_check_atapi_dma(qc);
777} 778}
778 779
779static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base) 780static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base,
781 void __iomem *scr_addr)
780{ 782{
781 port->cmd_addr = base; 783 port->cmd_addr = base;
782 port->data_addr = base; 784 port->data_addr = base;
@@ -791,6 +793,7 @@ static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base)
791 port->status_addr = base + 0x1c; 793 port->status_addr = base + 0x1c;
792 port->altstatus_addr = 794 port->altstatus_addr =
793 port->ctl_addr = base + 0x38; 795 port->ctl_addr = base + 0x38;
796 port->scr_addr = scr_addr;
794} 797}
795 798
796 799
@@ -903,11 +906,8 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
903 906
904 base = probe_ent->iomap[PDC_MMIO_BAR]; 907 base = probe_ent->iomap[PDC_MMIO_BAR];
905 908
906 pdc_ata_setup_port(&probe_ent->port[0], base + 0x200); 909 pdc_ata_setup_port(&probe_ent->port[0], base + 0x200, base + 0x400);
907 pdc_ata_setup_port(&probe_ent->port[1], base + 0x280); 910 pdc_ata_setup_port(&probe_ent->port[1], base + 0x280, base + 0x500);
908
909 probe_ent->port[0].scr_addr = base + 0x400;
910 probe_ent->port[1].scr_addr = base + 0x500;
911 911
912 /* notice 4-port boards */ 912 /* notice 4-port boards */
913 switch (board_idx) { 913 switch (board_idx) {
@@ -916,12 +916,8 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
916 /* Fall through */ 916 /* Fall through */
917 case board_20319: 917 case board_20319:
918 probe_ent->n_ports = 4; 918 probe_ent->n_ports = 4;
919 919 pdc_ata_setup_port(&probe_ent->port[2], base + 0x300, base + 0x600);
920 pdc_ata_setup_port(&probe_ent->port[2], base + 0x300); 920 pdc_ata_setup_port(&probe_ent->port[3], base + 0x380, base + 0x700);
921 pdc_ata_setup_port(&probe_ent->port[3], base + 0x380);
922
923 probe_ent->port[2].scr_addr = base + 0x600;
924 probe_ent->port[3].scr_addr = base + 0x700;
925 break; 921 break;
926 case board_2057x: 922 case board_2057x:
927 hp->flags |= PDC_FLAG_GEN_II; 923 hp->flags |= PDC_FLAG_GEN_II;
@@ -931,7 +927,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
931 tmp = readb(base + PDC_FLASH_CTL+1); 927 tmp = readb(base + PDC_FLASH_CTL+1);
932 if (!(tmp & 0x80)) { 928 if (!(tmp & 0x80)) {
933 probe_ent->n_ports = 3; 929 probe_ent->n_ports = 3;
934 pdc_ata_setup_port(&probe_ent->port[2], base + 0x300); 930 pdc_ata_setup_port(&probe_ent->port[2], base + 0x300, NULL);
935 hp->port_flags[2] = ATA_FLAG_SLAVE_POSS; 931 hp->port_flags[2] = ATA_FLAG_SLAVE_POSS;
936 printk(KERN_INFO DRV_NAME " PATA port found\n"); 932 printk(KERN_INFO DRV_NAME " PATA port found\n");
937 } else 933 } else
@@ -941,12 +937,8 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
941 break; 937 break;
942 case board_20619: 938 case board_20619:
943 probe_ent->n_ports = 4; 939 probe_ent->n_ports = 4;
944 940 pdc_ata_setup_port(&probe_ent->port[2], base + 0x300, NULL);
945 pdc_ata_setup_port(&probe_ent->port[2], base + 0x300); 941 pdc_ata_setup_port(&probe_ent->port[3], base + 0x380, NULL);
946 pdc_ata_setup_port(&probe_ent->port[3], base + 0x380);
947
948 probe_ent->port[2].scr_addr = base + 0x600;
949 probe_ent->port[3].scr_addr = base + 0x700;
950 break; 942 break;
951 default: 943 default:
952 BUG(); 944 BUG();
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 6097d8f2a0..8786b45f29 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -39,7 +39,7 @@
39#include <linux/libata.h> 39#include <linux/libata.h>
40 40
41#define DRV_NAME "sata_qstor" 41#define DRV_NAME "sata_qstor"
42#define DRV_VERSION "0.06" 42#define DRV_VERSION "0.07"
43 43
44enum { 44enum {
45 QS_MMIO_BAR = 4, 45 QS_MMIO_BAR = 4,
@@ -446,7 +446,7 @@ static inline unsigned int qs_intr_mmio(struct ata_host *host)
446 if ((status & ATA_BUSY)) 446 if ((status & ATA_BUSY))
447 continue; 447 continue;
448 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", 448 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
449 ap->id, qc->tf.protocol, status); 449 ap->print_id, qc->tf.protocol, status);
450 450
451 /* complete taskfile transaction */ 451 /* complete taskfile transaction */
452 pp->state = qs_state_idle; 452 pp->state = qs_state_idle;
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index dca3d3749f..917b7ea4ef 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -46,7 +46,7 @@
46#include <linux/libata.h> 46#include <linux/libata.h>
47 47
48#define DRV_NAME "sata_sil" 48#define DRV_NAME "sata_sil"
49#define DRV_VERSION "2.0" 49#define DRV_VERSION "2.1"
50 50
51enum { 51enum {
52 SIL_MMIO_BAR = 5, 52 SIL_MMIO_BAR = 5,
@@ -183,8 +183,10 @@ static struct scsi_host_template sil_sht = {
183 .slave_configure = ata_scsi_slave_config, 183 .slave_configure = ata_scsi_slave_config,
184 .slave_destroy = ata_scsi_slave_destroy, 184 .slave_destroy = ata_scsi_slave_destroy,
185 .bios_param = ata_std_bios_param, 185 .bios_param = ata_std_bios_param,
186#ifdef CONFIG_PM
186 .suspend = ata_scsi_device_suspend, 187 .suspend = ata_scsi_device_suspend,
187 .resume = ata_scsi_device_resume, 188 .resume = ata_scsi_device_resume,
189#endif
188}; 190};
189 191
190static const struct ata_port_operations sil_ops = { 192static const struct ata_port_operations sil_ops = {
@@ -339,7 +341,7 @@ static inline void __iomem *sil_scr_addr(struct ata_port *ap, unsigned int sc_re
339 break; 341 break;
340 } 342 }
341 343
342 return 0; 344 return NULL;
343} 345}
344 346
345static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg) 347static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
@@ -386,9 +388,15 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
386 goto freeze; 388 goto freeze;
387 } 389 }
388 390
389 if (unlikely(!qc || qc->tf.ctl & ATA_NIEN)) 391 if (unlikely(!qc))
390 goto freeze; 392 goto freeze;
391 393
394 if (unlikely(qc->tf.flags & ATA_TFLAG_POLLING)) {
395 /* this sometimes happens, just clear IRQ */
396 ata_chk_status(ap);
397 return;
398 }
399
392 /* Check whether we are expecting interrupt in this state */ 400 /* Check whether we are expecting interrupt in this state */
393 switch (ap->hsm_task_state) { 401 switch (ap->hsm_task_state) {
394 case HSM_ST_FIRST: 402 case HSM_ST_FIRST:
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index e65e8d55da..75d9615996 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -30,7 +30,7 @@
30#include <linux/libata.h> 30#include <linux/libata.h>
31 31
32#define DRV_NAME "sata_sil24" 32#define DRV_NAME "sata_sil24"
33#define DRV_VERSION "0.3" 33#define DRV_VERSION "0.8"
34 34
35/* 35/*
36 * Port request block (PRB) 32 bytes 36 * Port request block (PRB) 32 bytes
@@ -380,8 +380,10 @@ static struct scsi_host_template sil24_sht = {
380 .slave_configure = ata_scsi_slave_config, 380 .slave_configure = ata_scsi_slave_config,
381 .slave_destroy = ata_scsi_slave_destroy, 381 .slave_destroy = ata_scsi_slave_destroy,
382 .bios_param = ata_std_bios_param, 382 .bios_param = ata_std_bios_param,
383#ifdef CONFIG_PM
383 .suspend = ata_scsi_device_suspend, 384 .suspend = ata_scsi_device_suspend,
384 .resume = ata_scsi_device_resume, 385 .resume = ata_scsi_device_resume,
386#endif
385}; 387};
386 388
387static const struct ata_port_operations sil24_ops = { 389static const struct ata_port_operations sil24_ops = {
@@ -647,7 +649,6 @@ static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
647 struct sil24_sge *sge) 649 struct sil24_sge *sge)
648{ 650{
649 struct scatterlist *sg; 651 struct scatterlist *sg;
650 unsigned int idx = 0;
651 652
652 ata_for_each_sg(sg, qc) { 653 ata_for_each_sg(sg, qc) {
653 sge->addr = cpu_to_le64(sg_dma_address(sg)); 654 sge->addr = cpu_to_le64(sg_dma_address(sg));
@@ -656,9 +657,7 @@ static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
656 sge->flags = cpu_to_le32(SGE_TRM); 657 sge->flags = cpu_to_le32(SGE_TRM);
657 else 658 else
658 sge->flags = 0; 659 sge->flags = 0;
659
660 sge++; 660 sge++;
661 idx++;
662 } 661 }
663} 662}
664 663
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 49c9e2bd70..1879e0cd56 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -40,9 +40,8 @@
40#include <linux/device.h> 40#include <linux/device.h>
41#include <scsi/scsi_host.h> 41#include <scsi/scsi_host.h>
42#include <linux/libata.h> 42#include <linux/libata.h>
43#include "libata.h" 43#include "sis.h"
44 44
45#undef DRV_NAME /* already defined in libata.h, for libata-core */
46#define DRV_NAME "sata_sis" 45#define DRV_NAME "sata_sis"
47#define DRV_VERSION "0.7" 46#define DRV_VERSION "0.7"
48 47
@@ -310,7 +309,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
310 case 0x10: 309 case 0x10:
311 ppi[1] = &sis_info133; 310 ppi[1] = &sis_info133;
312 break; 311 break;
313 312
314 case 0x30: 313 case 0x30:
315 ppi[0] = &sis_info133; 314 ppi[0] = &sis_info133;
316 break; 315 break;
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 4e42899942..b121195cc5 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -53,7 +53,7 @@
53#endif /* CONFIG_PPC_OF */ 53#endif /* CONFIG_PPC_OF */
54 54
55#define DRV_NAME "sata_svw" 55#define DRV_NAME "sata_svw"
56#define DRV_VERSION "2.0" 56#define DRV_VERSION "2.1"
57 57
58enum { 58enum {
59 K2_FLAG_NO_ATAPI_DMA = (1 << 29), 59 K2_FLAG_NO_ATAPI_DMA = (1 << 29),
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 0ebd77b080..1a081c3a8c 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -44,7 +44,7 @@
44#include "sata_promise.h" 44#include "sata_promise.h"
45 45
46#define DRV_NAME "sata_sx4" 46#define DRV_NAME "sata_sx4"
47#define DRV_VERSION "0.9" 47#define DRV_VERSION "0.10"
48 48
49 49
50enum { 50enum {
@@ -421,7 +421,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
421 421
422 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP)); 422 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
423 423
424 VPRINTK("ata%u: ENTER\n", ap->id); 424 VPRINTK("ata%u: ENTER\n", ap->print_id);
425 425
426 /* hard-code chip #0 */ 426 /* hard-code chip #0 */
427 mmio += PDC_CHIP0_OFS; 427 mmio += PDC_CHIP0_OFS;
@@ -478,7 +478,7 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
478 unsigned int portno = ap->port_no; 478 unsigned int portno = ap->port_no;
479 unsigned int i; 479 unsigned int i;
480 480
481 VPRINTK("ata%u: ENTER\n", ap->id); 481 VPRINTK("ata%u: ENTER\n", ap->print_id);
482 482
483 /* hard-code chip #0 */ 483 /* hard-code chip #0 */
484 mmio += PDC_CHIP0_OFS; 484 mmio += PDC_CHIP0_OFS;
@@ -605,7 +605,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
605 /* hard-code chip #0 */ 605 /* hard-code chip #0 */
606 mmio += PDC_CHIP0_OFS; 606 mmio += PDC_CHIP0_OFS;
607 607
608 VPRINTK("ata%u: ENTER\n", ap->id); 608 VPRINTK("ata%u: ENTER\n", ap->print_id);
609 609
610 wmb(); /* flush PRD, pkt writes */ 610 wmb(); /* flush PRD, pkt writes */
611 611
@@ -672,7 +672,7 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
672 672
673 /* step two - DMA from DIMM to host */ 673 /* step two - DMA from DIMM to host */
674 if (doing_hdma) { 674 if (doing_hdma) {
675 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->id, 675 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
676 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); 676 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
677 /* get drive status; clear intr; complete txn */ 677 /* get drive status; clear intr; complete txn */
678 qc->err_mask |= ac_err_mask(ata_wait_idle(ap)); 678 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
@@ -683,7 +683,7 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
683 /* step one - exec ATA command */ 683 /* step one - exec ATA command */
684 else { 684 else {
685 u8 seq = (u8) (port_no + 1 + 4); 685 u8 seq = (u8) (port_no + 1 + 4);
686 VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->id, 686 VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
687 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); 687 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
688 688
689 /* submit hdma pkt */ 689 /* submit hdma pkt */
@@ -698,7 +698,7 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
698 /* step one - DMA from host to DIMM */ 698 /* step one - DMA from host to DIMM */
699 if (doing_hdma) { 699 if (doing_hdma) {
700 u8 seq = (u8) (port_no + 1); 700 u8 seq = (u8) (port_no + 1);
701 VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->id, 701 VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
702 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); 702 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
703 703
704 /* submit ata pkt */ 704 /* submit ata pkt */
@@ -711,7 +711,7 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
711 711
712 /* step two - execute ATA command */ 712 /* step two - execute ATA command */
713 else { 713 else {
714 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->id, 714 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
715 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); 715 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
716 /* get drive status; clear intr; complete txn */ 716 /* get drive status; clear intr; complete txn */
717 qc->err_mask |= ac_err_mask(ata_wait_idle(ap)); 717 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 80131eec68..d659ace80f 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -36,7 +36,7 @@
36#include <linux/libata.h> 36#include <linux/libata.h>
37 37
38#define DRV_NAME "sata_uli" 38#define DRV_NAME "sata_uli"
39#define DRV_VERSION "1.0" 39#define DRV_VERSION "1.1"
40 40
41enum { 41enum {
42 uli_5289 = 0, 42 uli_5289 = 0,
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index baca6d79bb..598e6a26a4 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -46,7 +46,7 @@
46#include <linux/libata.h> 46#include <linux/libata.h>
47 47
48#define DRV_NAME "sata_via" 48#define DRV_NAME "sata_via"
49#define DRV_VERSION "2.0" 49#define DRV_VERSION "2.1"
50 50
51enum board_ids_enum { 51enum board_ids_enum {
52 vt6420, 52 vt6420,
@@ -60,7 +60,7 @@ enum {
60 SATA_PATA_SHARING = 0x49, /* PATA/SATA sharing func ctrl */ 60 SATA_PATA_SHARING = 0x49, /* PATA/SATA sharing func ctrl */
61 PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA/ cable detect */ 61 PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA/ cable detect */
62 PATA_PIO_TIMING = 0xAB, /* PATA timing register */ 62 PATA_PIO_TIMING = 0xAB, /* PATA timing register */
63 63
64 PORT0 = (1 << 1), 64 PORT0 = (1 << 1),
65 PORT1 = (1 << 0), 65 PORT1 = (1 << 0),
66 ALL_PORTS = PORT0 | PORT1, 66 ALL_PORTS = PORT0 | PORT1,
@@ -151,7 +151,7 @@ static const struct ata_port_operations vt6420_sata_ops = {
151 151
152static const struct ata_port_operations vt6421_pata_ops = { 152static const struct ata_port_operations vt6421_pata_ops = {
153 .port_disable = ata_port_disable, 153 .port_disable = ata_port_disable,
154 154
155 .set_piomode = vt6421_set_pio_mode, 155 .set_piomode = vt6421_set_pio_mode,
156 .set_dmamode = vt6421_set_dma_mode, 156 .set_dmamode = vt6421_set_dma_mode,
157 157
@@ -185,7 +185,7 @@ static const struct ata_port_operations vt6421_pata_ops = {
185 185
186static const struct ata_port_operations vt6421_sata_ops = { 186static const struct ata_port_operations vt6421_sata_ops = {
187 .port_disable = ata_port_disable, 187 .port_disable = ata_port_disable,
188 188
189 .tf_load = ata_tf_load, 189 .tf_load = ata_tf_load,
190 .tf_read = ata_tf_read, 190 .tf_read = ata_tf_read,
191 .check_status = ata_check_status, 191 .check_status = ata_check_status,
@@ -423,16 +423,21 @@ static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
423{ 423{
424 struct ata_probe_ent *probe_ent; 424 struct ata_probe_ent *probe_ent;
425 struct ata_port_info *ppi[2]; 425 struct ata_port_info *ppi[2];
426 void __iomem * const *iomap; 426 void __iomem *bar5;
427 427
428 ppi[0] = ppi[1] = &vt6420_port_info; 428 ppi[0] = ppi[1] = &vt6420_port_info;
429 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); 429 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
430 if (!probe_ent) 430 if (!probe_ent)
431 return NULL; 431 return NULL;
432 432
433 iomap = pcim_iomap_table(pdev); 433 bar5 = pcim_iomap(pdev, 5, 0);
434 probe_ent->port[0].scr_addr = svia_scr_addr(iomap[5], 0); 434 if (!bar5) {
435 probe_ent->port[1].scr_addr = svia_scr_addr(iomap[5], 1); 435 dev_printk(KERN_ERR, &pdev->dev, "failed to iomap PCI BAR 5\n");
436 return NULL;
437 }
438
439 probe_ent->port[0].scr_addr = svia_scr_addr(bar5, 0);
440 probe_ent->port[1].scr_addr = svia_scr_addr(bar5, 1);
436 441
437 return probe_ent; 442 return probe_ent;
438} 443}
@@ -460,6 +465,13 @@ static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
460 probe_ent->mwdma_mask = 0x07; 465 probe_ent->mwdma_mask = 0x07;
461 probe_ent->udma_mask = 0x7f; 466 probe_ent->udma_mask = 0x7f;
462 467
468 for (i = 0; i < 6; i++)
469 if (!pcim_iomap(pdev, i, 0)) {
470 dev_printk(KERN_ERR, &pdev->dev,
471 "failed to iomap PCI BAR %d\n", i);
472 return NULL;
473 }
474
463 for (i = 0; i < N_PORTS; i++) 475 for (i = 0; i < N_PORTS; i++)
464 vt6421_init_addrs(probe_ent, pcim_iomap_table(pdev), i); 476 vt6421_init_addrs(probe_ent, pcim_iomap_table(pdev), i);
465 477
@@ -522,7 +534,7 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
522 if (rc) 534 if (rc)
523 return rc; 535 return rc;
524 536
525 rc = pcim_iomap_regions(pdev, 0x1f, DRV_NAME); 537 rc = pci_request_regions(pdev, DRV_NAME);
526 if (rc) { 538 if (rc) {
527 pcim_pin_device(pdev); 539 pcim_pin_device(pdev);
528 return rc; 540 return rc;
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 2fd037bde0..170bad1b41 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -47,7 +47,7 @@
47#include <linux/libata.h> 47#include <linux/libata.h>
48 48
49#define DRV_NAME "sata_vsc" 49#define DRV_NAME "sata_vsc"
50#define DRV_VERSION "2.0" 50#define DRV_VERSION "2.1"
51 51
52enum { 52enum {
53 VSC_MMIO_BAR = 0, 53 VSC_MMIO_BAR = 0,
@@ -98,10 +98,6 @@ enum {
98 VSC_SATA_INT_PHY_CHANGE), 98 VSC_SATA_INT_PHY_CHANGE),
99}; 99};
100 100
101#define is_vsc_sata_int_err(port_idx, int_status) \
102 (int_status & (VSC_SATA_INT_ERROR << (8 * port_idx)))
103
104
105static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) 101static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
106{ 102{
107 if (sc_reg > SCR_CONTROL) 103 if (sc_reg > SCR_CONTROL)
@@ -119,6 +115,28 @@ static void vsc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
119} 115}
120 116
121 117
118static void vsc_freeze(struct ata_port *ap)
119{
120 void __iomem *mask_addr;
121
122 mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
123 VSC_SATA_INT_MASK_OFFSET + ap->port_no;
124
125 writeb(0, mask_addr);
126}
127
128
129static void vsc_thaw(struct ata_port *ap)
130{
131 void __iomem *mask_addr;
132
133 mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
134 VSC_SATA_INT_MASK_OFFSET + ap->port_no;
135
136 writeb(0xff, mask_addr);
137}
138
139
122static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl) 140static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
123{ 141{
124 void __iomem *mask_addr; 142 void __iomem *mask_addr;
@@ -203,6 +221,36 @@ static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
203 } 221 }
204} 222}
205 223
224static inline void vsc_error_intr(u8 port_status, struct ata_port *ap)
225{
226 if (port_status & (VSC_SATA_INT_PHY_CHANGE | VSC_SATA_INT_ERROR_M))
227 ata_port_freeze(ap);
228 else
229 ata_port_abort(ap);
230}
231
232static void vsc_port_intr(u8 port_status, struct ata_port *ap)
233{
234 struct ata_queued_cmd *qc;
235 int handled = 0;
236
237 if (unlikely(port_status & VSC_SATA_INT_ERROR)) {
238 vsc_error_intr(port_status, ap);
239 return;
240 }
241
242 qc = ata_qc_from_tag(ap, ap->active_tag);
243 if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING)))
244 handled = ata_host_intr(ap, qc);
245
246 /* We received an interrupt during a polled command,
247 * or some other spurious condition. Interrupt reporting
248 * with this hardware is fairly reliable so it is safe to
249 * simply clear the interrupt
250 */
251 if (unlikely(!handled))
252 ata_chk_status(ap);
253}
206 254
207/* 255/*
208 * vsc_sata_interrupt 256 * vsc_sata_interrupt
@@ -214,59 +262,36 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance)
214 struct ata_host *host = dev_instance; 262 struct ata_host *host = dev_instance;
215 unsigned int i; 263 unsigned int i;
216 unsigned int handled = 0; 264 unsigned int handled = 0;
217 u32 int_status; 265 u32 status;
218
219 spin_lock(&host->lock);
220 266
221 int_status = readl(host->iomap[VSC_MMIO_BAR] + 267 status = readl(host->iomap[VSC_MMIO_BAR] + VSC_SATA_INT_STAT_OFFSET);
222 VSC_SATA_INT_STAT_OFFSET);
223 268
224 for (i = 0; i < host->n_ports; i++) { 269 if (unlikely(status == 0xffffffff || status == 0)) {
225 if (int_status & ((u32) 0xFF << (8 * i))) { 270 if (status)
226 struct ata_port *ap; 271 dev_printk(KERN_ERR, host->dev,
272 ": IRQ status == 0xffffffff, "
273 "PCI fault or device removal?\n");
274 goto out;
275 }
227 276
228 ap = host->ports[i]; 277 spin_lock(&host->lock);
229 278
230 if (is_vsc_sata_int_err(i, int_status)) { 279 for (i = 0; i < host->n_ports; i++) {
231 u32 err_status; 280 u8 port_status = (status >> (8 * i)) & 0xff;
232 printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__); 281 if (port_status) {
233 err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0; 282 struct ata_port *ap = host->ports[i];
234 vsc_sata_scr_write(ap, SCR_ERROR, err_status);
235 handled++;
236 }
237 283
238 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { 284 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
239 struct ata_queued_cmd *qc; 285 vsc_port_intr(port_status, ap);
240 286 handled++;
241 qc = ata_qc_from_tag(ap, ap->active_tag); 287 } else
242 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) 288 dev_printk(KERN_ERR, host->dev,
243 handled += ata_host_intr(ap, qc); 289 ": interrupt from disabled port %d\n", i);
244 else if (is_vsc_sata_int_err(i, int_status)) {
245 /*
246 * On some chips (i.e. Intel 31244), an error
247 * interrupt will sneak in at initialization
248 * time (phy state changes). Clearing the SCR
249 * error register is not required, but it prevents
250 * the phy state change interrupts from recurring
251 * later.
252 */
253 u32 err_status;
254 err_status = vsc_sata_scr_read(ap, SCR_ERROR);
255 printk(KERN_DEBUG "%s: clearing interrupt, "
256 "status %x; sata err status %x\n",
257 __FUNCTION__,
258 int_status, err_status);
259 vsc_sata_scr_write(ap, SCR_ERROR, err_status);
260 /* Clear interrupt status */
261 ata_chk_status(ap);
262 handled++;
263 }
264 }
265 } 290 }
266 } 291 }
267 292
268 spin_unlock(&host->lock); 293 spin_unlock(&host->lock);
269 294out:
270 return IRQ_RETVAL(handled); 295 return IRQ_RETVAL(handled);
271} 296}
272 297
@@ -304,8 +329,8 @@ static const struct ata_port_operations vsc_sata_ops = {
304 .qc_prep = ata_qc_prep, 329 .qc_prep = ata_qc_prep,
305 .qc_issue = ata_qc_issue_prot, 330 .qc_issue = ata_qc_issue_prot,
306 .data_xfer = ata_data_xfer, 331 .data_xfer = ata_data_xfer,
307 .freeze = ata_bmdma_freeze, 332 .freeze = vsc_freeze,
308 .thaw = ata_bmdma_thaw, 333 .thaw = vsc_thaw,
309 .error_handler = ata_bmdma_error_handler, 334 .error_handler = ata_bmdma_error_handler,
310 .post_internal_cmd = ata_bmdma_post_internal_cmd, 335 .post_internal_cmd = ata_bmdma_post_internal_cmd,
311 .irq_handler = vsc_sata_interrupt, 336 .irq_handler = vsc_sata_interrupt,
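[editor's note] The reworked vsc_sata_interrupt packs one status byte per port into a 32-bit register, screens the all-ones/zero cases, and dispatches per port. A minimal userspace sketch of that extraction logic follows; demo_dispatch, MAX_PORTS and the sample values are illustrative stand-ins, not the driver's API.

#include <stdint.h>
#include <stdio.h>

#define MAX_PORTS 4

static void demo_dispatch(uint32_t status)
{
        unsigned int i;

        /* 0xffffffff usually means the device fell off the bus
         * (PCI master abort); 0 means the interrupt was not ours. */
        if (status == 0xffffffffu || status == 0) {
                if (status)
                        fprintf(stderr, "PCI fault or device removal?\n");
                return;
        }

        for (i = 0; i < MAX_PORTS; i++) {
                uint8_t port_status = (status >> (8 * i)) & 0xff;

                if (port_status)
                        printf("port %u: status 0x%02x\n", i, port_status);
        }
}

int main(void)
{
        demo_dispatch(0x00020100);      /* ports 1 and 2 raised bits */
        demo_dispatch(0xffffffffu);     /* device-removal case */
        return 0;
}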
diff --git a/drivers/ata/sis.h b/drivers/ata/sis.h
new file mode 100644
index 0000000000..231da8fc22
--- /dev/null
+++ b/drivers/ata/sis.h
@@ -0,0 +1,5 @@
1
2struct ata_port_info;
3
4/* pata_sis.c */
5extern struct ata_port_info sis_info133;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index cacb1c816e..17ee97f3a9 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -406,22 +406,6 @@ config BLK_DEV_RAM_BLOCKSIZE
406 setups function - apparently needed by the rd_load_image routine 406 setups function - apparently needed by the rd_load_image routine
407 that supposes the filesystem in the image uses a 1024 blocksize. 407 that supposes the filesystem in the image uses a 1024 blocksize.
408 408
409config BLK_DEV_INITRD
410 bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support"
411 depends on BROKEN || !FRV
412 help
413 The initial RAM filesystem is a ramfs which is loaded by the
414 boot loader (loadlin or lilo) and that is mounted as root
415 before the normal boot procedure. It is typically used to
416 load modules needed to mount the "real" root file system,
417 etc. See <file:Documentation/initrd.txt> for details.
418
419 If RAM disk support (BLK_DEV_RAM) is also included, this
420 also enables initial RAM disk (initrd) support and adds
421 15 Kbytes (more on some other architectures) to the kernel size.
422
423 If unsure say Y.
424
425config CDROM_PKTCDVD 409config CDROM_PKTCDVD
426 tristate "Packet writing on CD/DVD media" 410 tristate "Packet writing on CD/DVD media"
427 depends on !UML 411 depends on !UML
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index bb022ed4a8..8d17d8df36 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -530,7 +530,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
530 u16 aoemajor; 530 u16 aoemajor;
531 531
532 hin = (struct aoe_hdr *) skb->mac.raw; 532 hin = (struct aoe_hdr *) skb->mac.raw;
533 aoemajor = be16_to_cpu(hin->major); 533 aoemajor = be16_to_cpu(get_unaligned(&hin->major));
534 d = aoedev_by_aoeaddr(aoemajor, hin->minor); 534 d = aoedev_by_aoeaddr(aoemajor, hin->minor);
535 if (d == NULL) { 535 if (d == NULL) {
536 snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response " 536 snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
@@ -542,7 +542,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
542 542
543 spin_lock_irqsave(&d->lock, flags); 543 spin_lock_irqsave(&d->lock, flags);
544 544
545 n = be32_to_cpu(hin->tag); 545 n = be32_to_cpu(get_unaligned(&hin->tag));
546 f = getframe(d, n); 546 f = getframe(d, n);
547 if (f == NULL) { 547 if (f == NULL) {
548 calc_rttavg(d, -tsince(n)); 548 calc_rttavg(d, -tsince(n));
@@ -550,9 +550,9 @@ aoecmd_ata_rsp(struct sk_buff *skb)
550 snprintf(ebuf, sizeof ebuf, 550 snprintf(ebuf, sizeof ebuf,
551 "%15s e%d.%d tag=%08x@%08lx\n", 551 "%15s e%d.%d tag=%08x@%08lx\n",
552 "unexpected rsp", 552 "unexpected rsp",
553 be16_to_cpu(hin->major), 553 be16_to_cpu(get_unaligned(&hin->major)),
554 hin->minor, 554 hin->minor,
555 be32_to_cpu(hin->tag), 555 be32_to_cpu(get_unaligned(&hin->tag)),
556 jiffies); 556 jiffies);
557 aoechr_error(ebuf); 557 aoechr_error(ebuf);
558 return; 558 return;
@@ -631,7 +631,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
631 printk(KERN_INFO 631 printk(KERN_INFO
632 "aoe: unrecognized ata command %2.2Xh for %d.%d\n", 632 "aoe: unrecognized ata command %2.2Xh for %d.%d\n",
633 ahout->cmdstat, 633 ahout->cmdstat,
634 be16_to_cpu(hin->major), 634 be16_to_cpu(get_unaligned(&hin->major)),
635 hin->minor); 635 hin->minor);
636 } 636 }
637 } 637 }
@@ -733,7 +733,7 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
733 * Enough people have their dip switches set backwards to 733 * Enough people have their dip switches set backwards to
734 * warrant a loud message for this special case. 734 * warrant a loud message for this special case.
735 */ 735 */
736 aoemajor = be16_to_cpu(h->major); 736 aoemajor = be16_to_cpu(get_unaligned(&h->major));
737 if (aoemajor == 0xfff) { 737 if (aoemajor == 0xfff) {
738 printk(KERN_ERR "aoe: Warning: shelf address is all ones. " 738 printk(KERN_ERR "aoe: Warning: shelf address is all ones. "
739 "Check shelf dip switches.\n"); 739 "Check shelf dip switches.\n");
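[editor's note] The aoe hunks wrap header accesses in get_unaligned() because the AoE header fields sit behind the Ethernet header and are not naturally aligned. A minimal standalone sketch of a safe unaligned big-endian 16-bit read, the portable equivalent of be16_to_cpu(get_unaligned(...)); read_be16_unaligned and the sample frame are illustrative only.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint16_t read_be16_unaligned(const void *p)
{
        uint8_t b[2];

        memcpy(b, p, sizeof(b));        /* avoids an unaligned load trap */
        return (uint16_t)((b[0] << 8) | b[1]);
}

int main(void)
{
        /* A field starting one byte into a buffer is misaligned. */
        uint8_t frame[] = { 0x00, 0x0f, 0xff, 0x12, 0x34 };

        printf("major = 0x%04x\n", read_be16_unaligned(&frame[1]));
        return 0;
}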
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index 9626e0f5da..aab6d91a2c 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -8,6 +8,7 @@
8#include <linux/blkdev.h> 8#include <linux/blkdev.h>
9#include <linux/netdevice.h> 9#include <linux/netdevice.h>
10#include <linux/moduleparam.h> 10#include <linux/moduleparam.h>
11#include <asm/unaligned.h>
11#include "aoe.h" 12#include "aoe.h"
12 13
13#define NECODES 5 14#define NECODES 5
@@ -123,7 +124,7 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
123 skb_push(skb, ETH_HLEN); /* (1) */ 124 skb_push(skb, ETH_HLEN); /* (1) */
124 125
125 h = (struct aoe_hdr *) skb->mac.raw; 126 h = (struct aoe_hdr *) skb->mac.raw;
126 n = be32_to_cpu(h->tag); 127 n = be32_to_cpu(get_unaligned(&h->tag));
127 if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31)) 128 if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31))
128 goto exit; 129 goto exit;
129 130
@@ -133,7 +134,7 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
133 n = 0; 134 n = 0;
134 if (net_ratelimit()) 135 if (net_ratelimit())
135 printk(KERN_ERR "aoe: error packet from %d.%d; ecode=%d '%s'\n", 136 printk(KERN_ERR "aoe: error packet from %d.%d; ecode=%d '%s'\n",
136 be16_to_cpu(h->major), h->minor, 137 be16_to_cpu(get_unaligned(&h->major)), h->minor,
137 h->err, aoe_errlist[n]); 138 h->err, aoe_errlist[n]);
138 goto exit; 139 goto exit;
139 } 140 }
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 05dfe35752..0c716ee905 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1291,13 +1291,19 @@ static void cciss_update_drive_info(int ctlr, int drv_index)
1291 if (inq_buff == NULL) 1291 if (inq_buff == NULL)
1292 goto mem_msg; 1292 goto mem_msg;
1293 1293
1294 /* testing to see if 16-byte CDBs are already being used */
1295 if (h->cciss_read == CCISS_READ_16) {
1296 cciss_read_capacity_16(h->ctlr, drv_index, 1,
1297 &total_size, &block_size);
1298 goto geo_inq;
1299 }
1300
1294 cciss_read_capacity(ctlr, drv_index, 1, 1301 cciss_read_capacity(ctlr, drv_index, 1,
1295 &total_size, &block_size); 1302 &total_size, &block_size);
1296 1303
1297 /* total size = last LBA + 1 */ 1304 /* if read_capacity returns all F's this volume is >2TB in size */
1298 /* FFFFFFFF + 1 = 0, cannot have a logical volume of size 0 */ 1305 /* so we switch to 16-byte CDB's for all read/write ops */
1299 /* so we assume this volume this must be >2TB in size */ 1306 if (total_size == 0xFFFFFFFFULL) {
1300 if (total_size == (__u32) 0) {
1301 cciss_read_capacity_16(ctlr, drv_index, 1, 1307 cciss_read_capacity_16(ctlr, drv_index, 1,
1302 &total_size, &block_size); 1308 &total_size, &block_size);
1303 h->cciss_read = CCISS_READ_16; 1309 h->cciss_read = CCISS_READ_16;
@@ -1306,6 +1312,7 @@ static void cciss_update_drive_info(int ctlr, int drv_index)
1306 h->cciss_read = CCISS_READ_10; 1312 h->cciss_read = CCISS_READ_10;
1307 h->cciss_write = CCISS_WRITE_10; 1313 h->cciss_write = CCISS_WRITE_10;
1308 } 1314 }
1315geo_inq:
1309 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size, 1316 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1310 inq_buff, &h->drv[drv_index]); 1317 inq_buff, &h->drv[drv_index]);
1311 1318
@@ -1917,13 +1924,14 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
1917 drv->raid_level = inq_buff->data_byte[8]; 1924 drv->raid_level = inq_buff->data_byte[8];
1918 } 1925 }
1919 drv->block_size = block_size; 1926 drv->block_size = block_size;
1920 drv->nr_blocks = total_size; 1927 drv->nr_blocks = total_size + 1;
1921 t = drv->heads * drv->sectors; 1928 t = drv->heads * drv->sectors;
1922 if (t > 1) { 1929 if (t > 1) {
1923 unsigned rem = sector_div(total_size, t); 1930 sector_t real_size = total_size + 1;
1931 unsigned long rem = sector_div(real_size, t);
1924 if (rem) 1932 if (rem)
1925 total_size++; 1933 real_size++;
1926 drv->cylinders = total_size; 1934 drv->cylinders = real_size;
1927 } 1935 }
1928 } else { /* Get geometry failed */ 1936 } else { /* Get geometry failed */
1929 printk(KERN_WARNING "cciss: reading geometry failed\n"); 1937 printk(KERN_WARNING "cciss: reading geometry failed\n");
@@ -1953,16 +1961,16 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
1953 ctlr, buf, sizeof(ReadCapdata_struct), 1961 ctlr, buf, sizeof(ReadCapdata_struct),
1954 1, logvol, 0, NULL, TYPE_CMD); 1962 1, logvol, 0, NULL, TYPE_CMD);
1955 if (return_code == IO_OK) { 1963 if (return_code == IO_OK) {
1956 *total_size = be32_to_cpu(*(__u32 *) buf->total_size)+1; 1964 *total_size = be32_to_cpu(*(__u32 *) buf->total_size);
1957 *block_size = be32_to_cpu(*(__u32 *) buf->block_size); 1965 *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
1958 } else { /* read capacity command failed */ 1966 } else { /* read capacity command failed */
1959 printk(KERN_WARNING "cciss: read capacity failed\n"); 1967 printk(KERN_WARNING "cciss: read capacity failed\n");
1960 *total_size = 0; 1968 *total_size = 0;
1961 *block_size = BLOCK_SIZE; 1969 *block_size = BLOCK_SIZE;
1962 } 1970 }
1963 if (*total_size != (__u32) 0) 1971 if (*total_size != 0)
1964 printk(KERN_INFO " blocks= %llu block_size= %d\n", 1972 printk(KERN_INFO " blocks= %llu block_size= %d\n",
1965 (unsigned long long)*total_size, *block_size); 1973 (unsigned long long)*total_size+1, *block_size);
1966 kfree(buf); 1974 kfree(buf);
1967 return; 1975 return;
1968} 1976}
@@ -1989,7 +1997,7 @@ cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size,
1989 1, logvol, 0, NULL, TYPE_CMD); 1997 1, logvol, 0, NULL, TYPE_CMD);
1990 } 1998 }
1991 if (return_code == IO_OK) { 1999 if (return_code == IO_OK) {
1992 *total_size = be64_to_cpu(*(__u64 *) buf->total_size)+1; 2000 *total_size = be64_to_cpu(*(__u64 *) buf->total_size);
1993 *block_size = be32_to_cpu(*(__u32 *) buf->block_size); 2001 *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
1994 } else { /* read capacity command failed */ 2002 } else { /* read capacity command failed */
1995 printk(KERN_WARNING "cciss: read capacity failed\n"); 2003 printk(KERN_WARNING "cciss: read capacity failed\n");
@@ -1997,7 +2005,7 @@ cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size,
1997 *block_size = BLOCK_SIZE; 2005 *block_size = BLOCK_SIZE;
1998 } 2006 }
1999 printk(KERN_INFO " blocks= %llu block_size= %d\n", 2007 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2000 (unsigned long long)*total_size, *block_size); 2008 (unsigned long long)*total_size+1, *block_size);
2001 kfree(buf); 2009 kfree(buf);
2002 return; 2010 return;
2003} 2011}
@@ -3119,8 +3127,9 @@ static void cciss_getgeometry(int cntl_num)
3119 } 3127 }
3120 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size); 3128 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3121 3129
3122 /* total_size = last LBA + 1 */ 3130 /* If read_capacity returns all F's the logical is >2TB */
3123 if(total_size == (__u32) 0) { 3131 /* so we switch to 16-byte CDBs for all read/write ops */
3132 if(total_size == 0xFFFFFFFFULL) {
3124 cciss_read_capacity_16(cntl_num, i, 0, 3133 cciss_read_capacity_16(cntl_num, i, 0,
3125 &total_size, &block_size); 3134 &total_size, &block_size);
3126 hba[cntl_num]->cciss_read = CCISS_READ_16; 3135 hba[cntl_num]->cciss_read = CCISS_READ_16;
@@ -3395,7 +3404,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3395 return -1; 3404 return -1;
3396} 3405}
3397 3406
3398static void __devexit cciss_remove_one(struct pci_dev *pdev) 3407static void cciss_remove_one(struct pci_dev *pdev)
3399{ 3408{
3400 ctlr_info_t *tmp_ptr; 3409 ctlr_info_t *tmp_ptr;
3401 int i, j; 3410 int i, j;
@@ -3419,9 +3428,10 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
3419 memset(flush_buf, 0, 4); 3428 memset(flush_buf, 0, 4);
3420 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL, 3429 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3421 TYPE_CMD); 3430 TYPE_CMD);
3422 if (return_code != IO_OK) { 3431 if (return_code == IO_OK) {
3423 printk(KERN_WARNING "Error Flushing cache on controller %d\n", 3432 printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
3424 i); 3433 } else {
3434 printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
3425 } 3435 }
3426 free_irq(hba[i]->intr[2], hba[i]); 3436 free_irq(hba[i]->intr[2], hba[i]);
3427 3437
@@ -3472,6 +3482,7 @@ static struct pci_driver cciss_pci_driver = {
3472 .probe = cciss_init_one, 3482 .probe = cciss_init_one,
3473 .remove = __devexit_p(cciss_remove_one), 3483 .remove = __devexit_p(cciss_remove_one),
3474 .id_table = cciss_pci_device_id, /* id_table */ 3484 .id_table = cciss_pci_device_id, /* id_table */
3485 .shutdown = cciss_remove_one,
3475}; 3486};
3476 3487
3477/* 3488/*
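[editor's note] The cciss changes key off READ CAPACITY(10) returning all F's, the SCSI convention for "larger than can be reported", to switch to READ CAPACITY(16) and 16-byte CDBs, and they treat the returned value as the last LBA rather than a block count. A minimal userspace sketch of that decision; struct capacity and needs_16byte_cdbs are illustrative names, not the cciss driver API.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

struct capacity {
        uint64_t last_lba;
        uint32_t block_size;
};

static bool needs_16byte_cdbs(const struct capacity *cap10)
{
        return cap10->last_lba == 0xFFFFFFFFULL;
}

int main(void)
{
        struct capacity small = { .last_lba = 976773167,    .block_size = 512 };
        struct capacity huge  = { .last_lba = 0xFFFFFFFFULL, .block_size = 512 };

        /* Block count is last LBA + 1, hence the "+ 1" in the geometry code. */
        printf("small: %llu blocks, 16-byte CDBs: %s\n",
               (unsigned long long)(small.last_lba + 1),
               needs_16byte_cdbs(&small) ? "yes" : "no");
        printf("huge:  16-byte CDBs: %s\n",
               needs_16byte_cdbs(&huge) ? "yes" : "no");
        return 0;
}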
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index dff3766f11..5872036e8a 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -1179,8 +1179,10 @@ static int __init mm_init(void)
1179 return -ENOMEM; 1179 return -ENOMEM;
1180 1180
1181 err = major_nr = register_blkdev(0, "umem"); 1181 err = major_nr = register_blkdev(0, "umem");
1182 if (err < 0) 1182 if (err < 0) {
1183 pci_unregister_driver(&mm_pci_driver);
1183 return -EIO; 1184 return -EIO;
1185 }
1184 1186
1185 for (i = 0; i < num_cards; i++) { 1187 for (i = 0; i < num_cards; i++) {
1186 mm_gendisk[i] = alloc_disk(1 << MM_SHIFT); 1188 mm_gendisk[i] = alloc_disk(1 << MM_SHIFT);
@@ -1207,6 +1209,7 @@ static int __init mm_init(void)
1207 return 0; 1209 return 0;
1208 1210
1209out: 1211out:
1212 pci_unregister_driver(&mm_pci_driver);
1210 unregister_blkdev(major_nr, "umem"); 1213 unregister_blkdev(major_nr, "umem");
1211 while (i--) 1214 while (i--)
1212 put_disk(mm_gendisk[i]); 1215 put_disk(mm_gendisk[i]);
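[editor's note] The umem fix makes the failure paths undo the earlier pci_register_driver(). A generic sketch of the unwind-in-reverse-order init pattern this restores; register_a/register_b are stand-ins, not kernel interfaces.

#include <stdio.h>

static int  register_a(void)   { return 0; }
static void unregister_a(void) { }
static int  register_b(void)   { return -1; }   /* pretend this fails */

static int demo_init(void)
{
        int err;

        err = register_a();
        if (err)
                return err;

        err = register_b();
        if (err)
                goto out_a;             /* undo only what succeeded */

        return 0;

out_a:
        unregister_a();
        return err;
}

int main(void)
{
        printf("init: %d\n", demo_init());
        return 0;
}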
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index dc13ebaced..44cd7b2ddf 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -376,6 +376,25 @@ static int send_request(struct request *req)
376 return 0; 376 return 0;
377} 377}
378 378
379static void viocd_end_request(struct request *req, int uptodate)
380{
381 int nsectors = req->hard_nr_sectors;
382
383 /*
384 * Make sure it's fully ended, and ensure that we process
385 * at least one sector.
386 */
387 if (blk_pc_request(req))
388 nsectors = (req->data_len + 511) >> 9;
389 if (!nsectors)
390 nsectors = 1;
391
392 if (end_that_request_first(req, uptodate, nsectors))
393 BUG();
394 add_disk_randomness(req->rq_disk);
395 blkdev_dequeue_request(req);
396 end_that_request_last(req, uptodate);
397}
379 398
380static int rwreq; 399static int rwreq;
381 400
@@ -385,11 +404,11 @@ static void do_viocd_request(request_queue_t *q)
385 404
386 while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) { 405 while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {
387 if (!blk_fs_request(req)) 406 if (!blk_fs_request(req))
388 end_request(req, 0); 407 viocd_end_request(req, 0);
389 else if (send_request(req) < 0) { 408 else if (send_request(req) < 0) {
390 printk(VIOCD_KERN_WARNING 409 printk(VIOCD_KERN_WARNING
391 "unable to send message to OS/400!"); 410 "unable to send message to OS/400!");
392 end_request(req, 0); 411 viocd_end_request(req, 0);
393 } else 412 } else
394 rwreq++; 413 rwreq++;
395 } 414 }
@@ -601,9 +620,9 @@ return_complete:
601 "with rc %d:0x%04X: %s\n", 620 "with rc %d:0x%04X: %s\n",
602 req, event->xRc, 621 req, event->xRc,
603 bevent->sub_result, err->msg); 622 bevent->sub_result, err->msg);
604 end_request(req, 0); 623 viocd_end_request(req, 0);
605 } else 624 } else
606 end_request(req, 1); 625 viocd_end_request(req, 1);
607 626
608 /* restart handling of incoming requests */ 627 /* restart handling of incoming requests */
609 spin_unlock_irqrestore(&viocd_reqlock, flags); 628 spin_unlock_irqrestore(&viocd_reqlock, flags);
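[editor's note] viocd_end_request() derives the sector count for packet commands from data_len by rounding up to 512-byte sectors and never completing zero sectors. A minimal sketch of that rounding; bytes_to_sectors is an illustrative name.

#include <stdio.h>

static unsigned int bytes_to_sectors(unsigned int data_len)
{
        unsigned int nsectors = (data_len + 511) >> 9;

        return nsectors ? nsectors : 1;
}

int main(void)
{
        printf("%u bytes -> %u sectors\n", 0u,   bytes_to_sectors(0));    /* 1 */
        printf("%u bytes -> %u sectors\n", 512u, bytes_to_sectors(512));  /* 1 */
        printf("%u bytes -> %u sectors\n", 513u, bytes_to_sectors(513));  /* 2 */
        return 0;
}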
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index d0a6dc5321..3429ece4ef 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -1026,16 +1026,17 @@ config MMTIMER
1026source "drivers/char/tpm/Kconfig" 1026source "drivers/char/tpm/Kconfig"
1027 1027
1028config TELCLOCK 1028config TELCLOCK
1029 tristate "Telecom clock driver for MPBL0010 ATCA SBC" 1029 tristate "Telecom clock driver for ATCA SBC"
1030 depends on EXPERIMENTAL && X86 1030 depends on EXPERIMENTAL && X86
1031 default n 1031 default n
1032 help 1032 help
1033 The telecom clock device is specific to the MPBL0010 ATCA computer and 1033 The telecom clock device is specific to the MPCBL0010 and MPCBL0050
1034 allows direct userspace access to the configuration of the telecom clock 1034 ATCA computers and allows direct userspace access to the
1035 configuration settings. This device is used for hardware synchronization 1035 configuration of the telecom clock configuration settings. This
1036 across the ATCA backplane fabric. Upon loading, the driver exports a 1036 device is used for hardware synchronization across the ATCA backplane
1037 sysfs directory, /sys/devices/platform/telco_clock, with a number of 1037 fabric. Upon loading, the driver exports a sysfs directory,
1038 files for controlling the behavior of this hardware. 1038 /sys/devices/platform/telco_clock, with a number of files for
1039 controlling the behavior of this hardware.
1039 1040
1040endmenu 1041endmenu
1041 1042
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 79f7c01db7..bcdb149c81 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -419,7 +419,7 @@ hp_zx1_enable (struct agp_bridge_data *bridge, u32 mode)
419 agp_device_command(command, (mode & AGP8X_MODE) != 0); 419 agp_device_command(command, (mode & AGP8X_MODE) != 0);
420} 420}
421 421
422struct const agp_bridge_driver hp_zx1_driver = { 422const struct agp_bridge_driver hp_zx1_driver = {
423 .owner = THIS_MODULE, 423 .owner = THIS_MODULE,
424 .size_type = FIXED_APER_SIZE, 424 .size_type = FIXED_APER_SIZE,
425 .configure = hp_zx1_configure, 425 .configure = hp_zx1_configure,
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
index 1cde376a45..53354bf83a 100644
--- a/drivers/char/agp/i460-agp.c
+++ b/drivers/char/agp/i460-agp.c
@@ -550,7 +550,7 @@ static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
550 | (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12); 550 | (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12);
551} 551}
552 552
553struct const agp_bridge_driver intel_i460_driver = { 553const struct agp_bridge_driver intel_i460_driver = {
554 .owner = THIS_MODULE, 554 .owner = THIS_MODULE,
555 .aperture_sizes = i460_sizes, 555 .aperture_sizes = i460_sizes,
556 .size_type = U8_APER_SIZE, 556 .size_type = U8_APER_SIZE,
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index 3c8f3d6336..3d83b461cc 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -210,7 +210,7 @@ parisc_agp_enable(struct agp_bridge_data *bridge, u32 mode)
210 agp_device_command(command, (mode & AGP8X_MODE) != 0); 210 agp_device_command(command, (mode & AGP8X_MODE) != 0);
211} 211}
212 212
213struct const agp_bridge_driver parisc_agp_driver = { 213static const struct agp_bridge_driver parisc_agp_driver = {
214 .owner = THIS_MODULE, 214 .owner = THIS_MODULE,
215 .size_type = FIXED_APER_SIZE, 215 .size_type = FIXED_APER_SIZE,
216 .configure = parisc_agp_configure, 216 .configure = parisc_agp_configure,
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index e12773acf3..ee8f50edde 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -247,7 +247,7 @@ static struct agp_bridge_data *sgi_tioca_find_bridge(struct pci_dev *pdev)
247 return bridge; 247 return bridge;
248} 248}
249 249
250struct const agp_bridge_driver sgi_tioca_driver = { 250const struct agp_bridge_driver sgi_tioca_driver = {
251 .owner = THIS_MODULE, 251 .owner = THIS_MODULE,
252 .size_type = U16_APER_SIZE, 252 .size_type = U16_APER_SIZE,
253 .configure = sgi_tioca_configure, 253 .configure = sgi_tioca_configure,
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 292b4ad1ae..91b062126a 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -489,7 +489,7 @@ static const struct aper_size_info_32 u3_sizes[8] =
489 {4, 1024, 0, 1} 489 {4, 1024, 0, 1}
490}; 490};
491 491
492struct const agp_bridge_driver uninorth_agp_driver = { 492const struct agp_bridge_driver uninorth_agp_driver = {
493 .owner = THIS_MODULE, 493 .owner = THIS_MODULE,
494 .aperture_sizes = (void *)uninorth_sizes, 494 .aperture_sizes = (void *)uninorth_sizes,
495 .size_type = U32_APER_SIZE, 495 .size_type = U32_APER_SIZE,
@@ -514,7 +514,7 @@ struct const agp_bridge_driver uninorth_agp_driver = {
514 .cant_use_aperture = 1, 514 .cant_use_aperture = 1,
515}; 515};
516 516
517struct const agp_bridge_driver u3_agp_driver = { 517const struct agp_bridge_driver u3_agp_driver = {
518 .owner = THIS_MODULE, 518 .owner = THIS_MODULE,
519 .aperture_sizes = (void *)u3_sizes, 519 .aperture_sizes = (void *)u3_sizes,
520 .size_type = U32_APER_SIZE, 520 .size_type = U32_APER_SIZE,
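[editor's note] The agp hunks above all reorder "struct const agp_bridge_driver" to "const struct agp_bridge_driver": the qualifier must precede (or follow) the complete struct type, it cannot sit between the struct keyword and the tag. A tiny sketch, with struct foo as a stand-in type.

struct foo { int x; };

/* struct const foo bad = { 1 };     -- rejected by the compiler */
const struct foo good = { 1 };       /* valid: a read-only object */

int main(void)
{
        return good.x - 1;
}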
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index 54df35527b..16dc5d1d3c 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -3501,6 +3501,7 @@ get_serial_info(struct cyclades_port *info,
3501 tmp.irq = cinfo->irq; 3501 tmp.irq = cinfo->irq;
3502 tmp.flags = info->flags; 3502 tmp.flags = info->flags;
3503 tmp.close_delay = info->close_delay; 3503 tmp.close_delay = info->close_delay;
3504 tmp.closing_wait = info->closing_wait;
3504 tmp.baud_base = info->baud; 3505 tmp.baud_base = info->baud;
3505 tmp.custom_divisor = info->custom_divisor; 3506 tmp.custom_divisor = info->custom_divisor;
3506 tmp.hub6 = 0; /*!!! */ 3507 tmp.hub6 = 0; /*!!! */
diff --git a/drivers/char/ds1286.c b/drivers/char/ds1286.c
index 6d58b03708..59146e3365 100644
--- a/drivers/char/ds1286.c
+++ b/drivers/char/ds1286.c
@@ -197,6 +197,7 @@ static int ds1286_ioctl(struct inode *inode, struct file *file,
197 197
198 hrs = alm_tm.tm_hour; 198 hrs = alm_tm.tm_hour;
199 min = alm_tm.tm_min; 199 min = alm_tm.tm_min;
200 sec = alm_tm.tm_sec;
200 201
201 if (hrs >= 24) 202 if (hrs >= 24)
202 hrs = 0xff; 203 hrs = 0xff;
@@ -204,9 +205,11 @@ static int ds1286_ioctl(struct inode *inode, struct file *file,
204 if (min >= 60) 205 if (min >= 60)
205 min = 0xff; 206 min = 0xff;
206 207
207 BIN_TO_BCD(sec); 208 if (sec != 0)
208 BIN_TO_BCD(min); 209 return -EINVAL;
209 BIN_TO_BCD(hrs); 210
211 min = BIN2BCD(min);
 212	hrs = BIN2BCD(hrs);
210 213
211 spin_lock(&ds1286_lock); 214 spin_lock(&ds1286_lock);
212 rtc_write(hrs, RTC_HOURS_ALARM); 215 rtc_write(hrs, RTC_HOURS_ALARM);
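[editor's note] The alarm path converts binary minutes and hours to BCD before writing the DS1286 alarm registers, which expect one decimal digit per nibble. A minimal sketch of the conversion; bin2bcd here is a local illustration of what the BIN2BCD macro computes.

#include <stdio.h>

static unsigned int bin2bcd(unsigned int val)
{
        return ((val / 10) << 4) | (val % 10);
}

int main(void)
{
        printf("59 -> 0x%02x\n", bin2bcd(59));  /* 0x59 */
        printf("23 -> 0x%02x\n", bin2bcd(23));  /* 0x23 */
        return 0;
}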
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 88fc24fc43..de5be30484 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -209,7 +209,6 @@ static void digi_send_break(struct channel *ch, int msec);
209static void setup_empty_event(struct tty_struct *tty, struct channel *ch); 209static void setup_empty_event(struct tty_struct *tty, struct channel *ch);
210void epca_setup(char *, int *); 210void epca_setup(char *, int *);
211 211
212static int get_termio(struct tty_struct *, struct termio __user *);
213static int pc_write(struct tty_struct *, const unsigned char *, int); 212static int pc_write(struct tty_struct *, const unsigned char *, int);
214static int pc_init(void); 213static int pc_init(void);
215static int init_PCI(void); 214static int init_PCI(void);
@@ -2362,15 +2361,6 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
2362 2361
2363 switch (cmd) 2362 switch (cmd)
2364 { /* Begin switch cmd */ 2363 { /* Begin switch cmd */
2365
2366#if 0 /* Handled by calling layer properly */
2367 case TCGETS:
2368 if (copy_to_user(argp, tty->termios, sizeof(struct ktermios)))
2369 return -EFAULT;
2370 return 0;
2371 case TCGETA:
2372 return get_termio(tty, argp);
2373#endif
2374 case TCSBRK: /* SVID version: non-zero arg --> no break */ 2364 case TCSBRK: /* SVID version: non-zero arg --> no break */
2375 retval = tty_check_change(tty); 2365 retval = tty_check_change(tty);
2376 if (retval) 2366 if (retval)
@@ -2735,13 +2725,6 @@ static void setup_empty_event(struct tty_struct *tty, struct channel *ch)
2735 memoff(ch); 2725 memoff(ch);
2736} /* End setup_empty_event */ 2726} /* End setup_empty_event */
2737 2727
2738/* --------------------- Begin get_termio ----------------------- */
2739
2740static int get_termio(struct tty_struct * tty, struct termio __user * termio)
2741{ /* Begin get_termio */
2742 return kernel_termios_to_user_termio(termio, tty->termios);
2743} /* End get_termio */
2744
2745/* ---------------------- Begin epca_setup -------------------------- */ 2728/* ---------------------- Begin epca_setup -------------------------- */
2746void epca_setup(char *str, int *ints) 2729void epca_setup(char *str, int *ints)
2747{ /* Begin epca_setup */ 2730{ /* Begin epca_setup */
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index a7b33d2f59..e22146546a 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2478,6 +2478,11 @@ static __devinit void default_find_bmc(void)
2478 if (!info) 2478 if (!info)
2479 return; 2479 return;
2480 2480
2481#ifdef CONFIG_PPC_MERGE
2482 if (check_legacy_ioport(ipmi_defaults[i].port))
2483 continue;
2484#endif
2485
2481 info->addr_source = NULL; 2486 info->addr_source = NULL;
2482 2487
2483 info->si_type = ipmi_defaults[i].type; 2488 info->si_type = ipmi_defaults[i].type;
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 0e82968c2f..f2e4ec4fd4 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -273,6 +273,7 @@ static ssize_t cm4040_read(struct file *filp, char __user *buf,
273 DEBUGP(6, dev, "BytesToRead=%lu\n", bytes_to_read); 273 DEBUGP(6, dev, "BytesToRead=%lu\n", bytes_to_read);
274 274
275 min_bytes_to_read = min(count, bytes_to_read + 5); 275 min_bytes_to_read = min(count, bytes_to_read + 5);
276 min_bytes_to_read = min_t(size_t, min_bytes_to_read, READ_WRITE_BUFFER_SIZE);
276 277
277 DEBUGP(6, dev, "Min=%lu\n", min_bytes_to_read); 278 DEBUGP(6, dev, "Min=%lu\n", min_bytes_to_read);
278 279
@@ -340,7 +341,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
340 return 0; 341 return 0;
341 } 342 }
342 343
343 if (count < 5) { 344 if ((count < 5) || (count > READ_WRITE_BUFFER_SIZE)) {
344 DEBUGP(2, dev, "<- cm4040_write buffersize=%Zd < 5\n", count); 345 DEBUGP(2, dev, "<- cm4040_write buffersize=%Zd < 5\n", count);
345 return -EIO; 346 return -EIO;
346 } 347 }
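[editor's note] The cm4040 changes clamp both the computed read length and the user-supplied write count to READ_WRITE_BUFFER_SIZE before the driver buffer is touched. A minimal sketch of clamping an untrusted length to a fixed buffer; BUF_SIZE and clamp_len are stand-ins for the driver's constant and its min_t() use.

#include <stddef.h>
#include <stdio.h>

#define BUF_SIZE 512

static size_t clamp_len(size_t requested)
{
        return requested < BUF_SIZE ? requested : BUF_SIZE;
}

int main(void)
{
        printf("%zu -> %zu\n", (size_t)100,    clamp_len(100));     /* 100 */
        printf("%zu -> %zu\n", (size_t)100000, clamp_len(100000)); /* 512 */
        return 0;
}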
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index f24c26d2db..e453268566 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1901,6 +1901,20 @@ static int init_dev(struct tty_driver *driver, int idx,
1901 /* check whether we're reopening an existing tty */ 1901 /* check whether we're reopening an existing tty */
1902 if (driver->flags & TTY_DRIVER_DEVPTS_MEM) { 1902 if (driver->flags & TTY_DRIVER_DEVPTS_MEM) {
1903 tty = devpts_get_tty(idx); 1903 tty = devpts_get_tty(idx);
1904 /*
1905 * If we don't have a tty here on a slave open, it's because
1906 * the master already started the close process and there's
1907 * no relation between devpts file and tty anymore.
1908 */
1909 if (!tty && driver->subtype == PTY_TYPE_SLAVE) {
1910 retval = -EIO;
1911 goto end_init;
1912 }
1913 /*
1914 * It's safe from now on because init_dev() is called with
1915 * tty_mutex held and release_dev() won't change tty->count
1916 * or tty->flags without having to grab tty_mutex
1917 */
1904 if (tty && driver->subtype == PTY_TYPE_MASTER) 1918 if (tty && driver->subtype == PTY_TYPE_MASTER)
1905 tty = tty->link; 1919 tty = tty->link;
1906 } else { 1920 } else {
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index ccaa6a39cb..d42060ede9 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -214,4 +214,7 @@ pm_good:
214 return clocksource_register(&clocksource_acpi_pm); 214 return clocksource_register(&clocksource_acpi_pm);
215} 215}
216 216
217module_init(init_acpi_pm_clocksource); 217/* We use fs_initcall because we want the PCI fixups to have run
218 * but we still need to load before device_initcall
219 */
220fs_initcall(init_acpi_pm_clocksource);
diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c
index 4f3925ceb3..1bde303b97 100644
--- a/drivers/clocksource/cyclone.c
+++ b/drivers/clocksource/cyclone.c
@@ -116,4 +116,4 @@ static int __init init_cyclone_clocksource(void)
116 return clocksource_register(&clocksource_cyclone); 116 return clocksource_register(&clocksource_cyclone);
117} 117}
118 118
119module_init(init_cyclone_clocksource); 119arch_initcall(init_cyclone_clocksource);
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index a44db75bc2..a905f78203 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -128,7 +128,7 @@ EXPORT_SYMBOL_GPL(cn_netlink_send);
128 */ 128 */
129static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), void *data) 129static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), void *data)
130{ 130{
131 struct cn_callback_entry *__cbq; 131 struct cn_callback_entry *__cbq, *__new_cbq;
132 struct cn_dev *dev = &cdev; 132 struct cn_dev *dev = &cdev;
133 int err = -ENODEV; 133 int err = -ENODEV;
134 134
@@ -148,27 +148,27 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
148 } else { 148 } else {
149 struct cn_callback_data *d; 149 struct cn_callback_data *d;
150 150
151 __cbq = kzalloc(sizeof(*__cbq), GFP_ATOMIC); 151 err = -ENOMEM;
152 if (__cbq) { 152 __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC);
153 d = &__cbq->data; 153 if (__new_cbq) {
154 d = &__new_cbq->data;
154 d->callback_priv = msg; 155 d->callback_priv = msg;
155 d->callback = __cbq->data.callback; 156 d->callback = __cbq->data.callback;
156 d->ddata = data; 157 d->ddata = data;
157 d->destruct_data = destruct_data; 158 d->destruct_data = destruct_data;
158 d->free = __cbq; 159 d->free = __new_cbq;
159 160
160 INIT_WORK(&__cbq->work, 161 INIT_WORK(&__new_cbq->work,
161 &cn_queue_wrapper); 162 &cn_queue_wrapper);
162 163
163 if (queue_work(dev->cbdev->cn_queue, 164 if (queue_work(dev->cbdev->cn_queue,
164 &__cbq->work)) 165 &__new_cbq->work))
165 err = 0; 166 err = 0;
166 else { 167 else {
167 kfree(__cbq); 168 kfree(__new_cbq);
168 err = -EINVAL; 169 err = -EINVAL;
169 } 170 }
170 } else 171 }
171 err = -ENOMEM;
172 } 172 }
173 break; 173 break;
174 } 174 }
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a12d6a236d..f52facc570 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1752,7 +1752,7 @@ static struct notifier_block __cpuinitdata cpufreq_cpu_notifier =
1752 * (and isn't unregistered in the meantime). 1752 * (and isn't unregistered in the meantime).
1753 * 1753 *
1754 */ 1754 */
1755int cpufreq_register_driver(const struct cpufreq_driver *driver_data) 1755int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1756{ 1756{
1757 unsigned long flags; 1757 unsigned long flags;
1758 int ret; 1758 int ret;
@@ -1817,7 +1817,7 @@ EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1817 * Returns zero if successful, and -EINVAL if the cpufreq_driver is 1817 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1818 * currently not initialised. 1818 * currently not initialised.
1819 */ 1819 */
1820int cpufreq_unregister_driver(const struct cpufreq_driver *driver) 1820int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1821{ 1821{
1822 unsigned long flags; 1822 unsigned long flags;
1823 1823
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 0eb62841e9..6d3840e629 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -99,9 +99,8 @@ do_crypt(void *src, void *dst, int len, u32 flags)
99static unsigned int 99static unsigned int
100geode_aes_crypt(struct geode_aes_op *op) 100geode_aes_crypt(struct geode_aes_op *op)
101{ 101{
102
103 u32 flags = 0; 102 u32 flags = 0;
104 int iflags; 103 unsigned long iflags;
105 104
106 if (op->len == 0 || op->src == op->dst) 105 if (op->len == 0 || op->src == op->dst)
107 return 0; 106 return 0;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 7452399501..f4ee1afe48 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -667,7 +667,6 @@ struct hid_device *hid_parse_report(__u8 *start, unsigned size)
667 667
668 if (item.format != HID_ITEM_FORMAT_SHORT) { 668 if (item.format != HID_ITEM_FORMAT_SHORT) {
669 dbg("unexpected long global item"); 669 dbg("unexpected long global item");
670 kfree(device->collection);
671 hid_free_device(device); 670 hid_free_device(device);
672 kfree(parser); 671 kfree(parser);
673 return NULL; 672 return NULL;
@@ -676,7 +675,6 @@ struct hid_device *hid_parse_report(__u8 *start, unsigned size)
676 if (dispatch_type[item.type](parser, &item)) { 675 if (dispatch_type[item.type](parser, &item)) {
677 dbg("item %u %u %u %u parsing failed\n", 676 dbg("item %u %u %u %u parsing failed\n",
678 item.format, (unsigned)item.size, (unsigned)item.type, (unsigned)item.tag); 677 item.format, (unsigned)item.size, (unsigned)item.type, (unsigned)item.tag);
679 kfree(device->collection);
680 hid_free_device(device); 678 hid_free_device(device);
681 kfree(parser); 679 kfree(parser);
682 return NULL; 680 return NULL;
@@ -685,14 +683,12 @@ struct hid_device *hid_parse_report(__u8 *start, unsigned size)
685 if (start == end) { 683 if (start == end) {
686 if (parser->collection_stack_ptr) { 684 if (parser->collection_stack_ptr) {
687 dbg("unbalanced collection at end of report description"); 685 dbg("unbalanced collection at end of report description");
688 kfree(device->collection);
689 hid_free_device(device); 686 hid_free_device(device);
690 kfree(parser); 687 kfree(parser);
691 return NULL; 688 return NULL;
692 } 689 }
693 if (parser->local.delimiter_depth) { 690 if (parser->local.delimiter_depth) {
694 dbg("unbalanced delimiter at end of report description"); 691 dbg("unbalanced delimiter at end of report description");
695 kfree(device->collection);
696 hid_free_device(device); 692 hid_free_device(device);
697 kfree(parser); 693 kfree(parser);
698 return NULL; 694 return NULL;
@@ -703,7 +699,6 @@ struct hid_device *hid_parse_report(__u8 *start, unsigned size)
703 } 699 }
704 700
705 dbg("item fetching failed at offset %d\n", (int)(end - start)); 701 dbg("item fetching failed at offset %d\n", (int)(end - start));
706 kfree(device->collection);
707 hid_free_device(device); 702 hid_free_device(device);
708 kfree(parser); 703 kfree(parser);
709 return NULL; 704 return NULL;
@@ -880,7 +875,7 @@ static void hid_output_field(struct hid_field *field, __u8 *data)
880 875
881 /* make sure the unused bits in the last byte are zeros */ 876 /* make sure the unused bits in the last byte are zeros */
882 if (count > 0 && size > 0) 877 if (count > 0 && size > 0)
883 data[(count*size-1)/8] = 0; 878 data[(offset+count*size-1)/8] = 0;
884 879
885 for (n = 0; n < count; n++) { 880 for (n = 0; n < count; n++) {
886 if (field->logical_minimum < 0) /* signed values */ 881 if (field->logical_minimum < 0) /* signed values */
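[editor's note] The hid_output_field fix zeroes the byte holding the last bit of the field, and the index of that byte must include the field's bit offset within the report. A minimal sketch of the bit/byte arithmetic; last_byte_index and the sample field layout are illustrative.

#include <stdio.h>

static unsigned int last_byte_index(unsigned int offset,
                                    unsigned int count, unsigned int size)
{
        return (offset + count * size - 1) / 8;
}

int main(void)
{
        /* 3 usages of 5 bits each, starting 12 bits into the report. */
        printf("with offset:    byte %u\n", last_byte_index(12, 3, 5)); /* 3 */
        printf("offset ignored: byte %u\n", last_byte_index(0, 3, 5));  /* 1 */
        return 0;
}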
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 89241be4ec..83c4126b37 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -29,6 +29,7 @@
29 */ 29 */
30 30
31#include <linux/hid.h> 31#include <linux/hid.h>
32#include <linux/hid-debug.h>
32 33
33struct hid_usage_entry { 34struct hid_usage_entry {
34 unsigned page; 35 unsigned page;
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 25d180a24f..c8434023ba 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -2,7 +2,7 @@
2 * $Id: hid-input.c,v 1.2 2002/04/23 00:59:25 rdamazio Exp $ 2 * $Id: hid-input.c,v 1.2 2002/04/23 00:59:25 rdamazio Exp $
3 * 3 *
4 * Copyright (c) 2000-2001 Vojtech Pavlik 4 * Copyright (c) 2000-2001 Vojtech Pavlik
5 * Copyright (c) 2006 Jiri Kosina 5 * Copyright (c) 2006-2007 Jiri Kosina
6 * 6 *
7 * HID to Linux Input mapping 7 * HID to Linux Input mapping
8 */ 8 */
@@ -71,7 +71,6 @@ static const struct {
71#define map_led(c) do { usage->code = c; usage->type = EV_LED; bit = input->ledbit; max = LED_MAX; } while (0) 71#define map_led(c) do { usage->code = c; usage->type = EV_LED; bit = input->ledbit; max = LED_MAX; } while (0)
72 72
73#define map_abs_clear(c) do { map_abs(c); clear_bit(c, bit); } while (0) 73#define map_abs_clear(c) do { map_abs(c); clear_bit(c, bit); } while (0)
74#define map_rel_clear(c) do { map_rel(c); clear_bit(c, bit); } while (0)
75#define map_key_clear(c) do { map_key(c); clear_bit(c, bit); } while (0) 74#define map_key_clear(c) do { map_key(c); clear_bit(c, bit); } while (0)
76 75
77#ifdef CONFIG_USB_HIDINPUT_POWERBOOK 76#ifdef CONFIG_USB_HIDINPUT_POWERBOOK
@@ -296,7 +295,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
296 } 295 }
297 } 296 }
298 297
299 map_key_clear(code); 298 map_key(code);
300 break; 299 break;
301 300
302 301
@@ -347,9 +346,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
347 case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ: 346 case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ:
348 case HID_GD_SLIDER: case HID_GD_DIAL: case HID_GD_WHEEL: 347 case HID_GD_SLIDER: case HID_GD_DIAL: case HID_GD_WHEEL:
349 if (field->flags & HID_MAIN_ITEM_RELATIVE) 348 if (field->flags & HID_MAIN_ITEM_RELATIVE)
350 map_rel_clear(usage->hid & 0xf); 349 map_rel(usage->hid & 0xf);
351 else 350 else
352 map_abs_clear(usage->hid & 0xf); 351 map_abs(usage->hid & 0xf);
353 break; 352 break;
354 353
355 case HID_GD_HATSWITCH: 354 case HID_GD_HATSWITCH:
@@ -519,7 +518,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
519 case 0x22f: map_key_clear(KEY_ZOOMRESET); break; 518 case 0x22f: map_key_clear(KEY_ZOOMRESET); break;
520 case 0x233: map_key_clear(KEY_SCROLLUP); break; 519 case 0x233: map_key_clear(KEY_SCROLLUP); break;
521 case 0x234: map_key_clear(KEY_SCROLLDOWN); break; 520 case 0x234: map_key_clear(KEY_SCROLLDOWN); break;
522 case 0x238: map_rel_clear(REL_HWHEEL); break; 521 case 0x238: map_rel(REL_HWHEEL); break;
523 case 0x25f: map_key_clear(KEY_CANCEL); break; 522 case 0x25f: map_key_clear(KEY_CANCEL); break;
524 case 0x279: map_key_clear(KEY_REDO); break; 523 case 0x279: map_key_clear(KEY_REDO); break;
525 524
@@ -532,6 +531,26 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
532 case 0x302: map_key_clear(KEY_PROG2); break; 531 case 0x302: map_key_clear(KEY_PROG2); break;
533 case 0x303: map_key_clear(KEY_PROG3); break; 532 case 0x303: map_key_clear(KEY_PROG3); break;
534 533
534 /* Reported on Logitech S510 wireless keyboard */
535 case 0x101f: map_key_clear(KEY_ZOOMIN); break;
536 case 0x1020: map_key_clear(KEY_ZOOMOUT); break;
537 case 0x1021: map_key_clear(KEY_ZOOMRESET); break;
538 /* this one is marked as 'Rotate' */
539 case 0x1028: map_key_clear(KEY_ANGLE); break;
540 case 0x1029: map_key_clear(KEY_SHUFFLE); break;
541 case 0x1041: map_key_clear(KEY_BATTERY); break;
542 case 0x1042: map_key_clear(KEY_WORDPROCESSOR); break;
543 case 0x1043: map_key_clear(KEY_SPREADSHEET); break;
544 case 0x1044: map_key_clear(KEY_PRESENTATION); break;
545 case 0x1045: map_key_clear(KEY_UNDO); break;
546 case 0x1046: map_key_clear(KEY_REDO); break;
547 case 0x1047: map_key_clear(KEY_PRINT); break;
548 case 0x1048: map_key_clear(KEY_SAVE); break;
549 case 0x1049: map_key_clear(KEY_PROG1); break;
550 case 0x104a: map_key_clear(KEY_PROG2); break;
551 case 0x104b: map_key_clear(KEY_PROG3); break;
552 case 0x104c: map_key_clear(KEY_PROG4); break;
553
535 default: goto ignore; 554 default: goto ignore;
536 } 555 }
537 break; 556 break;
@@ -647,6 +666,12 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
647 666
648 set_bit(usage->type, input->evbit); 667 set_bit(usage->type, input->evbit);
649 668
669 if (device->quirks & HID_QUIRK_DUPLICATE_USAGES &&
670 (usage->type == EV_KEY ||
671 usage->type == EV_REL ||
672 usage->type == EV_ABS))
673 clear_bit(usage->code, bit);
674
650 while (usage->code <= max && test_and_set_bit(usage->code, bit)) 675 while (usage->code <= max && test_and_set_bit(usage->code, bit))
651 usage->code = find_next_zero_bit(bit, max + 1, usage->code); 676 usage->code = find_next_zero_bit(bit, max + 1, usage->code);
652 677
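[editor's note] When two usages would map to the same input code, the configure path above either clears the duplicate (HID_QUIRK_DUPLICATE_USAGES) or walks forward to the next free code with test_and_set_bit()/find_next_zero_bit(). A minimal userspace sketch of the "claim the next free code" walk; a plain bool array stands in for the capability bitmap.

#include <stdio.h>
#include <stdbool.h>

#define MAX_CODE 16

static bool used[MAX_CODE];

static int claim_code(int code)
{
        while (code < MAX_CODE && used[code])
                code++;                 /* taken, try the next one */
        if (code >= MAX_CODE)
                return -1;
        used[code] = true;
        return code;
}

int main(void)
{
        printf("first:  %d\n", claim_code(3));  /* 3 */
        printf("second: %d\n", claim_code(3));  /* 4, because 3 is taken */
        return 0;
}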
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 4d44a2db29..fb19dbb31e 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -495,6 +495,16 @@ config I2C_VERSATILE
495 This driver can also be built as a module. If so, the module 495 This driver can also be built as a module. If so, the module
496 will be called i2c-versatile. 496 will be called i2c-versatile.
497 497
498config I2C_ACORN
499 bool "Acorn IOC/IOMD I2C bus support"
500 depends on I2C && ARCH_ACORN
501 default y
502 select I2C_ALGOBIT
503 help
504 Say yes if you want to support the I2C bus on Acorn platforms.
505
506 If you don't know, say Y.
507
498config I2C_VIA 508config I2C_VIA
499 tristate "VIA 82C586B" 509 tristate "VIA 82C586B"
500 depends on I2C && PCI && EXPERIMENTAL 510 depends on I2C && PCI && EXPERIMENTAL
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 03505aa44b..290b540183 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o
42obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o 42obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o
43obj-$(CONFIG_I2C_STUB) += i2c-stub.o 43obj-$(CONFIG_I2C_STUB) += i2c-stub.o
44obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o 44obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
45obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o
45obj-$(CONFIG_I2C_VIA) += i2c-via.o 46obj-$(CONFIG_I2C_VIA) += i2c-via.o
46obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o 47obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o
47obj-$(CONFIG_I2C_VOODOO3) += i2c-voodoo3.o 48obj-$(CONFIG_I2C_VOODOO3) += i2c-voodoo3.o
diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c
new file mode 100644
index 0000000000..09bd7f40b9
--- /dev/null
+++ b/drivers/i2c/busses/i2c-acorn.c
@@ -0,0 +1,97 @@
1/*
2 * linux/drivers/acorn/char/i2c.c
3 *
4 * Copyright (C) 2000 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * ARM IOC/IOMD i2c driver.
11 *
12 * On Acorn machines, the following i2c devices are on the bus:
13 * - PCF8583 real time clock & static RAM
14 */
15#include <linux/init.h>
16#include <linux/i2c.h>
17#include <linux/i2c-algo-bit.h>
18
19#include <asm/hardware.h>
20#include <asm/io.h>
21#include <asm/hardware/ioc.h>
22#include <asm/system.h>
23
24#define FORCE_ONES 0xdc
25#define SCL 0x02
26#define SDA 0x01
27
28/*
29 * We must preserve all non-i2c output bits in IOC_CONTROL.
30 * Note also that we need to preserve the value of SCL and
31 * SDA outputs as well (which may be different from the
32 * values read back from IOC_CONTROL).
33 */
34static u_int force_ones;
35
36static void ioc_setscl(void *data, int state)
37{
38 u_int ioc_control = ioc_readb(IOC_CONTROL) & ~(SCL | SDA);
39 u_int ones = force_ones;
40
41 if (state)
42 ones |= SCL;
43 else
44 ones &= ~SCL;
45
46 force_ones = ones;
47
48 ioc_writeb(ioc_control | ones, IOC_CONTROL);
49}
50
51static void ioc_setsda(void *data, int state)
52{
53 u_int ioc_control = ioc_readb(IOC_CONTROL) & ~(SCL | SDA);
54 u_int ones = force_ones;
55
56 if (state)
57 ones |= SDA;
58 else
59 ones &= ~SDA;
60
61 force_ones = ones;
62
63 ioc_writeb(ioc_control | ones, IOC_CONTROL);
64}
65
66static int ioc_getscl(void *data)
67{
68 return (ioc_readb(IOC_CONTROL) & SCL) != 0;
69}
70
71static int ioc_getsda(void *data)
72{
73 return (ioc_readb(IOC_CONTROL) & SDA) != 0;
74}
75
76static struct i2c_algo_bit_data ioc_data = {
77 .setsda = ioc_setsda,
78 .setscl = ioc_setscl,
79 .getsda = ioc_getsda,
80 .getscl = ioc_getscl,
81 .udelay = 80,
82 .timeout = 100
83};
84
85static struct i2c_adapter ioc_ops = {
86 .id = I2C_HW_B_IOC,
87 .algo_data = &ioc_data,
88};
89
90static int __init i2c_ioc_init(void)
91{
92 force_ones = FORCE_ONES | SCL | SDA;
93
94 return i2c_bit_add_bus(&ioc_ops);
95}
96
97__initcall(i2c_ioc_init);
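[editor's note] The driver keeps the intended SCL/SDA output state in force_ones because reading IOC_CONTROL returns the pin levels, not what was last driven, and a slave holding a line low would otherwise be latched into the next write. A minimal userspace sketch of that shadow-and-reapply pattern; reg_read/reg_write and pin_levels stand in for ioc_readb/ioc_writeb and the real hardware.

#include <stdint.h>
#include <stdio.h>

#define SCL 0x02
#define SDA 0x01

static uint8_t pin_levels = 0xff;       /* what the register reads back */
static uint8_t driven;                  /* shadow of what we drive */

static uint8_t reg_read(void)      { return pin_levels; }
static void    reg_write(uint8_t v){ printf("write 0x%02x\n", v); }

static void set_scl(int state)
{
        uint8_t other_bits = reg_read() & (uint8_t)~(SCL | SDA);

        if (state)
                driven |= SCL;
        else
                driven &= (uint8_t)~SCL;

        /* preserve the non-i2c bits and the driven SDA state */
        reg_write(other_bits | driven);
}

int main(void)
{
        driven = SCL | SDA;
        set_scl(0);     /* drive SCL low without disturbing SDA */
        set_scl(1);
        return 0;
}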
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 49234e32fd..5d134bb75b 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -1023,7 +1023,7 @@ config BLK_DEV_4DRIVES
1023config BLK_DEV_ALI14XX 1023config BLK_DEV_ALI14XX
1024 tristate "ALI M14xx support" 1024 tristate "ALI M14xx support"
1025 help 1025 help
1026 This driver is enabled at runtime using the "ide0=ali14xx" kernel 1026 This driver is enabled at runtime using the "ali14xx.probe" kernel
1027 boot parameter. It enables support for the secondary IDE interface 1027 boot parameter. It enables support for the secondary IDE interface
1028 of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster 1028 of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster
1029 I/O speeds to be set as well. See the files 1029 I/O speeds to be set as well. See the files
@@ -1033,7 +1033,7 @@ config BLK_DEV_ALI14XX
1033config BLK_DEV_DTC2278 1033config BLK_DEV_DTC2278
1034 tristate "DTC-2278 support" 1034 tristate "DTC-2278 support"
1035 help 1035 help
1036 This driver is enabled at runtime using the "ide0=dtc2278" kernel 1036 This driver is enabled at runtime using the "dtc2278.probe" kernel
1037 boot parameter. It enables support for the secondary IDE interface 1037 boot parameter. It enables support for the secondary IDE interface
1038 of the DTC-2278 card, and permits faster I/O speeds to be set as 1038 of the DTC-2278 card, and permits faster I/O speeds to be set as
1039 well. See the <file:Documentation/ide.txt> and 1039 well. See the <file:Documentation/ide.txt> and
@@ -1042,7 +1042,7 @@ config BLK_DEV_DTC2278
1042config BLK_DEV_HT6560B 1042config BLK_DEV_HT6560B
1043 tristate "Holtek HT6560B support" 1043 tristate "Holtek HT6560B support"
1044 help 1044 help
1045 This driver is enabled at runtime using the "ide0=ht6560b" kernel 1045 This driver is enabled at runtime using the "ht6560b.probe" kernel
1046 boot parameter. It enables support for the secondary IDE interface 1046 boot parameter. It enables support for the secondary IDE interface
1047 of the Holtek card, and permits faster I/O speeds to be set as well. 1047 of the Holtek card, and permits faster I/O speeds to be set as well.
1048 See the <file:Documentation/ide.txt> and 1048 See the <file:Documentation/ide.txt> and
@@ -1051,7 +1051,7 @@ config BLK_DEV_HT6560B
1051config BLK_DEV_QD65XX 1051config BLK_DEV_QD65XX
1052 tristate "QDI QD65xx support" 1052 tristate "QDI QD65xx support"
1053 help 1053 help
1054 This driver is enabled at runtime using the "ide0=qd65xx" kernel 1054 This driver is enabled at runtime using the "qd65xx.probe" kernel
1055 boot parameter. It permits faster I/O speeds to be set. See the 1055 boot parameter. It permits faster I/O speeds to be set. See the
1056 <file:Documentation/ide.txt> and <file:drivers/ide/legacy/qd65xx.c> for 1056 <file:Documentation/ide.txt> and <file:drivers/ide/legacy/qd65xx.c> for
1057 more info. 1057 more info.
@@ -1059,7 +1059,7 @@ config BLK_DEV_QD65XX
1059config BLK_DEV_UMC8672 1059config BLK_DEV_UMC8672
1060 tristate "UMC-8672 support" 1060 tristate "UMC-8672 support"
1061 help 1061 help
1062 This driver is enabled at runtime using the "ide0=umc8672" kernel 1062 This driver is enabled at runtime using the "umc8672.probe" kernel
1063 boot parameter. It enables support for the secondary IDE interface 1063 boot parameter. It enables support for the secondary IDE interface
1064 of the UMC-8672, and permits faster I/O speeds to be set as well. 1064 of the UMC-8672, and permits faster I/O speeds to be set as well.
1065 See the files <file:Documentation/ide.txt> and 1065 See the files <file:Documentation/ide.txt> and
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index 6b2d152351..556455fbfa 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -17,8 +17,6 @@
17 * device can't do DMA handshaking for some stupid reason. We don't need to do that. 17 * device can't do DMA handshaking for some stupid reason. We don't need to do that.
18 */ 18 */
19 19
20#undef REALLY_SLOW_IO /* most systems can safely undef this */
21
22#include <linux/types.h> 20#include <linux/types.h>
23#include <linux/kernel.h> 21#include <linux/kernel.h>
24#include <linux/timer.h> 22#include <linux/timer.h>
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index e2cea1889c..37aa6ddd97 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -43,8 +43,6 @@
43 43
44#define IDEDISK_VERSION "1.18" 44#define IDEDISK_VERSION "1.18"
45 45
46#undef REALLY_SLOW_IO /* most systems can safely undef this */
47
48//#define DEBUG 46//#define DEBUG
49 47
50#include <linux/module.h> 48#include <linux/module.h>
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index c67b3b1e6f..bd513f5a23 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -583,6 +583,8 @@ u8 eighty_ninty_three (ide_drive_t *drive)
583 if(!(drive->id->hw_config & 0x4000)) 583 if(!(drive->id->hw_config & 0x4000))
584 return 0; 584 return 0;
585#endif /* CONFIG_IDEDMA_IVB */ 585#endif /* CONFIG_IDEDMA_IVB */
586 if (!(drive->id->hw_config & 0x2000))
587 return 0;
586 return 1; 588 return 1;
587} 589}
588 590
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 8afce4ceea..68719314df 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -345,16 +345,16 @@ static int ide_scan_pio_blacklist (char *model)
345 345
346/** 346/**
347 * ide_get_best_pio_mode - get PIO mode from drive 347 * ide_get_best_pio_mode - get PIO mode from drive
348 * @driver: drive to consider 348 * @drive: drive to consider
349 * @mode_wanted: preferred mode 349 * @mode_wanted: preferred mode
350 * @max_mode: highest allowed 350 * @max_mode: highest allowed mode
351 * @d: pio data 351 * @d: PIO data
352 * 352 *
353 * This routine returns the recommended PIO settings for a given drive, 353 * This routine returns the recommended PIO settings for a given drive,
354 * based on the drive->id information and the ide_pio_blacklist[]. 354 * based on the drive->id information and the ide_pio_blacklist[].
355 * This is used by most chipset support modules when "auto-tuning".
356 * 355 *
357 * Drive PIO mode auto selection 356 * Drive PIO mode is auto-selected if 255 is passed as mode_wanted.
357 * This is used by most chipset support modules when "auto-tuning".
358 */ 358 */
359 359
360u8 ide_get_best_pio_mode (ide_drive_t *drive, u8 mode_wanted, u8 max_mode, ide_pio_data_t *d) 360u8 ide_get_best_pio_mode (ide_drive_t *drive, u8 mode_wanted, u8 max_mode, ide_pio_data_t *d)
@@ -367,6 +367,7 @@ u8 ide_get_best_pio_mode (ide_drive_t *drive, u8 mode_wanted, u8 max_mode, ide_p
367 367
368 if (mode_wanted != 255) { 368 if (mode_wanted != 255) {
369 pio_mode = mode_wanted; 369 pio_mode = mode_wanted;
370 use_iordy = (pio_mode > 2);
370 } else if (!drive->id) { 371 } else if (!drive->id) {
371 pio_mode = 0; 372 pio_mode = 0;
372 } else if ((pio_mode = ide_scan_pio_blacklist(id->model)) != -1) { 373 } else if ((pio_mode = ide_scan_pio_blacklist(id->model)) != -1) {
@@ -396,19 +397,12 @@ u8 ide_get_best_pio_mode (ide_drive_t *drive, u8 mode_wanted, u8 max_mode, ide_p
396 } 397 }
397 } 398 }
398 399
399#if 0
400 if (drive->id->major_rev_num & 0x0004) printk("ATA-2 ");
401#endif
402
403 /* 400 /*
404 * Conservative "downgrade" for all pre-ATA2 drives 401 * Conservative "downgrade" for all pre-ATA2 drives
405 */ 402 */
406 if (pio_mode && pio_mode < 4) { 403 if (pio_mode && pio_mode < 4) {
407 pio_mode--; 404 pio_mode--;
408 overridden = 1; 405 overridden = 1;
409#if 0
410 use_iordy = (pio_mode > 2);
411#endif
412 if (cycle_time && cycle_time < ide_pio_timings[pio_mode].cycle_time) 406 if (cycle_time && cycle_time < ide_pio_timings[pio_mode].cycle_time)
413 cycle_time = 0; /* use standard timing */ 407 cycle_time = 0; /* use standard timing */
414 } 408 }
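[editor's note] The ide-lib change sets use_iordy when a specific PIO mode is requested, since IORDY flow control only applies to PIO modes above 2. A minimal sketch of that decision; pio_needs_iordy is an illustrative name.

#include <stdio.h>
#include <stdbool.h>

static bool pio_needs_iordy(unsigned int pio_mode)
{
        return pio_mode > 2;
}

int main(void)
{
        unsigned int mode;

        for (mode = 0; mode <= 4; mode++)
                printf("PIO %u: IORDY %s\n", mode,
                       pio_needs_iordy(mode) ? "yes" : "no");
        return 0;
}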
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 8afbd6cb94..8f15c23aa7 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -31,8 +31,6 @@
31 * valid after probe time even with noprobe 31 * valid after probe time even with noprobe
32 */ 32 */
33 33
34#undef REALLY_SLOW_IO /* most systems can safely undef this */
35
36#include <linux/module.h> 34#include <linux/module.h>
37#include <linux/types.h> 35#include <linux/types.h>
38#include <linux/string.h> 36#include <linux/string.h>
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index b3c0818c5c..dfbd744585 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -126,8 +126,6 @@
126#define REVISION "Revision: 7.00alpha2" 126#define REVISION "Revision: 7.00alpha2"
127#define VERSION "Id: ide.c 7.00a2 20020906" 127#define VERSION "Id: ide.c 7.00a2 20020906"
128 128
129#undef REALLY_SLOW_IO /* most systems can safely undef this */
130
131#define _IDE_C /* Tell ide.h it's really us */ 129#define _IDE_C /* Tell ide.h it's really us */
132 130
133#include <linux/module.h> 131#include <linux/module.h>
@@ -1486,23 +1484,23 @@ static int __init match_parm (char *s, const char *keywords[], int vals[], int m
1486} 1484}
1487 1485
1488#ifdef CONFIG_BLK_DEV_ALI14XX 1486#ifdef CONFIG_BLK_DEV_ALI14XX
1489static int __initdata probe_ali14xx; 1487extern int probe_ali14xx;
1490extern int ali14xx_init(void); 1488extern int ali14xx_init(void);
1491#endif 1489#endif
1492#ifdef CONFIG_BLK_DEV_UMC8672 1490#ifdef CONFIG_BLK_DEV_UMC8672
1493static int __initdata probe_umc8672; 1491extern int probe_umc8672;
1494extern int umc8672_init(void); 1492extern int umc8672_init(void);
1495#endif 1493#endif
1496#ifdef CONFIG_BLK_DEV_DTC2278 1494#ifdef CONFIG_BLK_DEV_DTC2278
1497static int __initdata probe_dtc2278; 1495extern int probe_dtc2278;
1498extern int dtc2278_init(void); 1496extern int dtc2278_init(void);
1499#endif 1497#endif
1500#ifdef CONFIG_BLK_DEV_HT6560B 1498#ifdef CONFIG_BLK_DEV_HT6560B
1501static int __initdata probe_ht6560b; 1499extern int probe_ht6560b;
1502extern int ht6560b_init(void); 1500extern int ht6560b_init(void);
1503#endif 1501#endif
1504#ifdef CONFIG_BLK_DEV_QD65XX 1502#ifdef CONFIG_BLK_DEV_QD65XX
1505static int __initdata probe_qd65xx; 1503extern int probe_qd65xx;
1506extern int qd65xx_init(void); 1504extern int qd65xx_init(void);
1507#endif 1505#endif
1508 1506
@@ -1580,7 +1578,7 @@ static int __init ide_setup(char *s)
1580 */ 1578 */
1581 if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) { 1579 if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) {
1582 const char *hd_words[] = { 1580 const char *hd_words[] = {
1583 "none", "noprobe", "nowerr", "cdrom", "serialize", 1581 "none", "noprobe", "nowerr", "cdrom", "minus5",
1584 "autotune", "noautotune", "minus8", "swapdata", "bswap", 1582 "autotune", "noautotune", "minus8", "swapdata", "bswap",
1585 "noflush", "remap", "remap63", "scsi", NULL }; 1583 "noflush", "remap", "remap63", "scsi", NULL };
1586 unit = s[2] - 'a'; 1584 unit = s[2] - 'a';
@@ -1608,9 +1606,6 @@ static int __init ide_setup(char *s)
1608 drive->ready_stat = 0; 1606 drive->ready_stat = 0;
1609 hwif->noprobe = 0; 1607 hwif->noprobe = 0;
1610 goto done; 1608 goto done;
1611 case -5: /* "serialize" */
1612 printk(" -- USE \"ide%d=serialize\" INSTEAD", hw);
1613 goto do_serialize;
1614 case -6: /* "autotune" */ 1609 case -6: /* "autotune" */
1615 drive->autotune = IDE_TUNE_AUTO; 1610 drive->autotune = IDE_TUNE_AUTO;
1616 goto obsolete_option; 1611 goto obsolete_option;
@@ -1671,7 +1666,7 @@ static int __init ide_setup(char *s)
1671 * (-8, -9, -10) are reserved to ease the hardcoding. 1666 * (-8, -9, -10) are reserved to ease the hardcoding.
1672 */ 1667 */
1673 static const char *ide_words[] = { 1668 static const char *ide_words[] = {
1674 "noprobe", "serialize", "autotune", "noautotune", 1669 "noprobe", "serialize", "minus3", "minus4",
1675 "reset", "dma", "ata66", "minus8", "minus9", 1670 "reset", "dma", "ata66", "minus8", "minus9",
1676 "minus10", "four", "qd65xx", "ht6560b", "cmd640_vlb", 1671 "minus10", "four", "qd65xx", "ht6560b", "cmd640_vlb",
1677 "dtc2278", "umc8672", "ali14xx", NULL }; 1672 "dtc2278", "umc8672", "ali14xx", NULL };
@@ -1742,12 +1737,17 @@ static int __init ide_setup(char *s)
1742 hwif->chipset = mate->chipset = ide_4drives; 1737 hwif->chipset = mate->chipset = ide_4drives;
1743 mate->irq = hwif->irq; 1738 mate->irq = hwif->irq;
1744 memcpy(mate->io_ports, hwif->io_ports, sizeof(hwif->io_ports)); 1739 memcpy(mate->io_ports, hwif->io_ports, sizeof(hwif->io_ports));
1745 goto do_serialize; 1740 hwif->mate = mate;
1741 mate->mate = hwif;
1742 hwif->serialized = mate->serialized = 1;
1743 goto obsolete_option;
1746 } 1744 }
1747#endif /* CONFIG_BLK_DEV_4DRIVES */ 1745#endif /* CONFIG_BLK_DEV_4DRIVES */
1748 case -10: /* minus10 */ 1746 case -10: /* minus10 */
1749 case -9: /* minus9 */ 1747 case -9: /* minus9 */
1750 case -8: /* minus8 */ 1748 case -8: /* minus8 */
1749 case -4:
1750 case -3:
1751 goto bad_option; 1751 goto bad_option;
1752 case -7: /* ata66 */ 1752 case -7: /* ata66 */
1753#ifdef CONFIG_BLK_DEV_IDEPCI 1753#ifdef CONFIG_BLK_DEV_IDEPCI
@@ -1762,16 +1762,7 @@ static int __init ide_setup(char *s)
1762 case -5: /* "reset" */ 1762 case -5: /* "reset" */
1763 hwif->reset = 1; 1763 hwif->reset = 1;
1764 goto obsolete_option; 1764 goto obsolete_option;
1765 case -4: /* "noautotune" */
1766 hwif->drives[0].autotune = IDE_TUNE_NOAUTO;
1767 hwif->drives[1].autotune = IDE_TUNE_NOAUTO;
1768 goto obsolete_option;
1769 case -3: /* "autotune" */
1770 hwif->drives[0].autotune = IDE_TUNE_AUTO;
1771 hwif->drives[1].autotune = IDE_TUNE_AUTO;
1772 goto obsolete_option;
1773 case -2: /* "serialize" */ 1765 case -2: /* "serialize" */
1774 do_serialize:
1775 hwif->mate = &ide_hwifs[hw^1]; 1766 hwif->mate = &ide_hwifs[hw^1];
1776 hwif->mate->mate = hwif; 1767 hwif->mate->mate = hwif;
1777 hwif->serialized = hwif->mate->serialized = 1; 1768 hwif->serialized = hwif->mate->serialized = 1;
@@ -1840,8 +1831,8 @@ static void __init probe_for_hwifs (void)
1840#endif /* CONFIG_BLK_DEV_CMD640 */ 1831#endif /* CONFIG_BLK_DEV_CMD640 */
1841#ifdef CONFIG_BLK_DEV_IDE_PMAC 1832#ifdef CONFIG_BLK_DEV_IDE_PMAC
1842 { 1833 {
1843 extern void pmac_ide_probe(void); 1834 extern int pmac_ide_probe(void);
1844 pmac_ide_probe(); 1835 (void)pmac_ide_probe();
1845 } 1836 }
1846#endif /* CONFIG_BLK_DEV_IDE_PMAC */ 1837#endif /* CONFIG_BLK_DEV_IDE_PMAC */
1847#ifdef CONFIG_BLK_DEV_GAYLE 1838#ifdef CONFIG_BLK_DEV_GAYLE
diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/legacy/ali14xx.c
index 9c544467cb..91961aa030 100644
--- a/drivers/ide/legacy/ali14xx.c
+++ b/drivers/ide/legacy/ali14xx.c
@@ -37,8 +37,6 @@
37 * mode 4 for a while now with no trouble.) -Derek 37 * mode 4 for a while now with no trouble.) -Derek
38 */ 38 */
39 39
40#undef REALLY_SLOW_IO /* most systems can safely undef this */
41
42#include <linux/module.h> 40#include <linux/module.h>
43#include <linux/types.h> 41#include <linux/types.h>
44#include <linux/kernel.h> 42#include <linux/kernel.h>
@@ -230,9 +228,17 @@ static int __init ali14xx_probe(void)
230 return 0; 228 return 0;
231} 229}
232 230
231int probe_ali14xx = 0;
232
233module_param_named(probe, probe_ali14xx, bool, 0);
234MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets");
235
233/* Can be called directly from ide.c. */ 236/* Can be called directly from ide.c. */
234int __init ali14xx_init(void) 237int __init ali14xx_init(void)
235{ 238{
239 if (probe_ali14xx == 0)
240 goto out;
241
236 /* auto-detect IDE controller port */ 242 /* auto-detect IDE controller port */
237 if (findPort()) { 243 if (findPort()) {
238 if (ali14xx_probe()) 244 if (ali14xx_probe())
@@ -240,6 +246,7 @@ int __init ali14xx_init(void)
240 return 0; 246 return 0;
241 } 247 }
242 printk(KERN_ERR "ali14xx: not found.\n"); 248 printk(KERN_ERR "ali14xx: not found.\n");
249out:
243 return -ENODEV; 250 return -ENODEV;
244} 251}
245 252
diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/legacy/dtc2278.c
index 3b1d33baaa..0219ffa64d 100644
--- a/drivers/ide/legacy/dtc2278.c
+++ b/drivers/ide/legacy/dtc2278.c
@@ -4,8 +4,6 @@
4 * Copyright (C) 1996 Linus Torvalds & author (see below) 4 * Copyright (C) 1996 Linus Torvalds & author (see below)
5 */ 5 */
6 6
7#undef REALLY_SLOW_IO /* most systems can safely undef this */
8
9#include <linux/module.h> 7#include <linux/module.h>
10#include <linux/types.h> 8#include <linux/types.h>
11#include <linux/kernel.h> 9#include <linux/kernel.h>
@@ -94,7 +92,7 @@ static void tune_dtc2278 (ide_drive_t *drive, u8 pio)
94 HWIF(drive)->drives[!drive->select.b.unit].io_32bit = 1; 92 HWIF(drive)->drives[!drive->select.b.unit].io_32bit = 1;
95} 93}
96 94
97static int __init probe_dtc2278(void) 95static int __init dtc2278_probe(void)
98{ 96{
99 unsigned long flags; 97 unsigned long flags;
100 ide_hwif_t *hwif, *mate; 98 ide_hwif_t *hwif, *mate;
@@ -145,10 +143,18 @@ static int __init probe_dtc2278(void)
145 return 0; 143 return 0;
146} 144}
147 145
146int probe_dtc2278 = 0;
147
148module_param_named(probe, probe_dtc2278, bool, 0);
149MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets");
150
148/* Can be called directly from ide.c. */ 151/* Can be called directly from ide.c. */
149int __init dtc2278_init(void) 152int __init dtc2278_init(void)
150{ 153{
151 if (probe_dtc2278()) { 154 if (probe_dtc2278 == 0)
155 return -ENODEV;
156
157 if (dtc2278_probe()) {
152 printk(KERN_ERR "dtc2278: ide interfaces already in use!\n"); 158 printk(KERN_ERR "dtc2278: ide interfaces already in use!\n");
153 return -EBUSY; 159 return -EBUSY;
154 } 160 }
diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c
index 19ccd006f2..a2832643c5 100644
--- a/drivers/ide/legacy/ht6560b.c
+++ b/drivers/ide/legacy/ht6560b.c
@@ -36,8 +36,6 @@
36 36
37#define HT6560B_VERSION "v0.07" 37#define HT6560B_VERSION "v0.07"
38 38
39#undef REALLY_SLOW_IO /* most systems can safely undef this */
40
41#include <linux/module.h> 39#include <linux/module.h>
42#include <linux/types.h> 40#include <linux/types.h>
43#include <linux/kernel.h> 41#include <linux/kernel.h>
@@ -303,12 +301,20 @@ static void tune_ht6560b (ide_drive_t *drive, u8 pio)
303#endif 301#endif
304} 302}
305 303
304int probe_ht6560b = 0;
305
306module_param_named(probe, probe_ht6560b, bool, 0);
307MODULE_PARM_DESC(probe, "probe for HT6560B chipset");
308
306/* Can be called directly from ide.c. */ 309/* Can be called directly from ide.c. */
307int __init ht6560b_init(void) 310int __init ht6560b_init(void)
308{ 311{
309 ide_hwif_t *hwif, *mate; 312 ide_hwif_t *hwif, *mate;
310 int t; 313 int t;
311 314
315 if (probe_ht6560b == 0)
316 return -ENODEV;
317
312 hwif = &ide_hwifs[0]; 318 hwif = &ide_hwifs[0];
313 mate = &ide_hwifs[1]; 319 mate = &ide_hwifs[1];
314 320
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index a5023cdbdc..b08c37c9f9 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -359,14 +359,17 @@ void ide_release(struct pcmcia_device *link)
359static struct pcmcia_device_id ide_ids[] = { 359static struct pcmcia_device_id ide_ids[] = {
360 PCMCIA_DEVICE_FUNC_ID(4), 360 PCMCIA_DEVICE_FUNC_ID(4),
361 PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */ 361 PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */
362 PCMCIA_DEVICE_MANF_CARD(0x000a, 0x0000), /* I-O Data CFA */
363 PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001), /* Mitsubishi CFA */
362 PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704), 364 PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
363 PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), 365 PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), /* SanDisk CFA */
364 PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */ 366 PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */
365 PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d), 367 PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
366 PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */ 368 PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */
367 PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */ 369 PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */
368 PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001), 370 PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
369 PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar */ 371 PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100), /* Viking CFA */
372 PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar, Viking CFA */
370 PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0), 373 PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0),
371 PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74), 374 PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74),
372 PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9), 375 PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c
index d3c3bc2640..2fb8f50f12 100644
--- a/drivers/ide/legacy/qd65xx.c
+++ b/drivers/ide/legacy/qd65xx.c
@@ -16,8 +16,8 @@
16 * Please set local bus speed using kernel parameter idebus 16 * Please set local bus speed using kernel parameter idebus
17 * for example, "idebus=33" stands for 33Mhz VLbus 17 * for example, "idebus=33" stands for 33Mhz VLbus
18 * To activate controller support, use "ide0=qd65xx" 18 * To activate controller support, use "ide0=qd65xx"
19 * To enable tuning, use "ide0=autotune" 19 * To enable tuning, use "hda=autotune hdb=autotune"
20 * To enable second channel tuning (qd6580 only), use "ide1=autotune" 20 * To enable 2nd channel tuning (qd6580 only), use "hdc=autotune hdd=autotune"
21 */ 21 */
22 22
23/* 23/*
@@ -25,8 +25,6 @@
25 * Samuel Thibault <samuel.thibault@fnac.net> 25 * Samuel Thibault <samuel.thibault@fnac.net>
26 */ 26 */
27 27
28#undef REALLY_SLOW_IO /* most systems can safely undef this */
29
30#include <linux/module.h> 28#include <linux/module.h>
31#include <linux/types.h> 29#include <linux/types.h>
32#include <linux/kernel.h> 30#include <linux/kernel.h>
@@ -490,9 +488,17 @@ static int __init qd_probe(int base)
490 return 1; 488 return 1;
491} 489}
492 490
491int probe_qd65xx = 0;
492
493module_param_named(probe, probe_qd65xx, bool, 0);
494MODULE_PARM_DESC(probe, "probe for QD65xx chipsets");
495
493/* Can be called directly from ide.c. */ 496/* Can be called directly from ide.c. */
494int __init qd65xx_init(void) 497int __init qd65xx_init(void)
495{ 498{
499 if (probe_qd65xx == 0)
500 return -ENODEV;
501
496 if (qd_probe(0x30)) 502 if (qd_probe(0x30))
497 qd_probe(0xb0); 503 qd_probe(0xb0);
498 if (ide_hwifs[0].chipset != ide_qd65xx && 504 if (ide_hwifs[0].chipset != ide_qd65xx &&
diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/legacy/umc8672.c
index 6e2c58c5f6..ca79744555 100644
--- a/drivers/ide/legacy/umc8672.c
+++ b/drivers/ide/legacy/umc8672.c
@@ -165,12 +165,21 @@ static int __init umc8672_probe(void)
165 return 0; 165 return 0;
166} 166}
167 167
168int probe_umc8672 = 0;
169
170module_param_named(probe, probe_umc8672, bool, 0);
171MODULE_PARM_DESC(probe, "probe for UMC8672 chipset");
172
168/* Can be called directly from ide.c. */ 173/* Can be called directly from ide.c. */
169int __init umc8672_init(void) 174int __init umc8672_init(void)
170{ 175{
171 if (umc8672_probe()) 176 if (probe_umc8672 == 0)
172 return -ENODEV; 177 goto out;
173 return 0; 178
179 if (umc8672_probe() == 0)
180 return 0;
181out:
182 return -ENODEV;
174} 183}
175 184
176#ifdef MODULE 185#ifdef MODULE
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index 0a59d5ef15..b2dc028dc8 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -29,8 +29,6 @@
29 * Note: for more information, please refer "AMD Alchemy Au1200/Au1550 IDE 29 * Note: for more information, please refer "AMD Alchemy Au1200/Au1550 IDE
30 * Interface and Linux Device Driver" Application Note. 30 * Interface and Linux Device Driver" Application Note.
31 */ 31 */
32#undef REALLY_SLOW_IO /* most systems can safely undef this */
33
34#include <linux/types.h> 32#include <linux/types.h>
35#include <linux/module.h> 33#include <linux/module.h>
36#include <linux/kernel.h> 34#include <linux/kernel.h>
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index 4debd18d52..83e0aa65a4 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/ide/pci/alim15x3.c Version 0.17 2003/01/02 2 * linux/drivers/ide/pci/alim15x3.c Version 0.21 2007/02/03
3 * 3 *
4 * Copyright (C) 1998-2000 Michel Aubry, Maintainer 4 * Copyright (C) 1998-2000 Michel Aubry, Maintainer
5 * Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer 5 * Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer
@@ -9,6 +9,7 @@
9 * May be copied or modified under the terms of the GNU General Public License 9 * May be copied or modified under the terms of the GNU General Public License
10 * Copyright (C) 2002 Alan Cox <alan@redhat.com> 10 * Copyright (C) 2002 Alan Cox <alan@redhat.com>
11 * ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw> 11 * ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw>
12 * Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com>
12 * 13 *
13 * (U)DMA capable version of ali 1533/1543(C), 1535(D) 14 * (U)DMA capable version of ali 1533/1543(C), 1535(D)
14 * 15 *
@@ -280,15 +281,17 @@ static int ali_get_info (char *buffer, char **addr, off_t offset, int count)
280#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_PROC_FS) */ 281#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_PROC_FS) */
281 282
282/** 283/**
283 * ali15x3_tune_drive - set up a drive 284 * ali15x3_tune_pio - set up chipset for PIO mode
284 * @drive: drive to tune 285 * @drive: drive to tune
285 * @pio: unused 286 * @pio: desired mode
286 * 287 *
287 * Select the best PIO timing for the drive in question. Then 288 * Select the best PIO mode for the drive in question.
288 * program the controller for this drive set up 289 * Then program the controller for this mode.
290 *
291 * Returns the PIO mode programmed.
289 */ 292 */
290 293
291static void ali15x3_tune_drive (ide_drive_t *drive, u8 pio) 294static u8 ali15x3_tune_pio (ide_drive_t *drive, u8 pio)
292{ 295{
293 ide_pio_data_t d; 296 ide_pio_data_t d;
294 ide_hwif_t *hwif = HWIF(drive); 297 ide_hwif_t *hwif = HWIF(drive);
@@ -356,6 +359,22 @@ static void ali15x3_tune_drive (ide_drive_t *drive, u8 pio)
356 * { 20, 50, 30 } PIO Mode 5 with IORDY (nonstandard) 359 * { 20, 50, 30 } PIO Mode 5 with IORDY (nonstandard)
357 */ 360 */
358 361
362 return pio;
363}
364
365/**
366 * ali15x3_tune_drive - set up drive for PIO mode
367 * @drive: drive to tune
368 * @pio: desired mode
369 *
370 * Program the controller with the best PIO timing for the given drive.
371 * Then set up the drive itself.
372 */
373
374static void ali15x3_tune_drive (ide_drive_t *drive, u8 pio)
375{
376 pio = ali15x3_tune_pio(drive, pio);
377 (void) ide_config_drive_speed(drive, XFER_PIO_0 + pio);
359} 378}
360 379
361/** 380/**
@@ -430,7 +449,7 @@ static u8 ali15x3_ratemask (ide_drive_t *drive)
430} 449}
431 450
432/** 451/**
433 * ali15x3_tune_chipset - set up chiset for new speed 452 * ali15x3_tune_chipset - set up chipset/drive for new speed
434 * @drive: drive to configure for 453 * @drive: drive to configure for
435 * @xferspeed: desired speed 454 * @xferspeed: desired speed
436 * 455 *
@@ -461,7 +480,7 @@ static int ali15x3_tune_chipset (ide_drive_t *drive, u8 xferspeed)
461 pci_write_config_byte(dev, m5229_udma, tmpbyte); 480 pci_write_config_byte(dev, m5229_udma, tmpbyte);
462 481
463 if (speed < XFER_SW_DMA_0) 482 if (speed < XFER_SW_DMA_0)
464 ali15x3_tune_drive(drive, speed); 483 (void) ali15x3_tune_pio(drive, speed - XFER_PIO_0);
465 } else { 484 } else {
466 pci_read_config_byte(dev, m5229_udma, &tmpbyte); 485 pci_read_config_byte(dev, m5229_udma, &tmpbyte);
467 tmpbyte &= (0x0f << ((1-unit) << 2)); 486 tmpbyte &= (0x0f << ((1-unit) << 2));
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c
index 61b5f9c0b2..dc43f009ac 100644
--- a/drivers/ide/pci/cmd640.c
+++ b/drivers/ide/pci/cmd640.c
@@ -98,7 +98,6 @@
98 * (patch courtesy of Zoltan Hidvegi) 98 * (patch courtesy of Zoltan Hidvegi)
99 */ 99 */
100 100
101#undef REALLY_SLOW_IO /* most systems can safely undef this */
102#define CMD640_PREFETCH_MASKS 1 101#define CMD640_PREFETCH_MASKS 1
103 102
104//#define CMD640_DUMP_REGS 103//#define CMD640_DUMP_REGS
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index 49df27513d..b0d4825c56 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -1,6 +1,6 @@
1/* $Id: cmd64x.c,v 1.21 2000/01/30 23:23:16 1/* $Id: cmd64x.c,v 1.21 2000/01/30 23:23:16
2 * 2 *
3 * linux/drivers/ide/pci/cmd64x.c Version 1.30 Sept 10, 2002 3 * linux/drivers/ide/pci/cmd64x.c Version 1.41 Feb 3, 2007
4 * 4 *
5 * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines. 5 * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
6 * Note, this driver is not used at all on other systems because 6 * Note, this driver is not used at all on other systems because
@@ -12,6 +12,7 @@
12 * Copyright (C) 1998 David S. Miller (davem@redhat.com) 12 * Copyright (C) 1998 David S. Miller (davem@redhat.com)
13 * 13 *
14 * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org> 14 * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
15 * Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com>
15 */ 16 */
16 17
17#include <linux/module.h> 18#include <linux/module.h>
@@ -262,43 +263,25 @@ static void program_drive_counts (ide_drive_t *drive, int setup_count, int activ
262} 263}
263 264
264/* 265/*
265 * Attempts to set the interface PIO mode. 266 * This routine selects drive's best PIO mode, calculates setup/active/recovery
266 * The preferred method of selecting PIO modes (e.g. mode 4) is 267 * counts, and then writes them into the chipset registers.
267 * "echo 'piomode:4' > /proc/ide/hdx/settings". Special cases are
268 * 8: prefetch off, 9: prefetch on, 255: auto-select best mode.
269 * Called with 255 at boot time.
270 */ 268 */
271 269static u8 cmd64x_tune_pio (ide_drive_t *drive, u8 mode_wanted)
272static void cmd64x_tuneproc (ide_drive_t *drive, u8 mode_wanted)
273{ 270{
274 int setup_time, active_time, recovery_time; 271 int setup_time, active_time, recovery_time;
275 int clock_time, pio_mode, cycle_time; 272 int clock_time, pio_mode, cycle_time;
276 u8 recovery_count2, cycle_count; 273 u8 recovery_count2, cycle_count;
277 int setup_count, active_count, recovery_count; 274 int setup_count, active_count, recovery_count;
278 int bus_speed = system_bus_clock(); 275 int bus_speed = system_bus_clock();
279 /*byte b;*/
280 ide_pio_data_t d; 276 ide_pio_data_t d;
281 277
282 switch (mode_wanted) { 278 pio_mode = ide_get_best_pio_mode(drive, mode_wanted, 5, &d);
283 case 8: /* set prefetch off */
284 case 9: /* set prefetch on */
285 mode_wanted &= 1;
286 /*set_prefetch_mode(index, mode_wanted);*/
287 cmdprintk("%s: %sabled cmd640 prefetch\n",
288 drive->name, mode_wanted ? "en" : "dis");
289 return;
290 }
291
292 mode_wanted = ide_get_best_pio_mode (drive, mode_wanted, 5, &d);
293 pio_mode = d.pio_mode;
294 cycle_time = d.cycle_time; 279 cycle_time = d.cycle_time;
295 280
296 /* 281 /*
297 * I copied all this complicated stuff from cmd640.c and made a few 282 * I copied all this complicated stuff from cmd640.c and made a few
298 * minor changes. For now I am just going to pray that it is correct. 283 * minor changes. For now I am just going to pray that it is correct.
299 */ 284 */
300 if (pio_mode > 5)
301 pio_mode = 5;
302 setup_time = ide_pio_timings[pio_mode].setup_time; 285 setup_time = ide_pio_timings[pio_mode].setup_time;
303 active_time = ide_pio_timings[pio_mode].active_time; 286 active_time = ide_pio_timings[pio_mode].active_time;
304 recovery_time = cycle_time - (setup_time + active_time); 287 recovery_time = cycle_time - (setup_time + active_time);
@@ -320,22 +303,33 @@ static void cmd64x_tuneproc (ide_drive_t *drive, u8 mode_wanted)
320 if (active_count > 16) 303 if (active_count > 16)
321 active_count = 16; /* maximum allowed by cmd646 */ 304 active_count = 16; /* maximum allowed by cmd646 */
322 305
323 /*
324 * In a perfect world, we might set the drive pio mode here
325 * (using WIN_SETFEATURE) before continuing.
326 *
327 * But we do not, because:
328 * 1) this is the wrong place to do it
329 * (proper is do_special() in ide.c)
330 * 2) in practice this is rarely, if ever, necessary
331 */
332 program_drive_counts (drive, setup_count, active_count, recovery_count); 306 program_drive_counts (drive, setup_count, active_count, recovery_count);
333 307
334 cmdprintk("%s: selected cmd646 PIO mode%d : %d (%dns)%s, " 308 cmdprintk("%s: PIO mode wanted %d, selected %d (%dns)%s, "
335 "clocks=%d/%d/%d\n", 309 "clocks=%d/%d/%d\n",
336 drive->name, pio_mode, mode_wanted, cycle_time, 310 drive->name, mode_wanted, pio_mode, cycle_time,
337 d.overridden ? " (overriding vendor mode)" : "", 311 d.overridden ? " (overriding vendor mode)" : "",
338 setup_count, active_count, recovery_count); 312 setup_count, active_count, recovery_count);
313
314 return pio_mode;
315}
316
317/*
318 * Attempts to set drive's PIO mode.
319 * Special cases are 8: prefetch off, 9: prefetch on (both never worked),
320 * and 255: auto-select best mode (used at boot time).
321 */
322static void cmd64x_tune_drive (ide_drive_t *drive, u8 pio)
323{
324 /*
325 * Filter out the prefetch control values
326 * to prevent PIO5 from being programmed
327 */
328 if (pio == 8 || pio == 9)
329 return;
330
331 pio = cmd64x_tune_pio(drive, pio);
332 (void) ide_config_drive_speed(drive, XFER_PIO_0 + pio);
339} 333}
340 334
341static u8 cmd64x_ratemask (ide_drive_t *drive) 335static u8 cmd64x_ratemask (ide_drive_t *drive)
@@ -387,22 +381,6 @@ static u8 cmd64x_ratemask (ide_drive_t *drive)
387 return mode; 381 return mode;
388} 382}
389 383
390static void config_cmd64x_chipset_for_pio (ide_drive_t *drive, u8 set_speed)
391{
392 u8 speed = 0x00;
393 u8 set_pio = ide_get_best_pio_mode(drive, 4, 5, NULL);
394
395 cmd64x_tuneproc(drive, set_pio);
396 speed = XFER_PIO_0 + set_pio;
397 if (set_speed)
398 (void) ide_config_drive_speed(drive, speed);
399}
400
401static void config_chipset_for_pio (ide_drive_t *drive, u8 set_speed)
402{
403 config_cmd64x_chipset_for_pio(drive, set_speed);
404}
405
406static int cmd64x_tune_chipset (ide_drive_t *drive, u8 xferspeed) 384static int cmd64x_tune_chipset (ide_drive_t *drive, u8 xferspeed)
407{ 385{
408 ide_hwif_t *hwif = HWIF(drive); 386 ide_hwif_t *hwif = HWIF(drive);
@@ -414,7 +392,7 @@ static int cmd64x_tune_chipset (ide_drive_t *drive, u8 xferspeed)
414 392
415 u8 speed = ide_rate_filter(cmd64x_ratemask(drive), xferspeed); 393 u8 speed = ide_rate_filter(cmd64x_ratemask(drive), xferspeed);
416 394
417 if (speed > XFER_PIO_4) { 395 if (speed >= XFER_SW_DMA_0) {
418 (void) pci_read_config_byte(dev, pciD, &regD); 396 (void) pci_read_config_byte(dev, pciD, &regD);
419 (void) pci_read_config_byte(dev, pciU, &regU); 397 (void) pci_read_config_byte(dev, pciU, &regU);
420 regD &= ~(unit ? 0x40 : 0x20); 398 regD &= ~(unit ? 0x40 : 0x20);
@@ -438,17 +416,20 @@ static int cmd64x_tune_chipset (ide_drive_t *drive, u8 xferspeed)
438 case XFER_SW_DMA_2: regD |= (unit ? 0x40 : 0x10); break; 416 case XFER_SW_DMA_2: regD |= (unit ? 0x40 : 0x10); break;
439 case XFER_SW_DMA_1: regD |= (unit ? 0x80 : 0x20); break; 417 case XFER_SW_DMA_1: regD |= (unit ? 0x80 : 0x20); break;
440 case XFER_SW_DMA_0: regD |= (unit ? 0xC0 : 0x30); break; 418 case XFER_SW_DMA_0: regD |= (unit ? 0xC0 : 0x30); break;
441 case XFER_PIO_4: cmd64x_tuneproc(drive, 4); break; 419 case XFER_PIO_5:
442 case XFER_PIO_3: cmd64x_tuneproc(drive, 3); break; 420 case XFER_PIO_4:
443 case XFER_PIO_2: cmd64x_tuneproc(drive, 2); break; 421 case XFER_PIO_3:
444 case XFER_PIO_1: cmd64x_tuneproc(drive, 1); break; 422 case XFER_PIO_2:
445 case XFER_PIO_0: cmd64x_tuneproc(drive, 0); break; 423 case XFER_PIO_1:
424 case XFER_PIO_0:
425 (void) cmd64x_tune_pio(drive, speed - XFER_PIO_0);
426 break;
446 427
447 default: 428 default:
448 return 1; 429 return 1;
449 } 430 }
450 431
451 if (speed > XFER_PIO_4) { 432 if (speed >= XFER_SW_DMA_0) {
452 (void) pci_write_config_byte(dev, pciU, regU); 433 (void) pci_write_config_byte(dev, pciU, regU);
453 regD |= (unit ? 0x40 : 0x20); 434 regD |= (unit ? 0x40 : 0x20);
454 (void) pci_write_config_byte(dev, pciD, regD); 435 (void) pci_write_config_byte(dev, pciD, regD);
@@ -461,8 +442,6 @@ static int config_chipset_for_dma (ide_drive_t *drive)
461{ 442{
462 u8 speed = ide_dma_speed(drive, cmd64x_ratemask(drive)); 443 u8 speed = ide_dma_speed(drive, cmd64x_ratemask(drive));
463 444
464 config_chipset_for_pio(drive, !speed);
465
466 if (!speed) 445 if (!speed)
467 return 0; 446 return 0;
468 447
@@ -478,7 +457,7 @@ static int cmd64x_config_drive_for_dma (ide_drive_t *drive)
478 return 0; 457 return 0;
479 458
480 if (ide_use_fast_pio(drive)) 459 if (ide_use_fast_pio(drive))
481 config_chipset_for_pio(drive, 1); 460 cmd64x_tune_drive(drive, 255);
482 461
483 return -1; 462 return -1;
484} 463}
@@ -679,14 +658,13 @@ static void __devinit init_hwif_cmd64x(ide_hwif_t *hwif)
679 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev); 658 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
680 class_rev &= 0xff; 659 class_rev &= 0xff;
681 660
682 hwif->tuneproc = &cmd64x_tuneproc; 661 hwif->tuneproc = &cmd64x_tune_drive;
683 hwif->speedproc = &cmd64x_tune_chipset; 662 hwif->speedproc = &cmd64x_tune_chipset;
684 663
685 if (!hwif->dma_base) { 664 hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
686 hwif->drives[0].autotune = 1; 665
687 hwif->drives[1].autotune = 1; 666 if (!hwif->dma_base)
688 return; 667 return;
689 }
690 668
691 hwif->atapi_dma = 1; 669 hwif->atapi_dma = 1;
692 670
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c
index e2672fc65d..d4b753e701 100644
--- a/drivers/ide/pci/delkin_cb.c
+++ b/drivers/ide/pci/delkin_cb.c
@@ -122,7 +122,7 @@ static struct pci_driver driver = {
122static int 122static int
123delkin_cb_init (void) 123delkin_cb_init (void)
124{ 124{
125 return pci_module_init(&driver); 125 return pci_register_driver(&driver);
126} 126}
127 127
128static void 128static void
diff --git a/drivers/ide/pci/generic.c b/drivers/ide/pci/generic.c
index b408c6c517..f2c5a141ca 100644
--- a/drivers/ide/pci/generic.c
+++ b/drivers/ide/pci/generic.c
@@ -21,8 +21,6 @@
21 * are deemed to be part of the source code. 21 * are deemed to be part of the source code.
22 */ 22 */
23 23
24#undef REALLY_SLOW_IO /* most systems can safely undef this */
25
26#include <linux/types.h> 24#include <linux/types.h>
27#include <linux/module.h> 25#include <linux/module.h>
28#include <linux/kernel.h> 26#include <linux/kernel.h>
diff --git a/drivers/ide/pci/opti621.c b/drivers/ide/pci/opti621.c
index 9ca60dd218..aede7eee92 100644
--- a/drivers/ide/pci/opti621.c
+++ b/drivers/ide/pci/opti621.c
@@ -57,7 +57,7 @@
57 * There is a 25/33MHz switch in configuration 57 * There is a 25/33MHz switch in configuration
58 * register, but driver is written for use at any frequency which get 58 * register, but driver is written for use at any frequency which get
59 * (use idebus=xx to select PCI bus speed). 59 * (use idebus=xx to select PCI bus speed).
60 * Use ide0=autotune for automatical tune of the PIO modes. 60 * Use hda=autotune and hdb=autotune for automatic tuning of the PIO modes.
61 * If you get strange results, do not use this and set PIO manually 61 * If you get strange results, do not use this and set PIO manually
62 * by hdparm. 62 * by hdparm.
63 * 63 *
@@ -87,7 +87,6 @@
87 * 0.5 doesn't work. 87 * 0.5 doesn't work.
88 */ 88 */
89 89
90#undef REALLY_SLOW_IO /* most systems can safely undef this */
91#define OPTI621_DEBUG /* define for debug messages */ 90#define OPTI621_DEBUG /* define for debug messages */
92 91
93#include <linux/types.h> 92#include <linux/types.h>
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index 569822f4cf..061d300ab8 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -1,10 +1,10 @@
1/* 1/*
2 * linux/drivers/ide/pci/piix.c Version 0.46 December 3, 2006 2 * linux/drivers/ide/pci/piix.c Version 0.47 February 8, 2007
3 * 3 *
4 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer 4 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
5 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> 5 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
6 * Copyright (C) 2003 Red Hat Inc <alan@redhat.com> 6 * Copyright (C) 2003 Red Hat Inc <alan@redhat.com>
7 * Copyright (C) 2006 MontaVista Software, Inc. <source@mvista.com> 7 * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com>
8 * 8 *
9 * May be copied or modified under the terms of the GNU General Public License 9 * May be copied or modified under the terms of the GNU General Public License
10 * 10 *
@@ -205,14 +205,13 @@ static u8 piix_dma_2_pio (u8 xfer_rate) {
205} 205}
206 206
207/** 207/**
208 * piix_tune_drive - tune a drive attached to a PIIX 208 * piix_tune_pio - tune PIIX for PIO mode
209 * @drive: drive to tune 209 * @drive: drive to tune
210 * @pio: desired PIO mode 210 * @pio: desired PIO mode
211 * 211 *
212 * Set the interface PIO mode based upon the settings done by AMI BIOS 212 * Set the interface PIO mode based upon the settings done by AMI BIOS.
213 * (might be useful if drive is not registered in CMOS for any reason).
214 */ 213 */
215static void piix_tune_drive (ide_drive_t *drive, u8 pio) 214static void piix_tune_pio (ide_drive_t *drive, u8 pio)
216{ 215{
217 ide_hwif_t *hwif = HWIF(drive); 216 ide_hwif_t *hwif = HWIF(drive);
218 struct pci_dev *dev = hwif->pci_dev; 217 struct pci_dev *dev = hwif->pci_dev;
@@ -233,8 +232,6 @@ static void piix_tune_drive (ide_drive_t *drive, u8 pio)
233 { 2, 1 }, 232 { 2, 1 },
234 { 2, 3 }, }; 233 { 2, 3 }, };
235 234
236 pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
237
238 /* 235 /*
239 * Master vs slave is synchronized above us but the slave register is 236 * Master vs slave is synchronized above us but the slave register is
240 * shared by the two hwifs so the corner case of two slave timeouts in 237 * shared by the two hwifs so the corner case of two slave timeouts in
@@ -253,19 +250,20 @@ static void piix_tune_drive (ide_drive_t *drive, u8 pio)
253 master_data |= 0x4000; 250 master_data |= 0x4000;
254 master_data &= ~0x0070; 251 master_data &= ~0x0070;
255 if (pio > 1) { 252 if (pio > 1) {
256 /* enable PPE, IE and TIME */ 253 /* Set PPE, IE and TIME */
257 master_data = master_data | (control << 4); 254 master_data |= control << 4;
258 } 255 }
259 pci_read_config_byte(dev, slave_port, &slave_data); 256 pci_read_config_byte(dev, slave_port, &slave_data);
260 slave_data = slave_data & (hwif->channel ? 0x0f : 0xf0); 257 slave_data &= hwif->channel ? 0x0f : 0xf0;
261 slave_data = slave_data | (((timings[pio][0] << 2) | timings[pio][1]) << (hwif->channel ? 4 : 0)); 258 slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) <<
259 (hwif->channel ? 4 : 0);
262 } else { 260 } else {
263 master_data &= ~0x3307; 261 master_data &= ~0x3307;
264 if (pio > 1) { 262 if (pio > 1) {
265 /* enable PPE, IE and TIME */ 263 /* enable PPE, IE and TIME */
266 master_data = master_data | control; 264 master_data |= control;
267 } 265 }
268 master_data = master_data | (timings[pio][0] << 12) | (timings[pio][1] << 8); 266 master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
269 } 267 }
270 pci_write_config_word(dev, master_port, master_data); 268 pci_write_config_word(dev, master_port, master_data);
271 if (is_slave) 269 if (is_slave)
@@ -274,6 +272,21 @@ static void piix_tune_drive (ide_drive_t *drive, u8 pio)
274} 272}
275 273
276/** 274/**
275 * piix_tune_drive - tune a drive attached to PIIX
276 * @drive: drive to tune
277 * @pio: desired PIO mode
278 *
279 * Set the drive's PIO mode (might be useful if drive is not registered
280 * in CMOS for any reason).
281 */
282static void piix_tune_drive (ide_drive_t *drive, u8 pio)
283{
284 pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
285 piix_tune_pio(drive, pio);
286 (void) ide_config_drive_speed(drive, XFER_PIO_0 + pio);
287}
288
289/**
277 * piix_tune_chipset - tune a PIIX interface 290 * piix_tune_chipset - tune a PIIX interface
278 * @drive: IDE drive to tune 291 * @drive: IDE drive to tune
279 * @xferspeed: speed to configure 292 * @xferspeed: speed to configure
@@ -348,8 +361,8 @@ static int piix_tune_chipset (ide_drive_t *drive, u8 xferspeed)
348 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); 361 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
349 } 362 }
350 363
351 piix_tune_drive(drive, piix_dma_2_pio(speed)); 364 piix_tune_pio(drive, piix_dma_2_pio(speed));
352 return (ide_config_drive_speed(drive, speed)); 365 return ide_config_drive_speed(drive, speed);
353} 366}
354 367
355/** 368/**
@@ -392,9 +405,7 @@ static int piix_config_drive_xfer_rate (ide_drive_t *drive)
392 return 0; 405 return 0;
393 406
394 if (ide_use_fast_pio(drive)) 407 if (ide_use_fast_pio(drive))
395 /* Find best PIO mode. */ 408 piix_tune_drive(drive, 255);
396 piix_tune_chipset(drive, XFER_PIO_0 +
397 ide_get_best_pio_mode(drive, 255, 4, NULL));
398 409
399 return -1; 410 return -1;
400} 411}
diff --git a/drivers/ide/pci/rz1000.c b/drivers/ide/pci/rz1000.c
index c185531105..f8c9546901 100644
--- a/drivers/ide/pci/rz1000.c
+++ b/drivers/ide/pci/rz1000.c
@@ -15,8 +15,6 @@
15 * Dunno if this fixes both ports, or only the primary port (?). 15 * Dunno if this fixes both ports, or only the primary port (?).
16 */ 16 */
17 17
18#undef REALLY_SLOW_IO /* most systems can safely undef this */
19
20#include <linux/types.h> 18#include <linux/types.h>
21#include <linux/module.h> 19#include <linux/module.h>
22#include <linux/kernel.h> 20#include <linux/kernel.h>
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index 7b4c189a9d..71eccdf5f8 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -26,6 +26,11 @@
26 * If you have strange problems with nVidia chipset systems please 26 * If you have strange problems with nVidia chipset systems please
27 * see the SI support documentation and update your system BIOS 27 * see the SI support documentation and update your system BIOS
28 * if necessary 28 * if necessary
29 *
30 * The Dell DRAC4 has some interesting features including effectively hot
31 * unplugging/replugging the virtual CD interface when the DRAC is reset.
32 * This often causes drivers/ide/siimage to panic but is ok with the rather
33 * smarter code in libata.
29 */ 34 */
30 35
31#include <linux/types.h> 36#include <linux/types.h>
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
index ae7eb58d96..852ccb36da 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/pci/slc90e66.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * linux/drivers/ide/pci/slc90e66.c Version 0.13 December 30, 2006 2 * linux/drivers/ide/pci/slc90e66.c Version 0.14 February 8, 2007
3 * 3 *
4 * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org> 4 * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
5 * Copyright (C) 2006 MontaVista Software, Inc. <source@mvista.com> 5 * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com>
6 * 6 *
7 * This is a look-alike variation of the ICH0 PIIX4 Ultra-66, 7 * This is a look-alike variation of the ICH0 PIIX4 Ultra-66,
8 * but this keeps the ISA-Bridge and slots alive. 8 * but this keeps the ISA-Bridge and slots alive.
@@ -57,11 +57,7 @@ static u8 slc90e66_dma_2_pio (u8 xfer_rate) {
57 } 57 }
58} 58}
59 59
60/* 60static void slc90e66_tune_pio (ide_drive_t *drive, u8 pio)
61 * Based on settings done by AMI BIOS
62 * (might be useful if drive is not registered in CMOS for any reason).
63 */
64static void slc90e66_tune_drive (ide_drive_t *drive, u8 pio)
65{ 61{
66 ide_hwif_t *hwif = HWIF(drive); 62 ide_hwif_t *hwif = HWIF(drive);
67 struct pci_dev *dev = hwif->pci_dev; 63 struct pci_dev *dev = hwif->pci_dev;
@@ -80,7 +76,6 @@ static void slc90e66_tune_drive (ide_drive_t *drive, u8 pio)
80 { 2, 1 }, 76 { 2, 1 },
81 { 2, 3 }, }; 77 { 2, 3 }, };
82 78
83 pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
84 spin_lock_irqsave(&ide_lock, flags); 79 spin_lock_irqsave(&ide_lock, flags);
85 pci_read_config_word(dev, master_port, &master_data); 80 pci_read_config_word(dev, master_port, &master_data);
86 81
@@ -94,19 +89,20 @@ static void slc90e66_tune_drive (ide_drive_t *drive, u8 pio)
94 master_data |= 0x4000; 89 master_data |= 0x4000;
95 master_data &= ~0x0070; 90 master_data &= ~0x0070;
96 if (pio > 1) { 91 if (pio > 1) {
97 /* enable PPE, IE and TIME */ 92 /* Set PPE, IE and TIME */
98 master_data = master_data | (control << 4); 93 master_data |= control << 4;
99 } 94 }
100 pci_read_config_byte(dev, slave_port, &slave_data); 95 pci_read_config_byte(dev, slave_port, &slave_data);
101 slave_data = slave_data & (hwif->channel ? 0x0f : 0xf0); 96 slave_data &= hwif->channel ? 0x0f : 0xf0;
102 slave_data = slave_data | (((timings[pio][0] << 2) | timings[pio][1]) << (hwif->channel ? 4 : 0)); 97 slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) <<
98 (hwif->channel ? 4 : 0);
103 } else { 99 } else {
104 master_data &= ~0x3307; 100 master_data &= ~0x3307;
105 if (pio > 1) { 101 if (pio > 1) {
106 /* enable PPE, IE and TIME */ 102 /* enable PPE, IE and TIME */
107 master_data = master_data | control; 103 master_data |= control;
108 } 104 }
109 master_data = master_data | (timings[pio][0] << 12) | (timings[pio][1] << 8); 105 master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
110 } 106 }
111 pci_write_config_word(dev, master_port, master_data); 107 pci_write_config_word(dev, master_port, master_data);
112 if (is_slave) 108 if (is_slave)
@@ -114,6 +110,13 @@ static void slc90e66_tune_drive (ide_drive_t *drive, u8 pio)
114 spin_unlock_irqrestore(&ide_lock, flags); 110 spin_unlock_irqrestore(&ide_lock, flags);
115} 111}
116 112
113static void slc90e66_tune_drive (ide_drive_t *drive, u8 pio)
114{
115 pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
116 slc90e66_tune_pio(drive, pio);
117 (void) ide_config_drive_speed(drive, XFER_PIO_0 + pio);
118}
119
117static int slc90e66_tune_chipset (ide_drive_t *drive, u8 xferspeed) 120static int slc90e66_tune_chipset (ide_drive_t *drive, u8 xferspeed)
118{ 121{
119 ide_hwif_t *hwif = HWIF(drive); 122 ide_hwif_t *hwif = HWIF(drive);
@@ -162,8 +165,8 @@ static int slc90e66_tune_chipset (ide_drive_t *drive, u8 xferspeed)
162 pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); 165 pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
163 } 166 }
164 167
165 slc90e66_tune_drive(drive, slc90e66_dma_2_pio(speed)); 168 slc90e66_tune_pio(drive, slc90e66_dma_2_pio(speed));
166 return (ide_config_drive_speed(drive, speed)); 169 return ide_config_drive_speed(drive, speed);
167} 170}
168 171
169static int slc90e66_config_drive_for_dma (ide_drive_t *drive) 172static int slc90e66_config_drive_for_dma (ide_drive_t *drive)
@@ -185,8 +188,7 @@ static int slc90e66_config_drive_xfer_rate (ide_drive_t *drive)
185 return 0; 188 return 0;
186 189
187 if (ide_use_fast_pio(drive)) 190 if (ide_use_fast_pio(drive))
188 (void)slc90e66_tune_chipset(drive, XFER_PIO_0 + 191 slc90e66_tune_drive(drive, 255);
189 ide_get_best_pio_mode(drive, 255, 4, NULL));
190 192
191 return -1; 193 return -1;
192} 194}
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 395d35253d..071a030ec2 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -48,7 +48,7 @@
48#include <asm/mediabay.h> 48#include <asm/mediabay.h>
49#endif 49#endif
50 50
51#include "ide-timing.h" 51#include "../ide-timing.h"
52 52
53#undef IDE_PMAC_DEBUG 53#undef IDE_PMAC_DEBUG
54 54
@@ -1551,19 +1551,34 @@ static struct pci_driver pmac_ide_pci_driver = {
1551}; 1551};
1552MODULE_DEVICE_TABLE(pci, pmac_ide_pci_match); 1552MODULE_DEVICE_TABLE(pci, pmac_ide_pci_match);
1553 1553
1554void __init 1554int __init pmac_ide_probe(void)
1555pmac_ide_probe(void)
1556{ 1555{
1556 int error;
1557
1557 if (!machine_is(powermac)) 1558 if (!machine_is(powermac))
1558 return; 1559 return -ENODEV;
1559 1560
1560#ifdef CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST 1561#ifdef CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST
1561 pci_register_driver(&pmac_ide_pci_driver); 1562 error = pci_register_driver(&pmac_ide_pci_driver);
1562 macio_register_driver(&pmac_ide_macio_driver); 1563 if (error)
1564 goto out;
1565 error = macio_register_driver(&pmac_ide_macio_driver);
1566 if (error) {
1567 pci_unregister_driver(&pmac_ide_pci_driver);
1568 goto out;
1569 }
1563#else 1570#else
1564 macio_register_driver(&pmac_ide_macio_driver); 1571 error = macio_register_driver(&pmac_ide_macio_driver);
1565 pci_register_driver(&pmac_ide_pci_driver); 1572 if (error)
1573 goto out;
1574 error = pci_register_driver(&pmac_ide_pci_driver);
1575 if (error) {
1576 macio_unregister_driver(&pmac_ide_macio_driver);
1577 goto out;
1578 }
1566#endif 1579#endif
1580out:
1581 return error;
1567} 1582}
1568 1583
1569#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC 1584#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
@@ -1983,7 +1998,7 @@ static void pmac_ide_dma_host_off(ide_drive_t *drive)
1983{ 1998{
1984} 1999}
1985 2000
1986static int pmac_ide_dma_host_on(ide_drive_t *drive) 2001static void pmac_ide_dma_host_on(ide_drive_t *drive)
1987{ 2002{
1988} 2003}
1989 2004
diff --git a/drivers/ide/ppc/scc_pata.c b/drivers/ide/ppc/scc_pata.c
index de64b02247..f84bf791f7 100644
--- a/drivers/ide/ppc/scc_pata.c
+++ b/drivers/ide/ppc/scc_pata.c
@@ -509,6 +509,32 @@ static int scc_ide_dma_end(ide_drive_t * drive)
509 return __ide_dma_end(drive); 509 return __ide_dma_end(drive);
510} 510}
511 511
512/* returns 1 if dma irq issued, 0 otherwise */
513static int scc_dma_test_irq(ide_drive_t *drive)
514{
515 ide_hwif_t *hwif = HWIF(drive);
516 u8 dma_stat = hwif->INB(hwif->dma_status);
517
518 /* return 1 if INTR asserted */
519 if ((dma_stat & 4) == 4)
520 return 1;
521
522 /* Workaround for PTERADD: emulate DMA_INTR when
523 * - IDE_STATUS[ERR] = 1
524 * - INT_STATUS[INTRQ] = 1
525 * - DMA_STATUS[IORACTA] = 1
526 */
527 if (in_be32((void __iomem *)IDE_ALTSTATUS_REG) & ERR_STAT &&
528 in_be32((void __iomem *)(hwif->dma_base + 0x014)) & INTSTS_INTRQ &&
529 dma_stat & 1)
530 return 1;
531
532 if (!drive->waiting_for_dma)
533 printk(KERN_WARNING "%s: (%s) called while not waiting\n",
534 drive->name, __FUNCTION__);
535 return 0;
536}
537
512/** 538/**
513 * setup_mmio_scc - map CTRL/BMID region 539 * setup_mmio_scc - map CTRL/BMID region
514 * @dev: PCI device we are configuring 540 * @dev: PCI device we are configuring
@@ -712,6 +738,7 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
712 hwif->speedproc = scc_tune_chipset; 738 hwif->speedproc = scc_tune_chipset;
713 hwif->tuneproc = scc_tuneproc; 739 hwif->tuneproc = scc_tuneproc;
714 hwif->ide_dma_check = scc_config_drive_for_dma; 740 hwif->ide_dma_check = scc_config_drive_for_dma;
741 hwif->ide_dma_test_irq = scc_dma_test_irq;
715 742
716 hwif->drives[0].autotune = IDE_TUNE_AUTO; 743 hwif->drives[0].autotune = IDE_TUNE_AUTO;
717 hwif->drives[1].autotune = IDE_TUNE_AUTO; 744 hwif->drives[1].autotune = IDE_TUNE_AUTO;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 64509689fa..f17e9c7d4b 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -215,14 +215,16 @@ config KEYBOARD_AAED2000
215 module will be called aaed2000_kbd. 215 module will be called aaed2000_kbd.
216 216
217config KEYBOARD_GPIO 217config KEYBOARD_GPIO
218 tristate "Buttons on CPU GPIOs (PXA)" 218 tristate "GPIO Buttons"
219 depends on (ARCH_SA1100 || ARCH_PXA || ARCH_S3C2410) 219 depends on GENERIC_GPIO
220 help 220 help
221 This driver implements support for buttons connected 221 This driver implements support for buttons connected
222 directly to GPIO pins of SA1100, PXA or S3C24xx CPUs. 222 to GPIO pins of various CPUs (and some other chips).
223 223
224 Say Y here if your device has buttons connected 224 Say Y here if your device has buttons connected
225 directly to GPIO pins of the CPU. 225 directly to such GPIO pins. Your board-specific
226 setup logic must also provide a platform device,
227 with configuration data saying which GPIOs are used.
226 228
227 To compile this driver as a module, choose M here: the 229 To compile this driver as a module, choose M here: the
228 module will be called gpio-keys. 230 module will be called gpio-keys.
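
The new help text above notes that board-specific setup code must register a platform device describing which GPIOs carry buttons. As a rough, hedged sketch of what such a board file might look like (the gpio_keys_button/gpio_keys_platform_data field names are recalled from memory and may differ slightly from include/linux/gpio_keys.h in this tree; GPIO number and key code are made up for illustration):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>

/* Hypothetical board support: one power button wired to a GPIO line. */
static struct gpio_keys_button myboard_buttons[] = {
	{
		.keycode	= KEY_POWER,	/* input event code to report (field name assumed) */
		.gpio		= 42,		/* board-specific GPIO number, example only */
		.active_low	= 1,		/* line is pulled low when the button is pressed */
		.desc		= "power",
	},
};

static struct gpio_keys_platform_data myboard_button_data = {
	.buttons	= myboard_buttons,
	.nbuttons	= ARRAY_SIZE(myboard_buttons),
};

static struct platform_device myboard_button_device = {
	.name	= "gpio-keys",			/* matches the gpio-keys platform driver */
	.id	= -1,
	.dev	= {
		.platform_data = &myboard_button_data,
	},
};

/* Called from the board's init code. */
static int __init myboard_add_buttons(void)
{
	return platform_device_register(&myboard_button_device);
}

With such a device registered, the driver requests an interrupt per listed GPIO (gpio_keys_isr in the file below) and reports the configured codes through the input subsystem.
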
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index fa03a00b4c..ccf6df387b 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -23,11 +23,9 @@
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/input.h> 24#include <linux/input.h>
25#include <linux/irq.h> 25#include <linux/irq.h>
26#include <linux/gpio_keys.h>
26 27
27#include <asm/gpio.h> 28#include <asm/gpio.h>
28#include <asm/arch/hardware.h>
29
30#include <asm/hardware/gpio_keys.h>
31 29
32static irqreturn_t gpio_keys_isr(int irq, void *dev_id) 30static irqreturn_t gpio_keys_isr(int irq, void *dev_id)
33{ 31{
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index ec195a36e8..db9cca3b65 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -553,7 +553,8 @@ static int __devinit i8042_check_aux(void)
553 */ 553 */
554 554
555 param = 0x5a; 555 param = 0x5a;
556 if (i8042_command(&param, I8042_CMD_AUX_LOOP) || param != 0x5a) { 556 retval = i8042_command(&param, I8042_CMD_AUX_LOOP);
557 if (retval || param != 0x5a) {
557 558
558/* 559/*
559 * External connection test - filters out AT-soldered PS/2 i8042's 560 * External connection test - filters out AT-soldered PS/2 i8042's
@@ -567,7 +568,12 @@ static int __devinit i8042_check_aux(void)
567 (param && param != 0xfa && param != 0xff)) 568 (param && param != 0xfa && param != 0xff))
568 return -1; 569 return -1;
569 570
570 aux_loop_broken = 1; 571/*
572 * If AUX_LOOP completed without error but returned unexpected data
573 * mark it as broken
574 */
575 if (!retval)
576 aux_loop_broken = 1;
571 } 577 }
572 578
573/* 579/*
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index 8b6c9a431f..c921d6c522 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -2,13 +2,25 @@
2# Config.in for the CAPI subsystem 2# Config.in for the CAPI subsystem
3# 3#
4config ISDN_DRV_AVMB1_VERBOSE_REASON 4config ISDN_DRV_AVMB1_VERBOSE_REASON
5 bool "Verbose reason code reporting (kernel size +=7K)" 5 bool "Verbose reason code reporting"
6 depends on ISDN_CAPI 6 depends on ISDN_CAPI
7 default y
7 help 8 help
8 If you say Y here, the AVM B1 driver will give verbose reasons for 9 If you say Y here, the CAPI drivers will give verbose reasons for
9 disconnecting. This will increase the size of the kernel by 7 KB. If 10 disconnecting. This will increase the size of the kernel by 7 KB. If
10 unsure, say Y. 11 unsure, say Y.
11 12
13config CAPI_TRACE
14 bool "CAPI trace support"
15 depends on ISDN_CAPI
16 default y
17 help
18 If you say Y here, the kernelcapi driver can make verbose traces
19 of CAPI messages. This feature can be enabled/disabled via IOCTL for
20 every controller (default disabled).
21 This will increase the size of the kernelcapi module by 20 KB.
22 If unsure, say Y.
23
12config ISDN_CAPI_MIDDLEWARE 24config ISDN_CAPI_MIDDLEWARE
13 bool "CAPI2.0 Middleware support (EXPERIMENTAL)" 25 bool "CAPI2.0 Middleware support (EXPERIMENTAL)"
14 depends on ISDN_CAPI && EXPERIMENTAL 26 depends on ISDN_CAPI && EXPERIMENTAL
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 2a49cea0a2..23b6f7bc16 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -990,6 +990,7 @@ static void handle_plci(_cmsg * cmsg)
990 capidrv_contr *card = findcontrbynumber(cmsg->adr.adrController & 0x7f); 990 capidrv_contr *card = findcontrbynumber(cmsg->adr.adrController & 0x7f);
991 capidrv_plci *plcip; 991 capidrv_plci *plcip;
992 isdn_ctrl cmd; 992 isdn_ctrl cmd;
993 _cdebbuf *cdb;
993 994
994 if (!card) { 995 if (!card) {
995 printk(KERN_ERR "capidrv: %s from unknown controller 0x%x\n", 996 printk(KERN_ERR "capidrv: %s from unknown controller 0x%x\n",
@@ -1122,8 +1123,15 @@ static void handle_plci(_cmsg * cmsg)
1122 break; 1123 break;
1123 } 1124 }
1124 } 1125 }
1125 printk(KERN_ERR "capidrv-%d: %s\n", 1126 cdb = capi_cmsg2str(cmsg);
1126 card->contrnr, capi_cmsg2str(cmsg)); 1127 if (cdb) {
1128 printk(KERN_WARNING "capidrv-%d: %s\n",
1129 card->contrnr, cdb->buf);
1130 cdebbuf_free(cdb);
1131 } else
1132 printk(KERN_WARNING "capidrv-%d: CAPI_INFO_IND InfoNumber %x not handled\n",
1133 card->contrnr, cmsg->InfoNumber);
1134
1127 break; 1135 break;
1128 1136
1129 case CAPI_CONNECT_ACTIVE_CONF: /* plci */ 1137 case CAPI_CONNECT_ACTIVE_CONF: /* plci */
@@ -1371,10 +1379,18 @@ static _cmsg s_cmsg;
1371static void capidrv_recv_message(struct capi20_appl *ap, struct sk_buff *skb) 1379static void capidrv_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
1372{ 1380{
1373 capi_message2cmsg(&s_cmsg, skb->data); 1381 capi_message2cmsg(&s_cmsg, skb->data);
1374 if (debugmode > 3) 1382 if (debugmode > 3) {
1375 printk(KERN_DEBUG "capidrv_signal: applid=%d %s\n", 1383 _cdebbuf *cdb = capi_cmsg2str(&s_cmsg);
1376 ap->applid, capi_cmsg2str(&s_cmsg)); 1384
1377 1385 if (cdb) {
1386 printk(KERN_DEBUG "%s: applid=%d %s\n", __FUNCTION__,
1387 ap->applid, cdb->buf);
1388 cdebbuf_free(cdb);
1389 } else
1390 printk(KERN_DEBUG "%s: applid=%d %s not traced\n",
1391 __FUNCTION__, ap->applid,
1392 capi_cmd2str(s_cmsg.Command, s_cmsg.Subcommand));
1393 }
1378 if (s_cmsg.Command == CAPI_DATA_B3 1394 if (s_cmsg.Command == CAPI_DATA_B3
1379 && s_cmsg.Subcommand == CAPI_IND) { 1395 && s_cmsg.Subcommand == CAPI_IND) {
1380 handle_data(&s_cmsg, skb); 1396 handle_data(&s_cmsg, skb);
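
The capiutil.c changes that follow drop the fixed 8 KB static buffer and switch capi_message2str()/capi_cmsg2str() to a dynamically grown _cdebbuf: whenever vsnprintf() reports truncation, the buffer is enlarged and the formatting retried. A minimal, self-contained userspace sketch of that grow-and-retry idiom (the dbuf names are invented for this illustration and are not part of the CAPI code):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

struct dbuf {
	char *buf;	/* backing storage, may start out NULL */
	size_t size;	/* bytes allocated */
	size_t pos;	/* bytes used, not counting the trailing NUL */
};

static int dbuf_printf(struct dbuf *d, const char *fmt, ...)
{
	va_list ap;
	size_t room = d->size - d->pos;
	int n;

	va_start(ap, fmt);
	n = vsnprintf(d->buf ? d->buf + d->pos : NULL, room, fmt, ap);
	va_end(ap);
	if (n < 0)
		return -1;

	if ((size_t)n >= room) {
		/* output was truncated: grow until it fits, then format again */
		size_t newsize = d->size ? d->size * 2 : 64;
		char *newbuf;

		while (newsize - d->pos <= (size_t)n)
			newsize *= 2;
		newbuf = realloc(d->buf, newsize);
		if (!newbuf)
			return -1;
		d->buf = newbuf;
		d->size = newsize;

		va_start(ap, fmt);
		n = vsnprintf(d->buf + d->pos, d->size - d->pos, fmt, ap);
		va_end(ap);
	}
	d->pos += (size_t)n;
	return n;
}

int main(void)
{
	struct dbuf d = { NULL, 0, 0 };
	int i;

	for (i = 0; i < 1000; i++)
		dbuf_printf(&d, "<%02x", i & 0xff);
	printf("%zu bytes: %.40s...\n", d.pos, d.buf);
	free(d.buf);
	return 0;
}

The kernel code below additionally keeps one preallocated buffer guarded by a bit lock (g_debbuf/g_debbuf_lock) so the common case avoids kmalloc() with GFP_ATOMIC.
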
diff --git a/drivers/isdn/capi/capiutil.c b/drivers/isdn/capi/capiutil.c
index c1b21552fc..ad1e2702c2 100644
--- a/drivers/isdn/capi/capiutil.c
+++ b/drivers/isdn/capi/capiutil.c
@@ -648,6 +648,9 @@ char *capi_cmd2str(u8 cmd, u8 subcmd)
648 648
649 649
650/*-------------------------------------------------------*/ 650/*-------------------------------------------------------*/
651
652#ifdef CONFIG_CAPI_TRACE
653
651/*-------------------------------------------------------*/ 654/*-------------------------------------------------------*/
652 655
653static char *pnames[] = 656static char *pnames[] =
@@ -703,44 +706,77 @@ static char *pnames[] =
703}; 706};
704 707
705 708
706static char buf[8192];
707static char *p = NULL;
708 709
709#include <stdarg.h> 710#include <stdarg.h>
710 711
711/*-------------------------------------------------------*/ 712/*-------------------------------------------------------*/
712static void bufprint(char *fmt,...) 713static _cdebbuf *bufprint(_cdebbuf *cdb, char *fmt,...)
713{ 714{
714 va_list f; 715 va_list f;
716 size_t n,r;
717
718 if (!cdb)
719 return NULL;
715 va_start(f, fmt); 720 va_start(f, fmt);
716 vsprintf(p, fmt, f); 721 r = cdb->size - cdb->pos;
722 n = vsnprintf(cdb->p, r, fmt, f);
717 va_end(f); 723 va_end(f);
718 p += strlen(p); 724 if (n >= r) {
725 /* truncated, need bigger buffer */
726 size_t ns = 2 * cdb->size;
727 u_char *nb;
728
729 while ((ns - cdb->pos) <= n)
730 ns *= 2;
731 nb = kmalloc(ns, GFP_ATOMIC);
732 if (!nb) {
733 cdebbuf_free(cdb);
734 return NULL;
735 }
736 memcpy(nb, cdb->buf, cdb->pos);
737 kfree(cdb->buf);
738 nb[cdb->pos] = 0;
739 cdb->buf = nb;
740 cdb->p = cdb->buf + cdb->pos;
741 cdb->size = ns;
742 va_start(f, fmt);
743 r = cdb->size - cdb->pos;
744 n = vsnprintf(cdb->p, r, fmt, f);
745 va_end(f);
746 }
747 cdb->p += n;
748 cdb->pos += n;
749 return cdb;
719} 750}
720 751
721static void printstructlen(u8 * m, unsigned len) 752static _cdebbuf *printstructlen(_cdebbuf *cdb, u8 * m, unsigned len)
722{ 753{
723 unsigned hex = 0; 754 unsigned hex = 0;
755
756 if (!cdb)
757 return NULL;
724 for (; len; len--, m++) 758 for (; len; len--, m++)
725 if (isalnum(*m) || *m == ' ') { 759 if (isalnum(*m) || *m == ' ') {
726 if (hex) 760 if (hex)
727 bufprint(">"); 761 cdb = bufprint(cdb, ">");
728 bufprint("%c", *m); 762 cdb = bufprint(cdb, "%c", *m);
729 hex = 0; 763 hex = 0;
730 } else { 764 } else {
731 if (!hex) 765 if (!hex)
732 bufprint("<%02x", *m); 766 cdb = bufprint(cdb, "<%02x", *m);
733 else 767 else
734 bufprint(" %02x", *m); 768 cdb = bufprint(cdb, " %02x", *m);
735 hex = 1; 769 hex = 1;
736 } 770 }
737 if (hex) 771 if (hex)
738 bufprint(">"); 772 cdb = bufprint(cdb, ">");
773 return cdb;
739} 774}
740 775
741static void printstruct(u8 * m) 776static _cdebbuf *printstruct(_cdebbuf *cdb, u8 * m)
742{ 777{
743 unsigned len; 778 unsigned len;
779
744 if (m[0] != 0xff) { 780 if (m[0] != 0xff) {
745 len = m[0]; 781 len = m[0];
746 m += 1; 782 m += 1;
@@ -748,42 +784,45 @@ static void printstruct(u8 * m)
748 len = ((u16 *) (m + 1))[0]; 784 len = ((u16 *) (m + 1))[0];
749 m += 3; 785 m += 3;
750 } 786 }
751 printstructlen(m, len); 787 cdb = printstructlen(cdb, m, len);
788 return cdb;
752} 789}
753 790
754/*-------------------------------------------------------*/ 791/*-------------------------------------------------------*/
755#define NAME (pnames[cmsg->par[cmsg->p]]) 792#define NAME (pnames[cmsg->par[cmsg->p]])
756 793
757static void protocol_message_2_pars(_cmsg * cmsg, int level) 794static _cdebbuf *protocol_message_2_pars(_cdebbuf *cdb, _cmsg *cmsg, int level)
758{ 795{
759 for (; TYP != _CEND; cmsg->p++) { 796 for (; TYP != _CEND; cmsg->p++) {
760 int slen = 29 + 3 - level; 797 int slen = 29 + 3 - level;
761 int i; 798 int i;
762 799
763 bufprint(" "); 800 if (!cdb)
801 return NULL;
802 cdb = bufprint(cdb, " ");
764 for (i = 0; i < level - 1; i++) 803 for (i = 0; i < level - 1; i++)
765 bufprint(" "); 804 cdb = bufprint(cdb, " ");
766 805
767 switch (TYP) { 806 switch (TYP) {
768 case _CBYTE: 807 case _CBYTE:
769 bufprint("%-*s = 0x%x\n", slen, NAME, *(u8 *) (cmsg->m + cmsg->l)); 808 cdb = bufprint(cdb, "%-*s = 0x%x\n", slen, NAME, *(u8 *) (cmsg->m + cmsg->l));
770 cmsg->l++; 809 cmsg->l++;
771 break; 810 break;
772 case _CWORD: 811 case _CWORD:
773 bufprint("%-*s = 0x%x\n", slen, NAME, *(u16 *) (cmsg->m + cmsg->l)); 812 cdb = bufprint(cdb, "%-*s = 0x%x\n", slen, NAME, *(u16 *) (cmsg->m + cmsg->l));
774 cmsg->l += 2; 813 cmsg->l += 2;
775 break; 814 break;
776 case _CDWORD: 815 case _CDWORD:
777 bufprint("%-*s = 0x%lx\n", slen, NAME, *(u32 *) (cmsg->m + cmsg->l)); 816 cdb = bufprint(cdb, "%-*s = 0x%lx\n", slen, NAME, *(u32 *) (cmsg->m + cmsg->l));
778 cmsg->l += 4; 817 cmsg->l += 4;
779 break; 818 break;
780 case _CSTRUCT: 819 case _CSTRUCT:
781 bufprint("%-*s = ", slen, NAME); 820 cdb = bufprint(cdb, "%-*s = ", slen, NAME);
782 if (cmsg->m[cmsg->l] == '\0') 821 if (cmsg->m[cmsg->l] == '\0')
783 bufprint("default"); 822 cdb = bufprint(cdb, "default");
784 else 823 else
785 printstruct(cmsg->m + cmsg->l); 824 cdb = printstruct(cdb, cmsg->m + cmsg->l);
786 bufprint("\n"); 825 cdb = bufprint(cdb, "\n");
787 if (cmsg->m[cmsg->l] != 0xff) 826 if (cmsg->m[cmsg->l] != 0xff)
788 cmsg->l += 1 + cmsg->m[cmsg->l]; 827 cmsg->l += 1 + cmsg->m[cmsg->l];
789 else 828 else
@@ -794,61 +833,184 @@ static void protocol_message_2_pars(_cmsg * cmsg, int level)
794 case _CMSTRUCT: 833 case _CMSTRUCT:
795/*----- Metastruktur 0 -----*/ 834/*----- Metastruktur 0 -----*/
796 if (cmsg->m[cmsg->l] == '\0') { 835 if (cmsg->m[cmsg->l] == '\0') {
797 bufprint("%-*s = default\n", slen, NAME); 836 cdb = bufprint(cdb, "%-*s = default\n", slen, NAME);
798 cmsg->l++; 837 cmsg->l++;
799 jumpcstruct(cmsg); 838 jumpcstruct(cmsg);
800 } else { 839 } else {
801 char *name = NAME; 840 char *name = NAME;
802 unsigned _l = cmsg->l; 841 unsigned _l = cmsg->l;
803 bufprint("%-*s\n", slen, name); 842 cdb = bufprint(cdb, "%-*s\n", slen, name);
804 cmsg->l = (cmsg->m + _l)[0] == 255 ? cmsg->l + 3 : cmsg->l + 1; 843 cmsg->l = (cmsg->m + _l)[0] == 255 ? cmsg->l + 3 : cmsg->l + 1;
805 cmsg->p++; 844 cmsg->p++;
806 protocol_message_2_pars(cmsg, level + 1); 845 cdb = protocol_message_2_pars(cdb, cmsg, level + 1);
807 } 846 }
808 break; 847 break;
809 } 848 }
810 } 849 }
850 return cdb;
811} 851}
812/*-------------------------------------------------------*/ 852/*-------------------------------------------------------*/
813char *capi_message2str(u8 * msg) 853
854static _cdebbuf *g_debbuf;
855static u_long g_debbuf_lock;
856static _cmsg *g_cmsg;
857
858_cdebbuf *cdebbuf_alloc(void)
814{ 859{
860 _cdebbuf *cdb;
861
862 if (likely(!test_and_set_bit(1, &g_debbuf_lock))) {
863 cdb = g_debbuf;
864 goto init;
865 } else
866 cdb = kmalloc(sizeof(_cdebbuf), GFP_ATOMIC);
867 if (!cdb)
868 return NULL;
869 cdb->buf = kmalloc(CDEBUG_SIZE, GFP_ATOMIC);
870 if (!cdb->buf) {
871 kfree(cdb);
872 return NULL;
873 }
874 cdb->size = CDEBUG_SIZE;
875init:
876 cdb->buf[0] = 0;
877 cdb->p = cdb->buf;
878 cdb->pos = 0;
879 return cdb;
880}
815 881
816 _cmsg cmsg; 882void cdebbuf_free(_cdebbuf *cdb)
817 p = buf; 883{
818 p[0] = 0; 884 if (likely(cdb == g_debbuf)) {
885 test_and_clear_bit(1, &g_debbuf_lock);
886 return;
887 }
888 if (likely(cdb))
889 kfree(cdb->buf);
890 kfree(cdb);
891}
819 892
820 cmsg.m = msg;
821 cmsg.l = 8;
822 cmsg.p = 0;
823 byteTRcpy(cmsg.m + 4, &cmsg.Command);
824 byteTRcpy(cmsg.m + 5, &cmsg.Subcommand);
825 cmsg.par = cpars[command_2_index(cmsg.Command, cmsg.Subcommand)];
826 893
827 bufprint("%-26s ID=%03d #0x%04x LEN=%04d\n", 894_cdebbuf *capi_message2str(u8 * msg)
828 mnames[command_2_index(cmsg.Command, cmsg.Subcommand)], 895{
896 _cdebbuf *cdb;
897 _cmsg *cmsg;
898
899 cdb = cdebbuf_alloc();
900 if (unlikely(!cdb))
901 return NULL;
902 if (likely(cdb == g_debbuf))
903 cmsg = g_cmsg;
904 else
905 cmsg = kmalloc(sizeof(_cmsg), GFP_ATOMIC);
906 if (unlikely(!cmsg)) {
907 cdebbuf_free(cdb);
908 return NULL;
909 }
910 cmsg->m = msg;
911 cmsg->l = 8;
912 cmsg->p = 0;
913 byteTRcpy(cmsg->m + 4, &cmsg->Command);
914 byteTRcpy(cmsg->m + 5, &cmsg->Subcommand);
915 cmsg->par = cpars[command_2_index(cmsg->Command, cmsg->Subcommand)];
916
917 cdb = bufprint(cdb, "%-26s ID=%03d #0x%04x LEN=%04d\n",
918 mnames[command_2_index(cmsg->Command, cmsg->Subcommand)],
829 ((unsigned short *) msg)[1], 919 ((unsigned short *) msg)[1],
830 ((unsigned short *) msg)[3], 920 ((unsigned short *) msg)[3],
831 ((unsigned short *) msg)[0]); 921 ((unsigned short *) msg)[0]);
832 922
833 protocol_message_2_pars(&cmsg, 1); 923 cdb = protocol_message_2_pars(cdb, cmsg, 1);
834 return buf; 924 if (unlikely(cmsg != g_cmsg))
925 kfree(cmsg);
926 return cdb;
835} 927}
836 928
837char *capi_cmsg2str(_cmsg * cmsg) 929_cdebbuf *capi_cmsg2str(_cmsg * cmsg)
838{ 930{
839 p = buf; 931 _cdebbuf *cdb;
840 p[0] = 0; 932
933 cdb = cdebbuf_alloc();
934 if (!cdb)
935 return NULL;
841 cmsg->l = 8; 936 cmsg->l = 8;
842 cmsg->p = 0; 937 cmsg->p = 0;
843 bufprint("%s ID=%03d #0x%04x LEN=%04d\n", 938 cdb = bufprint(cdb, "%s ID=%03d #0x%04x LEN=%04d\n",
844 mnames[command_2_index(cmsg->Command, cmsg->Subcommand)], 939 mnames[command_2_index(cmsg->Command, cmsg->Subcommand)],
845 ((u16 *) cmsg->m)[1], 940 ((u16 *) cmsg->m)[1],
846 ((u16 *) cmsg->m)[3], 941 ((u16 *) cmsg->m)[3],
847 ((u16 *) cmsg->m)[0]); 942 ((u16 *) cmsg->m)[0]);
848 protocol_message_2_pars(cmsg, 1); 943 cdb = protocol_message_2_pars(cdb, cmsg, 1);
849 return buf; 944 return cdb;
850} 945}
851 946
947int __init cdebug_init(void)
948{
 949	g_cmsg = kmalloc(sizeof(_cmsg), GFP_KERNEL);
950 if (!g_cmsg)
951 return ENOMEM;
952 g_debbuf = kmalloc(sizeof(_cdebbuf), GFP_KERNEL);
953 if (!g_debbuf) {
954 kfree(g_cmsg);
955 return ENOMEM;
956 }
957 g_debbuf->buf = kmalloc(CDEBUG_GSIZE, GFP_KERNEL);
958 if (!g_debbuf->buf) {
959 kfree(g_cmsg);
960 kfree(g_debbuf);
 961		return ENOMEM;
962 }
963 g_debbuf->size = CDEBUG_GSIZE;
964 g_debbuf->buf[0] = 0;
965 g_debbuf->p = g_debbuf->buf;
966 g_debbuf->pos = 0;
967 return 0;
968}
969
970void __exit cdebug_exit(void)
971{
972 if (g_debbuf)
973 kfree(g_debbuf->buf);
974 kfree(g_debbuf);
975 kfree(g_cmsg);
976}
977
978#else /* !CONFIG_CAPI_TRACE */
979
980static _cdebbuf g_debbuf = {"CONFIG_CAPI_TRACE not enabled", NULL, 0, 0};
981
982_cdebbuf *capi_message2str(u8 * msg)
983{
984 return &g_debbuf;
985}
986
987_cdebbuf *capi_cmsg2str(_cmsg * cmsg)
988{
989 return &g_debbuf;
990}
991
992_cdebbuf *cdebbuf_alloc(void)
993{
994 return &g_debbuf;
995}
996
997void cdebbuf_free(_cdebbuf *cdb)
998{
999}
1000
1001int __init cdebug_init(void)
1002{
1003 return 0;
1004}
1005
1006void __exit cdebug_exit(void)
1007{
1008}
1009
1010#endif
1011
1012EXPORT_SYMBOL(cdebbuf_alloc);
1013EXPORT_SYMBOL(cdebbuf_free);
852EXPORT_SYMBOL(capi_cmsg2message); 1014EXPORT_SYMBOL(capi_cmsg2message);
853EXPORT_SYMBOL(capi_message2cmsg); 1015EXPORT_SYMBOL(capi_message2cmsg);
854EXPORT_SYMBOL(capi_cmsg_header); 1016EXPORT_SYMBOL(capi_cmsg_header);
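
The capiutil.c hunks above replace the old static trace buffer with _cdebbuf objects that each caller must obtain and release. A minimal caller sketch of the intended pattern (kernel context assumed; skb and card stand in for whatever the caller already has, the kcapi.c hunks below use exactly this shape):

	_cdebbuf *cdb = capi_message2str(skb->data);	/* may return NULL on allocation failure */
	if (cdb) {
		printk(KERN_DEBUG "kcapi: got [%03d] %s\n", card->cnr, cdb->buf);
		cdebbuf_free(cdb);	/* releases the shared global buffer or the kmalloc'ed copy */
	} else
		printk(KERN_DEBUG "kcapi: cannot trace message\n");

With CONFIG_CAPI_TRACE disabled, capi_message2str() returns a static stub buffer and cdebbuf_free() is a no-op, so the same calling sequence stays safe.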
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 783a255263..3ed34f7a1c 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -276,10 +276,17 @@ void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *s
276 int showctl = 0; 276 int showctl = 0;
277 u8 cmd, subcmd; 277 u8 cmd, subcmd;
278 unsigned long flags; 278 unsigned long flags;
279 _cdebbuf *cdb;
279 280
280 if (card->cardstate != CARD_RUNNING) { 281 if (card->cardstate != CARD_RUNNING) {
281 printk(KERN_INFO "kcapi: controller %d not active, got: %s", 282 cdb = capi_message2str(skb->data);
282 card->cnr, capi_message2str(skb->data)); 283 if (cdb) {
284 printk(KERN_INFO "kcapi: controller [%03d] not active, got: %s",
285 card->cnr, cdb->buf);
286 cdebbuf_free(cdb);
287 } else
288 printk(KERN_INFO "kcapi: controller [%03d] not active, cannot trace\n",
289 card->cnr);
283 goto error; 290 goto error;
284 } 291 }
285 292
@@ -295,15 +302,21 @@ void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *s
295 showctl |= (card->traceflag & 1); 302 showctl |= (card->traceflag & 1);
296 if (showctl & 2) { 303 if (showctl & 2) {
297 if (showctl & 1) { 304 if (showctl & 1) {
298 printk(KERN_DEBUG "kcapi: got [0x%lx] id#%d %s len=%u\n", 305 printk(KERN_DEBUG "kcapi: got [%03d] id#%d %s len=%u\n",
299 (unsigned long) card->cnr, 306 card->cnr, CAPIMSG_APPID(skb->data),
300 CAPIMSG_APPID(skb->data),
301 capi_cmd2str(cmd, subcmd), 307 capi_cmd2str(cmd, subcmd),
302 CAPIMSG_LEN(skb->data)); 308 CAPIMSG_LEN(skb->data));
303 } else { 309 } else {
304 printk(KERN_DEBUG "kcapi: got [0x%lx] %s\n", 310 cdb = capi_message2str(skb->data);
305 (unsigned long) card->cnr, 311 if (cdb) {
306 capi_message2str(skb->data)); 312 printk(KERN_DEBUG "kcapi: got [%03d] %s\n",
313 card->cnr, cdb->buf);
314 cdebbuf_free(cdb);
315 } else
316 printk(KERN_DEBUG "kcapi: got [%03d] id#%d %s len=%u, cannot trace\n",
317 card->cnr, CAPIMSG_APPID(skb->data),
318 capi_cmd2str(cmd, subcmd),
319 CAPIMSG_LEN(skb->data));
307 } 320 }
308 321
309 } 322 }
@@ -312,8 +325,15 @@ void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *s
312 ap = get_capi_appl_by_nr(CAPIMSG_APPID(skb->data)); 325 ap = get_capi_appl_by_nr(CAPIMSG_APPID(skb->data));
313 if ((!ap) || (ap->release_in_progress)) { 326 if ((!ap) || (ap->release_in_progress)) {
314 read_unlock_irqrestore(&application_lock, flags); 327 read_unlock_irqrestore(&application_lock, flags);
315 printk(KERN_ERR "kcapi: handle_message: applid %d state released (%s)\n", 328 cdb = capi_message2str(skb->data);
316 CAPIMSG_APPID(skb->data), capi_message2str(skb->data)); 329 if (cdb) {
330 printk(KERN_ERR "kcapi: handle_message: applid %d state released (%s)\n",
331 CAPIMSG_APPID(skb->data), cdb->buf);
332 cdebbuf_free(cdb);
333 } else
334 printk(KERN_ERR "kcapi: handle_message: applid %d state released (%s) cannot trace\n",
335 CAPIMSG_APPID(skb->data),
336 capi_cmd2str(cmd, subcmd));
317 goto error; 337 goto error;
318 } 338 }
319 skb_queue_tail(&ap->recv_queue, skb); 339 skb_queue_tail(&ap->recv_queue, skb);
@@ -332,7 +352,7 @@ void capi_ctr_ready(struct capi_ctr * card)
332{ 352{
333 card->cardstate = CARD_RUNNING; 353 card->cardstate = CARD_RUNNING;
334 354
335 printk(KERN_NOTICE "kcapi: card %d \"%s\" ready.\n", 355 printk(KERN_NOTICE "kcapi: card [%03d] \"%s\" ready.\n",
336 card->cnr, card->name); 356 card->cnr, card->name);
337 357
338 notify_push(KCI_CONTRUP, card->cnr, 0, 0); 358 notify_push(KCI_CONTRUP, card->cnr, 0, 0);
@@ -364,7 +384,7 @@ void capi_ctr_reseted(struct capi_ctr * card)
364 capi_ctr_put(card); 384 capi_ctr_put(card);
365 } 385 }
366 386
367 printk(KERN_NOTICE "kcapi: card %d down.\n", card->cnr); 387 printk(KERN_NOTICE "kcapi: card [%03d] down.\n", card->cnr);
368 388
369 notify_push(KCI_CONTRDOWN, card->cnr, 0, 0); 389 notify_push(KCI_CONTRDOWN, card->cnr, 0, 0);
370} 390}
@@ -374,7 +394,7 @@ EXPORT_SYMBOL(capi_ctr_reseted);
374void capi_ctr_suspend_output(struct capi_ctr *card) 394void capi_ctr_suspend_output(struct capi_ctr *card)
375{ 395{
376 if (!card->blocked) { 396 if (!card->blocked) {
377 printk(KERN_DEBUG "kcapi: card %d suspend\n", card->cnr); 397 printk(KERN_DEBUG "kcapi: card [%03d] suspend\n", card->cnr);
378 card->blocked = 1; 398 card->blocked = 1;
379 } 399 }
380} 400}
@@ -384,7 +404,7 @@ EXPORT_SYMBOL(capi_ctr_suspend_output);
384void capi_ctr_resume_output(struct capi_ctr *card) 404void capi_ctr_resume_output(struct capi_ctr *card)
385{ 405{
386 if (card->blocked) { 406 if (card->blocked) {
387 printk(KERN_DEBUG "kcapi: card %d resume\n", card->cnr); 407 printk(KERN_DEBUG "kcapi: card [%03d] resume\n", card->cnr);
388 card->blocked = 0; 408 card->blocked = 0;
389 } 409 }
390} 410}
@@ -432,7 +452,7 @@ attach_capi_ctr(struct capi_ctr *card)
432 } 452 }
433 453
434 ncards++; 454 ncards++;
435 printk(KERN_NOTICE "kcapi: Controller %d: %s attached\n", 455 printk(KERN_NOTICE "kcapi: Controller [%03d]: %s attached\n",
436 card->cnr, card->name); 456 card->cnr, card->name);
437 return 0; 457 return 0;
438} 458}
@@ -451,7 +471,7 @@ int detach_capi_ctr(struct capi_ctr *card)
451 card->procent = NULL; 471 card->procent = NULL;
452 } 472 }
453 capi_cards[card->cnr - 1] = NULL; 473 capi_cards[card->cnr - 1] = NULL;
454 printk(KERN_NOTICE "kcapi: Controller %d: %s unregistered\n", 474 printk(KERN_NOTICE "kcapi: Controller [%03d]: %s unregistered\n",
455 card->cnr, card->name); 475 card->cnr, card->name);
456 476
457 return 0; 477 return 0;
@@ -623,17 +643,25 @@ u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb)
623 showctl |= (card->traceflag & 1); 643 showctl |= (card->traceflag & 1);
624 if (showctl & 2) { 644 if (showctl & 2) {
625 if (showctl & 1) { 645 if (showctl & 1) {
626 printk(KERN_DEBUG "kcapi: put [%#x] id#%d %s len=%u\n", 646 printk(KERN_DEBUG "kcapi: put [%03d] id#%d %s len=%u\n",
627 CAPIMSG_CONTROLLER(skb->data), 647 CAPIMSG_CONTROLLER(skb->data),
628 CAPIMSG_APPID(skb->data), 648 CAPIMSG_APPID(skb->data),
629 capi_cmd2str(cmd, subcmd), 649 capi_cmd2str(cmd, subcmd),
630 CAPIMSG_LEN(skb->data)); 650 CAPIMSG_LEN(skb->data));
631 } else { 651 } else {
632 printk(KERN_DEBUG "kcapi: put [%#x] %s\n", 652 _cdebbuf *cdb = capi_message2str(skb->data);
633 CAPIMSG_CONTROLLER(skb->data), 653 if (cdb) {
634 capi_message2str(skb->data)); 654 printk(KERN_DEBUG "kcapi: put [%03d] %s\n",
655 CAPIMSG_CONTROLLER(skb->data),
656 cdb->buf);
657 cdebbuf_free(cdb);
658 } else
659 printk(KERN_DEBUG "kcapi: put [%03d] id#%d %s len=%u cannot trace\n",
660 CAPIMSG_CONTROLLER(skb->data),
661 CAPIMSG_APPID(skb->data),
662 capi_cmd2str(cmd, subcmd),
663 CAPIMSG_LEN(skb->data));
635 } 664 }
636
637 } 665 }
638 return card->send_message(card, skb); 666 return card->send_message(card, skb);
639} 667}
@@ -894,7 +922,7 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
894 return -ESRCH; 922 return -ESRCH;
895 923
896 card->traceflag = fdef.flag; 924 card->traceflag = fdef.flag;
897 printk(KERN_INFO "kcapi: contr %d set trace=%d\n", 925 printk(KERN_INFO "kcapi: contr [%03d] set trace=%d\n",
898 card->cnr, card->traceflag); 926 card->cnr, card->traceflag);
899 return 0; 927 return 0;
900 } 928 }
@@ -967,7 +995,11 @@ static int __init kcapi_init(void)
967{ 995{
968 char *p; 996 char *p;
969 char rev[32]; 997 char rev[32];
998 int ret;
970 999
1000 ret = cdebug_init();
1001 if (ret)
1002 return ret;
971 kcapi_proc_init(); 1003 kcapi_proc_init();
972 1004
973 if ((p = strchr(revision, ':')) != 0 && p[1]) { 1005 if ((p = strchr(revision, ':')) != 0 && p[1]) {
@@ -988,6 +1020,7 @@ static void __exit kcapi_exit(void)
988 1020
989 /* make sure all notifiers are finished */ 1021 /* make sure all notifiers are finished */
990 flush_scheduled_work(); 1022 flush_scheduled_work();
1023 cdebug_exit();
991} 1024}
992 1025
993module_init(kcapi_init); 1026module_init(kcapi_init);
diff --git a/drivers/isdn/gigaset/Makefile b/drivers/isdn/gigaset/Makefile
index 5158be0b7a..e9d3189f56 100644
--- a/drivers/isdn/gigaset/Makefile
+++ b/drivers/isdn/gigaset/Makefile
@@ -1,8 +1,9 @@
1gigaset-y := common.o interface.o proc.o ev-layer.o i4l.o asyncdata.o 1gigaset-y := common.o interface.o proc.o ev-layer.o i4l.o asyncdata.o
2usb_gigaset-y := usb-gigaset.o 2usb_gigaset-y := usb-gigaset.o
3bas_gigaset-y := bas-gigaset.o isocdata.o
4ser_gigaset-y := ser-gigaset.o 3ser_gigaset-y := ser-gigaset.o
4bas_gigaset-y := bas-gigaset.o isocdata.o
5 5
6obj-$(CONFIG_GIGASET_M105) += usb_gigaset.o gigaset.o 6obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset.o
7obj-$(CONFIG_GIGASET_BASE) += bas_gigaset.o gigaset.o 7obj-$(CONFIG_GIGASET_M105) += usb_gigaset.o
8obj-$(CONFIG_GIGASET_M101) += ser_gigaset.o gigaset.o 8obj-$(CONFIG_GIGASET_BASE) += bas_gigaset.o
9obj-$(CONFIG_GIGASET_M101) += ser_gigaset.o
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
index f2f108fcec..00a3be5b86 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -13,11 +13,6 @@
13 * ===================================================================== 13 * =====================================================================
14 */ 14 */
15 15
16/* not set by Kbuild when building both ser_gigaset and usb_gigaset */
17#ifndef KBUILD_MODNAME
18#define KBUILD_MODNAME "asy_gigaset"
19#endif
20
21#include "gigaset.h" 16#include "gigaset.h"
22#include <linux/crc-ccitt.h> 17#include <linux/crc-ccitt.h>
23#include <linux/bitrev.h> 18#include <linux/bitrev.h>
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 04574a9d44..0d122bf889 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -14,6 +14,7 @@
14 14
15#include "vmx.h" 15#include "vmx.h"
16#include <linux/kvm.h> 16#include <linux/kvm.h>
17#include <linux/kvm_para.h>
17 18
18#define CR0_PE_MASK (1ULL << 0) 19#define CR0_PE_MASK (1ULL << 0)
19#define CR0_TS_MASK (1ULL << 3) 20#define CR0_TS_MASK (1ULL << 3)
@@ -237,6 +238,9 @@ struct kvm_vcpu {
237 unsigned long cr0; 238 unsigned long cr0;
238 unsigned long cr2; 239 unsigned long cr2;
239 unsigned long cr3; 240 unsigned long cr3;
241 gpa_t para_state_gpa;
242 struct page *para_state_page;
243 gpa_t hypercall_gpa;
240 unsigned long cr4; 244 unsigned long cr4;
241 unsigned long cr8; 245 unsigned long cr8;
242 u64 pdptrs[4]; /* pae */ 246 u64 pdptrs[4]; /* pae */
@@ -305,6 +309,7 @@ struct kvm {
305 int busy; 309 int busy;
306 unsigned long rmap_overflow; 310 unsigned long rmap_overflow;
307 struct list_head vm_list; 311 struct list_head vm_list;
312 struct file *filp;
308}; 313};
309 314
310struct kvm_stat { 315struct kvm_stat {
@@ -339,7 +344,7 @@ struct kvm_arch_ops {
339 int (*vcpu_create)(struct kvm_vcpu *vcpu); 344 int (*vcpu_create)(struct kvm_vcpu *vcpu);
340 void (*vcpu_free)(struct kvm_vcpu *vcpu); 345 void (*vcpu_free)(struct kvm_vcpu *vcpu);
341 346
342 struct kvm_vcpu *(*vcpu_load)(struct kvm_vcpu *vcpu); 347 void (*vcpu_load)(struct kvm_vcpu *vcpu);
343 void (*vcpu_put)(struct kvm_vcpu *vcpu); 348 void (*vcpu_put)(struct kvm_vcpu *vcpu);
344 void (*vcpu_decache)(struct kvm_vcpu *vcpu); 349 void (*vcpu_decache)(struct kvm_vcpu *vcpu);
345 350
@@ -382,6 +387,8 @@ struct kvm_arch_ops {
382 int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run); 387 int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
383 int (*vcpu_setup)(struct kvm_vcpu *vcpu); 388 int (*vcpu_setup)(struct kvm_vcpu *vcpu);
384 void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); 389 void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
390 void (*patch_hypercall)(struct kvm_vcpu *vcpu,
391 unsigned char *hypercall_addr);
385}; 392};
386 393
387extern struct kvm_stat kvm_stat; 394extern struct kvm_stat kvm_stat;
@@ -476,6 +483,8 @@ void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
476int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva); 483int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
477void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); 484void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
478 485
486int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);
487
479static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, 488static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
480 u32 error_code) 489 u32 error_code)
481{ 490{
@@ -523,7 +532,7 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
523{ 532{
524 struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); 533 struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
525 534
526 return (struct kvm_mmu_page *)page->private; 535 return (struct kvm_mmu_page *)page_private(page);
527} 536}
528 537
529static inline u16 read_fs(void) 538static inline u16 read_fs(void)
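
The kvm_main.c changes below split the monolithic /dev/kvm ioctl interface into three levels backed by kvmfs file descriptors: the character device only creates VMs (KVM_CREATE_VM), each VM fd creates vcpus and manages guest memory (KVM_CREATE_VCPU, KVM_SET_MEMORY_REGION, KVM_GET_DIRTY_LOG), and each vcpu fd carries the per-vcpu operations (KVM_RUN, KVM_GET_REGS, KVM_GET_MSRS, ...). A rough, hypothetical userspace sketch of that flow, with error handling omitted; the ioctl numbers themselves come from <linux/kvm.h>, not from this patch:

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int kvm_fd  = open("/dev/kvm", O_RDWR);
	int vm_fd   = ioctl(kvm_fd, KVM_CREATE_VM, 0);	/* returns a kvmfs fd for the new VM */
	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);	/* vcpu slot 0; also a kvmfs fd */

	struct kvm_run run;
	memset(&run, 0, sizeof run);
	ioctl(vcpu_fd, KVM_RUN, &run);	/* per-vcpu ioctls now target the vcpu fd, not /dev/kvm */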
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index af866147ff..a163bca389 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -20,6 +20,7 @@
20#include <linux/kvm.h> 20#include <linux/kvm.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/magic.h>
23#include <asm/processor.h> 24#include <asm/processor.h>
24#include <linux/percpu.h> 25#include <linux/percpu.h>
25#include <linux/gfp.h> 26#include <linux/gfp.h>
@@ -36,6 +37,9 @@
36#include <asm/desc.h> 37#include <asm/desc.h>
37#include <linux/sysdev.h> 38#include <linux/sysdev.h>
38#include <linux/cpu.h> 39#include <linux/cpu.h>
40#include <linux/file.h>
41#include <linux/fs.h>
42#include <linux/mount.h>
39 43
40#include "x86_emulate.h" 44#include "x86_emulate.h"
41#include "segment_descriptor.h" 45#include "segment_descriptor.h"
@@ -72,6 +76,8 @@ static struct kvm_stats_debugfs_item {
72 76
73static struct dentry *debugfs_dir; 77static struct dentry *debugfs_dir;
74 78
79struct vfsmount *kvmfs_mnt;
80
75#define MAX_IO_MSRS 256 81#define MAX_IO_MSRS 256
76 82
77#define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL 83#define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL
@@ -90,6 +96,58 @@ struct segment_descriptor_64 {
90 96
91#endif 97#endif
92 98
99static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
100 unsigned long arg);
101
102static struct inode *kvmfs_inode(struct file_operations *fops)
103{
104 int error = -ENOMEM;
105 struct inode *inode = new_inode(kvmfs_mnt->mnt_sb);
106
107 if (!inode)
108 goto eexit_1;
109
110 inode->i_fop = fops;
111
112 /*
113 * Mark the inode dirty from the very beginning,
114 * that way it will never be moved to the dirty
115 * list because mark_inode_dirty() will think
116 * that it already _is_ on the dirty list.
117 */
118 inode->i_state = I_DIRTY;
119 inode->i_mode = S_IRUSR | S_IWUSR;
120 inode->i_uid = current->fsuid;
121 inode->i_gid = current->fsgid;
122 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
123 return inode;
124
125eexit_1:
126 return ERR_PTR(error);
127}
128
129static struct file *kvmfs_file(struct inode *inode, void *private_data)
130{
131 struct file *file = get_empty_filp();
132
133 if (!file)
134 return ERR_PTR(-ENFILE);
135
136 file->f_path.mnt = mntget(kvmfs_mnt);
137 file->f_path.dentry = d_alloc_anon(inode);
138 if (!file->f_path.dentry)
139 return ERR_PTR(-ENOMEM);
140 file->f_mapping = inode->i_mapping;
141
142 file->f_pos = 0;
143 file->f_flags = O_RDWR;
144 file->f_op = inode->i_fop;
145 file->f_mode = FMODE_READ | FMODE_WRITE;
146 file->f_version = 0;
147 file->private_data = private_data;
148 return file;
149}
150
93unsigned long segment_base(u16 selector) 151unsigned long segment_base(u16 selector)
94{ 152{
95 struct descriptor_table gdt; 153 struct descriptor_table gdt;
@@ -126,10 +184,8 @@ static inline int valid_vcpu(int n)
126 return likely(n >= 0 && n < KVM_MAX_VCPUS); 184 return likely(n >= 0 && n < KVM_MAX_VCPUS);
127} 185}
128 186
129int kvm_read_guest(struct kvm_vcpu *vcpu, 187int kvm_read_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
130 gva_t addr, 188 void *dest)
131 unsigned long size,
132 void *dest)
133{ 189{
134 unsigned char *host_buf = dest; 190 unsigned char *host_buf = dest;
135 unsigned long req_size = size; 191 unsigned long req_size = size;
@@ -161,10 +217,8 @@ int kvm_read_guest(struct kvm_vcpu *vcpu,
161} 217}
162EXPORT_SYMBOL_GPL(kvm_read_guest); 218EXPORT_SYMBOL_GPL(kvm_read_guest);
163 219
164int kvm_write_guest(struct kvm_vcpu *vcpu, 220int kvm_write_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
165 gva_t addr, 221 void *data)
166 unsigned long size,
167 void *data)
168{ 222{
169 unsigned char *host_buf = data; 223 unsigned char *host_buf = data;
170 unsigned long req_size = size; 224 unsigned long req_size = size;
@@ -174,12 +228,15 @@ int kvm_write_guest(struct kvm_vcpu *vcpu,
174 unsigned now; 228 unsigned now;
175 unsigned offset; 229 unsigned offset;
176 hva_t guest_buf; 230 hva_t guest_buf;
231 gfn_t gfn;
177 232
178 paddr = gva_to_hpa(vcpu, addr); 233 paddr = gva_to_hpa(vcpu, addr);
179 234
180 if (is_error_hpa(paddr)) 235 if (is_error_hpa(paddr))
181 break; 236 break;
182 237
238 gfn = vcpu->mmu.gva_to_gpa(vcpu, addr) >> PAGE_SHIFT;
239 mark_page_dirty(vcpu->kvm, gfn);
183 guest_buf = (hva_t)kmap_atomic( 240 guest_buf = (hva_t)kmap_atomic(
184 pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0); 241 pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0);
185 offset = addr & ~PAGE_MASK; 242 offset = addr & ~PAGE_MASK;
@@ -195,24 +252,30 @@ int kvm_write_guest(struct kvm_vcpu *vcpu,
195} 252}
196EXPORT_SYMBOL_GPL(kvm_write_guest); 253EXPORT_SYMBOL_GPL(kvm_write_guest);
197 254
198static int vcpu_slot(struct kvm_vcpu *vcpu) 255/*
256 * Switches to specified vcpu, until a matching vcpu_put()
257 */
258static void vcpu_load(struct kvm_vcpu *vcpu)
199{ 259{
200 return vcpu - vcpu->kvm->vcpus; 260 mutex_lock(&vcpu->mutex);
261 kvm_arch_ops->vcpu_load(vcpu);
201} 262}
202 263
203/* 264/*
204 * Switches to specified vcpu, until a matching vcpu_put() 265 * Switches to specified vcpu, until a matching vcpu_put(). Will return NULL
266 * if the slot is not populated.
205 */ 267 */
206static struct kvm_vcpu *vcpu_load(struct kvm *kvm, int vcpu_slot) 268static struct kvm_vcpu *vcpu_load_slot(struct kvm *kvm, int slot)
207{ 269{
208 struct kvm_vcpu *vcpu = &kvm->vcpus[vcpu_slot]; 270 struct kvm_vcpu *vcpu = &kvm->vcpus[slot];
209 271
210 mutex_lock(&vcpu->mutex); 272 mutex_lock(&vcpu->mutex);
211 if (unlikely(!vcpu->vmcs)) { 273 if (!vcpu->vmcs) {
212 mutex_unlock(&vcpu->mutex); 274 mutex_unlock(&vcpu->mutex);
213 return NULL; 275 return NULL;
214 } 276 }
215 return kvm_arch_ops->vcpu_load(vcpu); 277 kvm_arch_ops->vcpu_load(vcpu);
278 return vcpu;
216} 279}
217 280
218static void vcpu_put(struct kvm_vcpu *vcpu) 281static void vcpu_put(struct kvm_vcpu *vcpu)
@@ -221,13 +284,13 @@ static void vcpu_put(struct kvm_vcpu *vcpu)
221 mutex_unlock(&vcpu->mutex); 284 mutex_unlock(&vcpu->mutex);
222} 285}
223 286
224static int kvm_dev_open(struct inode *inode, struct file *filp) 287static struct kvm *kvm_create_vm(void)
225{ 288{
226 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL); 289 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
227 int i; 290 int i;
228 291
229 if (!kvm) 292 if (!kvm)
230 return -ENOMEM; 293 return ERR_PTR(-ENOMEM);
231 294
232 spin_lock_init(&kvm->lock); 295 spin_lock_init(&kvm->lock);
233 INIT_LIST_HEAD(&kvm->active_mmu_pages); 296 INIT_LIST_HEAD(&kvm->active_mmu_pages);
@@ -243,7 +306,11 @@ static int kvm_dev_open(struct inode *inode, struct file *filp)
243 list_add(&kvm->vm_list, &vm_list); 306 list_add(&kvm->vm_list, &vm_list);
244 spin_unlock(&kvm_lock); 307 spin_unlock(&kvm_lock);
245 } 308 }
246 filp->private_data = kvm; 309 return kvm;
310}
311
312static int kvm_dev_open(struct inode *inode, struct file *filp)
313{
247 return 0; 314 return 0;
248} 315}
249 316
@@ -281,9 +348,10 @@ static void kvm_free_physmem(struct kvm *kvm)
281 348
282static void kvm_free_vcpu(struct kvm_vcpu *vcpu) 349static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
283{ 350{
284 if (!vcpu_load(vcpu->kvm, vcpu_slot(vcpu))) 351 if (!vcpu->vmcs)
285 return; 352 return;
286 353
354 vcpu_load(vcpu);
287 kvm_mmu_destroy(vcpu); 355 kvm_mmu_destroy(vcpu);
288 vcpu_put(vcpu); 356 vcpu_put(vcpu);
289 kvm_arch_ops->vcpu_free(vcpu); 357 kvm_arch_ops->vcpu_free(vcpu);
@@ -299,14 +367,24 @@ static void kvm_free_vcpus(struct kvm *kvm)
299 367
300static int kvm_dev_release(struct inode *inode, struct file *filp) 368static int kvm_dev_release(struct inode *inode, struct file *filp)
301{ 369{
302 struct kvm *kvm = filp->private_data; 370 return 0;
371}
303 372
373static void kvm_destroy_vm(struct kvm *kvm)
374{
304 spin_lock(&kvm_lock); 375 spin_lock(&kvm_lock);
305 list_del(&kvm->vm_list); 376 list_del(&kvm->vm_list);
306 spin_unlock(&kvm_lock); 377 spin_unlock(&kvm_lock);
307 kvm_free_vcpus(kvm); 378 kvm_free_vcpus(kvm);
308 kvm_free_physmem(kvm); 379 kvm_free_physmem(kvm);
309 kfree(kvm); 380 kfree(kvm);
381}
382
383static int kvm_vm_release(struct inode *inode, struct file *filp)
384{
385 struct kvm *kvm = filp->private_data;
386
387 kvm_destroy_vm(kvm);
310 return 0; 388 return 0;
311} 389}
312 390
@@ -457,7 +535,7 @@ EXPORT_SYMBOL_GPL(set_cr4);
457void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 535void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
458{ 536{
459 if (is_long_mode(vcpu)) { 537 if (is_long_mode(vcpu)) {
460 if ( cr3 & CR3_L_MODE_RESEVED_BITS) { 538 if (cr3 & CR3_L_MODE_RESEVED_BITS) {
461 printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n"); 539 printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
462 inject_gp(vcpu); 540 inject_gp(vcpu);
463 return; 541 return;
@@ -533,55 +611,11 @@ void fx_init(struct kvm_vcpu *vcpu)
533} 611}
534EXPORT_SYMBOL_GPL(fx_init); 612EXPORT_SYMBOL_GPL(fx_init);
535 613
536/* 614static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
537 * Creates some virtual cpus. Good luck creating more than one.
538 */
539static int kvm_dev_ioctl_create_vcpu(struct kvm *kvm, int n)
540{ 615{
541 int r; 616 spin_lock(&vcpu->kvm->lock);
542 struct kvm_vcpu *vcpu; 617 kvm_mmu_slot_remove_write_access(vcpu, slot);
543 618 spin_unlock(&vcpu->kvm->lock);
544 r = -EINVAL;
545 if (!valid_vcpu(n))
546 goto out;
547
548 vcpu = &kvm->vcpus[n];
549
550 mutex_lock(&vcpu->mutex);
551
552 if (vcpu->vmcs) {
553 mutex_unlock(&vcpu->mutex);
554 return -EEXIST;
555 }
556
557 vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
558 FX_IMAGE_ALIGN);
559 vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
560
561 r = kvm_arch_ops->vcpu_create(vcpu);
562 if (r < 0)
563 goto out_free_vcpus;
564
565 r = kvm_mmu_create(vcpu);
566 if (r < 0)
567 goto out_free_vcpus;
568
569 kvm_arch_ops->vcpu_load(vcpu);
570 r = kvm_mmu_setup(vcpu);
571 if (r >= 0)
572 r = kvm_arch_ops->vcpu_setup(vcpu);
573 vcpu_put(vcpu);
574
575 if (r < 0)
576 goto out_free_vcpus;
577
578 return 0;
579
580out_free_vcpus:
581 kvm_free_vcpu(vcpu);
582 mutex_unlock(&vcpu->mutex);
583out:
584 return r;
585} 619}
586 620
587/* 621/*
@@ -590,8 +624,8 @@ out:
590 * 624 *
591 * Discontiguous memory is allowed, mostly for framebuffers. 625 * Discontiguous memory is allowed, mostly for framebuffers.
592 */ 626 */
593static int kvm_dev_ioctl_set_memory_region(struct kvm *kvm, 627static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
594 struct kvm_memory_region *mem) 628 struct kvm_memory_region *mem)
595{ 629{
596 int r; 630 int r;
597 gfn_t base_gfn; 631 gfn_t base_gfn;
@@ -674,7 +708,7 @@ raced:
674 | __GFP_ZERO); 708 | __GFP_ZERO);
675 if (!new.phys_mem[i]) 709 if (!new.phys_mem[i])
676 goto out_free; 710 goto out_free;
677 new.phys_mem[i]->private = 0; 711 set_page_private(new.phys_mem[i],0);
678 } 712 }
679 } 713 }
680 714
@@ -711,9 +745,11 @@ raced:
711 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 745 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
712 struct kvm_vcpu *vcpu; 746 struct kvm_vcpu *vcpu;
713 747
714 vcpu = vcpu_load(kvm, i); 748 vcpu = vcpu_load_slot(kvm, i);
715 if (!vcpu) 749 if (!vcpu)
716 continue; 750 continue;
751 if (new.flags & KVM_MEM_LOG_DIRTY_PAGES)
752 do_remove_write_access(vcpu, mem->slot);
717 kvm_mmu_reset_context(vcpu); 753 kvm_mmu_reset_context(vcpu);
718 vcpu_put(vcpu); 754 vcpu_put(vcpu);
719 } 755 }
@@ -729,18 +765,11 @@ out:
729 return r; 765 return r;
730} 766}
731 767
732static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
733{
734 spin_lock(&vcpu->kvm->lock);
735 kvm_mmu_slot_remove_write_access(vcpu, slot);
736 spin_unlock(&vcpu->kvm->lock);
737}
738
739/* 768/*
740 * Get (and clear) the dirty memory log for a memory slot. 769 * Get (and clear) the dirty memory log for a memory slot.
741 */ 770 */
742static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm, 771static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
743 struct kvm_dirty_log *log) 772 struct kvm_dirty_log *log)
744{ 773{
745 struct kvm_memory_slot *memslot; 774 struct kvm_memory_slot *memslot;
746 int r, i; 775 int r, i;
@@ -765,21 +794,21 @@ static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
765 if (!memslot->dirty_bitmap) 794 if (!memslot->dirty_bitmap)
766 goto out; 795 goto out;
767 796
768 n = ALIGN(memslot->npages, 8) / 8; 797 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
769 798
770 for (i = 0; !any && i < n; ++i) 799 for (i = 0; !any && i < n/sizeof(long); ++i)
771 any = memslot->dirty_bitmap[i]; 800 any = memslot->dirty_bitmap[i];
772 801
773 r = -EFAULT; 802 r = -EFAULT;
774 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) 803 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
775 goto out; 804 goto out;
776 805
777
778 if (any) { 806 if (any) {
779 cleared = 0; 807 cleared = 0;
780 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 808 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
781 struct kvm_vcpu *vcpu = vcpu_load(kvm, i); 809 struct kvm_vcpu *vcpu;
782 810
811 vcpu = vcpu_load_slot(kvm, i);
783 if (!vcpu) 812 if (!vcpu)
784 continue; 813 continue;
785 if (!cleared) { 814 if (!cleared) {
@@ -903,8 +932,9 @@ static int emulator_read_emulated(unsigned long addr,
903 return X86EMUL_CONTINUE; 932 return X86EMUL_CONTINUE;
904 else { 933 else {
905 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); 934 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
935
906 if (gpa == UNMAPPED_GVA) 936 if (gpa == UNMAPPED_GVA)
907 return vcpu_printf(vcpu, "not present\n"), X86EMUL_PROPAGATE_FAULT; 937 return X86EMUL_PROPAGATE_FAULT;
908 vcpu->mmio_needed = 1; 938 vcpu->mmio_needed = 1;
909 vcpu->mmio_phys_addr = gpa; 939 vcpu->mmio_phys_addr = gpa;
910 vcpu->mmio_size = bytes; 940 vcpu->mmio_size = bytes;
@@ -928,6 +958,7 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
928 return 0; 958 return 0;
929 page = gfn_to_page(m, gpa >> PAGE_SHIFT); 959 page = gfn_to_page(m, gpa >> PAGE_SHIFT);
930 kvm_mmu_pre_write(vcpu, gpa, bytes); 960 kvm_mmu_pre_write(vcpu, gpa, bytes);
961 mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
931 virt = kmap_atomic(page, KM_USER0); 962 virt = kmap_atomic(page, KM_USER0);
932 memcpy(virt + offset_in_page(gpa), &val, bytes); 963 memcpy(virt + offset_in_page(gpa), &val, bytes);
933 kunmap_atomic(virt, KM_USER0); 964 kunmap_atomic(virt, KM_USER0);
@@ -1142,6 +1173,42 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
1142} 1173}
1143EXPORT_SYMBOL_GPL(emulate_instruction); 1174EXPORT_SYMBOL_GPL(emulate_instruction);
1144 1175
1176int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
1177{
1178 unsigned long nr, a0, a1, a2, a3, a4, a5, ret;
1179
1180 kvm_arch_ops->decache_regs(vcpu);
1181 ret = -KVM_EINVAL;
1182#ifdef CONFIG_X86_64
1183 if (is_long_mode(vcpu)) {
1184 nr = vcpu->regs[VCPU_REGS_RAX];
1185 a0 = vcpu->regs[VCPU_REGS_RDI];
1186 a1 = vcpu->regs[VCPU_REGS_RSI];
1187 a2 = vcpu->regs[VCPU_REGS_RDX];
1188 a3 = vcpu->regs[VCPU_REGS_RCX];
1189 a4 = vcpu->regs[VCPU_REGS_R8];
1190 a5 = vcpu->regs[VCPU_REGS_R9];
1191 } else
1192#endif
1193 {
1194 nr = vcpu->regs[VCPU_REGS_RBX] & -1u;
1195 a0 = vcpu->regs[VCPU_REGS_RAX] & -1u;
1196 a1 = vcpu->regs[VCPU_REGS_RCX] & -1u;
1197 a2 = vcpu->regs[VCPU_REGS_RDX] & -1u;
1198 a3 = vcpu->regs[VCPU_REGS_RSI] & -1u;
1199 a4 = vcpu->regs[VCPU_REGS_RDI] & -1u;
1200 a5 = vcpu->regs[VCPU_REGS_RBP] & -1u;
1201 }
1202 switch (nr) {
1203 default:
1204 ;
1205 }
1206 vcpu->regs[VCPU_REGS_RAX] = ret;
1207 kvm_arch_ops->cache_regs(vcpu);
1208 return 1;
1209}
1210EXPORT_SYMBOL_GPL(kvm_hypercall);
1211
1145static u64 mk_cr_64(u64 curr_cr, u32 new_val) 1212static u64 mk_cr_64(u64 curr_cr, u32 new_val)
1146{ 1213{
1147 return (curr_cr & ~((1ULL << 32) - 1)) | new_val; 1214 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
@@ -1208,6 +1275,75 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
1208 } 1275 }
1209} 1276}
1210 1277
1278/*
1279 * Register the para guest with the host:
1280 */
1281static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
1282{
1283 struct kvm_vcpu_para_state *para_state;
1284 hpa_t para_state_hpa, hypercall_hpa;
1285 struct page *para_state_page;
1286 unsigned char *hypercall;
1287 gpa_t hypercall_gpa;
1288
1289 printk(KERN_DEBUG "kvm: guest trying to enter paravirtual mode\n");
1290 printk(KERN_DEBUG ".... para_state_gpa: %08Lx\n", para_state_gpa);
1291
1292 /*
1293 * Needs to be page aligned:
1294 */
1295 if (para_state_gpa != PAGE_ALIGN(para_state_gpa))
1296 goto err_gp;
1297
1298 para_state_hpa = gpa_to_hpa(vcpu, para_state_gpa);
1299 printk(KERN_DEBUG ".... para_state_hpa: %08Lx\n", para_state_hpa);
1300 if (is_error_hpa(para_state_hpa))
1301 goto err_gp;
1302
1303 mark_page_dirty(vcpu->kvm, para_state_gpa >> PAGE_SHIFT);
1304 para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT);
1305 para_state = kmap_atomic(para_state_page, KM_USER0);
1306
1307 printk(KERN_DEBUG ".... guest version: %d\n", para_state->guest_version);
1308 printk(KERN_DEBUG ".... size: %d\n", para_state->size);
1309
1310 para_state->host_version = KVM_PARA_API_VERSION;
1311 /*
1312 * We cannot support guests that try to register themselves
1313 * with a newer API version than the host supports:
1314 */
1315 if (para_state->guest_version > KVM_PARA_API_VERSION) {
1316 para_state->ret = -KVM_EINVAL;
1317 goto err_kunmap_skip;
1318 }
1319
1320 hypercall_gpa = para_state->hypercall_gpa;
1321 hypercall_hpa = gpa_to_hpa(vcpu, hypercall_gpa);
1322 printk(KERN_DEBUG ".... hypercall_hpa: %08Lx\n", hypercall_hpa);
1323 if (is_error_hpa(hypercall_hpa)) {
1324 para_state->ret = -KVM_EINVAL;
1325 goto err_kunmap_skip;
1326 }
1327
1328 printk(KERN_DEBUG "kvm: para guest successfully registered.\n");
1329 vcpu->para_state_page = para_state_page;
1330 vcpu->para_state_gpa = para_state_gpa;
1331 vcpu->hypercall_gpa = hypercall_gpa;
1332
1333 mark_page_dirty(vcpu->kvm, hypercall_gpa >> PAGE_SHIFT);
1334 hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT),
1335 KM_USER1) + (hypercall_hpa & ~PAGE_MASK);
1336 kvm_arch_ops->patch_hypercall(vcpu, hypercall);
1337 kunmap_atomic(hypercall, KM_USER1);
1338
1339 para_state->ret = 0;
1340err_kunmap_skip:
1341 kunmap_atomic(para_state, KM_USER0);
1342 return 0;
1343err_gp:
1344 return 1;
1345}
1346
1211int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) 1347int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1212{ 1348{
1213 u64 data; 1349 u64 data;
@@ -1316,6 +1452,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1316 case MSR_IA32_MISC_ENABLE: 1452 case MSR_IA32_MISC_ENABLE:
1317 vcpu->ia32_misc_enable_msr = data; 1453 vcpu->ia32_misc_enable_msr = data;
1318 break; 1454 break;
1455 /*
1456 * This is the 'probe whether the host is KVM' logic:
1457 */
1458 case MSR_KVM_API_MAGIC:
1459 return vcpu_register_para(vcpu, data);
1460
1319 default: 1461 default:
1320 printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr); 1462 printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr);
1321 return 1; 1463 return 1;
@@ -1338,8 +1480,7 @@ void kvm_resched(struct kvm_vcpu *vcpu)
1338{ 1480{
1339 vcpu_put(vcpu); 1481 vcpu_put(vcpu);
1340 cond_resched(); 1482 cond_resched();
1341 /* Cannot fail - no vcpu unplug yet. */ 1483 vcpu_load(vcpu);
1342 vcpu_load(vcpu->kvm, vcpu_slot(vcpu));
1343} 1484}
1344EXPORT_SYMBOL_GPL(kvm_resched); 1485EXPORT_SYMBOL_GPL(kvm_resched);
1345 1486
@@ -1361,17 +1502,11 @@ void save_msrs(struct vmx_msr_entry *e, int n)
1361} 1502}
1362EXPORT_SYMBOL_GPL(save_msrs); 1503EXPORT_SYMBOL_GPL(save_msrs);
1363 1504
1364static int kvm_dev_ioctl_run(struct kvm *kvm, struct kvm_run *kvm_run) 1505static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1365{ 1506{
1366 struct kvm_vcpu *vcpu;
1367 int r; 1507 int r;
1368 1508
1369 if (!valid_vcpu(kvm_run->vcpu)) 1509 vcpu_load(vcpu);
1370 return -EINVAL;
1371
1372 vcpu = vcpu_load(kvm, kvm_run->vcpu);
1373 if (!vcpu)
1374 return -ENOENT;
1375 1510
1376 /* re-sync apic's tpr */ 1511 /* re-sync apic's tpr */
1377 vcpu->cr8 = kvm_run->cr8; 1512 vcpu->cr8 = kvm_run->cr8;
@@ -1394,16 +1529,10 @@ static int kvm_dev_ioctl_run(struct kvm *kvm, struct kvm_run *kvm_run)
1394 return r; 1529 return r;
1395} 1530}
1396 1531
1397static int kvm_dev_ioctl_get_regs(struct kvm *kvm, struct kvm_regs *regs) 1532static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
1533 struct kvm_regs *regs)
1398{ 1534{
1399 struct kvm_vcpu *vcpu; 1535 vcpu_load(vcpu);
1400
1401 if (!valid_vcpu(regs->vcpu))
1402 return -EINVAL;
1403
1404 vcpu = vcpu_load(kvm, regs->vcpu);
1405 if (!vcpu)
1406 return -ENOENT;
1407 1536
1408 kvm_arch_ops->cache_regs(vcpu); 1537 kvm_arch_ops->cache_regs(vcpu);
1409 1538
@@ -1440,16 +1569,10 @@ static int kvm_dev_ioctl_get_regs(struct kvm *kvm, struct kvm_regs *regs)
1440 return 0; 1569 return 0;
1441} 1570}
1442 1571
1443static int kvm_dev_ioctl_set_regs(struct kvm *kvm, struct kvm_regs *regs) 1572static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
1573 struct kvm_regs *regs)
1444{ 1574{
1445 struct kvm_vcpu *vcpu; 1575 vcpu_load(vcpu);
1446
1447 if (!valid_vcpu(regs->vcpu))
1448 return -EINVAL;
1449
1450 vcpu = vcpu_load(kvm, regs->vcpu);
1451 if (!vcpu)
1452 return -ENOENT;
1453 1576
1454 vcpu->regs[VCPU_REGS_RAX] = regs->rax; 1577 vcpu->regs[VCPU_REGS_RAX] = regs->rax;
1455 vcpu->regs[VCPU_REGS_RBX] = regs->rbx; 1578 vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
@@ -1486,16 +1609,12 @@ static void get_segment(struct kvm_vcpu *vcpu,
1486 return kvm_arch_ops->get_segment(vcpu, var, seg); 1609 return kvm_arch_ops->get_segment(vcpu, var, seg);
1487} 1610}
1488 1611
1489static int kvm_dev_ioctl_get_sregs(struct kvm *kvm, struct kvm_sregs *sregs) 1612static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1613 struct kvm_sregs *sregs)
1490{ 1614{
1491 struct kvm_vcpu *vcpu;
1492 struct descriptor_table dt; 1615 struct descriptor_table dt;
1493 1616
1494 if (!valid_vcpu(sregs->vcpu)) 1617 vcpu_load(vcpu);
1495 return -EINVAL;
1496 vcpu = vcpu_load(kvm, sregs->vcpu);
1497 if (!vcpu)
1498 return -ENOENT;
1499 1618
1500 get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 1619 get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
1501 get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 1620 get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
@@ -1537,18 +1656,14 @@ static void set_segment(struct kvm_vcpu *vcpu,
1537 return kvm_arch_ops->set_segment(vcpu, var, seg); 1656 return kvm_arch_ops->set_segment(vcpu, var, seg);
1538} 1657}
1539 1658
1540static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs) 1659static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1660 struct kvm_sregs *sregs)
1541{ 1661{
1542 struct kvm_vcpu *vcpu;
1543 int mmu_reset_needed = 0; 1662 int mmu_reset_needed = 0;
1544 int i; 1663 int i;
1545 struct descriptor_table dt; 1664 struct descriptor_table dt;
1546 1665
1547 if (!valid_vcpu(sregs->vcpu)) 1666 vcpu_load(vcpu);
1548 return -EINVAL;
1549 vcpu = vcpu_load(kvm, sregs->vcpu);
1550 if (!vcpu)
1551 return -ENOENT;
1552 1667
1553 set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 1668 set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
1554 set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 1669 set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
@@ -1654,20 +1769,14 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1654 * 1769 *
1655 * @return number of msrs set successfully. 1770 * @return number of msrs set successfully.
1656 */ 1771 */
1657static int __msr_io(struct kvm *kvm, struct kvm_msrs *msrs, 1772static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
1658 struct kvm_msr_entry *entries, 1773 struct kvm_msr_entry *entries,
1659 int (*do_msr)(struct kvm_vcpu *vcpu, 1774 int (*do_msr)(struct kvm_vcpu *vcpu,
1660 unsigned index, u64 *data)) 1775 unsigned index, u64 *data))
1661{ 1776{
1662 struct kvm_vcpu *vcpu;
1663 int i; 1777 int i;
1664 1778
1665 if (!valid_vcpu(msrs->vcpu)) 1779 vcpu_load(vcpu);
1666 return -EINVAL;
1667
1668 vcpu = vcpu_load(kvm, msrs->vcpu);
1669 if (!vcpu)
1670 return -ENOENT;
1671 1780
1672 for (i = 0; i < msrs->nmsrs; ++i) 1781 for (i = 0; i < msrs->nmsrs; ++i)
1673 if (do_msr(vcpu, entries[i].index, &entries[i].data)) 1782 if (do_msr(vcpu, entries[i].index, &entries[i].data))
@@ -1683,7 +1792,7 @@ static int __msr_io(struct kvm *kvm, struct kvm_msrs *msrs,
1683 * 1792 *
1684 * @return number of msrs set successfully. 1793 * @return number of msrs set successfully.
1685 */ 1794 */
1686static int msr_io(struct kvm *kvm, struct kvm_msrs __user *user_msrs, 1795static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
1687 int (*do_msr)(struct kvm_vcpu *vcpu, 1796 int (*do_msr)(struct kvm_vcpu *vcpu,
1688 unsigned index, u64 *data), 1797 unsigned index, u64 *data),
1689 int writeback) 1798 int writeback)
@@ -1711,7 +1820,7 @@ static int msr_io(struct kvm *kvm, struct kvm_msrs __user *user_msrs,
1711 if (copy_from_user(entries, user_msrs->entries, size)) 1820 if (copy_from_user(entries, user_msrs->entries, size))
1712 goto out_free; 1821 goto out_free;
1713 1822
1714 r = n = __msr_io(kvm, &msrs, entries, do_msr); 1823 r = n = __msr_io(vcpu, &msrs, entries, do_msr);
1715 if (r < 0) 1824 if (r < 0)
1716 goto out_free; 1825 goto out_free;
1717 1826
@@ -1730,38 +1839,31 @@ out:
1730/* 1839/*
1731 * Translate a guest virtual address to a guest physical address. 1840 * Translate a guest virtual address to a guest physical address.
1732 */ 1841 */
1733static int kvm_dev_ioctl_translate(struct kvm *kvm, struct kvm_translation *tr) 1842static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1843 struct kvm_translation *tr)
1734{ 1844{
1735 unsigned long vaddr = tr->linear_address; 1845 unsigned long vaddr = tr->linear_address;
1736 struct kvm_vcpu *vcpu;
1737 gpa_t gpa; 1846 gpa_t gpa;
1738 1847
1739 vcpu = vcpu_load(kvm, tr->vcpu); 1848 vcpu_load(vcpu);
1740 if (!vcpu) 1849 spin_lock(&vcpu->kvm->lock);
1741 return -ENOENT;
1742 spin_lock(&kvm->lock);
1743 gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr); 1850 gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
1744 tr->physical_address = gpa; 1851 tr->physical_address = gpa;
1745 tr->valid = gpa != UNMAPPED_GVA; 1852 tr->valid = gpa != UNMAPPED_GVA;
1746 tr->writeable = 1; 1853 tr->writeable = 1;
1747 tr->usermode = 0; 1854 tr->usermode = 0;
1748 spin_unlock(&kvm->lock); 1855 spin_unlock(&vcpu->kvm->lock);
1749 vcpu_put(vcpu); 1856 vcpu_put(vcpu);
1750 1857
1751 return 0; 1858 return 0;
1752} 1859}
1753 1860
1754static int kvm_dev_ioctl_interrupt(struct kvm *kvm, struct kvm_interrupt *irq) 1861static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
1862 struct kvm_interrupt *irq)
1755{ 1863{
1756 struct kvm_vcpu *vcpu;
1757
1758 if (!valid_vcpu(irq->vcpu))
1759 return -EINVAL;
1760 if (irq->irq < 0 || irq->irq >= 256) 1864 if (irq->irq < 0 || irq->irq >= 256)
1761 return -EINVAL; 1865 return -EINVAL;
1762 vcpu = vcpu_load(kvm, irq->vcpu); 1866 vcpu_load(vcpu);
1763 if (!vcpu)
1764 return -ENOENT;
1765 1867
1766 set_bit(irq->irq, vcpu->irq_pending); 1868 set_bit(irq->irq, vcpu->irq_pending);
1767 set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary); 1869 set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);
@@ -1771,17 +1873,12 @@ static int kvm_dev_ioctl_interrupt(struct kvm *kvm, struct kvm_interrupt *irq)
1771 return 0; 1873 return 0;
1772} 1874}
1773 1875
1774static int kvm_dev_ioctl_debug_guest(struct kvm *kvm, 1876static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
1775 struct kvm_debug_guest *dbg) 1877 struct kvm_debug_guest *dbg)
1776{ 1878{
1777 struct kvm_vcpu *vcpu;
1778 int r; 1879 int r;
1779 1880
1780 if (!valid_vcpu(dbg->vcpu)) 1881 vcpu_load(vcpu);
1781 return -EINVAL;
1782 vcpu = vcpu_load(kvm, dbg->vcpu);
1783 if (!vcpu)
1784 return -ENOENT;
1785 1882
1786 r = kvm_arch_ops->set_guest_debug(vcpu, dbg); 1883 r = kvm_arch_ops->set_guest_debug(vcpu, dbg);
1787 1884
@@ -1790,30 +1887,129 @@ static int kvm_dev_ioctl_debug_guest(struct kvm *kvm,
1790 return r; 1887 return r;
1791} 1888}
1792 1889
1793static long kvm_dev_ioctl(struct file *filp, 1890static int kvm_vcpu_release(struct inode *inode, struct file *filp)
1794 unsigned int ioctl, unsigned long arg)
1795{ 1891{
1796 struct kvm *kvm = filp->private_data; 1892 struct kvm_vcpu *vcpu = filp->private_data;
1893
1894 fput(vcpu->kvm->filp);
1895 return 0;
1896}
1897
1898static struct file_operations kvm_vcpu_fops = {
1899 .release = kvm_vcpu_release,
1900 .unlocked_ioctl = kvm_vcpu_ioctl,
1901 .compat_ioctl = kvm_vcpu_ioctl,
1902};
1903
1904/*
1905 * Allocates an inode for the vcpu.
1906 */
1907static int create_vcpu_fd(struct kvm_vcpu *vcpu)
1908{
1909 int fd, r;
1910 struct inode *inode;
1911 struct file *file;
1912
1913 atomic_inc(&vcpu->kvm->filp->f_count);
1914 inode = kvmfs_inode(&kvm_vcpu_fops);
1915 if (IS_ERR(inode)) {
1916 r = PTR_ERR(inode);
1917 goto out1;
1918 }
1919
1920 file = kvmfs_file(inode, vcpu);
1921 if (IS_ERR(file)) {
1922 r = PTR_ERR(file);
1923 goto out2;
1924 }
1925
1926 r = get_unused_fd();
1927 if (r < 0)
1928 goto out3;
1929 fd = r;
1930 fd_install(fd, file);
1931
1932 return fd;
1933
1934out3:
1935 fput(file);
1936out2:
1937 iput(inode);
1938out1:
1939 fput(vcpu->kvm->filp);
1940 return r;
1941}
1942
1943/*
1944 * Creates some virtual cpus. Good luck creating more than one.
1945 */
1946static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
1947{
1948 int r;
1949 struct kvm_vcpu *vcpu;
1950
1951 r = -EINVAL;
1952 if (!valid_vcpu(n))
1953 goto out;
1954
1955 vcpu = &kvm->vcpus[n];
1956
1957 mutex_lock(&vcpu->mutex);
1958
1959 if (vcpu->vmcs) {
1960 mutex_unlock(&vcpu->mutex);
1961 return -EEXIST;
1962 }
1963
1964 vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
1965 FX_IMAGE_ALIGN);
1966 vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
1967
1968 r = kvm_arch_ops->vcpu_create(vcpu);
1969 if (r < 0)
1970 goto out_free_vcpus;
1971
1972 r = kvm_mmu_create(vcpu);
1973 if (r < 0)
1974 goto out_free_vcpus;
1975
1976 kvm_arch_ops->vcpu_load(vcpu);
1977 r = kvm_mmu_setup(vcpu);
1978 if (r >= 0)
1979 r = kvm_arch_ops->vcpu_setup(vcpu);
1980 vcpu_put(vcpu);
1981
1982 if (r < 0)
1983 goto out_free_vcpus;
1984
1985 r = create_vcpu_fd(vcpu);
1986 if (r < 0)
1987 goto out_free_vcpus;
1988
1989 return r;
1990
1991out_free_vcpus:
1992 kvm_free_vcpu(vcpu);
1993 mutex_unlock(&vcpu->mutex);
1994out:
1995 return r;
1996}
1997
1998static long kvm_vcpu_ioctl(struct file *filp,
1999 unsigned int ioctl, unsigned long arg)
2000{
2001 struct kvm_vcpu *vcpu = filp->private_data;
1797 void __user *argp = (void __user *)arg; 2002 void __user *argp = (void __user *)arg;
1798 int r = -EINVAL; 2003 int r = -EINVAL;
1799 2004
1800 switch (ioctl) { 2005 switch (ioctl) {
1801 case KVM_GET_API_VERSION:
1802 r = KVM_API_VERSION;
1803 break;
1804 case KVM_CREATE_VCPU: {
1805 r = kvm_dev_ioctl_create_vcpu(kvm, arg);
1806 if (r)
1807 goto out;
1808 break;
1809 }
1810 case KVM_RUN: { 2006 case KVM_RUN: {
1811 struct kvm_run kvm_run; 2007 struct kvm_run kvm_run;
1812 2008
1813 r = -EFAULT; 2009 r = -EFAULT;
1814 if (copy_from_user(&kvm_run, argp, sizeof kvm_run)) 2010 if (copy_from_user(&kvm_run, argp, sizeof kvm_run))
1815 goto out; 2011 goto out;
1816 r = kvm_dev_ioctl_run(kvm, &kvm_run); 2012 r = kvm_vcpu_ioctl_run(vcpu, &kvm_run);
1817 if (r < 0 && r != -EINTR) 2013 if (r < 0 && r != -EINTR)
1818 goto out; 2014 goto out;
1819 if (copy_to_user(argp, &kvm_run, sizeof kvm_run)) { 2015 if (copy_to_user(argp, &kvm_run, sizeof kvm_run)) {
@@ -1825,10 +2021,8 @@ static long kvm_dev_ioctl(struct file *filp,
1825 case KVM_GET_REGS: { 2021 case KVM_GET_REGS: {
1826 struct kvm_regs kvm_regs; 2022 struct kvm_regs kvm_regs;
1827 2023
1828 r = -EFAULT; 2024 memset(&kvm_regs, 0, sizeof kvm_regs);
1829 if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs)) 2025 r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
1830 goto out;
1831 r = kvm_dev_ioctl_get_regs(kvm, &kvm_regs);
1832 if (r) 2026 if (r)
1833 goto out; 2027 goto out;
1834 r = -EFAULT; 2028 r = -EFAULT;
@@ -1843,7 +2037,7 @@ static long kvm_dev_ioctl(struct file *filp,
1843 r = -EFAULT; 2037 r = -EFAULT;
1844 if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs)) 2038 if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
1845 goto out; 2039 goto out;
1846 r = kvm_dev_ioctl_set_regs(kvm, &kvm_regs); 2040 r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
1847 if (r) 2041 if (r)
1848 goto out; 2042 goto out;
1849 r = 0; 2043 r = 0;
@@ -1852,10 +2046,8 @@ static long kvm_dev_ioctl(struct file *filp,
1852 case KVM_GET_SREGS: { 2046 case KVM_GET_SREGS: {
1853 struct kvm_sregs kvm_sregs; 2047 struct kvm_sregs kvm_sregs;
1854 2048
1855 r = -EFAULT; 2049 memset(&kvm_sregs, 0, sizeof kvm_sregs);
1856 if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs)) 2050 r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
1857 goto out;
1858 r = kvm_dev_ioctl_get_sregs(kvm, &kvm_sregs);
1859 if (r) 2051 if (r)
1860 goto out; 2052 goto out;
1861 r = -EFAULT; 2053 r = -EFAULT;
@@ -1870,7 +2062,7 @@ static long kvm_dev_ioctl(struct file *filp,
1870 r = -EFAULT; 2062 r = -EFAULT;
1871 if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs)) 2063 if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
1872 goto out; 2064 goto out;
1873 r = kvm_dev_ioctl_set_sregs(kvm, &kvm_sregs); 2065 r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
1874 if (r) 2066 if (r)
1875 goto out; 2067 goto out;
1876 r = 0; 2068 r = 0;
@@ -1882,7 +2074,7 @@ static long kvm_dev_ioctl(struct file *filp,
1882 r = -EFAULT; 2074 r = -EFAULT;
1883 if (copy_from_user(&tr, argp, sizeof tr)) 2075 if (copy_from_user(&tr, argp, sizeof tr))
1884 goto out; 2076 goto out;
1885 r = kvm_dev_ioctl_translate(kvm, &tr); 2077 r = kvm_vcpu_ioctl_translate(vcpu, &tr);
1886 if (r) 2078 if (r)
1887 goto out; 2079 goto out;
1888 r = -EFAULT; 2080 r = -EFAULT;
@@ -1897,7 +2089,7 @@ static long kvm_dev_ioctl(struct file *filp,
1897 r = -EFAULT; 2089 r = -EFAULT;
1898 if (copy_from_user(&irq, argp, sizeof irq)) 2090 if (copy_from_user(&irq, argp, sizeof irq))
1899 goto out; 2091 goto out;
1900 r = kvm_dev_ioctl_interrupt(kvm, &irq); 2092 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1901 if (r) 2093 if (r)
1902 goto out; 2094 goto out;
1903 r = 0; 2095 r = 0;
@@ -1909,19 +2101,45 @@ static long kvm_dev_ioctl(struct file *filp,
1909 r = -EFAULT; 2101 r = -EFAULT;
1910 if (copy_from_user(&dbg, argp, sizeof dbg)) 2102 if (copy_from_user(&dbg, argp, sizeof dbg))
1911 goto out; 2103 goto out;
1912 r = kvm_dev_ioctl_debug_guest(kvm, &dbg); 2104 r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
1913 if (r) 2105 if (r)
1914 goto out; 2106 goto out;
1915 r = 0; 2107 r = 0;
1916 break; 2108 break;
1917 } 2109 }
2110 case KVM_GET_MSRS:
2111 r = msr_io(vcpu, argp, get_msr, 1);
2112 break;
2113 case KVM_SET_MSRS:
2114 r = msr_io(vcpu, argp, do_set_msr, 0);
2115 break;
2116 default:
2117 ;
2118 }
2119out:
2120 return r;
2121}
2122
2123static long kvm_vm_ioctl(struct file *filp,
2124 unsigned int ioctl, unsigned long arg)
2125{
2126 struct kvm *kvm = filp->private_data;
2127 void __user *argp = (void __user *)arg;
2128 int r = -EINVAL;
2129
2130 switch (ioctl) {
2131 case KVM_CREATE_VCPU:
2132 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
2133 if (r < 0)
2134 goto out;
2135 break;
1918 case KVM_SET_MEMORY_REGION: { 2136 case KVM_SET_MEMORY_REGION: {
1919 struct kvm_memory_region kvm_mem; 2137 struct kvm_memory_region kvm_mem;
1920 2138
1921 r = -EFAULT; 2139 r = -EFAULT;
1922 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem)) 2140 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
1923 goto out; 2141 goto out;
1924 r = kvm_dev_ioctl_set_memory_region(kvm, &kvm_mem); 2142 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_mem);
1925 if (r) 2143 if (r)
1926 goto out; 2144 goto out;
1927 break; 2145 break;
@@ -1932,16 +2150,112 @@ static long kvm_dev_ioctl(struct file *filp,
1932 r = -EFAULT; 2150 r = -EFAULT;
1933 if (copy_from_user(&log, argp, sizeof log)) 2151 if (copy_from_user(&log, argp, sizeof log))
1934 goto out; 2152 goto out;
1935 r = kvm_dev_ioctl_get_dirty_log(kvm, &log); 2153 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
1936 if (r) 2154 if (r)
1937 goto out; 2155 goto out;
1938 break; 2156 break;
1939 } 2157 }
1940 case KVM_GET_MSRS: 2158 default:
1941 r = msr_io(kvm, argp, get_msr, 1); 2159 ;
2160 }
2161out:
2162 return r;
2163}
2164
2165static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
2166 unsigned long address,
2167 int *type)
2168{
2169 struct kvm *kvm = vma->vm_file->private_data;
2170 unsigned long pgoff;
2171 struct kvm_memory_slot *slot;
2172 struct page *page;
2173
2174 *type = VM_FAULT_MINOR;
2175 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2176 slot = gfn_to_memslot(kvm, pgoff);
2177 if (!slot)
2178 return NOPAGE_SIGBUS;
2179 page = gfn_to_page(slot, pgoff);
2180 if (!page)
2181 return NOPAGE_SIGBUS;
2182 get_page(page);
2183 return page;
2184}
2185
2186static struct vm_operations_struct kvm_vm_vm_ops = {
2187 .nopage = kvm_vm_nopage,
2188};
2189
2190static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
2191{
2192 vma->vm_ops = &kvm_vm_vm_ops;
2193 return 0;
2194}
2195
2196static struct file_operations kvm_vm_fops = {
2197 .release = kvm_vm_release,
2198 .unlocked_ioctl = kvm_vm_ioctl,
2199 .compat_ioctl = kvm_vm_ioctl,
2200 .mmap = kvm_vm_mmap,
2201};
2202
2203static int kvm_dev_ioctl_create_vm(void)
2204{
2205 int fd, r;
2206 struct inode *inode;
2207 struct file *file;
2208 struct kvm *kvm;
2209
2210 inode = kvmfs_inode(&kvm_vm_fops);
2211 if (IS_ERR(inode)) {
2212 r = PTR_ERR(inode);
2213 goto out1;
2214 }
2215
2216 kvm = kvm_create_vm();
2217 if (IS_ERR(kvm)) {
2218 r = PTR_ERR(kvm);
2219 goto out2;
2220 }
2221
2222 file = kvmfs_file(inode, kvm);
2223 if (IS_ERR(file)) {
2224 r = PTR_ERR(file);
2225 goto out3;
2226 }
2227 kvm->filp = file;
2228
2229 r = get_unused_fd();
2230 if (r < 0)
2231 goto out4;
2232 fd = r;
2233 fd_install(fd, file);
2234
2235 return fd;
2236
2237out4:
2238 fput(file);
2239out3:
2240 kvm_destroy_vm(kvm);
2241out2:
2242 iput(inode);
2243out1:
2244 return r;
2245}
2246
2247static long kvm_dev_ioctl(struct file *filp,
2248 unsigned int ioctl, unsigned long arg)
2249{
2250 void __user *argp = (void __user *)arg;
2251 int r = -EINVAL;
2252
2253 switch (ioctl) {
2254 case KVM_GET_API_VERSION:
2255 r = KVM_API_VERSION;
1942 break; 2256 break;
1943 case KVM_SET_MSRS: 2257 case KVM_CREATE_VM:
1944 r = msr_io(kvm, argp, do_set_msr, 0); 2258 r = kvm_dev_ioctl_create_vm();
1945 break; 2259 break;
1946 case KVM_GET_MSR_INDEX_LIST: { 2260 case KVM_GET_MSR_INDEX_LIST: {
1947 struct kvm_msr_list __user *user_msr_list = argp; 2261 struct kvm_msr_list __user *user_msr_list = argp;
@@ -1977,43 +2291,11 @@ out:
1977 return r; 2291 return r;
1978} 2292}
1979 2293
1980static struct page *kvm_dev_nopage(struct vm_area_struct *vma,
1981 unsigned long address,
1982 int *type)
1983{
1984 struct kvm *kvm = vma->vm_file->private_data;
1985 unsigned long pgoff;
1986 struct kvm_memory_slot *slot;
1987 struct page *page;
1988
1989 *type = VM_FAULT_MINOR;
1990 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1991 slot = gfn_to_memslot(kvm, pgoff);
1992 if (!slot)
1993 return NOPAGE_SIGBUS;
1994 page = gfn_to_page(slot, pgoff);
1995 if (!page)
1996 return NOPAGE_SIGBUS;
1997 get_page(page);
1998 return page;
1999}
2000
2001static struct vm_operations_struct kvm_dev_vm_ops = {
2002 .nopage = kvm_dev_nopage,
2003};
2004
2005static int kvm_dev_mmap(struct file *file, struct vm_area_struct *vma)
2006{
2007 vma->vm_ops = &kvm_dev_vm_ops;
2008 return 0;
2009}
2010
2011static struct file_operations kvm_chardev_ops = { 2294static struct file_operations kvm_chardev_ops = {
2012 .open = kvm_dev_open, 2295 .open = kvm_dev_open,
2013 .release = kvm_dev_release, 2296 .release = kvm_dev_release,
2014 .unlocked_ioctl = kvm_dev_ioctl, 2297 .unlocked_ioctl = kvm_dev_ioctl,
2015 .compat_ioctl = kvm_dev_ioctl, 2298 .compat_ioctl = kvm_dev_ioctl,
2016 .mmap = kvm_dev_mmap,
2017}; 2299};
2018 2300
2019static struct miscdevice kvm_dev = { 2301static struct miscdevice kvm_dev = {
@@ -2080,13 +2362,17 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2080 int cpu = (long)v; 2362 int cpu = (long)v;
2081 2363
2082 switch (val) { 2364 switch (val) {
2083 case CPU_DEAD: 2365 case CPU_DOWN_PREPARE:
2084 case CPU_UP_CANCELED: 2366 case CPU_UP_CANCELED:
2367 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2368 cpu);
2085 decache_vcpus_on_cpu(cpu); 2369 decache_vcpus_on_cpu(cpu);
2086 smp_call_function_single(cpu, kvm_arch_ops->hardware_disable, 2370 smp_call_function_single(cpu, kvm_arch_ops->hardware_disable,
2087 NULL, 0, 1); 2371 NULL, 0, 1);
2088 break; 2372 break;
2089 case CPU_UP_PREPARE: 2373 case CPU_ONLINE:
2374 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2375 cpu);
2090 smp_call_function_single(cpu, kvm_arch_ops->hardware_enable, 2376 smp_call_function_single(cpu, kvm_arch_ops->hardware_enable,
2091 NULL, 0, 1); 2377 NULL, 0, 1);
2092 break; 2378 break;
@@ -2121,13 +2407,13 @@ static void kvm_exit_debug(void)
2121static int kvm_suspend(struct sys_device *dev, pm_message_t state) 2407static int kvm_suspend(struct sys_device *dev, pm_message_t state)
2122{ 2408{
2123 decache_vcpus_on_cpu(raw_smp_processor_id()); 2409 decache_vcpus_on_cpu(raw_smp_processor_id());
2124 on_each_cpu(kvm_arch_ops->hardware_disable, 0, 0, 1); 2410 on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
2125 return 0; 2411 return 0;
2126} 2412}
2127 2413
2128static int kvm_resume(struct sys_device *dev) 2414static int kvm_resume(struct sys_device *dev)
2129{ 2415{
2130 on_each_cpu(kvm_arch_ops->hardware_enable, 0, 0, 1); 2416 on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
2131 return 0; 2417 return 0;
2132} 2418}
2133 2419
@@ -2144,6 +2430,18 @@ static struct sys_device kvm_sysdev = {
2144 2430
2145hpa_t bad_page_address; 2431hpa_t bad_page_address;
2146 2432
2433static int kvmfs_get_sb(struct file_system_type *fs_type, int flags,
2434 const char *dev_name, void *data, struct vfsmount *mnt)
2435{
2436 return get_sb_pseudo(fs_type, "kvm:", NULL, KVMFS_SUPER_MAGIC, mnt);
2437}
2438
2439static struct file_system_type kvm_fs_type = {
2440 .name = "kvmfs",
2441 .get_sb = kvmfs_get_sb,
2442 .kill_sb = kill_anon_super,
2443};
2444
2147int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module) 2445int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
2148{ 2446{
2149 int r; 2447 int r;
@@ -2220,8 +2518,16 @@ void kvm_exit_arch(void)
2220static __init int kvm_init(void) 2518static __init int kvm_init(void)
2221{ 2519{
2222 static struct page *bad_page; 2520 static struct page *bad_page;
2223 int r = 0; 2521 int r;
2522
2523 r = register_filesystem(&kvm_fs_type);
2524 if (r)
2525 goto out3;
2224 2526
2527 kvmfs_mnt = kern_mount(&kvm_fs_type);
2528 r = PTR_ERR(kvmfs_mnt);
2529 if (IS_ERR(kvmfs_mnt))
2530 goto out2;
2225 kvm_init_debug(); 2531 kvm_init_debug();
2226 2532
2227 kvm_init_msr_list(); 2533 kvm_init_msr_list();
@@ -2234,10 +2540,14 @@ static __init int kvm_init(void)
2234 bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT; 2540 bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
2235 memset(__va(bad_page_address), 0, PAGE_SIZE); 2541 memset(__va(bad_page_address), 0, PAGE_SIZE);
2236 2542
2237 return r; 2543 return 0;
2238 2544
2239out: 2545out:
2240 kvm_exit_debug(); 2546 kvm_exit_debug();
2547 mntput(kvmfs_mnt);
2548out2:
2549 unregister_filesystem(&kvm_fs_type);
2550out3:
2241 return r; 2551 return r;
2242} 2552}
2243 2553
@@ -2245,6 +2555,8 @@ static __exit void kvm_exit(void)
2245{ 2555{
2246 kvm_exit_debug(); 2556 kvm_exit_debug();
2247 __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT)); 2557 __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
2558 mntput(kvmfs_mnt);
2559 unregister_filesystem(&kvm_fs_type);
2248} 2560}
2249 2561
2250module_init(kvm_init) 2562module_init(kvm_init)
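Taken together, the kvm.c hunks above split the old monolithic /dev/kvm interface: KVM_CREATE_VM now hands back a per-VM file descriptor (backed by the new kvmfs pseudo filesystem), and vcpu creation, memory regions, the dirty log and guest-memory mmap all move to that fd. A minimal userspace sketch of the resulting call sequence, assuming the era's struct kvm_memory_region layout from <linux/kvm.h>; error handling omitted:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int create_minimal_vm(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int api = ioctl(kvm_fd, KVM_GET_API_VERSION, 0);	/* still a /dev/kvm ioctl */
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);		/* new: returns a VM fd */

	struct kvm_memory_region mem = {
		.slot            = 0,
		.guest_phys_addr = 0,
		.memory_size     = 1 << 20,			/* 1 MiB, arbitrary */
	};
	ioctl(vm_fd, KVM_SET_MEMORY_REGION, &mem);		/* moved to the VM fd */
	ioctl(vm_fd, KVM_CREATE_VCPU, 0);			/* moved to the VM fd */
	(void)api;
	return vm_fd;
}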
diff --git a/drivers/kvm/kvm_svm.h b/drivers/kvm/kvm_svm.h
index 74cc862f49..624f1ca486 100644
--- a/drivers/kvm/kvm_svm.h
+++ b/drivers/kvm/kvm_svm.h
@@ -1,6 +1,7 @@
1#ifndef __KVM_SVM_H 1#ifndef __KVM_SVM_H
2#define __KVM_SVM_H 2#define __KVM_SVM_H
3 3
4#include <linux/kernel.h>
4#include <linux/types.h> 5#include <linux/types.h>
5#include <linux/list.h> 6#include <linux/list.h>
6#include <asm/msr.h> 7#include <asm/msr.h>
@@ -18,7 +19,7 @@ static const u32 host_save_msrs[] = {
18 MSR_IA32_LASTBRANCHTOIP, MSR_IA32_LASTINTFROMIP,MSR_IA32_LASTINTTOIP,*/ 19 MSR_IA32_LASTBRANCHTOIP, MSR_IA32_LASTINTFROMIP,MSR_IA32_LASTINTTOIP,*/
19}; 20};
20 21
21#define NR_HOST_SAVE_MSRS (sizeof(host_save_msrs) / sizeof(*host_save_msrs)) 22#define NR_HOST_SAVE_MSRS ARRAY_SIZE(host_save_msrs)
22#define NUM_DB_REGS 4 23#define NUM_DB_REGS 4
23 24
24struct vcpu_svm { 25struct vcpu_svm {
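The NR_HOST_SAVE_MSRS change above (and the matching ones in svm.c and vmx.c further down) swaps an open-coded element count for the ARRAY_SIZE() helper from the newly included <linux/kernel.h>. Functionally the two are the same, as this standalone sketch shows; the kernel macro additionally rejects non-array arguments at compile time:

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
	static const unsigned int msrpm_ranges[] = { 0, 0xc0000000, 0xc0010000 };

	printf("%zu MSR ranges\n", ARRAY_SIZE(msrpm_ranges));	/* prints 3 */
	return 0;
}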
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index be793770f3..a1a93368f3 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -298,18 +298,18 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
298 if (!is_rmap_pte(*spte)) 298 if (!is_rmap_pte(*spte))
299 return; 299 return;
300 page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT); 300 page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
301 if (!page->private) { 301 if (!page_private(page)) {
302 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte); 302 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
303 page->private = (unsigned long)spte; 303 set_page_private(page,(unsigned long)spte);
304 } else if (!(page->private & 1)) { 304 } else if (!(page_private(page) & 1)) {
305 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte); 305 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
306 desc = mmu_alloc_rmap_desc(vcpu); 306 desc = mmu_alloc_rmap_desc(vcpu);
307 desc->shadow_ptes[0] = (u64 *)page->private; 307 desc->shadow_ptes[0] = (u64 *)page_private(page);
308 desc->shadow_ptes[1] = spte; 308 desc->shadow_ptes[1] = spte;
309 page->private = (unsigned long)desc | 1; 309 set_page_private(page,(unsigned long)desc | 1);
310 } else { 310 } else {
311 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte); 311 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
312 desc = (struct kvm_rmap_desc *)(page->private & ~1ul); 312 desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
313 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more) 313 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
314 desc = desc->more; 314 desc = desc->more;
315 if (desc->shadow_ptes[RMAP_EXT-1]) { 315 if (desc->shadow_ptes[RMAP_EXT-1]) {
@@ -337,12 +337,12 @@ static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
337 if (j != 0) 337 if (j != 0)
338 return; 338 return;
339 if (!prev_desc && !desc->more) 339 if (!prev_desc && !desc->more)
340 page->private = (unsigned long)desc->shadow_ptes[0]; 340 set_page_private(page,(unsigned long)desc->shadow_ptes[0]);
341 else 341 else
342 if (prev_desc) 342 if (prev_desc)
343 prev_desc->more = desc->more; 343 prev_desc->more = desc->more;
344 else 344 else
345 page->private = (unsigned long)desc->more | 1; 345 set_page_private(page,(unsigned long)desc->more | 1);
346 mmu_free_rmap_desc(vcpu, desc); 346 mmu_free_rmap_desc(vcpu, desc);
347} 347}
348 348
@@ -356,20 +356,20 @@ static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
356 if (!is_rmap_pte(*spte)) 356 if (!is_rmap_pte(*spte))
357 return; 357 return;
358 page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT); 358 page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
359 if (!page->private) { 359 if (!page_private(page)) {
360 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte); 360 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
361 BUG(); 361 BUG();
362 } else if (!(page->private & 1)) { 362 } else if (!(page_private(page) & 1)) {
363 rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte); 363 rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
364 if ((u64 *)page->private != spte) { 364 if ((u64 *)page_private(page) != spte) {
365 printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n", 365 printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
366 spte, *spte); 366 spte, *spte);
367 BUG(); 367 BUG();
368 } 368 }
369 page->private = 0; 369 set_page_private(page,0);
370 } else { 370 } else {
371 rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte); 371 rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
372 desc = (struct kvm_rmap_desc *)(page->private & ~1ul); 372 desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
373 prev_desc = NULL; 373 prev_desc = NULL;
374 while (desc) { 374 while (desc) {
375 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) 375 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
@@ -398,11 +398,11 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
398 BUG_ON(!slot); 398 BUG_ON(!slot);
399 page = gfn_to_page(slot, gfn); 399 page = gfn_to_page(slot, gfn);
400 400
401 while (page->private) { 401 while (page_private(page)) {
402 if (!(page->private & 1)) 402 if (!(page_private(page) & 1))
403 spte = (u64 *)page->private; 403 spte = (u64 *)page_private(page);
404 else { 404 else {
405 desc = (struct kvm_rmap_desc *)(page->private & ~1ul); 405 desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
406 spte = desc->shadow_ptes[0]; 406 spte = desc->shadow_ptes[0];
407 } 407 }
408 BUG_ON(!spte); 408 BUG_ON(!spte);
@@ -1218,7 +1218,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
1218 INIT_LIST_HEAD(&page_header->link); 1218 INIT_LIST_HEAD(&page_header->link);
1219 if ((page = alloc_page(GFP_KERNEL)) == NULL) 1219 if ((page = alloc_page(GFP_KERNEL)) == NULL)
1220 goto error_1; 1220 goto error_1;
1221 page->private = (unsigned long)page_header; 1221 set_page_private(page, (unsigned long)page_header);
1222 page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT; 1222 page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
1223 memset(__va(page_header->page_hpa), 0, PAGE_SIZE); 1223 memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
1224 list_add(&page_header->link, &vcpu->free_pages); 1224 list_add(&page_header->link, &vcpu->free_pages);
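The mmu.c hunks above are a mechanical conversion from raw page->private accesses to the page_private()/set_page_private() accessors, but the value they guard is worth spelling out: the rmap code stores either a single shadow-pte pointer (bit 0 clear) or a pointer to a kvm_rmap_desc chain (bit 0 set) in that field. A hedged, standalone sketch of the tagging scheme; the helper names are illustrative, not kernel API:

#include <stdint.h>
#include <stdbool.h>

struct rmap_desc_example {
	uint64_t *shadow_ptes[4];		/* RMAP_EXT entries per descriptor */
	struct rmap_desc_example *more;		/* overflow chain */
};

static bool rmap_is_desc(unsigned long priv)
{
	return priv & 1;			/* bit 0 is the tag, as in the patch */
}

static uint64_t *rmap_single_spte(unsigned long priv)
{
	return (uint64_t *)priv;		/* valid only when !rmap_is_desc(priv) */
}

static struct rmap_desc_example *rmap_desc_of(unsigned long priv)
{
	return (struct rmap_desc_example *)(priv & ~1ul);
}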
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index b6b90e9e13..f3bcee9046 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -128,8 +128,10 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
128 goto access_error; 128 goto access_error;
129#endif 129#endif
130 130
131 if (!(*ptep & PT_ACCESSED_MASK)) 131 if (!(*ptep & PT_ACCESSED_MASK)) {
132 *ptep |= PT_ACCESSED_MASK; /* avoid rmw */ 132 mark_page_dirty(vcpu->kvm, table_gfn);
133 *ptep |= PT_ACCESSED_MASK;
134 }
133 135
134 if (walker->level == PT_PAGE_TABLE_LEVEL) { 136 if (walker->level == PT_PAGE_TABLE_LEVEL) {
135 walker->gfn = (*ptep & PT_BASE_ADDR_MASK) 137 walker->gfn = (*ptep & PT_BASE_ADDR_MASK)
@@ -185,6 +187,12 @@ static void FNAME(release_walker)(struct guest_walker *walker)
185 kunmap_atomic(walker->table, KM_USER0); 187 kunmap_atomic(walker->table, KM_USER0);
186} 188}
187 189
190static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
191 struct guest_walker *walker)
192{
193 mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]);
194}
195
188static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte, 196static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
189 u64 *shadow_pte, u64 access_bits, gfn_t gfn) 197 u64 *shadow_pte, u64 access_bits, gfn_t gfn)
190{ 198{
@@ -348,12 +356,15 @@ static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
348 } else if (kvm_mmu_lookup_page(vcpu, gfn)) { 356 } else if (kvm_mmu_lookup_page(vcpu, gfn)) {
349 pgprintk("%s: found shadow page for %lx, marking ro\n", 357 pgprintk("%s: found shadow page for %lx, marking ro\n",
350 __FUNCTION__, gfn); 358 __FUNCTION__, gfn);
359 mark_page_dirty(vcpu->kvm, gfn);
360 FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
351 *guest_ent |= PT_DIRTY_MASK; 361 *guest_ent |= PT_DIRTY_MASK;
352 *write_pt = 1; 362 *write_pt = 1;
353 return 0; 363 return 0;
354 } 364 }
355 mark_page_dirty(vcpu->kvm, gfn); 365 mark_page_dirty(vcpu->kvm, gfn);
356 *shadow_ent |= PT_WRITABLE_MASK; 366 *shadow_ent |= PT_WRITABLE_MASK;
367 FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
357 *guest_ent |= PT_DIRTY_MASK; 368 *guest_ent |= PT_DIRTY_MASK;
358 rmap_add(vcpu, shadow_ent); 369 rmap_add(vcpu, shadow_ent);
359 370
@@ -430,9 +441,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
430 /* 441 /*
431 * mmio: emulate if accessible, otherwise its a guest fault. 442 * mmio: emulate if accessible, otherwise its a guest fault.
432 */ 443 */
433 if (is_io_pte(*shadow_pte)) { 444 if (is_io_pte(*shadow_pte))
434 return 1; 445 return 1;
435 }
436 446
437 ++kvm_stat.pf_fixed; 447 ++kvm_stat.pf_fixed;
438 kvm_mmu_audit(vcpu, "post page fault (fixed)"); 448 kvm_mmu_audit(vcpu, "post page fault (fixed)");
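The paging_tmpl.h hunks make the shadow-MMU walker call mark_page_dirty() whenever it sets accessed or dirty bits in a guest page table, so those writes show up in the dirty log that KVM_GET_DIRTY_LOG (earlier in this patch) reports. Conceptually the call amounts to setting one bit per guest frame in the memory slot's dirty bitmap; a hedged, illustrative sketch, not the kernel's actual implementation:

static void mark_page_dirty_sketch(unsigned long *dirty_bitmap,
				   unsigned long base_gfn,
				   unsigned long gfn)
{
	unsigned long rel = gfn - base_gfn;	/* offset within the memory slot */
	unsigned long bits = 8 * sizeof(unsigned long);

	dirty_bitmap[rel / bits] |= 1ul << (rel % bits);
}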
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 83da4ea150..3d8ea7ac2e 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/kernel.h>
18#include <linux/vmalloc.h> 19#include <linux/vmalloc.h>
19#include <linux/highmem.h> 20#include <linux/highmem.h>
20#include <linux/profile.h> 21#include <linux/profile.h>
@@ -75,7 +76,7 @@ struct svm_init_data {
75 76
76static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000}; 77static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
77 78
78#define NUM_MSR_MAPS (sizeof(msrpm_ranges) / sizeof(*msrpm_ranges)) 79#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
79#define MSRS_RANGE_SIZE 2048 80#define MSRS_RANGE_SIZE 2048
80#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2) 81#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
81 82
@@ -485,6 +486,7 @@ static void init_vmcb(struct vmcb *vmcb)
485 486
486 control->intercept = (1ULL << INTERCEPT_INTR) | 487 control->intercept = (1ULL << INTERCEPT_INTR) |
487 (1ULL << INTERCEPT_NMI) | 488 (1ULL << INTERCEPT_NMI) |
489 (1ULL << INTERCEPT_SMI) |
488 /* 490 /*
489 * selective cr0 intercept bug? 491 * selective cr0 intercept bug?
490 * 0: 0f 22 d8 mov %eax,%cr3 492 * 0: 0f 22 d8 mov %eax,%cr3
@@ -553,7 +555,7 @@ static void init_vmcb(struct vmcb *vmcb)
553 * cr0 val on cpu init should be 0x60000010, we enable cpu 555 * cr0 val on cpu init should be 0x60000010, we enable cpu
554 * cache by default. the orderly way is to enable cache in bios. 556 * cache by default. the orderly way is to enable cache in bios.
555 */ 557 */
556 save->cr0 = 0x00000010 | CR0_PG_MASK; 558 save->cr0 = 0x00000010 | CR0_PG_MASK | CR0_WP_MASK;
557 save->cr4 = CR4_PAE_MASK; 559 save->cr4 = CR4_PAE_MASK;
558 /* rdx = ?? */ 560 /* rdx = ?? */
559} 561}
@@ -598,10 +600,9 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
598 kfree(vcpu->svm); 600 kfree(vcpu->svm);
599} 601}
600 602
601static struct kvm_vcpu *svm_vcpu_load(struct kvm_vcpu *vcpu) 603static void svm_vcpu_load(struct kvm_vcpu *vcpu)
602{ 604{
603 get_cpu(); 605 get_cpu();
604 return vcpu;
605} 606}
606 607
607static void svm_vcpu_put(struct kvm_vcpu *vcpu) 608static void svm_vcpu_put(struct kvm_vcpu *vcpu)
@@ -1042,22 +1043,22 @@ static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1042 1043
1043 addr_mask = io_adress(vcpu, _in, &kvm_run->io.address); 1044 addr_mask = io_adress(vcpu, _in, &kvm_run->io.address);
1044 if (!addr_mask) { 1045 if (!addr_mask) {
1045 printk(KERN_DEBUG "%s: get io address failed\n", __FUNCTION__); 1046 printk(KERN_DEBUG "%s: get io address failed\n",
1047 __FUNCTION__);
1046 return 1; 1048 return 1;
1047 } 1049 }
1048 1050
1049 if (kvm_run->io.rep) { 1051 if (kvm_run->io.rep) {
1050 kvm_run->io.count = vcpu->regs[VCPU_REGS_RCX] & addr_mask; 1052 kvm_run->io.count
1053 = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
1051 kvm_run->io.string_down = (vcpu->svm->vmcb->save.rflags 1054 kvm_run->io.string_down = (vcpu->svm->vmcb->save.rflags
1052 & X86_EFLAGS_DF) != 0; 1055 & X86_EFLAGS_DF) != 0;
1053 } 1056 }
1054 } else { 1057 } else
1055 kvm_run->io.value = vcpu->svm->vmcb->save.rax; 1058 kvm_run->io.value = vcpu->svm->vmcb->save.rax;
1056 }
1057 return 0; 1059 return 0;
1058} 1060}
1059 1061
1060
1061static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1062static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1062{ 1063{
1063 return 1; 1064 return 1;
@@ -1075,6 +1076,12 @@ static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1075 return 0; 1076 return 0;
1076} 1077}
1077 1078
1079static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1080{
1081 vcpu->svm->vmcb->save.rip += 3;
1082 return kvm_hypercall(vcpu, kvm_run);
1083}
1084
1078static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1085static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1079{ 1086{
1080 inject_ud(vcpu); 1087 inject_ud(vcpu);
@@ -1275,7 +1282,7 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
1275 [SVM_EXIT_TASK_SWITCH] = task_switch_interception, 1282 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
1276 [SVM_EXIT_SHUTDOWN] = shutdown_interception, 1283 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
1277 [SVM_EXIT_VMRUN] = invalid_op_interception, 1284 [SVM_EXIT_VMRUN] = invalid_op_interception,
1278 [SVM_EXIT_VMMCALL] = invalid_op_interception, 1285 [SVM_EXIT_VMMCALL] = vmmcall_interception,
1279 [SVM_EXIT_VMLOAD] = invalid_op_interception, 1286 [SVM_EXIT_VMLOAD] = invalid_op_interception,
1280 [SVM_EXIT_VMSAVE] = invalid_op_interception, 1287 [SVM_EXIT_VMSAVE] = invalid_op_interception,
1281 [SVM_EXIT_STGI] = invalid_op_interception, 1288 [SVM_EXIT_STGI] = invalid_op_interception,
@@ -1297,7 +1304,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1297 __FUNCTION__, vcpu->svm->vmcb->control.exit_int_info, 1304 __FUNCTION__, vcpu->svm->vmcb->control.exit_int_info,
1298 exit_code); 1305 exit_code);
1299 1306
1300 if (exit_code >= sizeof(svm_exit_handlers) / sizeof(*svm_exit_handlers) 1307 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
1301 || svm_exit_handlers[exit_code] == 0) { 1308 || svm_exit_handlers[exit_code] == 0) {
1302 kvm_run->exit_reason = KVM_EXIT_UNKNOWN; 1309 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
1303 printk(KERN_ERR "%s: 0x%x @ 0x%llx cr0 0x%lx rflags 0x%llx\n", 1310 printk(KERN_ERR "%s: 0x%x @ 0x%llx cr0 0x%lx rflags 0x%llx\n",
@@ -1668,6 +1675,18 @@ static int is_disabled(void)
1668 return 0; 1675 return 0;
1669} 1676}
1670 1677
1678static void
1679svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
1680{
1681 /*
1682 * Patch in the VMMCALL instruction:
1683 */
1684 hypercall[0] = 0x0f;
1685 hypercall[1] = 0x01;
1686 hypercall[2] = 0xd9;
1687 hypercall[3] = 0xc3;
1688}
1689
1671static struct kvm_arch_ops svm_arch_ops = { 1690static struct kvm_arch_ops svm_arch_ops = {
1672 .cpu_has_kvm_support = has_svm, 1691 .cpu_has_kvm_support = has_svm,
1673 .disabled_by_bios = is_disabled, 1692 .disabled_by_bios = is_disabled,
@@ -1716,6 +1735,7 @@ static struct kvm_arch_ops svm_arch_ops = {
1716 .run = svm_vcpu_run, 1735 .run = svm_vcpu_run,
1717 .skip_emulated_instruction = skip_emulated_instruction, 1736 .skip_emulated_instruction = skip_emulated_instruction,
1718 .vcpu_setup = svm_vcpu_setup, 1737 .vcpu_setup = svm_vcpu_setup,
1738 .patch_hypercall = svm_patch_hypercall,
1719}; 1739};
1720 1740
1721static int __init svm_init(void) 1741static int __init svm_init(void)
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index fd4e917343..c07178e611 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -19,6 +19,7 @@
19#include "vmx.h" 19#include "vmx.h"
20#include "kvm_vmx.h" 20#include "kvm_vmx.h"
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/kernel.h>
22#include <linux/mm.h> 23#include <linux/mm.h>
23#include <linux/highmem.h> 24#include <linux/highmem.h>
24#include <linux/profile.h> 25#include <linux/profile.h>
@@ -27,7 +28,6 @@
27 28
28#include "segment_descriptor.h" 29#include "segment_descriptor.h"
29 30
30
31MODULE_AUTHOR("Qumranet"); 31MODULE_AUTHOR("Qumranet");
32MODULE_LICENSE("GPL"); 32MODULE_LICENSE("GPL");
33 33
@@ -76,7 +76,7 @@ static const u32 vmx_msr_index[] = {
76#endif 76#endif
77 MSR_EFER, MSR_K6_STAR, 77 MSR_EFER, MSR_K6_STAR,
78}; 78};
79#define NR_VMX_MSR (sizeof(vmx_msr_index) / sizeof(*vmx_msr_index)) 79#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
80 80
81static inline int is_page_fault(u32 intr_info) 81static inline int is_page_fault(u32 intr_info)
82{ 82{
@@ -204,7 +204,7 @@ static void vmcs_write64(unsigned long field, u64 value)
204 * Switches to specified vcpu, until a matching vcpu_put(), but assumes 204 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
205 * vcpu mutex is already taken. 205 * vcpu mutex is already taken.
206 */ 206 */
207static struct kvm_vcpu *vmx_vcpu_load(struct kvm_vcpu *vcpu) 207static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
208{ 208{
209 u64 phys_addr = __pa(vcpu->vmcs); 209 u64 phys_addr = __pa(vcpu->vmcs);
210 int cpu; 210 int cpu;
@@ -242,7 +242,6 @@ static struct kvm_vcpu *vmx_vcpu_load(struct kvm_vcpu *vcpu)
242 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); 242 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
243 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ 243 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
244 } 244 }
245 return vcpu;
246} 245}
247 246
248static void vmx_vcpu_put(struct kvm_vcpu *vcpu) 247static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
@@ -418,10 +417,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
418 case MSR_IA32_SYSENTER_ESP: 417 case MSR_IA32_SYSENTER_ESP:
419 vmcs_write32(GUEST_SYSENTER_ESP, data); 418 vmcs_write32(GUEST_SYSENTER_ESP, data);
420 break; 419 break;
421 case MSR_IA32_TIME_STAMP_COUNTER: { 420 case MSR_IA32_TIME_STAMP_COUNTER:
422 guest_write_tsc(data); 421 guest_write_tsc(data);
423 break; 422 break;
424 }
425 default: 423 default:
426 msr = find_msr_entry(vcpu, msr_index); 424 msr = find_msr_entry(vcpu, msr_index);
427 if (msr) { 425 if (msr) {
@@ -793,6 +791,9 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
793 */ 791 */
794static void vmx_set_cr0_no_modeswitch(struct kvm_vcpu *vcpu, unsigned long cr0) 792static void vmx_set_cr0_no_modeswitch(struct kvm_vcpu *vcpu, unsigned long cr0)
795{ 793{
794 if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
795 enter_rmode(vcpu);
796
796 vcpu->rmode.active = ((cr0 & CR0_PE_MASK) == 0); 797 vcpu->rmode.active = ((cr0 & CR0_PE_MASK) == 0);
797 update_exception_bitmap(vcpu); 798 update_exception_bitmap(vcpu);
798 vmcs_writel(CR0_READ_SHADOW, cr0); 799 vmcs_writel(CR0_READ_SHADOW, cr0);
@@ -1467,6 +1468,18 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1467 return 0; 1468 return 0;
1468} 1469}
1469 1470
1471static void
1472vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
1473{
1474 /*
1475 * Patch in the VMCALL instruction:
1476 */
1477 hypercall[0] = 0x0f;
1478 hypercall[1] = 0x01;
1479 hypercall[2] = 0xc1;
1480 hypercall[3] = 0xc3;
1481}
1482
1470static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1483static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1471{ 1484{
1472 u64 exit_qualification; 1485 u64 exit_qualification;
@@ -1643,6 +1656,12 @@ static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1643 return 0; 1656 return 0;
1644} 1657}
1645 1658
1659static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1660{
1661 vmcs_writel(GUEST_RIP, vmcs_readl(GUEST_RIP)+3);
1662 return kvm_hypercall(vcpu, kvm_run);
1663}
1664
1646/* 1665/*
1647 * The exit handlers return 1 if the exit was handled fully and guest execution 1666 * The exit handlers return 1 if the exit was handled fully and guest execution
1648 * may resume. Otherwise they set the kvm_run parameter to indicate what needs 1667 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -1661,6 +1680,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
1661 [EXIT_REASON_MSR_WRITE] = handle_wrmsr, 1680 [EXIT_REASON_MSR_WRITE] = handle_wrmsr,
1662 [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, 1681 [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
1663 [EXIT_REASON_HLT] = handle_halt, 1682 [EXIT_REASON_HLT] = handle_halt,
1683 [EXIT_REASON_VMCALL] = handle_vmcall,
1664}; 1684};
1665 1685
1666static const int kvm_vmx_max_exit_handlers = 1686static const int kvm_vmx_max_exit_handlers =
@@ -2062,6 +2082,7 @@ static struct kvm_arch_ops vmx_arch_ops = {
2062 .run = vmx_vcpu_run, 2082 .run = vmx_vcpu_run,
2063 .skip_emulated_instruction = skip_emulated_instruction, 2083 .skip_emulated_instruction = skip_emulated_instruction,
2064 .vcpu_setup = vmx_vcpu_setup, 2084 .vcpu_setup = vmx_vcpu_setup,
2085 .patch_hypercall = vmx_patch_hypercall,
2065}; 2086};
2066 2087
2067static int __init vmx_init(void) 2088static int __init vmx_init(void)
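svm_patch_hypercall() and vmx_patch_hypercall() above emit the vendor-specific hypercall instruction plus a near return, so a guest can call a patched page without knowing whether it runs under SVM or VMX. The two byte sequences, collected into one hedged standalone helper (the function name is illustrative):

#include <string.h>

static void patch_hypercall_example(unsigned char *buf, int on_amd_svm)
{
	static const unsigned char vmmcall_ret[4] = { 0x0f, 0x01, 0xd9, 0xc3 };	/* VMMCALL; RET (SVM) */
	static const unsigned char vmcall_ret[4]  = { 0x0f, 0x01, 0xc1, 0xc3 };	/* VMCALL; RET (VMX) */

	memcpy(buf, on_amd_svm ? vmmcall_ret : vmcall_ret, sizeof(vmmcall_ret));
}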
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 05febfd9f0..6c06e825cf 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1296,27 +1296,17 @@ static struct super_type super_types[] = {
1296 .sync_super = super_1_sync, 1296 .sync_super = super_1_sync,
1297 }, 1297 },
1298}; 1298};
1299
1300static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
1301{
1302 struct list_head *tmp;
1303 mdk_rdev_t *rdev;
1304
1305 ITERATE_RDEV(mddev,rdev,tmp)
1306 if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
1307 return rdev;
1308
1309 return NULL;
1310}
1311 1299
1312static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2) 1300static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1313{ 1301{
1314 struct list_head *tmp; 1302 struct list_head *tmp, *tmp2;
1315 mdk_rdev_t *rdev; 1303 mdk_rdev_t *rdev, *rdev2;
1316 1304
1317 ITERATE_RDEV(mddev1,rdev,tmp) 1305 ITERATE_RDEV(mddev1,rdev,tmp)
1318 if (match_dev_unit(mddev2, rdev)) 1306 ITERATE_RDEV(mddev2, rdev2, tmp2)
1319 return 1; 1307 if (rdev->bdev->bd_contains ==
1308 rdev2->bdev->bd_contains)
1309 return 1;
1320 1310
1321 return 0; 1311 return 0;
1322} 1312}
@@ -1325,8 +1315,7 @@ static LIST_HEAD(pending_raid_disks);
1325 1315
1326static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) 1316static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1327{ 1317{
1328 mdk_rdev_t *same_pdev; 1318 char b[BDEVNAME_SIZE];
1329 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1330 struct kobject *ko; 1319 struct kobject *ko;
1331 char *s; 1320 char *s;
1332 1321
@@ -1342,14 +1331,6 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1342 else 1331 else
1343 mddev->size = rdev->size; 1332 mddev->size = rdev->size;
1344 } 1333 }
1345 same_pdev = match_dev_unit(mddev, rdev);
1346 if (same_pdev)
1347 printk(KERN_WARNING
1348 "%s: WARNING: %s appears to be on the same physical"
1349 " disk as %s. True\n protection against single-disk"
1350 " failure might be compromised.\n",
1351 mdname(mddev), bdevname(rdev->bdev,b),
1352 bdevname(same_pdev->bdev,b2));
1353 1334
1354 /* Verify rdev->desc_nr is unique. 1335 /* Verify rdev->desc_nr is unique.
1355 * If it is -1, assign a free number, else 1336 * If it is -1, assign a free number, else
@@ -3109,6 +3090,36 @@ static int do_md_run(mddev_t * mddev)
3109 return -EINVAL; 3090 return -EINVAL;
3110 } 3091 }
3111 3092
3093 if (pers->sync_request) {
3094 /* Warn if this is a potentially silly
3095 * configuration.
3096 */
3097 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3098 mdk_rdev_t *rdev2;
3099 struct list_head *tmp2;
3100 int warned = 0;
3101 ITERATE_RDEV(mddev, rdev, tmp) {
3102 ITERATE_RDEV(mddev, rdev2, tmp2) {
3103 if (rdev < rdev2 &&
3104 rdev->bdev->bd_contains ==
3105 rdev2->bdev->bd_contains) {
3106 printk(KERN_WARNING
3107 "%s: WARNING: %s appears to be"
3108 " on the same physical disk as"
3109 " %s.\n",
3110 mdname(mddev),
3111 bdevname(rdev->bdev,b),
3112 bdevname(rdev2->bdev,b2));
3113 warned = 1;
3114 }
3115 }
3116 }
3117 if (warned)
3118 printk(KERN_WARNING
3119 "True protection against single-disk"
3120 " failure might be compromised.\n");
3121 }
3122
3112 mddev->recovery = 0; 3123 mddev->recovery = 0;
3113 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */ 3124 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
3114 mddev->barriers_work = 1; 3125 mddev->barriers_work = 1;
@@ -3311,6 +3322,9 @@ static int do_md_stop(mddev_t * mddev, int mode)
3311 set_disk_ro(disk, 0); 3322 set_disk_ro(disk, 0);
3312 blk_queue_make_request(mddev->queue, md_fail_request); 3323 blk_queue_make_request(mddev->queue, md_fail_request);
3313 mddev->pers->stop(mddev); 3324 mddev->pers->stop(mddev);
3325 mddev->queue->merge_bvec_fn = NULL;
3326 mddev->queue->unplug_fn = NULL;
3327 mddev->queue->issue_flush_fn = NULL;
3314 if (mddev->pers->sync_request) 3328 if (mddev->pers->sync_request)
3315 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); 3329 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3316 3330
@@ -5343,6 +5357,44 @@ void md_do_sync(mddev_t *mddev)
5343EXPORT_SYMBOL_GPL(md_do_sync); 5357EXPORT_SYMBOL_GPL(md_do_sync);
5344 5358
5345 5359
5360static int remove_and_add_spares(mddev_t *mddev)
5361{
5362 mdk_rdev_t *rdev;
5363 struct list_head *rtmp;
5364 int spares = 0;
5365
5366 ITERATE_RDEV(mddev,rdev,rtmp)
5367 if (rdev->raid_disk >= 0 &&
5368 (test_bit(Faulty, &rdev->flags) ||
5369 ! test_bit(In_sync, &rdev->flags)) &&
5370 atomic_read(&rdev->nr_pending)==0) {
5371 if (mddev->pers->hot_remove_disk(
5372 mddev, rdev->raid_disk)==0) {
5373 char nm[20];
5374 sprintf(nm,"rd%d", rdev->raid_disk);
5375 sysfs_remove_link(&mddev->kobj, nm);
5376 rdev->raid_disk = -1;
5377 }
5378 }
5379
5380 if (mddev->degraded) {
5381 ITERATE_RDEV(mddev,rdev,rtmp)
5382 if (rdev->raid_disk < 0
5383 && !test_bit(Faulty, &rdev->flags)) {
5384 rdev->recovery_offset = 0;
5385 if (mddev->pers->hot_add_disk(mddev,rdev)) {
5386 char nm[20];
5387 sprintf(nm, "rd%d", rdev->raid_disk);
5388 sysfs_create_link(&mddev->kobj,
5389 &rdev->kobj, nm);
5390 spares++;
5391 md_new_event(mddev);
5392 } else
5393 break;
5394 }
5395 }
5396 return spares;
5397}
5346/* 5398/*
5347 * This routine is regularly called by all per-raid-array threads to 5399 * This routine is regularly called by all per-raid-array threads to
5348 * deal with generic issues like resync and super-block update. 5400 * deal with generic issues like resync and super-block update.
@@ -5397,7 +5449,7 @@ void md_check_recovery(mddev_t *mddev)
5397 return; 5449 return;
5398 5450
5399 if (mddev_trylock(mddev)) { 5451 if (mddev_trylock(mddev)) {
5400 int spares =0; 5452 int spares = 0;
5401 5453
5402 spin_lock_irq(&mddev->write_lock); 5454 spin_lock_irq(&mddev->write_lock);
5403 if (mddev->safemode && !atomic_read(&mddev->writes_pending) && 5455 if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
@@ -5460,35 +5512,13 @@ void md_check_recovery(mddev_t *mddev)
5460 * Spare are also removed and re-added, to allow 5512 * Spare are also removed and re-added, to allow
5461 * the personality to fail the re-add. 5513 * the personality to fail the re-add.
5462 */ 5514 */
5463 ITERATE_RDEV(mddev,rdev,rtmp)
5464 if (rdev->raid_disk >= 0 &&
5465 (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) &&
5466 atomic_read(&rdev->nr_pending)==0) {
5467 if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
5468 char nm[20];
5469 sprintf(nm,"rd%d", rdev->raid_disk);
5470 sysfs_remove_link(&mddev->kobj, nm);
5471 rdev->raid_disk = -1;
5472 }
5473 }
5474
5475 if (mddev->degraded) {
5476 ITERATE_RDEV(mddev,rdev,rtmp)
5477 if (rdev->raid_disk < 0
5478 && !test_bit(Faulty, &rdev->flags)) {
5479 rdev->recovery_offset = 0;
5480 if (mddev->pers->hot_add_disk(mddev,rdev)) {
5481 char nm[20];
5482 sprintf(nm, "rd%d", rdev->raid_disk);
5483 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
5484 spares++;
5485 md_new_event(mddev);
5486 } else
5487 break;
5488 }
5489 }
5490 5515
5491 if (spares) { 5516 if (mddev->reshape_position != MaxSector) {
5517 if (mddev->pers->check_reshape(mddev) != 0)
5518 /* Cannot proceed */
5519 goto unlock;
5520 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5521 } else if ((spares = remove_and_add_spares(mddev))) {
5492 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 5522 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5493 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 5523 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5494 } else if (mddev->recovery_cp < MaxSector) { 5524 } else if (mddev->recovery_cp < MaxSector) {
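The do_md_run() hunk above replaces the removed match_dev_unit() helper with a nested ITERATE_RDEV loop; the rdev < rdev2 pointer comparison is what keeps each unordered pair from being reported twice (and a device from being compared with itself). The same idiom in a small standalone form, with hypothetical data:

#include <stdio.h>

int main(void)
{
	int backing_disk[] = { 0, 1, 1, 2 };	/* hypothetical bd_contains ids */
	int n = 4;

	for (int i = 0; i < n; i++)
		for (int j = 0; j < n; j++)
			if (i < j && backing_disk[i] == backing_disk[j])
				printf("members %d and %d share a physical disk\n", i, j);
	return 0;
}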
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a9401c017e..82249a6901 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -429,7 +429,7 @@ static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
429 if (dev < 0) 429 if (dev < 0)
430 dev += conf->raid_disks; 430 dev += conf->raid_disks;
431 } else { 431 } else {
432 while (sector > conf->stride) { 432 while (sector >= conf->stride) {
433 sector -= conf->stride; 433 sector -= conf->stride;
434 if (dev < conf->near_copies) 434 if (dev < conf->near_copies)
435 dev += conf->raid_disks - conf->near_copies; 435 dev += conf->raid_disks - conf->near_copies;
@@ -1801,6 +1801,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1801 for (k=0; k<conf->copies; k++) 1801 for (k=0; k<conf->copies; k++)
1802 if (r10_bio->devs[k].devnum == i) 1802 if (r10_bio->devs[k].devnum == i)
1803 break; 1803 break;
1804 BUG_ON(k == conf->copies);
1804 bio = r10_bio->devs[1].bio; 1805 bio = r10_bio->devs[1].bio;
1805 bio->bi_next = biolist; 1806 bio->bi_next = biolist;
1806 biolist = bio; 1807 biolist = bio;
@@ -2021,19 +2022,30 @@ static int run(mddev_t *mddev)
2021 if (!conf->tmppage) 2022 if (!conf->tmppage)
2022 goto out_free_conf; 2023 goto out_free_conf;
2023 2024
2025 conf->mddev = mddev;
2026 conf->raid_disks = mddev->raid_disks;
2024 conf->near_copies = nc; 2027 conf->near_copies = nc;
2025 conf->far_copies = fc; 2028 conf->far_copies = fc;
2026 conf->copies = nc*fc; 2029 conf->copies = nc*fc;
2027 conf->far_offset = fo; 2030 conf->far_offset = fo;
2028 conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1; 2031 conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1;
2029 conf->chunk_shift = ffz(~mddev->chunk_size) - 9; 2032 conf->chunk_shift = ffz(~mddev->chunk_size) - 9;
2033 size = mddev->size >> (conf->chunk_shift-1);
2034 sector_div(size, fc);
2035 size = size * conf->raid_disks;
2036 sector_div(size, nc);
2037 /* 'size' is now the number of chunks in the array */
2038 /* calculate "used chunks per device" in 'stride' */
2039 stride = size * conf->copies;
2040 sector_div(stride, conf->raid_disks);
2041 mddev->size = stride << (conf->chunk_shift-1);
2042
2030 if (fo) 2043 if (fo)
2031 conf->stride = 1 << conf->chunk_shift; 2044 stride = 1;
2032 else { 2045 else
2033 stride = mddev->size >> (conf->chunk_shift-1);
2034 sector_div(stride, fc); 2046 sector_div(stride, fc);
2035 conf->stride = stride << conf->chunk_shift; 2047 conf->stride = stride << conf->chunk_shift;
2036 } 2048
2037 conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc, 2049 conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
2038 r10bio_pool_free, conf); 2050 r10bio_pool_free, conf);
2039 if (!conf->r10bio_pool) { 2051 if (!conf->r10bio_pool) {
@@ -2063,8 +2075,6 @@ static int run(mddev_t *mddev)
2063 2075
2064 disk->head_position = 0; 2076 disk->head_position = 0;
2065 } 2077 }
2066 conf->raid_disks = mddev->raid_disks;
2067 conf->mddev = mddev;
2068 spin_lock_init(&conf->device_lock); 2078 spin_lock_init(&conf->device_lock);
2069 INIT_LIST_HEAD(&conf->retry_list); 2079 INIT_LIST_HEAD(&conf->retry_list);
2070 2080
@@ -2106,16 +2116,8 @@ static int run(mddev_t *mddev)
2106 /* 2116 /*
2107 * Ok, everything is just fine now 2117 * Ok, everything is just fine now
2108 */ 2118 */
2109 if (conf->far_offset) { 2119 mddev->array_size = size << (conf->chunk_shift-1);
2110 size = mddev->size >> (conf->chunk_shift-1); 2120 mddev->resync_max_sectors = size << conf->chunk_shift;
2111 size *= conf->raid_disks;
2112 size <<= conf->chunk_shift;
2113 sector_div(size, conf->far_copies);
2114 } else
2115 size = conf->stride * conf->raid_disks;
2116 sector_div(size, conf->near_copies);
2117 mddev->array_size = size/2;
2118 mddev->resync_max_sectors = size;
2119 2121
2120 mddev->queue->unplug_fn = raid10_unplug; 2122 mddev->queue->unplug_fn = raid10_unplug;
2121 mddev->queue->issue_flush_fn = raid10_issue_flush; 2123 mddev->queue->issue_flush_fn = raid10_issue_flush;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 11c3d7bfa7..54a1ad5eef 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1050,7 +1050,7 @@ static void compute_parity5(struct stripe_head *sh, int method)
1050static void compute_parity6(struct stripe_head *sh, int method) 1050static void compute_parity6(struct stripe_head *sh, int method)
1051{ 1051{
1052 raid6_conf_t *conf = sh->raid_conf; 1052 raid6_conf_t *conf = sh->raid_conf;
1053 int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = conf->raid_disks, count; 1053 int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = sh->disks, count;
1054 struct bio *chosen; 1054 struct bio *chosen;
1055 /**** FIX THIS: This could be very bad if disks is close to 256 ****/ 1055 /**** FIX THIS: This could be very bad if disks is close to 256 ****/
1056 void *ptrs[disks]; 1056 void *ptrs[disks];
@@ -1131,8 +1131,7 @@ static void compute_parity6(struct stripe_head *sh, int method)
1131/* Compute one missing block */ 1131/* Compute one missing block */
1132static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero) 1132static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
1133{ 1133{
1134 raid6_conf_t *conf = sh->raid_conf; 1134 int i, count, disks = sh->disks;
1135 int i, count, disks = conf->raid_disks;
1136 void *ptr[MAX_XOR_BLOCKS], *p; 1135 void *ptr[MAX_XOR_BLOCKS], *p;
1137 int pd_idx = sh->pd_idx; 1136 int pd_idx = sh->pd_idx;
1138 int qd_idx = raid6_next_disk(pd_idx, disks); 1137 int qd_idx = raid6_next_disk(pd_idx, disks);
@@ -1170,8 +1169,7 @@ static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
1170/* Compute two missing blocks */ 1169/* Compute two missing blocks */
1171static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2) 1170static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
1172{ 1171{
1173 raid6_conf_t *conf = sh->raid_conf; 1172 int i, count, disks = sh->disks;
1174 int i, count, disks = conf->raid_disks;
1175 int pd_idx = sh->pd_idx; 1173 int pd_idx = sh->pd_idx;
1176 int qd_idx = raid6_next_disk(pd_idx, disks); 1174 int qd_idx = raid6_next_disk(pd_idx, disks);
1177 int d0_idx = raid6_next_disk(qd_idx, disks); 1175 int d0_idx = raid6_next_disk(qd_idx, disks);
@@ -1887,11 +1885,11 @@ static void handle_stripe5(struct stripe_head *sh)
1887static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page) 1885static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
1888{ 1886{
1889 raid6_conf_t *conf = sh->raid_conf; 1887 raid6_conf_t *conf = sh->raid_conf;
1890 int disks = conf->raid_disks; 1888 int disks = sh->disks;
1891 struct bio *return_bi= NULL; 1889 struct bio *return_bi= NULL;
1892 struct bio *bi; 1890 struct bio *bi;
1893 int i; 1891 int i;
1894 int syncing; 1892 int syncing, expanding, expanded;
1895 int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0; 1893 int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
1896 int non_overwrite = 0; 1894 int non_overwrite = 0;
1897 int failed_num[2] = {0, 0}; 1895 int failed_num[2] = {0, 0};
@@ -1909,6 +1907,8 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
1909 clear_bit(STRIPE_DELAYED, &sh->state); 1907 clear_bit(STRIPE_DELAYED, &sh->state);
1910 1908
1911 syncing = test_bit(STRIPE_SYNCING, &sh->state); 1909 syncing = test_bit(STRIPE_SYNCING, &sh->state);
1910 expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
1911 expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
1912 /* Now to look around and see what can be done */ 1912 /* Now to look around and see what can be done */
1913 1913
1914 rcu_read_lock(); 1914 rcu_read_lock();
@@ -2114,13 +2114,15 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2114 * parity, or to satisfy requests 2114 * parity, or to satisfy requests
2115 * or to load a block that is being partially written. 2115 * or to load a block that is being partially written.
2116 */ 2116 */
2117 if (to_read || non_overwrite || (to_write && failed) || (syncing && (uptodate < disks))) { 2117 if (to_read || non_overwrite || (to_write && failed) ||
2118 (syncing && (uptodate < disks)) || expanding) {
2118 for (i=disks; i--;) { 2119 for (i=disks; i--;) {
2119 dev = &sh->dev[i]; 2120 dev = &sh->dev[i];
2120 if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && 2121 if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
2121 (dev->toread || 2122 (dev->toread ||
2122 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 2123 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2123 syncing || 2124 syncing ||
2125 expanding ||
2124 (failed >= 1 && (sh->dev[failed_num[0]].toread || to_write)) || 2126 (failed >= 1 && (sh->dev[failed_num[0]].toread || to_write)) ||
2125 (failed >= 2 && (sh->dev[failed_num[1]].toread || to_write)) 2127 (failed >= 2 && (sh->dev[failed_num[1]].toread || to_write))
2126 ) 2128 )
@@ -2355,6 +2357,79 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2355 } 2357 }
2356 } 2358 }
2357 } 2359 }
2360
2361 if (expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
2362 /* Need to write out all blocks after computing P&Q */
2363 sh->disks = conf->raid_disks;
2364 sh->pd_idx = stripe_to_pdidx(sh->sector, conf,
2365 conf->raid_disks);
2366 compute_parity6(sh, RECONSTRUCT_WRITE);
2367 for (i = conf->raid_disks ; i-- ; ) {
2368 set_bit(R5_LOCKED, &sh->dev[i].flags);
2369 locked++;
2370 set_bit(R5_Wantwrite, &sh->dev[i].flags);
2371 }
2372 clear_bit(STRIPE_EXPANDING, &sh->state);
2373 } else if (expanded) {
2374 clear_bit(STRIPE_EXPAND_READY, &sh->state);
2375 atomic_dec(&conf->reshape_stripes);
2376 wake_up(&conf->wait_for_overlap);
2377 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
2378 }
2379
2380 if (expanding && locked == 0) {
2381 /* We have read all the blocks in this stripe and now we need to
2382 * copy some of them into a target stripe for expand.
2383 */
2384 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2385 for (i = 0; i < sh->disks ; i++)
2386 if (i != pd_idx && i != qd_idx) {
2387 int dd_idx2, pd_idx2, j;
2388 struct stripe_head *sh2;
2389
2390 sector_t bn = compute_blocknr(sh, i);
2391 sector_t s = raid5_compute_sector(
2392 bn, conf->raid_disks,
2393 conf->raid_disks - conf->max_degraded,
2394 &dd_idx2, &pd_idx2, conf);
2395 sh2 = get_active_stripe(conf, s,
2396 conf->raid_disks,
2397 pd_idx2, 1);
2398 if (sh2 == NULL)
2399 /* so for only the early blocks of
2400 * this stripe have been requests.
2401 * When later blocks get requests, we
2402 * will try again
2403 */
2404 continue;
2405 if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
2406 test_bit(R5_Expanded,
2407 &sh2->dev[dd_idx2].flags)) {
2408 /* must have already done this block */
2409 release_stripe(sh2);
2410 continue;
2411 }
2412 memcpy(page_address(sh2->dev[dd_idx2].page),
2413 page_address(sh->dev[i].page),
2414 STRIPE_SIZE);
2415 set_bit(R5_Expanded, &sh2->dev[dd_idx2].flags);
2416 set_bit(R5_UPTODATE, &sh2->dev[dd_idx2].flags);
2417 for (j = 0 ; j < conf->raid_disks ; j++)
2418 if (j != sh2->pd_idx &&
2419 j != raid6_next_disk(sh2->pd_idx,
2420 sh2->disks) &&
2421 !test_bit(R5_Expanded,
2422 &sh2->dev[j].flags))
2423 break;
2424 if (j == conf->raid_disks) {
2425 set_bit(STRIPE_EXPAND_READY,
2426 &sh2->state);
2427 set_bit(STRIPE_HANDLE, &sh2->state);
2428 }
2429 release_stripe(sh2);
2430 }
2431 }
2432
2358 spin_unlock(&sh->lock); 2433 spin_unlock(&sh->lock);
2359 2434
2360 while ((bi=return_bi)) { 2435 while ((bi=return_bi)) {
@@ -2395,7 +2470,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2395 rcu_read_unlock(); 2470 rcu_read_unlock();
2396 2471
2397 if (rdev) { 2472 if (rdev) {
2398 if (syncing) 2473 if (syncing || expanding || expanded)
2399 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 2474 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
2400 2475
2401 bi->bi_bdev = rdev->bdev; 2476 bi->bi_bdev = rdev->bdev;
@@ -2915,8 +2990,9 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
2915 struct stripe_head *sh; 2990 struct stripe_head *sh;
2916 int pd_idx; 2991 int pd_idx;
2917 sector_t first_sector, last_sector; 2992 sector_t first_sector, last_sector;
2918 int raid_disks; 2993 int raid_disks = conf->previous_raid_disks;
2919 int data_disks; 2994 int data_disks = raid_disks - conf->max_degraded;
2995 int new_data_disks = conf->raid_disks - conf->max_degraded;
2920 int i; 2996 int i;
2921 int dd_idx; 2997 int dd_idx;
2922 sector_t writepos, safepos, gap; 2998 sector_t writepos, safepos, gap;
@@ -2925,7 +3001,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
2925 conf->expand_progress != 0) { 3001 conf->expand_progress != 0) {
2926 /* restarting in the middle, skip the initial sectors */ 3002 /* restarting in the middle, skip the initial sectors */
2927 sector_nr = conf->expand_progress; 3003 sector_nr = conf->expand_progress;
2928 sector_div(sector_nr, conf->raid_disks-1); 3004 sector_div(sector_nr, new_data_disks);
2929 *skipped = 1; 3005 *skipped = 1;
2930 return sector_nr; 3006 return sector_nr;
2931 } 3007 }
@@ -2939,14 +3015,14 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
2939 * to after where expand_lo old_maps to 3015 * to after where expand_lo old_maps to
2940 */ 3016 */
2941 writepos = conf->expand_progress + 3017 writepos = conf->expand_progress +
2942 conf->chunk_size/512*(conf->raid_disks-1); 3018 conf->chunk_size/512*(new_data_disks);
2943 sector_div(writepos, conf->raid_disks-1); 3019 sector_div(writepos, new_data_disks);
2944 safepos = conf->expand_lo; 3020 safepos = conf->expand_lo;
2945 sector_div(safepos, conf->previous_raid_disks-1); 3021 sector_div(safepos, data_disks);
2946 gap = conf->expand_progress - conf->expand_lo; 3022 gap = conf->expand_progress - conf->expand_lo;
2947 3023
2948 if (writepos >= safepos || 3024 if (writepos >= safepos ||
2949 gap > (conf->raid_disks-1)*3000*2 /*3Meg*/) { 3025 gap > (new_data_disks)*3000*2 /*3Meg*/) {
2950 /* Cannot proceed until we've updated the superblock... */ 3026 /* Cannot proceed until we've updated the superblock... */
2951 wait_event(conf->wait_for_overlap, 3027 wait_event(conf->wait_for_overlap,
2952 atomic_read(&conf->reshape_stripes)==0); 3028 atomic_read(&conf->reshape_stripes)==0);
@@ -2976,6 +3052,9 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
2976 sector_t s; 3052 sector_t s;
2977 if (j == sh->pd_idx) 3053 if (j == sh->pd_idx)
2978 continue; 3054 continue;
3055 if (conf->level == 6 &&
3056 j == raid6_next_disk(sh->pd_idx, sh->disks))
3057 continue;
2979 s = compute_blocknr(sh, j); 3058 s = compute_blocknr(sh, j);
2980 if (s < (mddev->array_size<<1)) { 3059 if (s < (mddev->array_size<<1)) {
2981 skipped = 1; 3060 skipped = 1;
@@ -2992,28 +3071,27 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
2992 release_stripe(sh); 3071 release_stripe(sh);
2993 } 3072 }
2994 spin_lock_irq(&conf->device_lock); 3073 spin_lock_irq(&conf->device_lock);
2995 conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1); 3074 conf->expand_progress = (sector_nr + i) * new_data_disks;
2996 spin_unlock_irq(&conf->device_lock); 3075 spin_unlock_irq(&conf->device_lock);
2997 /* Ok, those stripe are ready. We can start scheduling 3076 /* Ok, those stripe are ready. We can start scheduling
2998 * reads on the source stripes. 3077 * reads on the source stripes.
2999 * The source stripes are determined by mapping the first and last 3078 * The source stripes are determined by mapping the first and last
3000 * block on the destination stripes. 3079 * block on the destination stripes.
3001 */ 3080 */
3002 raid_disks = conf->previous_raid_disks;
3003 data_disks = raid_disks - 1;
3004 first_sector = 3081 first_sector =
3005 raid5_compute_sector(sector_nr*(conf->raid_disks-1), 3082 raid5_compute_sector(sector_nr*(new_data_disks),
3006 raid_disks, data_disks, 3083 raid_disks, data_disks,
3007 &dd_idx, &pd_idx, conf); 3084 &dd_idx, &pd_idx, conf);
3008 last_sector = 3085 last_sector =
3009 raid5_compute_sector((sector_nr+conf->chunk_size/512) 3086 raid5_compute_sector((sector_nr+conf->chunk_size/512)
3010 *(conf->raid_disks-1) -1, 3087 *(new_data_disks) -1,
3011 raid_disks, data_disks, 3088 raid_disks, data_disks,
3012 &dd_idx, &pd_idx, conf); 3089 &dd_idx, &pd_idx, conf);
3013 if (last_sector >= (mddev->size<<1)) 3090 if (last_sector >= (mddev->size<<1))
3014 last_sector = (mddev->size<<1)-1; 3091 last_sector = (mddev->size<<1)-1;
3015 while (first_sector <= last_sector) { 3092 while (first_sector <= last_sector) {
3016 pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks); 3093 pd_idx = stripe_to_pdidx(first_sector, conf,
3094 conf->previous_raid_disks);
3017 sh = get_active_stripe(conf, first_sector, 3095 sh = get_active_stripe(conf, first_sector,
3018 conf->previous_raid_disks, pd_idx, 0); 3096 conf->previous_raid_disks, pd_idx, 0);
3019 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 3097 set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
@@ -3348,35 +3426,44 @@ static int run(mddev_t *mddev)
3348 */ 3426 */
3349 sector_t here_new, here_old; 3427 sector_t here_new, here_old;
3350 int old_disks; 3428 int old_disks;
3429 int max_degraded = (mddev->level == 5 ? 1 : 2);
3351 3430
3352 if (mddev->new_level != mddev->level || 3431 if (mddev->new_level != mddev->level ||
3353 mddev->new_layout != mddev->layout || 3432 mddev->new_layout != mddev->layout ||
3354 mddev->new_chunk != mddev->chunk_size) { 3433 mddev->new_chunk != mddev->chunk_size) {
3355 printk(KERN_ERR "raid5: %s: unsupported reshape required - aborting.\n", 3434 printk(KERN_ERR "raid5: %s: unsupported reshape "
3435 "required - aborting.\n",
3356 mdname(mddev)); 3436 mdname(mddev));
3357 return -EINVAL; 3437 return -EINVAL;
3358 } 3438 }
3359 if (mddev->delta_disks <= 0) { 3439 if (mddev->delta_disks <= 0) {
3360 printk(KERN_ERR "raid5: %s: unsupported reshape (reduce disks) required - aborting.\n", 3440 printk(KERN_ERR "raid5: %s: unsupported reshape "
3441 "(reduce disks) required - aborting.\n",
3361 mdname(mddev)); 3442 mdname(mddev));
3362 return -EINVAL; 3443 return -EINVAL;
3363 } 3444 }
3364 old_disks = mddev->raid_disks - mddev->delta_disks; 3445 old_disks = mddev->raid_disks - mddev->delta_disks;
3365 /* reshape_position must be on a new-stripe boundary, and one 3446 /* reshape_position must be on a new-stripe boundary, and one
3366 * further up in new geometry must map after here in old geometry. 3447 * further up in new geometry must map after here in old
3448 * geometry.
3367 */ 3449 */
3368 here_new = mddev->reshape_position; 3450 here_new = mddev->reshape_position;
3369 if (sector_div(here_new, (mddev->chunk_size>>9)*(mddev->raid_disks-1))) { 3451 if (sector_div(here_new, (mddev->chunk_size>>9)*
3370 printk(KERN_ERR "raid5: reshape_position not on a stripe boundary\n"); 3452 (mddev->raid_disks - max_degraded))) {
3453 printk(KERN_ERR "raid5: reshape_position not "
3454 "on a stripe boundary\n");
3371 return -EINVAL; 3455 return -EINVAL;
3372 } 3456 }
3373 /* here_new is the stripe we will write to */ 3457 /* here_new is the stripe we will write to */
3374 here_old = mddev->reshape_position; 3458 here_old = mddev->reshape_position;
3375 sector_div(here_old, (mddev->chunk_size>>9)*(old_disks-1)); 3459 sector_div(here_old, (mddev->chunk_size>>9)*
3376 /* here_old is the first stripe that we might need to read from */ 3460 (old_disks-max_degraded));
3461 /* here_old is the first stripe that we might need to read
3462 * from */
3377 if (here_new >= here_old) { 3463 if (here_new >= here_old) {
3378 /* Reading from the same stripe as writing to - bad */ 3464 /* Reading from the same stripe as writing to - bad */
3379 printk(KERN_ERR "raid5: reshape_position too early for auto-recovery - aborting.\n"); 3465 printk(KERN_ERR "raid5: reshape_position too early for "
3466 "auto-recovery - aborting.\n");
3380 return -EINVAL; 3467 return -EINVAL;
3381 } 3468 }
3382 printk(KERN_INFO "raid5: reshape will continue\n"); 3469 printk(KERN_INFO "raid5: reshape will continue\n");
@@ -3814,6 +3901,8 @@ static int raid5_check_reshape(mddev_t *mddev)
3814 if (err) 3901 if (err)
3815 return err; 3902 return err;
3816 3903
3904 if (mddev->degraded > conf->max_degraded)
3905 return -EINVAL;
3817 /* looks like we might be able to manage this */ 3906 /* looks like we might be able to manage this */
3818 return 0; 3907 return 0;
3819} 3908}
@@ -3827,8 +3916,7 @@ static int raid5_start_reshape(mddev_t *mddev)
3827 int added_devices = 0; 3916 int added_devices = 0;
3828 unsigned long flags; 3917 unsigned long flags;
3829 3918
3830 if (mddev->degraded || 3919 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3831 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3832 return -EBUSY; 3920 return -EBUSY;
3833 3921
3834 ITERATE_RDEV(mddev, rdev, rtmp) 3922 ITERATE_RDEV(mddev, rdev, rtmp)
@@ -3836,7 +3924,7 @@ static int raid5_start_reshape(mddev_t *mddev)
3836 !test_bit(Faulty, &rdev->flags)) 3924 !test_bit(Faulty, &rdev->flags))
3837 spares++; 3925 spares++;
3838 3926
3839 if (spares < mddev->delta_disks-1) 3927 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
3840 /* Not enough devices even to make a degraded array 3928 /* Not enough devices even to make a degraded array
3841 * of that size 3929 * of that size
3842 */ 3930 */
@@ -3899,7 +3987,8 @@ static void end_reshape(raid5_conf_t *conf)
3899 struct block_device *bdev; 3987 struct block_device *bdev;
3900 3988
3901 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 3989 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
3902 conf->mddev->array_size = conf->mddev->size * (conf->raid_disks-1); 3990 conf->mddev->array_size = conf->mddev->size *
3991 (conf->raid_disks - conf->max_degraded);
3903 set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1); 3992 set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1);
3904 conf->mddev->changed = 1; 3993 conf->mddev->changed = 1;
3905 3994
@@ -3972,6 +4061,10 @@ static struct mdk_personality raid6_personality =
3972 .spare_active = raid5_spare_active, 4061 .spare_active = raid5_spare_active,
3973 .sync_request = sync_request, 4062 .sync_request = sync_request,
3974 .resize = raid5_resize, 4063 .resize = raid5_resize,
4064#ifdef CONFIG_MD_RAID5_RESHAPE
4065 .check_reshape = raid5_check_reshape,
4066 .start_reshape = raid5_start_reshape,
4067#endif
3975 .quiesce = raid5_quiesce, 4068 .quiesce = raid5_quiesce,
3976}; 4069};
3977static struct mdk_personality raid5_personality = 4070static struct mdk_personality raid5_personality =
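For reference, the reworked spare test in the raid5.c hunks above ("spares - mddev->degraded < mddev->delta_disks - conf->max_degraded", where max_degraded is 1 for raid5 and 2 for raid6) can be illustrated with a small standalone sketch; the function name and the sample numbers are illustrative only, not part of the patch:

#include <stdio.h>

/* Illustrative only: may a reshape start with this many usable spares?
 * max_degraded is 1 for raid5 and 2 for raid6. */
static int enough_spares_for_reshape(int spares, int degraded,
                                     int delta_disks, int max_degraded)
{
        return !(spares - degraded < delta_disks - max_degraded);
}

int main(void)
{
        /* Growing a raid6 by two disks with one spare while one member has
         * failed: 1 - 1 < 2 - 2 is false, so the reshape may proceed and
         * the array simply stays degraded. */
        printf("%d\n", enough_spares_for_reshape(1, 1, 2, 2));
        return 0;
}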
diff --git a/drivers/md/raid6mmx.c b/drivers/md/raid6mmx.c
index 359157aaf9..6181a5a336 100644
--- a/drivers/md/raid6mmx.c
+++ b/drivers/md/raid6mmx.c
@@ -30,14 +30,8 @@ const struct raid6_mmx_constants {
30 30
31static int raid6_have_mmx(void) 31static int raid6_have_mmx(void)
32{ 32{
33#ifdef __KERNEL__
34 /* Not really "boot_cpu" but "all_cpus" */ 33 /* Not really "boot_cpu" but "all_cpus" */
35 return boot_cpu_has(X86_FEATURE_MMX); 34 return boot_cpu_has(X86_FEATURE_MMX);
36#else
37 /* User space test code */
38 u32 features = cpuid_features();
39 return ( (features & (1<<23)) == (1<<23) );
40#endif
41} 35}
42 36
43/* 37/*
@@ -48,13 +42,12 @@ static void raid6_mmx1_gen_syndrome(int disks, size_t bytes, void **ptrs)
48 u8 **dptr = (u8 **)ptrs; 42 u8 **dptr = (u8 **)ptrs;
49 u8 *p, *q; 43 u8 *p, *q;
50 int d, z, z0; 44 int d, z, z0;
51 raid6_mmx_save_t sa;
52 45
53 z0 = disks - 3; /* Highest data disk */ 46 z0 = disks - 3; /* Highest data disk */
54 p = dptr[z0+1]; /* XOR parity */ 47 p = dptr[z0+1]; /* XOR parity */
55 q = dptr[z0+2]; /* RS syndrome */ 48 q = dptr[z0+2]; /* RS syndrome */
56 49
57 raid6_before_mmx(&sa); 50 kernel_fpu_begin();
58 51
59 asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d)); 52 asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
60 asm volatile("pxor %mm5,%mm5"); /* Zero temp */ 53 asm volatile("pxor %mm5,%mm5"); /* Zero temp */
@@ -78,7 +71,7 @@ static void raid6_mmx1_gen_syndrome(int disks, size_t bytes, void **ptrs)
78 asm volatile("pxor %mm4,%mm4"); 71 asm volatile("pxor %mm4,%mm4");
79 } 72 }
80 73
81 raid6_after_mmx(&sa); 74 kernel_fpu_end();
82} 75}
83 76
84const struct raid6_calls raid6_mmxx1 = { 77const struct raid6_calls raid6_mmxx1 = {
@@ -96,13 +89,12 @@ static void raid6_mmx2_gen_syndrome(int disks, size_t bytes, void **ptrs)
96 u8 **dptr = (u8 **)ptrs; 89 u8 **dptr = (u8 **)ptrs;
97 u8 *p, *q; 90 u8 *p, *q;
98 int d, z, z0; 91 int d, z, z0;
99 raid6_mmx_save_t sa;
100 92
101 z0 = disks - 3; /* Highest data disk */ 93 z0 = disks - 3; /* Highest data disk */
102 p = dptr[z0+1]; /* XOR parity */ 94 p = dptr[z0+1]; /* XOR parity */
103 q = dptr[z0+2]; /* RS syndrome */ 95 q = dptr[z0+2]; /* RS syndrome */
104 96
105 raid6_before_mmx(&sa); 97 kernel_fpu_begin();
106 98
107 asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d)); 99 asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
108 asm volatile("pxor %mm5,%mm5"); /* Zero temp */ 100 asm volatile("pxor %mm5,%mm5"); /* Zero temp */
@@ -137,7 +129,7 @@ static void raid6_mmx2_gen_syndrome(int disks, size_t bytes, void **ptrs)
137 asm volatile("movq %%mm6,%0" : "=m" (q[d+8])); 129 asm volatile("movq %%mm6,%0" : "=m" (q[d+8]));
138 } 130 }
139 131
140 raid6_after_mmx(&sa); 132 kernel_fpu_end();
141} 133}
142 134
143const struct raid6_calls raid6_mmxx2 = { 135const struct raid6_calls raid6_mmxx2 = {
diff --git a/drivers/md/raid6sse1.c b/drivers/md/raid6sse1.c
index f7e7859f71..f0a1ba8f40 100644
--- a/drivers/md/raid6sse1.c
+++ b/drivers/md/raid6sse1.c
@@ -33,16 +33,10 @@ extern const struct raid6_mmx_constants {
33 33
34static int raid6_have_sse1_or_mmxext(void) 34static int raid6_have_sse1_or_mmxext(void)
35{ 35{
36#ifdef __KERNEL__
37 /* Not really boot_cpu but "all_cpus" */ 36 /* Not really boot_cpu but "all_cpus" */
38 return boot_cpu_has(X86_FEATURE_MMX) && 37 return boot_cpu_has(X86_FEATURE_MMX) &&
39 (boot_cpu_has(X86_FEATURE_XMM) || 38 (boot_cpu_has(X86_FEATURE_XMM) ||
40 boot_cpu_has(X86_FEATURE_MMXEXT)); 39 boot_cpu_has(X86_FEATURE_MMXEXT));
41#else
42 /* User space test code - this incorrectly breaks on some Athlons */
43 u32 features = cpuid_features();
44 return ( (features & (5<<23)) == (5<<23) );
45#endif
46} 40}
47 41
48/* 42/*
@@ -53,14 +47,12 @@ static void raid6_sse11_gen_syndrome(int disks, size_t bytes, void **ptrs)
53 u8 **dptr = (u8 **)ptrs; 47 u8 **dptr = (u8 **)ptrs;
54 u8 *p, *q; 48 u8 *p, *q;
55 int d, z, z0; 49 int d, z, z0;
56 raid6_mmx_save_t sa;
57 50
58 z0 = disks - 3; /* Highest data disk */ 51 z0 = disks - 3; /* Highest data disk */
59 p = dptr[z0+1]; /* XOR parity */ 52 p = dptr[z0+1]; /* XOR parity */
60 q = dptr[z0+2]; /* RS syndrome */ 53 q = dptr[z0+2]; /* RS syndrome */
61 54
62 /* This is really MMX code, not SSE */ 55 kernel_fpu_begin();
63 raid6_before_mmx(&sa);
64 56
65 asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d)); 57 asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
66 asm volatile("pxor %mm5,%mm5"); /* Zero temp */ 58 asm volatile("pxor %mm5,%mm5"); /* Zero temp */
@@ -94,8 +86,8 @@ static void raid6_sse11_gen_syndrome(int disks, size_t bytes, void **ptrs)
94 asm volatile("movntq %%mm4,%0" : "=m" (q[d])); 86 asm volatile("movntq %%mm4,%0" : "=m" (q[d]));
95 } 87 }
96 88
97 raid6_after_mmx(&sa);
98 asm volatile("sfence" : : : "memory"); 89 asm volatile("sfence" : : : "memory");
90 kernel_fpu_end();
99} 91}
100 92
101const struct raid6_calls raid6_sse1x1 = { 93const struct raid6_calls raid6_sse1x1 = {
@@ -113,13 +105,12 @@ static void raid6_sse12_gen_syndrome(int disks, size_t bytes, void **ptrs)
113 u8 **dptr = (u8 **)ptrs; 105 u8 **dptr = (u8 **)ptrs;
114 u8 *p, *q; 106 u8 *p, *q;
115 int d, z, z0; 107 int d, z, z0;
116 raid6_mmx_save_t sa;
117 108
118 z0 = disks - 3; /* Highest data disk */ 109 z0 = disks - 3; /* Highest data disk */
119 p = dptr[z0+1]; /* XOR parity */ 110 p = dptr[z0+1]; /* XOR parity */
120 q = dptr[z0+2]; /* RS syndrome */ 111 q = dptr[z0+2]; /* RS syndrome */
121 112
122 raid6_before_mmx(&sa); 113 kernel_fpu_begin();
123 114
124 asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d)); 115 asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
125 asm volatile("pxor %mm5,%mm5"); /* Zero temp */ 116 asm volatile("pxor %mm5,%mm5"); /* Zero temp */
@@ -157,8 +148,8 @@ static void raid6_sse12_gen_syndrome(int disks, size_t bytes, void **ptrs)
157 asm volatile("movntq %%mm6,%0" : "=m" (q[d+8])); 148 asm volatile("movntq %%mm6,%0" : "=m" (q[d+8]));
158 } 149 }
159 150
160 raid6_after_mmx(&sa);
161 asm volatile("sfence" : :: "memory"); 151 asm volatile("sfence" : :: "memory");
152 kernel_fpu_end();
162} 153}
163 154
164const struct raid6_calls raid6_sse1x2 = { 155const struct raid6_calls raid6_sse1x2 = {
diff --git a/drivers/md/raid6sse2.c b/drivers/md/raid6sse2.c
index b3aa7fe087..0f019762a7 100644
--- a/drivers/md/raid6sse2.c
+++ b/drivers/md/raid6sse2.c
@@ -30,17 +30,11 @@ static const struct raid6_sse_constants {
30 30
31static int raid6_have_sse2(void) 31static int raid6_have_sse2(void)
32{ 32{
33#ifdef __KERNEL__
34 /* Not really boot_cpu but "all_cpus" */ 33 /* Not really boot_cpu but "all_cpus" */
35 return boot_cpu_has(X86_FEATURE_MMX) && 34 return boot_cpu_has(X86_FEATURE_MMX) &&
36 boot_cpu_has(X86_FEATURE_FXSR) && 35 boot_cpu_has(X86_FEATURE_FXSR) &&
37 boot_cpu_has(X86_FEATURE_XMM) && 36 boot_cpu_has(X86_FEATURE_XMM) &&
38 boot_cpu_has(X86_FEATURE_XMM2); 37 boot_cpu_has(X86_FEATURE_XMM2);
39#else
40 /* User space test code */
41 u32 features = cpuid_features();
42 return ( (features & (15<<23)) == (15<<23) );
43#endif
44} 38}
45 39
46/* 40/*
@@ -51,13 +45,12 @@ static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
51 u8 **dptr = (u8 **)ptrs; 45 u8 **dptr = (u8 **)ptrs;
52 u8 *p, *q; 46 u8 *p, *q;
53 int d, z, z0; 47 int d, z, z0;
54 raid6_sse_save_t sa;
55 48
56 z0 = disks - 3; /* Highest data disk */ 49 z0 = disks - 3; /* Highest data disk */
57 p = dptr[z0+1]; /* XOR parity */ 50 p = dptr[z0+1]; /* XOR parity */
58 q = dptr[z0+2]; /* RS syndrome */ 51 q = dptr[z0+2]; /* RS syndrome */
59 52
60 raid6_before_sse2(&sa); 53 kernel_fpu_begin();
61 54
62 asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0])); 55 asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
63 asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */ 56 asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
@@ -93,8 +86,8 @@ static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
93 asm volatile("pxor %xmm4,%xmm4"); 86 asm volatile("pxor %xmm4,%xmm4");
94 } 87 }
95 88
96 raid6_after_sse2(&sa);
97 asm volatile("sfence" : : : "memory"); 89 asm volatile("sfence" : : : "memory");
90 kernel_fpu_end();
98} 91}
99 92
100const struct raid6_calls raid6_sse2x1 = { 93const struct raid6_calls raid6_sse2x1 = {
@@ -112,13 +105,12 @@ static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
112 u8 **dptr = (u8 **)ptrs; 105 u8 **dptr = (u8 **)ptrs;
113 u8 *p, *q; 106 u8 *p, *q;
114 int d, z, z0; 107 int d, z, z0;
115 raid6_sse_save_t sa;
116 108
117 z0 = disks - 3; /* Highest data disk */ 109 z0 = disks - 3; /* Highest data disk */
118 p = dptr[z0+1]; /* XOR parity */ 110 p = dptr[z0+1]; /* XOR parity */
119 q = dptr[z0+2]; /* RS syndrome */ 111 q = dptr[z0+2]; /* RS syndrome */
120 112
121 raid6_before_sse2(&sa); 113 kernel_fpu_begin();
122 114
123 asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0])); 115 asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
124 asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */ 116 asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
@@ -156,8 +148,8 @@ static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
156 asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16])); 148 asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
157 } 149 }
158 150
159 raid6_after_sse2(&sa);
160 asm volatile("sfence" : : : "memory"); 151 asm volatile("sfence" : : : "memory");
152 kernel_fpu_end();
161} 153}
162 154
163const struct raid6_calls raid6_sse2x2 = { 155const struct raid6_calls raid6_sse2x2 = {
@@ -179,13 +171,12 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
179 u8 **dptr = (u8 **)ptrs; 171 u8 **dptr = (u8 **)ptrs;
180 u8 *p, *q; 172 u8 *p, *q;
181 int d, z, z0; 173 int d, z, z0;
182 raid6_sse16_save_t sa;
183 174
184 z0 = disks - 3; /* Highest data disk */ 175 z0 = disks - 3; /* Highest data disk */
185 p = dptr[z0+1]; /* XOR parity */ 176 p = dptr[z0+1]; /* XOR parity */
186 q = dptr[z0+2]; /* RS syndrome */ 177 q = dptr[z0+2]; /* RS syndrome */
187 178
188 raid6_before_sse16(&sa); 179 kernel_fpu_begin();
189 180
190 asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0])); 181 asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));
191 asm volatile("pxor %xmm2,%xmm2"); /* P[0] */ 182 asm volatile("pxor %xmm2,%xmm2"); /* P[0] */
@@ -256,8 +247,9 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
256 asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48])); 247 asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
257 asm volatile("pxor %xmm14,%xmm14"); 248 asm volatile("pxor %xmm14,%xmm14");
258 } 249 }
250
259 asm volatile("sfence" : : : "memory"); 251 asm volatile("sfence" : : : "memory");
260 raid6_after_sse16(&sa); 252 kernel_fpu_end();
261} 253}
262 254
263const struct raid6_calls raid6_sse2x4 = { 255const struct raid6_calls raid6_sse2x4 = {
diff --git a/drivers/md/raid6x86.h b/drivers/md/raid6x86.h
index 4cf20534fe..9111950414 100644
--- a/drivers/md/raid6x86.h
+++ b/drivers/md/raid6x86.h
@@ -21,224 +21,40 @@
21 21
22#if defined(__i386__) || defined(__x86_64__) 22#if defined(__i386__) || defined(__x86_64__)
23 23
24#ifdef __x86_64__
25
26typedef struct {
27 unsigned int fsave[27];
28 unsigned long cr0;
29} raid6_mmx_save_t __attribute__((aligned(16)));
30
31/* N.B.: For SSE we only save %xmm0-%xmm7 even for x86-64, since
32 the code doesn't know about the additional x86-64 registers */
33typedef struct {
34 unsigned int sarea[8*4+2];
35 unsigned long cr0;
36} raid6_sse_save_t __attribute__((aligned(16)));
37
38/* This is for x86-64-specific code which uses all 16 XMM registers */
39typedef struct {
40 unsigned int sarea[16*4+2];
41 unsigned long cr0;
42} raid6_sse16_save_t __attribute__((aligned(16)));
43
44/* On x86-64 the stack *SHOULD* be 16-byte aligned, but currently this
45 is buggy in the kernel and it's only 8-byte aligned in places, so
46 we need to do this anyway. Sigh. */
47#define SAREA(x) ((unsigned int *)((((unsigned long)&(x)->sarea)+15) & ~15))
48
49#else /* __i386__ */
50
51typedef struct {
52 unsigned int fsave[27];
53 unsigned long cr0;
54} raid6_mmx_save_t;
55
56/* On i386, the stack is only 8-byte aligned, but SSE requires 16-byte
57 alignment. The +3 is so we have the slack space to manually align
58 a properly-sized area correctly. */
59typedef struct {
60 unsigned int sarea[8*4+3];
61 unsigned long cr0;
62} raid6_sse_save_t;
63
64/* Find the 16-byte aligned save area */
65#define SAREA(x) ((unsigned int *)((((unsigned long)&(x)->sarea)+15) & ~15))
66
67#endif
68
69#ifdef __KERNEL__ /* Real code */ 24#ifdef __KERNEL__ /* Real code */
70 25
71/* Note: %cr0 is 32 bits on i386 and 64 bits on x86-64 */ 26#include <asm/i387.h>
72
73static inline unsigned long raid6_get_fpu(void)
74{
75 unsigned long cr0;
76
77 preempt_disable();
78 asm volatile("mov %%cr0,%0 ; clts" : "=r" (cr0));
79 return cr0;
80}
81
82static inline void raid6_put_fpu(unsigned long cr0)
83{
84 asm volatile("mov %0,%%cr0" : : "r" (cr0));
85 preempt_enable();
86}
87 27
88#else /* Dummy code for user space testing */ 28#else /* Dummy code for user space testing */
89 29
90static inline unsigned long raid6_get_fpu(void) 30static inline void kernel_fpu_begin(void)
91{
92 return 0xf00ba6;
93}
94
95static inline void raid6_put_fpu(unsigned long cr0)
96{
97 (void)cr0;
98}
99
100#endif
101
102static inline void raid6_before_mmx(raid6_mmx_save_t *s)
103{
104 s->cr0 = raid6_get_fpu();
105 asm volatile("fsave %0 ; fwait" : "=m" (s->fsave[0]));
106}
107
108static inline void raid6_after_mmx(raid6_mmx_save_t *s)
109{
110 asm volatile("frstor %0" : : "m" (s->fsave[0]));
111 raid6_put_fpu(s->cr0);
112}
113
114static inline void raid6_before_sse(raid6_sse_save_t *s)
115{
116 unsigned int *rsa = SAREA(s);
117
118 s->cr0 = raid6_get_fpu();
119
120 asm volatile("movaps %%xmm0,%0" : "=m" (rsa[0]));
121 asm volatile("movaps %%xmm1,%0" : "=m" (rsa[4]));
122 asm volatile("movaps %%xmm2,%0" : "=m" (rsa[8]));
123 asm volatile("movaps %%xmm3,%0" : "=m" (rsa[12]));
124 asm volatile("movaps %%xmm4,%0" : "=m" (rsa[16]));
125 asm volatile("movaps %%xmm5,%0" : "=m" (rsa[20]));
126 asm volatile("movaps %%xmm6,%0" : "=m" (rsa[24]));
127 asm volatile("movaps %%xmm7,%0" : "=m" (rsa[28]));
128}
129
130static inline void raid6_after_sse(raid6_sse_save_t *s)
131{
132 unsigned int *rsa = SAREA(s);
133
134 asm volatile("movaps %0,%%xmm0" : : "m" (rsa[0]));
135 asm volatile("movaps %0,%%xmm1" : : "m" (rsa[4]));
136 asm volatile("movaps %0,%%xmm2" : : "m" (rsa[8]));
137 asm volatile("movaps %0,%%xmm3" : : "m" (rsa[12]));
138 asm volatile("movaps %0,%%xmm4" : : "m" (rsa[16]));
139 asm volatile("movaps %0,%%xmm5" : : "m" (rsa[20]));
140 asm volatile("movaps %0,%%xmm6" : : "m" (rsa[24]));
141 asm volatile("movaps %0,%%xmm7" : : "m" (rsa[28]));
142
143 raid6_put_fpu(s->cr0);
144}
145
146static inline void raid6_before_sse2(raid6_sse_save_t *s)
147{ 31{
148 unsigned int *rsa = SAREA(s);
149
150 s->cr0 = raid6_get_fpu();
151
152 asm volatile("movdqa %%xmm0,%0" : "=m" (rsa[0]));
153 asm volatile("movdqa %%xmm1,%0" : "=m" (rsa[4]));
154 asm volatile("movdqa %%xmm2,%0" : "=m" (rsa[8]));
155 asm volatile("movdqa %%xmm3,%0" : "=m" (rsa[12]));
156 asm volatile("movdqa %%xmm4,%0" : "=m" (rsa[16]));
157 asm volatile("movdqa %%xmm5,%0" : "=m" (rsa[20]));
158 asm volatile("movdqa %%xmm6,%0" : "=m" (rsa[24]));
159 asm volatile("movdqa %%xmm7,%0" : "=m" (rsa[28]));
160} 32}
161 33
162static inline void raid6_after_sse2(raid6_sse_save_t *s) 34static inline void kernel_fpu_end(void)
163{ 35{
164 unsigned int *rsa = SAREA(s);
165
166 asm volatile("movdqa %0,%%xmm0" : : "m" (rsa[0]));
167 asm volatile("movdqa %0,%%xmm1" : : "m" (rsa[4]));
168 asm volatile("movdqa %0,%%xmm2" : : "m" (rsa[8]));
169 asm volatile("movdqa %0,%%xmm3" : : "m" (rsa[12]));
170 asm volatile("movdqa %0,%%xmm4" : : "m" (rsa[16]));
171 asm volatile("movdqa %0,%%xmm5" : : "m" (rsa[20]));
172 asm volatile("movdqa %0,%%xmm6" : : "m" (rsa[24]));
173 asm volatile("movdqa %0,%%xmm7" : : "m" (rsa[28]));
174
175 raid6_put_fpu(s->cr0);
176} 36}
177 37
178#ifdef __x86_64__ 38#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
179 39#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions
180static inline void raid6_before_sse16(raid6_sse16_save_t *s) 40 * (fast save and restore) */
181{ 41#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */
182 unsigned int *rsa = SAREA(s); 42#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */
183 43#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
184 s->cr0 = raid6_get_fpu();
185 44
186 asm volatile("movdqa %%xmm0,%0" : "=m" (rsa[0])); 45/* Should work well enough on modern CPUs for testing */
187 asm volatile("movdqa %%xmm1,%0" : "=m" (rsa[4])); 46static inline int boot_cpu_has(int flag)
188 asm volatile("movdqa %%xmm2,%0" : "=m" (rsa[8]));
189 asm volatile("movdqa %%xmm3,%0" : "=m" (rsa[12]));
190 asm volatile("movdqa %%xmm4,%0" : "=m" (rsa[16]));
191 asm volatile("movdqa %%xmm5,%0" : "=m" (rsa[20]));
192 asm volatile("movdqa %%xmm6,%0" : "=m" (rsa[24]));
193 asm volatile("movdqa %%xmm7,%0" : "=m" (rsa[28]));
194 asm volatile("movdqa %%xmm8,%0" : "=m" (rsa[32]));
195 asm volatile("movdqa %%xmm9,%0" : "=m" (rsa[36]));
196 asm volatile("movdqa %%xmm10,%0" : "=m" (rsa[40]));
197 asm volatile("movdqa %%xmm11,%0" : "=m" (rsa[44]));
198 asm volatile("movdqa %%xmm12,%0" : "=m" (rsa[48]));
199 asm volatile("movdqa %%xmm13,%0" : "=m" (rsa[52]));
200 asm volatile("movdqa %%xmm14,%0" : "=m" (rsa[56]));
201 asm volatile("movdqa %%xmm15,%0" : "=m" (rsa[60]));
202}
203
204static inline void raid6_after_sse16(raid6_sse16_save_t *s)
205{ 47{
206 unsigned int *rsa = SAREA(s); 48 u32 eax = (flag >> 5) ? 0x80000001 : 1;
49 u32 edx;
207 50
208 asm volatile("movdqa %0,%%xmm0" : : "m" (rsa[0])); 51 asm volatile("cpuid"
209 asm volatile("movdqa %0,%%xmm1" : : "m" (rsa[4])); 52 : "+a" (eax), "=d" (edx)
210 asm volatile("movdqa %0,%%xmm2" : : "m" (rsa[8])); 53 : : "ecx", "ebx");
211 asm volatile("movdqa %0,%%xmm3" : : "m" (rsa[12]));
212 asm volatile("movdqa %0,%%xmm4" : : "m" (rsa[16]));
213 asm volatile("movdqa %0,%%xmm5" : : "m" (rsa[20]));
214 asm volatile("movdqa %0,%%xmm6" : : "m" (rsa[24]));
215 asm volatile("movdqa %0,%%xmm7" : : "m" (rsa[28]));
216 asm volatile("movdqa %0,%%xmm8" : : "m" (rsa[32]));
217 asm volatile("movdqa %0,%%xmm9" : : "m" (rsa[36]));
218 asm volatile("movdqa %0,%%xmm10" : : "m" (rsa[40]));
219 asm volatile("movdqa %0,%%xmm11" : : "m" (rsa[44]));
220 asm volatile("movdqa %0,%%xmm12" : : "m" (rsa[48]));
221 asm volatile("movdqa %0,%%xmm13" : : "m" (rsa[52]));
222 asm volatile("movdqa %0,%%xmm14" : : "m" (rsa[56]));
223 asm volatile("movdqa %0,%%xmm15" : : "m" (rsa[60]));
224 54
225 raid6_put_fpu(s->cr0); 55 return (edx >> (flag & 31)) & 1;
226} 56}
227 57
228#endif /* __x86_64__ */
229
230/* User space test hack */
231#ifndef __KERNEL__
232static inline int cpuid_features(void)
233{
234 u32 eax = 1;
235 u32 ebx, ecx, edx;
236
237 asm volatile("cpuid" :
238 "+a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx));
239
240 return edx;
241}
242#endif /* ndef __KERNEL__ */ 58#endif /* ndef __KERNEL__ */
243 59
244#endif 60#endif
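The raid6 SIMD routines above now bracket their MMX/SSE work with kernel_fpu_begin()/kernel_fpu_end() instead of the hand-rolled register save/restore this header used to provide. A minimal sketch of that bracketing pattern follows (a simplified XOR loop over 16-byte-aligned buffers, not the actual raid6 code; the function name is illustrative):

#include <linux/types.h>
#include <asm/i387.h>   /* kernel_fpu_begin()/kernel_fpu_end() */

/* Between begin and end, preemption is disabled and the FPU/SSE state is
 * saved for us, so the body may clobber the xmm registers freely. */
static void xor_block_sse_sketch(u8 *dst, const u8 *src, size_t bytes)
{
        size_t d;

        kernel_fpu_begin();
        for (d = 0; d < bytes; d += 16) {
                asm volatile("movdqa %0,%%xmm0" : : "m" (src[d]));
                asm volatile("pxor %0,%%xmm0" : : "m" (dst[d]));
                asm volatile("movdqa %%xmm0,%0" : "=m" (dst[d]));
        }
        kernel_fpu_end();
}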
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index 15d12fce34..127a94b9a1 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -469,9 +469,9 @@ static int bluebird_patch_dvico_firmware_download(struct usb_device *udev,
469 fw->data[BLUEBIRD_01_ID_OFFSET + 1] == USB_VID_DVICO >> 8) { 469 fw->data[BLUEBIRD_01_ID_OFFSET + 1] == USB_VID_DVICO >> 8) {
470 470
471 fw->data[BLUEBIRD_01_ID_OFFSET + 2] = 471 fw->data[BLUEBIRD_01_ID_OFFSET + 2] =
472 udev->descriptor.idProduct + 1; 472 le16_to_cpu(udev->descriptor.idProduct) + 1;
473 fw->data[BLUEBIRD_01_ID_OFFSET + 3] = 473 fw->data[BLUEBIRD_01_ID_OFFSET + 3] =
474 udev->descriptor.idProduct >> 8; 474 le16_to_cpu(udev->descriptor.idProduct) >> 8;
475 475
476 return usb_cypress_load_firmware(udev, fw, CYPRESS_FX2); 476 return usb_cypress_load_firmware(udev, fw, CYPRESS_FX2);
477 } 477 }
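The idProduct change above matters on big-endian hosts: USB descriptor fields are stored little-endian on the wire, so they must be byte-swapped before any arithmetic. A tiny sketch of the corrected access (the helper name is illustrative):

#include <linux/usb.h>
#include <asm/byteorder.h>

/* Low byte of the patched product ID, computed on the CPU-endian value. */
static u8 bluebird_patched_pid_low(const struct usb_device *udev)
{
        return (u8)(le16_to_cpu(udev->descriptor.idProduct) + 1);
}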
diff --git a/drivers/media/dvb/dvb-usb/digitv.c b/drivers/media/dvb/dvb-usb/digitv.c
index 4a198d4755..b5acb11c0b 100644
--- a/drivers/media/dvb/dvb-usb/digitv.c
+++ b/drivers/media/dvb/dvb-usb/digitv.c
@@ -119,6 +119,8 @@ static int digitv_nxt6000_tuner_set_params(struct dvb_frontend *fe, struct dvb_f
119 struct dvb_usb_adapter *adap = fe->dvb->priv; 119 struct dvb_usb_adapter *adap = fe->dvb->priv;
120 u8 b[5]; 120 u8 b[5];
121 dvb_usb_tuner_calc_regs(fe,fep,b, 5); 121 dvb_usb_tuner_calc_regs(fe,fep,b, 5);
122 if (fe->ops.i2c_gate_ctrl)
123 fe->ops.i2c_gate_ctrl(fe, 1);
122 return digitv_ctrl_msg(adap->dev, USB_WRITE_TUNER, 0, &b[1], 4, NULL, 0); 124 return digitv_ctrl_msg(adap->dev, USB_WRITE_TUNER, 0, &b[1], 4, NULL, 0);
123} 125}
124 126
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index 682dc7ce48..710c11a682 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -1022,7 +1022,7 @@ static ssize_t cafe_v4l_read(struct file *filp,
1022 char __user *buffer, size_t len, loff_t *pos) 1022 char __user *buffer, size_t len, loff_t *pos)
1023{ 1023{
1024 struct cafe_camera *cam = filp->private_data; 1024 struct cafe_camera *cam = filp->private_data;
1025 int ret; 1025 int ret = 0;
1026 1026
1027 /* 1027 /*
1028 * Perhaps we're in speculative read mode and already 1028 * Perhaps we're in speculative read mode and already
@@ -1251,8 +1251,6 @@ static int cafe_vidioc_reqbufs(struct file *filp, void *priv,
1251 1251
1252 if (cam->n_sbufs == 0) /* no luck at all - ret already set */ 1252 if (cam->n_sbufs == 0) /* no luck at all - ret already set */
1253 kfree(cam->sb_bufs); 1253 kfree(cam->sb_bufs);
1254 else
1255 ret = 0;
1256 req->count = cam->n_sbufs; /* In case of partial success */ 1254 req->count = cam->n_sbufs; /* In case of partial success */
1257 1255
1258 out: 1256 out:
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index cc535ca713..774d253655 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -633,7 +633,7 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd,
633 { 633 {
634 struct v4l2_register *reg = arg; 634 struct v4l2_register *reg = arg;
635 635
636 if (reg->i2c_id != I2C_DRIVERID_CX25840) 636 if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
637 return -EINVAL; 637 return -EINVAL;
638 if (!capable(CAP_SYS_ADMIN)) 638 if (!capable(CAP_SYS_ADMIN))
639 return -EPERM; 639 return -EPERM;
@@ -903,13 +903,13 @@ static int cx25840_detect_client(struct i2c_adapter *adapter, int address,
903 state->vbi_line_offset = 8; 903 state->vbi_line_offset = 8;
904 state->id = id; 904 state->id = id;
905 905
906 i2c_attach_client(client);
907
906 if (state->is_cx25836) 908 if (state->is_cx25836)
907 cx25836_initialize(client); 909 cx25836_initialize(client);
908 else 910 else
909 cx25840_initialize(client, 1); 911 cx25840_initialize(client, 1);
910 912
911 i2c_attach_client(client);
912
913 return 0; 913 return 0;
914} 914}
915 915
diff --git a/drivers/media/video/cx25840/cx25840-firmware.c b/drivers/media/video/cx25840/cx25840-firmware.c
index 1958d4016e..0e86b9d033 100644
--- a/drivers/media/video/cx25840/cx25840-firmware.c
+++ b/drivers/media/video/cx25840/cx25840-firmware.c
@@ -37,7 +37,7 @@
37 */ 37 */
38#define FWSEND 48 38#define FWSEND 48
39 39
40#define FWDEV(x) &((x)->adapter->dev) 40#define FWDEV(x) &((x)->dev)
41 41
42static char *firmware = FWFILE; 42static char *firmware = FWFILE;
43 43
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index a1be1e279d..b0466b88f5 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -56,7 +56,8 @@ MODULE_PARM_DESC(debug,"enable debug messages [blackbird]");
56 56
57/* ------------------------------------------------------------------ */ 57/* ------------------------------------------------------------------ */
58 58
59#define BLACKBIRD_FIRM_IMAGE_SIZE 256*1024 59#define OLD_BLACKBIRD_FIRM_IMAGE_SIZE 262144
60#define BLACKBIRD_FIRM_IMAGE_SIZE 376836
60 61
61/* defines below are from ivtv-driver.h */ 62/* defines below are from ivtv-driver.h */
62 63
@@ -404,7 +405,7 @@ static int blackbird_find_mailbox(struct cx8802_dev *dev)
404 u32 value; 405 u32 value;
405 int i; 406 int i;
406 407
407 for (i = 0; i < BLACKBIRD_FIRM_IMAGE_SIZE; i++) { 408 for (i = 0; i < dev->fw_size; i++) {
408 memory_read(dev->core, i, &value); 409 memory_read(dev->core, i, &value);
409 if (value == signature[signaturecnt]) 410 if (value == signature[signaturecnt])
410 signaturecnt++; 411 signaturecnt++;
@@ -452,12 +453,15 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
452 return -1; 453 return -1;
453 } 454 }
454 455
455 if (firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) { 456 if ((firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) &&
456 dprintk(0, "ERROR: Firmware size mismatch (have %zd, expected %d)\n", 457 (firmware->size != OLD_BLACKBIRD_FIRM_IMAGE_SIZE)) {
457 firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE); 458 dprintk(0, "ERROR: Firmware size mismatch (have %zd, expected %d or %d)\n",
459 firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE,
460 OLD_BLACKBIRD_FIRM_IMAGE_SIZE);
458 release_firmware(firmware); 461 release_firmware(firmware);
459 return -1; 462 return -1;
460 } 463 }
464 dev->fw_size = firmware->size;
461 465
462 if (0 != memcmp(firmware->data, magic, 8)) { 466 if (0 != memcmp(firmware->data, magic, 8)) {
463 dprintk(0, "ERROR: Firmware magic mismatch, wrong file?\n"); 467 dprintk(0, "ERROR: Firmware magic mismatch, wrong file?\n");
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index a97be1bdc3..bdfe2af701 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -1389,7 +1389,7 @@ static int vidioc_g_register (struct file *file, void *fh,
1389{ 1389{
1390 struct cx88_core *core = ((struct cx8800_fh*)fh)->dev->core; 1390 struct cx88_core *core = ((struct cx8800_fh*)fh)->dev->core;
1391 1391
1392 if (reg->i2c_id != 0) 1392 if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
1393 return -EINVAL; 1393 return -EINVAL;
1394 /* cx2388x has a 24-bit register space */ 1394 /* cx2388x has a 24-bit register space */
1395 reg->val = cx_read(reg->reg&0xffffff); 1395 reg->val = cx_read(reg->reg&0xffffff);
@@ -1401,7 +1401,7 @@ static int vidioc_s_register (struct file *file, void *fh,
1401{ 1401{
1402 struct cx88_core *core = ((struct cx8800_fh*)fh)->dev->core; 1402 struct cx88_core *core = ((struct cx8800_fh*)fh)->dev->core;
1403 1403
1404 if (reg->i2c_id != 0) 1404 if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
1405 return -EINVAL; 1405 return -EINVAL;
1406 cx_write(reg->reg&0xffffff, reg->val); 1406 cx_write(reg->reg&0xffffff, reg->val);
1407 return 0; 1407 return 0;
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index d2ecfba9bb..a4f7befda5 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -463,6 +463,7 @@ struct cx8802_dev {
463 u32 mailbox; 463 u32 mailbox;
464 int width; 464 int width;
465 int height; 465 int height;
466 int fw_size;
466 467
467#if defined(CONFIG_VIDEO_BUF_DVB) || defined(CONFIG_VIDEO_BUF_DVB_MODULE) 468#if defined(CONFIG_VIDEO_BUF_DVB) || defined(CONFIG_VIDEO_BUF_DVB_MODULE)
468 /* for dvb only */ 469 /* for dvb only */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index a1ca0f5007..1ff5138e4b 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -1268,7 +1268,7 @@ int pvr2_upload_firmware2(struct pvr2_hdw *hdw)
1268 if (fw_len % sizeof(u32)) { 1268 if (fw_len % sizeof(u32)) {
1269 pvr2_trace(PVR2_TRACE_ERROR_LEGS, 1269 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
1270 "size of %s firmware" 1270 "size of %s firmware"
1271 " must be a multiple of %u bytes", 1271 " must be a multiple of %zu bytes",
1272 fw_files[fwidx],sizeof(u32)); 1272 fw_files[fwidx],sizeof(u32));
1273 release_firmware(fw_entry); 1273 release_firmware(fw_entry);
1274 return -1; 1274 return -1;
@@ -3256,8 +3256,8 @@ static int pvr2_hdw_get_eeprom_addr(struct pvr2_hdw *hdw)
3256 3256
3257 3257
3258int pvr2_hdw_register_access(struct pvr2_hdw *hdw, 3258int pvr2_hdw_register_access(struct pvr2_hdw *hdw,
3259 u32 chip_id, u64 reg_id, 3259 u32 match_type, u32 match_chip, u64 reg_id,
3260 int setFl,u32 *val_ptr) 3260 int setFl,u64 *val_ptr)
3261{ 3261{
3262#ifdef CONFIG_VIDEO_ADV_DEBUG 3262#ifdef CONFIG_VIDEO_ADV_DEBUG
3263 struct list_head *item; 3263 struct list_head *item;
@@ -3268,13 +3268,16 @@ int pvr2_hdw_register_access(struct pvr2_hdw *hdw,
3268 3268
3269 if (!capable(CAP_SYS_ADMIN)) return -EPERM; 3269 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
3270 3270
3271 req.i2c_id = chip_id; 3271 req.match_type = match_type;
3272 req.match_chip = match_chip;
3272 req.reg = reg_id; 3273 req.reg = reg_id;
3273 if (setFl) req.val = *val_ptr; 3274 if (setFl) req.val = *val_ptr;
3274 mutex_lock(&hdw->i2c_list_lock); do { 3275 mutex_lock(&hdw->i2c_list_lock); do {
3275 list_for_each(item,&hdw->i2c_clients) { 3276 list_for_each(item,&hdw->i2c_clients) {
3276 cp = list_entry(item,struct pvr2_i2c_client,list); 3277 cp = list_entry(item,struct pvr2_i2c_client,list);
3277 if (cp->client->driver->id != chip_id) continue; 3278 if (!v4l2_chip_match_i2c_client(cp->client, req.match_type, req.match_chip)) {
3279 continue;
3280 }
3278 stat = pvr2_i2c_client_cmd( 3281 stat = pvr2_i2c_client_cmd(
3279 cp,(setFl ? VIDIOC_DBG_S_REGISTER : 3282 cp,(setFl ? VIDIOC_DBG_S_REGISTER :
3280 VIDIOC_DBG_G_REGISTER),&req); 3283 VIDIOC_DBG_G_REGISTER),&req);
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.h b/drivers/media/video/pvrusb2/pvrusb2-hdw.h
index 566a8ef7e1..0c9cca43ff 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.h
@@ -217,13 +217,14 @@ void pvr2_hdw_v4l_store_minor_number(struct pvr2_hdw *,
217 enum pvr2_v4l_type index,int); 217 enum pvr2_v4l_type index,int);
218 218
219/* Direct read/write access to chip's registers: 219/* Direct read/write access to chip's registers:
220 chip_id - unique id of chip (e.g. I2C_DRIVERD_xxxx) 220 match_type - how to interpret match_chip (e.g. driver ID, i2c address)
221 match_chip - chip match value (e.g. I2C_DRIVERD_xxxx)
221 reg_id - register number to access 222 reg_id - register number to access
222 setFl - true to set the register, false to read it 223 setFl - true to set the register, false to read it
223 val_ptr - storage location for source / result. */ 224 val_ptr - storage location for source / result. */
224int pvr2_hdw_register_access(struct pvr2_hdw *, 225int pvr2_hdw_register_access(struct pvr2_hdw *,
225 u32 chip_id,u64 reg_id, 226 u32 match_type, u32 match_chip,u64 reg_id,
226 int setFl,u32 *val_ptr); 227 int setFl,u64 *val_ptr);
227 228
228/* The following entry points are all lower level things you normally don't 229/* The following entry points are all lower level things you normally don't
229 want to worry about. */ 230 want to worry about. */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index 4fe4136204..5313d34266 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -740,11 +740,11 @@ static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file,
740 case VIDIOC_DBG_S_REGISTER: 740 case VIDIOC_DBG_S_REGISTER:
741 case VIDIOC_DBG_G_REGISTER: 741 case VIDIOC_DBG_G_REGISTER:
742 { 742 {
743 u32 val; 743 u64 val;
744 struct v4l2_register *req = (struct v4l2_register *)arg; 744 struct v4l2_register *req = (struct v4l2_register *)arg;
745 if (cmd == VIDIOC_DBG_S_REGISTER) val = req->val; 745 if (cmd == VIDIOC_DBG_S_REGISTER) val = req->val;
746 ret = pvr2_hdw_register_access( 746 ret = pvr2_hdw_register_access(
747 hdw,req->i2c_id,req->reg, 747 hdw,req->match_type,req->match_chip,req->reg,
748 cmd == VIDIOC_DBG_S_REGISTER,&val); 748 cmd == VIDIOC_DBG_S_REGISTER,&val);
749 if (cmd == VIDIOC_DBG_G_REGISTER) req->val = val; 749 if (cmd == VIDIOC_DBG_G_REGISTER) req->val = val;
750 break; 750 break;
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index c4f066d666..7735b67589 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -1425,7 +1425,7 @@ static int saa711x_command(struct i2c_client *client, unsigned int cmd, void *ar
1425 { 1425 {
1426 struct v4l2_register *reg = arg; 1426 struct v4l2_register *reg = arg;
1427 1427
1428 if (reg->i2c_id != I2C_DRIVERID_SAA711X) 1428 if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
1429 return -EINVAL; 1429 return -EINVAL;
1430 if (!capable(CAP_SYS_ADMIN)) 1430 if (!capable(CAP_SYS_ADMIN))
1431 return -EPERM; 1431 return -EPERM;
diff --git a/drivers/media/video/saa7127.c b/drivers/media/video/saa7127.c
index bd9c4f3ad0..654863db15 100644
--- a/drivers/media/video/saa7127.c
+++ b/drivers/media/video/saa7127.c
@@ -619,7 +619,7 @@ static int saa7127_command(struct i2c_client *client,
619 { 619 {
620 struct v4l2_register *reg = arg; 620 struct v4l2_register *reg = arg;
621 621
622 if (reg->i2c_id != I2C_DRIVERID_SAA7127) 622 if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
623 return -EINVAL; 623 return -EINVAL;
624 if (!capable(CAP_SYS_ADMIN)) 624 if (!capable(CAP_SYS_ADMIN))
625 return -EPERM; 625 return -EPERM;
diff --git a/drivers/media/video/tvp5150.c b/drivers/media/video/tvp5150.c
index 886b5df7c9..d5ec05f56a 100644
--- a/drivers/media/video/tvp5150.c
+++ b/drivers/media/video/tvp5150.c
@@ -955,7 +955,7 @@ static int tvp5150_command(struct i2c_client *c,
955 { 955 {
956 struct v4l2_register *reg = arg; 956 struct v4l2_register *reg = arg;
957 957
958 if (reg->i2c_id != I2C_DRIVERID_TVP5150) 958 if (!v4l2_chip_match_i2c_client(c, reg->match_type, reg->match_chip))
959 return -EINVAL; 959 return -EINVAL;
960 if (!capable(CAP_SYS_ADMIN)) 960 if (!capable(CAP_SYS_ADMIN))
961 return -EPERM; 961 return -EPERM;
diff --git a/drivers/media/video/upd64031a.c b/drivers/media/video/upd64031a.c
index b3b5fd536d..28d1133a3b 100644
--- a/drivers/media/video/upd64031a.c
+++ b/drivers/media/video/upd64031a.c
@@ -167,7 +167,7 @@ static int upd64031a_command(struct i2c_client *client, unsigned int cmd, void *
167 { 167 {
168 struct v4l2_register *reg = arg; 168 struct v4l2_register *reg = arg;
169 169
170 if (reg->i2c_id != I2C_DRIVERID_UPD64031A) 170 if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
171 return -EINVAL; 171 return -EINVAL;
172 if (!capable(CAP_SYS_ADMIN)) 172 if (!capable(CAP_SYS_ADMIN))
173 return -EPERM; 173 return -EPERM;
diff --git a/drivers/media/video/upd64083.c b/drivers/media/video/upd64083.c
index 8852903e7a..fe38224150 100644
--- a/drivers/media/video/upd64083.c
+++ b/drivers/media/video/upd64083.c
@@ -144,7 +144,7 @@ static int upd64083_command(struct i2c_client *client, unsigned int cmd, void *a
144 { 144 {
145 struct v4l2_register *reg = arg; 145 struct v4l2_register *reg = arg;
146 146
147 if (reg->i2c_id != I2C_DRIVERID_UPD64083) 147 if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
148 return -EINVAL; 148 return -EINVAL;
149 if (!capable(CAP_SYS_ADMIN)) 149 if (!capable(CAP_SYS_ADMIN))
150 return -EPERM; 150 return -EPERM;
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index ae5f42562c..6fc14557d6 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -521,7 +521,7 @@ static int usbvision_v4l2_do_ioctl(struct inode *inode, struct file *file,
521 struct v4l2_register *reg = arg; 521 struct v4l2_register *reg = arg;
522 int errCode; 522 int errCode;
523 523
524 if (reg->i2c_id != 0) 524 if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
525 return -EINVAL; 525 return -EINVAL;
526 if (!capable(CAP_SYS_ADMIN)) 526 if (!capable(CAP_SYS_ADMIN))
527 return -EPERM; 527 return -EPERM;
@@ -540,7 +540,7 @@ static int usbvision_v4l2_do_ioctl(struct inode *inode, struct file *file,
540 540
541 PDEBUG(DBG_IOCTL, "VIDIOC_DBG_%c_REGISTER reg=0x%02X, value=0x%02X", 541 PDEBUG(DBG_IOCTL, "VIDIOC_DBG_%c_REGISTER reg=0x%02X, value=0x%02X",
542 cmd == VIDIOC_DBG_G_REGISTER ? 'G' : 'S', 542 cmd == VIDIOC_DBG_G_REGISTER ? 'G' : 'S',
543 (unsigned int)reg->reg, reg->val); 543 (unsigned int)reg->reg, (unsigned int)reg->val);
544 return 0; 544 return 0;
545 } 545 }
546#endif 546#endif
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index ddfd80c561..54747606ea 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -51,6 +51,7 @@
51#include <linux/mm.h> 51#include <linux/mm.h>
52#include <linux/string.h> 52#include <linux/string.h>
53#include <linux/errno.h> 53#include <linux/errno.h>
54#include <linux/i2c.h>
54#include <asm/uaccess.h> 55#include <asm/uaccess.h>
55#include <asm/system.h> 56#include <asm/system.h>
56#include <asm/pgtable.h> 57#include <asm/pgtable.h>
@@ -365,13 +366,21 @@ static const char *v4l2_ioctls[] = {
365 [_IOC_NR(VIDIOC_ENUMAUDOUT)] = "VIDIOC_ENUMAUDOUT", 366 [_IOC_NR(VIDIOC_ENUMAUDOUT)] = "VIDIOC_ENUMAUDOUT",
366 [_IOC_NR(VIDIOC_G_PRIORITY)] = "VIDIOC_G_PRIORITY", 367 [_IOC_NR(VIDIOC_G_PRIORITY)] = "VIDIOC_G_PRIORITY",
367 [_IOC_NR(VIDIOC_S_PRIORITY)] = "VIDIOC_S_PRIORITY", 368 [_IOC_NR(VIDIOC_S_PRIORITY)] = "VIDIOC_S_PRIORITY",
368#if 1
369 [_IOC_NR(VIDIOC_G_SLICED_VBI_CAP)] = "VIDIOC_G_SLICED_VBI_CAP", 369 [_IOC_NR(VIDIOC_G_SLICED_VBI_CAP)] = "VIDIOC_G_SLICED_VBI_CAP",
370#endif
371 [_IOC_NR(VIDIOC_LOG_STATUS)] = "VIDIOC_LOG_STATUS", 370 [_IOC_NR(VIDIOC_LOG_STATUS)] = "VIDIOC_LOG_STATUS",
372 [_IOC_NR(VIDIOC_G_EXT_CTRLS)] = "VIDIOC_G_EXT_CTRLS", 371 [_IOC_NR(VIDIOC_G_EXT_CTRLS)] = "VIDIOC_G_EXT_CTRLS",
373 [_IOC_NR(VIDIOC_S_EXT_CTRLS)] = "VIDIOC_S_EXT_CTRLS", 372 [_IOC_NR(VIDIOC_S_EXT_CTRLS)] = "VIDIOC_S_EXT_CTRLS",
374 [_IOC_NR(VIDIOC_TRY_EXT_CTRLS)] = "VIDIOC_TRY_EXT_CTRLS" 373 [_IOC_NR(VIDIOC_TRY_EXT_CTRLS)] = "VIDIOC_TRY_EXT_CTRLS",
374#if 1
375 [_IOC_NR(VIDIOC_ENUM_FRAMESIZES)] = "VIDIOC_ENUM_FRAMESIZES",
376 [_IOC_NR(VIDIOC_ENUM_FRAMEINTERVALS)] = "VIDIOC_ENUM_FRAMEINTERVALS",
377 [_IOC_NR(VIDIOC_G_ENC_INDEX)] = "VIDIOC_G_ENC_INDEX",
378 [_IOC_NR(VIDIOC_ENCODER_CMD)] = "VIDIOC_ENCODER_CMD",
379 [_IOC_NR(VIDIOC_TRY_ENCODER_CMD)] = "VIDIOC_TRY_ENCODER_CMD",
380
381 [_IOC_NR(VIDIOC_DBG_S_REGISTER)] = "VIDIOC_DBG_S_REGISTER",
382 [_IOC_NR(VIDIOC_DBG_G_REGISTER)] = "VIDIOC_DBG_G_REGISTER",
383#endif
375}; 384};
376#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls) 385#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
377 386
@@ -395,9 +404,6 @@ static const char *v4l2_int_ioctls[] = {
395 [_IOC_NR(TUNER_SET_STANDBY)] = "TUNER_SET_STANDBY", 404 [_IOC_NR(TUNER_SET_STANDBY)] = "TUNER_SET_STANDBY",
396 [_IOC_NR(TDA9887_SET_CONFIG)] = "TDA9887_SET_CONFIG", 405 [_IOC_NR(TDA9887_SET_CONFIG)] = "TDA9887_SET_CONFIG",
397 406
398 [_IOC_NR(VIDIOC_DBG_S_REGISTER)] = "VIDIOC_DBG_S_REGISTER",
399 [_IOC_NR(VIDIOC_DBG_G_REGISTER)] = "VIDIOC_DBG_G_REGISTER",
400
401 [_IOC_NR(VIDIOC_INT_S_TUNER_MODE)] = "VIDIOC_INT_S_TUNER_MODE", 407 [_IOC_NR(VIDIOC_INT_S_TUNER_MODE)] = "VIDIOC_INT_S_TUNER_MODE",
402 [_IOC_NR(VIDIOC_INT_RESET)] = "VIDIOC_INT_RESET", 408 [_IOC_NR(VIDIOC_INT_RESET)] = "VIDIOC_INT_RESET",
403 [_IOC_NR(VIDIOC_INT_AUDIO_CLOCK_FREQ)] = "VIDIOC_INT_AUDIO_CLOCK_FREQ", 409 [_IOC_NR(VIDIOC_INT_AUDIO_CLOCK_FREQ)] = "VIDIOC_INT_AUDIO_CLOCK_FREQ",
@@ -947,6 +953,28 @@ u32 v4l2_ctrl_next(const u32 * const * ctrl_classes, u32 id)
947 return **ctrl_classes; 953 return **ctrl_classes;
948} 954}
949 955
956int v4l2_chip_match_i2c_client(struct i2c_client *c, u32 match_type, u32 match_chip)
957{
958 switch (match_type) {
959 case V4L2_CHIP_MATCH_I2C_DRIVER:
960 return (c != NULL && c->driver != NULL && c->driver->id == match_chip);
961 case V4L2_CHIP_MATCH_I2C_ADDR:
962 return (c != NULL && c->addr == match_chip);
963 default:
964 return 0;
965 }
966}
967
968int v4l2_chip_match_host(u32 match_type, u32 match_chip)
969{
970 switch (match_type) {
971 case V4L2_CHIP_MATCH_HOST:
972 return match_chip == 0;
973 default:
974 return 0;
975 }
976}
977
950/* ----------------------------------------------------------------- */ 978/* ----------------------------------------------------------------- */
951 979
952EXPORT_SYMBOL(v4l2_norm_to_name); 980EXPORT_SYMBOL(v4l2_norm_to_name);
@@ -970,6 +998,9 @@ EXPORT_SYMBOL(v4l2_ctrl_query_menu);
970EXPORT_SYMBOL(v4l2_ctrl_query_fill); 998EXPORT_SYMBOL(v4l2_ctrl_query_fill);
971EXPORT_SYMBOL(v4l2_ctrl_query_fill_std); 999EXPORT_SYMBOL(v4l2_ctrl_query_fill_std);
972 1000
1001EXPORT_SYMBOL(v4l2_chip_match_i2c_client);
1002EXPORT_SYMBOL(v4l2_chip_match_host);
1003
973/* 1004/*
974 * Local variables: 1005 * Local variables:
975 * c-basic-offset: 8 1006 * c-basic-offset: 8
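The two helpers exported above are what the per-driver VIDIOC_DBG_G/S_REGISTER hunks in this patch switch to. A minimal sketch of the i2c-client side of that idiom (the register accessor is a hypothetical placeholder, not a real driver function):

#include <linux/i2c.h>
#include <linux/capability.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>

static int demo_dbg_g_register(struct i2c_client *client,
                               struct v4l2_register *reg)
{
        if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
                return -EINVAL;
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        reg->val = demo_read_reg(client, reg->reg);  /* hypothetical accessor */
        return 0;
}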
diff --git a/drivers/media/video/videodev.c b/drivers/media/video/videodev.c
index dc9b1ef678..011938fb7e 100644
--- a/drivers/media/video/videodev.c
+++ b/drivers/media/video/videodev.c
@@ -1342,6 +1342,42 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
1342 ret=vfd->vidioc_s_jpegcomp(file, fh, p); 1342 ret=vfd->vidioc_s_jpegcomp(file, fh, p);
1343 break; 1343 break;
1344 } 1344 }
1345 case VIDIOC_G_ENC_INDEX:
1346 {
1347 struct v4l2_enc_idx *p=arg;
1348
1349 if (!vfd->vidioc_g_enc_index)
1350 break;
1351 ret=vfd->vidioc_g_enc_index(file, fh, p);
1352 if (!ret)
1353 dbgarg (cmd, "entries=%d, entries_cap=%d\n",
1354 p->entries,p->entries_cap);
1355 break;
1356 }
1357 case VIDIOC_ENCODER_CMD:
1358 {
1359 struct v4l2_encoder_cmd *p=arg;
1360
1361 if (!vfd->vidioc_encoder_cmd)
1362 break;
1363 ret=vfd->vidioc_encoder_cmd(file, fh, p);
1364 if (!ret)
1365 dbgarg (cmd, "cmd=%d, flags=%d\n",
1366 p->cmd,p->flags);
1367 break;
1368 }
1369 case VIDIOC_TRY_ENCODER_CMD:
1370 {
1371 struct v4l2_encoder_cmd *p=arg;
1372
1373 if (!vfd->vidioc_try_encoder_cmd)
1374 break;
1375 ret=vfd->vidioc_try_encoder_cmd(file, fh, p);
1376 if (!ret)
1377 dbgarg (cmd, "cmd=%d, flags=%d\n",
1378 p->cmd,p->flags);
1379 break;
1380 }
1345 case VIDIOC_G_PARM: 1381 case VIDIOC_G_PARM:
1346 { 1382 {
1347 struct v4l2_streamparm *p=arg; 1383 struct v4l2_streamparm *p=arg;
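A driver opts into the encoder ioctls dispatched above by filling the matching video_device hooks; a bare-bones sketch of a vidioc_encoder_cmd handler (purely illustrative, not taken from any driver in this patch):

#include <linux/fs.h>
#include <linux/videodev2.h>

static int demo_encoder_cmd(struct file *file, void *fh,
                            struct v4l2_encoder_cmd *cmd)
{
        /* Accept only start/stop in this sketch; real drivers also honour
         * pause/resume and the command flags. */
        if (cmd->cmd != V4L2_ENC_CMD_START && cmd->cmd != V4L2_ENC_CMD_STOP)
                return -EINVAL;
        return 0;
}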
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 5046a16613..4a73e8b242 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -376,10 +376,11 @@ static inline void mmc_set_ios(struct mmc_host *host)
376{ 376{
377 struct mmc_ios *ios = &host->ios; 377 struct mmc_ios *ios = &host->ios;
378 378
379 pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n", 379 pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
380 "width %u timing %u\n",
380 mmc_hostname(host), ios->clock, ios->bus_mode, 381 mmc_hostname(host), ios->clock, ios->bus_mode,
381 ios->power_mode, ios->chip_select, ios->vdd, 382 ios->power_mode, ios->chip_select, ios->vdd,
382 ios->bus_width); 383 ios->bus_width, ios->timing);
383 384
384 host->ops->set_ios(host, ios); 385 host->ops->set_ios(host, ios);
385} 386}
@@ -809,6 +810,7 @@ static void mmc_power_up(struct mmc_host *host)
809 host->ios.chip_select = MMC_CS_DONTCARE; 810 host->ios.chip_select = MMC_CS_DONTCARE;
810 host->ios.power_mode = MMC_POWER_UP; 811 host->ios.power_mode = MMC_POWER_UP;
811 host->ios.bus_width = MMC_BUS_WIDTH_1; 812 host->ios.bus_width = MMC_BUS_WIDTH_1;
813 host->ios.timing = MMC_TIMING_LEGACY;
812 mmc_set_ios(host); 814 mmc_set_ios(host);
813 815
814 mmc_delay(1); 816 mmc_delay(1);
@@ -828,6 +830,7 @@ static void mmc_power_off(struct mmc_host *host)
828 host->ios.chip_select = MMC_CS_DONTCARE; 830 host->ios.chip_select = MMC_CS_DONTCARE;
829 host->ios.power_mode = MMC_POWER_OFF; 831 host->ios.power_mode = MMC_POWER_OFF;
830 host->ios.bus_width = MMC_BUS_WIDTH_1; 832 host->ios.bus_width = MMC_BUS_WIDTH_1;
833 host->ios.timing = MMC_TIMING_LEGACY;
831 mmc_set_ios(host); 834 mmc_set_ios(host);
832} 835}
833 836
@@ -1112,46 +1115,50 @@ static void mmc_process_ext_csds(struct mmc_host *host)
1112 continue; 1115 continue;
1113 } 1116 }
1114 1117
1115 /* Activate highspeed support. */ 1118 if (host->caps & MMC_CAP_MMC_HIGHSPEED) {
1116 cmd.opcode = MMC_SWITCH; 1119 /* Activate highspeed support. */
1117 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | 1120 cmd.opcode = MMC_SWITCH;
1118 (EXT_CSD_HS_TIMING << 16) | 1121 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
1119 (1 << 8) | 1122 (EXT_CSD_HS_TIMING << 16) |
1120 EXT_CSD_CMD_SET_NORMAL; 1123 (1 << 8) |
1121 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; 1124 EXT_CSD_CMD_SET_NORMAL;
1125 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
1122 1126
1123 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); 1127 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
1124 if (err != MMC_ERR_NONE) { 1128 if (err != MMC_ERR_NONE) {
1125 printk("%s: failed to switch card to mmc v4 " 1129 printk("%s: failed to switch card to mmc v4 "
1126 "high-speed mode.\n", 1130 "high-speed mode.\n",
1127 mmc_hostname(card->host)); 1131 mmc_hostname(card->host));
1128 continue; 1132 continue;
1129 } 1133 }
1130 1134
1131 mmc_card_set_highspeed(card); 1135 mmc_card_set_highspeed(card);
1132 1136
1133 /* Check for host support for wide-bus modes. */ 1137 host->ios.timing = MMC_TIMING_SD_HS;
1134 if (!(host->caps & MMC_CAP_4_BIT_DATA)) { 1138 mmc_set_ios(host);
1135 continue;
1136 } 1139 }
1137 1140
1138 /* Activate 4-bit support. */ 1141 /* Check for host support for wide-bus modes. */
1139 cmd.opcode = MMC_SWITCH; 1142 if (host->caps & MMC_CAP_4_BIT_DATA) {
1140 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | 1143 /* Activate 4-bit support. */
1141 (EXT_CSD_BUS_WIDTH << 16) | 1144 cmd.opcode = MMC_SWITCH;
1142 (EXT_CSD_BUS_WIDTH_4 << 8) | 1145 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
1143 EXT_CSD_CMD_SET_NORMAL; 1146 (EXT_CSD_BUS_WIDTH << 16) |
1144 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; 1147 (EXT_CSD_BUS_WIDTH_4 << 8) |
1148 EXT_CSD_CMD_SET_NORMAL;
1149 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
1145 1150
1146 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); 1151 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
1147 if (err != MMC_ERR_NONE) { 1152 if (err != MMC_ERR_NONE) {
1148 printk("%s: failed to switch card to " 1153 printk("%s: failed to switch card to "
1149 "mmc v4 4-bit bus mode.\n", 1154 "mmc v4 4-bit bus mode.\n",
1150 mmc_hostname(card->host)); 1155 mmc_hostname(card->host));
1151 continue; 1156 continue;
1152 } 1157 }
1153 1158
1154 host->ios.bus_width = MMC_BUS_WIDTH_4; 1159 host->ios.bus_width = MMC_BUS_WIDTH_4;
1160 mmc_set_ios(host);
1161 }
1155 } 1162 }
1156 1163
1157 kfree(ext_csd); 1164 kfree(ext_csd);
@@ -1241,6 +1248,9 @@ static void mmc_read_switch_caps(struct mmc_host *host)
1241 unsigned char *status; 1248 unsigned char *status;
1242 struct scatterlist sg; 1249 struct scatterlist sg;
1243 1250
1251 if (!(host->caps & MMC_CAP_SD_HIGHSPEED))
1252 return;
1253
1244 status = kmalloc(64, GFP_KERNEL); 1254 status = kmalloc(64, GFP_KERNEL);
1245 if (!status) { 1255 if (!status) {
1246 printk(KERN_WARNING "%s: Unable to allocate buffer for " 1256 printk(KERN_WARNING "%s: Unable to allocate buffer for "
@@ -1332,6 +1342,9 @@ static void mmc_read_switch_caps(struct mmc_host *host)
1332 } 1342 }
1333 1343
1334 mmc_card_set_highspeed(card); 1344 mmc_card_set_highspeed(card);
1345
1346 host->ios.timing = MMC_TIMING_SD_HS;
1347 mmc_set_ios(host);
1335 } 1348 }
1336 1349
1337 kfree(status); 1350 kfree(status);
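The recurring pattern in the mmc.c hunks above: once the card acknowledges the high-speed switch, the host controller is informed through ios.timing so it can reconfigure its sampling. A condensed sketch of that sequence (mmc_set_ios() is static to mmc.c, so the sketch calls the host op directly; error handling omitted and the function name is illustrative):

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

static void demo_enable_sd_highspeed(struct mmc_host *host,
                                     struct mmc_card *card)
{
        mmc_card_set_highspeed(card);           /* remember the card's mode */
        host->ios.timing = MMC_TIMING_SD_HS;    /* tell the controller */
        host->ops->set_ios(host, &host->ios);
}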
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c
index 7522f76b15..d749f08601 100644
--- a/drivers/mmc/sdhci.c
+++ b/drivers/mmc/sdhci.c
@@ -606,7 +606,6 @@ static void sdhci_finish_command(struct sdhci_host *host)
606static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 606static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
607{ 607{
608 int div; 608 int div;
609 u8 ctrl;
610 u16 clk; 609 u16 clk;
611 unsigned long timeout; 610 unsigned long timeout;
612 611
@@ -615,13 +614,6 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
615 614
616 writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL); 615 writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
617 616
618 ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
619 if (clock > 25000000)
620 ctrl |= SDHCI_CTRL_HISPD;
621 else
622 ctrl &= ~SDHCI_CTRL_HISPD;
623 writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
624
625 if (clock == 0) 617 if (clock == 0)
626 goto out; 618 goto out;
627 619
@@ -761,10 +753,17 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
761 sdhci_set_power(host, ios->vdd); 753 sdhci_set_power(host, ios->vdd);
762 754
763 ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL); 755 ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
756
764 if (ios->bus_width == MMC_BUS_WIDTH_4) 757 if (ios->bus_width == MMC_BUS_WIDTH_4)
765 ctrl |= SDHCI_CTRL_4BITBUS; 758 ctrl |= SDHCI_CTRL_4BITBUS;
766 else 759 else
767 ctrl &= ~SDHCI_CTRL_4BITBUS; 760 ctrl &= ~SDHCI_CTRL_4BITBUS;
761
762 if (ios->timing == MMC_TIMING_SD_HS)
763 ctrl |= SDHCI_CTRL_HISPD;
764 else
765 ctrl &= ~SDHCI_CTRL_HISPD;
766
768 writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); 767 writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
769 768
770 mmiowb(); 769 mmiowb();
@@ -994,7 +993,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
994 993
995 intmask = readl(host->ioaddr + SDHCI_INT_STATUS); 994 intmask = readl(host->ioaddr + SDHCI_INT_STATUS);
996 995
997 if (!intmask) { 996 if (!intmask || intmask == 0xffffffff) {
998 result = IRQ_NONE; 997 result = IRQ_NONE;
999 goto out; 998 goto out;
1000 } 999 }
@@ -1080,6 +1079,13 @@ static int sdhci_suspend (struct pci_dev *pdev, pm_message_t state)
1080 1079
1081 pci_save_state(pdev); 1080 pci_save_state(pdev);
1082 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); 1081 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
1082
1083 for (i = 0;i < chip->num_slots;i++) {
1084 if (!chip->hosts[i])
1085 continue;
1086 free_irq(chip->hosts[i]->irq, chip->hosts[i]);
1087 }
1088
1083 pci_disable_device(pdev); 1089 pci_disable_device(pdev);
1084 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1090 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1085 1091
@@ -1108,6 +1114,11 @@ static int sdhci_resume (struct pci_dev *pdev)
1108 continue; 1114 continue;
1109 if (chip->hosts[i]->flags & SDHCI_USE_DMA) 1115 if (chip->hosts[i]->flags & SDHCI_USE_DMA)
1110 pci_set_master(pdev); 1116 pci_set_master(pdev);
1117 ret = request_irq(chip->hosts[i]->irq, sdhci_irq,
1118 IRQF_SHARED, chip->hosts[i]->slot_descr,
1119 chip->hosts[i]);
1120 if (ret)
1121 return ret;
1111 sdhci_init(chip->hosts[i]); 1122 sdhci_init(chip->hosts[i]);
1112 mmiowb(); 1123 mmiowb();
1113 ret = mmc_resume_host(chip->hosts[i]->mmc); 1124 ret = mmc_resume_host(chip->hosts[i]->mmc);
@@ -1274,6 +1285,9 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1274 mmc->f_max = host->max_clk; 1285 mmc->f_max = host->max_clk;
1275 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK; 1286 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK;
1276 1287
1288 if (caps & SDHCI_CAN_DO_HISPD)
1289 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
1290
1277 mmc->ocr_avail = 0; 1291 mmc->ocr_avail = 0;
1278 if (caps & SDHCI_CAN_VDD_330) 1292 if (caps & SDHCI_CAN_VDD_330)
1279 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; 1293 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
@@ -1282,13 +1296,6 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1282 if (caps & SDHCI_CAN_VDD_180) 1296 if (caps & SDHCI_CAN_VDD_180)
1283 mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19; 1297 mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19;
1284 1298
1285 if ((host->max_clk > 25000000) && !(caps & SDHCI_CAN_DO_HISPD)) {
1286 printk(KERN_ERR "%s: Controller reports > 25 MHz base clock,"
1287 " but no high speed support.\n",
1288 host->slot_descr);
1289 mmc->f_max = 25000000;
1290 }
1291
1292 if (mmc->ocr_avail == 0) { 1299 if (mmc->ocr_avail == 0) {
1293 printk(KERN_ERR "%s: Hardware doesn't report any " 1300 printk(KERN_ERR "%s: Hardware doesn't report any "
1294 "support voltages.\n", host->slot_descr); 1301 "support voltages.\n", host->slot_descr);
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 716a47210a..72995777f8 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -822,11 +822,17 @@ static int vortex_resume(struct pci_dev *pdev)
822{ 822{
823 struct net_device *dev = pci_get_drvdata(pdev); 823 struct net_device *dev = pci_get_drvdata(pdev);
824 struct vortex_private *vp = netdev_priv(dev); 824 struct vortex_private *vp = netdev_priv(dev);
825 int err;
825 826
826 if (dev && vp) { 827 if (dev && vp) {
827 pci_set_power_state(pdev, PCI_D0); 828 pci_set_power_state(pdev, PCI_D0);
828 pci_restore_state(pdev); 829 pci_restore_state(pdev);
829 pci_enable_device(pdev); 830 err = pci_enable_device(pdev);
831 if (err) {
832 printk(KERN_WARNING "%s: Could not enable device \n",
833 dev->name);
834 return err;
835 }
830 pci_set_master(pdev); 836 pci_set_master(pdev);
831 if (request_irq(dev->irq, vp->full_bus_master_rx ? 837 if (request_irq(dev->irq, vp->full_bus_master_rx ?
832 &boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev)) { 838 &boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev)) {
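The 3c59x resume fix above follows a common idiom: pci_enable_device() can fail after suspend, so its return value is checked and propagated instead of being ignored. A stripped-down sketch of that resume path (illustrative, not the driver's actual code):

#include <linux/pci.h>

static int demo_resume(struct pci_dev *pdev)
{
        int err;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        err = pci_enable_device(pdev);
        if (err)
                return err;             /* propagate the failure */
        pci_set_master(pdev);
        return 0;
}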
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 6f93a765e5..12c8453f44 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -448,8 +448,7 @@ static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
448 spin_lock_irqsave(&cp->lock, flags); 448 spin_lock_irqsave(&cp->lock, flags);
449 cp->cpcmd &= ~RxVlanOn; 449 cp->cpcmd &= ~RxVlanOn;
450 cpw16(CpCmd, cp->cpcmd); 450 cpw16(CpCmd, cp->cpcmd);
451 if (cp->vlgrp) 451 vlan_group_set_device(cp->vlgrp, vid, NULL);
452 cp->vlgrp->vlan_devices[vid] = NULL;
453 spin_unlock_irqrestore(&cp->lock, flags); 452 spin_unlock_irqrestore(&cp->lock, flags);
454} 453}
455#endif /* CP_VLAN_TAG_USED */ 454#endif /* CP_VLAN_TAG_USED */
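The same conversion repeats in the network drivers below: direct indexing of vlgrp->vlan_devices[] is replaced by the vlan_group accessors, whose set variant quietly ignores a NULL group and therefore lets the drivers drop their own NULL checks. A minimal sketch of the idiom (function name illustrative):

#include <linux/if_vlan.h>

static void demo_vlan_rx_kill_vid(struct vlan_group *grp, unsigned short vid)
{
        /* vlan_group_set_device() is NULL-safe, so no "if (grp)" is needed. */
        vlan_group_set_device(grp, vid, NULL);
}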
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 38ac6796fc..5ff0922e62 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2245,7 +2245,7 @@ config BNX2
2245 2245
2246config SPIDER_NET 2246config SPIDER_NET
2247 tristate "Spider Gigabit Ethernet driver" 2247 tristate "Spider Gigabit Ethernet driver"
2248 depends on PCI && PPC_IBM_CELL_BLADE 2248 depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
2249 select FW_LOADER 2249 select FW_LOADER
2250 help 2250 help
2251 This driver supports the Gigabit Ethernet chips present on the 2251 This driver supports the Gigabit Ethernet chips present on the
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 33c6645455..7138e0e025 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -2293,10 +2293,7 @@ static void ace_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2293 2293
2294 local_irq_save(flags); 2294 local_irq_save(flags);
2295 ace_mask_irq(dev); 2295 ace_mask_irq(dev);
2296 2296 vlan_group_set_device(ap->vlgrp, vid, NULL);
2297 if (ap->vlgrp)
2298 ap->vlgrp->vlan_devices[vid] = NULL;
2299
2300 ace_unmask_irq(dev); 2297 ace_unmask_irq(dev);
2301 local_irq_restore(flags); 2298 local_irq_restore(flags);
2302} 2299}
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 9c399aaefb..962c954c2d 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -1737,8 +1737,7 @@ static void amd8111e_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid
1737{ 1737{
1738 struct amd8111e_priv *lp = netdev_priv(dev); 1738 struct amd8111e_priv *lp = netdev_priv(dev);
1739 spin_lock_irq(&lp->lock); 1739 spin_lock_irq(&lp->lock);
1740 if (lp->vlgrp) 1740 vlan_group_set_device(lp->vlgrp, vid, NULL);
1741 lp->vlgrp->vlan_devices[vid] = NULL;
1742 spin_unlock_irq(&lp->lock); 1741 spin_unlock_irq(&lp->lock);
1743} 1742}
1744#endif 1743#endif
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index 65673485bb..88d4f70035 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -1252,8 +1252,7 @@ static void atl1_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1252 1252
1253 spin_lock_irqsave(&adapter->lock, flags); 1253 spin_lock_irqsave(&adapter->lock, flags);
1254 /* atl1_irq_disable(adapter); */ 1254 /* atl1_irq_disable(adapter); */
1255 if (adapter->vlgrp) 1255 vlan_group_set_device(adapter->vlgrp, vid, NULL);
1256 adapter->vlgrp->vlan_devices[vid] = NULL;
1257 /* atl1_irq_enable(adapter); */ 1256 /* atl1_irq_enable(adapter); */
1258 spin_unlock_irqrestore(&adapter->lock, flags); 1257 spin_unlock_irqrestore(&adapter->lock, flags);
1259 /* We don't do Vlan filtering */ 1258 /* We don't do Vlan filtering */
@@ -1266,7 +1265,7 @@ static void atl1_restore_vlan(struct atl1_adapter *adapter)
1266 if (adapter->vlgrp) { 1265 if (adapter->vlgrp) {
1267 u16 vid; 1266 u16 vid;
1268 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 1267 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1269 if (!adapter->vlgrp->vlan_devices[vid]) 1268 if (!vlan_group_get_device(adapter->vlgrp, vid))
1270 continue; 1269 continue;
1271 atl1_vlan_rx_add_vid(adapter->netdev, vid); 1270 atl1_vlan_rx_add_vid(adapter->netdev, vid);
1272 } 1271 }
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 5a96d7611a..c12e5ea618 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -4467,9 +4467,7 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4467 struct bnx2 *bp = netdev_priv(dev); 4467 struct bnx2 *bp = netdev_priv(dev);
4468 4468
4469 bnx2_netif_stop(bp); 4469 bnx2_netif_stop(bp);
4470 4470 vlan_group_set_device(bp->vlgrp, vid, NULL);
4471 if (bp->vlgrp)
4472 bp->vlgrp->vlan_devices[vid] = NULL;
4473 bnx2_set_rx_mode(dev); 4471 bnx2_set_rx_mode(dev);
4474 4472
4475 bnx2_netif_start(bp); 4473 bnx2_netif_start(bp);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a7c8f98a89..e4724d874e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -60,6 +60,7 @@
60#include <linux/errno.h> 60#include <linux/errno.h>
61#include <linux/netdevice.h> 61#include <linux/netdevice.h>
62#include <linux/inetdevice.h> 62#include <linux/inetdevice.h>
63#include <linux/igmp.h>
63#include <linux/etherdevice.h> 64#include <linux/etherdevice.h>
64#include <linux/skbuff.h> 65#include <linux/skbuff.h>
65#include <net/sock.h> 66#include <net/sock.h>
@@ -488,9 +489,9 @@ static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
488 /* Save and then restore vlan_dev in the grp array, 489 /* Save and then restore vlan_dev in the grp array,
489 * since the slave's driver might clear it. 490 * since the slave's driver might clear it.
490 */ 491 */
491 vlan_dev = bond->vlgrp->vlan_devices[vid]; 492 vlan_dev = vlan_group_get_device(bond->vlgrp, vid);
492 slave_dev->vlan_rx_kill_vid(slave_dev, vid); 493 slave_dev->vlan_rx_kill_vid(slave_dev, vid);
493 bond->vlgrp->vlan_devices[vid] = vlan_dev; 494 vlan_group_set_device(bond->vlgrp, vid, vlan_dev);
494 } 495 }
495 } 496 }
496 497
@@ -550,9 +551,9 @@ static void bond_del_vlans_from_slave(struct bonding *bond, struct net_device *s
550 /* Save and then restore vlan_dev in the grp array, 551 /* Save and then restore vlan_dev in the grp array,
551 * since the slave's driver might clear it. 552 * since the slave's driver might clear it.
552 */ 553 */
553 vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id]; 554 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
554 slave_dev->vlan_rx_kill_vid(slave_dev, vlan->vlan_id); 555 slave_dev->vlan_rx_kill_vid(slave_dev, vlan->vlan_id);
555 bond->vlgrp->vlan_devices[vlan->vlan_id] = vlan_dev; 556 vlan_group_set_device(bond->vlgrp, vlan->vlan_id, vlan_dev);
556 } 557 }
557 558
558unreg: 559unreg:
@@ -861,6 +862,28 @@ static void bond_mc_delete(struct bonding *bond, void *addr, int alen)
861 } 862 }
862} 863}
863 864
865
866/*
867 * Retrieve the list of registered multicast addresses for the bonding
868 * device and retransmit an IGMP JOIN request to the current active
869 * slave.
870 */
871static void bond_resend_igmp_join_requests(struct bonding *bond)
872{
873 struct in_device *in_dev;
874 struct ip_mc_list *im;
875
876 rcu_read_lock();
877 in_dev = __in_dev_get_rcu(bond->dev);
878 if (in_dev) {
879 for (im = in_dev->mc_list; im; im = im->next) {
880 ip_mc_rejoin_group(im);
881 }
882 }
883
884 rcu_read_unlock();
885}
886
864/* 887/*
865 * Totally destroys the mc_list in bond 888 * Totally destroys the mc_list in bond
866 */ 889 */
@@ -874,6 +897,7 @@ static void bond_mc_list_destroy(struct bonding *bond)
874 kfree(dmi); 897 kfree(dmi);
875 dmi = bond->mc_list; 898 dmi = bond->mc_list;
876 } 899 }
900 bond->mc_list = NULL;
877} 901}
878 902
879/* 903/*
@@ -967,6 +991,7 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, struct
967 for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) { 991 for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) {
968 dev_mc_add(new_active->dev, dmi->dmi_addr, dmi->dmi_addrlen, 0); 992 dev_mc_add(new_active->dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
969 } 993 }
994 bond_resend_igmp_join_requests(bond);
970 } 995 }
971} 996}
972 997
@@ -2397,7 +2422,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2397 vlan_id = 0; 2422 vlan_id = 0;
2398 list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list, 2423 list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
2399 vlan_list) { 2424 vlan_list) {
2400 vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id]; 2425 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
2401 if (vlan_dev == rt->u.dst.dev) { 2426 if (vlan_dev == rt->u.dst.dev) {
2402 vlan_id = vlan->vlan_id; 2427 vlan_id = vlan->vlan_id;
2403 dprintk("basa: vlan match on %s %d\n", 2428 dprintk("basa: vlan match on %s %d\n",
@@ -2444,7 +2469,7 @@ static void bond_send_gratuitous_arp(struct bonding *bond)
2444 } 2469 }
2445 2470
2446 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 2471 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
2447 vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id]; 2472 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
2448 if (vlan->vlan_ip) { 2473 if (vlan->vlan_ip) {
2449 bond_arp_send(slave->dev, ARPOP_REPLY, vlan->vlan_ip, 2474 bond_arp_send(slave->dev, ARPOP_REPLY, vlan->vlan_ip,
2450 vlan->vlan_ip, vlan->vlan_id); 2475 vlan->vlan_ip, vlan->vlan_id);
@@ -3371,7 +3396,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
3371 3396
3372 list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list, 3397 list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
3373 vlan_list) { 3398 vlan_list) {
3374 vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id]; 3399 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
3375 if (vlan_dev == event_dev) { 3400 if (vlan_dev == event_dev) {
3376 switch (event) { 3401 switch (event) {
3377 case NETDEV_UP: 3402 case NETDEV_UP:
@@ -3423,15 +3448,21 @@ void bond_register_arp(struct bonding *bond)
3423{ 3448{
3424 struct packet_type *pt = &bond->arp_mon_pt; 3449 struct packet_type *pt = &bond->arp_mon_pt;
3425 3450
3451 if (pt->type)
3452 return;
3453
3426 pt->type = htons(ETH_P_ARP); 3454 pt->type = htons(ETH_P_ARP);
3427 pt->dev = NULL; /*bond->dev;XXX*/ 3455 pt->dev = bond->dev;
3428 pt->func = bond_arp_rcv; 3456 pt->func = bond_arp_rcv;
3429 dev_add_pack(pt); 3457 dev_add_pack(pt);
3430} 3458}
3431 3459
3432void bond_unregister_arp(struct bonding *bond) 3460void bond_unregister_arp(struct bonding *bond)
3433{ 3461{
3434 dev_remove_pack(&bond->arp_mon_pt); 3462 struct packet_type *pt = &bond->arp_mon_pt;
3463
3464 dev_remove_pack(pt);
3465 pt->type = 0;
3435} 3466}
3436 3467
3437/*---------------------------- Hashing Policies -----------------------------*/ 3468/*---------------------------- Hashing Policies -----------------------------*/
@@ -4011,42 +4042,6 @@ out:
4011 return 0; 4042 return 0;
4012} 4043}
4013 4044
4014static void bond_activebackup_xmit_copy(struct sk_buff *skb,
4015 struct bonding *bond,
4016 struct slave *slave)
4017{
4018 struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
4019 struct ethhdr *eth_data;
4020 u8 *hwaddr;
4021 int res;
4022
4023 if (!skb2) {
4024 printk(KERN_ERR DRV_NAME ": Error: "
4025 "bond_activebackup_xmit_copy(): skb_copy() failed\n");
4026 return;
4027 }
4028
4029 skb2->mac.raw = (unsigned char *)skb2->data;
4030 eth_data = eth_hdr(skb2);
4031
4032 /* Pick an appropriate source MAC address
4033 * -- use slave's perm MAC addr, unless used by bond
4034 * -- otherwise, borrow active slave's perm MAC addr
4035 * since that will not be used
4036 */
4037 hwaddr = slave->perm_hwaddr;
4038 if (!memcmp(eth_data->h_source, hwaddr, ETH_ALEN))
4039 hwaddr = bond->curr_active_slave->perm_hwaddr;
4040
4041 /* Set source MAC address appropriately */
4042 memcpy(eth_data->h_source, hwaddr, ETH_ALEN);
4043
4044 res = bond_dev_queue_xmit(bond, skb2, slave->dev);
4045 if (res)
4046 dev_kfree_skb(skb2);
4047
4048 return;
4049}
4050 4045
4051/* 4046/*
4052 * in active-backup mode, we know that bond->curr_active_slave is always valid if 4047 * in active-backup mode, we know that bond->curr_active_slave is always valid if
@@ -4067,21 +4062,6 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
4067 if (!bond->curr_active_slave) 4062 if (!bond->curr_active_slave)
4068 goto out; 4063 goto out;
4069 4064
4070 /* Xmit IGMP frames on all slaves to ensure rapid fail-over
4071 for multicast traffic on snooping switches */
4072 if (skb->protocol == __constant_htons(ETH_P_IP) &&
4073 skb->nh.iph->protocol == IPPROTO_IGMP) {
4074 struct slave *slave, *active_slave;
4075 int i;
4076
4077 active_slave = bond->curr_active_slave;
4078 bond_for_each_slave_from_to(bond, slave, i, active_slave->next,
4079 active_slave->prev)
4080 if (IS_UP(slave->dev) &&
4081 (slave->link == BOND_LINK_UP))
4082 bond_activebackup_xmit_copy(skb, bond, slave);
4083 }
4084
4085 res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev); 4065 res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev);
4086 4066
4087out: 4067out:
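The bonding hunks above trade the old active-backup workaround, which duplicated every outgoing IGMP frame onto all slaves (the removed bond_activebackup_xmit_copy() path), for an explicit rejoin after failover: when the active slave changes, the switch's IGMP-snooping tables still point at the old port, so bond_mc_swap() now asks the IPv4 stack to re-announce the bond's memberships through the new slave. Condensed from the hunk above, with the reasoning spelled out in comments:

static void resend_igmp_joins(struct net_device *bond_dev)
{
	struct in_device *in_dev;
	struct ip_mc_list *im;

	rcu_read_lock();			/* mc_list is RCU-protected */
	in_dev = __in_dev_get_rcu(bond_dev);
	if (in_dev)
		for (im = in_dev->mc_list; im; im = im->next)
			ip_mc_rejoin_group(im);	/* unsolicited report leaves
						 * via the new active slave */
	rcu_read_unlock();
}

The same set of hunks also makes bond_register_arp()/bond_unregister_arp() idempotent by treating pt->type as a "registered" flag, and binds the ARP monitor's packet_type to bond->dev so it no longer has to sniff ARP on every interface.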
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index 7d0f24f697..125c9b1058 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -889,8 +889,7 @@ static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
889 struct adapter *adapter = dev->priv; 889 struct adapter *adapter = dev->priv;
890 890
891 spin_lock_irq(&adapter->async_lock); 891 spin_lock_irq(&adapter->async_lock);
892 if (adapter->vlan_grp) 892 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
893 adapter->vlan_grp->vlan_devices[vid] = NULL;
894 spin_unlock_irq(&adapter->async_lock); 893 spin_unlock_irq(&adapter->async_lock);
895} 894}
896#endif 895#endif
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 89a682702f..326d4a6651 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1696,6 +1696,7 @@ irqreturn_t t1_interrupt(int irq, void *cookie)
1696{ 1696{
1697 int work_done; 1697 int work_done;
1698 struct adapter *adapter = cookie; 1698 struct adapter *adapter = cookie;
1699 struct respQ *Q = &adapter->sge->respQ;
1699 1700
1700 spin_lock(&adapter->async_lock); 1701 spin_lock(&adapter->async_lock);
1701 1702
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 5c97a64451..80c3d8f268 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -74,6 +74,11 @@ enum { /* adapter flags */
74struct rx_desc; 74struct rx_desc;
75struct rx_sw_desc; 75struct rx_sw_desc;
76 76
77struct sge_fl_page {
78 struct skb_frag_struct frag;
79 unsigned char *va;
80};
81
77struct sge_fl { /* SGE per free-buffer list state */ 82struct sge_fl { /* SGE per free-buffer list state */
78 unsigned int buf_size; /* size of each Rx buffer */ 83 unsigned int buf_size; /* size of each Rx buffer */
79 unsigned int credits; /* # of available Rx buffers */ 84 unsigned int credits; /* # of available Rx buffers */
@@ -81,11 +86,13 @@ struct sge_fl { /* SGE per free-buffer list state */
81 unsigned int cidx; /* consumer index */ 86 unsigned int cidx; /* consumer index */
82 unsigned int pidx; /* producer index */ 87 unsigned int pidx; /* producer index */
83 unsigned int gen; /* free list generation */ 88 unsigned int gen; /* free list generation */
89 unsigned int cntxt_id; /* SGE context id for the free list */
90 struct sge_fl_page page;
84 struct rx_desc *desc; /* address of HW Rx descriptor ring */ 91 struct rx_desc *desc; /* address of HW Rx descriptor ring */
85 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ 92 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
86 dma_addr_t phys_addr; /* physical address of HW ring start */ 93 dma_addr_t phys_addr; /* physical address of HW ring start */
87 unsigned int cntxt_id; /* SGE context id for the free list */
88 unsigned long empty; /* # of times queue ran out of buffers */ 94 unsigned long empty; /* # of times queue ran out of buffers */
95 unsigned long alloc_failed; /* # of times buffer allocation failed */
89}; 96};
90 97
91/* 98/*
@@ -121,6 +128,8 @@ struct sge_rspq { /* state for an SGE response queue */
121 unsigned long empty; /* # of times queue ran out of credits */ 128 unsigned long empty; /* # of times queue ran out of credits */
122 unsigned long nomem; /* # of responses deferred due to no mem */ 129 unsigned long nomem; /* # of responses deferred due to no mem */
123 unsigned long unhandled_irqs; /* # of spurious intrs */ 130 unsigned long unhandled_irqs; /* # of spurious intrs */
131 unsigned long starved;
132 unsigned long restarted;
124}; 133};
125 134
126struct tx_desc; 135struct tx_desc;
diff --git a/drivers/net/cxgb3/cxgb3_ioctl.h b/drivers/net/cxgb3/cxgb3_ioctl.h
index a94281861a..0a82fcddf2 100644
--- a/drivers/net/cxgb3/cxgb3_ioctl.h
+++ b/drivers/net/cxgb3/cxgb3_ioctl.h
@@ -36,28 +36,17 @@
36 * Ioctl commands specific to this driver. 36 * Ioctl commands specific to this driver.
37 */ 37 */
38enum { 38enum {
39 CHELSIO_SETREG = 1024, 39 CHELSIO_GETMTUTAB = 1029,
40 CHELSIO_GETREG, 40 CHELSIO_SETMTUTAB = 1030,
41 CHELSIO_SETTPI, 41 CHELSIO_SET_PM = 1032,
42 CHELSIO_GETTPI, 42 CHELSIO_GET_PM = 1033,
43 CHELSIO_GETMTUTAB, 43 CHELSIO_GET_MEM = 1038,
44 CHELSIO_SETMTUTAB, 44 CHELSIO_LOAD_FW = 1041,
45 CHELSIO_GETMTU, 45 CHELSIO_SET_TRACE_FILTER = 1044,
46 CHELSIO_SET_PM, 46 CHELSIO_SET_QSET_PARAMS = 1045,
47 CHELSIO_GET_PM, 47 CHELSIO_GET_QSET_PARAMS = 1046,
48 CHELSIO_GET_TCAM, 48 CHELSIO_SET_QSET_NUM = 1047,
49 CHELSIO_SET_TCAM, 49 CHELSIO_GET_QSET_NUM = 1048,
50 CHELSIO_GET_TCB,
51 CHELSIO_GET_MEM,
52 CHELSIO_LOAD_FW,
53 CHELSIO_GET_PROTO,
54 CHELSIO_SET_PROTO,
55 CHELSIO_SET_TRACE_FILTER,
56 CHELSIO_SET_QSET_PARAMS,
57 CHELSIO_GET_QSET_PARAMS,
58 CHELSIO_SET_QSET_NUM,
59 CHELSIO_GET_QSET_NUM,
60 CHELSIO_SET_PKTSCHED,
61}; 50};
62 51
63struct ch_reg { 52struct ch_reg {
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 43583ed655..7ff834e45d 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -434,27 +434,25 @@ static int setup_sge_qsets(struct adapter *adap)
434 434
435static ssize_t attr_show(struct device *d, struct device_attribute *attr, 435static ssize_t attr_show(struct device *d, struct device_attribute *attr,
436 char *buf, 436 char *buf,
437 ssize_t(*format) (struct adapter *, char *)) 437 ssize_t(*format) (struct net_device *, char *))
438{ 438{
439 ssize_t len; 439 ssize_t len;
440 struct adapter *adap = to_net_dev(d)->priv;
441 440
442 /* Synchronize with ioctls that may shut down the device */ 441 /* Synchronize with ioctls that may shut down the device */
443 rtnl_lock(); 442 rtnl_lock();
444 len = (*format) (adap, buf); 443 len = (*format) (to_net_dev(d), buf);
445 rtnl_unlock(); 444 rtnl_unlock();
446 return len; 445 return len;
447} 446}
448 447
449static ssize_t attr_store(struct device *d, struct device_attribute *attr, 448static ssize_t attr_store(struct device *d, struct device_attribute *attr,
450 const char *buf, size_t len, 449 const char *buf, size_t len,
451 ssize_t(*set) (struct adapter *, unsigned int), 450 ssize_t(*set) (struct net_device *, unsigned int),
452 unsigned int min_val, unsigned int max_val) 451 unsigned int min_val, unsigned int max_val)
453{ 452{
454 char *endp; 453 char *endp;
455 ssize_t ret; 454 ssize_t ret;
456 unsigned int val; 455 unsigned int val;
457 struct adapter *adap = to_net_dev(d)->priv;
458 456
459 if (!capable(CAP_NET_ADMIN)) 457 if (!capable(CAP_NET_ADMIN))
460 return -EPERM; 458 return -EPERM;
@@ -464,7 +462,7 @@ static ssize_t attr_store(struct device *d, struct device_attribute *attr,
464 return -EINVAL; 462 return -EINVAL;
465 463
466 rtnl_lock(); 464 rtnl_lock();
467 ret = (*set) (adap, val); 465 ret = (*set) (to_net_dev(d), val);
468 if (!ret) 466 if (!ret)
469 ret = len; 467 ret = len;
470 rtnl_unlock(); 468 rtnl_unlock();
@@ -472,8 +470,9 @@ static ssize_t attr_store(struct device *d, struct device_attribute *attr,
472} 470}
473 471
474#define CXGB3_SHOW(name, val_expr) \ 472#define CXGB3_SHOW(name, val_expr) \
475static ssize_t format_##name(struct adapter *adap, char *buf) \ 473static ssize_t format_##name(struct net_device *dev, char *buf) \
476{ \ 474{ \
475 struct adapter *adap = dev->priv; \
477 return sprintf(buf, "%u\n", val_expr); \ 476 return sprintf(buf, "%u\n", val_expr); \
478} \ 477} \
479static ssize_t show_##name(struct device *d, struct device_attribute *attr, \ 478static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
@@ -482,8 +481,10 @@ static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
482 return attr_show(d, attr, buf, format_##name); \ 481 return attr_show(d, attr, buf, format_##name); \
483} 482}
484 483
485static ssize_t set_nfilters(struct adapter *adap, unsigned int val) 484static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
486{ 485{
486 struct adapter *adap = dev->priv;
487
487 if (adap->flags & FULL_INIT_DONE) 488 if (adap->flags & FULL_INIT_DONE)
488 return -EBUSY; 489 return -EBUSY;
489 if (val && adap->params.rev == 0) 490 if (val && adap->params.rev == 0)
@@ -500,8 +501,10 @@ static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
500 return attr_store(d, attr, buf, len, set_nfilters, 0, ~0); 501 return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
501} 502}
502 503
503static ssize_t set_nservers(struct adapter *adap, unsigned int val) 504static ssize_t set_nservers(struct net_device *dev, unsigned int val)
504{ 505{
506 struct adapter *adap = dev->priv;
507
505 if (adap->flags & FULL_INIT_DONE) 508 if (adap->flags & FULL_INIT_DONE)
506 return -EBUSY; 509 return -EBUSY;
507 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters) 510 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
@@ -1549,32 +1552,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1549 return -EFAULT; 1552 return -EFAULT;
1550 1553
1551 switch (cmd) { 1554 switch (cmd) {
1552 case CHELSIO_SETREG:{
1553 struct ch_reg edata;
1554
1555 if (!capable(CAP_NET_ADMIN))
1556 return -EPERM;
1557 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1558 return -EFAULT;
1559 if ((edata.addr & 3) != 0
1560 || edata.addr >= adapter->mmio_len)
1561 return -EINVAL;
1562 writel(edata.val, adapter->regs + edata.addr);
1563 break;
1564 }
1565 case CHELSIO_GETREG:{
1566 struct ch_reg edata;
1567
1568 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1569 return -EFAULT;
1570 if ((edata.addr & 3) != 0
1571 || edata.addr >= adapter->mmio_len)
1572 return -EINVAL;
1573 edata.val = readl(adapter->regs + edata.addr);
1574 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1575 return -EFAULT;
1576 break;
1577 }
1578 case CHELSIO_SET_QSET_PARAMS:{ 1555 case CHELSIO_SET_QSET_PARAMS:{
1579 int i; 1556 int i;
1580 struct qset_params *q; 1557 struct qset_params *q;
@@ -1838,10 +1815,10 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1838 return -EINVAL; 1815 return -EINVAL;
1839 1816
1840 /* 1817 /*
1841 * Version scheme: 1818 * Version scheme:
1842 * bits 0..9: chip version 1819 * bits 0..9: chip version
1843 * bits 10..15: chip revision 1820 * bits 10..15: chip revision
1844 */ 1821 */
1845 t.version = 3 | (adapter->params.rev << 10); 1822 t.version = 3 | (adapter->params.rev << 10);
1846 if (copy_to_user(useraddr, &t, sizeof(t))) 1823 if (copy_to_user(useraddr, &t, sizeof(t)))
1847 return -EFAULT; 1824 return -EFAULT;
@@ -1890,20 +1867,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1890 t.trace_rx); 1867 t.trace_rx);
1891 break; 1868 break;
1892 } 1869 }
1893 case CHELSIO_SET_PKTSCHED:{
1894 struct ch_pktsched_params p;
1895
1896 if (!capable(CAP_NET_ADMIN))
1897 return -EPERM;
1898 if (!adapter->open_device_map)
1899 return -EAGAIN; /* uP and SGE must be running */
1900 if (copy_from_user(&p, useraddr, sizeof(p)))
1901 return -EFAULT;
1902 send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
1903 p.binding);
1904 break;
1905
1906 }
1907 default: 1870 default:
1908 return -EOPNOTSUPP; 1871 return -EOPNOTSUPP;
1909 } 1872 }
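In the cxgb3_ioctl.h hunk above, the private ioctl commands that remain are given explicit numeric values instead of relying on their position in the enum, presumably so the numbering seen by userspace tools can no longer shift when entries are added or removed. A hypothetical illustration of the hazard (names invented):

enum { CMD_A = 1024, CMD_B, CMD_C };	/* CMD_C == 1026 */
enum { CMD_A2 = 1024, CMD_C2 };		/* drop CMD_B: CMD_C2 == 1025, but old
					 * binaries still send 1026 */

/* Pinning each surviving command to a fixed value avoids the silent shift: */
enum { CMD_A3 = 1024, CMD_C3 = 1026 };

The cxgb3_main.c hunks above then drop the handlers for the removed register peek/poke and packet-scheduler commands, and pass the net_device, rather than a cached adapter pointer, into the sysfs attribute helpers.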
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index b2cf5f6feb..f6ed033efb 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -160,14 +160,16 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
160 int i; 160 int i;
161 161
162 for_each_port(adapter, i) { 162 for_each_port(adapter, i) {
163 const struct vlan_group *grp; 163 struct vlan_group *grp;
164 struct net_device *dev = adapter->port[i]; 164 struct net_device *dev = adapter->port[i];
165 const struct port_info *p = netdev_priv(dev); 165 const struct port_info *p = netdev_priv(dev);
166 166
167 if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) { 167 if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
168 if (vlan && vlan != VLAN_VID_MASK) { 168 if (vlan && vlan != VLAN_VID_MASK) {
169 grp = p->vlan_grp; 169 grp = p->vlan_grp;
170 dev = grp ? grp->vlan_devices[vlan] : NULL; 170 dev = NULL;
171 if (grp)
172 dev = vlan_group_get_device(grp, vlan);
171 } else 173 } else
172 while (dev->master) 174 while (dev->master)
173 dev = dev->master; 175 dev = dev->master;
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 3f2cf8a07c..c23783432e 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -45,9 +45,25 @@
45#define USE_GTS 0 45#define USE_GTS 0
46 46
47#define SGE_RX_SM_BUF_SIZE 1536 47#define SGE_RX_SM_BUF_SIZE 1536
48
49/*
50 * If USE_RX_PAGE is defined, the small freelist is populated with (partial)
51 * pages instead of skbs. Pages are carved up into RX_PAGE_SIZE chunks (must
52 * be a multiple of the host page size).
53 */
54#define USE_RX_PAGE
55#define RX_PAGE_SIZE 2048
56
57/*
58 * skb freelist packets are copied into a new skb (and the freelist one is
59 * reused) if their len is <=
60 */
48#define SGE_RX_COPY_THRES 256 61#define SGE_RX_COPY_THRES 256
49 62
50# define SGE_RX_DROP_THRES 16 63/*
64 * Minimum number of freelist entries before we start dropping TUNNEL frames.
65 */
66#define SGE_RX_DROP_THRES 16
51 67
52/* 68/*
53 * Period of the Tx buffer reclaim timer. This timer does not need to run 69 * Period of the Tx buffer reclaim timer. This timer does not need to run
@@ -85,7 +101,10 @@ struct tx_sw_desc { /* SW state per Tx descriptor */
85}; 101};
86 102
87struct rx_sw_desc { /* SW state per Rx descriptor */ 103struct rx_sw_desc { /* SW state per Rx descriptor */
88 struct sk_buff *skb; 104 union {
105 struct sk_buff *skb;
106 struct sge_fl_page page;
107 } t;
89 DECLARE_PCI_UNMAP_ADDR(dma_addr); 108 DECLARE_PCI_UNMAP_ADDR(dma_addr);
90}; 109};
91 110
@@ -105,6 +124,15 @@ struct unmap_info { /* packet unmapping info, overlays skb->cb */
105}; 124};
106 125
107/* 126/*
127 * Holds unmapping information for Tx packets that need deferred unmapping.
128 * This structure lives at skb->head and must be allocated by callers.
129 */
130struct deferred_unmap_info {
131 struct pci_dev *pdev;
132 dma_addr_t addr[MAX_SKB_FRAGS + 1];
133};
134
135/*
108 * Maps a number of flits to the number of Tx descriptors that can hold them. 136 * Maps a number of flits to the number of Tx descriptors that can hold them.
109 * The formula is 137 * The formula is
110 * 138 *
@@ -252,10 +280,13 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
252 struct pci_dev *pdev = adapter->pdev; 280 struct pci_dev *pdev = adapter->pdev;
253 unsigned int cidx = q->cidx; 281 unsigned int cidx = q->cidx;
254 282
283 const int need_unmap = need_skb_unmap() &&
284 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
285
255 d = &q->sdesc[cidx]; 286 d = &q->sdesc[cidx];
256 while (n--) { 287 while (n--) {
257 if (d->skb) { /* an SGL is present */ 288 if (d->skb) { /* an SGL is present */
258 if (need_skb_unmap()) 289 if (need_unmap)
259 unmap_skb(d->skb, q, cidx, pdev); 290 unmap_skb(d->skb, q, cidx, pdev);
260 if (d->skb->priority == cidx) 291 if (d->skb->priority == cidx)
261 kfree_skb(d->skb); 292 kfree_skb(d->skb);
@@ -320,16 +351,27 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
320 351
321 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr), 352 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
322 q->buf_size, PCI_DMA_FROMDEVICE); 353 q->buf_size, PCI_DMA_FROMDEVICE);
323 kfree_skb(d->skb); 354
324 d->skb = NULL; 355 if (q->buf_size != RX_PAGE_SIZE) {
356 kfree_skb(d->t.skb);
357 d->t.skb = NULL;
358 } else {
359 if (d->t.page.frag.page)
360 put_page(d->t.page.frag.page);
361 d->t.page.frag.page = NULL;
362 }
325 if (++cidx == q->size) 363 if (++cidx == q->size)
326 cidx = 0; 364 cidx = 0;
327 } 365 }
366
367 if (q->page.frag.page)
368 put_page(q->page.frag.page);
369 q->page.frag.page = NULL;
328} 370}
329 371
330/** 372/**
331 * add_one_rx_buf - add a packet buffer to a free-buffer list 373 * add_one_rx_buf - add a packet buffer to a free-buffer list
332 * @skb: the buffer to add 374 * @va: va of the buffer to add
333 * @len: the buffer length 375 * @len: the buffer length
334 * @d: the HW Rx descriptor to write 376 * @d: the HW Rx descriptor to write
335 * @sd: the SW Rx descriptor to write 377 * @sd: the SW Rx descriptor to write
@@ -339,14 +381,13 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
339 * Add a buffer of the given length to the supplied HW and SW Rx 381 * Add a buffer of the given length to the supplied HW and SW Rx
340 * descriptors. 382 * descriptors.
341 */ 383 */
342static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len, 384static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
343 struct rx_desc *d, struct rx_sw_desc *sd, 385 struct rx_desc *d, struct rx_sw_desc *sd,
344 unsigned int gen, struct pci_dev *pdev) 386 unsigned int gen, struct pci_dev *pdev)
345{ 387{
346 dma_addr_t mapping; 388 dma_addr_t mapping;
347 389
348 sd->skb = skb; 390 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
349 mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
350 pci_unmap_addr_set(sd, dma_addr, mapping); 391 pci_unmap_addr_set(sd, dma_addr, mapping);
351 392
352 d->addr_lo = cpu_to_be32(mapping); 393 d->addr_lo = cpu_to_be32(mapping);
@@ -371,14 +412,47 @@ static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
371{ 412{
372 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; 413 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
373 struct rx_desc *d = &q->desc[q->pidx]; 414 struct rx_desc *d = &q->desc[q->pidx];
415 struct sge_fl_page *p = &q->page;
374 416
375 while (n--) { 417 while (n--) {
376 struct sk_buff *skb = alloc_skb(q->buf_size, gfp); 418 unsigned char *va;
377 419
378 if (!skb) 420 if (unlikely(q->buf_size != RX_PAGE_SIZE)) {
379 break; 421 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
422
423 if (!skb) {
424 q->alloc_failed++;
425 break;
426 }
427 va = skb->data;
428 sd->t.skb = skb;
429 } else {
430 if (!p->frag.page) {
431 p->frag.page = alloc_pages(gfp, 0);
432 if (unlikely(!p->frag.page)) {
433 q->alloc_failed++;
434 break;
435 } else {
436 p->frag.size = RX_PAGE_SIZE;
437 p->frag.page_offset = 0;
438 p->va = page_address(p->frag.page);
439 }
440 }
441
442 memcpy(&sd->t, p, sizeof(*p));
443 va = p->va;
444
445 p->frag.page_offset += RX_PAGE_SIZE;
446 BUG_ON(p->frag.page_offset > PAGE_SIZE);
447 p->va += RX_PAGE_SIZE;
448 if (p->frag.page_offset == PAGE_SIZE)
449 p->frag.page = NULL;
450 else
451 get_page(p->frag.page);
452 }
453
454 add_one_rx_buf(va, q->buf_size, d, sd, q->gen, adap->pdev);
380 455
381 add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
382 d++; 456 d++;
383 sd++; 457 sd++;
384 if (++q->pidx == q->size) { 458 if (++q->pidx == q->size) {
@@ -413,7 +487,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
413 struct rx_desc *from = &q->desc[idx]; 487 struct rx_desc *from = &q->desc[idx];
414 struct rx_desc *to = &q->desc[q->pidx]; 488 struct rx_desc *to = &q->desc[q->pidx];
415 489
416 q->sdesc[q->pidx] = q->sdesc[idx]; 490 memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof(struct rx_sw_desc));
417 to->addr_lo = from->addr_lo; /* already big endian */ 491 to->addr_lo = from->addr_lo; /* already big endian */
418 to->addr_hi = from->addr_hi; /* likewise */ 492 to->addr_hi = from->addr_hi; /* likewise */
419 wmb(); 493 wmb();
@@ -446,7 +520,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
446 * of the SW ring. 520 * of the SW ring.
447 */ 521 */
448static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, 522static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
449 size_t sw_size, dma_addr_t *phys, void *metadata) 523 size_t sw_size, dma_addr_t * phys, void *metadata)
450{ 524{
451 size_t len = nelem * elem_size; 525 size_t len = nelem * elem_size;
452 void *s = NULL; 526 void *s = NULL;
@@ -576,61 +650,6 @@ static inline unsigned int flits_to_desc(unsigned int n)
576} 650}
577 651
578/** 652/**
579 * get_packet - return the next ingress packet buffer from a free list
580 * @adap: the adapter that received the packet
581 * @fl: the SGE free list holding the packet
582 * @len: the packet length including any SGE padding
583 * @drop_thres: # of remaining buffers before we start dropping packets
584 *
585 * Get the next packet from a free list and complete setup of the
586 * sk_buff. If the packet is small we make a copy and recycle the
587 * original buffer, otherwise we use the original buffer itself. If a
588 * positive drop threshold is supplied packets are dropped and their
589 * buffers recycled if (a) the number of remaining buffers is under the
590 * threshold and the packet is too big to copy, or (b) the packet should
591 * be copied but there is no memory for the copy.
592 */
593static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
594 unsigned int len, unsigned int drop_thres)
595{
596 struct sk_buff *skb = NULL;
597 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
598
599 prefetch(sd->skb->data);
600
601 if (len <= SGE_RX_COPY_THRES) {
602 skb = alloc_skb(len, GFP_ATOMIC);
603 if (likely(skb != NULL)) {
604 __skb_put(skb, len);
605 pci_dma_sync_single_for_cpu(adap->pdev,
606 pci_unmap_addr(sd,
607 dma_addr),
608 len, PCI_DMA_FROMDEVICE);
609 memcpy(skb->data, sd->skb->data, len);
610 pci_dma_sync_single_for_device(adap->pdev,
611 pci_unmap_addr(sd,
612 dma_addr),
613 len, PCI_DMA_FROMDEVICE);
614 } else if (!drop_thres)
615 goto use_orig_buf;
616 recycle:
617 recycle_rx_buf(adap, fl, fl->cidx);
618 return skb;
619 }
620
621 if (unlikely(fl->credits < drop_thres))
622 goto recycle;
623
624 use_orig_buf:
625 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
626 fl->buf_size, PCI_DMA_FROMDEVICE);
627 skb = sd->skb;
628 skb_put(skb, len);
629 __refill_fl(adap, fl);
630 return skb;
631}
632
633/**
634 * get_imm_packet - return the next ingress packet buffer from a response 653 * get_imm_packet - return the next ingress packet buffer from a response
635 * @resp: the response descriptor containing the packet data 654 * @resp: the response descriptor containing the packet data
636 * 655 *
@@ -1227,6 +1246,50 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1227} 1246}
1228 1247
1229/** 1248/**
1249 * deferred_unmap_destructor - unmap a packet when it is freed
1250 * @skb: the packet
1251 *
1252 * This is the packet destructor used for Tx packets that need to remain
1253 * mapped until they are freed rather than until their Tx descriptors are
1254 * freed.
1255 */
1256static void deferred_unmap_destructor(struct sk_buff *skb)
1257{
1258 int i;
1259 const dma_addr_t *p;
1260 const struct skb_shared_info *si;
1261 const struct deferred_unmap_info *dui;
1262 const struct unmap_info *ui = (struct unmap_info *)skb->cb;
1263
1264 dui = (struct deferred_unmap_info *)skb->head;
1265 p = dui->addr;
1266
1267 if (ui->len)
1268 pci_unmap_single(dui->pdev, *p++, ui->len, PCI_DMA_TODEVICE);
1269
1270 si = skb_shinfo(skb);
1271 for (i = 0; i < si->nr_frags; i++)
1272 pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
1273 PCI_DMA_TODEVICE);
1274}
1275
1276static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1277 const struct sg_ent *sgl, int sgl_flits)
1278{
1279 dma_addr_t *p;
1280 struct deferred_unmap_info *dui;
1281
1282 dui = (struct deferred_unmap_info *)skb->head;
1283 dui->pdev = pdev;
1284 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1285 *p++ = be64_to_cpu(sgl->addr[0]);
1286 *p++ = be64_to_cpu(sgl->addr[1]);
1287 }
1288 if (sgl_flits)
1289 *p = be64_to_cpu(sgl->addr[0]);
1290}
1291
1292/**
1230 * write_ofld_wr - write an offload work request 1293 * write_ofld_wr - write an offload work request
1231 * @adap: the adapter 1294 * @adap: the adapter
1232 * @skb: the packet to send 1295 * @skb: the packet to send
@@ -1262,8 +1325,11 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1262 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; 1325 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1263 sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw, 1326 sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
1264 adap->pdev); 1327 adap->pdev);
1265 if (need_skb_unmap()) 1328 if (need_skb_unmap()) {
1329 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1330 skb->destructor = deferred_unmap_destructor;
1266 ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw; 1331 ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
1332 }
1267 1333
1268 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, 1334 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1269 gen, from->wr_hi, from->wr_lo); 1335 gen, from->wr_hi, from->wr_lo);
@@ -1617,7 +1683,6 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1617 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad); 1683 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1618 struct port_info *pi; 1684 struct port_info *pi;
1619 1685
1620 rq->eth_pkts++;
1621 skb_pull(skb, sizeof(*p) + pad); 1686 skb_pull(skb, sizeof(*p) + pad);
1622 skb->dev = adap->port[p->iff]; 1687 skb->dev = adap->port[p->iff];
1623 skb->dev->last_rx = jiffies; 1688 skb->dev->last_rx = jiffies;
@@ -1645,6 +1710,85 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1645 netif_rx(skb); 1710 netif_rx(skb);
1646} 1711}
1647 1712
1713#define SKB_DATA_SIZE 128
1714
1715static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
1716 unsigned int len)
1717{
1718 skb->len = len;
1719 if (len <= SKB_DATA_SIZE) {
1720 memcpy(skb->data, p->va, len);
1721 skb->tail += len;
1722 put_page(p->frag.page);
1723 } else {
1724 memcpy(skb->data, p->va, SKB_DATA_SIZE);
1725 skb_shinfo(skb)->frags[0].page = p->frag.page;
1726 skb_shinfo(skb)->frags[0].page_offset =
1727 p->frag.page_offset + SKB_DATA_SIZE;
1728 skb_shinfo(skb)->frags[0].size = len - SKB_DATA_SIZE;
1729 skb_shinfo(skb)->nr_frags = 1;
1730 skb->data_len = len - SKB_DATA_SIZE;
1731 skb->tail += SKB_DATA_SIZE;
1732 skb->truesize += skb->data_len;
1733 }
1734}
1735
1736/**
1737* get_packet - return the next ingress packet buffer from a free list
1738* @adap: the adapter that received the packet
1739* @fl: the SGE free list holding the packet
1740* @len: the packet length including any SGE padding
1741* @drop_thres: # of remaining buffers before we start dropping packets
1742*
1743* Get the next packet from a free list and complete setup of the
1744* sk_buff. If the packet is small we make a copy and recycle the
1745* original buffer, otherwise we use the original buffer itself. If a
1746* positive drop threshold is supplied packets are dropped and their
1747* buffers recycled if (a) the number of remaining buffers is under the
1748* threshold and the packet is too big to copy, or (b) the packet should
1749* be copied but there is no memory for the copy.
1750*/
1751static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
1752 unsigned int len, unsigned int drop_thres)
1753{
1754 struct sk_buff *skb = NULL;
1755 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
1756
1757 prefetch(sd->t.skb->data);
1758
1759 if (len <= SGE_RX_COPY_THRES) {
1760 skb = alloc_skb(len, GFP_ATOMIC);
1761 if (likely(skb != NULL)) {
1762 struct rx_desc *d = &fl->desc[fl->cidx];
1763 dma_addr_t mapping =
1764 (dma_addr_t)((u64) be32_to_cpu(d->addr_hi) << 32 |
1765 be32_to_cpu(d->addr_lo));
1766
1767 __skb_put(skb, len);
1768 pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
1769 PCI_DMA_FROMDEVICE);
1770 memcpy(skb->data, sd->t.skb->data, len);
1771 pci_dma_sync_single_for_device(adap->pdev, mapping, len,
1772 PCI_DMA_FROMDEVICE);
1773 } else if (!drop_thres)
1774 goto use_orig_buf;
1775recycle:
1776 recycle_rx_buf(adap, fl, fl->cidx);
1777 return skb;
1778 }
1779
1780 if (unlikely(fl->credits < drop_thres))
1781 goto recycle;
1782
1783use_orig_buf:
1784 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
1785 fl->buf_size, PCI_DMA_FROMDEVICE);
1786 skb = sd->t.skb;
1787 skb_put(skb, len);
1788 __refill_fl(adap, fl);
1789 return skb;
1790}
1791
1648/** 1792/**
1649 * handle_rsp_cntrl_info - handles control information in a response 1793 * handle_rsp_cntrl_info - handles control information in a response
1650 * @qs: the queue set corresponding to the response 1794 * @qs: the queue set corresponding to the response
@@ -1767,7 +1911,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
1767 q->next_holdoff = q->holdoff_tmr; 1911 q->next_holdoff = q->holdoff_tmr;
1768 1912
1769 while (likely(budget_left && is_new_response(r, q))) { 1913 while (likely(budget_left && is_new_response(r, q))) {
1770 int eth, ethpad = 0; 1914 int eth, ethpad = 2;
1771 struct sk_buff *skb = NULL; 1915 struct sk_buff *skb = NULL;
1772 u32 len, flags = ntohl(r->flags); 1916 u32 len, flags = ntohl(r->flags);
1773 u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val; 1917 u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
@@ -1794,18 +1938,56 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
1794 break; 1938 break;
1795 } 1939 }
1796 q->imm_data++; 1940 q->imm_data++;
1941 ethpad = 0;
1797 } else if ((len = ntohl(r->len_cq)) != 0) { 1942 } else if ((len = ntohl(r->len_cq)) != 0) {
1798 struct sge_fl *fl; 1943 struct sge_fl *fl =
1944 (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1945
1946 if (fl->buf_size == RX_PAGE_SIZE) {
1947 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
1948 struct sge_fl_page *p = &sd->t.page;
1949
1950 prefetch(p->va);
1951 prefetch(p->va + L1_CACHE_BYTES);
1952
1953 __refill_fl(adap, fl);
1954
1955 pci_unmap_single(adap->pdev,
1956 pci_unmap_addr(sd, dma_addr),
1957 fl->buf_size,
1958 PCI_DMA_FROMDEVICE);
1959
1960 if (eth) {
1961 if (unlikely(fl->credits <
1962 SGE_RX_DROP_THRES))
1963 goto eth_recycle;
1964
1965 skb = alloc_skb(SKB_DATA_SIZE,
1966 GFP_ATOMIC);
1967 if (unlikely(!skb)) {
1968eth_recycle:
1969 q->rx_drops++;
1970 recycle_rx_buf(adap, fl,
1971 fl->cidx);
1972 goto eth_done;
1973 }
1974 } else {
1975 skb = alloc_skb(SKB_DATA_SIZE,
1976 GFP_ATOMIC);
1977 if (unlikely(!skb))
1978 goto no_mem;
1979 }
1980
1981 skb_data_init(skb, p, G_RSPD_LEN(len));
1982eth_done:
1983 fl->credits--;
1984 q->eth_pkts++;
1985 } else {
1986 fl->credits--;
1987 skb = get_packet(adap, fl, G_RSPD_LEN(len),
1988 eth ? SGE_RX_DROP_THRES : 0);
1989 }
1799 1990
1800 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1801 fl->credits--;
1802 skb = get_packet(adap, fl, G_RSPD_LEN(len),
1803 eth ? SGE_RX_DROP_THRES : 0);
1804 if (!skb)
1805 q->rx_drops++;
1806 else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
1807 __skb_pull(skb, 2);
1808 ethpad = 2;
1809 if (++fl->cidx == fl->size) 1991 if (++fl->cidx == fl->size)
1810 fl->cidx = 0; 1992 fl->cidx = 0;
1811 } else 1993 } else
@@ -1829,18 +2011,23 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
1829 q->credits = 0; 2011 q->credits = 0;
1830 } 2012 }
1831 2013
1832 if (likely(skb != NULL)) { 2014 if (skb) {
2015 /* Preserve the RSS info in csum & priority */
2016 skb->csum = rss_hi;
2017 skb->priority = rss_lo;
2018
1833 if (eth) 2019 if (eth)
1834 rx_eth(adap, q, skb, ethpad); 2020 rx_eth(adap, q, skb, ethpad);
1835 else { 2021 else {
1836 /* Preserve the RSS info in csum & priority */ 2022 if (unlikely(r->rss_hdr.opcode ==
1837 skb->csum = rss_hi; 2023 CPL_TRACE_PKT))
1838 skb->priority = rss_lo; 2024 __skb_pull(skb, ethpad);
1839 ngathered = rx_offload(&adap->tdev, q, skb, 2025
1840 offload_skbs, ngathered); 2026 ngathered = rx_offload(&adap->tdev, q,
2027 skb, offload_skbs,
2028 ngathered);
1841 } 2029 }
1842 } 2030 }
1843
1844 --budget_left; 2031 --budget_left;
1845 } 2032 }
1846 2033
@@ -2320,10 +2507,23 @@ static void sge_timer_cb(unsigned long data)
2320 &adap->sge.qs[0].rspq.lock; 2507 &adap->sge.qs[0].rspq.lock;
2321 if (spin_trylock_irq(lock)) { 2508 if (spin_trylock_irq(lock)) {
2322 if (!napi_is_scheduled(qs->netdev)) { 2509 if (!napi_is_scheduled(qs->netdev)) {
2510 u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2511
2323 if (qs->fl[0].credits < qs->fl[0].size) 2512 if (qs->fl[0].credits < qs->fl[0].size)
2324 __refill_fl(adap, &qs->fl[0]); 2513 __refill_fl(adap, &qs->fl[0]);
2325 if (qs->fl[1].credits < qs->fl[1].size) 2514 if (qs->fl[1].credits < qs->fl[1].size)
2326 __refill_fl(adap, &qs->fl[1]); 2515 __refill_fl(adap, &qs->fl[1]);
2516
2517 if (status & (1 << qs->rspq.cntxt_id)) {
2518 qs->rspq.starved++;
2519 if (qs->rspq.credits) {
2520 refill_rspq(adap, &qs->rspq, 1);
2521 qs->rspq.credits--;
2522 qs->rspq.restarted++;
2523 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2524 1 << qs->rspq.cntxt_id);
2525 }
2526 }
2327 } 2527 }
2328 spin_unlock_irq(lock); 2528 spin_unlock_irq(lock);
2329 } 2529 }
@@ -2432,13 +2632,21 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2432 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3); 2632 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2433 2633
2434 if (ntxq == 1) { 2634 if (ntxq == 1) {
2635#ifdef USE_RX_PAGE
2636 q->fl[0].buf_size = RX_PAGE_SIZE;
2637#else
2435 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 + 2638 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
2436 sizeof(struct cpl_rx_pkt); 2639 sizeof(struct cpl_rx_pkt);
2640#endif
2437 q->fl[1].buf_size = MAX_FRAME_SIZE + 2 + 2641 q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
2438 sizeof(struct cpl_rx_pkt); 2642 sizeof(struct cpl_rx_pkt);
2439 } else { 2643 } else {
2644#ifdef USE_RX_PAGE
2645 q->fl[0].buf_size = RX_PAGE_SIZE;
2646#else
2440 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2647 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
2441 sizeof(struct cpl_rx_data); 2648 sizeof(struct cpl_rx_data);
2649#endif
2442 q->fl[1].buf_size = (16 * 1024) - 2650 q->fl[1].buf_size = (16 * 1024) -
2443 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2651 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2444 } 2652 }
@@ -2632,7 +2840,7 @@ void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
2632 q->polling = adap->params.rev > 0; 2840 q->polling = adap->params.rev > 0;
2633 q->coalesce_usecs = 5; 2841 q->coalesce_usecs = 5;
2634 q->rspq_size = 1024; 2842 q->rspq_size = 1024;
2635 q->fl_size = 4096; 2843 q->fl_size = 1024;
2636 q->jumbo_size = 512; 2844 q->jumbo_size = 512;
2637 q->txq_size[TXQ_ETH] = 1024; 2845 q->txq_size[TXQ_ETH] = 1024;
2638 q->txq_size[TXQ_OFLD] = 1024; 2846 q->txq_size[TXQ_OFLD] = 1024;
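The sge.c changes above rework the small free list so it can be populated with RX_PAGE_SIZE (2 KB) chunks carved out of whole pages rather than pre-allocated skbs: refill_fl() keeps a cursor over the current page in struct sge_fl_page, hands out one chunk per Rx descriptor, and takes an extra page reference for every chunk except the last so the page is only released once all of its chunks are consumed; on receive, skb_data_init() copies the first SKB_DATA_SIZE bytes into a small skb and attaches the remainder as a page fragment. A simplified, self-contained sketch of the carve-and-refcount pattern (names assumed):

struct page_cursor {
	struct page *page;	/* current backing page, NULL when used up */
	unsigned int offset;	/* next free offset within the page */
};

static void *carve_chunk(struct page_cursor *c, unsigned int chunk, gfp_t gfp)
{
	void *va;

	if (!c->page) {
		c->page = alloc_pages(gfp, 0);
		if (!c->page)
			return NULL;
		c->offset = 0;
	}

	va = page_address(c->page) + c->offset;
	c->offset += chunk;
	if (c->offset == PAGE_SIZE)
		c->page = NULL;		/* consumer inherits our reference */
	else
		get_page(c->page);	/* one extra reference per chunk handed out */
	return va;
}

The same series also defers Tx unmapping to an skb destructor (deferred_unmap_destructor()) for offload packets whose DMA mappings must outlive their Tx descriptors, and teaches sge_timer_cb() to restart response queues the hardware has flagged as starved.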
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 365a7f5b1f..eaa7a2e89a 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -884,11 +884,13 @@ int t3_check_fw_version(struct adapter *adapter)
884 major = G_FW_VERSION_MAJOR(vers); 884 major = G_FW_VERSION_MAJOR(vers);
885 minor = G_FW_VERSION_MINOR(vers); 885 minor = G_FW_VERSION_MINOR(vers);
886 886
887 if (type == FW_VERSION_T3 && major == 3 && minor == 1) 887 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
888 minor == FW_VERSION_MINOR)
888 return 0; 889 return 0;
889 890
890 CH_ERR(adapter, "found wrong FW version(%u.%u), " 891 CH_ERR(adapter, "found wrong FW version(%u.%u), "
891 "driver needs version 3.1\n", major, minor); 892 "driver needs version %u.%u\n", major, minor,
893 FW_VERSION_MAJOR, FW_VERSION_MINOR);
892 return -EINVAL; 894 return -EINVAL;
893} 895}
894 896
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index 2b67dd523c..82278f8502 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -35,5 +35,7 @@
35#define DRV_DESC "Chelsio T3 Network Driver" 35#define DRV_DESC "Chelsio T3 Network Driver"
36#define DRV_NAME "cxgb3" 36#define DRV_NAME "cxgb3"
37/* Driver version */ 37/* Driver version */
38#define DRV_VERSION "1.0" 38#define DRV_VERSION "1.0-ko"
39#define FW_VERSION_MAJOR 3
40#define FW_VERSION_MINOR 2
39#endif /* __CHELSIO_VERSION_H */ 41#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/de600.c b/drivers/net/de600.c
index 8396e411f1..e547ce14ee 100644
--- a/drivers/net/de600.c
+++ b/drivers/net/de600.c
@@ -38,12 +38,6 @@ static const char version[] = "de600.c: $Revision: 1.41-2.5 $, Bjorn Ekwall (bj
38/* Add more time here if your adapter won't work OK: */ 38/* Add more time here if your adapter won't work OK: */
39#define DE600_SLOW_DOWN udelay(delay_time) 39#define DE600_SLOW_DOWN udelay(delay_time)
40 40
41 /*
42 * If you still have trouble reading/writing to the adapter,
43 * modify the following "#define": (see <asm/io.h> for more info)
44#define REALLY_SLOW_IO
45 */
46
47/* use 0 for production, 1 for verification, >2 for debug */ 41/* use 0 for production, 1 for verification, >2 for debug */
48#ifdef DE600_DEBUG 42#ifdef DE600_DEBUG
49#define PRINTK(x) if (de600_debug >= 2) printk x 43#define PRINTK(x) if (de600_debug >= 2) printk x
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 98215fdd7d..1d08e937af 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -376,7 +376,7 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
376 uint16_t vid = adapter->hw.mng_cookie.vlan_id; 376 uint16_t vid = adapter->hw.mng_cookie.vlan_id;
377 uint16_t old_vid = adapter->mng_vlan_id; 377 uint16_t old_vid = adapter->mng_vlan_id;
378 if (adapter->vlgrp) { 378 if (adapter->vlgrp) {
379 if (!adapter->vlgrp->vlan_devices[vid]) { 379 if (!vlan_group_get_device(adapter->vlgrp, vid)) {
380 if (adapter->hw.mng_cookie.status & 380 if (adapter->hw.mng_cookie.status &
381 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { 381 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
382 e1000_vlan_rx_add_vid(netdev, vid); 382 e1000_vlan_rx_add_vid(netdev, vid);
@@ -386,7 +386,7 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
386 386
387 if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && 387 if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
388 (vid != old_vid) && 388 (vid != old_vid) &&
389 !adapter->vlgrp->vlan_devices[old_vid]) 389 !vlan_group_get_device(adapter->vlgrp, old_vid))
390 e1000_vlan_rx_kill_vid(netdev, old_vid); 390 e1000_vlan_rx_kill_vid(netdev, old_vid);
391 } else 391 } else
392 adapter->mng_vlan_id = vid; 392 adapter->mng_vlan_id = vid;
@@ -1482,7 +1482,7 @@ e1000_close(struct net_device *netdev)
1482 if ((adapter->hw.mng_cookie.status & 1482 if ((adapter->hw.mng_cookie.status &
1483 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 1483 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1484 !(adapter->vlgrp && 1484 !(adapter->vlgrp &&
1485 adapter->vlgrp->vlan_devices[adapter->mng_vlan_id])) { 1485 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
1486 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 1486 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1487 } 1487 }
1488 1488
@@ -4998,10 +4998,7 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
4998 uint32_t vfta, index; 4998 uint32_t vfta, index;
4999 4999
5000 e1000_irq_disable(adapter); 5000 e1000_irq_disable(adapter);
5001 5001 vlan_group_set_device(adapter->vlgrp, vid, NULL);
5002 if (adapter->vlgrp)
5003 adapter->vlgrp->vlan_devices[vid] = NULL;
5004
5005 e1000_irq_enable(adapter); 5002 e1000_irq_enable(adapter);
5006 5003
5007 if ((adapter->hw.mng_cookie.status & 5004 if ((adapter->hw.mng_cookie.status &
@@ -5027,7 +5024,7 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
5027 if (adapter->vlgrp) { 5024 if (adapter->vlgrp) {
5028 uint16_t vid; 5025 uint16_t vid;
5029 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 5026 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
5030 if (!adapter->vlgrp->vlan_devices[vid]) 5027 if (!vlan_group_get_device(adapter->vlgrp, vid))
5031 continue; 5028 continue;
5032 e1000_vlan_rx_add_vid(adapter->netdev, vid); 5029 e1000_vlan_rx_add_vid(adapter->netdev, vid);
5033 } 5030 }
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 88ad1c8bce..0e4042bc0a 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -1939,8 +1939,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1939 int index; 1939 int index;
1940 u64 hret; 1940 u64 hret;
1941 1941
1942 if (port->vgrp) 1942 vlan_group_set_device(port->vgrp, vid, NULL);
1943 port->vgrp->vlan_devices[vid] = NULL;
1944 1943
1945 cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); 1944 cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
1946 if (!cb1) { 1945 if (!cb1) {
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index a363148d01..46e1697d9c 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -839,7 +839,7 @@ enum {
839 NV_MSIX_INT_DISABLED, 839 NV_MSIX_INT_DISABLED,
840 NV_MSIX_INT_ENABLED 840 NV_MSIX_INT_ENABLED
841}; 841};
842static int msix = NV_MSIX_INT_ENABLED; 842static int msix = NV_MSIX_INT_DISABLED;
843 843
844/* 844/*
845 * DMA 64bit 845 * DMA 64bit
@@ -3104,13 +3104,17 @@ static int nv_napi_poll(struct net_device *dev, int *budget)
3104 struct fe_priv *np = netdev_priv(dev); 3104 struct fe_priv *np = netdev_priv(dev);
3105 u8 __iomem *base = get_hwbase(dev); 3105 u8 __iomem *base = get_hwbase(dev);
3106 unsigned long flags; 3106 unsigned long flags;
3107 int retcode;
3107 3108
3108 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 3109 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3109 pkts = nv_rx_process(dev, limit); 3110 pkts = nv_rx_process(dev, limit);
3110 else 3111 retcode = nv_alloc_rx(dev);
3112 } else {
3111 pkts = nv_rx_process_optimized(dev, limit); 3113 pkts = nv_rx_process_optimized(dev, limit);
3114 retcode = nv_alloc_rx_optimized(dev);
3115 }
3112 3116
3113 if (nv_alloc_rx(dev)) { 3117 if (retcode) {
3114 spin_lock_irqsave(&np->lock, flags); 3118 spin_lock_irqsave(&np->lock, flags);
3115 if (!np->in_shutdown) 3119 if (!np->in_shutdown)
3116 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3120 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
@@ -5370,19 +5374,19 @@ static struct pci_device_id pci_tbl[] = {
5370 }, 5374 },
5371 { /* MCP65 Ethernet Controller */ 5375 { /* MCP65 Ethernet Controller */
5372 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), 5376 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
5373 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5377 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5374 }, 5378 },
5375 { /* MCP65 Ethernet Controller */ 5379 { /* MCP65 Ethernet Controller */
5376 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), 5380 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
5377 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5381 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5378 }, 5382 },
5379 { /* MCP65 Ethernet Controller */ 5383 { /* MCP65 Ethernet Controller */
5380 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), 5384 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
5381 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5385 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5382 }, 5386 },
5383 { /* MCP65 Ethernet Controller */ 5387 { /* MCP65 Ethernet Controller */
5384 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), 5388 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
5385 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5389 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5386 }, 5390 },
5387 { /* MCP67 Ethernet Controller */ 5391 { /* MCP67 Ethernet Controller */
5388 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24), 5392 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 1f83988a6a..d981d4c41d 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1132,8 +1132,7 @@ static void gfar_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
1132 1132
1133 spin_lock_irqsave(&priv->rxlock, flags); 1133 spin_lock_irqsave(&priv->rxlock, flags);
1134 1134
1135 if (priv->vlgrp) 1135 vlan_group_set_device(priv->vlgrp, vid, NULL);
1136 priv->vlgrp->vlan_devices[vid] = NULL;
1137 1136
1138 spin_unlock_irqrestore(&priv->rxlock, flags); 1137 spin_unlock_irqrestore(&priv->rxlock, flags);
1139} 1138}
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 0c36828893..afc2ec7252 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -2213,8 +2213,7 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
2213 2213
2214 ixgb_irq_disable(adapter); 2214 ixgb_irq_disable(adapter);
2215 2215
2216 if(adapter->vlgrp) 2216 vlan_group_set_device(adapter->vlgrp, vid, NULL);
2217 adapter->vlgrp->vlan_devices[vid] = NULL;
2218 2217
2219 ixgb_irq_enable(adapter); 2218 ixgb_irq_enable(adapter);
2220 2219
@@ -2234,7 +2233,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
2234 if(adapter->vlgrp) { 2233 if(adapter->vlgrp) {
2235 uint16_t vid; 2234 uint16_t vid;
2236 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 2235 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2237 if(!adapter->vlgrp->vlan_devices[vid]) 2236 if(!vlan_group_get_device(adapter->vlgrp, vid))
2238 continue; 2237 continue;
2239 ixgb_vlan_rx_add_vid(adapter->netdev, vid); 2238 ixgb_vlan_rx_add_vid(adapter->netdev, vid);
2240 } 2239 }
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index d98e53efa2..9ba21e0f27 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -147,13 +147,13 @@ static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
147 int unaligned; 147 int unaligned;
148 148
149 while (mp->rx_desc_count < mp->rx_ring_size) { 149 while (mp->rx_desc_count < mp->rx_ring_size) {
150 skb = dev_alloc_skb(ETH_RX_SKB_SIZE + ETH_DMA_ALIGN); 150 skb = dev_alloc_skb(ETH_RX_SKB_SIZE + dma_get_cache_alignment());
151 if (!skb) 151 if (!skb)
152 break; 152 break;
153 mp->rx_desc_count++; 153 mp->rx_desc_count++;
154 unaligned = (u32)skb->data & (ETH_DMA_ALIGN - 1); 154 unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
155 if (unaligned) 155 if (unaligned)
156 skb_reserve(skb, ETH_DMA_ALIGN - unaligned); 156 skb_reserve(skb, dma_get_cache_alignment() - unaligned);
157 pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT; 157 pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
158 pkt_info.byte_cnt = ETH_RX_SKB_SIZE; 158 pkt_info.byte_cnt = ETH_RX_SKB_SIZE;
159 pkt_info.buf_ptr = dma_map_single(NULL, skb->data, 159 pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
@@ -1309,7 +1309,7 @@ static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address,
1309static int mv643xx_eth_probe(struct platform_device *pdev) 1309static int mv643xx_eth_probe(struct platform_device *pdev)
1310{ 1310{
1311 struct mv643xx_eth_platform_data *pd; 1311 struct mv643xx_eth_platform_data *pd;
1312 int port_num = pdev->id; 1312 int port_num;
1313 struct mv643xx_private *mp; 1313 struct mv643xx_private *mp;
1314 struct net_device *dev; 1314 struct net_device *dev;
1315 u8 *p; 1315 u8 *p;
@@ -1319,6 +1319,12 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1319 int duplex = DUPLEX_HALF; 1319 int duplex = DUPLEX_HALF;
1320 int speed = 0; /* default to auto-negotiation */ 1320 int speed = 0; /* default to auto-negotiation */
1321 1321
1322 pd = pdev->dev.platform_data;
1323 if (pd == NULL) {
1324 printk(KERN_ERR "No mv643xx_eth_platform_data\n");
1325 return -ENODEV;
1326 }
1327
1322 dev = alloc_etherdev(sizeof(struct mv643xx_private)); 1328 dev = alloc_etherdev(sizeof(struct mv643xx_private));
1323 if (!dev) 1329 if (!dev)
1324 return -ENOMEM; 1330 return -ENOMEM;
@@ -1331,8 +1337,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1331 BUG_ON(!res); 1337 BUG_ON(!res);
1332 dev->irq = res->start; 1338 dev->irq = res->start;
1333 1339
1334 mp->port_num = port_num;
1335
1336 dev->open = mv643xx_eth_open; 1340 dev->open = mv643xx_eth_open;
1337 dev->stop = mv643xx_eth_stop; 1341 dev->stop = mv643xx_eth_stop;
1338 dev->hard_start_xmit = mv643xx_eth_start_xmit; 1342 dev->hard_start_xmit = mv643xx_eth_start_xmit;
@@ -1373,39 +1377,40 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1373 1377
1374 spin_lock_init(&mp->lock); 1378 spin_lock_init(&mp->lock);
1375 1379
1380 port_num = pd->port_number;
1381
1376 /* set default config values */ 1382 /* set default config values */
1377 eth_port_uc_addr_get(dev, dev->dev_addr); 1383 eth_port_uc_addr_get(dev, dev->dev_addr);
1378 mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE; 1384 mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
1379 mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; 1385 mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;
1380 1386
1381 pd = pdev->dev.platform_data; 1387 if (is_valid_ether_addr(pd->mac_addr))
1382 if (pd) { 1388 memcpy(dev->dev_addr, pd->mac_addr, 6);
1383 if (pd->mac_addr)
1384 memcpy(dev->dev_addr, pd->mac_addr, 6);
1385 1389
1386 if (pd->phy_addr || pd->force_phy_addr) 1390 if (pd->phy_addr || pd->force_phy_addr)
1387 ethernet_phy_set(port_num, pd->phy_addr); 1391 ethernet_phy_set(port_num, pd->phy_addr);
1388 1392
1389 if (pd->rx_queue_size) 1393 if (pd->rx_queue_size)
1390 mp->rx_ring_size = pd->rx_queue_size; 1394 mp->rx_ring_size = pd->rx_queue_size;
1391 1395
1392 if (pd->tx_queue_size) 1396 if (pd->tx_queue_size)
1393 mp->tx_ring_size = pd->tx_queue_size; 1397 mp->tx_ring_size = pd->tx_queue_size;
1394 1398
1395 if (pd->tx_sram_size) { 1399 if (pd->tx_sram_size) {
1396 mp->tx_sram_size = pd->tx_sram_size; 1400 mp->tx_sram_size = pd->tx_sram_size;
1397 mp->tx_sram_addr = pd->tx_sram_addr; 1401 mp->tx_sram_addr = pd->tx_sram_addr;
1398 } 1402 }
1399
1400 if (pd->rx_sram_size) {
1401 mp->rx_sram_size = pd->rx_sram_size;
1402 mp->rx_sram_addr = pd->rx_sram_addr;
1403 }
1404 1403
1405 duplex = pd->duplex; 1404 if (pd->rx_sram_size) {
1406 speed = pd->speed; 1405 mp->rx_sram_size = pd->rx_sram_size;
1406 mp->rx_sram_addr = pd->rx_sram_addr;
1407 } 1407 }
1408 1408
1409 duplex = pd->duplex;
1410 speed = pd->speed;
1411
1412 mp->port_num = port_num;
1413
1409 /* Hook up MII support for ethtool */ 1414 /* Hook up MII support for ethtool */
1410 mp->mii.dev = dev; 1415 mp->mii.dev = dev;
1411 mp->mii.mdio_read = mv643xx_mdio_read; 1416 mp->mii.mdio_read = mv643xx_mdio_read;
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
index 33c5fafdbb..7d4e90cf49 100644
--- a/drivers/net/mv643xx_eth.h
+++ b/drivers/net/mv643xx_eth.h
@@ -9,6 +9,8 @@
9 9
10#include <linux/mv643xx.h> 10#include <linux/mv643xx.h>
11 11
12#include <asm/dma-mapping.h>
13
12/* Checksum offload for Tx works for most packets, but 14/* Checksum offload for Tx works for most packets, but
13 * fails if previous packet sent did not use hw csum 15 * fails if previous packet sent did not use hw csum
14 */ 16 */
@@ -42,23 +44,12 @@
42#define MAX_DESCS_PER_SKB 1 44#define MAX_DESCS_PER_SKB 1
43#endif 45#endif
44 46
45/*
46 * The MV643XX HW requires 8-byte alignment. However, when I/O
47 * is non-cache-coherent, we need to ensure that the I/O buffers
48 * we use don't share cache lines with other data.
49 */
50#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_NOT_COHERENT_CACHE)
51#define ETH_DMA_ALIGN L1_CACHE_BYTES
52#else
53#define ETH_DMA_ALIGN 8
54#endif
55
56#define ETH_VLAN_HLEN 4 47#define ETH_VLAN_HLEN 4
57#define ETH_FCS_LEN 4 48#define ETH_FCS_LEN 4
58#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */ 49#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
59#define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \ 50#define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \
60 ETH_VLAN_HLEN + ETH_FCS_LEN) 51 ETH_VLAN_HLEN + ETH_FCS_LEN)
61#define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + ETH_DMA_ALIGN) 52#define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + dma_get_cache_alignment())
62 53
63#define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */ 54#define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */
64#define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */ 55#define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */
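The mv643xx_eth changes drop the compile-time ETH_DMA_ALIGN constant in favour of dma_get_cache_alignment(), so receive buffers are sized and aligned to the runtime cache-line requirement on non-coherent platforms. A hedged sketch of that refill pattern (the size argument and helper name are illustrative, not the driver's own):

    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>

    /* Sketch: allocate an rx skb with headroom for cache alignment and
     * shift its data pointer so the DMA buffer does not share a cache
     * line with unrelated data on non-coherent systems. */
    static struct sk_buff *example_alloc_aligned_rx_skb(unsigned int rx_size)
    {
            struct sk_buff *skb;
            int unaligned;

            skb = dev_alloc_skb(rx_size + dma_get_cache_alignment());
            if (!skb)
                    return NULL;

            unaligned = (u32)(unsigned long)skb->data &
                        (dma_get_cache_alignment() - 1);
            if (unaligned)
                    skb_reserve(skb, dma_get_cache_alignment() - unaligned);

            return skb;
    }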
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 030924fb1a..b05dc6ed7f 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1,7 +1,7 @@
1/************************************************************************* 1/*************************************************************************
2 * myri10ge.c: Myricom Myri-10G Ethernet driver. 2 * myri10ge.c: Myricom Myri-10G Ethernet driver.
3 * 3 *
4 * Copyright (C) 2005, 2006 Myricom, Inc. 4 * Copyright (C) 2005 - 2007 Myricom, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
@@ -16,17 +16,17 @@
16 * may be used to endorse or promote products derived from this software 16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission. 17 * without specific prior written permission.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 * 30 *
31 * 31 *
32 * If the eeprom on your board is not recent enough, you will need to get a 32 * If the eeprom on your board is not recent enough, you will need to get a
@@ -195,6 +195,10 @@ struct myri10ge_priv {
195 char *fw_name; 195 char *fw_name;
196 char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE]; 196 char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
197 char fw_version[128]; 197 char fw_version[128];
198 int fw_ver_major;
199 int fw_ver_minor;
200 int fw_ver_tiny;
201 int adopted_rx_filter_bug;
198 u8 mac_addr[6]; /* eeprom mac address */ 202 u8 mac_addr[6]; /* eeprom mac address */
199 unsigned long serial_number; 203 unsigned long serial_number;
200 int vendor_specific_offset; 204 int vendor_specific_offset;
@@ -447,7 +451,6 @@ myri10ge_validate_firmware(struct myri10ge_priv *mgp,
447 struct mcp_gen_header *hdr) 451 struct mcp_gen_header *hdr)
448{ 452{
449 struct device *dev = &mgp->pdev->dev; 453 struct device *dev = &mgp->pdev->dev;
450 int major, minor;
451 454
452 /* check firmware type */ 455 /* check firmware type */
453 if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) { 456 if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
@@ -458,9 +461,11 @@ myri10ge_validate_firmware(struct myri10ge_priv *mgp,
458 /* save firmware version for ethtool */ 461 /* save firmware version for ethtool */
459 strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version)); 462 strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));
460 463
461 sscanf(mgp->fw_version, "%d.%d", &major, &minor); 464 sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major,
465 &mgp->fw_ver_minor, &mgp->fw_ver_tiny);
462 466
463 if (!(major == MXGEFW_VERSION_MAJOR && minor == MXGEFW_VERSION_MINOR)) { 467 if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR
468 && mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
464 dev_err(dev, "Found firmware version %s\n", mgp->fw_version); 469 dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
465 dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR, 470 dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
466 MXGEFW_VERSION_MINOR); 471 MXGEFW_VERSION_MINOR);
@@ -561,6 +566,18 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
561 memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes); 566 memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
562 status = myri10ge_validate_firmware(mgp, hdr); 567 status = myri10ge_validate_firmware(mgp, hdr);
563 kfree(hdr); 568 kfree(hdr);
569
570 /* check to see if adopted firmware has bug where adopting
571 * it will cause broadcasts to be filtered unless the NIC
572 * is kept in ALLMULTI mode */
573 if (mgp->fw_ver_major == 1 && mgp->fw_ver_minor == 4 &&
574 mgp->fw_ver_tiny >= 4 && mgp->fw_ver_tiny <= 11) {
575 mgp->adopted_rx_filter_bug = 1;
576 dev_warn(dev, "Adopting fw %d.%d.%d: "
577 "working around rx filter bug\n",
578 mgp->fw_ver_major, mgp->fw_ver_minor,
579 mgp->fw_ver_tiny);
580 }
564 return status; 581 return status;
565} 582}
566 583
@@ -794,6 +811,8 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
794 status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr); 811 status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
795 myri10ge_change_promisc(mgp, 0, 0); 812 myri10ge_change_promisc(mgp, 0, 0);
796 myri10ge_change_pause(mgp, mgp->pause); 813 myri10ge_change_pause(mgp, mgp->pause);
814 if (mgp->adopted_rx_filter_bug)
815 (void)myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
797 return status; 816 return status;
798} 817}
799 818
@@ -2239,7 +2258,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
2239 myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1); 2258 myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);
2240 2259
2241 /* This firmware is known to not support multicast */ 2260 /* This firmware is known to not support multicast */
2242 if (!mgp->fw_multicast_support) 2261 if (!mgp->fw_multicast_support || mgp->adopted_rx_filter_bug)
2243 return; 2262 return;
2244 2263
2245 /* Disable multicast filtering */ 2264 /* Disable multicast filtering */
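The myri10ge hunks store the parsed firmware version (major/minor/tiny) and, when the driver adopts already-running firmware in the 1.4.4 to 1.4.11 range, keep the NIC in ALLMULTI to work around a broadcast-filtering bug. A rough sketch of the version check, assuming the same "major.minor.tiny" string format:

    #include <linux/kernel.h>

    /* Sketch: split a "major.minor.tiny" firmware string and decide
     * whether the adopted-firmware rx filter workaround applies. */
    static int example_needs_allmulti_workaround(const char *fw_version)
    {
            int major = 0, minor = 0, tiny = 0;

            sscanf(fw_version, "%d.%d.%d", &major, &minor, &tiny);

            /* Adopted firmware 1.4.4 .. 1.4.11 filters broadcasts unless
             * the NIC stays in ALLMULTI mode. */
            return major == 1 && minor == 4 && tiny >= 4 && tiny <= 11;
    }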
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index adf29dd667..c6172a77a6 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -260,7 +260,7 @@ static const struct {
260 260
261static const struct pci_device_id natsemi_pci_tbl[] __devinitdata = { 261static const struct pci_device_id natsemi_pci_tbl[] __devinitdata = {
262 { PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 }, 262 { PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 },
263 { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 263 { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
264 { } /* terminate list */ 264 { } /* terminate list */
265}; 265};
266MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl); 266MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
@@ -2024,6 +2024,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
2024 struct netdev_private *np = netdev_priv(dev); 2024 struct netdev_private *np = netdev_priv(dev);
2025 void __iomem * ioaddr = ns_ioaddr(dev); 2025 void __iomem * ioaddr = ns_ioaddr(dev);
2026 unsigned entry; 2026 unsigned entry;
2027 unsigned long flags;
2027 2028
2028 /* Note: Ordering is important here, set the field with the 2029 /* Note: Ordering is important here, set the field with the
2029 "ownership" bit last, and only then increment cur_tx. */ 2030 "ownership" bit last, and only then increment cur_tx. */
@@ -2037,7 +2038,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
2037 2038
2038 np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]); 2039 np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);
2039 2040
2040 spin_lock_irq(&np->lock); 2041 spin_lock_irqsave(&np->lock, flags);
2041 2042
2042 if (!np->hands_off) { 2043 if (!np->hands_off) {
2043 np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len); 2044 np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
@@ -2056,7 +2057,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
2056 dev_kfree_skb_irq(skb); 2057 dev_kfree_skb_irq(skb);
2057 np->stats.tx_dropped++; 2058 np->stats.tx_dropped++;
2058 } 2059 }
2059 spin_unlock_irq(&np->lock); 2060 spin_unlock_irqrestore(&np->lock, flags);
2060 2061
2061 dev->trans_start = jiffies; 2062 dev->trans_start = jiffies;
2062 2063
@@ -2222,6 +2223,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
2222 pkt_len = (desc_status & DescSizeMask) - 4; 2223 pkt_len = (desc_status & DescSizeMask) - 4;
2223 if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){ 2224 if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
2224 if (desc_status & DescMore) { 2225 if (desc_status & DescMore) {
2226 unsigned long flags;
2227
2225 if (netif_msg_rx_err(np)) 2228 if (netif_msg_rx_err(np))
2226 printk(KERN_WARNING 2229 printk(KERN_WARNING
2227 "%s: Oversized(?) Ethernet " 2230 "%s: Oversized(?) Ethernet "
@@ -2236,12 +2239,12 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
2236 * reset procedure documented in 2239 * reset procedure documented in
2237 * AN-1287. */ 2240 * AN-1287. */
2238 2241
2239 spin_lock_irq(&np->lock); 2242 spin_lock_irqsave(&np->lock, flags);
2240 reset_rx(dev); 2243 reset_rx(dev);
2241 reinit_rx(dev); 2244 reinit_rx(dev);
2242 writel(np->ring_dma, ioaddr + RxRingPtr); 2245 writel(np->ring_dma, ioaddr + RxRingPtr);
2243 check_link(dev); 2246 check_link(dev);
2244 spin_unlock_irq(&np->lock); 2247 spin_unlock_irqrestore(&np->lock, flags);
2245 2248
2246 /* We'll enable RX on exit from this 2249 /* We'll enable RX on exit from this
2247 * function. */ 2250 * function. */
@@ -2396,8 +2399,19 @@ static struct net_device_stats *get_stats(struct net_device *dev)
2396#ifdef CONFIG_NET_POLL_CONTROLLER 2399#ifdef CONFIG_NET_POLL_CONTROLLER
2397static void natsemi_poll_controller(struct net_device *dev) 2400static void natsemi_poll_controller(struct net_device *dev)
2398{ 2401{
2402 struct netdev_private *np = netdev_priv(dev);
2403
2399 disable_irq(dev->irq); 2404 disable_irq(dev->irq);
2400 intr_handler(dev->irq, dev); 2405
2406 /*
2407 * A real interrupt might have already reached us at this point
2408 * but NAPI might not have called us back yet. As the interrupt
2409 * status register is cleared by reading, we should prevent an
2410 * interrupt loss in this case...
2411 */
2412 if (!np->intr_status)
2413 intr_handler(dev->irq, dev);
2414
2401 enable_irq(dev->irq); 2415 enable_irq(dev->irq);
2402} 2416}
2403#endif 2417#endif
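The natsemi hunks convert spin_lock_irq()/spin_unlock_irq() pairs to the irqsave/irqrestore variants and make the poll controller skip the handler when an interrupt status is already latched; the irqsave form is safe whether or not the caller already runs with local interrupts disabled, which netpoll paths may. A small sketch of the locking side of that change, with illustrative names:

    #include <linux/spinlock.h>

    struct example_np {
            spinlock_t lock;
            int hands_off;
    };

    /* Sketch: irqsave locking lets the same path be entered both from
     * process context and from contexts that may already have local
     * interrupts disabled, without unconditionally re-enabling them on
     * unlock. */
    static void example_locked_tx_update(struct example_np *np)
    {
            unsigned long flags;

            spin_lock_irqsave(&np->lock, flags);
            if (!np->hands_off) {
                    /* ... hand the descriptor to the hardware ... */
            }
            spin_unlock_irqrestore(&np->lock, flags);
    }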
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 2807ef400f..81742e4e56 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -72,6 +72,8 @@
72#define FLASH_SECTOR_SIZE (64 * 1024) 72#define FLASH_SECTOR_SIZE (64 * 1024)
73#define FLASH_TOTAL_SIZE (NUM_FLASH_SECTORS * FLASH_SECTOR_SIZE) 73#define FLASH_TOTAL_SIZE (NUM_FLASH_SECTORS * FLASH_SECTOR_SIZE)
74 74
75#define PHAN_VENDOR_ID 0x4040
76
75#define RCV_DESC_RINGSIZE \ 77#define RCV_DESC_RINGSIZE \
76 (sizeof(struct rcv_desc) * adapter->max_rx_desc_count) 78 (sizeof(struct rcv_desc) * adapter->max_rx_desc_count)
77#define STATUS_DESC_RINGSIZE \ 79#define STATUS_DESC_RINGSIZE \
@@ -82,7 +84,7 @@
82 (sizeof(struct netxen_cmd_buffer) * adapter->max_tx_desc_count) 84 (sizeof(struct netxen_cmd_buffer) * adapter->max_tx_desc_count)
83#define RCV_BUFFSIZE \ 85#define RCV_BUFFSIZE \
84 (sizeof(struct netxen_rx_buffer) * rcv_desc->max_rx_desc_count) 86 (sizeof(struct netxen_rx_buffer) * rcv_desc->max_rx_desc_count)
85#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a))) 87#define find_diff_among(a,b,range) ((a)<=(b)?((b)-(a)):((b)+(range)-(a)))
86 88
87#define NETXEN_NETDEV_STATUS 0x1 89#define NETXEN_NETDEV_STATUS 0x1
88#define NETXEN_RCV_PRODUCER_OFFSET 0 90#define NETXEN_RCV_PRODUCER_OFFSET 0
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 6252e9a872..986ef98db2 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -82,8 +82,7 @@ static const struct netxen_nic_stats netxen_nic_gstrings_stats[] = {
82#define NETXEN_NIC_STATS_LEN ARRAY_SIZE(netxen_nic_gstrings_stats) 82#define NETXEN_NIC_STATS_LEN ARRAY_SIZE(netxen_nic_gstrings_stats)
83 83
84static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = { 84static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = {
85 "Register_Test_offline", "EEPROM_Test_offline", 85 "Register_Test_on_offline",
86 "Interrupt_Test_offline", "Loopback_Test_offline",
87 "Link_Test_on_offline" 86 "Link_Test_on_offline"
88}; 87};
89 88
@@ -394,19 +393,12 @@ netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
394 } 393 }
395} 394}
396 395
397static void
398netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
399{
400 wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
401 /* options can be added depending upon the mode */
402 wol->wolopts = 0;
403}
404
405static u32 netxen_nic_test_link(struct net_device *dev) 396static u32 netxen_nic_test_link(struct net_device *dev)
406{ 397{
407 struct netxen_port *port = netdev_priv(dev); 398 struct netxen_port *port = netdev_priv(dev);
408 struct netxen_adapter *adapter = port->adapter; 399 struct netxen_adapter *adapter = port->adapter;
409 __u32 status; 400 __u32 status;
401 int val;
410 402
411 /* read which mode */ 403 /* read which mode */
412 if (adapter->ahw.board_type == NETXEN_NIC_GBE) { 404 if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
@@ -415,11 +407,13 @@ static u32 netxen_nic_test_link(struct net_device *dev)
415 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, 407 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
416 &status) != 0) 408 &status) != 0)
417 return -EIO; 409 return -EIO;
418 else 410 else {
419 return (netxen_get_phy_link(status)); 411 val = netxen_get_phy_link(status);
412 return !val;
413 }
420 } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) { 414 } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
421 int val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE)); 415 val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE));
422 return val == XG_LINK_UP; 416 return (val == XG_LINK_UP) ? 0 : 1;
423 } 417 }
424 return -EIO; 418 return -EIO;
425} 419}
@@ -606,100 +600,21 @@ netxen_nic_set_pauseparam(struct net_device *dev,
606 600
607static int netxen_nic_reg_test(struct net_device *dev) 601static int netxen_nic_reg_test(struct net_device *dev)
608{ 602{
609 struct netxen_port *port = netdev_priv(dev); 603 struct netxen_adapter *adapter = netdev_priv(dev);
610 struct netxen_adapter *adapter = port->adapter; 604 u32 data_read, data_written;
611 u32 data_read, data_written, save;
612 __u32 mode;
613
614 /*
615 * first test the "Read Only" registers by writing which mode
616 */
617 netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode);
618 if (netxen_get_niu_enable_ge(mode)) { /* GB Mode */
619 netxen_nic_read_w0(adapter,
620 NETXEN_NIU_GB_MII_MGMT_STATUS(port->portnum),
621 &data_read);
622
623 save = data_read;
624 if (data_read)
625 data_written = data_read & NETXEN_NIC_INVALID_DATA;
626 else
627 data_written = NETXEN_NIC_INVALID_DATA;
628 netxen_nic_write_w0(adapter,
629 NETXEN_NIU_GB_MII_MGMT_STATUS(port->
630 portnum),
631 data_written);
632 netxen_nic_read_w0(adapter,
633 NETXEN_NIU_GB_MII_MGMT_STATUS(port->portnum),
634 &data_read);
635
636 if (data_written == data_read) {
637 netxen_nic_write_w0(adapter,
638 NETXEN_NIU_GB_MII_MGMT_STATUS(port->
639 portnum),
640 save);
641
642 return 0;
643 }
644
645 /* netxen_niu_gb_mii_mgmt_indicators is read only */
646 netxen_nic_read_w0(adapter,
647 NETXEN_NIU_GB_MII_MGMT_INDICATE(port->
648 portnum),
649 &data_read);
650
651 save = data_read;
652 if (data_read)
653 data_written = data_read & NETXEN_NIC_INVALID_DATA;
654 else
655 data_written = NETXEN_NIC_INVALID_DATA;
656 netxen_nic_write_w0(adapter,
657 NETXEN_NIU_GB_MII_MGMT_INDICATE(port->
658 portnum),
659 data_written);
660
661 netxen_nic_read_w0(adapter,
662 NETXEN_NIU_GB_MII_MGMT_INDICATE(port->
663 portnum),
664 &data_read);
665
666 if (data_written == data_read) {
667 netxen_nic_write_w0(adapter,
668 NETXEN_NIU_GB_MII_MGMT_INDICATE
669 (port->portnum), save);
670 return 0;
671 }
672 605
673 /* netxen_niu_gb_interface_status is read only */ 606 netxen_nic_read_w0(adapter, NETXEN_PCIX_PH_REG(0), &data_read);
674 netxen_nic_read_w0(adapter, 607 if ((data_read & 0xffff) != PHAN_VENDOR_ID)
675 NETXEN_NIU_GB_INTERFACE_STATUS(port-> 608 return 1;
676 portnum),
677 &data_read);
678 609
679 save = data_read; 610 data_written = (u32)0xa5a5a5a5;
680 if (data_read)
681 data_written = data_read & NETXEN_NIC_INVALID_DATA;
682 else
683 data_written = NETXEN_NIC_INVALID_DATA;
684 netxen_nic_write_w0(adapter,
685 NETXEN_NIU_GB_INTERFACE_STATUS(port->
686 portnum),
687 data_written);
688 611
689 netxen_nic_read_w0(adapter, 612 netxen_nic_reg_write(adapter, CRB_SCRATCHPAD_TEST, data_written);
690 NETXEN_NIU_GB_INTERFACE_STATUS(port-> 613 data_read = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_SCRATCHPAD_TEST));
691 portnum), 614 if (data_written != data_read)
692 &data_read); 615 return 1;
693 616
694 if (data_written == data_read) { 617 return 0;
695 netxen_nic_write_w0(adapter,
696 NETXEN_NIU_GB_INTERFACE_STATUS
697 (port->portnum), save);
698
699 return 0;
700 }
701 } /* GB Mode */
702 return 1;
703} 618}
704 619
705static int netxen_nic_diag_test_count(struct net_device *dev) 620static int netxen_nic_diag_test_count(struct net_device *dev)
@@ -713,26 +628,20 @@ netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
713{ 628{
714 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { /* offline tests */ 629 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { /* offline tests */
715 /* link test */ 630 /* link test */
716 if (!(data[4] = (u64) netxen_nic_test_link(dev))) 631 if ((data[1] = (u64) netxen_nic_test_link(dev)))
717 eth_test->flags |= ETH_TEST_FL_FAILED; 632 eth_test->flags |= ETH_TEST_FL_FAILED;
718 633
719 if (netif_running(dev))
720 dev->stop(dev);
721
722 /* register tests */ 634 /* register tests */
723 if (!(data[0] = netxen_nic_reg_test(dev))) 635 if ((data[0] = netxen_nic_reg_test(dev)))
724 eth_test->flags |= ETH_TEST_FL_FAILED; 636 eth_test->flags |= ETH_TEST_FL_FAILED;
725 /* other tests pass as of now */
726 data[1] = data[2] = data[3] = 1;
727 if (netif_running(dev))
728 dev->open(dev);
729 } else { /* online tests */ 637 } else { /* online tests */
730 /* link test */ 638 /* register tests */
731 if (!(data[4] = (u64) netxen_nic_test_link(dev))) 639 if((data[0] = netxen_nic_reg_test(dev)))
732 eth_test->flags |= ETH_TEST_FL_FAILED; 640 eth_test->flags |= ETH_TEST_FL_FAILED;
733 641
734 /* other tests pass by default */ 642 /* link test */
735 data[0] = data[1] = data[2] = data[3] = 1; 643 if ((data[1] = (u64) netxen_nic_test_link(dev)))
644 eth_test->flags |= ETH_TEST_FL_FAILED;
736 } 645 }
737} 646}
738 647
@@ -783,7 +692,6 @@ struct ethtool_ops netxen_nic_ethtool_ops = {
783 .get_drvinfo = netxen_nic_get_drvinfo, 692 .get_drvinfo = netxen_nic_get_drvinfo,
784 .get_regs_len = netxen_nic_get_regs_len, 693 .get_regs_len = netxen_nic_get_regs_len,
785 .get_regs = netxen_nic_get_regs, 694 .get_regs = netxen_nic_get_regs,
786 .get_wol = netxen_nic_get_wol,
787 .get_link = ethtool_op_get_link, 695 .get_link = ethtool_op_get_link,
788 .get_eeprom_len = netxen_nic_get_eeprom_len, 696 .get_eeprom_len = netxen_nic_get_eeprom_len,
789 .get_eeprom = netxen_nic_get_eeprom, 697 .get_eeprom = netxen_nic_get_eeprom,
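The rewritten netxen register test above verifies the vendor ID in a PCI header register, then writes a fixed pattern to a scratchpad CRB register and reads it back; a non-zero return marks the ethtool self-test as failed. A hedged sketch of the same write/read-back idea, using placeholder accessors rather than the NetXen ones:

    #include <linux/types.h>

    /* Sketch: generic write/read-back register self-test. Returns 0 on
     * success and 1 on mismatch, matching the convention where a
     * non-zero result sets ETH_TEST_FL_FAILED. */
    static int example_scratchpad_test(void (*reg_write)(u32 off, u32 val),
                                       u32 (*reg_read)(u32 off),
                                       u32 scratch_off)
    {
            const u32 pattern = 0xa5a5a5a5;

            reg_write(scratch_off, pattern);
            return reg_read(scratch_off) != pattern;
    }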
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 7195af3e8f..a2877f33fa 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -242,10 +242,11 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter)
242 adapter->cmd_consumer = (uint32_t *) (((char *)addr) + 242 adapter->cmd_consumer = (uint32_t *) (((char *)addr) +
243 sizeof(struct netxen_ring_ctx)); 243 sizeof(struct netxen_ring_ctx));
244 244
245 addr = pci_alloc_consistent(adapter->ahw.pdev, 245 addr = netxen_alloc(adapter->ahw.pdev,
246 sizeof(struct cmd_desc_type0) * 246 sizeof(struct cmd_desc_type0) *
247 adapter->max_tx_desc_count, 247 adapter->max_tx_desc_count,
248 (dma_addr_t *) & hw->cmd_desc_phys_addr); 248 (dma_addr_t *) & hw->cmd_desc_phys_addr,
249 &adapter->ahw.cmd_desc_pdev);
249 printk("cmd_desc_phys_addr: 0x%llx\n", (u64) hw->cmd_desc_phys_addr); 250 printk("cmd_desc_phys_addr: 0x%llx\n", (u64) hw->cmd_desc_phys_addr);
250 251
251 if (addr == NULL) { 252 if (addr == NULL) {
@@ -507,8 +508,8 @@ void netxen_nic_pci_change_crbwindow(struct netxen_adapter *adapter, u32 wndw)
507void netxen_load_firmware(struct netxen_adapter *adapter) 508void netxen_load_firmware(struct netxen_adapter *adapter)
508{ 509{
509 int i; 510 int i;
510 long data, size = 0; 511 u32 data, size = 0;
511 long flashaddr = NETXEN_FLASH_BASE, memaddr = NETXEN_PHANTOM_MEM_BASE; 512 u32 flashaddr = NETXEN_FLASH_BASE, memaddr = NETXEN_PHANTOM_MEM_BASE;
512 u64 off; 513 u64 off;
513 void __iomem *addr; 514 void __iomem *addr;
514 515
@@ -950,6 +951,7 @@ void netxen_nic_flash_print(struct netxen_adapter *adapter)
950 netxen_nic_driver_name); 951 netxen_nic_driver_name);
951 return; 952 return;
952 } 953 }
954 *ptr32 = le32_to_cpu(*ptr32);
953 ptr32++; 955 ptr32++;
954 addr += sizeof(u32); 956 addr += sizeof(u32);
955 } 957 }
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 2f32436678..586d32b676 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -38,13 +38,13 @@
38#include "netxen_nic_phan_reg.h" 38#include "netxen_nic_phan_reg.h"
39 39
40struct crb_addr_pair { 40struct crb_addr_pair {
41 long addr; 41 u32 addr;
42 long data; 42 u32 data;
43}; 43};
44 44
45#define NETXEN_MAX_CRB_XFORM 60 45#define NETXEN_MAX_CRB_XFORM 60
46static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM]; 46static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
47#define NETXEN_ADDR_ERROR ((unsigned long ) 0xffffffff ) 47#define NETXEN_ADDR_ERROR (0xffffffff)
48 48
49#define crb_addr_transform(name) \ 49#define crb_addr_transform(name) \
50 crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \ 50 crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
@@ -252,10 +252,10 @@ void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
252 * netxen_decode_crb_addr(0 - utility to translate from internal Phantom CRB 252 * netxen_decode_crb_addr(0 - utility to translate from internal Phantom CRB
253 * address to external PCI CRB address. 253 * address to external PCI CRB address.
254 */ 254 */
255unsigned long netxen_decode_crb_addr(unsigned long addr) 255u32 netxen_decode_crb_addr(u32 addr)
256{ 256{
257 int i; 257 int i;
258 unsigned long base_addr, offset, pci_base; 258 u32 base_addr, offset, pci_base;
259 259
260 crb_addr_transform_setup(); 260 crb_addr_transform_setup();
261 261
@@ -499,7 +499,10 @@ static inline int do_rom_fast_write_words(struct netxen_adapter *adapter,
499 while(1) { 499 while(1) {
500 int data1; 500 int data1;
501 501
502 do_rom_fast_read(adapter, addridx, &data1); 502 ret = do_rom_fast_read(adapter, addridx, &data1);
503 if (ret < 0)
504 return ret;
505
503 if (data1 == data) 506 if (data1 == data)
504 break; 507 break;
505 508
@@ -753,7 +756,7 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
753 int n, i; 756 int n, i;
754 int init_delay = 0; 757 int init_delay = 0;
755 struct crb_addr_pair *buf; 758 struct crb_addr_pair *buf;
756 unsigned long off; 759 u32 off;
757 760
758 /* resetall */ 761 /* resetall */
759 status = netxen_nic_get_board_info(adapter); 762 status = netxen_nic_get_board_info(adapter);
@@ -810,14 +813,13 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
810 if (verbose) 813 if (verbose)
811 printk("%s: PCI: 0x%08x == 0x%08x\n", 814 printk("%s: PCI: 0x%08x == 0x%08x\n",
812 netxen_nic_driver_name, (unsigned int) 815 netxen_nic_driver_name, (unsigned int)
813 netxen_decode_crb_addr((unsigned long) 816 netxen_decode_crb_addr(addr), val);
814 addr), val);
815 } 817 }
816 for (i = 0; i < n; i++) { 818 for (i = 0; i < n; i++) {
817 819
818 off = netxen_decode_crb_addr((unsigned long)buf[i].addr); 820 off = netxen_decode_crb_addr(buf[i].addr);
819 if (off == NETXEN_ADDR_ERROR) { 821 if (off == NETXEN_ADDR_ERROR) {
820 printk(KERN_ERR"CRB init value out of range %lx\n", 822 printk(KERN_ERR"CRB init value out of range %x\n",
821 buf[i].addr); 823 buf[i].addr);
822 continue; 824 continue;
823 } 825 }
@@ -924,6 +926,10 @@ int netxen_initialize_adapter_offload(struct netxen_adapter *adapter)
924void netxen_free_adapter_offload(struct netxen_adapter *adapter) 926void netxen_free_adapter_offload(struct netxen_adapter *adapter)
925{ 927{
926 if (adapter->dummy_dma.addr) { 928 if (adapter->dummy_dma.addr) {
929 writel(0, NETXEN_CRB_NORMALIZE(adapter,
930 CRB_HOST_DUMMY_BUF_ADDR_HI));
931 writel(0, NETXEN_CRB_NORMALIZE(adapter,
932 CRB_HOST_DUMMY_BUF_ADDR_LO));
927 pci_free_consistent(adapter->ahw.pdev, 933 pci_free_consistent(adapter->ahw.pdev,
928 NETXEN_HOST_DUMMY_DMA_SIZE, 934 NETXEN_HOST_DUMMY_DMA_SIZE,
929 adapter->dummy_dma.addr, 935 adapter->dummy_dma.addr,
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 225ff55527..7d2525e76a 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -42,8 +42,6 @@
42#include <linux/dma-mapping.h> 42#include <linux/dma-mapping.h>
43#include <linux/vmalloc.h> 43#include <linux/vmalloc.h>
44 44
45#define PHAN_VENDOR_ID 0x4040
46
47MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver"); 45MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
48MODULE_LICENSE("GPL"); 46MODULE_LICENSE("GPL");
49MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID); 47MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
@@ -379,6 +377,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
379 netdev->tx_timeout = netxen_tx_timeout; 377 netdev->tx_timeout = netxen_tx_timeout;
380 netdev->watchdog_timeo = HZ; 378 netdev->watchdog_timeo = HZ;
381 379
380 netxen_nic_change_mtu(netdev, netdev->mtu);
381
382 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); 382 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
383 netdev->poll = netxen_nic_poll; 383 netdev->poll = netxen_nic_poll;
384 netdev->weight = NETXEN_NETDEV_WEIGHT; 384 netdev->weight = NETXEN_NETDEV_WEIGHT;
@@ -434,13 +434,11 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
434 adapter->port_count++; 434 adapter->port_count++;
435 adapter->port[i] = port; 435 adapter->port[i] = port;
436 } 436 }
437#ifndef CONFIG_PPC64
438 writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)); 437 writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
439 netxen_pinit_from_rom(adapter, 0); 438 netxen_pinit_from_rom(adapter, 0);
440 udelay(500); 439 udelay(500);
441 netxen_load_firmware(adapter); 440 netxen_load_firmware(adapter);
442 netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); 441 netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
443#endif
444 /* 442 /*
445 * delay a while to ensure that the Pegs are up & running. 443 * delay a while to ensure that the Pegs are up & running.
446 * Otherwise, we might see some flaky behaviour. 444 * Otherwise, we might see some flaky behaviour.
@@ -525,14 +523,17 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
525 if (adapter == NULL) 523 if (adapter == NULL)
526 return; 524 return;
527 525
526 if (adapter->irq)
527 free_irq(adapter->irq, adapter);
528 netxen_nic_stop_all_ports(adapter); 528 netxen_nic_stop_all_ports(adapter);
529 /* leave the hw in the same state as reboot */ 529 /* leave the hw in the same state as reboot */
530 netxen_pinit_from_rom(adapter, 0);
531 writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)); 530 writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
531 netxen_pinit_from_rom(adapter, 0);
532 udelay(500);
532 netxen_load_firmware(adapter); 533 netxen_load_firmware(adapter);
533 netxen_free_adapter_offload(adapter); 534 netxen_free_adapter_offload(adapter);
534 535
535 udelay(500); /* Delay for a while to drain the DMA engines */ 536 mdelay(1000); /* Delay for a while to drain the DMA engines */
536 for (i = 0; i < adapter->port_count; i++) { 537 for (i = 0; i < adapter->port_count; i++) {
537 port = adapter->port[i]; 538 port = adapter->port[i];
538 if ((port) && (port->netdev)) { 539 if ((port) && (port->netdev)) {
@@ -543,7 +544,6 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
543 544
544 if ((adapter->flags & NETXEN_NIC_MSI_ENABLED)) 545 if ((adapter->flags & NETXEN_NIC_MSI_ENABLED))
545 pci_disable_msi(pdev); 546 pci_disable_msi(pdev);
546 pci_set_drvdata(pdev, NULL);
547 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) 547 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
548 netxen_free_hw_resources(adapter); 548 netxen_free_hw_resources(adapter);
549 549
@@ -554,6 +554,7 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
554 554
555 pci_release_regions(pdev); 555 pci_release_regions(pdev);
556 pci_disable_device(pdev); 556 pci_disable_device(pdev);
557 pci_set_drvdata(pdev, NULL);
557 558
558 for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) { 559 for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) {
559 recv_ctx = &adapter->recv_ctx[ctxid]; 560 recv_ctx = &adapter->recv_ctx[ctxid];
@@ -672,8 +673,6 @@ static int netxen_nic_close(struct net_device *netdev)
672 673
673 if (!adapter->active_ports) { 674 if (!adapter->active_ports) {
674 netxen_nic_disable_int(adapter); 675 netxen_nic_disable_int(adapter);
675 if (adapter->irq)
676 free_irq(adapter->irq, adapter);
677 cmd_buff = adapter->cmd_buf_arr; 676 cmd_buff = adapter->cmd_buf_arr;
678 for (i = 0; i < adapter->max_tx_desc_count; i++) { 677 for (i = 0; i < adapter->max_tx_desc_count; i++) {
679 buffrag = cmd_buff->frag_array; 678 buffrag = cmd_buff->frag_array;
@@ -1155,8 +1154,8 @@ static void __exit netxen_exit_module(void)
1155 /* 1154 /*
1156 * Wait for some time to allow the dma to drain, if any. 1155 * Wait for some time to allow the dma to drain, if any.
1157 */ 1156 */
1158 destroy_workqueue(netxen_workq);
1159 pci_unregister_driver(&netxen_driver); 1157 pci_unregister_driver(&netxen_driver);
1158 destroy_workqueue(netxen_workq);
1160} 1159}
1161 1160
1162module_exit(netxen_exit_module); 1161module_exit(netxen_exit_module);
diff --git a/drivers/net/netxen/netxen_nic_phan_reg.h b/drivers/net/netxen/netxen_nic_phan_reg.h
index 7879f855af..0c7c94328b 100644
--- a/drivers/net/netxen/netxen_nic_phan_reg.h
+++ b/drivers/net/netxen/netxen_nic_phan_reg.h
@@ -102,6 +102,9 @@
102#define CRB_CMD_CONSUMER_OFFSET_1 NETXEN_NIC_REG(0x1b0) 102#define CRB_CMD_CONSUMER_OFFSET_1 NETXEN_NIC_REG(0x1b0)
103#define CRB_TEMP_STATE NETXEN_NIC_REG(0x1b4) 103#define CRB_TEMP_STATE NETXEN_NIC_REG(0x1b4)
104 104
105/* used for ethtool tests */
106#define CRB_SCRATCHPAD_TEST NETXEN_NIC_REG(0x280)
107
105/* 108/*
106 * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address 109 * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
107 * which can be read by the Phantom host to get producer/consumer indexes from 110 * which can be read by the Phantom host to get producer/consumer indexes from
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index 196993a29b..a6f4b24b01 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -104,8 +104,6 @@ static int automatic_resume; /* experimental .. better should be zero */
104static int rfdadd; /* rfdadd=1 may be better for 8K MEM cards */ 104static int rfdadd; /* rfdadd=1 may be better for 8K MEM cards */
105static int fifo=0x8; /* don't change */ 105static int fifo=0x8; /* don't change */
106 106
107/* #define REALLY_SLOW_IO */
108
109#include <linux/module.h> 107#include <linux/module.h>
110#include <linux/kernel.h> 108#include <linux/kernel.h>
111#include <linux/string.h> 109#include <linux/string.h>
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 568daeb3e9..9ec6e9e54f 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -514,8 +514,7 @@ static void ns83820_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid
514 514
515 spin_lock_irq(&dev->misc_lock); 515 spin_lock_irq(&dev->misc_lock);
516 spin_lock(&dev->tx_lock); 516 spin_lock(&dev->tx_lock);
517 if (dev->vlgrp) 517 vlan_group_set_device(dev->vlgrp, vid, NULL);
518 dev->vlgrp->vlan_devices[vid] = NULL;
519 spin_unlock(&dev->tx_lock); 518 spin_unlock(&dev->tx_lock);
520 spin_unlock_irq(&dev->misc_lock); 519 spin_unlock_irq(&dev->misc_lock);
521} 520}
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 36f9d98827..4d94ba7899 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1234,14 +1234,14 @@ static void pcnet32_rx_entry(struct net_device *dev,
1234 skb_put(skb, pkt_len); /* Make room */ 1234 skb_put(skb, pkt_len); /* Make room */
1235 pci_dma_sync_single_for_cpu(lp->pci_dev, 1235 pci_dma_sync_single_for_cpu(lp->pci_dev,
1236 lp->rx_dma_addr[entry], 1236 lp->rx_dma_addr[entry],
1237 PKT_BUF_SZ - 2, 1237 pkt_len,
1238 PCI_DMA_FROMDEVICE); 1238 PCI_DMA_FROMDEVICE);
1239 eth_copy_and_sum(skb, 1239 eth_copy_and_sum(skb,
1240 (unsigned char *)(lp->rx_skbuff[entry]->data), 1240 (unsigned char *)(lp->rx_skbuff[entry]->data),
1241 pkt_len, 0); 1241 pkt_len, 0);
1242 pci_dma_sync_single_for_device(lp->pci_dev, 1242 pci_dma_sync_single_for_device(lp->pci_dev,
1243 lp->rx_dma_addr[entry], 1243 lp->rx_dma_addr[entry],
1244 PKT_BUF_SZ - 2, 1244 pkt_len,
1245 PCI_DMA_FROMDEVICE); 1245 PCI_DMA_FROMDEVICE);
1246 } 1246 }
1247 lp->stats.rx_bytes += skb->len; 1247 lp->stats.rx_bytes += skb->len;
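The pcnet32 hunk narrows the DMA sync window from the full PKT_BUF_SZ - 2 buffer to the actual packet length when a small packet is copied out of the receive ring. A condensed sketch of that copy-break sequence, assuming an rx buffer that stays mapped for the device (memcpy stands in for eth_copy_and_sum):

    #include <linux/pci.h>
    #include <linux/skbuff.h>
    #include <linux/string.h>

    /* Sketch: sync only the bytes the NIC actually wrote, copy them into
     * a fresh skb, then give ownership of the ring buffer back to the
     * device. */
    static void example_copybreak_rx(struct pci_dev *pdev, dma_addr_t dma,
                                     const u8 *ring_data, struct sk_buff *skb,
                                     unsigned int pkt_len)
    {
            skb_put(skb, pkt_len);
            pci_dma_sync_single_for_cpu(pdev, dma, pkt_len, PCI_DMA_FROMDEVICE);
            memcpy(skb->data, ring_data, pkt_len);
            pci_dma_sync_single_for_device(pdev, dma, pkt_len, PCI_DMA_FROMDEVICE);
    }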
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 860bb0f60f..ebfa2967cd 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -7,6 +7,12 @@
7 * 7 *
8 * Version: 0.7.0 8 * Version: 0.7.0
9 * 9 *
10 * 070228 : Fix to allow multiple sessions with same remote MAC and same
11 * session id by including the local device ifindex in the
12 * tuple identifying a session. This also ensures packets can't
13 * be injected into a session from interfaces other than the one
14 * specified by userspace. Florian Zumbiehl <florz@florz.de>
15 * (Oh, BTW, this one is YYMMDD, in case you were wondering ...)
10 * 220102 : Fix module use count on failure in pppoe_create, pppox_sk -acme 16 * 220102 : Fix module use count on failure in pppoe_create, pppox_sk -acme
11 * 030700 : Fixed connect logic to allow for disconnect. 17 * 030700 : Fixed connect logic to allow for disconnect.
12 * 270700 : Fixed potential SMP problems; we must protect against 18 * 270700 : Fixed potential SMP problems; we must protect against
@@ -127,14 +133,14 @@ static struct pppox_sock *item_hash_table[PPPOE_HASH_SIZE];
127 * Set/get/delete/rehash items (internal versions) 133 * Set/get/delete/rehash items (internal versions)
128 * 134 *
129 **********************************************************************/ 135 **********************************************************************/
130static struct pppox_sock *__get_item(unsigned long sid, unsigned char *addr) 136static struct pppox_sock *__get_item(unsigned long sid, unsigned char *addr, int ifindex)
131{ 137{
132 int hash = hash_item(sid, addr); 138 int hash = hash_item(sid, addr);
133 struct pppox_sock *ret; 139 struct pppox_sock *ret;
134 140
135 ret = item_hash_table[hash]; 141 ret = item_hash_table[hash];
136 142
137 while (ret && !cmp_addr(&ret->pppoe_pa, sid, addr)) 143 while (ret && !(cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex))
138 ret = ret->next; 144 ret = ret->next;
139 145
140 return ret; 146 return ret;
@@ -147,21 +153,19 @@ static int __set_item(struct pppox_sock *po)
147 153
148 ret = item_hash_table[hash]; 154 ret = item_hash_table[hash];
149 while (ret) { 155 while (ret) {
150 if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa)) 156 if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && ret->pppoe_ifindex == po->pppoe_ifindex)
151 return -EALREADY; 157 return -EALREADY;
152 158
153 ret = ret->next; 159 ret = ret->next;
154 } 160 }
155 161
156 if (!ret) { 162 po->next = item_hash_table[hash];
157 po->next = item_hash_table[hash]; 163 item_hash_table[hash] = po;
158 item_hash_table[hash] = po;
159 }
160 164
161 return 0; 165 return 0;
162} 166}
163 167
164static struct pppox_sock *__delete_item(unsigned long sid, char *addr) 168static struct pppox_sock *__delete_item(unsigned long sid, char *addr, int ifindex)
165{ 169{
166 int hash = hash_item(sid, addr); 170 int hash = hash_item(sid, addr);
167 struct pppox_sock *ret, **src; 171 struct pppox_sock *ret, **src;
@@ -170,7 +174,7 @@ static struct pppox_sock *__delete_item(unsigned long sid, char *addr)
170 src = &item_hash_table[hash]; 174 src = &item_hash_table[hash];
171 175
172 while (ret) { 176 while (ret) {
173 if (cmp_addr(&ret->pppoe_pa, sid, addr)) { 177 if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex) {
174 *src = ret->next; 178 *src = ret->next;
175 break; 179 break;
176 } 180 }
@@ -188,12 +192,12 @@ static struct pppox_sock *__delete_item(unsigned long sid, char *addr)
188 * 192 *
189 **********************************************************************/ 193 **********************************************************************/
190static inline struct pppox_sock *get_item(unsigned long sid, 194static inline struct pppox_sock *get_item(unsigned long sid,
191 unsigned char *addr) 195 unsigned char *addr, int ifindex)
192{ 196{
193 struct pppox_sock *po; 197 struct pppox_sock *po;
194 198
195 read_lock_bh(&pppoe_hash_lock); 199 read_lock_bh(&pppoe_hash_lock);
196 po = __get_item(sid, addr); 200 po = __get_item(sid, addr, ifindex);
197 if (po) 201 if (po)
198 sock_hold(sk_pppox(po)); 202 sock_hold(sk_pppox(po));
199 read_unlock_bh(&pppoe_hash_lock); 203 read_unlock_bh(&pppoe_hash_lock);
@@ -203,7 +207,15 @@ static inline struct pppox_sock *get_item(unsigned long sid,
203 207
204static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp) 208static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp)
205{ 209{
206 return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote); 210 struct net_device *dev = NULL;
211 int ifindex;
212
213 dev = dev_get_by_name(sp->sa_addr.pppoe.dev);
214 if(!dev)
215 return NULL;
216 ifindex = dev->ifindex;
217 dev_put(dev);
218 return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex);
207} 219}
208 220
209static inline int set_item(struct pppox_sock *po) 221static inline int set_item(struct pppox_sock *po)
@@ -220,12 +232,12 @@ static inline int set_item(struct pppox_sock *po)
220 return i; 232 return i;
221} 233}
222 234
223static inline struct pppox_sock *delete_item(unsigned long sid, char *addr) 235static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int ifindex)
224{ 236{
225 struct pppox_sock *ret; 237 struct pppox_sock *ret;
226 238
227 write_lock_bh(&pppoe_hash_lock); 239 write_lock_bh(&pppoe_hash_lock);
228 ret = __delete_item(sid, addr); 240 ret = __delete_item(sid, addr, ifindex);
229 write_unlock_bh(&pppoe_hash_lock); 241 write_unlock_bh(&pppoe_hash_lock);
230 242
231 return ret; 243 return ret;
@@ -391,7 +403,7 @@ static int pppoe_rcv(struct sk_buff *skb,
391 403
392 ph = (struct pppoe_hdr *) skb->nh.raw; 404 ph = (struct pppoe_hdr *) skb->nh.raw;
393 405
394 po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source); 406 po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
395 if (po != NULL) 407 if (po != NULL)
396 return sk_receive_skb(sk_pppox(po), skb, 0); 408 return sk_receive_skb(sk_pppox(po), skb, 0);
397drop: 409drop:
@@ -425,7 +437,7 @@ static int pppoe_disc_rcv(struct sk_buff *skb,
425 if (ph->code != PADT_CODE) 437 if (ph->code != PADT_CODE)
426 goto abort; 438 goto abort;
427 439
428 po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source); 440 po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
429 if (po) { 441 if (po) {
430 struct sock *sk = sk_pppox(po); 442 struct sock *sk = sk_pppox(po);
431 443
@@ -517,7 +529,7 @@ static int pppoe_release(struct socket *sock)
517 529
518 po = pppox_sk(sk); 530 po = pppox_sk(sk);
519 if (po->pppoe_pa.sid) { 531 if (po->pppoe_pa.sid) {
520 delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote); 532 delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_ifindex);
521 } 533 }
522 534
523 if (po->pppoe_dev) 535 if (po->pppoe_dev)
@@ -539,7 +551,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
539 int sockaddr_len, int flags) 551 int sockaddr_len, int flags)
540{ 552{
541 struct sock *sk = sock->sk; 553 struct sock *sk = sock->sk;
542 struct net_device *dev = NULL; 554 struct net_device *dev;
543 struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr; 555 struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
544 struct pppox_sock *po = pppox_sk(sk); 556 struct pppox_sock *po = pppox_sk(sk);
545 int error; 557 int error;
@@ -565,7 +577,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
565 pppox_unbind_sock(sk); 577 pppox_unbind_sock(sk);
566 578
567 /* Delete the old binding */ 579 /* Delete the old binding */
568 delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote); 580 delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote,po->pppoe_ifindex);
569 581
570 if(po->pppoe_dev) 582 if(po->pppoe_dev)
571 dev_put(po->pppoe_dev); 583 dev_put(po->pppoe_dev);
@@ -585,6 +597,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
585 goto end; 597 goto end;
586 598
587 po->pppoe_dev = dev; 599 po->pppoe_dev = dev;
600 po->pppoe_ifindex = dev->ifindex;
588 601
589 if (!(dev->flags & IFF_UP)) 602 if (!(dev->flags & IFF_UP))
590 goto err_put; 603 goto err_put;
@@ -705,7 +718,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
705 break; 718 break;
706 719
707 /* PPPoE address from the user specifies an outbound 720 /* PPPoE address from the user specifies an outbound
708 PPPoE address to which frames are forwarded to */ 721 PPPoE address which frames are forwarded to */
709 err = -EFAULT; 722 err = -EFAULT;
710 if (copy_from_user(&po->pppoe_relay, 723 if (copy_from_user(&po->pppoe_relay,
711 (void __user *)arg, 724 (void __user *)arg,
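The pppoe.c changes widen the session key from (session id, remote MAC) to (session id, remote MAC, receiving ifindex), so the same sid/MAC pair can be reused on different interfaces and frames cannot be injected into a session from another device. A simplified sketch of the three-part lookup, using a hypothetical entry structure instead of struct pppox_sock:

    #include <linux/string.h>
    #include <linux/if_ether.h>

    struct example_session {
            unsigned long sid;
            unsigned char remote[ETH_ALEN];
            int ifindex;
            struct example_session *next;
    };

    /* Sketch: walk one hash bucket and match on session id, remote MAC
     * and the index of the device the frame arrived on. */
    static struct example_session *
    example_find_session(struct example_session *head, unsigned long sid,
                         const unsigned char *addr, int ifindex)
    {
            struct example_session *s;

            for (s = head; s; s = s->next)
                    if (s->sid == sid &&
                        !memcmp(s->remote, addr, ETH_ALEN) &&
                        s->ifindex == ifindex)
                            return s;

            return NULL;
    }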
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index a142cdfd94..d3f65dab30 100755
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -39,7 +39,7 @@
39 39
40#define DRV_NAME "qla3xxx" 40#define DRV_NAME "qla3xxx"
41#define DRV_STRING "QLogic ISP3XXX Network Driver" 41#define DRV_STRING "QLogic ISP3XXX Network Driver"
42#define DRV_VERSION "v2.02.00-k36" 42#define DRV_VERSION "v2.03.00-k3"
43#define PFX DRV_NAME " " 43#define PFX DRV_NAME " "
44 44
45static const char ql3xxx_driver_name[] = DRV_NAME; 45static const char ql3xxx_driver_name[] = DRV_NAME;
@@ -276,7 +276,8 @@ static void ql_enable_interrupts(struct ql3_adapter *qdev)
276static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, 276static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
277 struct ql_rcv_buf_cb *lrg_buf_cb) 277 struct ql_rcv_buf_cb *lrg_buf_cb)
278{ 278{
279 u64 map; 279 dma_addr_t map;
280 int err;
280 lrg_buf_cb->next = NULL; 281 lrg_buf_cb->next = NULL;
281 282
282 if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */ 283 if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */
@@ -287,9 +288,10 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
287 } 288 }
288 289
289 if (!lrg_buf_cb->skb) { 290 if (!lrg_buf_cb->skb) {
290 lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len); 291 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
292 qdev->lrg_buffer_len);
291 if (unlikely(!lrg_buf_cb->skb)) { 293 if (unlikely(!lrg_buf_cb->skb)) {
292 printk(KERN_ERR PFX "%s: failed dev_alloc_skb().\n", 294 printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n",
293 qdev->ndev->name); 295 qdev->ndev->name);
294 qdev->lrg_buf_skb_check++; 296 qdev->lrg_buf_skb_check++;
295 } else { 297 } else {
@@ -303,6 +305,17 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
303 qdev->lrg_buffer_len - 305 qdev->lrg_buffer_len -
304 QL_HEADER_SPACE, 306 QL_HEADER_SPACE,
305 PCI_DMA_FROMDEVICE); 307 PCI_DMA_FROMDEVICE);
308 err = pci_dma_mapping_error(map);
309 if(err) {
310 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
311 qdev->ndev->name, err);
312 dev_kfree_skb(lrg_buf_cb->skb);
313 lrg_buf_cb->skb = NULL;
314
315 qdev->lrg_buf_skb_check++;
316 return;
317 }
318
306 lrg_buf_cb->buf_phy_addr_low = 319 lrg_buf_cb->buf_phy_addr_low =
307 cpu_to_le32(LS_64BITS(map)); 320 cpu_to_le32(LS_64BITS(map));
308 lrg_buf_cb->buf_phy_addr_high = 321 lrg_buf_cb->buf_phy_addr_high =
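This qla3xxx hunk adds a pci_dma_mapping_error() check after mapping the receive buffer and frees the skb instead of publishing a bad bus address in a descriptor. A condensed sketch of the map-and-verify step, assuming the 2.6.21-era single-argument pci_dma_mapping_error():

    #include <linux/pci.h>
    #include <linux/skbuff.h>

    /* Sketch: map an rx buffer for the device and verify the mapping
     * before exposing its bus address to the hardware. */
    static int example_map_rx_skb(struct pci_dev *pdev, struct sk_buff *skb,
                                  unsigned int len, dma_addr_t *map_out)
    {
            dma_addr_t map;

            map = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
            if (pci_dma_mapping_error(map)) {
                    dev_kfree_skb(skb);
                    return -EIO;
            }

            *map_out = map;
            return 0;
    }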
@@ -1387,6 +1400,8 @@ static void ql_link_state_machine(struct ql3_adapter *qdev)
1387 printk(KERN_INFO PFX 1400 printk(KERN_INFO PFX
1388 "%s: Reset in progress, skip processing link " 1401 "%s: Reset in progress, skip processing link "
1389 "state.\n", qdev->ndev->name); 1402 "state.\n", qdev->ndev->name);
1403
1404 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1390 return; 1405 return;
1391 } 1406 }
1392 1407
@@ -1518,8 +1533,10 @@ static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
1518 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1533 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1519 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1534 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1520 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1535 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1521 2) << 7)) 1536 2) << 7)) {
1537 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1522 return 0; 1538 return 0;
1539 }
1523 status = ql_is_auto_cfg(qdev); 1540 status = ql_is_auto_cfg(qdev);
1524 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1541 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1525 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1542 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
@@ -1533,8 +1550,10 @@ static u32 ql_get_speed(struct ql3_adapter *qdev)
1533 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1550 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1534 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1551 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1535 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1552 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1536 2) << 7)) 1553 2) << 7)) {
1554 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1537 return 0; 1555 return 0;
1556 }
1538 status = ql_get_link_speed(qdev); 1557 status = ql_get_link_speed(qdev);
1539 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1558 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1540 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1559 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
@@ -1548,8 +1567,10 @@ static int ql_get_full_dup(struct ql3_adapter *qdev)
1548 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1567 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1549 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1568 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1550 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1569 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1551 2) << 7)) 1570 2) << 7)) {
1571 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1552 return 0; 1572 return 0;
1573 }
1553 status = ql_is_link_full_dup(qdev); 1574 status = ql_is_link_full_dup(qdev);
1554 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1575 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1555 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1576 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
@@ -1615,14 +1636,16 @@ static const struct ethtool_ops ql3xxx_ethtool_ops = {
1615static int ql_populate_free_queue(struct ql3_adapter *qdev) 1636static int ql_populate_free_queue(struct ql3_adapter *qdev)
1616{ 1637{
1617 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; 1638 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
1618 u64 map; 1639 dma_addr_t map;
1640 int err;
1619 1641
1620 while (lrg_buf_cb) { 1642 while (lrg_buf_cb) {
1621 if (!lrg_buf_cb->skb) { 1643 if (!lrg_buf_cb->skb) {
1622 lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len); 1644 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
1645 qdev->lrg_buffer_len);
1623 if (unlikely(!lrg_buf_cb->skb)) { 1646 if (unlikely(!lrg_buf_cb->skb)) {
1624 printk(KERN_DEBUG PFX 1647 printk(KERN_DEBUG PFX
1625 "%s: Failed dev_alloc_skb().\n", 1648 "%s: Failed netdev_alloc_skb().\n",
1626 qdev->ndev->name); 1649 qdev->ndev->name);
1627 break; 1650 break;
1628 } else { 1651 } else {
@@ -1636,6 +1659,17 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
1636 qdev->lrg_buffer_len - 1659 qdev->lrg_buffer_len -
1637 QL_HEADER_SPACE, 1660 QL_HEADER_SPACE,
1638 PCI_DMA_FROMDEVICE); 1661 PCI_DMA_FROMDEVICE);
1662
1663 err = pci_dma_mapping_error(map);
1664 if(err) {
1665 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
1666 qdev->ndev->name, err);
1667 dev_kfree_skb(lrg_buf_cb->skb);
1668 lrg_buf_cb->skb = NULL;
1669 break;
1670 }
1671
1672
1639 lrg_buf_cb->buf_phy_addr_low = 1673 lrg_buf_cb->buf_phy_addr_low =
1640 cpu_to_le32(LS_64BITS(map)); 1674 cpu_to_le32(LS_64BITS(map));
1641 lrg_buf_cb->buf_phy_addr_high = 1675 lrg_buf_cb->buf_phy_addr_high =
@@ -1690,11 +1724,11 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1690 1724
1691 qdev->lrg_buf_q_producer_index++; 1725 qdev->lrg_buf_q_producer_index++;
1692 1726
1693 if (qdev->lrg_buf_q_producer_index == NUM_LBUFQ_ENTRIES) 1727 if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries)
1694 qdev->lrg_buf_q_producer_index = 0; 1728 qdev->lrg_buf_q_producer_index = 0;
1695 1729
1696 if (qdev->lrg_buf_q_producer_index == 1730 if (qdev->lrg_buf_q_producer_index ==
1697 (NUM_LBUFQ_ENTRIES - 1)) { 1731 (qdev->num_lbufq_entries - 1)) {
1698 lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr; 1732 lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
1699 } 1733 }
1700 } 1734 }
@@ -1713,8 +1747,31 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1713{ 1747{
1714 struct ql_tx_buf_cb *tx_cb; 1748 struct ql_tx_buf_cb *tx_cb;
1715 int i; 1749 int i;
1750 int retval = 0;
1716 1751
1752 if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
1753 printk(KERN_WARNING "Frame short, but it was padded and sent.\n");
1754 }
1755
1717 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; 1756 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
1757
1758 /* Check the transmit response flags for any errors */
1759 if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
1760 printk(KERN_ERR "Frame too short to be legal, frame not sent.\n");
1761
1762 qdev->stats.tx_errors++;
1763 retval = -EIO;
1764 goto frame_not_sent;
1765 }
1766
1767 if(tx_cb->seg_count == 0) {
1768 printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id);
1769
1770 qdev->stats.tx_errors++;
1771 retval = -EIO;
1772 goto invalid_seg_count;
1773 }
1774
1718 pci_unmap_single(qdev->pdev, 1775 pci_unmap_single(qdev->pdev,
1719 pci_unmap_addr(&tx_cb->map[0], mapaddr), 1776 pci_unmap_addr(&tx_cb->map[0], mapaddr),
1720 pci_unmap_len(&tx_cb->map[0], maplen), 1777 pci_unmap_len(&tx_cb->map[0], maplen),
@@ -1731,11 +1788,32 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1731 } 1788 }
1732 qdev->stats.tx_packets++; 1789 qdev->stats.tx_packets++;
1733 qdev->stats.tx_bytes += tx_cb->skb->len; 1790 qdev->stats.tx_bytes += tx_cb->skb->len;
1791
1792frame_not_sent:
1734 dev_kfree_skb_irq(tx_cb->skb); 1793 dev_kfree_skb_irq(tx_cb->skb);
1735 tx_cb->skb = NULL; 1794 tx_cb->skb = NULL;
1795
1796invalid_seg_count:
1736 atomic_inc(&qdev->tx_count); 1797 atomic_inc(&qdev->tx_count);
1737} 1798}
1738 1799
1800void ql_get_sbuf(struct ql3_adapter *qdev)
1801{
1802 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1803 qdev->small_buf_index = 0;
1804 qdev->small_buf_release_cnt++;
1805}
1806
1807struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
1808{
1809 struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
1810 lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
1811 qdev->lrg_buf_release_cnt++;
1812 if (++qdev->lrg_buf_index == qdev->num_large_buffers)
1813 qdev->lrg_buf_index = 0;
1814 return(lrg_buf_cb);
1815}
1816
1739/* 1817/*
1740 * The difference between 3022 and 3032 for inbound completions: 1818 * The difference between 3022 and 3032 for inbound completions:
1741 * 3022 uses two buffers per completion. The first buffer contains 1819 * 3022 uses two buffers per completion. The first buffer contains
@@ -1751,47 +1829,21 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1751static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, 1829static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1752 struct ib_mac_iocb_rsp *ib_mac_rsp_ptr) 1830 struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
1753{ 1831{
1754 long int offset;
1755 u32 lrg_buf_phy_addr_low = 0;
1756 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; 1832 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
1757 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; 1833 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
1758 u32 *curr_ial_ptr;
1759 struct sk_buff *skb; 1834 struct sk_buff *skb;
1760 u16 length = le16_to_cpu(ib_mac_rsp_ptr->length); 1835 u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
1761 1836
1762 /* 1837 /*
1763 * Get the inbound address list (small buffer). 1838 * Get the inbound address list (small buffer).
1764 */ 1839 */
1765 offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE; 1840 ql_get_sbuf(qdev);
1766 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1767 qdev->small_buf_index = 0;
1768 1841
1769 curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset); 1842 if (qdev->device_id == QL3022_DEVICE_ID)
1770 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset; 1843 lrg_buf_cb1 = ql_get_lbuf(qdev);
1771 qdev->small_buf_release_cnt++;
1772
1773 if (qdev->device_id == QL3022_DEVICE_ID) {
1774 /* start of first buffer (3022 only) */
1775 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1776 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
1777 qdev->lrg_buf_release_cnt++;
1778 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) {
1779 qdev->lrg_buf_index = 0;
1780 }
1781 curr_ial_ptr++; /* 64-bit pointers require two incs. */
1782 curr_ial_ptr++;
1783 }
1784 1844
1785 /* start of second buffer */ 1845 /* start of second buffer */
1786 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); 1846 lrg_buf_cb2 = ql_get_lbuf(qdev);
1787 lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
1788
1789 /*
1790 * Second buffer gets sent up the stack.
1791 */
1792 qdev->lrg_buf_release_cnt++;
1793 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1794 qdev->lrg_buf_index = 0;
1795 skb = lrg_buf_cb2->skb; 1847 skb = lrg_buf_cb2->skb;
1796 1848
1797 qdev->stats.rx_packets++; 1849 qdev->stats.rx_packets++;
@@ -1819,11 +1871,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1819static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, 1871static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1820 struct ib_ip_iocb_rsp *ib_ip_rsp_ptr) 1872 struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
1821{ 1873{
1822 long int offset;
1823 u32 lrg_buf_phy_addr_low = 0;
1824 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; 1874 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
1825 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; 1875 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
1826 u32 *curr_ial_ptr;
1827 struct sk_buff *skb1 = NULL, *skb2; 1876 struct sk_buff *skb1 = NULL, *skb2;
1828 struct net_device *ndev = qdev->ndev; 1877 struct net_device *ndev = qdev->ndev;
1829 u16 length = le16_to_cpu(ib_ip_rsp_ptr->length); 1878 u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
@@ -1833,35 +1882,20 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1833 * Get the inbound address list (small buffer). 1882 * Get the inbound address list (small buffer).
1834 */ 1883 */
1835 1884
1836 offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE; 1885 ql_get_sbuf(qdev);
1837 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1838 qdev->small_buf_index = 0;
1839 curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
1840 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
1841 qdev->small_buf_release_cnt++;
1842 1886
1843 if (qdev->device_id == QL3022_DEVICE_ID) { 1887 if (qdev->device_id == QL3022_DEVICE_ID) {
1844 /* start of first buffer on 3022 */ 1888 /* start of first buffer on 3022 */
1845 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); 1889 lrg_buf_cb1 = ql_get_lbuf(qdev);
1846 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
1847 qdev->lrg_buf_release_cnt++;
1848 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1849 qdev->lrg_buf_index = 0;
1850 skb1 = lrg_buf_cb1->skb; 1890 skb1 = lrg_buf_cb1->skb;
1851 curr_ial_ptr++; /* 64-bit pointers require two incs. */
1852 curr_ial_ptr++;
1853 size = ETH_HLEN; 1891 size = ETH_HLEN;
1854 if (*((u16 *) skb1->data) != 0xFFFF) 1892 if (*((u16 *) skb1->data) != 0xFFFF)
1855 size += VLAN_ETH_HLEN - ETH_HLEN; 1893 size += VLAN_ETH_HLEN - ETH_HLEN;
1856 } 1894 }
1857 1895
1858 /* start of second buffer */ 1896 /* start of second buffer */
1859 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); 1897 lrg_buf_cb2 = ql_get_lbuf(qdev);
1860 lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
1861 skb2 = lrg_buf_cb2->skb; 1898 skb2 = lrg_buf_cb2->skb;
1862 qdev->lrg_buf_release_cnt++;
1863 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1864 qdev->lrg_buf_index = 0;
1865 1899
1866 skb_put(skb2, length); /* Just the second buffer length here. */ 1900 skb_put(skb2, length); /* Just the second buffer length here. */
1867 pci_unmap_single(qdev->pdev, 1901 pci_unmap_single(qdev->pdev,
@@ -1914,10 +1948,13 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
1914 struct net_rsp_iocb *net_rsp; 1948 struct net_rsp_iocb *net_rsp;
1915 struct net_device *ndev = qdev->ndev; 1949 struct net_device *ndev = qdev->ndev;
1916 unsigned long hw_flags; 1950 unsigned long hw_flags;
1951 int work_done = 0;
1952
1953 u32 rsp_producer_index = le32_to_cpu(*(qdev->prsp_producer_index));
1917 1954
1918 /* While there are entries in the completion queue. */ 1955 /* While there are entries in the completion queue. */
1919 while ((cpu_to_le32(*(qdev->prsp_producer_index)) != 1956 while ((rsp_producer_index !=
1920 qdev->rsp_consumer_index) && (*rx_cleaned < work_to_do)) { 1957 qdev->rsp_consumer_index) && (work_done < work_to_do)) {
1921 1958
1922 net_rsp = qdev->rsp_current; 1959 net_rsp = qdev->rsp_current;
1923 switch (net_rsp->opcode) { 1960 switch (net_rsp->opcode) {
@@ -1968,37 +2005,34 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
1968 } else { 2005 } else {
1969 qdev->rsp_current++; 2006 qdev->rsp_current++;
1970 } 2007 }
2008
2009 work_done = *tx_cleaned + *rx_cleaned;
1971 } 2010 }
1972 2011
1973 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 2012 if(work_done) {
2013 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1974 2014
1975 ql_update_lrg_bufq_prod_index(qdev); 2015 ql_update_lrg_bufq_prod_index(qdev);
1976 2016
1977 if (qdev->small_buf_release_cnt >= 16) { 2017 if (qdev->small_buf_release_cnt >= 16) {
1978 while (qdev->small_buf_release_cnt >= 16) { 2018 while (qdev->small_buf_release_cnt >= 16) {
1979 qdev->small_buf_q_producer_index++; 2019 qdev->small_buf_q_producer_index++;
1980 2020
1981 if (qdev->small_buf_q_producer_index == 2021 if (qdev->small_buf_q_producer_index ==
1982 NUM_SBUFQ_ENTRIES) 2022 NUM_SBUFQ_ENTRIES)
1983 qdev->small_buf_q_producer_index = 0; 2023 qdev->small_buf_q_producer_index = 0;
1984 qdev->small_buf_release_cnt -= 8; 2024 qdev->small_buf_release_cnt -= 8;
1985 } 2025 }
1986 2026
1987 ql_write_common_reg(qdev, 2027 wmb();
1988 &port_regs->CommonRegs. 2028 ql_write_common_reg(qdev,
1989 rxSmallQProducerIndex, 2029 &port_regs->CommonRegs.
1990 qdev->small_buf_q_producer_index); 2030 rxSmallQProducerIndex,
1991 } 2031 qdev->small_buf_q_producer_index);
1992 2032
1993 ql_write_common_reg(qdev, 2033 }
1994 &port_regs->CommonRegs.rspQConsumerIndex,
1995 qdev->rsp_consumer_index);
1996 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1997 2034
1998 if (unlikely(netif_queue_stopped(qdev->ndev))) { 2035 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1999 if (netif_queue_stopped(qdev->ndev) &&
2000 (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4)))
2001 netif_wake_queue(qdev->ndev);
2002 } 2036 }
2003 2037
2004 return *tx_cleaned + *rx_cleaned; 2038 return *tx_cleaned + *rx_cleaned;
@@ -2009,6 +2043,8 @@ static int ql_poll(struct net_device *ndev, int *budget)
2009 struct ql3_adapter *qdev = netdev_priv(ndev); 2043 struct ql3_adapter *qdev = netdev_priv(ndev);
2010 int work_to_do = min(*budget, ndev->quota); 2044 int work_to_do = min(*budget, ndev->quota);
2011 int rx_cleaned = 0, tx_cleaned = 0; 2045 int rx_cleaned = 0, tx_cleaned = 0;
2046 unsigned long hw_flags;
2047 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2012 2048
2013 if (!netif_carrier_ok(ndev)) 2049 if (!netif_carrier_ok(ndev))
2014 goto quit_polling; 2050 goto quit_polling;
@@ -2017,9 +2053,17 @@ static int ql_poll(struct net_device *ndev, int *budget)
2017 *budget -= rx_cleaned; 2053 *budget -= rx_cleaned;
2018 ndev->quota -= rx_cleaned; 2054 ndev->quota -= rx_cleaned;
2019 2055
2020 if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) { 2056 if( tx_cleaned + rx_cleaned != work_to_do ||
2057 !netif_running(ndev)) {
2021quit_polling: 2058quit_polling:
2022 netif_rx_complete(ndev); 2059 netif_rx_complete(ndev);
2060
2061 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
2062 ql_write_common_reg(qdev,
2063 &port_regs->CommonRegs.rspQConsumerIndex,
2064 qdev->rsp_consumer_index);
2065 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
2066
2023 ql_enable_interrupts(qdev); 2067 ql_enable_interrupts(qdev);
2024 return 0; 2068 return 0;
2025 } 2069 }
@@ -2073,10 +2117,9 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2073 spin_unlock(&qdev->adapter_lock); 2117 spin_unlock(&qdev->adapter_lock);
2074 } else if (value & ISP_IMR_DISABLE_CMPL_INT) { 2118 } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2075 ql_disable_interrupts(qdev); 2119 ql_disable_interrupts(qdev);
2076 if (likely(netif_rx_schedule_prep(ndev))) 2120 if (likely(netif_rx_schedule_prep(ndev))) {
2077 __netif_rx_schedule(ndev); 2121 __netif_rx_schedule(ndev);
2078 else 2122 }
2079 ql_enable_interrupts(qdev);
2080 } else { 2123 } else {
2081 return IRQ_NONE; 2124 return IRQ_NONE;
2082 } 2125 }
@@ -2093,8 +2136,12 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2093 * the next AOL if more frags are coming. 2136 * the next AOL if more frags are coming.
2094 * That is why the frags:segment count ratio is not linear. 2137 * That is why the frags:segment count ratio is not linear.
2095 */ 2138 */
2096static int ql_get_seg_count(unsigned short frags) 2139static int ql_get_seg_count(struct ql3_adapter *qdev,
2140 unsigned short frags)
2097{ 2141{
2142 if (qdev->device_id == QL3022_DEVICE_ID)
2143 return 1;
2144
2098 switch(frags) { 2145 switch(frags) {
2099 case 0: return 1; /* just the skb->data seg */ 2146 case 0: return 1; /* just the skb->data seg */
2100 case 1: return 2; /* skb->data + 1 frag */ 2147 case 1: return 2; /* skb->data + 1 frag */
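A worked sketch of the arithmetic behind that non-linear frags:segment ratio (editorial illustration, not driver code; it assumes the layout described in this patch's own comments — 3 address/length pairs in the IOCB and 5 per OAL, the last slot of each turning into a continuation pointer — which is also what the (seg == 2 && seg_cnt > 3), (seg == 7 && seg_cnt > 8), ... checks in ql_send_map() below encode):

static int seg_count_sketch(unsigned short frags)
{
	int data_segs = frags + 1;	/* skb->data plus one per page frag */
	int segs = data_segs;

	if (data_segs > 3)		/* 3rd IOCB slot becomes a chain pointer */
		segs++;
	if (data_segs > 7)		/* 5th slot of the 1st OAL chains on */
		segs++;
	if (data_segs > 11)		/* 5th slot of the 2nd OAL */
		segs++;
	if (data_segs > 15)		/* 5th slot of the 3rd OAL */
		segs++;
	return segs;			/* e.g. 1 frag -> 2, 3 frags -> 5, 7 frags -> 10 */
}

For example, a header plus 3 page fragments needs 4 data segments; that exceeds the 3 IOCB slots, so one slot is spent on a continuation entry and the segment count reported is 5.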
@@ -2139,11 +2186,13 @@ static void ql_hw_csum_setup(struct sk_buff *skb,
2139 2186
2140 if (ip) { 2187 if (ip) {
2141 if (ip->protocol == IPPROTO_TCP) { 2188 if (ip->protocol == IPPROTO_TCP) {
2142 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC; 2189 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
2190 OB_3032MAC_IOCB_REQ_IC;
2143 mac_iocb_ptr->ip_hdr_off = offset; 2191 mac_iocb_ptr->ip_hdr_off = offset;
2144 mac_iocb_ptr->ip_hdr_len = ip->ihl; 2192 mac_iocb_ptr->ip_hdr_len = ip->ihl;
2145 } else if (ip->protocol == IPPROTO_UDP) { 2193 } else if (ip->protocol == IPPROTO_UDP) {
2146 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC; 2194 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
2195 OB_3032MAC_IOCB_REQ_IC;
2147 mac_iocb_ptr->ip_hdr_off = offset; 2196 mac_iocb_ptr->ip_hdr_off = offset;
2148 mac_iocb_ptr->ip_hdr_len = ip->ihl; 2197 mac_iocb_ptr->ip_hdr_len = ip->ihl;
2149 } 2198 }
@@ -2151,53 +2200,42 @@ static void ql_hw_csum_setup(struct sk_buff *skb,
2151} 2200}
2152 2201
2153/* 2202/*
2154 * The difference between 3022 and 3032 sends: 2203 * Map the buffers for this transmit. This will return
2155 * 3022 only supports a simple single segment transmission. 2204 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
2156 * 3032 supports checksumming and scatter/gather lists (fragments).
2157 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2158 * in the IOCB plus a chain of outbound address lists (OAL) that
2159 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
2160 * will used to point to an OAL when more ALP entries are required.
2161 * The IOCB is always the top of the chain followed by one or more
2162 * OALs (when necessary).
2163 */ 2205 */
2164static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) 2206static int ql_send_map(struct ql3_adapter *qdev,
2207 struct ob_mac_iocb_req *mac_iocb_ptr,
2208 struct ql_tx_buf_cb *tx_cb,
2209 struct sk_buff *skb)
2165{ 2210{
2166 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2167 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2168 struct ql_tx_buf_cb *tx_cb;
2169 u32 tot_len = skb->len;
2170 struct oal *oal; 2211 struct oal *oal;
2171 struct oal_entry *oal_entry; 2212 struct oal_entry *oal_entry;
2172 int len; 2213 int len = skb_headlen(skb);
2173 struct ob_mac_iocb_req *mac_iocb_ptr; 2214 dma_addr_t map;
2174 u64 map; 2215 int err;
2216 int completed_segs, i;
2175 int seg_cnt, seg = 0; 2217 int seg_cnt, seg = 0;
2176 int frag_cnt = (int)skb_shinfo(skb)->nr_frags; 2218 int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
2177 2219
2178 if (unlikely(atomic_read(&qdev->tx_count) < 2)) { 2220 seg_cnt = tx_cb->seg_count = ql_get_seg_count(qdev,
2179 if (!netif_queue_stopped(ndev)) 2221 (skb_shinfo(skb)->nr_frags));
2180 netif_stop_queue(ndev);
2181 return NETDEV_TX_BUSY;
2182 }
2183 tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
2184 seg_cnt = tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags));
2185 if(seg_cnt == -1) { 2222 if(seg_cnt == -1) {
2186 printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__); 2223 printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
2187 return NETDEV_TX_OK; 2224 return NETDEV_TX_BUSY;
2188
2189 } 2225 }
2190 mac_iocb_ptr = tx_cb->queue_entry; 2226 /*
2191 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); 2227 * Map the skb buffer first.
2192 mac_iocb_ptr->opcode = qdev->mac_ob_opcode; 2228 */
2193 mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2194 mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2195 mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
2196 tx_cb->skb = skb;
2197 if (skb->ip_summed == CHECKSUM_PARTIAL)
2198 ql_hw_csum_setup(skb, mac_iocb_ptr);
2199 len = skb_headlen(skb);
2200 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); 2229 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
2230
2231 err = pci_dma_mapping_error(map);
2232 if(err) {
2233 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
2234 qdev->ndev->name, err);
2235
2236 return NETDEV_TX_BUSY;
2237 }
2238
2201 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2239 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2202 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2240 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2203 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2241 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
@@ -2206,15 +2244,14 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2206 pci_unmap_len_set(&tx_cb->map[seg], maplen, len); 2244 pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
2207 seg++; 2245 seg++;
2208 2246
2209 if (!skb_shinfo(skb)->nr_frags) { 2247 if (seg_cnt == 1) {
2210 /* Terminate the last segment. */ 2248 /* Terminate the last segment. */
2211 oal_entry->len = 2249 oal_entry->len =
2212 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY); 2250 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
2213 } else { 2251 } else {
2214 int i;
2215 oal = tx_cb->oal; 2252 oal = tx_cb->oal;
2216 for (i=0; i<frag_cnt; i++,seg++) { 2253 for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) {
2217 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2254 skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
2218 oal_entry++; 2255 oal_entry++;
2219 if ((seg == 2 && seg_cnt > 3) || /* Check for continuation */ 2256 if ((seg == 2 && seg_cnt > 3) || /* Check for continuation */
2220 (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */ 2257 (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */
@@ -2224,6 +2261,15 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2224 map = pci_map_single(qdev->pdev, oal, 2261 map = pci_map_single(qdev->pdev, oal,
2225 sizeof(struct oal), 2262 sizeof(struct oal),
2226 PCI_DMA_TODEVICE); 2263 PCI_DMA_TODEVICE);
2264
2265 err = pci_dma_mapping_error(map);
2266 if(err) {
2267
2268 printk(KERN_ERR "%s: PCI mapping of outbound address list failed with error: %d\n",
2269 qdev->ndev->name, err);
2270 goto map_error;
2271 }
2272
2227 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2273 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2228 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2274 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2229 oal_entry->len = 2275 oal_entry->len =
@@ -2242,6 +2288,14 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2242 pci_map_page(qdev->pdev, frag->page, 2288 pci_map_page(qdev->pdev, frag->page,
2243 frag->page_offset, frag->size, 2289 frag->page_offset, frag->size,
2244 PCI_DMA_TODEVICE); 2290 PCI_DMA_TODEVICE);
2291
2292 err = pci_dma_mapping_error(map);
2293 if(err) {
2294 printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
2295 qdev->ndev->name, err);
2296 goto map_error;
2297 }
2298
2245 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2299 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2246 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2300 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2247 oal_entry->len = cpu_to_le32(frag->size); 2301 oal_entry->len = cpu_to_le32(frag->size);
@@ -2253,6 +2307,94 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2253 oal_entry->len = 2307 oal_entry->len =
2254 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY); 2308 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
2255 } 2309 }
2310
2311 return NETDEV_TX_OK;
2312
2313map_error:
2314 /* A PCI mapping failed, so we need to back out.
2315 * Traverse the OALs and associated pages that have been mapped
2316 * so far and unmap them to clean up properly.
2317 */
2318
2319 seg = 1;
2320 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2321 oal = tx_cb->oal;
2322 for (i=0; i<completed_segs; i++,seg++) {
2323 oal_entry++;
2324
2325 if((seg == 2 && seg_cnt > 3) || /* Check for continuation */
2326 (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */
2327 (seg == 12 && seg_cnt > 13) || /* but necessary. */
2328 (seg == 17 && seg_cnt > 18)) {
2329 pci_unmap_single(qdev->pdev,
2330 pci_unmap_addr(&tx_cb->map[seg], mapaddr),
2331 pci_unmap_len(&tx_cb->map[seg], maplen),
2332 PCI_DMA_TODEVICE);
2333 oal++;
2334 seg++;
2335 }
2336
2337 pci_unmap_page(qdev->pdev,
2338 pci_unmap_addr(&tx_cb->map[seg], mapaddr),
2339 pci_unmap_len(&tx_cb->map[seg], maplen),
2340 PCI_DMA_TODEVICE);
2341 }
2342
2343 pci_unmap_single(qdev->pdev,
2344 pci_unmap_addr(&tx_cb->map[0], mapaddr),
2345 pci_unmap_addr(&tx_cb->map[0], maplen),
2346 PCI_DMA_TODEVICE);
2347
2348 return NETDEV_TX_BUSY;
2349
2350}
2351
2352/*
2353 * The difference between 3022 and 3032 sends:
2354 * 3022 only supports a simple single segment transmission.
2355 * 3032 supports checksumming and scatter/gather lists (fragments).
2356 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2357 * in the IOCB plus a chain of outbound address lists (OAL) that
2358 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
2359 * will be used to point to an OAL when more ALP entries are required.
2360 * The IOCB is always the top of the chain followed by one or more
2361 * OALs (when necessary).
2362 */
2363static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2364{
2365 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2366 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2367 struct ql_tx_buf_cb *tx_cb;
2368 u32 tot_len = skb->len;
2369 struct ob_mac_iocb_req *mac_iocb_ptr;
2370
2371 if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
2372 return NETDEV_TX_BUSY;
2373 }
2374
2375 tx_cb = &qdev->tx_buf[qdev->req_producer_index];
2376 if((tx_cb->seg_count = ql_get_seg_count(qdev,
2377 (skb_shinfo(skb)->nr_frags))) == -1) {
2378 printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
2379 return NETDEV_TX_OK;
2380 }
2381
2382 mac_iocb_ptr = tx_cb->queue_entry;
2383 mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2384 mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
2385 mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2386 mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2387 mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
2388 tx_cb->skb = skb;
2389 if (qdev->device_id == QL3032_DEVICE_ID &&
2390 skb->ip_summed == CHECKSUM_PARTIAL)
2391 ql_hw_csum_setup(skb, mac_iocb_ptr);
2392
2393 if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) {
2394 printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__);
2395 return NETDEV_TX_BUSY;
2396 }
2397
2256 wmb(); 2398 wmb();
2257 qdev->req_producer_index++; 2399 qdev->req_producer_index++;
2258 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) 2400 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
@@ -2338,12 +2480,19 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2338{ 2480{
2339 /* Create Large Buffer Queue */ 2481 /* Create Large Buffer Queue */
2340 qdev->lrg_buf_q_size = 2482 qdev->lrg_buf_q_size =
2341 NUM_LBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); 2483 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2342 if (qdev->lrg_buf_q_size < PAGE_SIZE) 2484 if (qdev->lrg_buf_q_size < PAGE_SIZE)
2343 qdev->lrg_buf_q_alloc_size = PAGE_SIZE; 2485 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2344 else 2486 else
2345 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; 2487 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2346 2488
2489 qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),GFP_KERNEL);
2490 if (qdev->lrg_buf == NULL) {
2491 printk(KERN_ERR PFX
2492 "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
2493 return -ENOMEM;
2494 }
2495
2347 qdev->lrg_buf_q_alloc_virt_addr = 2496 qdev->lrg_buf_q_alloc_virt_addr =
2348 pci_alloc_consistent(qdev->pdev, 2497 pci_alloc_consistent(qdev->pdev,
2349 qdev->lrg_buf_q_alloc_size, 2498 qdev->lrg_buf_q_alloc_size,
@@ -2393,7 +2542,7 @@ static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2393 "%s: Already done.\n", qdev->ndev->name); 2542 "%s: Already done.\n", qdev->ndev->name);
2394 return; 2543 return;
2395 } 2544 }
2396 2545 if(qdev->lrg_buf) kfree(qdev->lrg_buf);
2397 pci_free_consistent(qdev->pdev, 2546 pci_free_consistent(qdev->pdev,
2398 qdev->lrg_buf_q_alloc_size, 2547 qdev->lrg_buf_q_alloc_size,
2399 qdev->lrg_buf_q_alloc_virt_addr, 2548 qdev->lrg_buf_q_alloc_virt_addr,
@@ -2438,8 +2587,6 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2438 2587
2439 small_buf_q_entry = qdev->small_buf_q_virt_addr; 2588 small_buf_q_entry = qdev->small_buf_q_virt_addr;
2440 2589
2441 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low;
2442
2443 /* Initialize the small buffer queue. */ 2590 /* Initialize the small buffer queue. */
2444 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { 2591 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
2445 small_buf_q_entry->addr_high = 2592 small_buf_q_entry->addr_high =
@@ -2476,7 +2623,7 @@ static void ql_free_large_buffers(struct ql3_adapter *qdev)
2476 int i = 0; 2623 int i = 0;
2477 struct ql_rcv_buf_cb *lrg_buf_cb; 2624 struct ql_rcv_buf_cb *lrg_buf_cb;
2478 2625
2479 for (i = 0; i < NUM_LARGE_BUFFERS; i++) { 2626 for (i = 0; i < qdev->num_large_buffers; i++) {
2480 lrg_buf_cb = &qdev->lrg_buf[i]; 2627 lrg_buf_cb = &qdev->lrg_buf[i];
2481 if (lrg_buf_cb->skb) { 2628 if (lrg_buf_cb->skb) {
2482 dev_kfree_skb(lrg_buf_cb->skb); 2629 dev_kfree_skb(lrg_buf_cb->skb);
@@ -2497,7 +2644,7 @@ static void ql_init_large_buffers(struct ql3_adapter *qdev)
2497 struct ql_rcv_buf_cb *lrg_buf_cb; 2644 struct ql_rcv_buf_cb *lrg_buf_cb;
2498 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; 2645 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2499 2646
2500 for (i = 0; i < NUM_LARGE_BUFFERS; i++) { 2647 for (i = 0; i < qdev->num_large_buffers; i++) {
2501 lrg_buf_cb = &qdev->lrg_buf[i]; 2648 lrg_buf_cb = &qdev->lrg_buf[i];
2502 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; 2649 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
2503 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; 2650 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
@@ -2512,10 +2659,12 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2512 int i; 2659 int i;
2513 struct ql_rcv_buf_cb *lrg_buf_cb; 2660 struct ql_rcv_buf_cb *lrg_buf_cb;
2514 struct sk_buff *skb; 2661 struct sk_buff *skb;
2515 u64 map; 2662 dma_addr_t map;
2663 int err;
2516 2664
2517 for (i = 0; i < NUM_LARGE_BUFFERS; i++) { 2665 for (i = 0; i < qdev->num_large_buffers; i++) {
2518 skb = dev_alloc_skb(qdev->lrg_buffer_len); 2666 skb = netdev_alloc_skb(qdev->ndev,
2667 qdev->lrg_buffer_len);
2519 if (unlikely(!skb)) { 2668 if (unlikely(!skb)) {
2520 /* Better luck next round */ 2669 /* Better luck next round */
2521 printk(KERN_ERR PFX 2670 printk(KERN_ERR PFX
@@ -2541,6 +2690,15 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2541 qdev->lrg_buffer_len - 2690 qdev->lrg_buffer_len -
2542 QL_HEADER_SPACE, 2691 QL_HEADER_SPACE,
2543 PCI_DMA_FROMDEVICE); 2692 PCI_DMA_FROMDEVICE);
2693
2694 err = pci_dma_mapping_error(map);
2695 if(err) {
2696 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
2697 qdev->ndev->name, err);
2698 ql_free_large_buffers(qdev);
2699 return -ENOMEM;
2700 }
2701
2544 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map); 2702 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2545 pci_unmap_len_set(lrg_buf_cb, maplen, 2703 pci_unmap_len_set(lrg_buf_cb, maplen,
2546 qdev->lrg_buffer_len - 2704 qdev->lrg_buffer_len -
@@ -2592,9 +2750,15 @@ static int ql_create_send_free_list(struct ql3_adapter *qdev)
2592 2750
2593static int ql_alloc_mem_resources(struct ql3_adapter *qdev) 2751static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2594{ 2752{
2595 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) 2753 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
2754 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
2596 qdev->lrg_buffer_len = NORMAL_MTU_SIZE; 2755 qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2756 }
2597 else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { 2757 else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2758 /*
2759 * Bigger buffers, so fewer of them.
2760 */
2761 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
2598 qdev->lrg_buffer_len = JUMBO_MTU_SIZE; 2762 qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2599 } else { 2763 } else {
2600 printk(KERN_ERR PFX 2764 printk(KERN_ERR PFX
@@ -2602,6 +2766,7 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2602 qdev->ndev->name); 2766 qdev->ndev->name);
2603 return -ENOMEM; 2767 return -ENOMEM;
2604 } 2768 }
2769 qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
2605 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; 2770 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2606 qdev->max_frame_size = 2771 qdev->max_frame_size =
2607 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; 2772 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
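To put numbers on the jumbo sizing introduced here (a back-of-the-envelope check; the MTU values are assumptions, since NORMAL_MTU_SIZE and JUMBO_MTU_SIZE are defined elsewhere in qla3xxx.h, conventionally 1500 and 9000):

	JUMBO_NUM_LBUFQ_ENTRIES = NUM_LBUFQ_ENTRIES / (JUMBO_MTU_SIZE / NORMAL_MTU_SIZE)
	                        = 128 / (9000 / 1500)
	                        = 128 / 6 = 21          (integer division)
	num_large_buffers       = num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY

so a jumbo-MTU port posts roughly one sixth as many receive buffers as a normal-MTU port, each about six times larger, keeping the total receive-buffer memory in the same ballpark.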
@@ -2834,7 +2999,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
2834 &hmem_regs->rxLargeQBaseAddrLow, 2999 &hmem_regs->rxLargeQBaseAddrLow,
2835 LS_64BITS(qdev->lrg_buf_q_phy_addr)); 3000 LS_64BITS(qdev->lrg_buf_q_phy_addr));
2836 3001
2837 ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, NUM_LBUFQ_ENTRIES); 3002 ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries);
2838 3003
2839 ql_write_page1_reg(qdev, 3004 ql_write_page1_reg(qdev,
2840 &hmem_regs->rxLargeBufferLength, 3005 &hmem_regs->rxLargeBufferLength,
@@ -2856,7 +3021,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
2856 3021
2857 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; 3022 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
2858 qdev->small_buf_release_cnt = 8; 3023 qdev->small_buf_release_cnt = 8;
2859 qdev->lrg_buf_q_producer_index = NUM_LBUFQ_ENTRIES - 1; 3024 qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
2860 qdev->lrg_buf_release_cnt = 8; 3025 qdev->lrg_buf_release_cnt = 8;
2861 qdev->lrg_buf_next_free = 3026 qdev->lrg_buf_next_free =
2862 (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr; 3027 (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
@@ -3292,6 +3457,7 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
3292err_init: 3457err_init:
3293 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3458 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3294err_lock: 3459err_lock:
3460 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3295 free_irq(qdev->pdev->irq, ndev); 3461 free_irq(qdev->pdev->irq, ndev);
3296err_irq: 3462err_irq:
3297 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) { 3463 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
@@ -3343,27 +3509,6 @@ static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev)
3343 return &qdev->stats; 3509 return &qdev->stats;
3344} 3510}
3345 3511
3346static int ql3xxx_change_mtu(struct net_device *ndev, int new_mtu)
3347{
3348 struct ql3_adapter *qdev = netdev_priv(ndev);
3349 printk(KERN_ERR PFX "%s: new mtu size = %d.\n", ndev->name, new_mtu);
3350 if (new_mtu != NORMAL_MTU_SIZE && new_mtu != JUMBO_MTU_SIZE) {
3351 printk(KERN_ERR PFX
3352 "%s: mtu size of %d is not valid. Use exactly %d or "
3353 "%d.\n", ndev->name, new_mtu, NORMAL_MTU_SIZE,
3354 JUMBO_MTU_SIZE);
3355 return -EINVAL;
3356 }
3357
3358 if (!netif_running(ndev)) {
3359 ndev->mtu = new_mtu;
3360 return 0;
3361 }
3362
3363 ndev->mtu = new_mtu;
3364 return ql_cycle_adapter(qdev,QL_DO_RESET);
3365}
3366
3367static void ql3xxx_set_multicast_list(struct net_device *ndev) 3512static void ql3xxx_set_multicast_list(struct net_device *ndev)
3368{ 3513{
3369 /* 3514 /*
@@ -3609,8 +3754,12 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3609 } 3754 }
3610 3755
3611 ndev = alloc_etherdev(sizeof(struct ql3_adapter)); 3756 ndev = alloc_etherdev(sizeof(struct ql3_adapter));
3612 if (!ndev) 3757 if (!ndev) {
3758 printk(KERN_ERR PFX "%s could not alloc etherdev\n",
3759 pci_name(pdev));
3760 err = -ENOMEM;
3613 goto err_out_free_regions; 3761 goto err_out_free_regions;
3762 }
3614 3763
3615 SET_MODULE_OWNER(ndev); 3764 SET_MODULE_OWNER(ndev);
3616 SET_NETDEV_DEV(ndev, &pdev->dev); 3765 SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -3639,6 +3788,7 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3639 if (!qdev->mem_map_registers) { 3788 if (!qdev->mem_map_registers) {
3640 printk(KERN_ERR PFX "%s: cannot map device registers\n", 3789 printk(KERN_ERR PFX "%s: cannot map device registers\n",
3641 pci_name(pdev)); 3790 pci_name(pdev));
3791 err = -EIO;
3642 goto err_out_free_ndev; 3792 goto err_out_free_ndev;
3643 } 3793 }
3644 3794
@@ -3650,7 +3800,6 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3650 ndev->hard_start_xmit = ql3xxx_send; 3800 ndev->hard_start_xmit = ql3xxx_send;
3651 ndev->stop = ql3xxx_close; 3801 ndev->stop = ql3xxx_close;
3652 ndev->get_stats = ql3xxx_get_stats; 3802 ndev->get_stats = ql3xxx_get_stats;
3653 ndev->change_mtu = ql3xxx_change_mtu;
3654 ndev->set_multicast_list = ql3xxx_set_multicast_list; 3803 ndev->set_multicast_list = ql3xxx_set_multicast_list;
3655 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); 3804 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
3656 ndev->set_mac_address = ql3xxx_set_mac_address; 3805 ndev->set_mac_address = ql3xxx_set_mac_address;
@@ -3667,6 +3816,7 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3667 printk(KERN_ALERT PFX 3816 printk(KERN_ALERT PFX
3668 "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n", 3817 "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
3669 qdev->index); 3818 qdev->index);
3819 err = -EIO;
3670 goto err_out_iounmap; 3820 goto err_out_iounmap;
3671 } 3821 }
3672 3822
@@ -3674,9 +3824,11 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3674 3824
3675 /* Validate and set parameters */ 3825 /* Validate and set parameters */
3676 if (qdev->mac_index) { 3826 if (qdev->mac_index) {
3827 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
3677 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress, 3828 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
3678 ETH_ALEN); 3829 ETH_ALEN);
3679 } else { 3830 } else {
3831 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
3680 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress, 3832 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
3681 ETH_ALEN); 3833 ETH_ALEN);
3682 } 3834 }
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index b2d76ea688..34cd6580fd 100755
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -1014,13 +1014,15 @@ struct eeprom_data {
1014 1014
1015/* Transmit and Receive Buffers */ 1015/* Transmit and Receive Buffers */
1016#define NUM_LBUFQ_ENTRIES 128 1016#define NUM_LBUFQ_ENTRIES 128
1017#define JUMBO_NUM_LBUFQ_ENTRIES \
1018(NUM_LBUFQ_ENTRIES/(JUMBO_MTU_SIZE/NORMAL_MTU_SIZE))
1017#define NUM_SBUFQ_ENTRIES 64 1019#define NUM_SBUFQ_ENTRIES 64
1018#define QL_SMALL_BUFFER_SIZE 32 1020#define QL_SMALL_BUFFER_SIZE 32
1019#define QL_ADDR_ELE_PER_BUFQ_ENTRY \ 1021#define QL_ADDR_ELE_PER_BUFQ_ENTRY \
1020(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element)) 1022(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element))
1021 /* Each send has at least control block. This is how many we keep. */ 1023 /* Each send has at least control block. This is how many we keep. */
1022#define NUM_SMALL_BUFFERS NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY 1024#define NUM_SMALL_BUFFERS NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY
1023#define NUM_LARGE_BUFFERS NUM_LBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY 1025
1024#define QL_HEADER_SPACE 32 /* make header space at top of skb. */ 1026#define QL_HEADER_SPACE 32 /* make header space at top of skb. */
1025/* 1027/*
1026 * Large & Small Buffers for Receives 1028 * Large & Small Buffers for Receives
@@ -1092,7 +1094,6 @@ struct oal_entry {
1092 u32 len; 1094 u32 len;
1093#define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */ 1095#define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */
1094#define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */ 1096#define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */
1095 u32 reserved;
1096}; 1097};
1097 1098
1098struct oal { 1099struct oal {
@@ -1193,7 +1194,7 @@ struct ql3_adapter {
1193 struct net_rsp_iocb *rsp_current; 1194 struct net_rsp_iocb *rsp_current;
1194 u16 rsp_consumer_index; 1195 u16 rsp_consumer_index;
1195 u16 reserved_06; 1196 u16 reserved_06;
1196 u32 *prsp_producer_index; 1197 volatile u32 *prsp_producer_index;
1197 u32 rsp_producer_index_phy_addr_high; 1198 u32 rsp_producer_index_phy_addr_high;
1198 u32 rsp_producer_index_phy_addr_low; 1199 u32 rsp_producer_index_phy_addr_low;
1199 1200
@@ -1207,9 +1208,11 @@ struct ql3_adapter {
1207 u32 lrg_buf_q_producer_index; 1208 u32 lrg_buf_q_producer_index;
1208 u32 lrg_buf_release_cnt; 1209 u32 lrg_buf_release_cnt;
1209 struct bufq_addr_element *lrg_buf_next_free; 1210 struct bufq_addr_element *lrg_buf_next_free;
1211 u32 num_large_buffers;
1212 u32 num_lbufq_entries;
1210 1213
1211 /* Large (Receive) Buffers */ 1214 /* Large (Receive) Buffers */
1212 struct ql_rcv_buf_cb lrg_buf[NUM_LARGE_BUFFERS]; 1215 struct ql_rcv_buf_cb *lrg_buf;
1213 struct ql_rcv_buf_cb *lrg_buf_free_head; 1216 struct ql_rcv_buf_cb *lrg_buf_free_head;
1214 struct ql_rcv_buf_cb *lrg_buf_free_tail; 1217 struct ql_rcv_buf_cb *lrg_buf_free_tail;
1215 u32 lrg_buf_free_count; 1218 u32 lrg_buf_free_count;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 13cf06ee97..15d954e50c 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -890,8 +890,7 @@ static void rtl8169_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
890 unsigned long flags; 890 unsigned long flags;
891 891
892 spin_lock_irqsave(&tp->lock, flags); 892 spin_lock_irqsave(&tp->lock, flags);
893 if (tp->vlgrp) 893 vlan_group_set_device(tp->vlgrp, vid, NULL);
894 tp->vlgrp->vlan_devices[vid] = NULL;
895 spin_unlock_irqrestore(&tp->lock, flags); 894 spin_unlock_irqrestore(&tp->lock, flags);
896} 895}
897 896
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index 0e345cbc2b..33fb7f3b70 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -430,6 +430,7 @@ struct XENA_dev_config {
430#define TX_PA_CFG_IGNORE_SNAP_OUI BIT(2) 430#define TX_PA_CFG_IGNORE_SNAP_OUI BIT(2)
431#define TX_PA_CFG_IGNORE_LLC_CTRL BIT(3) 431#define TX_PA_CFG_IGNORE_LLC_CTRL BIT(3)
432#define TX_PA_CFG_IGNORE_L2_ERR BIT(6) 432#define TX_PA_CFG_IGNORE_L2_ERR BIT(6)
433#define RX_PA_CFG_STRIP_VLAN_TAG BIT(15)
433 434
434/* Recent add, used only debug purposes. */ 435/* Recent add, used only debug purposes. */
435 u64 pcc_enable; 436 u64 pcc_enable;
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index fd85648d98..46ebf141ee 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -42,6 +42,14 @@
42 * Possible values '1' for enable '0' for disable. Default is '0' 42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be 43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet 44 * aggregated as a single large packet
45 * napi: This parameter is used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter is used to enable/disable UDP Fragmentation Offload (UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable VLAN stripping.
50 * Possible values '1' for enable, '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
45 ************************************************************************/ 53 ************************************************************************/
46 54
47#include <linux/module.h> 55#include <linux/module.h>
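A minimal sketch of the vlan_tag_strip semantics documented above (should_strip_vlan() is a hypothetical helper for illustration only, not part of the driver; it assumes NO_STRIP_IN_PROMISC is the documented default value of 2). The later hunks implement this policy by setting or clearing RX_PA_CFG_STRIP_VLAN_TAG in start_nic() and s2io_set_multicast():

static int should_strip_vlan(int vlan_tag_strip, int promiscuous)
{
	if (vlan_tag_strip == 0)	/* never strip */
		return 0;
	if (vlan_tag_strip == 1)	/* always strip */
		return 1;
	return !promiscuous;		/* default (2): strip except in promiscuous mode */
}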
@@ -76,7 +84,7 @@
76#include "s2io.h" 84#include "s2io.h"
77#include "s2io-regs.h" 85#include "s2io-regs.h"
78 86
79#define DRV_VERSION "2.0.16.1" 87#define DRV_VERSION "2.0.17.1"
80 88
81/* S2io Driver name & version. */ 89/* S2io Driver name & version. */
82static char s2io_driver_name[] = "Neterion"; 90static char s2io_driver_name[] = "Neterion";
@@ -131,7 +139,7 @@ static char s2io_gstrings[][ETH_GSTRING_LEN] = {
131 "BIST Test\t(offline)" 139 "BIST Test\t(offline)"
132}; 140};
133 141
134static char ethtool_stats_keys[][ETH_GSTRING_LEN] = { 142static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
135 {"tmac_frms"}, 143 {"tmac_frms"},
136 {"tmac_data_octets"}, 144 {"tmac_data_octets"},
137 {"tmac_drop_frms"}, 145 {"tmac_drop_frms"},
@@ -225,7 +233,10 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
225 {"rxd_rd_cnt"}, 233 {"rxd_rd_cnt"},
226 {"rxd_wr_cnt"}, 234 {"rxd_wr_cnt"},
227 {"txf_rd_cnt"}, 235 {"txf_rd_cnt"},
228 {"rxf_wr_cnt"}, 236 {"rxf_wr_cnt"}
237};
238
239static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
229 {"rmac_ttl_1519_4095_frms"}, 240 {"rmac_ttl_1519_4095_frms"},
230 {"rmac_ttl_4096_8191_frms"}, 241 {"rmac_ttl_4096_8191_frms"},
231 {"rmac_ttl_8192_max_frms"}, 242 {"rmac_ttl_8192_max_frms"},
@@ -241,7 +252,10 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
241 {"rmac_red_discard"}, 252 {"rmac_red_discard"},
242 {"rmac_rts_discard"}, 253 {"rmac_rts_discard"},
243 {"rmac_ingm_full_discard"}, 254 {"rmac_ingm_full_discard"},
244 {"link_fault_cnt"}, 255 {"link_fault_cnt"}
256};
257
258static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
245 {"\n DRIVER STATISTICS"}, 259 {"\n DRIVER STATISTICS"},
246 {"single_bit_ecc_errs"}, 260 {"single_bit_ecc_errs"},
247 {"double_bit_ecc_errs"}, 261 {"double_bit_ecc_errs"},
@@ -269,8 +283,16 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
269 ("lro_avg_aggr_pkts"), 283 ("lro_avg_aggr_pkts"),
270}; 284};
271 285
272#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN 286#define S2IO_XENA_STAT_LEN sizeof(ethtool_xena_stats_keys)/ ETH_GSTRING_LEN
273#define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN 287#define S2IO_ENHANCED_STAT_LEN sizeof(ethtool_enhanced_stats_keys)/ \
288 ETH_GSTRING_LEN
289#define S2IO_DRIVER_STAT_LEN sizeof(ethtool_driver_stats_keys)/ ETH_GSTRING_LEN
290
291#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
292#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
293
294#define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
295#define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
274 296
275#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN 297#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
276#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN 298#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
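Expanding the new macros makes the split explicit (just the identities implied above, no new values):

	XFRAME_I_STAT_LEN  = S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN
	XFRAME_II_STAT_LEN = S2IO_XENA_STAT_LEN + S2IO_ENHANCED_STAT_LEN + S2IO_DRIVER_STAT_LEN

The key arrays therefore have to be emitted in a fixed order — Xena keys, then (Xframe II only) the enhanced keys, then the driver keys — which is what the s2io_ethtool_get_strings() and s2io_get_ethtool_stats() hunks further down do.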
@@ -293,6 +315,9 @@ static void s2io_vlan_rx_register(struct net_device *dev,
293 spin_unlock_irqrestore(&nic->tx_lock, flags); 315 spin_unlock_irqrestore(&nic->tx_lock, flags);
294} 316}
295 317
318/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
319int vlan_strip_flag;
320
296/* Unregister the vlan */ 321/* Unregister the vlan */
297static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid) 322static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
298{ 323{
@@ -300,8 +325,7 @@ static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
300 unsigned long flags; 325 unsigned long flags;
301 326
302 spin_lock_irqsave(&nic->tx_lock, flags); 327 spin_lock_irqsave(&nic->tx_lock, flags);
303 if (nic->vlgrp) 328 vlan_group_set_device(nic->vlgrp, vid, NULL);
304 nic->vlgrp->vlan_devices[vid] = NULL;
305 spin_unlock_irqrestore(&nic->tx_lock, flags); 329 spin_unlock_irqrestore(&nic->tx_lock, flags);
306} 330}
307 331
@@ -404,6 +428,7 @@ S2IO_PARM_INT(indicate_max_pkts, 0);
404 428
405S2IO_PARM_INT(napi, 1); 429S2IO_PARM_INT(napi, 1);
406S2IO_PARM_INT(ufo, 0); 430S2IO_PARM_INT(ufo, 0);
431S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
407 432
408static unsigned int tx_fifo_len[MAX_TX_FIFOS] = 433static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
409 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; 434 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
@@ -1371,6 +1396,16 @@ static int init_nic(struct s2io_nic *nic)
1371 &bar0->rts_frm_len_n[i]); 1396 &bar0->rts_frm_len_n[i]);
1372 } 1397 }
1373 } 1398 }
1399
1400 /* Disable differentiated services steering logic */
1401 for (i = 0; i < 64; i++) {
1402 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1403 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1404 dev->name);
1405 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1406 return FAILURE;
1407 }
1408 }
1374 1409
1375 /* Program statistics memory */ 1410 /* Program statistics memory */
1376 writeq(mac_control->stats_mem_phy, &bar0->stat_addr); 1411 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
@@ -1943,6 +1978,13 @@ static int start_nic(struct s2io_nic *nic)
1943 writeq(val64, &bar0->rx_pa_cfg); 1978 writeq(val64, &bar0->rx_pa_cfg);
1944 } 1979 }
1945 1980
1981 if (vlan_tag_strip == 0) {
1982 val64 = readq(&bar0->rx_pa_cfg);
1983 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
1984 writeq(val64, &bar0->rx_pa_cfg);
1985 vlan_strip_flag = 0;
1986 }
1987
1946 /* 1988 /*
1947 * Enabling MC-RLDRAM. After enabling the device, we timeout 1989 * Enabling MC-RLDRAM. After enabling the device, we timeout
1948 * for around 100ms, which is approximately the time required 1990 * for around 100ms, which is approximately the time required
@@ -3195,26 +3237,37 @@ static void alarm_intr_handler(struct s2io_nic *nic)
3195 * SUCCESS on success and FAILURE on failure. 3237 * SUCCESS on success and FAILURE on failure.
3196 */ 3238 */
3197 3239
3198static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit) 3240static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3241 int bit_state)
3199{ 3242{
3200 int ret = FAILURE, cnt = 0; 3243 int ret = FAILURE, cnt = 0, delay = 1;
3201 u64 val64; 3244 u64 val64;
3202 3245
3203 while (TRUE) { 3246 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3247 return FAILURE;
3248
3249 do {
3204 val64 = readq(addr); 3250 val64 = readq(addr);
3205 if (!(val64 & busy_bit)) { 3251 if (bit_state == S2IO_BIT_RESET) {
3206 ret = SUCCESS; 3252 if (!(val64 & busy_bit)) {
3207 break; 3253 ret = SUCCESS;
3254 break;
3255 }
3256 } else {
3257 if (!(val64 & busy_bit)) {
3258 ret = SUCCESS;
3259 break;
3260 }
3208 } 3261 }
3209 3262
3210 if(in_interrupt()) 3263 if(in_interrupt())
3211 mdelay(50); 3264 mdelay(delay);
3212 else 3265 else
3213 msleep(50); 3266 msleep(delay);
3214 3267
3215 if (cnt++ > 10) 3268 if (++cnt >= 10)
3216 break; 3269 delay = 50;
3217 } 3270 } while (cnt < 20);
3218 return ret; 3271 return ret;
3219} 3272}
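A quick bound on the reworked polling loop above (rough arithmetic only; it ignores scheduler overshoot on msleep()): the first ten iterations sleep with delay = 1 ms, after which delay is bumped to 50 ms for the remaining ten, so a command that never completes is abandoned after roughly

	10 * 1 ms + 10 * 50 ms  ~=  510 ms

compared with the previous fixed 50 ms per retry (on the order of 600 ms worst case), while a command that completes quickly is now noticed within a millisecond or two instead of after a full 50 ms sleep.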
3220/* 3273/*
@@ -3340,6 +3393,9 @@ new_way:
3340 writeq(val64, &bar0->pcc_err_reg); 3393 writeq(val64, &bar0->pcc_err_reg);
3341 } 3394 }
3342 3395
3396 /* restore the previously assigned mac address */
3397 s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);
3398
3343 sp->device_enabled_once = FALSE; 3399 sp->device_enabled_once = FALSE;
3344} 3400}
3345 3401
@@ -4087,6 +4143,11 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4087 val64 &= ~GPIO_INT_MASK_LINK_UP; 4143 val64 &= ~GPIO_INT_MASK_LINK_UP;
4088 val64 |= GPIO_INT_MASK_LINK_DOWN; 4144 val64 |= GPIO_INT_MASK_LINK_DOWN;
4089 writeq(val64, &bar0->gpio_int_mask); 4145 writeq(val64, &bar0->gpio_int_mask);
4146
4147 /* turn off LED */
4148 val64 = readq(&bar0->adapter_control);
4149 val64 = val64 &(~ADAPTER_LED_ON);
4150 writeq(val64, &bar0->adapter_control);
4090 } 4151 }
4091 } 4152 }
4092 val64 = readq(&bar0->gpio_int_mask); 4153 val64 = readq(&bar0->gpio_int_mask);
@@ -4296,7 +4357,8 @@ static void s2io_set_multicast(struct net_device *dev)
4296 writeq(val64, &bar0->rmac_addr_cmd_mem); 4357 writeq(val64, &bar0->rmac_addr_cmd_mem);
4297 /* Wait till command completes */ 4358 /* Wait till command completes */
4298 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, 4359 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4299 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING); 4360 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4361 S2IO_BIT_RESET);
4300 4362
4301 sp->m_cast_flg = 1; 4363 sp->m_cast_flg = 1;
4302 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET; 4364 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
@@ -4312,7 +4374,8 @@ static void s2io_set_multicast(struct net_device *dev)
4312 writeq(val64, &bar0->rmac_addr_cmd_mem); 4374 writeq(val64, &bar0->rmac_addr_cmd_mem);
4313 /* Wait till command completes */ 4375 /* Wait till command completes */
4314 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, 4376 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4315 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING); 4377 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4378 S2IO_BIT_RESET);
4316 4379
4317 sp->m_cast_flg = 0; 4380 sp->m_cast_flg = 0;
4318 sp->all_multi_pos = 0; 4381 sp->all_multi_pos = 0;
@@ -4329,6 +4392,13 @@ static void s2io_set_multicast(struct net_device *dev)
4329 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 4392 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4330 writel((u32) (val64 >> 32), (add + 4)); 4393 writel((u32) (val64 >> 32), (add + 4));
4331 4394
4395 if (vlan_tag_strip != 1) {
4396 val64 = readq(&bar0->rx_pa_cfg);
4397 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4398 writeq(val64, &bar0->rx_pa_cfg);
4399 vlan_strip_flag = 0;
4400 }
4401
4332 val64 = readq(&bar0->mac_cfg); 4402 val64 = readq(&bar0->mac_cfg);
4333 sp->promisc_flg = 1; 4403 sp->promisc_flg = 1;
4334 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n", 4404 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
@@ -4344,6 +4414,13 @@ static void s2io_set_multicast(struct net_device *dev)
4344 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 4414 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4345 writel((u32) (val64 >> 32), (add + 4)); 4415 writel((u32) (val64 >> 32), (add + 4));
4346 4416
4417 if (vlan_tag_strip != 0) {
4418 val64 = readq(&bar0->rx_pa_cfg);
4419 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4420 writeq(val64, &bar0->rx_pa_cfg);
4421 vlan_strip_flag = 1;
4422 }
4423
4347 val64 = readq(&bar0->mac_cfg); 4424 val64 = readq(&bar0->mac_cfg);
4348 sp->promisc_flg = 0; 4425 sp->promisc_flg = 0;
4349 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", 4426 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
@@ -4378,7 +4455,8 @@ static void s2io_set_multicast(struct net_device *dev)
4378 4455
4379 /* Wait for command completes */ 4456 /* Wait for command completes */
4380 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, 4457 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4381 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) { 4458 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4459 S2IO_BIT_RESET)) {
4382 DBG_PRINT(ERR_DBG, "%s: Adding ", 4460 DBG_PRINT(ERR_DBG, "%s: Adding ",
4383 dev->name); 4461 dev->name);
4384 DBG_PRINT(ERR_DBG, "Multicasts failed\n"); 4462 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
@@ -4409,7 +4487,8 @@ static void s2io_set_multicast(struct net_device *dev)
4409 4487
4410 /* Wait for command completes */ 4488 /* Wait for command completes */
4411 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, 4489 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4412 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) { 4490 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4491 S2IO_BIT_RESET)) {
4413 DBG_PRINT(ERR_DBG, "%s: Adding ", 4492 DBG_PRINT(ERR_DBG, "%s: Adding ",
4414 dev->name); 4493 dev->name);
4415 DBG_PRINT(ERR_DBG, "Multicasts failed\n"); 4494 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
@@ -4435,6 +4514,7 @@ static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4435 struct XENA_dev_config __iomem *bar0 = sp->bar0; 4514 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4436 register u64 val64, mac_addr = 0; 4515 register u64 val64, mac_addr = 0;
4437 int i; 4516 int i;
4517 u64 old_mac_addr = 0;
4438 4518
4439 /* 4519 /*
4440 * Set the new MAC address as the new unicast filter and reflect this 4520 * Set the new MAC address as the new unicast filter and reflect this
@@ -4444,6 +4524,22 @@ static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4444 for (i = 0; i < ETH_ALEN; i++) { 4524 for (i = 0; i < ETH_ALEN; i++) {
4445 mac_addr <<= 8; 4525 mac_addr <<= 8;
4446 mac_addr |= addr[i]; 4526 mac_addr |= addr[i];
4527 old_mac_addr <<= 8;
4528 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4529 }
4530
4531 if(0 == mac_addr)
4532 return SUCCESS;
4533
4534 /* Update the internal structure with this new mac address */
4535 if(mac_addr != old_mac_addr) {
4536 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
4537 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4538 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4539 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4540 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4541 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4542 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
4447 } 4543 }
4448 4544
4449 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr), 4545 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
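A worked example of the byte packing used above, with a made-up address 00:0c:29:aa:bb:cc (illustration only): the build loop shifts addr[0] in first, so it ends up in the most significant byte,

	mac_addr = 0x000c29aabbcc

and the unpack into sp->def_mac_addr[0].mac_addr[] reverses that,

	mac_addr[0] = (u8)(mac_addr >> 40) = 0x00
	mac_addr[1] = (u8)(mac_addr >> 32) = 0x0c
	...
	mac_addr[5] = (u8) mac_addr        = 0xcc

so the stored default address reads back in normal wire order.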
@@ -4455,7 +4551,7 @@ static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4455 writeq(val64, &bar0->rmac_addr_cmd_mem); 4551 writeq(val64, &bar0->rmac_addr_cmd_mem);
4456 /* Wait till command completes */ 4552 /* Wait till command completes */
4457 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, 4553 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4458 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) { 4554 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4459 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name); 4555 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4460 return FAILURE; 4556 return FAILURE;
4461 } 4557 }
@@ -4546,7 +4642,11 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4546 info->regdump_len = XENA_REG_SPACE; 4642 info->regdump_len = XENA_REG_SPACE;
4547 info->eedump_len = XENA_EEPROM_SPACE; 4643 info->eedump_len = XENA_EEPROM_SPACE;
4548 info->testinfo_len = S2IO_TEST_LEN; 4644 info->testinfo_len = S2IO_TEST_LEN;
4549 info->n_stats = S2IO_STAT_LEN; 4645
4646 if (sp->device_type == XFRAME_I_DEVICE)
4647 info->n_stats = XFRAME_I_STAT_LEN;
4648 else
4649 info->n_stats = XFRAME_II_STAT_LEN;
4550} 4650}
4551 4651
4552/** 4652/**
@@ -5568,22 +5668,30 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
5568 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt); 5668 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5569 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt); 5669 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5570 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt); 5670 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5571 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms); 5671
5572 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms); 5672 /* Enhanced statistics exist only for Hercules */
5573 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_8192_max_frms); 5673 if(sp->device_type == XFRAME_II_DEVICE) {
5574 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms); 5674 tmp_stats[i++] =
5575 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms); 5675 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5576 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms); 5676 tmp_stats[i++] =
5577 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms); 5677 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5578 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms); 5678 tmp_stats[i++] =
5579 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard); 5679 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5580 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard); 5680 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5581 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard); 5681 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5582 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard); 5682 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5583 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard); 5683 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5584 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard); 5684 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5585 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard); 5685 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5586 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt); 5686 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5687 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5688 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5689 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5690 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5691 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5692 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
5693 }
5694
5587 tmp_stats[i++] = 0; 5695 tmp_stats[i++] = 0;
5588 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs; 5696 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5589 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs; 5697 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
@@ -5663,18 +5771,42 @@ static int s2io_ethtool_self_test_count(struct net_device *dev)
5663static void s2io_ethtool_get_strings(struct net_device *dev, 5771static void s2io_ethtool_get_strings(struct net_device *dev,
5664 u32 stringset, u8 * data) 5772 u32 stringset, u8 * data)
5665{ 5773{
5774 int stat_size = 0;
5775 struct s2io_nic *sp = dev->priv;
5776
5666 switch (stringset) { 5777 switch (stringset) {
5667 case ETH_SS_TEST: 5778 case ETH_SS_TEST:
5668 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN); 5779 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5669 break; 5780 break;
5670 case ETH_SS_STATS: 5781 case ETH_SS_STATS:
5671 memcpy(data, &ethtool_stats_keys, 5782 stat_size = sizeof(ethtool_xena_stats_keys);
5672 sizeof(ethtool_stats_keys)); 5783 memcpy(data, &ethtool_xena_stats_keys,stat_size);
5784 if(sp->device_type == XFRAME_II_DEVICE) {
5785 memcpy(data + stat_size,
5786 &ethtool_enhanced_stats_keys,
5787 sizeof(ethtool_enhanced_stats_keys));
5788 stat_size += sizeof(ethtool_enhanced_stats_keys);
5789 }
5790
5791 memcpy(data + stat_size, &ethtool_driver_stats_keys,
5792 sizeof(ethtool_driver_stats_keys));
5673 } 5793 }
5674} 5794}
5675static int s2io_ethtool_get_stats_count(struct net_device *dev) 5795static int s2io_ethtool_get_stats_count(struct net_device *dev)
5676{ 5796{
5677 return (S2IO_STAT_LEN); 5797 struct s2io_nic *sp = dev->priv;
5798 int stat_count = 0;
5799 switch(sp->device_type) {
5800 case XFRAME_I_DEVICE:
5801 stat_count = XFRAME_I_STAT_LEN;
5802 break;
5803
5804 case XFRAME_II_DEVICE:
5805 stat_count = XFRAME_II_STAT_LEN;
5806 break;
5807 }
5808
5809 return stat_count;
5678} 5810}
5679 5811
5680static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) 5812static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
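
The two ethtool callbacks above have to stay in lockstep: the strings copied by get_strings() for ETH_SS_STATS must match, in order and count, the value returned by get_stats_count(), or ethtool -S pairs names with the wrong values. A self-contained sketch of the same layering idea (base keys, optional enhanced keys for Hercules/Xframe II, driver keys); the key tables and lengths below are placeholders, not the driver's real ethtool_*_stats_keys arrays:

#include <stdio.h>
#include <string.h>

#define KEY_LEN 32	/* stands in for ETH_GSTRING_LEN */

static const char base_keys[][KEY_LEN]     = { "tmac_frms", "rmac_frms" };
static const char enhanced_keys[][KEY_LEN] = { "rmac_ttl_1519_4095_frms" };
static const char driver_keys[][KEY_LEN]   = { "single_ecc_errs" };

/* The count must mirror exactly what get_strings() copies. */
static int stats_count(int is_xframe_ii)
{
	int n = sizeof(base_keys) / KEY_LEN + sizeof(driver_keys) / KEY_LEN;

	if (is_xframe_ii)
		n += sizeof(enhanced_keys) / KEY_LEN;
	return n;
}

static void get_strings(int is_xframe_ii, char *data)
{
	size_t off = sizeof(base_keys);

	memcpy(data, base_keys, off);
	if (is_xframe_ii) {
		memcpy(data + off, enhanced_keys, sizeof(enhanced_keys));
		off += sizeof(enhanced_keys);
	}
	memcpy(data + off, driver_keys, sizeof(driver_keys));
}

int main(void)
{
	char buf[16 * KEY_LEN];
	int i, n = stats_count(1);

	get_strings(1, buf);
	for (i = 0; i < n; i++)
		printf("%s\n", buf + i * KEY_LEN);
	return 0;
}
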
@@ -5909,7 +6041,7 @@ static void s2io_set_link(struct work_struct *work)
5909 clear_bit(0, &(nic->link_state)); 6041 clear_bit(0, &(nic->link_state));
5910 6042
5911out_unlock: 6043out_unlock:
5912 rtnl_lock(); 6044 rtnl_unlock();
5913} 6045}
5914 6046
5915static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, 6047static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
@@ -6066,10 +6198,13 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
6066 rx_blocks[j].rxds[k].virt_addr; 6198 rx_blocks[j].rxds[k].virt_addr;
6067 if(sp->rxd_mode >= RXD_MODE_3A) 6199 if(sp->rxd_mode >= RXD_MODE_3A)
6068 ba = &mac_control->rings[i].ba[j][k]; 6200 ba = &mac_control->rings[i].ba[j][k];
6069 set_rxd_buffer_pointer(sp, rxdp, ba, 6201 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6070 &skb,(u64 *)&temp0_64, 6202 &skb,(u64 *)&temp0_64,
6071 (u64 *)&temp1_64, 6203 (u64 *)&temp1_64,
6072 (u64 *)&temp2_64, size); 6204 (u64 *)&temp2_64,
6205 size) == ENOMEM) {
6206 return 0;
6207 }
6073 6208
6074 set_rxd_buffer_size(sp, rxdp, size); 6209 set_rxd_buffer_size(sp, rxdp, size);
6075 wmb(); 6210 wmb();
@@ -6112,7 +6247,7 @@ static int s2io_add_isr(struct s2io_nic * sp)
6112 } 6247 }
6113 } 6248 }
6114 if (sp->intr_type == MSI_X) { 6249 if (sp->intr_type == MSI_X) {
6115 int i; 6250 int i, msix_tx_cnt=0,msix_rx_cnt=0;
6116 6251
6117 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) { 6252 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6118 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) { 6253 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
@@ -6121,16 +6256,36 @@ static int s2io_add_isr(struct s2io_nic * sp)
6121 err = request_irq(sp->entries[i].vector, 6256 err = request_irq(sp->entries[i].vector,
6122 s2io_msix_fifo_handle, 0, sp->desc[i], 6257 s2io_msix_fifo_handle, 0, sp->desc[i],
6123 sp->s2io_entries[i].arg); 6258 sp->s2io_entries[i].arg);
6124 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i], 6259 /* If either data or addr is zero print it */
6125 (unsigned long long)sp->msix_info[i].addr); 6260 if(!(sp->msix_info[i].addr &&
6261 sp->msix_info[i].data)) {
6262 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6263 "Data:0x%lx\n",sp->desc[i],
6264 (unsigned long long)
6265 sp->msix_info[i].addr,
6266 (unsigned long)
6267 ntohl(sp->msix_info[i].data));
6268 } else {
6269 msix_tx_cnt++;
6270 }
6126 } else { 6271 } else {
6127 sprintf(sp->desc[i], "%s:MSI-X-%d-RX", 6272 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6128 dev->name, i); 6273 dev->name, i);
6129 err = request_irq(sp->entries[i].vector, 6274 err = request_irq(sp->entries[i].vector,
6130 s2io_msix_ring_handle, 0, sp->desc[i], 6275 s2io_msix_ring_handle, 0, sp->desc[i],
6131 sp->s2io_entries[i].arg); 6276 sp->s2io_entries[i].arg);
6132 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i], 6277 /* If either data or addr is zero print it */
6133 (unsigned long long)sp->msix_info[i].addr); 6278 if(!(sp->msix_info[i].addr &&
6279 sp->msix_info[i].data)) {
6280 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6281 "Data:0x%lx\n",sp->desc[i],
6282 (unsigned long long)
6283 sp->msix_info[i].addr,
6284 (unsigned long)
6285 ntohl(sp->msix_info[i].data));
6286 } else {
6287 msix_rx_cnt++;
6288 }
6134 } 6289 }
6135 if (err) { 6290 if (err) {
6136 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration " 6291 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
@@ -6140,6 +6295,8 @@ static int s2io_add_isr(struct s2io_nic * sp)
6140 } 6295 }
6141 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS; 6296 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6142 } 6297 }
6298 printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
6299 printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
6143 } 6300 }
6144 if (sp->intr_type == INTA) { 6301 if (sp->intr_type == INTA) {
6145 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED, 6302 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
@@ -6567,7 +6724,8 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
6567 6724
6568 if (!sp->lro) { 6725 if (!sp->lro) {
6569 skb->protocol = eth_type_trans(skb, dev); 6726 skb->protocol = eth_type_trans(skb, dev);
6570 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) { 6727 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
6728 vlan_strip_flag)) {
6571 /* Queueing the vlan frame to the upper layer */ 6729 /* Queueing the vlan frame to the upper layer */
6572 if (napi) 6730 if (napi)
6573 vlan_hwaccel_receive_skb(skb, sp->vlgrp, 6731 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
@@ -6704,8 +6862,7 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6704 "Defaulting to INTA\n"); 6862 "Defaulting to INTA\n");
6705 *dev_intr_type = INTA; 6863 *dev_intr_type = INTA;
6706 } 6864 }
6707 if ( (rx_ring_num > 1) && (*dev_intr_type != INTA) ) 6865
6708 napi = 0;
6709 if (rx_ring_mode > 3) { 6866 if (rx_ring_mode > 3) {
6710 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n"); 6867 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6711 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n"); 6868 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
@@ -6715,6 +6872,37 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6715} 6872}
6716 6873
6717/** 6874/**
6875 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
6876 * or Traffic class respectively.
 6877 * @nic: device private variable
6878 * Description: The function configures the receive steering to
6879 * desired receive ring.
6880 * Return Value: SUCCESS on success and
 6881 * FAILURE if the DS codepoint is out of range or the command does not complete.
6882 */
6883static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
6884{
6885 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6886 register u64 val64 = 0;
6887
6888 if (ds_codepoint > 63)
6889 return FAILURE;
6890
6891 val64 = RTS_DS_MEM_DATA(ring);
6892 writeq(val64, &bar0->rts_ds_mem_data);
6893
6894 val64 = RTS_DS_MEM_CTRL_WE |
6895 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
6896 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
6897
6898 writeq(val64, &bar0->rts_ds_mem_ctrl);
6899
6900 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
6901 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
6902 S2IO_BIT_RESET);
6903}
6904
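
A hedged usage sketch for the rts_ds_steer() helper added above: steering a DSCP (or IPv6 Traffic Class) codepoint to a specific receive ring would look roughly like the call below. The wrapper function, the codepoint 46 (EF) and ring 1 are illustrative; only rts_ds_steer(), DBG_PRINT() and the SUCCESS/FAILURE convention come from the driver itself, so this is a sketch against the driver's context rather than a standalone program.

/* Illustrative only: steer DSCP 46 (EF) traffic to receive ring 1. */
static int example_steer_ef(struct s2io_nic *nic, struct net_device *dev)
{
	u8 ds_codepoint = 46;	/* any 6-bit DSCP/TC value, must be <= 63 */
	u8 ring = 1;		/* target receive ring */

	if (rts_ds_steer(nic, ds_codepoint, ring) != SUCCESS) {
		DBG_PRINT(ERR_DBG, "%s: RTS DS steering failed\n", dev->name);
		return FAILURE;
	}
	return SUCCESS;
}
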
6905/**
6718 * s2io_init_nic - Initialization of the adapter . 6906 * s2io_init_nic - Initialization of the adapter .
6719 * @pdev : structure containing the PCI related information of the device. 6907 * @pdev : structure containing the PCI related information of the device.
6720 * @pre: List of PCI devices supported by the driver listed in s2io_tbl. 6908 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
@@ -7008,13 +7196,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7008 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET); 7196 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7009 writeq(val64, &bar0->rmac_addr_cmd_mem); 7197 writeq(val64, &bar0->rmac_addr_cmd_mem);
7010 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, 7198 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7011 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING); 7199 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
7012 tmp64 = readq(&bar0->rmac_addr_data0_mem); 7200 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7013 mac_down = (u32) tmp64; 7201 mac_down = (u32) tmp64;
7014 mac_up = (u32) (tmp64 >> 32); 7202 mac_up = (u32) (tmp64 >> 32);
7015 7203
7016 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
7017
7018 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up); 7204 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7019 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8); 7205 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7020 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16); 7206 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 0de0c65f94..803137ca4b 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -32,7 +32,8 @@
32#define FAILURE -1 32#define FAILURE -1
33#define S2IO_MINUS_ONE 0xFFFFFFFFFFFFFFFFULL 33#define S2IO_MINUS_ONE 0xFFFFFFFFFFFFFFFFULL
34#define S2IO_MAX_PCI_CONFIG_SPACE_REINIT 100 34#define S2IO_MAX_PCI_CONFIG_SPACE_REINIT 100
35 35#define S2IO_BIT_RESET 1
36#define S2IO_BIT_SET 2
36#define CHECKBIT(value, nbit) (value & (1 << nbit)) 37#define CHECKBIT(value, nbit) (value & (1 << nbit))
37 38
38/* Maximum time to flicker LED when asked to identify NIC using ethtool */ 39/* Maximum time to flicker LED when asked to identify NIC using ethtool */
@@ -296,6 +297,9 @@ struct stat_block {
296 struct xpakStat xpak_stat; 297 struct xpakStat xpak_stat;
297}; 298};
298 299
300/* Default value for 'vlan_strip_tag' configuration parameter */
301#define NO_STRIP_IN_PROMISC 2
302
299/* 303/*
300 * Structures representing different init time configuration 304 * Structures representing different init time configuration
301 * parameters of the NIC. 305 * parameters of the NIC.
@@ -1005,7 +1009,8 @@ static int s2io_set_swapper(struct s2io_nic * sp);
1005static void s2io_card_down(struct s2io_nic *nic); 1009static void s2io_card_down(struct s2io_nic *nic);
1006static int s2io_card_up(struct s2io_nic *nic); 1010static int s2io_card_up(struct s2io_nic *nic);
1007static int get_xena_rev_id(struct pci_dev *pdev); 1011static int get_xena_rev_id(struct pci_dev *pdev);
1008static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit); 1012static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
1013 int bit_state);
1009static int s2io_add_isr(struct s2io_nic * sp); 1014static int s2io_add_isr(struct s2io_nic * sp);
1010static void s2io_rem_isr(struct s2io_nic * sp); 1015static void s2io_rem_isr(struct s2io_nic * sp);
1011 1016
@@ -1019,6 +1024,7 @@ static void queue_rx_frame(struct sk_buff *skb);
1019static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro); 1024static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro);
1020static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro, 1025static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
1021 struct sk_buff *skb, u32 tcp_len); 1026 struct sk_buff *skb, u32 tcp_len);
1027static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring);
1022 1028
1023#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size 1029#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
1024#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size 1030#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index a833e7f975..52ed522a23 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -12,26 +12,15 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/ioport.h>
16#include <linux/socket.h>
17#include <linux/in.h>
18#include <linux/route.h>
19#include <linux/slab.h> 15#include <linux/slab.h>
20#include <linux/string.h> 16#include <linux/string.h>
21#include <linux/delay.h> 17#include <linux/delay.h>
22#include <linux/netdevice.h> 18#include <linux/netdevice.h>
23#include <linux/etherdevice.h> 19#include <linux/etherdevice.h>
24#include <linux/skbuff.h> 20#include <linux/skbuff.h>
25#include <linux/bitops.h>
26 21
27#include <asm/byteorder.h>
28#include <asm/io.h>
29#include <asm/system.h>
30#include <asm/page.h>
31#include <asm/pgtable.h>
32#include <asm/sgi/hpc3.h> 22#include <asm/sgi/hpc3.h>
33#include <asm/sgi/ip22.h> 23#include <asm/sgi/ip22.h>
34#include <asm/sgialib.h>
35 24
36#include "sgiseeq.h" 25#include "sgiseeq.h"
37 26
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index fb2b530516..b3750f2842 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -968,10 +968,10 @@ static void mdio_write(struct net_device *net_dev, int phy_id, int location,
968 968
969static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr) 969static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr)
970{ 970{
971 int i = 0; 971 int i;
972 u16 status; 972 u16 status;
973 973
974 while (i++ < 2) 974 for (i = 0; i < 2; i++)
975 status = mdio_read(net_dev, phy_addr, MII_STATUS); 975 status = mdio_read(net_dev, phy_addr, MII_STATUS);
976 976
977 mdio_write( net_dev, phy_addr, MII_CONTROL, MII_CNTL_RESET ); 977 mdio_write( net_dev, phy_addr, MII_CONTROL, MII_CNTL_RESET );
@@ -1430,7 +1430,7 @@ static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr)
1430 int i = 0; 1430 int i = 0;
1431 u32 status; 1431 u32 status;
1432 1432
1433 while (i++ < 2) 1433 for (i = 0; i < 2; i++)
1434 status = mdio_read(net_dev, phy_addr, MII_STATUS); 1434 status = mdio_read(net_dev, phy_addr, MII_STATUS);
1435 1435
1436 if (!(status & MII_STAT_LINK)){ 1436 if (!(status & MII_STAT_LINK)){
@@ -1466,9 +1466,9 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex
1466 int phy_addr = sis_priv->cur_phy; 1466 int phy_addr = sis_priv->cur_phy;
1467 u32 status; 1467 u32 status;
1468 u16 autoadv, autorec; 1468 u16 autoadv, autorec;
1469 int i = 0; 1469 int i;
1470 1470
1471 while (i++ < 2) 1471 for (i = 0; i < 2; i++)
1472 status = mdio_read(net_dev, phy_addr, MII_STATUS); 1472 status = mdio_read(net_dev, phy_addr, MII_STATUS);
1473 1473
1474 if (!(status & MII_STAT_LINK)) 1474 if (!(status & MII_STAT_LINK))
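
The switch from while (i++ < 2) to a for loop above keeps the double read of MII_STATUS; reading the register twice is deliberate, since the BMSR link-status bit is latched low and the first read may still report a stale link drop, while the second read reflects the current state. A small standalone sketch of the pattern, with mdio_read() stubbed out as a canned accessor (names and values are placeholders):

#include <stdio.h>

#define MII_STATUS	0x01
#define MII_STAT_LINK	0x0004

/* Stand-in for the real MDIO accessor; returns a canned register value. */
static unsigned short mdio_read(int phy_addr, int reg)
{
	return (reg == MII_STATUS) ? MII_STAT_LINK : 0;
}

/* Read the status register twice so the latched value is discarded. */
static int link_is_up(int phy_addr)
{
	unsigned short status = 0;
	int i;

	for (i = 0; i < 2; i++)
		status = mdio_read(phy_addr, MII_STATUS);

	return (status & MII_STAT_LINK) != 0;
}

int main(void)
{
	printf("link %s\n", link_is_up(0) ? "up" : "down");
	return 0;
}
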
diff --git a/drivers/net/skfp/cfm.c b/drivers/net/skfp/cfm.c
index 4c8aaa7623..5310d39b57 100644
--- a/drivers/net/skfp/cfm.c
+++ b/drivers/net/skfp/cfm.c
@@ -73,7 +73,7 @@ static const char * const cfm_events[] = {
73/* 73/*
74 * map from state to downstream port type 74 * map from state to downstream port type
75 */ 75 */
76static const u_char cf_to_ptype[] = { 76static const unsigned char cf_to_ptype[] = {
77 TNONE,TNONE,TNONE,TNONE,TNONE, 77 TNONE,TNONE,TNONE,TNONE,TNONE,
78 TNONE,TB,TB,TS, 78 TNONE,TB,TB,TS,
79 TA,TB,TS,TB 79 TA,TB,TS,TB
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index c3d2e0a2c4..eea75a401b 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -77,13 +77,13 @@ static const struct pci_device_id skge_id_table[] = {
77 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) }, 77 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
78 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) }, 78 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
79 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) }, 79 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
80 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), }, 80 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T) },
81 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* DGE-530T */ 81 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* DGE-530T */
82 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, 82 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
83 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */ 83 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
84 { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) }, 84 { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
85 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) }, 85 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) },
86 { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015, }, 86 { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 },
87 { 0 } 87 { 0 }
88}; 88};
89MODULE_DEVICE_TABLE(pci, skge_id_table); 89MODULE_DEVICE_TABLE(pci, skge_id_table);
@@ -2767,6 +2767,17 @@ static int skge_change_mtu(struct net_device *dev, int new_mtu)
2767 return err; 2767 return err;
2768} 2768}
2769 2769
2770static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
2771
2772static void genesis_add_filter(u8 filter[8], const u8 *addr)
2773{
2774 u32 crc, bit;
2775
2776 crc = ether_crc_le(ETH_ALEN, addr);
2777 bit = ~crc & 0x3f;
2778 filter[bit/8] |= 1 << (bit%8);
2779}
2780
2770static void genesis_set_multicast(struct net_device *dev) 2781static void genesis_set_multicast(struct net_device *dev)
2771{ 2782{
2772 struct skge_port *skge = netdev_priv(dev); 2783 struct skge_port *skge = netdev_priv(dev);
@@ -2788,24 +2799,33 @@ static void genesis_set_multicast(struct net_device *dev)
2788 memset(filter, 0xff, sizeof(filter)); 2799 memset(filter, 0xff, sizeof(filter));
2789 else { 2800 else {
2790 memset(filter, 0, sizeof(filter)); 2801 memset(filter, 0, sizeof(filter));
2791 for (i = 0; list && i < count; i++, list = list->next) { 2802
2792 u32 crc, bit; 2803 if (skge->flow_status == FLOW_STAT_REM_SEND
2793 crc = ether_crc_le(ETH_ALEN, list->dmi_addr); 2804 || skge->flow_status == FLOW_STAT_SYMMETRIC)
2794 bit = ~crc & 0x3f; 2805 genesis_add_filter(filter, pause_mc_addr);
2795 filter[bit/8] |= 1 << (bit%8); 2806
2796 } 2807 for (i = 0; list && i < count; i++, list = list->next)
2808 genesis_add_filter(filter, list->dmi_addr);
2797 } 2809 }
2798 2810
2799 xm_write32(hw, port, XM_MODE, mode); 2811 xm_write32(hw, port, XM_MODE, mode);
2800 xm_outhash(hw, port, XM_HSM, filter); 2812 xm_outhash(hw, port, XM_HSM, filter);
2801} 2813}
2802 2814
2815static void yukon_add_filter(u8 filter[8], const u8 *addr)
2816{
2817 u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f;
2818 filter[bit/8] |= 1 << (bit%8);
2819}
2820
2803static void yukon_set_multicast(struct net_device *dev) 2821static void yukon_set_multicast(struct net_device *dev)
2804{ 2822{
2805 struct skge_port *skge = netdev_priv(dev); 2823 struct skge_port *skge = netdev_priv(dev);
2806 struct skge_hw *hw = skge->hw; 2824 struct skge_hw *hw = skge->hw;
2807 int port = skge->port; 2825 int port = skge->port;
2808 struct dev_mc_list *list = dev->mc_list; 2826 struct dev_mc_list *list = dev->mc_list;
2827 int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND
2828 || skge->flow_status == FLOW_STAT_SYMMETRIC);
2809 u16 reg; 2829 u16 reg;
2810 u8 filter[8]; 2830 u8 filter[8];
2811 2831
@@ -2818,16 +2838,17 @@ static void yukon_set_multicast(struct net_device *dev)
2818 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 2838 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
2819 else if (dev->flags & IFF_ALLMULTI) /* all multicast */ 2839 else if (dev->flags & IFF_ALLMULTI) /* all multicast */
2820 memset(filter, 0xff, sizeof(filter)); 2840 memset(filter, 0xff, sizeof(filter));
2821 else if (dev->mc_count == 0) /* no multicast */ 2841 else if (dev->mc_count == 0 && !rx_pause)/* no multicast */
2822 reg &= ~GM_RXCR_MCF_ENA; 2842 reg &= ~GM_RXCR_MCF_ENA;
2823 else { 2843 else {
2824 int i; 2844 int i;
2825 reg |= GM_RXCR_MCF_ENA; 2845 reg |= GM_RXCR_MCF_ENA;
2826 2846
2827 for (i = 0; list && i < dev->mc_count; i++, list = list->next) { 2847 if (rx_pause)
2828 u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f; 2848 yukon_add_filter(filter, pause_mc_addr);
2829 filter[bit/8] |= 1 << (bit%8); 2849
2830 } 2850 for (i = 0; list && i < dev->mc_count; i++, list = list->next)
2851 yukon_add_filter(filter, list->dmi_addr);
2831 } 2852 }
2832 2853
2833 2854
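
Both genesis_add_filter() and yukon_add_filter() above reduce a multicast address to one bit in the 64-bit hash filter; GENESIS takes the low six bits of the inverted little-endian CRC, Yukon the low six bits of the big-endian CRC. A standalone sketch of the GENESIS-style selection, with a plain bitwise CRC standing in for the kernel's ether_crc_le() (same reflected polynomial, illustrative only):

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Bitwise reflected CRC-32 over 'len' bytes, initial value ~0. */
static uint32_t crc32_le(const uint8_t *data, int len)
{
	uint32_t crc = ~0U;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (crc & 1 ? 0xedb88320 : 0);
	}
	return crc;
}

/* GENESIS-style hashing: low six bits of the inverted CRC pick the bit. */
static void add_filter(uint8_t filter[8], const uint8_t *addr)
{
	uint32_t bit = ~crc32_le(addr, ETH_ALEN) & 0x3f;

	filter[bit / 8] |= 1 << (bit % 8);
}

int main(void)
{
	const uint8_t pause_mc[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 };
	uint8_t filter[8] = { 0 };
	int i;

	add_filter(filter, pause_mc);
	for (i = 0; i < 8; i++)
		printf("%02x ", filter[i]);
	printf("\n");
	return 0;
}
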
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 17b1b479df..e9354dfa7e 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -1849,8 +1849,7 @@ enum {
1849 GMR_FS_JABBER, 1849 GMR_FS_JABBER,
1850/* Rx GMAC FIFO Flush Mask (default) */ 1850/* Rx GMAC FIFO Flush Mask (default) */
1851 RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR | 1851 RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR |
1852 GMR_FS_BAD_FC | GMR_FS_GOOD_FC | GMR_FS_UN_SIZE | 1852 GMR_FS_BAD_FC | GMR_FS_UN_SIZE | GMR_FS_JABBER,
1853 GMR_FS_JABBER,
1854}; 1853};
1855 1854
1856/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */ 1855/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 52edbd7ac1..53839979cf 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1053,8 +1053,7 @@ static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1053 1053
1054 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF); 1054 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
1055 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF); 1055 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
1056 if (sky2->vlgrp) 1056 vlan_group_set_device(sky2->vlgrp, vid, NULL);
1057 sky2->vlgrp->vlan_devices[vid] = NULL;
1058 1057
1059 netif_tx_unlock_bh(dev); 1058 netif_tx_unlock_bh(dev);
1060} 1059}
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 64ed8ff5b0..3b91af89e4 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * Network device driver for Cell Processor-Based Blade 2 * Network device driver for Cell Processor-Based Blade and Celleb platform
3 * 3 *
4 * (C) Copyright IBM Corp. 2005 4 * (C) Copyright IBM Corp. 2005
5 * (C) Copyright 2006 TOSHIBA CORPORATION
5 * 6 *
6 * Authors : Utz Bacher <utz.bacher@de.ibm.com> 7 * Authors : Utz Bacher <utz.bacher@de.ibm.com>
7 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com> 8 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
@@ -166,6 +167,41 @@ spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
166} 167}
167 168
168/** 169/**
170 * spider_net_setup_aneg - initial auto-negotiation setup
171 * @card: device structure
172 **/
173static void
174spider_net_setup_aneg(struct spider_net_card *card)
175{
176 struct mii_phy *phy = &card->phy;
177 u32 advertise = 0;
178 u16 bmcr, bmsr, stat1000, estat;
179
180 bmcr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMCR);
181 bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
182 stat1000 = spider_net_read_phy(card->netdev, phy->mii_id, MII_STAT1000);
183 estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);
184
185 if (bmsr & BMSR_10HALF)
186 advertise |= ADVERTISED_10baseT_Half;
187 if (bmsr & BMSR_10FULL)
188 advertise |= ADVERTISED_10baseT_Full;
189 if (bmsr & BMSR_100HALF)
190 advertise |= ADVERTISED_100baseT_Half;
191 if (bmsr & BMSR_100FULL)
192 advertise |= ADVERTISED_100baseT_Full;
193
194 if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
195 advertise |= SUPPORTED_1000baseT_Full;
196 if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
197 advertise |= SUPPORTED_1000baseT_Half;
198
199 mii_phy_probe(phy, phy->mii_id);
200 phy->def->ops->setup_aneg(phy, advertise);
201
202}
203
204/**
169 * spider_net_rx_irq_off - switch off rx irq on this spider card 205 * spider_net_rx_irq_off - switch off rx irq on this spider card
170 * @card: device structure 206 * @card: device structure
171 * 207 *
@@ -263,9 +299,9 @@ spider_net_get_mac_address(struct net_device *netdev)
263 * returns the status as in the dmac_cmd_status field of the descriptor 299 * returns the status as in the dmac_cmd_status field of the descriptor
264 */ 300 */
265static inline int 301static inline int
266spider_net_get_descr_status(struct spider_net_descr *descr) 302spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
267{ 303{
268 return descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK; 304 return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
269} 305}
270 306
271/** 307/**
@@ -283,12 +319,12 @@ spider_net_free_chain(struct spider_net_card *card,
283 descr = chain->ring; 319 descr = chain->ring;
284 do { 320 do {
285 descr->bus_addr = 0; 321 descr->bus_addr = 0;
286 descr->next_descr_addr = 0; 322 descr->hwdescr->next_descr_addr = 0;
287 descr = descr->next; 323 descr = descr->next;
288 } while (descr != chain->ring); 324 } while (descr != chain->ring);
289 325
290 dma_free_coherent(&card->pdev->dev, chain->num_desc, 326 dma_free_coherent(&card->pdev->dev, chain->num_desc,
291 chain->ring, chain->dma_addr); 327 chain->hwring, chain->dma_addr);
292} 328}
293 329
294/** 330/**
@@ -307,31 +343,34 @@ spider_net_init_chain(struct spider_net_card *card,
307{ 343{
308 int i; 344 int i;
309 struct spider_net_descr *descr; 345 struct spider_net_descr *descr;
346 struct spider_net_hw_descr *hwdescr;
310 dma_addr_t buf; 347 dma_addr_t buf;
311 size_t alloc_size; 348 size_t alloc_size;
312 349
313 alloc_size = chain->num_desc * sizeof (struct spider_net_descr); 350 alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
314 351
315 chain->ring = dma_alloc_coherent(&card->pdev->dev, alloc_size, 352 chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
316 &chain->dma_addr, GFP_KERNEL); 353 &chain->dma_addr, GFP_KERNEL);
317 354
318 if (!chain->ring) 355 if (!chain->hwring)
319 return -ENOMEM; 356 return -ENOMEM;
320 357
321 descr = chain->ring; 358 memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr));
322 memset(descr, 0, alloc_size);
323 359
324 /* Set up the hardware pointers in each descriptor */ 360 /* Set up the hardware pointers in each descriptor */
361 descr = chain->ring;
362 hwdescr = chain->hwring;
325 buf = chain->dma_addr; 363 buf = chain->dma_addr;
326 for (i=0; i < chain->num_desc; i++, descr++) { 364 for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
327 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 365 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
366 hwdescr->next_descr_addr = 0;
328 367
368 descr->hwdescr = hwdescr;
329 descr->bus_addr = buf; 369 descr->bus_addr = buf;
330 descr->next_descr_addr = 0;
331 descr->next = descr + 1; 370 descr->next = descr + 1;
332 descr->prev = descr - 1; 371 descr->prev = descr - 1;
333 372
334 buf += sizeof(struct spider_net_descr); 373 buf += sizeof(struct spider_net_hw_descr);
335 } 374 }
336 /* do actual circular list */ 375 /* do actual circular list */
337 (descr-1)->next = chain->ring; 376 (descr-1)->next = chain->ring;
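
The reworked spider_net_init_chain() above splits each descriptor into a DMA-visible part (spider_net_hw_descr, allocated once as chain->hwring) and a driver-private part (spider_net_descr) that carries the skb, the next/prev links and a pointer to its hardware twin. A rough standalone sketch of that shadow-ring layout; the structure and field names below are simplified stand-ins, not the driver's definitions:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

/* What the NIC's DMA engine sees: lives in one coherent allocation. */
struct hw_descr {
	uint32_t buf_addr;
	uint32_t buf_size;
	uint32_t next_descr_addr;
	uint32_t dmac_cmd_status;
};

/* Driver-private bookkeeping: never touched by the hardware. */
struct sw_descr {
	struct hw_descr *hwdescr;	/* this entry's hardware twin */
	void *skb;			/* stand-in for struct sk_buff * */
	struct sw_descr *next, *prev;
};

struct descr_chain {
	struct sw_descr *ring;		/* software ring */
	struct hw_descr *hwring;	/* hardware ring (coherent in the driver) */
	int num_desc;
};

static int init_chain(struct descr_chain *chain, int num)
{
	int i;

	chain->num_desc = num;
	chain->ring = calloc(num, sizeof(*chain->ring));
	chain->hwring = calloc(num, sizeof(*chain->hwring));
	if (!chain->ring || !chain->hwring)
		return -1;

	for (i = 0; i < num; i++) {
		chain->ring[i].hwdescr = &chain->hwring[i];
		chain->ring[i].next = &chain->ring[(i + 1) % num];
		chain->ring[i].prev = &chain->ring[(i + num - 1) % num];
	}
	return 0;
}

int main(void)
{
	struct descr_chain chain;

	if (init_chain(&chain, 4))
		return 1;
	printf("descr 0 twin at index %ld of hwring\n",
	       (long)(chain.ring[0].hwdescr - chain.hwring));
	free(chain.ring);
	free(chain.hwring);
	return 0;
}
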
@@ -357,10 +396,11 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
357 descr = card->rx_chain.head; 396 descr = card->rx_chain.head;
358 do { 397 do {
359 if (descr->skb) { 398 if (descr->skb) {
360 dev_kfree_skb(descr->skb); 399 pci_unmap_single(card->pdev, descr->hwdescr->buf_addr,
361 pci_unmap_single(card->pdev, descr->buf_addr,
362 SPIDER_NET_MAX_FRAME, 400 SPIDER_NET_MAX_FRAME,
363 PCI_DMA_BIDIRECTIONAL); 401 PCI_DMA_BIDIRECTIONAL);
402 dev_kfree_skb(descr->skb);
403 descr->skb = NULL;
364 } 404 }
365 descr = descr->next; 405 descr = descr->next;
366 } while (descr != card->rx_chain.head); 406 } while (descr != card->rx_chain.head);
@@ -380,6 +420,7 @@ static int
380spider_net_prepare_rx_descr(struct spider_net_card *card, 420spider_net_prepare_rx_descr(struct spider_net_card *card,
381 struct spider_net_descr *descr) 421 struct spider_net_descr *descr)
382{ 422{
423 struct spider_net_hw_descr *hwdescr = descr->hwdescr;
383 dma_addr_t buf; 424 dma_addr_t buf;
384 int offset; 425 int offset;
385 int bufsize; 426 int bufsize;
@@ -398,11 +439,11 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
398 card->spider_stats.alloc_rx_skb_error++; 439 card->spider_stats.alloc_rx_skb_error++;
399 return -ENOMEM; 440 return -ENOMEM;
400 } 441 }
401 descr->buf_size = bufsize; 442 hwdescr->buf_size = bufsize;
402 descr->result_size = 0; 443 hwdescr->result_size = 0;
403 descr->valid_size = 0; 444 hwdescr->valid_size = 0;
404 descr->data_status = 0; 445 hwdescr->data_status = 0;
405 descr->data_error = 0; 446 hwdescr->data_error = 0;
406 447
407 offset = ((unsigned long)descr->skb->data) & 448 offset = ((unsigned long)descr->skb->data) &
408 (SPIDER_NET_RXBUF_ALIGN - 1); 449 (SPIDER_NET_RXBUF_ALIGN - 1);
@@ -411,21 +452,22 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
411 /* iommu-map the skb */ 452 /* iommu-map the skb */
412 buf = pci_map_single(card->pdev, descr->skb->data, 453 buf = pci_map_single(card->pdev, descr->skb->data,
413 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); 454 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
414 descr->buf_addr = buf;
415 if (pci_dma_mapping_error(buf)) { 455 if (pci_dma_mapping_error(buf)) {
416 dev_kfree_skb_any(descr->skb); 456 dev_kfree_skb_any(descr->skb);
457 descr->skb = NULL;
417 if (netif_msg_rx_err(card) && net_ratelimit()) 458 if (netif_msg_rx_err(card) && net_ratelimit())
418 pr_err("Could not iommu-map rx buffer\n"); 459 pr_err("Could not iommu-map rx buffer\n");
419 card->spider_stats.rx_iommu_map_error++; 460 card->spider_stats.rx_iommu_map_error++;
420 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 461 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
421 } else { 462 } else {
422 descr->next_descr_addr = 0; 463 hwdescr->buf_addr = buf;
464 hwdescr->next_descr_addr = 0;
423 wmb(); 465 wmb();
424 descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED | 466 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
425 SPIDER_NET_DMAC_NOINTR_COMPLETE; 467 SPIDER_NET_DMAC_NOINTR_COMPLETE;
426 468
427 wmb(); 469 wmb();
428 descr->prev->next_descr_addr = descr->bus_addr; 470 descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
429 } 471 }
430 472
431 return 0; 473 return 0;
@@ -481,7 +523,7 @@ spider_net_refill_rx_chain(struct spider_net_card *card)
481 if (!spin_trylock_irqsave(&chain->lock, flags)) 523 if (!spin_trylock_irqsave(&chain->lock, flags))
482 return; 524 return;
483 525
484 while (spider_net_get_descr_status(chain->head) == 526 while (spider_net_get_descr_status(chain->head->hwdescr) ==
485 SPIDER_NET_DESCR_NOT_IN_USE) { 527 SPIDER_NET_DESCR_NOT_IN_USE) {
486 if (spider_net_prepare_rx_descr(card, chain->head)) 528 if (spider_net_prepare_rx_descr(card, chain->head))
487 break; 529 break;
@@ -642,7 +684,9 @@ static int
642spider_net_prepare_tx_descr(struct spider_net_card *card, 684spider_net_prepare_tx_descr(struct spider_net_card *card,
643 struct sk_buff *skb) 685 struct sk_buff *skb)
644{ 686{
687 struct spider_net_descr_chain *chain = &card->tx_chain;
645 struct spider_net_descr *descr; 688 struct spider_net_descr *descr;
689 struct spider_net_hw_descr *hwdescr;
646 dma_addr_t buf; 690 dma_addr_t buf;
647 unsigned long flags; 691 unsigned long flags;
648 692
@@ -655,32 +699,39 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
655 return -ENOMEM; 699 return -ENOMEM;
656 } 700 }
657 701
658 spin_lock_irqsave(&card->tx_chain.lock, flags); 702 spin_lock_irqsave(&chain->lock, flags);
659 descr = card->tx_chain.head; 703 descr = card->tx_chain.head;
660 card->tx_chain.head = descr->next; 704 if (descr->next == chain->tail->prev) {
705 spin_unlock_irqrestore(&chain->lock, flags);
706 pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE);
707 return -ENOMEM;
708 }
709 hwdescr = descr->hwdescr;
710 chain->head = descr->next;
661 711
662 descr->buf_addr = buf;
663 descr->buf_size = skb->len;
664 descr->next_descr_addr = 0;
665 descr->skb = skb; 712 descr->skb = skb;
666 descr->data_status = 0; 713 hwdescr->buf_addr = buf;
714 hwdescr->buf_size = skb->len;
715 hwdescr->next_descr_addr = 0;
716 hwdescr->data_status = 0;
667 717
668 descr->dmac_cmd_status = 718 hwdescr->dmac_cmd_status =
669 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS; 719 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
670 spin_unlock_irqrestore(&card->tx_chain.lock, flags); 720 spin_unlock_irqrestore(&chain->lock, flags);
671 721
672 if (skb->protocol == htons(ETH_P_IP)) 722 if (skb->protocol == htons(ETH_P_IP))
673 switch (skb->nh.iph->protocol) { 723 switch (skb->nh.iph->protocol) {
674 case IPPROTO_TCP: 724 case IPPROTO_TCP:
675 descr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP; 725 hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
676 break; 726 break;
677 case IPPROTO_UDP: 727 case IPPROTO_UDP:
678 descr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP; 728 hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
679 break; 729 break;
680 } 730 }
681 731
682 /* Chain the bus address, so that the DMA engine finds this descr. */ 732 /* Chain the bus address, so that the DMA engine finds this descr. */
683 descr->prev->next_descr_addr = descr->bus_addr; 733 wmb();
734 descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
684 735
685 card->netdev->trans_start = jiffies; /* set netdev watchdog timer */ 736 card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
686 return 0; 737 return 0;
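
spider_net_prepare_tx_descr() now rejects the packet itself when advancing the head would run into the descriptor just before the tail, instead of leaving that occupancy check to spider_net_xmit(). A standalone sketch of that full-ring test on a circular descriptor chain (ring size, node layout and names are illustrative):

#include <stdio.h>

#define RING_SIZE 8

struct node {
	struct node *next, *prev;
};

/*
 * Mirror of the driver's test: refuse a new TX descriptor when advancing
 * the head would land on the descriptor just before the tail, so head and
 * tail never alias while the DMA engine may still own entries in between.
 */
static int ring_full(struct node *head, struct node *tail)
{
	return head->next == tail->prev;
}

int main(void)
{
	struct node ring[RING_SIZE];
	struct node *head = &ring[0], *tail = &ring[0];
	int i, queued = 0;

	for (i = 0; i < RING_SIZE; i++) {
		ring[i].next = &ring[(i + 1) % RING_SIZE];
		ring[i].prev = &ring[(i + RING_SIZE - 1) % RING_SIZE];
	}

	while (!ring_full(head, tail)) {
		head = head->next;	/* "submit" one descriptor */
		queued++;
	}
	printf("queued %d of %d descriptors before the ring reported full\n",
	       queued, RING_SIZE);
	return 0;
}
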
@@ -689,16 +740,17 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
689static int 740static int
690spider_net_set_low_watermark(struct spider_net_card *card) 741spider_net_set_low_watermark(struct spider_net_card *card)
691{ 742{
743 struct spider_net_descr *descr = card->tx_chain.tail;
744 struct spider_net_hw_descr *hwdescr;
692 unsigned long flags; 745 unsigned long flags;
693 int status; 746 int status;
694 int cnt=0; 747 int cnt=0;
695 int i; 748 int i;
696 struct spider_net_descr *descr = card->tx_chain.tail;
697 749
698 /* Measure the length of the queue. Measurement does not 750 /* Measure the length of the queue. Measurement does not
699 * need to be precise -- does not need a lock. */ 751 * need to be precise -- does not need a lock. */
700 while (descr != card->tx_chain.head) { 752 while (descr != card->tx_chain.head) {
701 status = descr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE; 753 status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
702 if (status == SPIDER_NET_DESCR_NOT_IN_USE) 754 if (status == SPIDER_NET_DESCR_NOT_IN_USE)
703 break; 755 break;
704 descr = descr->next; 756 descr = descr->next;
@@ -717,10 +769,12 @@ spider_net_set_low_watermark(struct spider_net_card *card)
717 769
718 /* Set the new watermark, clear the old watermark */ 770 /* Set the new watermark, clear the old watermark */
719 spin_lock_irqsave(&card->tx_chain.lock, flags); 771 spin_lock_irqsave(&card->tx_chain.lock, flags);
720 descr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG; 772 descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
721 if (card->low_watermark && card->low_watermark != descr) 773 if (card->low_watermark && card->low_watermark != descr) {
722 card->low_watermark->dmac_cmd_status = 774 hwdescr = card->low_watermark->hwdescr;
723 card->low_watermark->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG; 775 hwdescr->dmac_cmd_status =
776 hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
777 }
724 card->low_watermark = descr; 778 card->low_watermark = descr;
725 spin_unlock_irqrestore(&card->tx_chain.lock, flags); 779 spin_unlock_irqrestore(&card->tx_chain.lock, flags);
726 return cnt; 780 return cnt;
@@ -743,16 +797,22 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
743{ 797{
744 struct spider_net_descr_chain *chain = &card->tx_chain; 798 struct spider_net_descr_chain *chain = &card->tx_chain;
745 struct spider_net_descr *descr; 799 struct spider_net_descr *descr;
800 struct spider_net_hw_descr *hwdescr;
746 struct sk_buff *skb; 801 struct sk_buff *skb;
747 u32 buf_addr; 802 u32 buf_addr;
748 unsigned long flags; 803 unsigned long flags;
749 int status; 804 int status;
750 805
751 while (chain->tail != chain->head) { 806 while (1) {
752 spin_lock_irqsave(&chain->lock, flags); 807 spin_lock_irqsave(&chain->lock, flags);
808 if (chain->tail == chain->head) {
809 spin_unlock_irqrestore(&chain->lock, flags);
810 return 0;
811 }
753 descr = chain->tail; 812 descr = chain->tail;
813 hwdescr = descr->hwdescr;
754 814
755 status = spider_net_get_descr_status(descr); 815 status = spider_net_get_descr_status(hwdescr);
756 switch (status) { 816 switch (status) {
757 case SPIDER_NET_DESCR_COMPLETE: 817 case SPIDER_NET_DESCR_COMPLETE:
758 card->netdev_stats.tx_packets++; 818 card->netdev_stats.tx_packets++;
@@ -788,9 +848,10 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
788 } 848 }
789 849
790 chain->tail = descr->next; 850 chain->tail = descr->next;
791 descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE; 851 hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
792 skb = descr->skb; 852 skb = descr->skb;
793 buf_addr = descr->buf_addr; 853 descr->skb = NULL;
854 buf_addr = hwdescr->buf_addr;
794 spin_unlock_irqrestore(&chain->lock, flags); 855 spin_unlock_irqrestore(&chain->lock, flags);
795 856
796 /* unmap the skb */ 857 /* unmap the skb */
@@ -826,7 +887,7 @@ spider_net_kick_tx_dma(struct spider_net_card *card)
826 887
827 descr = card->tx_chain.tail; 888 descr = card->tx_chain.tail;
828 for (;;) { 889 for (;;) {
829 if (spider_net_get_descr_status(descr) == 890 if (spider_net_get_descr_status(descr->hwdescr) ==
830 SPIDER_NET_DESCR_CARDOWNED) { 891 SPIDER_NET_DESCR_CARDOWNED) {
831 spider_net_write_reg(card, SPIDER_NET_GDTDCHA, 892 spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
832 descr->bus_addr); 893 descr->bus_addr);
@@ -855,13 +916,10 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
855{ 916{
856 int cnt; 917 int cnt;
857 struct spider_net_card *card = netdev_priv(netdev); 918 struct spider_net_card *card = netdev_priv(netdev);
858 struct spider_net_descr_chain *chain = &card->tx_chain;
859 919
860 spider_net_release_tx_chain(card, 0); 920 spider_net_release_tx_chain(card, 0);
861 921
862 if ((chain->head->next == chain->tail->prev) || 922 if (spider_net_prepare_tx_descr(card, skb) != 0) {
863 (spider_net_prepare_tx_descr(card, skb) != 0)) {
864
865 card->netdev_stats.tx_dropped++; 923 card->netdev_stats.tx_dropped++;
866 netif_stop_queue(netdev); 924 netif_stop_queue(netdev);
867 return NETDEV_TX_BUSY; 925 return NETDEV_TX_BUSY;
@@ -922,17 +980,18 @@ static void
922spider_net_pass_skb_up(struct spider_net_descr *descr, 980spider_net_pass_skb_up(struct spider_net_descr *descr,
923 struct spider_net_card *card) 981 struct spider_net_card *card)
924{ 982{
983 struct spider_net_hw_descr *hwdescr= descr->hwdescr;
925 struct sk_buff *skb; 984 struct sk_buff *skb;
926 struct net_device *netdev; 985 struct net_device *netdev;
927 u32 data_status, data_error; 986 u32 data_status, data_error;
928 987
929 data_status = descr->data_status; 988 data_status = hwdescr->data_status;
930 data_error = descr->data_error; 989 data_error = hwdescr->data_error;
931 netdev = card->netdev; 990 netdev = card->netdev;
932 991
933 skb = descr->skb; 992 skb = descr->skb;
934 skb->dev = netdev; 993 skb->dev = netdev;
935 skb_put(skb, descr->valid_size); 994 skb_put(skb, hwdescr->valid_size);
936 995
937 /* the card seems to add 2 bytes of junk in front 996 /* the card seems to add 2 bytes of junk in front
938 * of the ethernet frame */ 997 * of the ethernet frame */
@@ -994,23 +1053,25 @@ static void show_rx_chain(struct spider_net_card *card)
994#endif 1053#endif
995 1054
996/** 1055/**
997 * spider_net_decode_one_descr - processes an rx descriptor 1056 * spider_net_decode_one_descr - processes an RX descriptor
998 * @card: card structure 1057 * @card: card structure
999 * 1058 *
1000 * Returns 1 if a packet has been sent to the stack, otherwise 0 1059 * Returns 1 if a packet has been sent to the stack, otherwise 0.
1001 * 1060 *
1002 * Processes an rx descriptor by iommu-unmapping the data buffer and passing 1061 * Processes an RX descriptor by iommu-unmapping the data buffer
1003 * the packet up to the stack. This function is called in softirq 1062 * and passing the packet up to the stack. This function is called
1004 * context, e.g. either bottom half from interrupt or NAPI polling context 1063 * in softirq context, e.g. either bottom half from interrupt or
1064 * NAPI polling context.
1005 */ 1065 */
1006static int 1066static int
1007spider_net_decode_one_descr(struct spider_net_card *card) 1067spider_net_decode_one_descr(struct spider_net_card *card)
1008{ 1068{
1009 struct spider_net_descr_chain *chain = &card->rx_chain; 1069 struct spider_net_descr_chain *chain = &card->rx_chain;
1010 struct spider_net_descr *descr = chain->tail; 1070 struct spider_net_descr *descr = chain->tail;
1071 struct spider_net_hw_descr *hwdescr = descr->hwdescr;
1011 int status; 1072 int status;
1012 1073
1013 status = spider_net_get_descr_status(descr); 1074 status = spider_net_get_descr_status(hwdescr);
1014 1075
1015 /* Nothing in the descriptor, or ring must be empty */ 1076 /* Nothing in the descriptor, or ring must be empty */
1016 if ((status == SPIDER_NET_DESCR_CARDOWNED) || 1077 if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
@@ -1021,7 +1082,7 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1021 chain->tail = descr->next; 1082 chain->tail = descr->next;
1022 1083
1023 /* unmap descriptor */ 1084 /* unmap descriptor */
1024 pci_unmap_single(card->pdev, descr->buf_addr, 1085 pci_unmap_single(card->pdev, hwdescr->buf_addr,
1025 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); 1086 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
1026 1087
1027 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) || 1088 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
@@ -1037,34 +1098,33 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1037 if ( (status != SPIDER_NET_DESCR_COMPLETE) && 1098 if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
1038 (status != SPIDER_NET_DESCR_FRAME_END) ) { 1099 (status != SPIDER_NET_DESCR_FRAME_END) ) {
1039 if (netif_msg_rx_err(card)) 1100 if (netif_msg_rx_err(card))
1040 pr_err("%s: RX descriptor with unkown state %d\n", 1101 pr_err("%s: RX descriptor with unknown state %d\n",
1041 card->netdev->name, status); 1102 card->netdev->name, status);
1042 card->spider_stats.rx_desc_unk_state++; 1103 card->spider_stats.rx_desc_unk_state++;
1043 goto bad_desc; 1104 goto bad_desc;
1044 } 1105 }
1045 1106
1046 /* The cases we'll throw away the packet immediately */ 1107 /* The cases we'll throw away the packet immediately */
1047 if (descr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) { 1108 if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
1048 if (netif_msg_rx_err(card)) 1109 if (netif_msg_rx_err(card))
1049 pr_err("%s: error in received descriptor found, " 1110 pr_err("%s: error in received descriptor found, "
1050 "data_status=x%08x, data_error=x%08x\n", 1111 "data_status=x%08x, data_error=x%08x\n",
1051 card->netdev->name, 1112 card->netdev->name,
1052 descr->data_status, descr->data_error); 1113 hwdescr->data_status, hwdescr->data_error);
1053 goto bad_desc; 1114 goto bad_desc;
1054 } 1115 }
1055 1116
1056 if (descr->dmac_cmd_status & 0xfefe) { 1117 if (hwdescr->dmac_cmd_status & 0xfefe) {
1057 pr_err("%s: bad status, cmd_status=x%08x\n", 1118 pr_err("%s: bad status, cmd_status=x%08x\n",
1058 card->netdev->name, 1119 card->netdev->name,
1059 descr->dmac_cmd_status); 1120 hwdescr->dmac_cmd_status);
1060 pr_err("buf_addr=x%08x\n", descr->buf_addr); 1121 pr_err("buf_addr=x%08x\n", hwdescr->buf_addr);
1061 pr_err("buf_size=x%08x\n", descr->buf_size); 1122 pr_err("buf_size=x%08x\n", hwdescr->buf_size);
1062 pr_err("next_descr_addr=x%08x\n", descr->next_descr_addr); 1123 pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
1063 pr_err("result_size=x%08x\n", descr->result_size); 1124 pr_err("result_size=x%08x\n", hwdescr->result_size);
1064 pr_err("valid_size=x%08x\n", descr->valid_size); 1125 pr_err("valid_size=x%08x\n", hwdescr->valid_size);
1065 pr_err("data_status=x%08x\n", descr->data_status); 1126 pr_err("data_status=x%08x\n", hwdescr->data_status);
1066 pr_err("data_error=x%08x\n", descr->data_error); 1127 pr_err("data_error=x%08x\n", hwdescr->data_error);
1067 pr_err("bus_addr=x%08x\n", descr->bus_addr);
1068 pr_err("which=%ld\n", descr - card->rx_chain.ring); 1128 pr_err("which=%ld\n", descr - card->rx_chain.ring);
1069 1129
1070 card->spider_stats.rx_desc_error++; 1130 card->spider_stats.rx_desc_error++;
@@ -1073,12 +1133,13 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1073 1133
1074 /* Ok, we've got a packet in descr */ 1134 /* Ok, we've got a packet in descr */
1075 spider_net_pass_skb_up(descr, card); 1135 spider_net_pass_skb_up(descr, card);
1076 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 1136 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1077 return 1; 1137 return 1;
1078 1138
1079bad_desc: 1139bad_desc:
1080 dev_kfree_skb_irq(descr->skb); 1140 dev_kfree_skb_irq(descr->skb);
1081 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 1141 descr->skb = NULL;
1142 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1082 return 0; 1143 return 0;
1083} 1144}
1084 1145
@@ -1248,6 +1309,33 @@ spider_net_set_mac(struct net_device *netdev, void *p)
1248} 1309}
1249 1310
1250/** 1311/**
1312 * spider_net_link_reset
1313 * @netdev: net device structure
1314 *
1315 * This is called when the PHY_LINK signal is asserted. For the blade this is
1316 * not connected so we should never get here.
1317 *
1318 */
1319static void
1320spider_net_link_reset(struct net_device *netdev)
1321{
1322
1323 struct spider_net_card *card = netdev_priv(netdev);
1324
1325 del_timer_sync(&card->aneg_timer);
1326
1327 /* clear interrupt, block further interrupts */
1328 spider_net_write_reg(card, SPIDER_NET_GMACST,
1329 spider_net_read_reg(card, SPIDER_NET_GMACST));
1330 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
1331
1332 /* reset phy and setup aneg */
1333 spider_net_setup_aneg(card);
1334 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1335
1336}
1337
1338/**
1251 * spider_net_handle_error_irq - handles errors raised by an interrupt 1339 * spider_net_handle_error_irq - handles errors raised by an interrupt
1252 * @card: card structure 1340 * @card: card structure
1253 * @status_reg: interrupt status register 0 (GHIINT0STS) 1341 * @status_reg: interrupt status register 0 (GHIINT0STS)
@@ -1359,8 +1447,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1359 switch (i) 1447 switch (i)
1360 { 1448 {
1361 case SPIDER_NET_GTMFLLINT: 1449 case SPIDER_NET_GTMFLLINT:
 1362 if (netif_msg_intr(card) && net_ratelimit()) 1450 /* TX RAM full can happen in normal operation;
 1363 pr_err("Spider TX RAM full\n"); 1451 * logging is not needed. */
1364 show_error = 0; 1452 show_error = 0;
1365 break; 1453 break;
1366 case SPIDER_NET_GRFDFLLINT: /* fallthrough */ 1454 case SPIDER_NET_GRFDFLLINT: /* fallthrough */
@@ -1500,6 +1588,9 @@ spider_net_interrupt(int irq, void *ptr)
1500 if (status_reg & SPIDER_NET_TXINT) 1588 if (status_reg & SPIDER_NET_TXINT)
1501 netif_rx_schedule(netdev); 1589 netif_rx_schedule(netdev);
1502 1590
1591 if (status_reg & SPIDER_NET_LINKINT)
1592 spider_net_link_reset(netdev);
1593
1503 if (status_reg & SPIDER_NET_ERRINT ) 1594 if (status_reg & SPIDER_NET_ERRINT )
1504 spider_net_handle_error_irq(card, status_reg); 1595 spider_net_handle_error_irq(card, status_reg);
1505 1596
@@ -1540,6 +1631,11 @@ spider_net_init_card(struct spider_net_card *card)
1540 1631
1541 spider_net_write_reg(card, SPIDER_NET_CKRCTRL, 1632 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1542 SPIDER_NET_CKRCTRL_RUN_VALUE); 1633 SPIDER_NET_CKRCTRL_RUN_VALUE);
1634
1635 /* trigger ETOMOD signal */
1636 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
1637 spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
1638
1543} 1639}
1544 1640
1545/** 1641/**
@@ -1624,8 +1720,6 @@ spider_net_enable_card(struct spider_net_card *card)
1624 1720
1625 spider_net_write_reg(card, SPIDER_NET_GMACLENLMT, 1721 spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
1626 SPIDER_NET_LENLMT_VALUE); 1722 SPIDER_NET_LENLMT_VALUE);
1627 spider_net_write_reg(card, SPIDER_NET_GMACMODE,
1628 SPIDER_NET_MACMODE_VALUE);
1629 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, 1723 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
1630 SPIDER_NET_OPMODE_VALUE); 1724 SPIDER_NET_OPMODE_VALUE);
1631 1725
@@ -1642,98 +1736,6 @@ spider_net_enable_card(struct spider_net_card *card)
1642} 1736}
1643 1737
1644/** 1738/**
1645 * spider_net_open - called upon ifonfig up
1646 * @netdev: interface device structure
1647 *
1648 * returns 0 on success, <0 on failure
1649 *
1650 * spider_net_open allocates all the descriptors and memory needed for
1651 * operation, sets up multicast list and enables interrupts
1652 */
1653int
1654spider_net_open(struct net_device *netdev)
1655{
1656 struct spider_net_card *card = netdev_priv(netdev);
1657 int result;
1658
1659 result = spider_net_init_chain(card, &card->tx_chain);
1660 if (result)
1661 goto alloc_tx_failed;
1662 card->low_watermark = NULL;
1663
1664 result = spider_net_init_chain(card, &card->rx_chain);
1665 if (result)
1666 goto alloc_rx_failed;
1667
1668 /* Allocate rx skbs */
1669 if (spider_net_alloc_rx_skbs(card))
1670 goto alloc_skbs_failed;
1671
1672 spider_net_set_multi(netdev);
1673
1674 /* further enhancement: setup hw vlan, if needed */
1675
1676 result = -EBUSY;
1677 if (request_irq(netdev->irq, spider_net_interrupt,
1678 IRQF_SHARED, netdev->name, netdev))
1679 goto register_int_failed;
1680
1681 spider_net_enable_card(card);
1682
1683 netif_start_queue(netdev);
1684 netif_carrier_on(netdev);
1685 netif_poll_enable(netdev);
1686
1687 return 0;
1688
1689register_int_failed:
1690 spider_net_free_rx_chain_contents(card);
1691alloc_skbs_failed:
1692 spider_net_free_chain(card, &card->rx_chain);
1693alloc_rx_failed:
1694 spider_net_free_chain(card, &card->tx_chain);
1695alloc_tx_failed:
1696 return result;
1697}
1698
1699/**
1700 * spider_net_setup_phy - setup PHY
1701 * @card: card structure
1702 *
1703 * returns 0 on success, <0 on failure
1704 *
1705 * spider_net_setup_phy is used as part of spider_net_probe. Sets
1706 * the PHY to 1000 Mbps
1707 **/
1708static int
1709spider_net_setup_phy(struct spider_net_card *card)
1710{
1711 struct mii_phy *phy = &card->phy;
1712
1713 spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
1714 SPIDER_NET_DMASEL_VALUE);
1715 spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
1716 SPIDER_NET_PHY_CTRL_VALUE);
1717 phy->mii_id = 1;
1718 phy->dev = card->netdev;
1719 phy->mdio_read = spider_net_read_phy;
1720 phy->mdio_write = spider_net_write_phy;
1721
1722 mii_phy_probe(phy, phy->mii_id);
1723
1724 if (phy->def->ops->setup_forced)
1725 phy->def->ops->setup_forced(phy, SPEED_1000, DUPLEX_FULL);
1726
1727 phy->def->ops->enable_fiber(phy);
1728
1729 phy->def->ops->read_link(phy);
1730 pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name,
1731 phy->speed, phy->duplex==1 ? "Full" : "Half");
1732
1733 return 0;
1734}
1735
1736/**
1737 * spider_net_download_firmware - loads firmware into the adapter 1739 * spider_net_download_firmware - loads firmware into the adapter
1738 * @card: card structure 1740 * @card: card structure
1739 * @firmware_ptr: pointer to firmware data 1741 * @firmware_ptr: pointer to firmware data
@@ -1852,6 +1854,179 @@ out_err:
1852} 1854}
1853 1855
1854/** 1856/**
 1857 * spider_net_open - called upon ifconfig up
1858 * @netdev: interface device structure
1859 *
1860 * returns 0 on success, <0 on failure
1861 *
1862 * spider_net_open allocates all the descriptors and memory needed for
1863 * operation, sets up multicast list and enables interrupts
1864 */
1865int
1866spider_net_open(struct net_device *netdev)
1867{
1868 struct spider_net_card *card = netdev_priv(netdev);
1869 int result;
1870
1871 result = spider_net_init_firmware(card);
1872 if (result)
1873 goto init_firmware_failed;
1874
1875 /* start probing with copper */
1876 spider_net_setup_aneg(card);
1877 if (card->phy.def->phy_id)
1878 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1879
1880 result = spider_net_init_chain(card, &card->tx_chain);
1881 if (result)
1882 goto alloc_tx_failed;
1883 card->low_watermark = NULL;
1884
1885 result = spider_net_init_chain(card, &card->rx_chain);
1886 if (result)
1887 goto alloc_rx_failed;
1888
1889 /* Allocate rx skbs */
1890 if (spider_net_alloc_rx_skbs(card))
1891 goto alloc_skbs_failed;
1892
1893 spider_net_set_multi(netdev);
1894
1895 /* further enhancement: setup hw vlan, if needed */
1896
1897 result = -EBUSY;
1898 if (request_irq(netdev->irq, spider_net_interrupt,
1899 IRQF_SHARED, netdev->name, netdev))
1900 goto register_int_failed;
1901
1902 spider_net_enable_card(card);
1903
1904 netif_start_queue(netdev);
1905 netif_carrier_on(netdev);
1906 netif_poll_enable(netdev);
1907
1908 return 0;
1909
1910register_int_failed:
1911 spider_net_free_rx_chain_contents(card);
1912alloc_skbs_failed:
1913 spider_net_free_chain(card, &card->rx_chain);
1914alloc_rx_failed:
1915 spider_net_free_chain(card, &card->tx_chain);
1916alloc_tx_failed:
1917 del_timer_sync(&card->aneg_timer);
1918init_firmware_failed:
1919 return result;
1920}
1921
1922/**
1923 * spider_net_link_phy
 1924 * @data: pointer to the card structure, passed as unsigned long
1925 *
1926 */
1927static void spider_net_link_phy(unsigned long data)
1928{
1929 struct spider_net_card *card = (struct spider_net_card *)data;
1930 struct mii_phy *phy = &card->phy;
1931
1932 /* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
1933 if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
1934
1935 pr_info("%s: link is down trying to bring it up\n", card->netdev->name);
1936
1937 switch (card->medium) {
1938 case BCM54XX_COPPER:
1939 /* enable fiber with autonegotiation first */
1940 if (phy->def->ops->enable_fiber)
1941 phy->def->ops->enable_fiber(phy, 1);
1942 card->medium = BCM54XX_FIBER;
1943 break;
1944
1945 case BCM54XX_FIBER:
1946 /* fiber didn't come up, try to disable fiber autoneg */
1947 if (phy->def->ops->enable_fiber)
1948 phy->def->ops->enable_fiber(phy, 0);
1949 card->medium = BCM54XX_UNKNOWN;
1950 break;
1951
1952 case BCM54XX_UNKNOWN:
 1953 /* copper and fiber (with and without autoneg) all failed,
 1954 * retry from the beginning */
1955 spider_net_setup_aneg(card);
1956 card->medium = BCM54XX_COPPER;
1957 break;
1958 }
1959
1960 card->aneg_count = 0;
1961 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1962 return;
1963 }
1964
1965 /* link still not up, try again later */
1966 if (!(phy->def->ops->poll_link(phy))) {
1967 card->aneg_count++;
1968 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1969 return;
1970 }
1971
1972 /* link came up, get abilities */
1973 phy->def->ops->read_link(phy);
1974
1975 spider_net_write_reg(card, SPIDER_NET_GMACST,
1976 spider_net_read_reg(card, SPIDER_NET_GMACST));
1977 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);
1978
1979 if (phy->speed == 1000)
1980 spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
1981 else
1982 spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);
1983
1984 card->aneg_count = 0;
1985
1986 pr_debug("Found %s with %i Mbps, %s-duplex %sautoneg.\n",
1987 phy->def->name, phy->speed, phy->duplex==1 ? "Full" : "Half",
1988 phy->autoneg==1 ? "" : "no ");
1989
1990 return;
1991}
1992
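spider_net_link_phy is, in effect, a small state machine driven by the aneg timer: while the link stays down it cycles copper, then fiber with autoneg, then fiber without autoneg, and finally restarts from copper. A hedged, standalone sketch of the same cycling logic (the enum names only mirror the BCM54XX_* media values; this is illustrative, not driver code):

enum medium { COPPER, FIBER_ANEG, FIBER_FORCED };

/* called on each timer tick once the retry count is exceeded */
static enum medium next_medium(enum medium m)
{
        switch (m) {
        case COPPER:        return FIBER_ANEG;   /* try fiber with autoneg */
        case FIBER_ANEG:    return FIBER_FORCED; /* then fiber without autoneg */
        case FIBER_FORCED:
        default:            return COPPER;       /* give up and start over */
        }
}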
1993/**
1994 * spider_net_setup_phy - setup PHY
1995 * @card: card structure
1996 *
1997 * returns 0 on success, <0 on failure
1998 *
1999 * spider_net_setup_phy is used as part of spider_net_probe.
2000 **/
2001static int
2002spider_net_setup_phy(struct spider_net_card *card)
2003{
2004 struct mii_phy *phy = &card->phy;
2005
2006 spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
2007 SPIDER_NET_DMASEL_VALUE);
2008 spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
2009 SPIDER_NET_PHY_CTRL_VALUE);
2010
2011 phy->dev = card->netdev;
2012 phy->mdio_read = spider_net_read_phy;
2013 phy->mdio_write = spider_net_write_phy;
2014
2015 for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
2016 unsigned short id;
2017 id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
2018 if (id != 0x0000 && id != 0xffff) {
2019 if (!mii_phy_probe(phy, phy->mii_id)) {
2020 pr_info("Found %s.\n", phy->def->name);
2021 break;
2022 }
2023 }
2024 }
2025
2026 return 0;
2027}
2028
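The probe loop above walks MII addresses 1 to 31 and treats an all-zeros or all-ones BMSR as "nothing answering at this address". A standalone sketch of that scan pattern (read_bmsr is a hypothetical accessor returning the raw 16-bit register value):

/* read_bmsr(addr) is assumed to return the BMSR of the PHY at MII address addr */
static int find_phy(unsigned int (*read_bmsr)(int addr))
{
        int addr;

        for (addr = 1; addr <= 31; addr++) {
                unsigned int bmsr = read_bmsr(addr);

                /* 0x0000 and 0xffff mean no device responded here */
                if (bmsr != 0x0000 && bmsr != 0xffff)
                        return addr;    /* first responding PHY wins */
        }
        return -1;                      /* no PHY found */
}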
2029/**
1855 * spider_net_workaround_rxramfull - work around firmware bug 2030 * spider_net_workaround_rxramfull - work around firmware bug
1856 * @card: card structure 2031 * @card: card structure
1857 * 2032 *
@@ -1900,14 +2075,15 @@ spider_net_stop(struct net_device *netdev)
1900 netif_carrier_off(netdev); 2075 netif_carrier_off(netdev);
1901 netif_stop_queue(netdev); 2076 netif_stop_queue(netdev);
1902 del_timer_sync(&card->tx_timer); 2077 del_timer_sync(&card->tx_timer);
2078 del_timer_sync(&card->aneg_timer);
1903 2079
1904 /* disable/mask all interrupts */ 2080 /* disable/mask all interrupts */
1905 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); 2081 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
1906 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0); 2082 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
1907 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0); 2083 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
2084 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
1908 2085
1909 /* free_irq(netdev->irq, netdev);*/ 2086 free_irq(netdev->irq, netdev);
1910 free_irq(to_pci_dev(netdev->dev.parent)->irq, netdev);
1911 2087
1912 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, 2088 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
1913 SPIDER_NET_DMA_TX_FEND_VALUE); 2089 SPIDER_NET_DMA_TX_FEND_VALUE);
@@ -1919,8 +2095,6 @@ spider_net_stop(struct net_device *netdev)
1919 spider_net_release_tx_chain(card, 1); 2095 spider_net_release_tx_chain(card, 1);
1920 spider_net_free_rx_chain_contents(card); 2096 spider_net_free_rx_chain_contents(card);
1921 2097
1922 spider_net_free_rx_chain_contents(card);
1923
1924 spider_net_free_chain(card, &card->tx_chain); 2098 spider_net_free_chain(card, &card->tx_chain);
1925 spider_net_free_chain(card, &card->rx_chain); 2099 spider_net_free_chain(card, &card->rx_chain);
1926 2100
@@ -1952,8 +2126,6 @@ spider_net_tx_timeout_task(struct work_struct *work)
1952 2126
1953 if (spider_net_setup_phy(card)) 2127 if (spider_net_setup_phy(card))
1954 goto out; 2128 goto out;
1955 if (spider_net_init_firmware(card))
1956 goto out;
1957 2129
1958 spider_net_open(netdev); 2130 spider_net_open(netdev);
1959 spider_net_kick_tx_dma(card); 2131 spider_net_kick_tx_dma(card);
@@ -2046,10 +2218,12 @@ spider_net_setup_netdev(struct spider_net_card *card)
2046 card->tx_timer.data = (unsigned long) card; 2218 card->tx_timer.data = (unsigned long) card;
2047 netdev->irq = card->pdev->irq; 2219 netdev->irq = card->pdev->irq;
2048 2220
2049 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; 2221 card->aneg_count = 0;
2222 init_timer(&card->aneg_timer);
2223 card->aneg_timer.function = spider_net_link_phy;
2224 card->aneg_timer.data = (unsigned long) card;
2050 2225
2051 card->tx_chain.num_desc = tx_descriptors; 2226 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
2052 card->rx_chain.num_desc = rx_descriptors;
2053 2227
2054 spider_net_setup_netdev_ops(netdev); 2228 spider_net_setup_netdev_ops(netdev);
2055 2229
@@ -2098,8 +2272,11 @@ spider_net_alloc_card(void)
2098{ 2272{
2099 struct net_device *netdev; 2273 struct net_device *netdev;
2100 struct spider_net_card *card; 2274 struct spider_net_card *card;
2275 size_t alloc_size;
2101 2276
2102 netdev = alloc_etherdev(sizeof(struct spider_net_card)); 2277 alloc_size = sizeof(struct spider_net_card) +
2278 (tx_descriptors + rx_descriptors) * sizeof(struct spider_net_descr);
2279 netdev = alloc_etherdev(alloc_size);
2103 if (!netdev) 2280 if (!netdev)
2104 return NULL; 2281 return NULL;
2105 2282
@@ -2110,6 +2287,11 @@ spider_net_alloc_card(void)
2110 init_waitqueue_head(&card->waitq); 2287 init_waitqueue_head(&card->waitq);
2111 atomic_set(&card->tx_timeout_task_counter, 0); 2288 atomic_set(&card->tx_timeout_task_counter, 0);
2112 2289
2290 card->rx_chain.num_desc = rx_descriptors;
2291 card->rx_chain.ring = card->darray;
2292 card->tx_chain.num_desc = tx_descriptors;
2293 card->tx_chain.ring = card->darray + rx_descriptors;
2294
2113 return card; 2295 return card;
2114} 2296}
2115 2297
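The descriptor rings are now carved out of the same allocation as the card structure: darray[0] is a trailing variable-length array, so a single alloc_etherdev() call sizes the private area for the struct plus all rx and tx descriptors, and the two rings are just offsets into it. A standalone sketch of that single-allocation pattern (using malloc and a C99 flexible array member in place of the driver's zero-length array):

#include <stdlib.h>

struct descr { unsigned int buf_addr; unsigned int buf_size; };

struct card {
        int num_rx, num_tx;
        struct descr darray[];          /* must remain the last member */
};

static struct card *card_alloc(int rx, int tx)
{
        struct card *c = malloc(sizeof(*c) + (rx + tx) * sizeof(struct descr));

        if (!c)
                return NULL;
        c->num_rx = rx;
        c->num_tx = tx;
        /* rx ring starts at darray[0], tx ring follows at darray[rx] */
        return c;
}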
@@ -2220,10 +2402,6 @@ spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2220 if (err) 2402 if (err)
2221 goto out_undo_pci; 2403 goto out_undo_pci;
2222 2404
2223 err = spider_net_init_firmware(card);
2224 if (err)
2225 goto out_undo_pci;
2226
2227 err = spider_net_setup_netdev(card); 2405 err = spider_net_setup_netdev(card);
2228 if (err) 2406 if (err)
2229 goto out_undo_pci; 2407 goto out_undo_pci;
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 2fec5cf769..4a1e0d28a5 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -1,7 +1,8 @@
1/* 1/*
2 * Network device driver for Cell Processor-Based Blade 2 * Network device driver for Cell Processor-Based Blade and Celleb platform
3 * 3 *
4 * (C) Copyright IBM Corp. 2005 4 * (C) Copyright IBM Corp. 2005
5 * (C) Copyright 2006 TOSHIBA CORPORATION
5 * 6 *
6 * Authors : Utz Bacher <utz.bacher@de.ibm.com> 7 * Authors : Utz Bacher <utz.bacher@de.ibm.com>
7 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com> 8 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
@@ -24,7 +25,7 @@
24#ifndef _SPIDER_NET_H 25#ifndef _SPIDER_NET_H
25#define _SPIDER_NET_H 26#define _SPIDER_NET_H
26 27
27#define VERSION "1.6 B" 28#define VERSION "2.0 A"
28 29
29#include "sungem_phy.h" 30#include "sungem_phy.h"
30 31
@@ -50,6 +51,8 @@ extern char spider_net_driver_name[];
50#define SPIDER_NET_TX_DESCRIPTORS_MAX 512 51#define SPIDER_NET_TX_DESCRIPTORS_MAX 512
51 52
52#define SPIDER_NET_TX_TIMER (HZ/5) 53#define SPIDER_NET_TX_TIMER (HZ/5)
54#define SPIDER_NET_ANEG_TIMER (HZ)
55#define SPIDER_NET_ANEG_TIMEOUT 2
53 56
54#define SPIDER_NET_RX_CSUM_DEFAULT 1 57#define SPIDER_NET_RX_CSUM_DEFAULT 1
55 58
@@ -104,6 +107,7 @@ extern char spider_net_driver_name[];
104 107
105#define SPIDER_NET_GMACOPEMD 0x00000100 108#define SPIDER_NET_GMACOPEMD 0x00000100
106#define SPIDER_NET_GMACLENLMT 0x00000108 109#define SPIDER_NET_GMACLENLMT 0x00000108
110#define SPIDER_NET_GMACST 0x00000110
107#define SPIDER_NET_GMACINTEN 0x00000118 111#define SPIDER_NET_GMACINTEN 0x00000118
108#define SPIDER_NET_GMACPHYCTRL 0x00000120 112#define SPIDER_NET_GMACPHYCTRL 0x00000120
109 113
@@ -181,7 +185,8 @@ extern char spider_net_driver_name[];
181 185
182/* pause frames: automatic, no upper retransmission count */ 186/* pause frames: automatic, no upper retransmission count */
183/* outside loopback mode: ETOMOD signal dont matter, not connected */ 187/* outside loopback mode: ETOMOD signal dont matter, not connected */
184#define SPIDER_NET_OPMODE_VALUE 0x00000063 188/* ETOMOD signal is brought to PHY reset. bit 2 must be 1 in Celleb */
189#define SPIDER_NET_OPMODE_VALUE 0x00000067
185/*#define SPIDER_NET_OPMODE_VALUE 0x001b0062*/ 190/*#define SPIDER_NET_OPMODE_VALUE 0x001b0062*/
186#define SPIDER_NET_LENLMT_VALUE 0x00000908 191#define SPIDER_NET_LENLMT_VALUE 0x00000908
187 192
@@ -333,9 +338,12 @@ enum spider_net_int2_status {
333/* We rely on flagged descriptor interrupts */ 338/* We rely on flagged descriptor interrupts */
334#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) ) 339#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) )
335 340
341#define SPIDER_NET_LINKINT ( 1 << SPIDER_NET_GMAC2INT )
342
336#define SPIDER_NET_ERRINT ( 0xffffffff & \ 343#define SPIDER_NET_ERRINT ( 0xffffffff & \
337 (~SPIDER_NET_TXINT) & \ 344 (~SPIDER_NET_TXINT) & \
338 (~SPIDER_NET_RXINT) ) 345 (~SPIDER_NET_RXINT) & \
346 (~SPIDER_NET_LINKINT) )
339 347
340#define SPIDER_NET_GPREXEC 0x80000000 348#define SPIDER_NET_GPREXEC 0x80000000
341#define SPIDER_NET_GPRDAT_MASK 0x0000ffff 349#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
@@ -356,8 +364,8 @@ enum spider_net_int2_status {
356#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000 364#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
357#define SPIDER_NET_DESCR_TXDESFLG 0x00800000 365#define SPIDER_NET_DESCR_TXDESFLG 0x00800000
358 366
359struct spider_net_descr { 367/* Descriptor, as defined by the hardware */
360 /* as defined by the hardware */ 368struct spider_net_hw_descr {
361 u32 buf_addr; 369 u32 buf_addr;
362 u32 buf_size; 370 u32 buf_size;
363 u32 next_descr_addr; 371 u32 next_descr_addr;
@@ -366,13 +374,15 @@ struct spider_net_descr {
366 u32 valid_size; /* all zeroes for tx */ 374 u32 valid_size; /* all zeroes for tx */
367 u32 data_status; 375 u32 data_status;
368 u32 data_error; /* all zeroes for tx */ 376 u32 data_error; /* all zeroes for tx */
377} __attribute__((aligned(32)));
369 378
370 /* used in the driver */ 379struct spider_net_descr {
380 struct spider_net_hw_descr *hwdescr;
371 struct sk_buff *skb; 381 struct sk_buff *skb;
372 u32 bus_addr; 382 u32 bus_addr;
373 struct spider_net_descr *next; 383 struct spider_net_descr *next;
374 struct spider_net_descr *prev; 384 struct spider_net_descr *prev;
375} __attribute__((aligned(32))); 385};
376 386
377struct spider_net_descr_chain { 387struct spider_net_descr_chain {
378 spinlock_t lock; 388 spinlock_t lock;
@@ -380,6 +390,7 @@ struct spider_net_descr_chain {
380 struct spider_net_descr *tail; 390 struct spider_net_descr *tail;
381 struct spider_net_descr *ring; 391 struct spider_net_descr *ring;
382 int num_desc; 392 int num_desc;
393 struct spider_net_hw_descr *hwring;
383 dma_addr_t dma_addr; 394 dma_addr_t dma_addr;
384}; 395};
385 396
@@ -436,12 +447,16 @@ struct spider_net_card {
436 struct pci_dev *pdev; 447 struct pci_dev *pdev;
437 struct mii_phy phy; 448 struct mii_phy phy;
438 449
450 int medium;
451
439 void __iomem *regs; 452 void __iomem *regs;
440 453
441 struct spider_net_descr_chain tx_chain; 454 struct spider_net_descr_chain tx_chain;
442 struct spider_net_descr_chain rx_chain; 455 struct spider_net_descr_chain rx_chain;
443 struct spider_net_descr *low_watermark; 456 struct spider_net_descr *low_watermark;
444 457
458 int aneg_count;
459 struct timer_list aneg_timer;
445 struct timer_list tx_timer; 460 struct timer_list tx_timer;
446 struct work_struct tx_timeout_task; 461 struct work_struct tx_timeout_task;
447 atomic_t tx_timeout_task_counter; 462 atomic_t tx_timeout_task_counter;
@@ -452,6 +467,9 @@ struct spider_net_card {
452 struct net_device_stats netdev_stats; 467 struct net_device_stats netdev_stats;
453 struct spider_net_extra_stats spider_stats; 468 struct spider_net_extra_stats spider_stats;
454 struct spider_net_options options; 469 struct spider_net_options options;
470
471 /* Must be last item in struct */
472 struct spider_net_descr darray[0];
455}; 473};
456 474
457#define pr_err(fmt,arg...) \ 475#define pr_err(fmt,arg...) \
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index bf873ea257..8bba2e3da7 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -677,8 +677,7 @@ static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
677 spin_lock(&np->lock); 677 spin_lock(&np->lock);
678 if (debug > 1) 678 if (debug > 1)
679 printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid); 679 printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
680 if (np->vlgrp) 680 vlan_group_set_device(np->vlgrp, vid, NULL);
681 np->vlgrp->vlan_devices[vid] = NULL;
682 set_rx_mode(dev); 681 set_rx_mode(dev);
683 spin_unlock(&np->lock); 682 spin_unlock(&np->lock);
684} 683}
@@ -1738,7 +1737,7 @@ static void set_rx_mode(struct net_device *dev)
1738 int vlan_count = 0; 1737 int vlan_count = 0;
1739 void __iomem *filter_addr = ioaddr + HashTable + 8; 1738 void __iomem *filter_addr = ioaddr + HashTable + 8;
1740 for (i = 0; i < VLAN_VID_MASK; i++) { 1739 for (i = 0; i < VLAN_VID_MASK; i++) {
1741 if (np->vlgrp->vlan_devices[i]) { 1740 if (vlan_group_get_device(np->vlgrp, i)) {
1742 if (vlan_count >= 32) 1741 if (vlan_count >= 32)
1743 break; 1742 break;
1744 writew(cpu_to_be16(i), filter_addr); 1743 writew(cpu_to_be16(i), filter_addr);
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index a3220a9652..4757aa647c 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -28,8 +28,6 @@ static int automatic_resume = 0; /* experimental .. better should be zero */
28static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */ 28static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */
29static int fifo=0x8; /* don't change */ 29static int fifo=0x8; /* don't change */
30 30
31/* #define REALLY_SLOW_IO */
32
33#include <linux/module.h> 31#include <linux/module.h>
34#include <linux/kernel.h> 32#include <linux/kernel.h>
35#include <linux/string.h> 33#include <linux/string.h>
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index 701ba4f3b6..56a110ca5e 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -310,6 +310,107 @@ static int bcm5411_init(struct mii_phy* phy)
310 return 0; 310 return 0;
311} 311}
312 312
313static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
314{
315 u16 ctl, adv;
316
317 phy->autoneg = 1;
318 phy->speed = SPEED_10;
319 phy->duplex = DUPLEX_HALF;
320 phy->pause = 0;
321 phy->advertising = advertise;
322
323 /* Setup standard advertise */
324 adv = phy_read(phy, MII_ADVERTISE);
325 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
326 if (advertise & ADVERTISED_10baseT_Half)
327 adv |= ADVERTISE_10HALF;
328 if (advertise & ADVERTISED_10baseT_Full)
329 adv |= ADVERTISE_10FULL;
330 if (advertise & ADVERTISED_100baseT_Half)
331 adv |= ADVERTISE_100HALF;
332 if (advertise & ADVERTISED_100baseT_Full)
333 adv |= ADVERTISE_100FULL;
334 phy_write(phy, MII_ADVERTISE, adv);
335
336 /* Start/Restart aneg */
337 ctl = phy_read(phy, MII_BMCR);
338 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
339 phy_write(phy, MII_BMCR, ctl);
340
341 return 0;
342}
343
344static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
345{
346 u16 ctl;
347
348 phy->autoneg = 0;
349 phy->speed = speed;
350 phy->duplex = fd;
351 phy->pause = 0;
352
353 ctl = phy_read(phy, MII_BMCR);
354 ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE);
355
356 /* First reset the PHY */
357 phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
358
359 /* Select speed & duplex */
360 switch(speed) {
361 case SPEED_10:
362 break;
363 case SPEED_100:
364 ctl |= BMCR_SPEED100;
365 break;
366 case SPEED_1000:
367 default:
368 return -EINVAL;
369 }
370 if (fd == DUPLEX_FULL)
371 ctl |= BMCR_FULLDPLX;
372 phy_write(phy, MII_BMCR, ctl);
373
374 return 0;
375}
376
377static int genmii_poll_link(struct mii_phy *phy)
378{
379 u16 status;
380
381 (void)phy_read(phy, MII_BMSR);
382 status = phy_read(phy, MII_BMSR);
383 if ((status & BMSR_LSTATUS) == 0)
384 return 0;
385 if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE))
386 return 0;
387 return 1;
388}
389
390static int genmii_read_link(struct mii_phy *phy)
391{
392 u16 lpa;
393
394 if (phy->autoneg) {
395 lpa = phy_read(phy, MII_LPA);
396
397 if (lpa & (LPA_10FULL | LPA_100FULL))
398 phy->duplex = DUPLEX_FULL;
399 else
400 phy->duplex = DUPLEX_HALF;
401 if (lpa & (LPA_100FULL | LPA_100HALF))
402 phy->speed = SPEED_100;
403 else
404 phy->speed = SPEED_10;
405 phy->pause = 0;
406 }
407 /* On non-aneg, we assume what we put in BMCR is the speed,
408 * though magic-aneg shouldn't prevent this case from occurring
409 */
410
411 return 0;
412}
413
313static int generic_suspend(struct mii_phy* phy) 414static int generic_suspend(struct mii_phy* phy)
314{ 415{
315 phy_write(phy, MII_BMCR, BMCR_PDOWN); 416 phy_write(phy, MII_BMCR, BMCR_PDOWN);
@@ -364,30 +465,6 @@ static int bcm5421_init(struct mii_phy* phy)
364 return 0; 465 return 0;
365} 466}
366 467
367static int bcm5421_enable_fiber(struct mii_phy* phy)
368{
369 /* enable fiber mode */
370 phy_write(phy, MII_NCONFIG, 0x9020);
371 /* LEDs active in both modes, autosense prio = fiber */
372 phy_write(phy, MII_NCONFIG, 0x945f);
373
374 /* switch off fibre autoneg */
375 phy_write(phy, MII_NCONFIG, 0xfc01);
376 phy_write(phy, 0x0b, 0x0004);
377
378 return 0;
379}
380
381static int bcm5461_enable_fiber(struct mii_phy* phy)
382{
383 phy_write(phy, MII_NCONFIG, 0xfc0c);
384 phy_write(phy, MII_BMCR, 0x4140);
385 phy_write(phy, MII_NCONFIG, 0xfc0b);
386 phy_write(phy, MII_BMCR, 0x0140);
387
388 return 0;
389}
390
391static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise) 468static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise)
392{ 469{
393 u16 ctl, adv; 470 u16 ctl, adv;
@@ -515,6 +592,155 @@ static int marvell88e1111_init(struct mii_phy* phy)
515 return 0; 592 return 0;
516} 593}
517 594
595#define BCM5421_MODE_MASK (1 << 5)
596
597static int bcm5421_poll_link(struct mii_phy* phy)
598{
599 u32 phy_reg;
600 int mode;
601
602 /* find out in what mode we are */
603 phy_write(phy, MII_NCONFIG, 0x1000);
604 phy_reg = phy_read(phy, MII_NCONFIG);
605
606 mode = (phy_reg & BCM5421_MODE_MASK) >> 5;
607
608 if ( mode == BCM54XX_COPPER)
609 return genmii_poll_link(phy);
610
 611 /* try to find out whether we have a link */
612 phy_write(phy, MII_NCONFIG, 0x2000);
613 phy_reg = phy_read(phy, MII_NCONFIG);
614
615 if (phy_reg & 0x0020)
616 return 0;
617 else
618 return 1;
619}
620
621static int bcm5421_read_link(struct mii_phy* phy)
622{
623 u32 phy_reg;
624 int mode;
625
626 /* find out in what mode we are */
627 phy_write(phy, MII_NCONFIG, 0x1000);
628 phy_reg = phy_read(phy, MII_NCONFIG);
629
630 mode = (phy_reg & BCM5421_MODE_MASK ) >> 5;
631
632 if ( mode == BCM54XX_COPPER)
633 return bcm54xx_read_link(phy);
634
635 phy->speed = SPEED_1000;
636
 637 /* find out whether we are running half or full duplex */
638 phy_write(phy, MII_NCONFIG, 0x2000);
639 phy_reg = phy_read(phy, MII_NCONFIG);
640
641 if ( (phy_reg & 0x0080) >> 7)
642 phy->duplex |= DUPLEX_HALF;
643 else
644 phy->duplex |= DUPLEX_FULL;
645
646 return 0;
647}
648
649static int bcm5421_enable_fiber(struct mii_phy* phy, int autoneg)
650{
651 /* enable fiber mode */
652 phy_write(phy, MII_NCONFIG, 0x9020);
653 /* LEDs active in both modes, autosense prio = fiber */
654 phy_write(phy, MII_NCONFIG, 0x945f);
655
656 if (!autoneg) {
657 /* switch off fibre autoneg */
658 phy_write(phy, MII_NCONFIG, 0xfc01);
659 phy_write(phy, 0x0b, 0x0004);
660 }
661
662 phy->autoneg = autoneg;
663
664 return 0;
665}
666
667#define BCM5461_FIBER_LINK (1 << 2)
668#define BCM5461_MODE_MASK (3 << 1)
669
670static int bcm5461_poll_link(struct mii_phy* phy)
671{
672 u32 phy_reg;
673 int mode;
674
675 /* find out in what mode we are */
676 phy_write(phy, MII_NCONFIG, 0x7c00);
677 phy_reg = phy_read(phy, MII_NCONFIG);
678
679 mode = (phy_reg & BCM5461_MODE_MASK ) >> 1;
680
681 if ( mode == BCM54XX_COPPER)
682 return genmii_poll_link(phy);
683
 684 /* find out whether we have a link */
685 phy_write(phy, MII_NCONFIG, 0x7000);
686 phy_reg = phy_read(phy, MII_NCONFIG);
687
688 if (phy_reg & BCM5461_FIBER_LINK)
689 return 1;
690 else
691 return 0;
692}
693
694#define BCM5461_FIBER_DUPLEX (1 << 3)
695
696static int bcm5461_read_link(struct mii_phy* phy)
697{
698 u32 phy_reg;
699 int mode;
700
701 /* find out in what mode we are */
702 phy_write(phy, MII_NCONFIG, 0x7c00);
703 phy_reg = phy_read(phy, MII_NCONFIG);
704
705 mode = (phy_reg & BCM5461_MODE_MASK ) >> 1;
706
707 if ( mode == BCM54XX_COPPER) {
708 return bcm54xx_read_link(phy);
709 }
710
711 phy->speed = SPEED_1000;
712
 713 /* find out whether we are running half or full duplex */
714 phy_write(phy, MII_NCONFIG, 0x7000);
715 phy_reg = phy_read(phy, MII_NCONFIG);
716
717 if (phy_reg & BCM5461_FIBER_DUPLEX)
718 phy->duplex |= DUPLEX_FULL;
719 else
720 phy->duplex |= DUPLEX_HALF;
721
722 return 0;
723}
724
725static int bcm5461_enable_fiber(struct mii_phy* phy, int autoneg)
726{
727 /* select fiber mode, enable 1000 base-X registers */
728 phy_write(phy, MII_NCONFIG, 0xfc0b);
729
730 if (autoneg) {
 731 /* enable fiber with autonegotiation */
732 phy_write(phy, MII_ADVERTISE, 0x01e0);
733 phy_write(phy, MII_BMCR, 0x1140);
734 } else {
 735 /* enable fiber without autonegotiation */
736 phy_write(phy, MII_BMCR, 0x0140);
737 }
738
739 phy->autoneg = autoneg;
740
741 return 0;
742}
743
518static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise) 744static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise)
519{ 745{
520 u16 ctl, adv; 746 u16 ctl, adv;
@@ -645,113 +871,6 @@ static int marvell_read_link(struct mii_phy *phy)
645 return 0; 871 return 0;
646} 872}
647 873
648static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
649{
650 u16 ctl, adv;
651
652 phy->autoneg = 1;
653 phy->speed = SPEED_10;
654 phy->duplex = DUPLEX_HALF;
655 phy->pause = 0;
656 phy->advertising = advertise;
657
658 /* Setup standard advertise */
659 adv = phy_read(phy, MII_ADVERTISE);
660 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
661 if (advertise & ADVERTISED_10baseT_Half)
662 adv |= ADVERTISE_10HALF;
663 if (advertise & ADVERTISED_10baseT_Full)
664 adv |= ADVERTISE_10FULL;
665 if (advertise & ADVERTISED_100baseT_Half)
666 adv |= ADVERTISE_100HALF;
667 if (advertise & ADVERTISED_100baseT_Full)
668 adv |= ADVERTISE_100FULL;
669 if (advertise & ADVERTISED_Pause)
670 adv |= ADVERTISE_PAUSE_CAP;
671 if (advertise & ADVERTISED_Asym_Pause)
672 adv |= ADVERTISE_PAUSE_ASYM;
673 phy_write(phy, MII_ADVERTISE, adv);
674
675 /* Start/Restart aneg */
676 ctl = phy_read(phy, MII_BMCR);
677 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
678 phy_write(phy, MII_BMCR, ctl);
679
680 return 0;
681}
682
683static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
684{
685 u16 ctl;
686
687 phy->autoneg = 0;
688 phy->speed = speed;
689 phy->duplex = fd;
690 phy->pause = 0;
691
692 ctl = phy_read(phy, MII_BMCR);
693 ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE);
694
695 /* First reset the PHY */
696 phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
697
698 /* Select speed & duplex */
699 switch(speed) {
700 case SPEED_10:
701 break;
702 case SPEED_100:
703 ctl |= BMCR_SPEED100;
704 break;
705 case SPEED_1000:
706 default:
707 return -EINVAL;
708 }
709 if (fd == DUPLEX_FULL)
710 ctl |= BMCR_FULLDPLX;
711 phy_write(phy, MII_BMCR, ctl);
712
713 return 0;
714}
715
716static int genmii_poll_link(struct mii_phy *phy)
717{
718 u16 status;
719
720 (void)phy_read(phy, MII_BMSR);
721 status = phy_read(phy, MII_BMSR);
722 if ((status & BMSR_LSTATUS) == 0)
723 return 0;
724 if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE))
725 return 0;
726 return 1;
727}
728
729static int genmii_read_link(struct mii_phy *phy)
730{
731 u16 lpa;
732
733 if (phy->autoneg) {
734 lpa = phy_read(phy, MII_LPA);
735
736 if (lpa & (LPA_10FULL | LPA_100FULL))
737 phy->duplex = DUPLEX_FULL;
738 else
739 phy->duplex = DUPLEX_HALF;
740 if (lpa & (LPA_100FULL | LPA_100HALF))
741 phy->speed = SPEED_100;
742 else
743 phy->speed = SPEED_10;
744 phy->pause = (phy->duplex == DUPLEX_FULL) &&
745 ((lpa & LPA_PAUSE) != 0);
746 }
747 /* On non-aneg, we assume what we put in BMCR is the speed,
748 * though magic-aneg shouldn't prevent this case from occurring
749 */
750
751 return 0;
752}
753
754
755#define MII_BASIC_FEATURES \ 874#define MII_BASIC_FEATURES \
756 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ 875 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
757 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ 876 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
@@ -885,8 +1004,8 @@ static struct mii_phy_ops bcm5421_phy_ops = {
885 .suspend = generic_suspend, 1004 .suspend = generic_suspend,
886 .setup_aneg = bcm54xx_setup_aneg, 1005 .setup_aneg = bcm54xx_setup_aneg,
887 .setup_forced = bcm54xx_setup_forced, 1006 .setup_forced = bcm54xx_setup_forced,
888 .poll_link = genmii_poll_link, 1007 .poll_link = bcm5421_poll_link,
889 .read_link = bcm54xx_read_link, 1008 .read_link = bcm5421_read_link,
890 .enable_fiber = bcm5421_enable_fiber, 1009 .enable_fiber = bcm5421_enable_fiber,
891}; 1010};
892 1011
@@ -923,8 +1042,8 @@ static struct mii_phy_ops bcm5461_phy_ops = {
923 .suspend = generic_suspend, 1042 .suspend = generic_suspend,
924 .setup_aneg = bcm54xx_setup_aneg, 1043 .setup_aneg = bcm54xx_setup_aneg,
925 .setup_forced = bcm54xx_setup_forced, 1044 .setup_forced = bcm54xx_setup_forced,
926 .poll_link = genmii_poll_link, 1045 .poll_link = bcm5461_poll_link,
927 .read_link = bcm54xx_read_link, 1046 .read_link = bcm5461_read_link,
928 .enable_fiber = bcm5461_enable_fiber, 1047 .enable_fiber = bcm5461_enable_fiber,
929}; 1048};
930 1049
diff --git a/drivers/net/sungem_phy.h b/drivers/net/sungem_phy.h
index 1d70ba6f9f..af02f9479c 100644
--- a/drivers/net/sungem_phy.h
+++ b/drivers/net/sungem_phy.h
@@ -12,7 +12,7 @@ struct mii_phy_ops
12 int (*setup_forced)(struct mii_phy *phy, int speed, int fd); 12 int (*setup_forced)(struct mii_phy *phy, int speed, int fd);
13 int (*poll_link)(struct mii_phy *phy); 13 int (*poll_link)(struct mii_phy *phy);
14 int (*read_link)(struct mii_phy *phy); 14 int (*read_link)(struct mii_phy *phy);
15 int (*enable_fiber)(struct mii_phy *phy); 15 int (*enable_fiber)(struct mii_phy *phy, int autoneg);
16}; 16};
17 17
18/* Structure used to statically define an mii/gii based PHY */ 18/* Structure used to statically define an mii/gii based PHY */
@@ -26,6 +26,14 @@ struct mii_phy_def
26 const struct mii_phy_ops* ops; 26 const struct mii_phy_ops* ops;
27}; 27};
28 28
29enum {
30 BCM54XX_COPPER,
31 BCM54XX_FIBER,
32 BCM54XX_GBIC,
33 BCM54XX_SGMII,
34 BCM54XX_UNKNOWN,
35};
36
29/* An instance of a PHY, partially borrowed from mii_if_info */ 37/* An instance of a PHY, partially borrowed from mii_if_info */
30struct mii_phy 38struct mii_phy
31{ 39{
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 81ed82f0b5..e3a7e3ceab 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -657,7 +657,7 @@ tc35815_init_queues(struct net_device *dev)
657 dma_cache_wback_inv((unsigned long)lp->fd_buf, PAGE_SIZE * FD_PAGE_NUM); 657 dma_cache_wback_inv((unsigned long)lp->fd_buf, PAGE_SIZE * FD_PAGE_NUM);
658#endif 658#endif
659 } else { 659 } else {
660 clear_page(lp->fd_buf); 660 memset(lp->fd_buf, 0, PAGE_SIZE * FD_PAGE_NUM);
661#ifdef __mips__ 661#ifdef __mips__
662 dma_cache_wback_inv((unsigned long)lp->fd_buf, PAGE_SIZE * FD_PAGE_NUM); 662 dma_cache_wback_inv((unsigned long)lp->fd_buf, PAGE_SIZE * FD_PAGE_NUM);
663#endif 663#endif
@@ -1703,19 +1703,6 @@ static void tc35815_chip_init(struct net_device *dev)
1703 spin_unlock_irqrestore(&lp->lock, flags); 1703 spin_unlock_irqrestore(&lp->lock, flags);
1704} 1704}
1705 1705
1706/* XXX */
1707void
1708tc35815_killall(void)
1709{
1710 struct net_device *dev;
1711
1712 for (dev = root_tc35815_dev; dev; dev = ((struct tc35815_local *)dev->priv)->next_module) {
1713 if (dev->flags&IFF_UP){
1714 dev->stop(dev);
1715 }
1716 }
1717}
1718
1719static struct pci_driver tc35815_driver = { 1706static struct pci_driver tc35815_driver = {
1720 .name = TC35815_MODULE_NAME, 1707 .name = TC35815_MODULE_NAME,
1721 .probe = tc35815_probe, 1708 .probe = tc35815_probe,
@@ -1732,6 +1719,11 @@ static void __exit tc35815_cleanup_module(void)
1732{ 1719{
1733 struct net_device *next_dev; 1720 struct net_device *next_dev;
1734 1721
1722 /*
1723 * TODO: implement a tc35815_driver.remove hook, and
1724 * move this code into that function. Then, delete
1725 * all root_tc35815_dev list handling code.
1726 */
1735 while (root_tc35815_dev) { 1727 while (root_tc35815_dev) {
1736 struct net_device *dev = root_tc35815_dev; 1728 struct net_device *dev = root_tc35815_dev;
1737 next_dev = ((struct tc35815_local *)dev->priv)->next_module; 1729 next_dev = ((struct tc35815_local *)dev->priv)->next_module;
@@ -1740,6 +1732,9 @@ static void __exit tc35815_cleanup_module(void)
1740 free_netdev(dev); 1732 free_netdev(dev);
1741 root_tc35815_dev = next_dev; 1733 root_tc35815_dev = next_dev;
1742 } 1734 }
1735
1736 pci_unregister_driver(&tc35815_driver);
1743} 1737}
1738
1744module_init(tc35815_init_module); 1739module_init(tc35815_init_module);
1745module_exit(tc35815_cleanup_module); 1740module_exit(tc35815_cleanup_module);
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 26c6ac4828..8c8f9f4d47 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -9114,8 +9114,7 @@ static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
9114 tg3_netif_stop(tp); 9114 tg3_netif_stop(tp);
9115 9115
9116 tg3_full_lock(tp, 0); 9116 tg3_full_lock(tp, 0);
9117 if (tp->vlgrp) 9117 vlan_group_set_device(tp->vlgrp, vid, NULL);
9118 tp->vlgrp->vlan_devices[vid] = NULL;
9119 tg3_full_unlock(tp); 9118 tg3_full_unlock(tp);
9120 9119
9121 if (netif_running(dev)) 9120 if (netif_running(dev))
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 9d67f11422..c82befa209 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -63,7 +63,7 @@ MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
63 63
64/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ 64/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
65#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \ 65#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
66 || defined(__sparc_) || defined(__ia64__) \ 66 || defined(__sparc__) || defined(__ia64__) \
67 || defined(__sh__) || defined(__mips__) 67 || defined(__sh__) || defined(__mips__)
68static int rx_copybreak = 1518; 68static int rx_copybreak = 1518;
69#else 69#else
@@ -1685,7 +1685,7 @@ static const struct ethtool_ops de_ethtool_ops = {
1685 .get_regs = de_get_regs, 1685 .get_regs = de_get_regs,
1686}; 1686};
1687 1687
1688static void __init de21040_get_mac_address (struct de_private *de) 1688static void __devinit de21040_get_mac_address (struct de_private *de)
1689{ 1689{
1690 unsigned i; 1690 unsigned i;
1691 1691
@@ -1703,7 +1703,7 @@ static void __init de21040_get_mac_address (struct de_private *de)
1703 } 1703 }
1704} 1704}
1705 1705
1706static void __init de21040_get_media_info(struct de_private *de) 1706static void __devinit de21040_get_media_info(struct de_private *de)
1707{ 1707{
1708 unsigned int i; 1708 unsigned int i;
1709 1709
@@ -1765,7 +1765,7 @@ static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, in
1765 return retval; 1765 return retval;
1766} 1766}
1767 1767
1768static void __init de21041_get_srom_info (struct de_private *de) 1768static void __devinit de21041_get_srom_info (struct de_private *de)
1769{ 1769{
1770 unsigned i, sa_offset = 0, ofs; 1770 unsigned i, sa_offset = 0, ofs;
1771 u8 ee_data[DE_EEPROM_SIZE + 6] = {}; 1771 u8 ee_data[DE_EEPROM_SIZE + 6] = {};
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 7f59a3d4fd..24a29c99ba 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -143,9 +143,16 @@
143#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */ 143#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */
144#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */ 144#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */
145 145
146#define DMFE_DBUG(dbug_now, msg, value) if (dmfe_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value)) 146#define DMFE_DBUG(dbug_now, msg, value) \
147 do { \
148 if (dmfe_debug || (dbug_now)) \
149 printk(KERN_ERR DRV_NAME ": %s %lx\n",\
150 (msg), (long) (value)); \
151 } while (0)
147 152
148#define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMhz %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half"); 153#define SHOW_MEDIA_TYPE(mode) \
 154 printk (KERN_INFO DRV_NAME ": Change Speed to %sMbps %s duplex\n" , \
155 (mode & 1) ? "100":"10", (mode & 4) ? "full":"half");
149 156
150 157
151/* CR9 definition: SROM/MII */ 158/* CR9 definition: SROM/MII */
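Wrapping the multi-statement DMFE_DBUG body in do { ... } while (0) keeps the macro usable as a single statement, for example in an unbraced if/else, which a bare if inside the macro would break. A small standalone illustration of why the wrapper matters (LOG is a hypothetical macro, not from the driver):

#include <stdio.h>

#define LOG(msg, val)                                   \
        do {                                            \
                if (val)                                \
                        printf("%s %d\n", msg, val);    \
        } while (0)

int main(void)
{
        int err = 1;

        if (err)
                LOG("error", err);      /* expands to exactly one statement ... */
        else
                printf("ok\n");         /* ... so this else still binds correctly */
        return 0;
}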
@@ -163,10 +170,20 @@
163 170
164#define SROM_V41_CODE 0x14 171#define SROM_V41_CODE 0x14
165 172
166#define SROM_CLK_WRITE(data, ioaddr) outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5); 173#define SROM_CLK_WRITE(data, ioaddr) \
174 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
175 udelay(5); \
176 outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
177 udelay(5); \
178 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
179 udelay(5);
180
181#define __CHK_IO_SIZE(pci_id, dev_rev) \
182 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? \
183 DM9102A_IO_SIZE: DM9102_IO_SIZE)
167 184
168#define __CHK_IO_SIZE(pci_id, dev_rev) ( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? DM9102A_IO_SIZE: DM9102_IO_SIZE 185#define CHK_IO_SIZE(pci_dev, dev_rev) \
169#define CHK_IO_SIZE(pci_dev, dev_rev) __CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev) 186 (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev))
170 187
171/* Sten Check */ 188/* Sten Check */
172#define DEVICE net_device 189#define DEVICE net_device
@@ -187,7 +204,7 @@ struct rx_desc {
187struct dmfe_board_info { 204struct dmfe_board_info {
188 u32 chip_id; /* Chip vendor/Device ID */ 205 u32 chip_id; /* Chip vendor/Device ID */
189 u32 chip_revision; /* Chip revision */ 206 u32 chip_revision; /* Chip revision */
190 struct DEVICE *dev; /* net device */ 207 struct DEVICE *next_dev; /* next device */
191 struct pci_dev *pdev; /* PCI device */ 208 struct pci_dev *pdev; /* PCI device */
192 spinlock_t lock; 209 spinlock_t lock;
193 210
@@ -231,7 +248,6 @@ struct dmfe_board_info {
231 u8 media_mode; /* user specify media mode */ 248 u8 media_mode; /* user specify media mode */
232 u8 op_mode; /* real work media mode */ 249 u8 op_mode; /* real work media mode */
233 u8 phy_addr; 250 u8 phy_addr;
234 u8 link_failed; /* Ever link failed */
235 u8 wait_reset; /* Hardware failed, need to reset */ 251 u8 wait_reset; /* Hardware failed, need to reset */
236 u8 dm910x_chk_mode; /* Operating mode check */ 252 u8 dm910x_chk_mode; /* Operating mode check */
237 u8 first_in_callback; /* Flag to record state */ 253 u8 first_in_callback; /* Flag to record state */
@@ -329,7 +345,7 @@ static void dmfe_program_DM9802(struct dmfe_board_info *);
329static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * ); 345static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
330static void dmfe_set_phyxcer(struct dmfe_board_info *); 346static void dmfe_set_phyxcer(struct dmfe_board_info *);
331 347
332/* DM910X network baord routine ---------------------------- */ 348/* DM910X network board routine ---------------------------- */
333 349
334/* 350/*
335 * Search DM910X board ,allocate space and register it 351 * Search DM910X board ,allocate space and register it
@@ -356,7 +372,8 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
356 SET_NETDEV_DEV(dev, &pdev->dev); 372 SET_NETDEV_DEV(dev, &pdev->dev);
357 373
358 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { 374 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
359 printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n"); 375 printk(KERN_WARNING DRV_NAME
376 ": 32-bit PCI DMA not available.\n");
360 err = -ENODEV; 377 err = -ENODEV;
361 goto err_out_free; 378 goto err_out_free;
362 } 379 }
@@ -399,11 +416,12 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
399 /* Init system & device */ 416 /* Init system & device */
400 db = netdev_priv(dev); 417 db = netdev_priv(dev);
401 418
402 db->dev = dev;
403
404 /* Allocate Tx/Rx descriptor memory */ 419 /* Allocate Tx/Rx descriptor memory */
405 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); 420 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
406 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); 421 DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
422
423 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
424 TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
407 425
408 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; 426 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
409 db->first_tx_desc_dma = db->desc_pool_dma_ptr; 427 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
@@ -428,7 +446,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
428 dev->poll_controller = &poll_dmfe; 446 dev->poll_controller = &poll_dmfe;
429#endif 447#endif
430 dev->ethtool_ops = &netdev_ethtool_ops; 448 dev->ethtool_ops = &netdev_ethtool_ops;
431 netif_carrier_off(db->dev); 449 netif_carrier_off(dev);
432 spin_lock_init(&db->lock); 450 spin_lock_init(&db->lock);
433 451
434 pci_read_config_dword(pdev, 0x50, &pci_pmr); 452 pci_read_config_dword(pdev, 0x50, &pci_pmr);
@@ -440,7 +458,8 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
440 458
441 /* read 64 word srom data */ 459 /* read 64 word srom data */
442 for (i = 0; i < 64; i++) 460 for (i = 0; i < 64; i++)
443 ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i)); 461 ((u16 *) db->srom)[i] =
462 cpu_to_le16(read_srom_word(db->ioaddr, i));
444 463
445 /* Set Node address */ 464 /* Set Node address */
446 for (i = 0; i < 6; i++) 465 for (i = 0; i < 6; i++)
@@ -482,14 +501,17 @@ static void __devexit dmfe_remove_one (struct pci_dev *pdev)
482 DMFE_DBUG(0, "dmfe_remove_one()", 0); 501 DMFE_DBUG(0, "dmfe_remove_one()", 0);
483 502
484 if (dev) { 503 if (dev) {
504
505 unregister_netdev(dev);
506
485 pci_free_consistent(db->pdev, sizeof(struct tx_desc) * 507 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
486 DESC_ALL_CNT + 0x20, db->desc_pool_ptr, 508 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
487 db->desc_pool_dma_ptr); 509 db->desc_pool_dma_ptr);
488 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, 510 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
489 db->buf_pool_ptr, db->buf_pool_dma_ptr); 511 db->buf_pool_ptr, db->buf_pool_dma_ptr);
490 unregister_netdev(dev);
491 pci_release_regions(pdev); 512 pci_release_regions(pdev);
492 free_netdev(dev); /* free board information */ 513 free_netdev(dev); /* free board information */
514
493 pci_set_drvdata(pdev, NULL); 515 pci_set_drvdata(pdev, NULL);
494 } 516 }
495 517
@@ -509,7 +531,8 @@ static int dmfe_open(struct DEVICE *dev)
509 531
510 DMFE_DBUG(0, "dmfe_open", 0); 532 DMFE_DBUG(0, "dmfe_open", 0);
511 533
512 ret = request_irq(dev->irq, &dmfe_interrupt, IRQF_SHARED, dev->name, dev); 534 ret = request_irq(dev->irq, &dmfe_interrupt,
535 IRQF_SHARED, dev->name, dev);
513 if (ret) 536 if (ret)
514 return ret; 537 return ret;
515 538
@@ -518,7 +541,6 @@ static int dmfe_open(struct DEVICE *dev)
518 db->tx_packet_cnt = 0; 541 db->tx_packet_cnt = 0;
519 db->tx_queue_cnt = 0; 542 db->tx_queue_cnt = 0;
520 db->rx_avail_cnt = 0; 543 db->rx_avail_cnt = 0;
521 db->link_failed = 1;
522 db->wait_reset = 0; 544 db->wait_reset = 0;
523 545
524 db->first_in_callback = 0; 546 db->first_in_callback = 0;
@@ -650,7 +672,8 @@ static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
650 /* No Tx resource check, it never happen nromally */ 672 /* No Tx resource check, it never happen nromally */
651 if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) { 673 if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
652 spin_unlock_irqrestore(&db->lock, flags); 674 spin_unlock_irqrestore(&db->lock, flags);
653 printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_queue_cnt); 675 printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n",
676 db->tx_queue_cnt);
654 return 1; 677 return 1;
655 } 678 }
656 679
@@ -722,7 +745,8 @@ static int dmfe_stop(struct DEVICE *dev)
722 745
723#if 0 746#if 0
724 /* show statistic counter */ 747 /* show statistic counter */
725 printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n", 748 printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx"
749 " LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
726 db->tx_fifo_underrun, db->tx_excessive_collision, 750 db->tx_fifo_underrun, db->tx_excessive_collision,
727 db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier, 751 db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
728 db->tx_jabber_timeout, db->reset_count, db->reset_cr8, 752 db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
@@ -905,7 +929,7 @@ static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
905static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db) 929static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
906{ 930{
907 struct rx_desc *rxptr; 931 struct rx_desc *rxptr;
908 struct sk_buff *skb; 932 struct sk_buff *skb, *newskb;
909 int rxlen; 933 int rxlen;
910 u32 rdes0; 934 u32 rdes0;
911 935
@@ -919,7 +943,9 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
919 db->rx_avail_cnt--; 943 db->rx_avail_cnt--;
920 db->interval_rx_cnt++; 944 db->interval_rx_cnt++;
921 945
922 pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE); 946 pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
947 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
948
923 if ( (rdes0 & 0x300) != 0x300) { 949 if ( (rdes0 & 0x300) != 0x300) {
924 /* A packet without First/Last flag */ 950 /* A packet without First/Last flag */
925 /* reuse this SKB */ 951 /* reuse this SKB */
@@ -956,9 +982,11 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
956 } else { 982 } else {
957 /* Good packet, send to upper layer */ 983 /* Good packet, send to upper layer */
958 /* Shorst packet used new SKB */ 984 /* Shorst packet used new SKB */
959 if ( (rxlen < RX_COPY_SIZE) && 985 if ((rxlen < RX_COPY_SIZE) &&
960 ( (skb = dev_alloc_skb(rxlen + 2) ) 986 ((newskb = dev_alloc_skb(rxlen + 2))
961 != NULL) ) { 987 != NULL)) {
988
989 skb = newskb;
962 /* size less than COPY_SIZE, allocate a rxlen SKB */ 990 /* size less than COPY_SIZE, allocate a rxlen SKB */
963 skb->dev = dev; 991 skb->dev = dev;
964 skb_reserve(skb, 2); /* 16byte align */ 992 skb_reserve(skb, 2); /* 16byte align */
@@ -1069,6 +1097,8 @@ static void dmfe_timer(unsigned long data)
1069 struct dmfe_board_info *db = netdev_priv(dev); 1097 struct dmfe_board_info *db = netdev_priv(dev);
1070 unsigned long flags; 1098 unsigned long flags;
1071 1099
1100 int link_ok, link_ok_phy;
1101
1072 DMFE_DBUG(0, "dmfe_timer()", 0); 1102 DMFE_DBUG(0, "dmfe_timer()", 0);
1073 spin_lock_irqsave(&db->lock, flags); 1103 spin_lock_irqsave(&db->lock, flags);
1074 1104
@@ -1078,7 +1108,8 @@ static void dmfe_timer(unsigned long data)
1078 if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) { 1108 if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
1079 db->cr6_data &= ~0x40000; 1109 db->cr6_data &= ~0x40000;
1080 update_cr6(db->cr6_data, db->ioaddr); 1110 update_cr6(db->cr6_data, db->ioaddr);
1081 phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); 1111 phy_write(db->ioaddr,
1112 db->phy_addr, 0, 0x1000, db->chip_id);
1082 db->cr6_data |= 0x40000; 1113 db->cr6_data |= 0x40000;
1083 update_cr6(db->cr6_data, db->ioaddr); 1114 update_cr6(db->cr6_data, db->ioaddr);
1084 db->timer.expires = DMFE_TIMER_WUT + HZ * 2; 1115 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
@@ -1139,21 +1170,41 @@ static void dmfe_timer(unsigned long data)
1139 (db->chip_revision == 0x02000010)) ) { 1170 (db->chip_revision == 0x02000010)) ) {
1140 /* DM9102A Chip */ 1171 /* DM9102A Chip */
1141 if (tmp_cr12 & 2) 1172 if (tmp_cr12 & 2)
1142 tmp_cr12 = 0x0; /* Link failed */ 1173 link_ok = 0;
1143 else 1174 else
1144 tmp_cr12 = 0x3; /* Link OK */ 1175 link_ok = 1;
1145 } 1176 }
1177 else
 1178 /* 0x43 is used instead of 0x3 because bit 6 should represent the
 1179 link status of the external PHY */
1180 link_ok = (tmp_cr12 & 0x43) ? 1 : 0;
1181
1182
 1183 /* If the chip reports that the link has failed, it could be because the
 1184 external PHY link status pin is not connected correctly to the chip.
 1185 To be sure, ask the PHY too.
 1186 */
1187
 1188 /* need a dummy read because of the PHY's register latch */
1189 phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
1190 link_ok_phy = (phy_read (db->ioaddr,
1191 db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;
1146 1192
1147 if ( !(tmp_cr12 & 0x3) && !db->link_failed ) { 1193 if (link_ok_phy != link_ok) {
1194 DMFE_DBUG (0, "PHY and chip report different link status", 0);
1195 link_ok = link_ok | link_ok_phy;
1196 }
1197
1198 if ( !link_ok && netif_carrier_ok(dev)) {
1148 /* Link Failed */ 1199 /* Link Failed */
1149 DMFE_DBUG(0, "Link Failed", tmp_cr12); 1200 DMFE_DBUG(0, "Link Failed", tmp_cr12);
1150 db->link_failed = 1; 1201 netif_carrier_off(dev);
1151 netif_carrier_off(db->dev);
1152 1202
1153 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ 1203 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
1154 /* AUTO or force 1M Homerun/Longrun don't need */ 1204 /* AUTO or force 1M Homerun/Longrun don't need */
1155 if ( !(db->media_mode & 0x38) ) 1205 if ( !(db->media_mode & 0x38) )
1156 phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); 1206 phy_write(db->ioaddr, db->phy_addr,
1207 0, 0x1000, db->chip_id);
1157 1208
1158 /* AUTO mode, if INT phyxcer link failed, select EXT device */ 1209 /* AUTO mode, if INT phyxcer link failed, select EXT device */
1159 if (db->media_mode & DMFE_AUTO) { 1210 if (db->media_mode & DMFE_AUTO) {
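The timer now reads the MII BMSR twice because its link-status bit is latched low: the first read clears a stale "link was down" indication, the second reflects the current state. The chip's CR12 view and the PHY's view are then combined so a miswired status pin cannot hide a working link. A hedged standalone sketch of that logic (reg_read is a hypothetical accessor, not the driver's phy_read):

#define MII_BMSR        0x01
#define BMSR_LSTATUS    0x0004

/* reg_read(reg) is assumed to return the 16-bit MII register value */
static int link_up(unsigned int (*reg_read)(int reg), int chip_says_up)
{
        unsigned int bmsr;

        (void)reg_read(MII_BMSR);       /* dummy read clears the latched bit */
        bmsr = reg_read(MII_BMSR);      /* second read gives the current state */

        /* trust either source; a bad status pin must not mask a live link */
        return chip_says_up || (bmsr & BMSR_LSTATUS);
}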
@@ -1162,21 +1213,19 @@ static void dmfe_timer(unsigned long data)
1162 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */ 1213 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
1163 update_cr6(db->cr6_data, db->ioaddr); 1214 update_cr6(db->cr6_data, db->ioaddr);
1164 } 1215 }
1165 } else 1216 } else if (!netif_carrier_ok(dev)) {
1166 if ((tmp_cr12 & 0x3) && db->link_failed) { 1217
1167 DMFE_DBUG(0, "Link link OK", tmp_cr12); 1218 DMFE_DBUG(0, "Link link OK", tmp_cr12);
1168 db->link_failed = 0; 1219
1169 1220 /* Auto Sense Speed */
1170 /* Auto Sense Speed */ 1221 if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
1171 if ( (db->media_mode & DMFE_AUTO) && 1222 netif_carrier_on(dev);
1172 dmfe_sense_speed(db) ) 1223 SHOW_MEDIA_TYPE(db->op_mode);
1173 db->link_failed = 1;
1174 else
1175 netif_carrier_on(db->dev);
1176 dmfe_process_mode(db);
1177 /* SHOW_MEDIA_TYPE(db->op_mode); */
1178 } 1224 }
1179 1225
1226 dmfe_process_mode(db);
1227 }
1228
1180 /* HPNA remote command check */ 1229 /* HPNA remote command check */
1181 if (db->HPNA_command & 0xf00) { 1230 if (db->HPNA_command & 0xf00) {
1182 db->HPNA_timer--; 1231 db->HPNA_timer--;
@@ -1221,7 +1270,7 @@ static void dmfe_dynamic_reset(struct DEVICE *dev)
1221 db->tx_packet_cnt = 0; 1270 db->tx_packet_cnt = 0;
1222 db->tx_queue_cnt = 0; 1271 db->tx_queue_cnt = 0;
1223 db->rx_avail_cnt = 0; 1272 db->rx_avail_cnt = 0;
1224 db->link_failed = 1; 1273 netif_carrier_off(dev);
1225 db->wait_reset = 0; 1274 db->wait_reset = 0;
1226 1275
1227 /* Re-initilize DM910X board */ 1276 /* Re-initilize DM910X board */
@@ -1259,7 +1308,8 @@ static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
1259 1308
1260 if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) { 1309 if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1261 rxptr->rx_skb_ptr = skb; 1310 rxptr->rx_skb_ptr = skb;
1262 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); 1311 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
1312 skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1263 wmb(); 1313 wmb();
1264 rxptr->rdes0 = cpu_to_le32(0x80000000); 1314 rxptr->rdes0 = cpu_to_le32(0x80000000);
1265 db->rx_avail_cnt++; 1315 db->rx_avail_cnt++;
@@ -1291,8 +1341,11 @@ static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioadd
1291 outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */ 1341 outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */
1292 1342
1293 /* rx descriptor start pointer */ 1343 /* rx descriptor start pointer */
1294 db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT; 1344 db->first_rx_desc = (void *)db->first_tx_desc +
1295 db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT; 1345 sizeof(struct tx_desc) * TX_DESC_CNT;
1346
1347 db->first_rx_desc_dma = db->first_tx_desc_dma +
1348 sizeof(struct tx_desc) * TX_DESC_CNT;
1296 db->rx_insert_ptr = db->first_rx_desc; 1349 db->rx_insert_ptr = db->first_rx_desc;
1297 db->rx_ready_ptr = db->first_rx_desc; 1350 db->rx_ready_ptr = db->first_rx_desc;
1298 outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */ 1351 outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */
@@ -1470,7 +1523,8 @@ static void allocate_rx_buffer(struct dmfe_board_info *db)
1470 if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL ) 1523 if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
1471 break; 1524 break;
1472 rxptr->rx_skb_ptr = skb; /* FIXME (?) */ 1525 rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1473 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); 1526 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
1527 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1474 wmb(); 1528 wmb();
1475 rxptr->rdes0 = cpu_to_le32(0x80000000); 1529 rxptr->rdes0 = cpu_to_le32(0x80000000);
1476 rxptr = rxptr->next_rx_desc; 1530 rxptr = rxptr->next_rx_desc;
@@ -1510,7 +1564,8 @@ static u16 read_srom_word(long ioaddr, int offset)
1510 for (i = 16; i > 0; i--) { 1564 for (i = 16; i > 0; i--) {
1511 outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr); 1565 outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
1512 udelay(5); 1566 udelay(5);
1513 srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0); 1567 srom_data = (srom_data << 1) |
1568 ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
1514 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); 1569 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1515 udelay(5); 1570 udelay(5);
1516 } 1571 }
@@ -1537,9 +1592,11 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db)
1537 1592
1538 if ( (phy_mode & 0x24) == 0x24 ) { 1593 if ( (phy_mode & 0x24) == 0x24 ) {
1539 if (db->chip_id == PCI_DM9132_ID) /* DM9132 */ 1594 if (db->chip_id == PCI_DM9132_ID) /* DM9132 */
1540 phy_mode = phy_read(db->ioaddr, db->phy_addr, 7, db->chip_id) & 0xf000; 1595 phy_mode = phy_read(db->ioaddr,
1596 db->phy_addr, 7, db->chip_id) & 0xf000;
1541 else /* DM9102/DM9102A */ 1597 else /* DM9102/DM9102A */
1542 phy_mode = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0xf000; 1598 phy_mode = phy_read(db->ioaddr,
1599 db->phy_addr, 17, db->chip_id) & 0xf000;
1543 /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */ 1600 /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
1544 switch (phy_mode) { 1601 switch (phy_mode) {
1545 case 0x1000: db->op_mode = DMFE_10MHF; break; 1602 case 0x1000: db->op_mode = DMFE_10MHF; break;
@@ -1576,8 +1633,11 @@ static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1576 1633
1577 /* DM9009 Chip: Phyxcer reg18 bit12=0 */ 1634 /* DM9009 Chip: Phyxcer reg18 bit12=0 */
1578 if (db->chip_id == PCI_DM9009_ID) { 1635 if (db->chip_id == PCI_DM9009_ID) {
1579 phy_reg = phy_read(db->ioaddr, db->phy_addr, 18, db->chip_id) & ~0x1000; 1636 phy_reg = phy_read(db->ioaddr,
1580 phy_write(db->ioaddr, db->phy_addr, 18, phy_reg, db->chip_id); 1637 db->phy_addr, 18, db->chip_id) & ~0x1000;
1638
1639 phy_write(db->ioaddr,
1640 db->phy_addr, 18, phy_reg, db->chip_id);
1581 } 1641 }
1582 1642
1583 /* Phyxcer capability setting */ 1643 /* Phyxcer capability setting */
@@ -1650,10 +1710,12 @@ static void dmfe_process_mode(struct dmfe_board_info *db)
1650 case DMFE_100MHF: phy_reg = 0x2000; break; 1710 case DMFE_100MHF: phy_reg = 0x2000; break;
1651 case DMFE_100MFD: phy_reg = 0x2100; break; 1711 case DMFE_100MFD: phy_reg = 0x2100; break;
1652 } 1712 }
1653 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id); 1713 phy_write(db->ioaddr,
1714 db->phy_addr, 0, phy_reg, db->chip_id);
1654 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) ) 1715 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1655 mdelay(20); 1716 mdelay(20);
1656 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id); 1717 phy_write(db->ioaddr,
1718 db->phy_addr, 0, phy_reg, db->chip_id);
1657 } 1719 }
1658 } 1720 }
1659} 1721}
@@ -1663,7 +1725,8 @@ static void dmfe_process_mode(struct dmfe_board_info *db)
1663 * Write a word to Phy register 1725 * Write a word to Phy register
1664 */ 1726 */
1665 1727
1666static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id) 1728static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
1729 u16 phy_data, u32 chip_id)
1667{ 1730{
1668 u16 i; 1731 u16 i;
1669 unsigned long ioaddr; 1732 unsigned long ioaddr;
@@ -1689,11 +1752,13 @@ static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data
1689 1752
1690 /* Send Phy address */ 1753 /* Send Phy address */
1691 for (i = 0x10; i > 0; i = i >> 1) 1754 for (i = 0x10; i > 0; i = i >> 1)
1692 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); 1755 phy_write_1bit(ioaddr,
1756 phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1693 1757
1694 /* Send register address */ 1758 /* Send register address */
1695 for (i = 0x10; i > 0; i = i >> 1) 1759 for (i = 0x10; i > 0; i = i >> 1)
1696 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0); 1760 phy_write_1bit(ioaddr,
1761 offset & i ? PHY_DATA_1 : PHY_DATA_0);
1697 1762
1698 /* written trasnition */ 1763 /* written trasnition */
1699 phy_write_1bit(ioaddr, PHY_DATA_1); 1764 phy_write_1bit(ioaddr, PHY_DATA_1);
@@ -1701,7 +1766,8 @@ static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data
1701 1766
1702 /* Write a word data to PHY controller */ 1767 /* Write a word data to PHY controller */
1703 for ( i = 0x8000; i > 0; i >>= 1) 1768 for ( i = 0x8000; i > 0; i >>= 1)
1704 phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0); 1769 phy_write_1bit(ioaddr,
1770 phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1705 } 1771 }
1706} 1772}
1707 1773
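phy_write shifts the PHY address, register offset and data out one bit at a time, most-significant bit first, by walking a single-bit mask down to zero. A standalone sketch of that loop shape (emit_bit stands in for the driver's phy_write_1bit):

/* emit_bit(b) is assumed to clock one bit out on the MDIO line */
static void send_msb_first(void (*emit_bit)(int b), unsigned int value,
                           unsigned int msb_mask)
{
        unsigned int i;

        /* e.g. msb_mask = 0x10 sends 5 bits, 0x8000 sends 16 bits */
        for (i = msb_mask; i > 0; i >>= 1)
                emit_bit((value & i) ? 1 : 0);
}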
@@ -1738,11 +1804,13 @@ static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1738 1804
1739 /* Send Phy address */ 1805 /* Send Phy address */
1740 for (i = 0x10; i > 0; i = i >> 1) 1806 for (i = 0x10; i > 0; i = i >> 1)
1741 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); 1807 phy_write_1bit(ioaddr,
1808 phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1742 1809
1743 /* Send register address */ 1810 /* Send register address */
1744 for (i = 0x10; i > 0; i = i >> 1) 1811 for (i = 0x10; i > 0; i = i >> 1)
1745 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0); 1812 phy_write_1bit(ioaddr,
1813 offset & i ? PHY_DATA_1 : PHY_DATA_0);
1746 1814
1747 /* Skip transition state */ 1815 /* Skip transition state */
1748 phy_read_1bit(ioaddr); 1816 phy_read_1bit(ioaddr);
@@ -1963,7 +2031,8 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
1963 2031
1964 /* Check remote device status matches our setting or not */ 2032 /* Check remote device status matches our setting or not */
1965 if ( phy_reg != (db->HPNA_command & 0x0f00) ) { 2033 if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
1966 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id); 2034 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
2035 db->chip_id);
1967 db->HPNA_timer=8; 2036 db->HPNA_timer=8;
1968 } else 2037 } else
1969 db->HPNA_timer=600; /* Match, every 10 minutes, check */ 2038 db->HPNA_timer=600; /* Match, every 10 minutes, check */
@@ -2003,8 +2072,11 @@ module_param(HPNA_tx_cmd, byte, 0);
2003module_param(HPNA_NoiseFloor, byte, 0); 2072module_param(HPNA_NoiseFloor, byte, 0);
2004module_param(SF_mode, byte, 0); 2073module_param(SF_mode, byte, 0);
2005MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)"); 2074MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
2006MODULE_PARM_DESC(mode, "Davicom DM9xxx: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA"); 2075MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
2007MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function (bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)"); 2076 "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
2077
2078MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
2079 "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
2008 2080
2009/* Description: 2081/* Description:
2010 * when user used insmod to add module, system invoked init_module() 2082 * when user used insmod to add module, system invoked init_module()
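Note: the dmfe hunks above are pure re-wrapping of long lines in the bit-banged MII access path; no behaviour changes. For orientation, phy_write() clocks the station address, register offset and data out one bit at a time through phy_write_1bit(), most significant bit first. A minimal illustrative sketch of that serialization step (the helper name is an assumption; only PHY_DATA_0/PHY_DATA_1 and phy_write_1bit() come from the driver):

        /* Illustrative only: clock an n-bit value out MSB-first, one bit per
         * phy_write_1bit() call, exactly the shape of the loops in phy_write()
         * and phy_read() above. */
        static void mdio_send_bits(unsigned long ioaddr, u16 value, int nbits)
        {
                u16 mask;

                for (mask = 1 << (nbits - 1); mask; mask >>= 1)
                        phy_write_1bit(ioaddr,
                                (value & mask) ? PHY_DATA_1 : PHY_DATA_0);
        }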
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 5a35354aa5..e3774a5223 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -67,7 +67,7 @@ const char * const medianame[32] = {
67 67
68/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ 68/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
69#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \ 69#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
70 || defined(__sparc_) || defined(__ia64__) \ 70 || defined(__sparc__) || defined(__ia64__) \
71 || defined(__sh__) || defined(__mips__) 71 || defined(__sh__) || defined(__mips__)
72static int rx_copybreak = 1518; 72static int rx_copybreak = 1518;
73#else 73#else
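Note: the tulip_core change is a one-character fix; the architecture guard selecting the larger default rx_copybreak had __sparc_ instead of __sparc__, so SPARC silently fell into the #else branch. As a reminder of what the knob controls, a hedged sketch of the usual copy-break decision on receive (simplified, not the driver's actual RX code; helper and variable names are assumptions):

        /* Illustrative sketch: frames shorter than rx_copybreak are copied into
         * a freshly allocated skb so the large ring buffer can be reused at
         * once; larger frames hand the ring buffer itself to the stack. */
        static struct sk_buff *rx_copybreak_skb(struct sk_buff *ring_skb, int pkt_len)
        {
                struct sk_buff *skb = ring_skb;

                if (pkt_len < rx_copybreak) {
                        struct sk_buff *copy = dev_alloc_skb(pkt_len + 2);

                        if (copy) {
                                skb_reserve(copy, 2);  /* align the IP header */
                                memcpy(copy->data, ring_skb->data, pkt_len);
                                skb_put(copy, pkt_len);
                                skb = copy;            /* big buffer stays in the ring */
                        }
                }
                return skb;
        }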
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 9781b16bb8..0d91d094ed 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -746,8 +746,7 @@ typhoon_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
746{ 746{
747 struct typhoon *tp = netdev_priv(dev); 747 struct typhoon *tp = netdev_priv(dev);
748 spin_lock_bh(&tp->state_lock); 748 spin_lock_bh(&tp->state_lock);
749 if(tp->vlgrp) 749 vlan_group_set_device(tp->vlgrp, vid, NULL);
750 tp->vlgrp->vlan_devices[vid] = NULL;
751 spin_unlock_bh(&tp->state_lock); 750 spin_unlock_bh(&tp->state_lock);
752} 751}
753 752
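Note: the typhoon change (and the matching qeth changes further down) stop poking vlgrp->vlan_devices[vid] directly and go through vlan_group_set_device()/vlan_group_get_device() instead, which also makes the NULL-group check implicit. Roughly, the accessors behave like the sketch below (an assumption for illustration; the real definitions live in the VLAN headers):

        /* Illustrative sketch of NULL-tolerant accessors over the per-group
         * table of net_device pointers indexed by VLAN id. */
        static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
                                                               int vid)
        {
                return vg ? vg->vlan_devices[vid] : NULL;
        }

        static inline void vlan_group_set_device(struct vlan_group *vg, int vid,
                                                 struct net_device *dev)
        {
                if (vg)
                        vg->vlan_devices[vid] = dev;
        }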
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index a2fc2bbcf9..dab88b958d 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3598,17 +3598,20 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3598 3598
3599 /* Move to next BD in the ring */ 3599 /* Move to next BD in the ring */
3600 if (!(bd_status & T_W)) 3600 if (!(bd_status & T_W))
3601 ugeth->txBd[txQ] = bd + sizeof(struct qe_bd); 3601 bd += sizeof(struct qe_bd);
3602 else 3602 else
3603 ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ]; 3603 bd = ugeth->p_tx_bd_ring[txQ];
3604 3604
3605 /* If the next BD still needs to be cleaned up, then the bds 3605 /* If the next BD still needs to be cleaned up, then the bds
3606 are full. We need to tell the kernel to stop sending us stuff. */ 3606 are full. We need to tell the kernel to stop sending us stuff. */
3607 if (bd == ugeth->confBd[txQ]) { 3607 if (bd == ugeth->confBd[txQ]) {
3608 if (!netif_queue_stopped(dev)) 3608 if (!netif_queue_stopped(dev))
3609 netif_stop_queue(dev); 3609 netif_stop_queue(dev);
3610 return NETDEV_TX_BUSY;
3610 } 3611 }
3611 3612
3613 ugeth->txBd[txQ] = bd;
3614
3612 if (ugeth->p_scheduler) { 3615 if (ugeth->p_scheduler) {
3613 ugeth->cpucount[txQ]++; 3616 ugeth->cpucount[txQ]++;
3614 /* Indicate to QE that there are more Tx bds ready for 3617 /* Indicate to QE that there are more Tx bds ready for
@@ -3620,7 +3623,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3620 3623
3621 spin_unlock_irq(&ugeth->lock); 3624 spin_unlock_irq(&ugeth->lock);
3622 3625
3623 return 0; 3626 return NETDEV_TX_OK;
3624} 3627}
3625 3628
3626static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) 3629static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
@@ -3722,7 +3725,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3722 /* Handle the transmitted buffer and release */ 3725 /* Handle the transmitted buffer and release */
3723 /* the BD to be used with the current frame */ 3726 /* the BD to be used with the current frame */
3724 3727
3725 if ((bd = ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0)) 3728 if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
3726 break; 3729 break;
3727 3730
3728 ugeth->stats.tx_packets++; 3731 ugeth->stats.tx_packets++;
@@ -3741,10 +3744,12 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3741 3744
3742 /* Advance the confirmation BD pointer */ 3745 /* Advance the confirmation BD pointer */
3743 if (!(bd_status & T_W)) 3746 if (!(bd_status & T_W))
3744 ugeth->confBd[txQ] += sizeof(struct qe_bd); 3747 bd += sizeof(struct qe_bd);
3745 else 3748 else
3746 ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ]; 3749 bd = ugeth->p_tx_bd_ring[txQ];
3750 bd_status = in_be32((u32 *)bd);
3747 } 3751 }
3752 ugeth->confBd[txQ] = bd;
3748 return 0; 3753 return 0;
3749} 3754}
3750 3755
@@ -4199,9 +4204,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
4199 ugeth->ug_info = ug_info; 4204 ugeth->ug_info = ug_info;
4200 ugeth->dev = dev; 4205 ugeth->dev = dev;
4201 4206
4202 mac_addr = get_property(np, "mac-address", NULL); 4207 mac_addr = of_get_mac_address(np);
4203 if (mac_addr == NULL)
4204 mac_addr = get_property(np, "local-mac-address", NULL);
4205 if (mac_addr) 4208 if (mac_addr)
4206 memcpy(dev->dev_addr, mac_addr, 6); 4209 memcpy(dev->dev_addr, mac_addr, 6);
4207 4210
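Note: the ucc_geth hunks make the transmit path advance a local bd pointer, bail out with NETDEV_TX_BUSY while the ring is still full, and only then publish the new position in ugeth->txBd[txQ]; ucc_geth_tx() gets the same treatment for confBd and also reloads bd_status from the descriptor it actually advanced to. The ring-advance rule both paths now share, as a hedged sketch (the helper name is assumed; T_W and struct qe_bd are the driver's):

        /* Illustrative sketch: step to the next buffer descriptor, wrapping to
         * the start of the ring when the current BD carries the wrap bit. */
        static u8 *next_bd(u8 *bd, u32 bd_status, u8 *ring_base)
        {
                if (bd_status & T_W)                  /* last BD in the ring */
                        return ring_base;
                return bd + sizeof(struct qe_bd);     /* otherwise the adjacent BD */
        }

In start_xmit the new position is only committed after the full-ring check, which is why the driver can now return NETDEV_TX_BUSY without corrupting its ring state.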
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index e91b5a84a2..5b82e4fd0d 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -94,7 +94,6 @@
94#include <linux/device.h> 94#include <linux/device.h>
95 95
96#undef COSA_SLOW_IO /* for testing purposes only */ 96#undef COSA_SLOW_IO /* for testing purposes only */
97#undef REALLY_SLOW_IO
98 97
99#include <asm/io.h> 98#include <asm/io.h>
100#include <asm/dma.h> 99#include <asm/dma.h>
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index 9040d7cf65..65ad2e24ca 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -38,7 +38,7 @@
38#include <linux/hdlc.h> 38#include <linux/hdlc.h>
39 39
40 40
41static const char* version = "HDLC support module revision 1.20"; 41static const char* version = "HDLC support module revision 1.21";
42 42
43#undef DEBUG_LINK 43#undef DEBUG_LINK
44 44
@@ -222,19 +222,31 @@ int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
222 return -EINVAL; 222 return -EINVAL;
223} 223}
224 224
225static void hdlc_setup_dev(struct net_device *dev)
226{
227 /* Re-init all variables changed by HDLC protocol drivers,
228 * including ether_setup() called from hdlc_raw_eth.c.
229 */
230 dev->get_stats = hdlc_get_stats;
231 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
232 dev->mtu = HDLC_MAX_MTU;
233 dev->type = ARPHRD_RAWHDLC;
234 dev->hard_header_len = 16;
235 dev->addr_len = 0;
236 dev->hard_header = NULL;
237 dev->rebuild_header = NULL;
238 dev->set_mac_address = NULL;
239 dev->hard_header_cache = NULL;
240 dev->header_cache_update = NULL;
241 dev->change_mtu = hdlc_change_mtu;
242 dev->hard_header_parse = NULL;
243}
244
225static void hdlc_setup(struct net_device *dev) 245static void hdlc_setup(struct net_device *dev)
226{ 246{
227 hdlc_device *hdlc = dev_to_hdlc(dev); 247 hdlc_device *hdlc = dev_to_hdlc(dev);
228 248
229 dev->get_stats = hdlc_get_stats; 249 hdlc_setup_dev(dev);
230 dev->change_mtu = hdlc_change_mtu;
231 dev->mtu = HDLC_MAX_MTU;
232
233 dev->type = ARPHRD_RAWHDLC;
234 dev->hard_header_len = 16;
235
236 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
237
238 hdlc->carrier = 1; 250 hdlc->carrier = 1;
239 hdlc->open = 0; 251 hdlc->open = 0;
240 spin_lock_init(&hdlc->state_lock); 252 spin_lock_init(&hdlc->state_lock);
@@ -294,6 +306,7 @@ void detach_hdlc_protocol(struct net_device *dev)
294 } 306 }
295 kfree(hdlc->state); 307 kfree(hdlc->state);
296 hdlc->state = NULL; 308 hdlc->state = NULL;
309 hdlc_setup_dev(dev);
297} 310}
298 311
299 312
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index b0bc5ddcf1..c9664fd8a9 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -365,10 +365,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
365 memcpy(&state(hdlc)->settings, &new_settings, size); 365 memcpy(&state(hdlc)->settings, &new_settings, size);
366 dev->hard_start_xmit = hdlc->xmit; 366 dev->hard_start_xmit = hdlc->xmit;
367 dev->hard_header = cisco_hard_header; 367 dev->hard_header = cisco_hard_header;
368 dev->hard_header_cache = NULL;
369 dev->type = ARPHRD_CISCO; 368 dev->type = ARPHRD_CISCO;
370 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
371 dev->addr_len = 0;
372 netif_dormant_on(dev); 369 netif_dormant_on(dev);
373 return 0; 370 return 0;
374 } 371 }
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index b45ab680d2..c6c3c757d6 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1289,10 +1289,7 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
1289 memcpy(&state(hdlc)->settings, &new_settings, size); 1289 memcpy(&state(hdlc)->settings, &new_settings, size);
1290 1290
1291 dev->hard_start_xmit = hdlc->xmit; 1291 dev->hard_start_xmit = hdlc->xmit;
1292 dev->hard_header = NULL;
1293 dev->type = ARPHRD_FRAD; 1292 dev->type = ARPHRD_FRAD;
1294 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1295 dev->addr_len = 0;
1296 return 0; 1293 return 0;
1297 1294
1298 case IF_PROTO_FR_ADD_PVC: 1295 case IF_PROTO_FR_ADD_PVC:
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index e9f717070f..4591437dd2 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -127,9 +127,7 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
127 if (result) 127 if (result)
128 return result; 128 return result;
129 dev->hard_start_xmit = hdlc->xmit; 129 dev->hard_start_xmit = hdlc->xmit;
130 dev->hard_header = NULL;
131 dev->type = ARPHRD_PPP; 130 dev->type = ARPHRD_PPP;
132 dev->addr_len = 0;
133 netif_dormant_off(dev); 131 netif_dormant_off(dev);
134 return 0; 132 return 0;
135 } 133 }
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index fe3cae5c6b..e23bc66562 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -88,10 +88,7 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
88 return result; 88 return result;
89 memcpy(hdlc->state, &new_settings, size); 89 memcpy(hdlc->state, &new_settings, size);
90 dev->hard_start_xmit = hdlc->xmit; 90 dev->hard_start_xmit = hdlc->xmit;
91 dev->hard_header = NULL;
92 dev->type = ARPHRD_RAWHDLC; 91 dev->type = ARPHRD_RAWHDLC;
93 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
94 dev->addr_len = 0;
95 netif_dormant_off(dev); 92 netif_dormant_off(dev);
96 return 0; 93 return 0;
97 } 94 }
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index e4bb9f8ad4..cd7b22f50e 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -215,9 +215,7 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
215 x25_rx, 0)) != 0) 215 x25_rx, 0)) != 0)
216 return result; 216 return result;
217 dev->hard_start_xmit = x25_xmit; 217 dev->hard_start_xmit = x25_xmit;
218 dev->hard_header = NULL;
219 dev->type = ARPHRD_X25; 218 dev->type = ARPHRD_X25;
220 dev->addr_len = 0;
221 netif_dormant_off(dev); 219 netif_dormant_off(dev);
222 return 0; 220 return 0;
223 } 221 }
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 73c831a3b7..e594af46ff 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -2733,8 +2733,9 @@ static int bcm43xx_probe_cores(struct bcm43xx_private *bcm)
2733 * dangling pins on the second core. Be careful 2733 * dangling pins on the second core. Be careful
2734 * and ignore these cores here. 2734 * and ignore these cores here.
2735 */ 2735 */
2736 if (bcm->pci_dev->device != 0x4324) { 2736 if (1 /*bcm->pci_dev->device != 0x4324*/ ) {
2737 dprintk(KERN_INFO PFX "Ignoring additional 802.11 core.\n"); 2737 /* TODO: A PHY */
2738 dprintk(KERN_INFO PFX "Ignoring additional 802.11a core.\n");
2738 continue; 2739 continue;
2739 } 2740 }
2740 } 2741 }
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index c250f08c8d..ce9230b2f6 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -26,7 +26,6 @@
26 * Tested with Planet AP in 2.5.73-bk, 216 Kbytes/s in Infrastructure mode 26 * Tested with Planet AP in 2.5.73-bk, 216 Kbytes/s in Infrastructure mode
27 * with a SMP machine (dual pentium 100), using pktgen, 432 pps (pkt_size = 60) 27 * with a SMP machine (dual pentium 100), using pktgen, 432 pps (pkt_size = 60)
28 */ 28 */
29#undef REALLY_SLOW_IO /* most systems can safely undef this */
30 29
31#include <linux/delay.h> 30#include <linux/delay.h>
32#include <linux/types.h> 31#include <linux/types.h>
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 68555c11f5..01869b1782 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -38,6 +38,36 @@ static int msi_cache_init(void)
38 return 0; 38 return 0;
39} 39}
40 40
41static void msi_set_enable(struct pci_dev *dev, int enable)
42{
43 int pos;
44 u16 control;
45
46 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
47 if (pos) {
48 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
49 control &= ~PCI_MSI_FLAGS_ENABLE;
50 if (enable)
51 control |= PCI_MSI_FLAGS_ENABLE;
52 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
53 }
54}
55
56static void msix_set_enable(struct pci_dev *dev, int enable)
57{
58 int pos;
59 u16 control;
60
61 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
62 if (pos) {
63 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
64 control &= ~PCI_MSIX_FLAGS_ENABLE;
65 if (enable)
66 control |= PCI_MSIX_FLAGS_ENABLE;
67 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
68 }
69}
70
41static void msi_set_mask_bit(unsigned int irq, int flag) 71static void msi_set_mask_bit(unsigned int irq, int flag)
42{ 72{
43 struct msi_desc *entry; 73 struct msi_desc *entry;
@@ -55,6 +85,8 @@ static void msi_set_mask_bit(unsigned int irq, int flag)
55 mask_bits &= ~(1); 85 mask_bits &= ~(1);
56 mask_bits |= flag; 86 mask_bits |= flag;
57 pci_write_config_dword(entry->dev, pos, mask_bits); 87 pci_write_config_dword(entry->dev, pos, mask_bits);
88 } else {
89 msi_set_enable(entry->dev, !flag);
58 } 90 }
59 break; 91 break;
60 case PCI_CAP_ID_MSIX: 92 case PCI_CAP_ID_MSIX:
@@ -192,44 +224,6 @@ static struct msi_desc* alloc_msi_entry(void)
192 return entry; 224 return entry;
193} 225}
194 226
195static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
196{
197 u16 control;
198
199 pci_read_config_word(dev, msi_control_reg(pos), &control);
200 if (type == PCI_CAP_ID_MSI) {
201 /* Set enabled bits to single MSI & enable MSI_enable bit */
202 msi_enable(control, 1);
203 pci_write_config_word(dev, msi_control_reg(pos), control);
204 dev->msi_enabled = 1;
205 } else {
206 msix_enable(control);
207 pci_write_config_word(dev, msi_control_reg(pos), control);
208 dev->msix_enabled = 1;
209 }
210
211 pci_intx(dev, 0); /* disable intx */
212}
213
214void disable_msi_mode(struct pci_dev *dev, int pos, int type)
215{
216 u16 control;
217
218 pci_read_config_word(dev, msi_control_reg(pos), &control);
219 if (type == PCI_CAP_ID_MSI) {
220 /* Set enabled bits to single MSI & enable MSI_enable bit */
221 msi_disable(control);
222 pci_write_config_word(dev, msi_control_reg(pos), control);
223 dev->msi_enabled = 0;
224 } else {
225 msix_disable(control);
226 pci_write_config_word(dev, msi_control_reg(pos), control);
227 dev->msix_enabled = 0;
228 }
229
230 pci_intx(dev, 1); /* enable intx */
231}
232
233#ifdef CONFIG_PM 227#ifdef CONFIG_PM
234static int __pci_save_msi_state(struct pci_dev *dev) 228static int __pci_save_msi_state(struct pci_dev *dev)
235{ 229{
@@ -238,12 +232,11 @@ static int __pci_save_msi_state(struct pci_dev *dev)
238 struct pci_cap_saved_state *save_state; 232 struct pci_cap_saved_state *save_state;
239 u32 *cap; 233 u32 *cap;
240 234
241 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 235 if (!dev->msi_enabled)
242 if (pos <= 0 || dev->no_msi)
243 return 0; 236 return 0;
244 237
245 pci_read_config_word(dev, msi_control_reg(pos), &control); 238 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
246 if (!(control & PCI_MSI_FLAGS_ENABLE)) 239 if (pos <= 0)
247 return 0; 240 return 0;
248 241
249 save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5, 242 save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
@@ -276,13 +269,18 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
276 struct pci_cap_saved_state *save_state; 269 struct pci_cap_saved_state *save_state;
277 u32 *cap; 270 u32 *cap;
278 271
272 if (!dev->msi_enabled)
273 return;
274
279 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI); 275 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
280 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 276 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
281 if (!save_state || pos <= 0) 277 if (!save_state || pos <= 0)
282 return; 278 return;
283 cap = &save_state->data[0]; 279 cap = &save_state->data[0];
284 280
281 pci_intx(dev, 0); /* disable intx */
285 control = cap[i++] >> 16; 282 control = cap[i++] >> 16;
283 msi_set_enable(dev, 0);
286 pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]); 284 pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
287 if (control & PCI_MSI_FLAGS_64BIT) { 285 if (control & PCI_MSI_FLAGS_64BIT) {
288 pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]); 286 pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
@@ -292,7 +290,6 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
292 if (control & PCI_MSI_FLAGS_MASKBIT) 290 if (control & PCI_MSI_FLAGS_MASKBIT)
293 pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]); 291 pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
294 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); 292 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
295 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
296 pci_remove_saved_cap(save_state); 293 pci_remove_saved_cap(save_state);
297 kfree(save_state); 294 kfree(save_state);
298} 295}
@@ -308,13 +305,11 @@ static int __pci_save_msix_state(struct pci_dev *dev)
308 return 0; 305 return 0;
309 306
310 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 307 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
311 if (pos <= 0 || dev->no_msi) 308 if (pos <= 0)
312 return 0; 309 return 0;
313 310
314 /* save the capability */ 311 /* save the capability */
315 pci_read_config_word(dev, msi_control_reg(pos), &control); 312 pci_read_config_word(dev, msi_control_reg(pos), &control);
316 if (!(control & PCI_MSIX_FLAGS_ENABLE))
317 return 0;
318 save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16), 313 save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
319 GFP_KERNEL); 314 GFP_KERNEL);
320 if (!save_state) { 315 if (!save_state) {
@@ -376,6 +371,8 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
376 return; 371 return;
377 372
378 /* route the table */ 373 /* route the table */
374 pci_intx(dev, 0); /* disable intx */
375 msix_set_enable(dev, 0);
379 irq = head = dev->first_msi_irq; 376 irq = head = dev->first_msi_irq;
380 while (head != tail) { 377 while (head != tail) {
381 entry = get_irq_msi(irq); 378 entry = get_irq_msi(irq);
@@ -386,7 +383,6 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
386 } 383 }
387 384
388 pci_write_config_word(dev, msi_control_reg(pos), save); 385 pci_write_config_word(dev, msi_control_reg(pos), save);
389 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
390} 386}
391 387
392void pci_restore_msi_state(struct pci_dev *dev) 388void pci_restore_msi_state(struct pci_dev *dev)
@@ -411,6 +407,8 @@ static int msi_capability_init(struct pci_dev *dev)
411 int pos, irq; 407 int pos, irq;
412 u16 control; 408 u16 control;
413 409
410 msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */
411
414 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 412 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
415 pci_read_config_word(dev, msi_control_reg(pos), &control); 413 pci_read_config_word(dev, msi_control_reg(pos), &control);
416 /* MSI Entry Initialization */ 414 /* MSI Entry Initialization */
@@ -454,7 +452,9 @@ static int msi_capability_init(struct pci_dev *dev)
454 set_irq_msi(irq, entry); 452 set_irq_msi(irq, entry);
455 453
456 /* Set MSI enabled bits */ 454 /* Set MSI enabled bits */
457 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI); 455 pci_intx(dev, 0); /* disable intx */
456 msi_set_enable(dev, 1);
457 dev->msi_enabled = 1;
458 458
459 dev->irq = irq; 459 dev->irq = irq;
460 return 0; 460 return 0;
@@ -481,6 +481,8 @@ static int msix_capability_init(struct pci_dev *dev,
481 u8 bir; 481 u8 bir;
482 void __iomem *base; 482 void __iomem *base;
483 483
484 msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */
485
484 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 486 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
485 /* Request & Map MSI-X table region */ 487 /* Request & Map MSI-X table region */
486 pci_read_config_word(dev, msi_control_reg(pos), &control); 488 pci_read_config_word(dev, msi_control_reg(pos), &control);
@@ -549,7 +551,9 @@ static int msix_capability_init(struct pci_dev *dev,
549 } 551 }
550 dev->first_msi_irq = entries[0].vector; 552 dev->first_msi_irq = entries[0].vector;
551 /* Set MSI-X enabled bits */ 553 /* Set MSI-X enabled bits */
552 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); 554 pci_intx(dev, 0); /* disable intx */
555 msix_set_enable(dev, 1);
556 dev->msix_enabled = 1;
553 557
554 return 0; 558 return 0;
555} 559}
@@ -611,12 +615,11 @@ int pci_enable_msi(struct pci_dev* dev)
611 WARN_ON(!!dev->msi_enabled); 615 WARN_ON(!!dev->msi_enabled);
612 616
613 /* Check whether driver already requested for MSI-X irqs */ 617 /* Check whether driver already requested for MSI-X irqs */
614 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 618 if (dev->msix_enabled) {
615 if (pos > 0 && dev->msix_enabled) { 619 printk(KERN_INFO "PCI: %s: Can't enable MSI. "
616 printk(KERN_INFO "PCI: %s: Can't enable MSI. " 620 "Device already has MSI-X enabled\n",
617 "Device already has MSI-X enabled\n", 621 pci_name(dev));
618 pci_name(dev)); 622 return -EINVAL;
619 return -EINVAL;
620 } 623 }
621 status = msi_capability_init(dev); 624 status = msi_capability_init(dev);
622 return status; 625 return status;
@@ -625,8 +628,7 @@ int pci_enable_msi(struct pci_dev* dev)
625void pci_disable_msi(struct pci_dev* dev) 628void pci_disable_msi(struct pci_dev* dev)
626{ 629{
627 struct msi_desc *entry; 630 struct msi_desc *entry;
628 int pos, default_irq; 631 int default_irq;
629 u16 control;
630 632
631 if (!pci_msi_enable) 633 if (!pci_msi_enable)
632 return; 634 return;
@@ -636,16 +638,9 @@ void pci_disable_msi(struct pci_dev* dev)
636 if (!dev->msi_enabled) 638 if (!dev->msi_enabled)
637 return; 639 return;
638 640
639 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 641 msi_set_enable(dev, 0);
640 if (!pos) 642 pci_intx(dev, 1); /* enable intx */
641 return; 643 dev->msi_enabled = 0;
642
643 pci_read_config_word(dev, msi_control_reg(pos), &control);
644 if (!(control & PCI_MSI_FLAGS_ENABLE))
645 return;
646
647
648 disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
649 644
650 entry = get_irq_msi(dev->first_msi_irq); 645 entry = get_irq_msi(dev->first_msi_irq);
651 if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) { 646 if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
@@ -746,8 +741,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
746 WARN_ON(!!dev->msix_enabled); 741 WARN_ON(!!dev->msix_enabled);
747 742
748 /* Check whether driver already requested for MSI irq */ 743 /* Check whether driver already requested for MSI irq */
749 if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 && 744 if (dev->msi_enabled) {
750 dev->msi_enabled) {
751 printk(KERN_INFO "PCI: %s: Can't enable MSI-X. " 745 printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
752 "Device already has an MSI irq assigned\n", 746 "Device already has an MSI irq assigned\n",
753 pci_name(dev)); 747 pci_name(dev));
@@ -760,8 +754,6 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
760void pci_disable_msix(struct pci_dev* dev) 754void pci_disable_msix(struct pci_dev* dev)
761{ 755{
762 int irq, head, tail = 0, warning = 0; 756 int irq, head, tail = 0, warning = 0;
763 int pos;
764 u16 control;
765 757
766 if (!pci_msi_enable) 758 if (!pci_msi_enable)
767 return; 759 return;
@@ -771,15 +763,9 @@ void pci_disable_msix(struct pci_dev* dev)
771 if (!dev->msix_enabled) 763 if (!dev->msix_enabled)
772 return; 764 return;
773 765
774 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 766 msix_set_enable(dev, 0);
775 if (!pos) 767 pci_intx(dev, 1); /* enable intx */
776 return; 768 dev->msix_enabled = 0;
777
778 pci_read_config_word(dev, msi_control_reg(pos), &control);
779 if (!(control & PCI_MSIX_FLAGS_ENABLE))
780 return;
781
782 disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
783 769
784 irq = head = dev->first_msi_irq; 770 irq = head = dev->first_msi_irq;
785 while (head != tail) { 771 while (head != tail) {
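Note: with enable_msi_mode()/disable_msi_mode() gone, msi.c drives the capability's enable bit directly through msi_set_enable()/msix_set_enable(), disables INTx via pci_intx(), and keeps dev->msi_enabled / dev->msix_enabled as the only bookkeeping. Nothing changes for drivers; a hedged usage sketch of the unchanged driver-side API (the function names here are hypothetical):

        #include <linux/pci.h>
        #include <linux/interrupt.h>

        static irqreturn_t example_isr(int irq, void *data)
        {
                return IRQ_HANDLED;
        }

        /* Illustrative sketch: try MSI at probe time, fall back to the legacy
         * INTx line if it is unavailable; pdev->irq is valid either way. */
        static int example_request_irq(struct pci_dev *pdev)
        {
                if (pci_enable_msi(pdev))
                        dev_info(&pdev->dev, "MSI not available, using INTx\n");

                return request_irq(pdev->irq, example_isr, IRQF_SHARED,
                                   "example", pdev);
        }

        /* Teardown order matters: free_irq(pdev->irq, pdev) first,
         * then pci_disable_msi(pdev). */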
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 1e74e1ee8b..df495300ce 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -881,13 +881,6 @@ pci_disable_device(struct pci_dev *dev)
881 if (atomic_sub_return(1, &dev->enable_cnt) != 0) 881 if (atomic_sub_return(1, &dev->enable_cnt) != 0)
882 return; 882 return;
883 883
884 if (dev->msi_enabled)
885 disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
886 PCI_CAP_ID_MSI);
887 if (dev->msix_enabled)
888 disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
889 PCI_CAP_ID_MSIX);
890
891 pci_read_config_word(dev, PCI_COMMAND, &pci_command); 884 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
892 if (pci_command & PCI_COMMAND_MASTER) { 885 if (pci_command & PCI_COMMAND_MASTER) {
893 pci_command &= ~PCI_COMMAND_MASTER; 886 pci_command &= ~PCI_COMMAND_MASTER;
@@ -1277,6 +1270,33 @@ pci_intx(struct pci_dev *pdev, int enable)
1277 } 1270 }
1278} 1271}
1279 1272
1273/**
1274 * pci_msi_off - disables any msi or msix capabilities
1275 * @pdev: the PCI device to operate on
1276 *
1277 * If you want to use msi see pci_enable_msi and friends.
1278 * This is a lower level primitive that allows us to disable
1279 * msi operation at the device level.
1280 */
1281void pci_msi_off(struct pci_dev *dev)
1282{
1283 int pos;
1284 u16 control;
1285
1286 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
1287 if (pos) {
1288 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
1289 control &= ~PCI_MSI_FLAGS_ENABLE;
1290 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
1291 }
1292 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1293 if (pos) {
1294 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
1295 control &= ~PCI_MSIX_FLAGS_ENABLE;
1296 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
1297 }
1298}
1299
1280#ifndef HAVE_ARCH_PCI_SET_DMA_MASK 1300#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
1281/* 1301/*
1282 * These can be overridden by arch-specific implementations 1302 * These can be overridden by arch-specific implementations
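Note: pci_msi_off() is deliberately a low-level primitive; it clears the MSI and MSI-X enable bits straight in config space without touching dev->msi_enabled or any driver state, so it is usable from early fixups. A hedged sketch of the intended call site, mirroring the PXH quirk further down (the quirk name here is hypothetical):

        /* Illustrative sketch: an early fixup that must silence message
         * signalled interrupts before a driver can bind to the device. */
        static void __devinit example_msi_quirk(struct pci_dev *dev)
        {
                pci_msi_off(dev);   /* clear the MSI and MSI-X enable bits */
                dev->no_msi = 1;    /* and keep drivers from re-enabling them */
        }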
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index a4f2d58062..ae7a975995 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -46,10 +46,8 @@ extern struct rw_semaphore pci_bus_sem;
46extern unsigned int pci_pm_d3_delay; 46extern unsigned int pci_pm_d3_delay;
47 47
48#ifdef CONFIG_PCI_MSI 48#ifdef CONFIG_PCI_MSI
49void disable_msi_mode(struct pci_dev *dev, int pos, int type);
50void pci_no_msi(void); 49void pci_no_msi(void);
51#else 50#else
52static inline void disable_msi_mode(struct pci_dev *dev, int pos, int type) { }
53static inline void pci_no_msi(void) { } 51static inline void pci_no_msi(void) { }
54#endif 52#endif
55 53
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 2fe1d690eb..a4a96826d9 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -682,7 +682,34 @@ static void pci_read_irq(struct pci_dev *dev)
682 dev->irq = irq; 682 dev->irq = irq;
683} 683}
684 684
685#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) 685static void change_legacy_io_resource(struct pci_dev * dev, unsigned index,
686 unsigned start, unsigned end)
687{
688 unsigned base = start & PCI_BASE_ADDRESS_IO_MASK;
689 unsigned len = (end | ~PCI_BASE_ADDRESS_IO_MASK) - base + 1;
690
691 /*
692 * Some X versions get confused when the BARs reported through
693 * /sys or /proc differ from those seen in config space, thus
694 * try to update the config space values, too.
695 */
696 if (!(pci_resource_flags(dev, index) & IORESOURCE_IO))
697 printk(KERN_WARNING "%s: cannot adjust BAR%u (not I/O)\n",
698 pci_name(dev), index);
699 else if (pci_resource_len(dev, index) != len)
700 printk(KERN_WARNING "%s: cannot adjust BAR%u (size %04X)\n",
701 pci_name(dev), index, (unsigned)pci_resource_len(dev, index));
702 else {
703 printk(KERN_INFO "%s: trying to change BAR%u from %04X to %04X\n",
704 pci_name(dev), index,
705 (unsigned)pci_resource_start(dev, index), base);
706 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + index * 4, base);
707 }
708 pci_resource_start(dev, index) = start;
709 pci_resource_end(dev, index) = end;
710 pci_resource_flags(dev, index) =
711 IORESOURCE_IO | IORESOURCE_PCI_FIXED | PCI_BASE_ADDRESS_SPACE_IO;
712}
686 713
687/** 714/**
688 * pci_setup_device - fill in class and map information of a device 715 * pci_setup_device - fill in class and map information of a device
@@ -735,20 +762,12 @@ static int pci_setup_device(struct pci_dev * dev)
735 u8 progif; 762 u8 progif;
736 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif); 763 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
737 if ((progif & 1) == 0) { 764 if ((progif & 1) == 0) {
738 dev->resource[0].start = 0x1F0; 765 change_legacy_io_resource(dev, 0, 0x1F0, 0x1F7);
739 dev->resource[0].end = 0x1F7; 766 change_legacy_io_resource(dev, 1, 0x3F6, 0x3F6);
740 dev->resource[0].flags = LEGACY_IO_RESOURCE;
741 dev->resource[1].start = 0x3F6;
742 dev->resource[1].end = 0x3F6;
743 dev->resource[1].flags = LEGACY_IO_RESOURCE;
744 } 767 }
745 if ((progif & 4) == 0) { 768 if ((progif & 4) == 0) {
746 dev->resource[2].start = 0x170; 769 change_legacy_io_resource(dev, 2, 0x170, 0x177);
747 dev->resource[2].end = 0x177; 770 change_legacy_io_resource(dev, 3, 0x376, 0x376);
748 dev->resource[2].flags = LEGACY_IO_RESOURCE;
749 dev->resource[3].start = 0x376;
750 dev->resource[3].end = 0x376;
751 dev->resource[3].flags = LEGACY_IO_RESOURCE;
752 } 771 }
753 } 772 }
754 break; 773 break;
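Note: change_legacy_io_resource() also tries to write the legacy command-block addresses back into the BARs so that config space matches what /sys and /proc report. The length it compares against follows from the I/O BAR decode granularity; a worked example for the calls above:

        /* Worked example (PCI_BASE_ADDRESS_IO_MASK is ~0x3, so I/O BARs are
         * decoded in 4-byte granules):
         *
         *   change_legacy_io_resource(dev, 0, 0x1F0, 0x1F7):
         *       base = 0x1F0 & ~0x3               = 0x1F0
         *       len  = (0x1F7 | 0x3) - 0x1F0 + 1  = 8   ports
         *
         *   change_legacy_io_resource(dev, 1, 0x3F6, 0x3F6):
         *       base = 0x3F6 & ~0x3               = 0x3F4
         *       len  = (0x3F6 | 0x3) - 0x3F4 + 1  = 4   (one 4-byte granule)
         */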
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 1e6eda25c0..7f94fc098c 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1218,45 +1218,68 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_a
1218 * do this early on to make the additional device appear during 1218 * do this early on to make the additional device appear during
1219 * the PCI scanning. 1219 * the PCI scanning.
1220 */ 1220 */
1221 1221static void quirk_jmicron_ata(struct pci_dev *pdev)
1222static void quirk_jmicron_dualfn(struct pci_dev *pdev)
1223{ 1222{
1224 u32 conf; 1223 u32 conf1, conf5, class;
1225 u8 hdr; 1224 u8 hdr;
1226 1225
1227 /* Only poke fn 0 */ 1226 /* Only poke fn 0 */
1228 if (PCI_FUNC(pdev->devfn)) 1227 if (PCI_FUNC(pdev->devfn))
1229 return; 1228 return;
1230 1229
1231 switch(pdev->device) { 1230 pci_read_config_dword(pdev, 0x40, &conf1);
1232 case PCI_DEVICE_ID_JMICRON_JMB365: 1231 pci_read_config_dword(pdev, 0x80, &conf5);
1233 case PCI_DEVICE_ID_JMICRON_JMB366:
1234 /* Redirect IDE second PATA port to the right spot */
1235 pci_read_config_dword(pdev, 0x80, &conf);
1236 conf |= (1 << 24);
1237 /* Fall through */
1238 pci_write_config_dword(pdev, 0x80, conf);
1239 case PCI_DEVICE_ID_JMICRON_JMB361:
1240 case PCI_DEVICE_ID_JMICRON_JMB363:
1241 pci_read_config_dword(pdev, 0x40, &conf);
1242 /* Enable dual function mode, AHCI on fn 0, IDE fn1 */
1243 /* Set the class codes correctly and then direct IDE 0 */
1244 conf &= ~0x000FF200; /* Clear bit 9 and 12-19 */
1245 conf |= 0x00C2A102; /* Set 1, 8, 13, 15, 17, 22, 23 */
1246 pci_write_config_dword(pdev, 0x40, conf);
1247
1248 /* Reconfigure so that the PCI scanner discovers the
1249 device is now multifunction */
1250
1251 pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr);
1252 pdev->hdr_type = hdr & 0x7f;
1253 pdev->multifunction = !!(hdr & 0x80);
1254 1232
1255 break; 1233 conf1 &= ~0x00CFF302; /* Clear bit 1, 8, 9, 12-19, 22, 23 */
1234 conf5 &= ~(1 << 24); /* Clear bit 24 */
1235
1236 switch (pdev->device) {
1237 case PCI_DEVICE_ID_JMICRON_JMB360:
1238 /* The controller should be in single function ahci mode */
1239 conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */
1240 break;
1241
1242 case PCI_DEVICE_ID_JMICRON_JMB365:
1243 case PCI_DEVICE_ID_JMICRON_JMB366:
1244 /* Redirect IDE second PATA port to the right spot */
1245 conf5 |= (1 << 24);
1246 /* Fall through */
1247 case PCI_DEVICE_ID_JMICRON_JMB361:
1248 case PCI_DEVICE_ID_JMICRON_JMB363:
1249 /* Enable dual function mode, AHCI on fn 0, IDE fn1 */
1250 /* Set the class codes correctly and then direct IDE 0 */
1251 conf1 |= 0x00C2A102; /* Set 1, 8, 13, 15, 17, 22, 23 */
1252 break;
1253
1254 case PCI_DEVICE_ID_JMICRON_JMB368:
1255 /* The controller should be in single function IDE mode */
1256 conf1 |= 0x00C00000; /* Set 22, 23 */
1257 break;
1256 } 1258 }
1257} 1259
1258DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, quirk_jmicron_dualfn); 1260 pci_write_config_dword(pdev, 0x40, conf1);
1259DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, quirk_jmicron_dualfn); 1261 pci_write_config_dword(pdev, 0x80, conf5);
1262
1263 /* Update pdev accordingly */
1264 pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr);
1265 pdev->hdr_type = hdr & 0x7f;
1266 pdev->multifunction = !!(hdr & 0x80);
1267
1268 pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class);
1269 pdev->class = class >> 8;
1270}
1271DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
1272DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
1273DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
1274DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
1275DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
1276DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
1277DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
1278DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
1279DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
1280DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
1281DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
1282DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
1260 1283
1261#endif 1284#endif
1262 1285
@@ -1415,8 +1438,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quir
1415 */ 1438 */
1416static void __devinit quirk_pcie_pxh(struct pci_dev *dev) 1439static void __devinit quirk_pcie_pxh(struct pci_dev *dev)
1417{ 1440{
1418 disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), 1441 pci_msi_off(dev);
1419 PCI_CAP_ID_MSI); 1442
1420 dev->no_msi = 1; 1443 dev->no_msi = 1;
1421 1444
1422 printk(KERN_WARNING "PCI: PXH quirk detected, " 1445 printk(KERN_WARNING "PCI: PXH quirk detected, "
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index deef29646e..95826b92ca 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -207,10 +207,12 @@ config RTC_DRV_PCF8563
207 207
208config RTC_DRV_PCF8583 208config RTC_DRV_PCF8583
209 tristate "Philips PCF8583" 209 tristate "Philips PCF8583"
210 depends on RTC_CLASS && I2C 210 depends on RTC_CLASS && I2C && ARCH_RPC
211 help 211 help
212 If you say yes here you get support for the 212 If you say yes here you get support for the Philips PCF8583
213 Philips PCF8583 RTC chip. 213 RTC chip found on Acorn RiscPCs. This driver supports the
214 platform specific method of retrieving the current year from
215 the RTC's SRAM.
214 216
215 This driver can also be built as a module. If so, the module 217 This driver can also be built as a module. If so, the module
216 will be called rtc-pcf8583. 218 will be called rtc-pcf8583.
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 7a0d8ee2de..04aaa63472 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -113,10 +113,16 @@ EXPORT_SYMBOL_GPL(rtc_device_register);
113 */ 113 */
114void rtc_device_unregister(struct rtc_device *rtc) 114void rtc_device_unregister(struct rtc_device *rtc)
115{ 115{
116 mutex_lock(&rtc->ops_lock); 116 if (class_device_get(&rtc->class_dev) != NULL) {
117 rtc->ops = NULL; 117 mutex_lock(&rtc->ops_lock);
118 mutex_unlock(&rtc->ops_lock); 118 /* remove innards of this RTC, then disable it, before
119 class_device_unregister(&rtc->class_dev); 119 * letting any rtc_class_open() users access it again
120 */
121 class_device_unregister(&rtc->class_dev);
122 rtc->ops = NULL;
123 mutex_unlock(&rtc->ops_lock);
124 class_device_put(&rtc->class_dev);
125 }
120} 126}
121EXPORT_SYMBOL_GPL(rtc_device_unregister); 127EXPORT_SYMBOL_GPL(rtc_device_unregister);
122 128
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 6f11f6dfdd..ef40df0f16 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -179,7 +179,7 @@ struct class_device *rtc_class_open(char *name)
179 down(&rtc_class->sem); 179 down(&rtc_class->sem);
180 list_for_each_entry(class_dev_tmp, &rtc_class->children, node) { 180 list_for_each_entry(class_dev_tmp, &rtc_class->children, node) {
181 if (strncmp(class_dev_tmp->class_id, name, BUS_ID_SIZE) == 0) { 181 if (strncmp(class_dev_tmp->class_id, name, BUS_ID_SIZE) == 0) {
182 class_dev = class_dev_tmp; 182 class_dev = class_device_get(class_dev_tmp);
183 break; 183 break;
184 } 184 }
185 } 185 }
@@ -197,6 +197,7 @@ EXPORT_SYMBOL_GPL(rtc_class_open);
197void rtc_class_close(struct class_device *class_dev) 197void rtc_class_close(struct class_device *class_dev)
198{ 198{
199 module_put(to_rtc_device(class_dev)->owner); 199 module_put(to_rtc_device(class_dev)->owner);
200 class_device_put(class_dev);
200} 201}
201EXPORT_SYMBOL_GPL(rtc_class_close); 202EXPORT_SYMBOL_GPL(rtc_class_close);
202 203
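Note: the rtc class and interface hunks turn rtc_class_open()/rtc_class_close() into a proper get/put pair and let rtc_device_unregister() hold its own reference while it clears rtc->ops, so a concurrent opener can no longer race the teardown. From a caller's perspective the pairing looks like the sketch below (an illustrative fragment, not code from the patch):

        #include <linux/rtc.h>

        /* Illustrative sketch: look up an RTC by class id, read it, and drop
         * the reference that rtc_class_open() now takes. */
        static void example_read_rtc(void)
        {
                struct class_device *cd = rtc_class_open("rtc0");
                struct rtc_time tm;

                if (!cd)
                        return;
                if (rtc_read_time(cd, &tm) == 0)
                        printk(KERN_INFO "rtc0: %04d-%02d-%02d\n",
                               tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
                rtc_class_close(cd);    /* module_put() + class_device_put() */
        }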
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index 5875ebb8c7..d48b033745 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -40,7 +40,7 @@ struct pcf8583 {
40#define CTRL_ALARM 0x02 40#define CTRL_ALARM 0x02
41#define CTRL_TIMER 0x01 41#define CTRL_TIMER 0x01
42 42
43static unsigned short normal_i2c[] = { I2C_CLIENT_END }; 43static unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END };
44 44
45/* Module parameters */ 45/* Module parameters */
46I2C_CLIENT_INSMOD; 46I2C_CLIENT_INSMOD;
@@ -81,11 +81,11 @@ static int pcf8583_get_datetime(struct i2c_client *client, struct rtc_time *dt)
81 buf[4] &= 0x3f; 81 buf[4] &= 0x3f;
82 buf[5] &= 0x1f; 82 buf[5] &= 0x1f;
83 83
84 dt->tm_sec = BCD_TO_BIN(buf[1]); 84 dt->tm_sec = BCD2BIN(buf[1]);
85 dt->tm_min = BCD_TO_BIN(buf[2]); 85 dt->tm_min = BCD2BIN(buf[2]);
86 dt->tm_hour = BCD_TO_BIN(buf[3]); 86 dt->tm_hour = BCD2BIN(buf[3]);
87 dt->tm_mday = BCD_TO_BIN(buf[4]); 87 dt->tm_mday = BCD2BIN(buf[4]);
88 dt->tm_mon = BCD_TO_BIN(buf[5]); 88 dt->tm_mon = BCD2BIN(buf[5]) - 1;
89 } 89 }
90 90
91 return ret == 2 ? 0 : -EIO; 91 return ret == 2 ? 0 : -EIO;
@@ -99,14 +99,14 @@ static int pcf8583_set_datetime(struct i2c_client *client, struct rtc_time *dt,
99 buf[0] = 0; 99 buf[0] = 0;
100 buf[1] = get_ctrl(client) | 0x80; 100 buf[1] = get_ctrl(client) | 0x80;
101 buf[2] = 0; 101 buf[2] = 0;
102 buf[3] = BIN_TO_BCD(dt->tm_sec); 102 buf[3] = BIN2BCD(dt->tm_sec);
103 buf[4] = BIN_TO_BCD(dt->tm_min); 103 buf[4] = BIN2BCD(dt->tm_min);
104 buf[5] = BIN_TO_BCD(dt->tm_hour); 104 buf[5] = BIN2BCD(dt->tm_hour);
105 105
106 if (datetoo) { 106 if (datetoo) {
107 len = 8; 107 len = 8;
108 buf[6] = BIN_TO_BCD(dt->tm_mday) | (dt->tm_year << 6); 108 buf[6] = BIN2BCD(dt->tm_mday) | (dt->tm_year << 6);
109 buf[7] = BIN_TO_BCD(dt->tm_mon) | (dt->tm_wday << 5); 109 buf[7] = BIN2BCD(dt->tm_mon + 1) | (dt->tm_wday << 5);
110 } 110 }
111 111
112 ret = i2c_master_send(client, (char *)buf, len); 112 ret = i2c_master_send(client, (char *)buf, len);
@@ -226,7 +226,7 @@ static int pcf8583_rtc_read_time(struct device *dev, struct rtc_time *tm)
226 */ 226 */
227 year_offset += 4; 227 year_offset += 4;
228 228
229 tm->tm_year = real_year + year_offset + year[1] * 100; 229 tm->tm_year = (real_year + year_offset + year[1] * 100) - 1900;
230 230
231 return 0; 231 return 0;
232} 232}
@@ -237,6 +237,7 @@ static int pcf8583_rtc_set_time(struct device *dev, struct rtc_time *tm)
237 unsigned char year[2], chk; 237 unsigned char year[2], chk;
238 struct rtc_mem cmos_year = { CMOS_YEAR, sizeof(year), year }; 238 struct rtc_mem cmos_year = { CMOS_YEAR, sizeof(year), year };
239 struct rtc_mem cmos_check = { CMOS_CHECKSUM, 1, &chk }; 239 struct rtc_mem cmos_check = { CMOS_CHECKSUM, 1, &chk };
240 unsigned int proper_year = tm->tm_year + 1900;
240 int ret; 241 int ret;
241 242
242 /* 243 /*
@@ -258,8 +259,8 @@ static int pcf8583_rtc_set_time(struct device *dev, struct rtc_time *tm)
258 259
259 chk -= year[1] + year[0]; 260 chk -= year[1] + year[0];
260 261
261 year[1] = tm->tm_year / 100; 262 year[1] = proper_year / 100;
262 year[0] = tm->tm_year % 100; 263 year[0] = proper_year % 100;
263 264
264 chk += year[1] + year[0]; 265 chk += year[1] + year[0];
265 266
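Note: besides switching to the generic BCD2BIN()/BIN2BCD() helpers, the rtc-pcf8583 changes adopt the struct rtc_time conventions the RTC class expects: tm_mon runs 0 to 11 and tm_year counts years since 1900, which is where the new "- 1" / "+ 1" on the month and the "+/- 1900" on the year come from. A small worked example of the conversions:

        /* Worked example:
         *   BCD2BIN(0x59) == 59            a seconds register of 0x59 is 59 s
         *   BIN2BCD(23)   == 0x23          23 hours written back as 0x23
         *
         * and for 15 March 2007 in struct rtc_time:
         *   tm_mday == 15
         *   tm_mon  == 2                   months are 0..11, so March is 2
         *   tm_year == 107                 years since 1900, i.e. 2007 - 1900
         */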
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 4b8a95fba1..a1dc8c466e 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -461,6 +461,7 @@ int dasd_eer_enable(struct dasd_device *device)
461 cqr->device = device; 461 cqr->device = device;
462 cqr->retries = 255; 462 cqr->retries = 255;
463 cqr->expires = 10 * HZ; 463 cqr->expires = 10 * HZ;
464 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
464 465
465 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SNSS; 466 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SNSS;
466 cqr->cpaddr->count = SNSS_DATA_SIZE; 467 cqr->cpaddr->count = SNSS_DATA_SIZE;
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 7a76ec413a..2a1af4e60b 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -647,7 +647,10 @@ tape_std_mtcompression(struct tape_device *device, int mt_count)
647 return PTR_ERR(request); 647 return PTR_ERR(request);
648 request->op = TO_NOP; 648 request->op = TO_NOP;
649 /* setup ccws */ 649 /* setup ccws */
650 *device->modeset_byte = (mt_count == 0) ? 0x00 : 0x08; 650 if (mt_count == 0)
651 *device->modeset_byte &= ~0x08;
652 else
653 *device->modeset_byte |= 0x08;
651 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); 654 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
652 tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); 655 tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
653 /* execute it */ 656 /* execute it */
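Note: the tape_std_mtcompression() fix replaces a blanket assignment of the mode-set byte with a read-modify-write of just the compression bit, so any other mode bits already configured survive the ioctl. A worked example of the difference (assuming the byte currently holds 0xD0):

        /* Worked example with *device->modeset_byte == 0xD0:
         *   old code, enable compression:   *byte = 0x08;    -> 0x08  (0xD0 lost)
         *   new code, enable compression:   *byte |= 0x08;   -> 0xD8
         *   new code, disable compression:  *byte &= ~0x08;  -> 0xD0
         * only bit 0x08 changes now; the rest of the mode byte is preserved. */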
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 51238e7555..089a3ddd62 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -144,8 +144,8 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
144 ret = stsch(sch->schid, &sch->schib); 144 ret = stsch(sch->schid, &sch->schib);
145 if (ret || !sch->schib.pmcw.dnv) 145 if (ret || !sch->schib.pmcw.dnv)
146 return -ENODEV; 146 return -ENODEV;
147 if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0) 147 if (!sch->schib.pmcw.ena)
148 /* Not operational or no activity -> done. */ 148 /* Not operational -> done. */
149 return 0; 149 return 0;
150 /* Stage 1: cancel io. */ 150 /* Stage 1: cancel io. */
151 if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) && 151 if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
@@ -334,20 +334,29 @@ ccw_device_oper_notify(struct work_struct *work)
334 struct ccw_device *cdev; 334 struct ccw_device *cdev;
335 struct subchannel *sch; 335 struct subchannel *sch;
336 int ret; 336 int ret;
337 unsigned long flags;
337 338
338 priv = container_of(work, struct ccw_device_private, kick_work); 339 priv = container_of(work, struct ccw_device_private, kick_work);
339 cdev = priv->cdev; 340 cdev = priv->cdev;
341 spin_lock_irqsave(cdev->ccwlock, flags);
340 sch = to_subchannel(cdev->dev.parent); 342 sch = to_subchannel(cdev->dev.parent);
341 ret = (sch->driver && sch->driver->notify) ? 343 if (sch->driver && sch->driver->notify) {
342 sch->driver->notify(&sch->dev, CIO_OPER) : 0; 344 spin_unlock_irqrestore(cdev->ccwlock, flags);
343 if (!ret) 345 ret = sch->driver->notify(&sch->dev, CIO_OPER);
344 /* Driver doesn't want device back. */ 346 spin_lock_irqsave(cdev->ccwlock, flags);
345 ccw_device_do_unreg_rereg(work); 347 } else
346 else { 348 ret = 0;
349 if (ret) {
347 /* Reenable channel measurements, if needed. */ 350 /* Reenable channel measurements, if needed. */
351 spin_unlock_irqrestore(cdev->ccwlock, flags);
348 cmf_reenable(cdev); 352 cmf_reenable(cdev);
353 spin_lock_irqsave(cdev->ccwlock, flags);
349 wake_up(&cdev->private->wait_q); 354 wake_up(&cdev->private->wait_q);
350 } 355 }
356 spin_unlock_irqrestore(cdev->ccwlock, flags);
357 if (!ret)
358 /* Driver doesn't want device back. */
359 ccw_device_do_unreg_rereg(work);
351} 360}
352 361
353/* 362/*
@@ -534,15 +543,21 @@ ccw_device_nopath_notify(struct work_struct *work)
534 struct ccw_device *cdev; 543 struct ccw_device *cdev;
535 struct subchannel *sch; 544 struct subchannel *sch;
536 int ret; 545 int ret;
546 unsigned long flags;
537 547
538 priv = container_of(work, struct ccw_device_private, kick_work); 548 priv = container_of(work, struct ccw_device_private, kick_work);
539 cdev = priv->cdev; 549 cdev = priv->cdev;
550 spin_lock_irqsave(cdev->ccwlock, flags);
540 sch = to_subchannel(cdev->dev.parent); 551 sch = to_subchannel(cdev->dev.parent);
541 /* Extra sanity. */ 552 /* Extra sanity. */
542 if (sch->lpm) 553 if (sch->lpm)
543 return; 554 goto out_unlock;
544 ret = (sch->driver && sch->driver->notify) ? 555 if (sch->driver && sch->driver->notify) {
545 sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0; 556 spin_unlock_irqrestore(cdev->ccwlock, flags);
557 ret = sch->driver->notify(&sch->dev, CIO_NO_PATH);
558 spin_lock_irqsave(cdev->ccwlock, flags);
559 } else
560 ret = 0;
546 if (!ret) { 561 if (!ret) {
547 if (get_device(&sch->dev)) { 562 if (get_device(&sch->dev)) {
548 /* Driver doesn't want to keep device. */ 563 /* Driver doesn't want to keep device. */
@@ -562,6 +577,8 @@ ccw_device_nopath_notify(struct work_struct *work)
562 cdev->private->state = DEV_STATE_DISCONNECTED; 577 cdev->private->state = DEV_STATE_DISCONNECTED;
563 wake_up(&cdev->private->wait_q); 578 wake_up(&cdev->private->wait_q);
564 } 579 }
580out_unlock:
581 spin_unlock_irqrestore(cdev->ccwlock, flags);
565} 582}
566 583
567void 584void
@@ -607,10 +624,13 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
607 default: 624 default:
608 /* Reset oper notify indication after verify error. */ 625 /* Reset oper notify indication after verify error. */
609 cdev->private->flags.donotify = 0; 626 cdev->private->flags.donotify = 0;
610 PREPARE_WORK(&cdev->private->kick_work, 627 if (cdev->online) {
611 ccw_device_nopath_notify); 628 PREPARE_WORK(&cdev->private->kick_work,
612 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 629 ccw_device_nopath_notify);
613 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 630 queue_work(ccw_device_notify_work,
631 &cdev->private->kick_work);
632 } else
633 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
614 break; 634 break;
615 } 635 }
616} 636}
@@ -756,15 +776,22 @@ static void
756ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event) 776ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
757{ 777{
758 struct subchannel *sch; 778 struct subchannel *sch;
779 int ret;
759 780
760 sch = to_subchannel(cdev->dev.parent); 781 sch = to_subchannel(cdev->dev.parent);
761 if (sch->driver->notify && 782 if (sch->driver->notify) {
762 sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) { 783 spin_unlock_irq(cdev->ccwlock);
763 ccw_device_set_timeout(cdev, 0); 784 ret = sch->driver->notify(&sch->dev,
764 cdev->private->flags.fake_irb = 0; 785 sch->lpm ? CIO_GONE : CIO_NO_PATH);
765 cdev->private->state = DEV_STATE_DISCONNECTED; 786 spin_lock_irq(cdev->ccwlock);
766 wake_up(&cdev->private->wait_q); 787 } else
767 return; 788 ret = 0;
789 if (ret) {
790 ccw_device_set_timeout(cdev, 0);
791 cdev->private->flags.fake_irb = 0;
792 cdev->private->state = DEV_STATE_DISCONNECTED;
793 wake_up(&cdev->private->wait_q);
794 return;
768 } 795 }
769 cdev->private->state = DEV_STATE_NOT_OPER; 796 cdev->private->state = DEV_STATE_NOT_OPER;
770 cio_disable_subchannel(sch); 797 cio_disable_subchannel(sch);
@@ -969,18 +996,12 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
969 996
970 sch = to_subchannel(cdev->dev.parent); 997 sch = to_subchannel(cdev->dev.parent);
971 ccw_device_set_timeout(cdev, 0); 998 ccw_device_set_timeout(cdev, 0);
999 /* Start delayed path verification. */
1000 ccw_device_online_verify(cdev, 0);
972 /* OK, i/o is dead now. Call interrupt handler. */ 1001 /* OK, i/o is dead now. Call interrupt handler. */
973 cdev->private->state = DEV_STATE_ONLINE;
974 if (cdev->handler) 1002 if (cdev->handler)
975 cdev->handler(cdev, cdev->private->intparm, 1003 cdev->handler(cdev, cdev->private->intparm,
976 ERR_PTR(-EIO)); 1004 ERR_PTR(-EIO));
977 if (!sch->lpm) {
978 PREPARE_WORK(&cdev->private->kick_work,
979 ccw_device_nopath_notify);
980 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
981 } else if (cdev->private->flags.doverify)
982 /* Start delayed path verification. */
983 ccw_device_online_verify(cdev, 0);
984} 1005}
985 1006
986static void 1007static void
@@ -993,21 +1014,8 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
993 ccw_device_set_timeout(cdev, 3*HZ); 1014 ccw_device_set_timeout(cdev, 3*HZ);
994 return; 1015 return;
995 } 1016 }
996 if (ret == -ENODEV) { 1017 /* Start delayed path verification. */
997 struct subchannel *sch; 1018 ccw_device_online_verify(cdev, 0);
998
999 sch = to_subchannel(cdev->dev.parent);
1000 if (!sch->lpm) {
1001 PREPARE_WORK(&cdev->private->kick_work,
1002 ccw_device_nopath_notify);
1003 queue_work(ccw_device_notify_work,
1004 &cdev->private->kick_work);
1005 } else
1006 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
1007 return;
1008 }
1009 //FIXME: Can we get here?
1010 cdev->private->state = DEV_STATE_ONLINE;
1011 if (cdev->handler) 1019 if (cdev->handler)
1012 cdev->handler(cdev, cdev->private->intparm, 1020 cdev->handler(cdev, cdev->private->intparm,
1013 ERR_PTR(-EIO)); 1021 ERR_PTR(-EIO));
@@ -1025,26 +1033,11 @@ void device_kill_io(struct subchannel *sch)
1025 cdev->private->state = DEV_STATE_TIMEOUT_KILL; 1033 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
1026 return; 1034 return;
1027 } 1035 }
1028 if (ret == -ENODEV) { 1036 /* Start delayed path verification. */
1029 if (!sch->lpm) { 1037 ccw_device_online_verify(cdev, 0);
1030 PREPARE_WORK(&cdev->private->kick_work,
1031 ccw_device_nopath_notify);
1032 queue_work(ccw_device_notify_work,
1033 &cdev->private->kick_work);
1034 } else
1035 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
1036 return;
1037 }
1038 if (cdev->handler) 1038 if (cdev->handler)
1039 cdev->handler(cdev, cdev->private->intparm, 1039 cdev->handler(cdev, cdev->private->intparm,
1040 ERR_PTR(-EIO)); 1040 ERR_PTR(-EIO));
1041 if (!sch->lpm) {
1042 PREPARE_WORK(&cdev->private->kick_work,
1043 ccw_device_nopath_notify);
1044 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
1045 } else
1046 /* Start delayed path verification. */
1047 ccw_device_online_verify(cdev, 0);
1048} 1041}
1049 1042
1050static void 1043static void
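Note: all three device_fsm notifier hunks follow the same discipline: the ccw device state is inspected and updated under cdev->ccwlock, but sch->driver->notify() may block, so the lock is dropped for the callback and re-taken afterwards, after which the state has to be treated as possibly changed. The shape of the pattern, as a hedged sketch ("event" and "ret" stand in for the per-call specifics):

        /* Illustrative locking sketch for the notifier calls above. */
        spin_lock_irqsave(cdev->ccwlock, flags);
        /* ... examine or adjust device state under the lock ... */
        if (sch->driver && sch->driver->notify) {
                spin_unlock_irqrestore(cdev->ccwlock, flags);
                ret = sch->driver->notify(&sch->dev, event);   /* may block */
                spin_lock_irqsave(cdev->ccwlock, flags);       /* revalidate state */
        } else
                ret = 0;
        /* ... act on ret, still under the lock ... */
        spin_unlock_irqrestore(cdev->ccwlock, flags);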
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 2257e45594..d8a86f5af3 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -3654,7 +3654,7 @@ qeth_verify_vlan_dev(struct net_device *dev, struct qeth_card *card)
 	return rc;
 
 	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){
-		if (vg->vlan_devices[i] == dev){
+		if (vlan_group_get_device(vg, i) == dev){
 			rc = QETH_VLAN_CARD;
 			break;
 		}
@@ -5261,7 +5261,7 @@ qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid)
 	QETH_DBF_TEXT(trace, 4, "frvaddr4");
 
 	rcu_read_lock();
-	in_dev = __in_dev_get_rcu(card->vlangrp->vlan_devices[vid]);
+	in_dev = __in_dev_get_rcu(vlan_group_get_device(card->vlangrp, vid));
 	if (!in_dev)
 		goto out;
 	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
@@ -5288,7 +5288,7 @@ qeth_free_vlan_addresses6(struct qeth_card *card, unsigned short vid)
 
 	QETH_DBF_TEXT(trace, 4, "frvaddr6");
 
-	in6_dev = in6_dev_get(card->vlangrp->vlan_devices[vid]);
+	in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
 	if (!in6_dev)
 		return;
 	for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){
@@ -5360,7 +5360,7 @@ qeth_layer2_process_vlans(struct qeth_card *card, int clear)
 	if (!card->vlangrp)
 		return;
 	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
-		if (card->vlangrp->vlan_devices[i] == NULL)
+		if (vlan_group_get_device(card->vlangrp, i) == NULL)
 			continue;
 		if (clear)
 			qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN);
@@ -5398,8 +5398,7 @@ qeth_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	spin_lock_irqsave(&card->vlanlock, flags);
 	/* unregister IP addresses of vlan device */
 	qeth_free_vlan_addresses(card, vid);
-	if (card->vlangrp)
-		card->vlangrp->vlan_devices[vid] = NULL;
+	vlan_group_set_device(card->vlangrp, vid, NULL);
 	spin_unlock_irqrestore(&card->vlanlock, flags);
 	if (card->options.layer2)
 		qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
@@ -5662,10 +5661,11 @@ qeth_add_vlan_mc(struct qeth_card *card)
 
 	vg = card->vlangrp;
 	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
-		if (vg->vlan_devices[i] == NULL ||
-		    !(vg->vlan_devices[i]->flags & IFF_UP))
+		struct net_device *netdev = vlan_group_get_device(vg, i);
+		if (netdev == NULL ||
+		    !(netdev->flags & IFF_UP))
 			continue;
-		in_dev = in_dev_get(vg->vlan_devices[i]);
+		in_dev = in_dev_get(netdev);
 		if (!in_dev)
 			continue;
 		read_lock(&in_dev->mc_list_lock);
@@ -5749,10 +5749,11 @@ qeth_add_vlan_mc6(struct qeth_card *card)
 
 	vg = card->vlangrp;
 	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
-		if (vg->vlan_devices[i] == NULL ||
-		    !(vg->vlan_devices[i]->flags & IFF_UP))
+		struct net_device *netdev = vlan_group_get_device(vg, i);
+		if (netdev == NULL ||
+		    !(netdev->flags & IFF_UP))
 			continue;
-		in_dev = in6_dev_get(vg->vlan_devices[i]);
+		in_dev = in6_dev_get(netdev);
 		if (!in_dev)
 			continue;
 		read_lock(&in_dev->lock);
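
The qeth hunks above are part of a tree-wide switch from open-coding vg->vlan_devices[vid] to accessor helpers. As a rough illustration of the pattern only (a simplified layout, not the kernel's actual definition, which was reworked precisely so the storage behind the accessor could change later without touching drivers again):

/* Illustrative sketch of the accessor style this patch migrates to. */
#define VLAN_GROUP_ARRAY_LEN 4096

struct net_device;

struct vlan_group {
	struct net_device *vlan_devices[VLAN_GROUP_ARRAY_LEN];
};

static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
							int vlan_id)
{
	return vg ? vg->vlan_devices[vlan_id] : NULL;
}

static inline void vlan_group_set_device(struct vlan_group *vg,
					 int vlan_id,
					 struct net_device *dev)
{
	if (vg)
		vg->vlan_devices[vlan_id] = dev;
}

Routing every lookup through one inline helper is what allows the NULL check in qeth_vlan_rx_kill_vid() to move into the helper itself.
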
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index d2d51dc51a..82add77ad1 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -178,10 +178,10 @@ cumanascsi_2_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
 		dma_dir = DMA_MODE_READ,
 		alatch_dir = ALATCH_DMA_IN;
 
-	dma_map_sg(dev, info->sg, bufs + 1, map_dir);
+	dma_map_sg(dev, info->sg, bufs, map_dir);
 
 	disable_dma(dmach);
-	set_dma_sg(dmach, info->sg, bufs + 1);
+	set_dma_sg(dmach, info->sg, bufs);
 	writeb(alatch_dir, info->base + CUMANASCSI2_ALATCH);
 	set_dma_mode(dmach, dma_dir);
 	enable_dma(dmach);
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index d4136524fc..ed06a8c19a 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -175,10 +175,10 @@ eesoxscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
 		map_dir = DMA_FROM_DEVICE,
 		dma_dir = DMA_MODE_READ;
 
-	dma_map_sg(dev, info->sg, bufs + 1, map_dir);
+	dma_map_sg(dev, info->sg, bufs, map_dir);
 
 	disable_dma(dmach);
-	set_dma_sg(dmach, info->sg, bufs + 1);
+	set_dma_sg(dmach, info->sg, bufs);
 	set_dma_mode(dmach, dma_dir);
 	enable_dma(dmach);
 	return fasdma_real_all;
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 2969cc0ff2..fb5f202843 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -633,7 +633,7 @@ static void fas216_updateptrs(FAS216_Info *info, int bytes_transferred)
 
 	BUG_ON(bytes_transferred < 0);
 
-	info->SCpnt->request_bufflen -= bytes_transferred;
+	SCp->phase -= bytes_transferred;
 
 	while (bytes_transferred != 0) {
 		if (SCp->this_residual > bytes_transferred)
@@ -715,7 +715,7 @@ static void fas216_cleanuptransfer(FAS216_Info *info)
 		return;
 
 	if (dmatype == fasdma_real_all)
-		total = info->SCpnt->request_bufflen;
+		total = info->scsi.SCp.phase;
 	else
 		total = info->scsi.SCp.this_residual;
 
@@ -753,7 +753,7 @@ static void fas216_transfer(FAS216_Info *info)
 	fas216_log(info, LOG_BUFFER,
 		   "starttransfer: buffer %p length 0x%06x reqlen 0x%06x",
 		   info->scsi.SCp.ptr, info->scsi.SCp.this_residual,
-		   info->SCpnt->request_bufflen);
+		   info->scsi.SCp.phase);
 
 	if (!info->scsi.SCp.ptr) {
 		fas216_log(info, LOG_ERROR, "null buffer passed to "
@@ -784,7 +784,7 @@ static void fas216_transfer(FAS216_Info *info)
 	info->dma.transfer_type = dmatype;
 
 	if (dmatype == fasdma_real_all)
-		fas216_set_stc(info, info->SCpnt->request_bufflen);
+		fas216_set_stc(info, info->scsi.SCp.phase);
 	else
 		fas216_set_stc(info, info->scsi.SCp.this_residual);
 
@@ -2114,6 +2114,7 @@ request_sense:
 	SCpnt->SCp.buffers_residual = 0;
 	SCpnt->SCp.ptr = (char *)SCpnt->sense_buffer;
 	SCpnt->SCp.this_residual = sizeof(SCpnt->sense_buffer);
+	SCpnt->SCp.phase = sizeof(SCpnt->sense_buffer);
 	SCpnt->SCp.Message = 0;
 	SCpnt->SCp.Status = 0;
 	SCpnt->request_bufflen = sizeof(SCpnt->sense_buffer);
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index f9cd20bfb9..159047a349 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -148,10 +148,10 @@ powertecscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
 		map_dir = DMA_FROM_DEVICE,
 		dma_dir = DMA_MODE_READ;
 
-	dma_map_sg(dev, info->sg, bufs + 1, map_dir);
+	dma_map_sg(dev, info->sg, bufs, map_dir);
 
 	disable_dma(dmach);
-	set_dma_sg(dmach, info->sg, bufs + 1);
+	set_dma_sg(dmach, info->sg, bufs);
 	set_dma_mode(dmach, dma_dir);
 	enable_dma(dmach);
 	return fasdma_real_all;
@@ -342,6 +342,7 @@ powertecscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
 	info->base = base;
 	powertecscsi_terminator_ctl(host, term[ec->slot_no]);
 
+	info->ec = ec;
 	info->info.scsi.io_base = base + POWERTEC_FAS216_OFFSET;
 	info->info.scsi.io_shift = POWERTEC_FAS216_SHIFT;
 	info->info.scsi.irq = ec->irq;
diff --git a/drivers/scsi/arm/scsi.h b/drivers/scsi/arm/scsi.h
index 3a39579bd0..21ba57155b 100644
--- a/drivers/scsi/arm/scsi.h
+++ b/drivers/scsi/arm/scsi.h
@@ -80,6 +80,7 @@ static inline void init_SCp(struct scsi_cmnd *SCpnt)
 			(page_address(SCpnt->SCp.buffer->page) +
 			 SCpnt->SCp.buffer->offset);
 		SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
+		SCpnt->SCp.phase = SCpnt->request_bufflen;
 
 #ifdef BELT_AND_BRACES
 		/*
@@ -98,6 +99,7 @@ static inline void init_SCp(struct scsi_cmnd *SCpnt)
 	} else {
 		SCpnt->SCp.ptr = (unsigned char *)SCpnt->request_buffer;
 		SCpnt->SCp.this_residual = SCpnt->request_bufflen;
+		SCpnt->SCp.phase = SCpnt->request_bufflen;
 	}
 
 	/*
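
The fas216 and init_SCp() hunks replace reads of SCpnt->request_bufflen with a driver-maintained total kept in SCp.phase. A minimal sketch of that bookkeeping, with simplified structure and field names (illustrative only, not the kernel's scsi_pointer definition):

struct scsi_pointer_sketch {
	char *ptr;		/* current position in the active segment */
	int this_residual;	/* bytes left in the current segment */
	int phase;		/* bytes left for the whole command */
};

/* Mirrors init_SCp(): cache the total length once at command setup. */
static void init_transfer(struct scsi_pointer_sketch *scp,
			  char *buf, int seg_len, int total_len)
{
	scp->ptr = buf;
	scp->this_residual = seg_len;
	scp->phase = total_len;
}

/* Mirrors fas216_updateptrs(): account completed bytes against both counters. */
static void bytes_done(struct scsi_pointer_sketch *scp, int n)
{
	scp->phase -= n;
	scp->this_residual -= n;
}
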
diff --git a/drivers/serial/dz.c b/drivers/serial/dz.c
index 587d87b9eb..d31721f274 100644
--- a/drivers/serial/dz.c
+++ b/drivers/serial/dz.c
@@ -170,8 +170,7 @@ static void dz_enable_ms(struct uart_port *port)
  * This routine deals with inputs from any lines.
  * ------------------------------------------------------------
  */
-static inline void dz_receive_chars(struct dz_port *dport_in,
-				    struct pt_regs *regs)
+static inline void dz_receive_chars(struct dz_port *dport_in)
 {
 	struct dz_port *dport;
 	struct tty_struct *tty = NULL;
@@ -226,7 +225,7 @@ static inline void dz_receive_chars(struct dz_port *dport_in,
 			break;
 		}
 
-		if (uart_handle_sysrq_char(&dport->port, ch, regs))
+		if (uart_handle_sysrq_char(&dport->port, ch))
 			continue;
 
 		if ((status & dport->port.ignore_status_mask) == 0) {
@@ -332,7 +331,7 @@ static irqreturn_t dz_interrupt(int irq, void *dev)
 	status = dz_in(dport, DZ_CSR);
 
 	if ((status & (DZ_RDONE | DZ_RIE)) == (DZ_RDONE | DZ_RIE))
-		dz_receive_chars(dport, regs);
+		dz_receive_chars(dport);
 
 	if ((status & (DZ_TRDY | DZ_TIE)) == (DZ_TRDY | DZ_TIE))
 		dz_transmit_chars(dport);
diff --git a/drivers/serial/mcfserial.c b/drivers/serial/mcfserial.c
index 08430961a8..99af084c7c 100644
--- a/drivers/serial/mcfserial.c
+++ b/drivers/serial/mcfserial.c
@@ -425,15 +425,13 @@ irqreturn_t mcfrs_interrupt(int irq, void *dev_id)
  * -------------------------------------------------------------------
  */
 
-static void mcfrs_offintr(void *private)
+static void mcfrs_offintr(struct work_struct *work)
 {
-	struct mcf_serial *info = (struct mcf_serial *) private;
-	struct tty_struct *tty;
+	struct mcf_serial *info = container_of(work, struct mcf_serial, tqueue);
+	struct tty_struct *tty = info->tty;
 
-	tty = info->tty;
-	if (!tty)
-		return;
-	tty_wakeup(tty);
+	if (tty)
+		tty_wakeup(tty);
 }
 
 
@@ -497,16 +495,13 @@ static void mcfrs_timer(void)
  *	do_serial_hangup() -> tty->hangup() -> mcfrs_hangup()
  *
  */
-static void do_serial_hangup(void *private)
+static void do_serial_hangup(struct work_struct *work)
 {
-	struct mcf_serial *info = (struct mcf_serial *) private;
-	struct tty_struct *tty;
+	struct mcf_serial *info = container_of(work, struct mcf_serial, tqueue_hangup);
+	struct tty_struct *tty = info->tty;
 
-	tty = info->tty;
-	if (!tty)
-		return;
-
-	tty_hangup(tty);
+	if (tty)
+		tty_hangup(tty);
 }
 
 static int startup(struct mcf_serial * info)
@@ -857,7 +852,7 @@ static void mcfrs_throttle(struct tty_struct * tty)
 #ifdef SERIAL_DEBUG_THROTTLE
 	char buf[64];
 
-	printk("throttle %s: %d....\n", _tty_name(tty, buf),
+	printk("throttle %s: %d....\n", tty_name(tty, buf),
 	       tty->ldisc.chars_in_buffer(tty));
 #endif
 
@@ -876,7 +871,7 @@ static void mcfrs_unthrottle(struct tty_struct * tty)
 #ifdef SERIAL_DEBUG_THROTTLE
 	char buf[64];
 
-	printk("unthrottle %s: %d....\n", _tty_name(tty, buf),
+	printk("unthrottle %s: %d....\n", tty_name(tty, buf),
 	       tty->ldisc.chars_in_buffer(tty));
 #endif
 
@@ -1541,8 +1536,8 @@ static void mcfrs_irqinit(struct mcf_serial *info)
 	 * External Pin Mask Setting & Enable External Pin for Interface
 	 * mrcbis@aliceposta.it
 	 */
-	unsigned short *serpin_enable_mask;
-	serpin_enable_mask = (MCF_IPSBAR + MCF_GPIO_PAR_UART);
+	u16 *serpin_enable_mask;
+	serpin_enable_mask = (u16 *) (MCF_IPSBAR + MCF_GPIO_PAR_UART);
 	if (info->line == 0)
 		*serpin_enable_mask |= UART0_ENABLE_MASK;
 	else if (info->line == 1)
@@ -1551,6 +1546,13 @@ static void mcfrs_irqinit(struct mcf_serial *info)
 		*serpin_enable_mask |= UART2_ENABLE_MASK;
 	}
 #endif
+#if defined(CONFIG_M528x)
+	/* make sure PUAPAR is set for UART0 and UART1 */
+	if (info->line < 2) {
+		volatile unsigned char *portp = (volatile unsigned char *) (MCF_MBAR + MCF5282_GPIO_PUAPAR);
+		*portp |= (0x03 << (info->line * 2));
+	}
+#endif
 #elif defined(CONFIG_M520x)
 	volatile unsigned char *icrp, *uartp;
 	volatile unsigned long *imrp;
@@ -1783,8 +1785,8 @@ mcfrs_init(void)
 		info->event = 0;
 		info->count = 0;
 		info->blocked_open = 0;
-		INIT_WORK(&info->tqueue, mcfrs_offintr, info);
-		INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info);
+		INIT_WORK(&info->tqueue, mcfrs_offintr);
+		INIT_WORK(&info->tqueue_hangup, do_serial_hangup);
 		init_waitqueue_head(&info->open_wait);
 		init_waitqueue_head(&info->close_wait);
 
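
The mcfserial conversion follows the 2.6.20 workqueue API change: a handler now receives the work_struct itself and recovers its containing object with container_of(), instead of a void * stored at INIT_WORK() time. A compact sketch of the pattern; the struct and function names below are made up for illustration:

#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/tty.h>

struct my_port {
	struct tty_struct *tty;
	struct work_struct tqueue;
};

/* New-style handler: take the work item, derive the owning structure. */
static void my_port_offintr(struct work_struct *work)
{
	struct my_port *info = container_of(work, struct my_port, tqueue);

	if (info->tty)
		tty_wakeup(info->tty);
}

static void my_port_setup(struct my_port *info)
{
	/* old API was: INIT_WORK(&info->tqueue, my_port_offintr, info); */
	INIT_WORK(&info->tqueue, my_port_offintr);
}
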
diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c
index 253ceb895c..a27e9e92cb 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/serial/sn_console.c
@@ -636,25 +636,6 @@ static irqreturn_t sn_sal_interrupt(int irq, void *dev_id)
 }
 
 /**
- * sn_sal_connect_interrupt - Request interrupt, handled by sn_sal_interrupt
- * @port: Our sn_cons_port (which contains the uart port)
- *
- * returns the console irq if interrupt is successfully registered, else 0
- *
- */
-static int sn_sal_connect_interrupt(struct sn_cons_port *port)
-{
-	if (request_irq(SGI_UART_VECTOR, sn_sal_interrupt,
-			IRQF_DISABLED | IRQF_SHARED,
-			"SAL console driver", port) >= 0) {
-		return SGI_UART_VECTOR;
-	}
-
-	printk(KERN_INFO "sn_console: console proceeding in polled mode\n");
-	return 0;
-}
-
-/**
  * sn_sal_timer_poll - this function handles polled console mode
  * @data: A pointer to our sn_cons_port (which contains the uart port)
  *
@@ -746,30 +727,31 @@ static void __init sn_sal_switch_to_asynch(struct sn_cons_port *port)
  * mode. We were previously in asynch/polling mode (using init_timer).
  *
  * We attempt to switch to interrupt mode here by calling
- * sn_sal_connect_interrupt. If that works out, we enable receive interrupts.
+ * request_irq. If that works out, we enable receive interrupts.
  */
 static void __init sn_sal_switch_to_interrupts(struct sn_cons_port *port)
 {
-	int irq;
 	unsigned long flags;
 
-	if (!port)
-		return;
-
-	DPRINTF("sn_console: switching to interrupt driven console\n");
-
-	spin_lock_irqsave(&port->sc_port.lock, flags);
+	if (port) {
+		DPRINTF("sn_console: switching to interrupt driven console\n");
 
-	irq = sn_sal_connect_interrupt(port);
+		if (request_irq(SGI_UART_VECTOR, sn_sal_interrupt,
+				IRQF_DISABLED | IRQF_SHARED,
+				"SAL console driver", port) >= 0) {
+			spin_lock_irqsave(&port->sc_port.lock, flags);
+			port->sc_port.irq = SGI_UART_VECTOR;
+			port->sc_ops = &intr_ops;
 
-	if (irq) {
-		port->sc_port.irq = irq;
-		port->sc_ops = &intr_ops;
-
-		/* turn on receive interrupts */
-		ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV);
+			/* turn on receive interrupts */
+			ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV);
+			spin_unlock_irqrestore(&port->sc_port.lock, flags);
+		}
+		else {
+			printk(KERN_INFO
+				"sn_console: console proceeding in polled mode\n");
+		}
 	}
-	spin_unlock_irqrestore(&port->sc_port.lock, flags);
 }
 
 /*
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c
index ef09952f20..827a75a186 100644
--- a/drivers/usb/input/hid-core.c
+++ b/drivers/usb/input/hid-core.c
@@ -4,7 +4,7 @@
  * Copyright (c) 1999 Andreas Gal
  * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
  * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
- * Copyright (c) 2006 Jiri Kosina
+ * Copyright (c) 2006-2007 Jiri Kosina
  */
 
 /*
@@ -27,9 +27,6 @@
 #include <linux/input.h>
 #include <linux/wait.h>
 
-#undef DEBUG
-#undef DEBUG_DATA
-
 #include <linux/usb.h>
 
 #include <linux/hid.h>
@@ -689,10 +686,8 @@ void usbhid_init_reports(struct hid_device *hid)
 #define USB_DEVICE_ID_SMARTJOY_DUAL_PLUS 0x8802
 
 #define USB_VENDOR_ID_CODEMERCS 0x07c0
-#define USB_DEVICE_ID_CODEMERCS_IOW40 0x1500
-#define USB_DEVICE_ID_CODEMERCS_IOW24 0x1501
-#define USB_DEVICE_ID_CODEMERCS_IOW48 0x1502
-#define USB_DEVICE_ID_CODEMERCS_IOW28 0x1503
+#define USB_DEVICE_ID_CODEMERCS_IOW_FIRST 0x1500
+#define USB_DEVICE_ID_CODEMERCS_IOW_LAST 0x15ff
 
 #define USB_VENDOR_ID_DELORME 0x1163
 #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
@@ -758,6 +753,8 @@ void usbhid_init_reports(struct hid_device *hid)
 
 #define USB_VENDOR_ID_LOGITECH 0x046d
 #define USB_DEVICE_ID_LOGITECH_USB_RECEIVER 0xc101
+#define USB_DEVICE_ID_LOGITECH_USB_RECEIVER_2 0xc517
+#define USB_DEVICE_ID_DINOVO_EDGE 0xc714
 
 #define USB_VENDOR_ID_IMATION 0x0718
 #define USB_DEVICE_ID_DISC_STAKKA 0xd000
@@ -778,6 +775,8 @@ static const struct hid_blacklist {
 	unsigned quirks;
 } hid_blacklist[] = {
 
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE, HID_QUIRK_DUPLICATE_USAGES },
+
 	{ USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_01, HID_QUIRK_IGNORE },
 	{ USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_10, HID_QUIRK_IGNORE },
 	{ USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_20, HID_QUIRK_IGNORE },
@@ -788,10 +787,6 @@ static const struct hid_blacklist {
 	{ USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1, HID_QUIRK_IGNORE },
 	{ USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232, HID_QUIRK_IGNORE },
 	{ USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD, HID_QUIRK_IGNORE },
-	{ USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW40, HID_QUIRK_IGNORE },
-	{ USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24, HID_QUIRK_IGNORE },
-	{ USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW48, HID_QUIRK_IGNORE },
-	{ USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28, HID_QUIRK_IGNORE },
 	{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM, HID_QUIRK_IGNORE },
 	{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE, HID_QUIRK_IGNORE },
 	{ USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE, HID_QUIRK_IGNORE },
@@ -944,6 +939,7 @@ static const struct hid_blacklist {
 	{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
 
 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_USB_RECEIVER, HID_QUIRK_BAD_RELATIVE_KEYS },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_USB_RECEIVER_2, HID_QUIRK_LOGITECH_S510_DESCRIPTOR },
 
 	{ USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
 
@@ -1041,6 +1037,22 @@ static void hid_fixup_sony_ps3_controller(struct usb_device *dev, int ifnum)
 	kfree(buf);
 }
 
+/*
+ * Logitech S510 keyboard sends in report #3 keys which are far
+ * above the logical maximum described in descriptor. This extends
+ * the original value of 0x28c of logical maximum to 0x104d
+ */
+static void hid_fixup_s510_descriptor(unsigned char *rdesc, int rsize)
+{
+	if (rsize >= 90 && rdesc[83] == 0x26
+			&& rdesc[84] == 0x8c
+			&& rdesc[85] == 0x02) {
+		info("Fixing up Logitech S510 report descriptor");
+		rdesc[84] = rdesc[89] = 0x4d;
+		rdesc[85] = rdesc[90] = 0x10;
+	}
+}
+
 static struct hid_device *usb_hid_configure(struct usb_interface *intf)
 {
 	struct usb_host_interface *interface = intf->cur_altsetting;
@@ -1052,9 +1064,14 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
 	int n, len, insize = 0;
 	struct usbhid_device *usbhid;
 
 	/* Ignore all Wacom devices */
 	if (le16_to_cpu(dev->descriptor.idVendor) == USB_VENDOR_ID_WACOM)
 		return NULL;
+	/* ignore all Code Mercenaries IOWarrior devices */
+	if (le16_to_cpu(dev->descriptor.idVendor) == USB_VENDOR_ID_CODEMERCS)
+		if (le16_to_cpu(dev->descriptor.idProduct) >= USB_DEVICE_ID_CODEMERCS_IOW_FIRST &&
+		    le16_to_cpu(dev->descriptor.idProduct) <= USB_DEVICE_ID_CODEMERCS_IOW_LAST)
+			return NULL;
 
 	for (n = 0; hid_blacklist[n].idVendor; n++)
 		if ((hid_blacklist[n].idVendor == le16_to_cpu(dev->descriptor.idVendor)) &&
@@ -1109,7 +1126,10 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
 	if ((quirks & HID_QUIRK_CYMOTION))
 		hid_fixup_cymotion_descriptor(rdesc, rsize);
 
-#ifdef DEBUG_DATA
+	if (quirks & HID_QUIRK_LOGITECH_S510_DESCRIPTOR)
+		hid_fixup_s510_descriptor(rdesc, rsize);
+
+#ifdef CONFIG_HID_DEBUG
 	printk(KERN_DEBUG __FILE__ ": report descriptor (size %u, read %d) = ", rsize, n);
 	for (n = 0; n < rsize; n++)
 		printk(" %02x", (unsigned char) rdesc[n]);
@@ -1225,8 +1245,8 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
 			le16_to_cpu(dev->descriptor.idProduct));
 
 	hid->bus = BUS_USB;
-	hid->vendor = dev->descriptor.idVendor;
-	hid->product = dev->descriptor.idProduct;
+	hid->vendor = le16_to_cpu(dev->descriptor.idVendor);
+	hid->product = le16_to_cpu(dev->descriptor.idProduct);
 
 	usb_make_path(dev, hid->phys, sizeof(hid->phys));
 	strlcat(hid->phys, "/input", sizeof(hid->phys));
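
The last hid-core hunk wraps idVendor/idProduct in le16_to_cpu() because USB descriptor fields are little-endian on the wire; on a big-endian host the raw values would otherwise be byte-swapped. A small stand-alone sketch of the same conversion (plain user-space C for illustration, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Convert a little-endian 16-bit descriptor field to host order,
 * as le16_to_cpu() does in the kernel. */
static uint16_t sketch_le16_to_cpu(const uint8_t raw[2])
{
	return (uint16_t)(raw[0] | (raw[1] << 8));	/* byte 0 is least significant */
}

int main(void)
{
	uint8_t idVendor_raw[2] = { 0x6d, 0x04 };	/* 0x046d (Logitech) as stored on the wire */

	printf("idVendor = 0x%04x\n", sketch_le16_to_cpu(idVendor_raw));
	return 0;
}
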
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 7e7ec29782..8e898e3d86 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -55,7 +55,7 @@
 #include <linux/slab.h>
 #include <linux/kthread.h>
 #include <linux/mutex.h>
-#include <linux/utsrelease.h>
+#include <linux/utsname.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -547,7 +547,7 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id)
 			idesc->bInterfaceSubClass,
 			idesc->bInterfaceProtocol,
 			msgs[msg],
-			UTS_RELEASE);
+			utsname()->release);
 	}
 
 	return 0;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index c1536d7855..7f5a598368 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -677,8 +677,6 @@ config FB_S1D13XXX
 config FB_NVIDIA
 	tristate "nVidia Framebuffer Support"
 	depends on FB && PCI
-	select I2C_ALGOBIT if FB_NVIDIA_I2C
-	select I2C if FB_NVIDIA_I2C
 	select FB_BACKLIGHT if FB_NVIDIA_BACKLIGHT
 	select FB_MODE_HELPERS
 	select FB_CFB_FILLRECT
@@ -697,6 +695,7 @@ config FB_NVIDIA
 config FB_NVIDIA_I2C
 	bool "Enable DDC Support"
 	depends on FB_NVIDIA
+	select FB_DDC
 	help
 	  This enables I2C support for nVidia Chipsets. This is used
 	  only for getting EDID information from the attached display
@@ -716,7 +715,6 @@ config FB_NVIDIA_BACKLIGHT
 config FB_RIVA
 	tristate "nVidia Riva support"
 	depends on FB && PCI
-	select FB_DDC if FB_RIVA_I2C
 	select FB_BACKLIGHT if FB_RIVA_BACKLIGHT
 	select FB_MODE_HELPERS
 	select FB_CFB_FILLRECT
@@ -734,6 +732,7 @@ config FB_RIVA
 config FB_RIVA_I2C
 	bool "Enable DDC Support"
 	depends on FB_RIVA
+	select FB_DDC
 	help
 	  This enables I2C support for nVidia Chipsets. This is used
 	  only for getting EDID information from the attached display
@@ -812,8 +811,6 @@ config FB_INTEL
 	depends on FB && EXPERIMENTAL && PCI && X86
 	select AGP
 	select AGP_INTEL
-	select I2C_ALGOBIT if FB_INTEL_I2C
-	select I2C if FB_INTEL_I2C
 	select FB_MODE_HELPERS
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
@@ -846,6 +843,7 @@ config FB_INTEL_DEBUG
 config FB_INTEL_I2C
 	bool "DDC/I2C for Intel framebuffer support"
 	depends on FB_INTEL
+	select FB_DDC
 	default y
 	help
 	  Say Y here if you want DDC/I2C support for your on-board Intel graphics.
@@ -924,8 +922,8 @@ config FB_MATROX_G
 
 config FB_MATROX_I2C
 	tristate "Matrox I2C support"
-	depends on FB_MATROX && I2C
-	select I2C_ALGOBIT
+	depends on FB_MATROX
+	select FB_DDC
 	---help---
 	  This drivers creates I2C buses which are needed for accessing the
 	  DDC (I2C) bus present on all Matroxes, an I2C bus which
@@ -993,7 +991,6 @@ config FB_MATROX_MULTIHEAD
 config FB_RADEON
 	tristate "ATI Radeon display support"
 	depends on FB && PCI
-	select FB_DDC if FB_RADEON_I2C
 	select FB_BACKLIGHT if FB_RADEON_BACKLIGHT
 	select FB_MODE_HELPERS
 	select FB_CFB_FILLRECT
@@ -1018,6 +1015,7 @@ config FB_RADEON
 config FB_RADEON_I2C
 	bool "DDC/I2C for ATI Radeon support"
 	depends on FB_RADEON
+	select FB_DDC
 	default y
 	help
 	  Say Y here if you want DDC/I2C support for your Radeon board.
@@ -1125,7 +1123,6 @@ config FB_S3
 config FB_SAVAGE
 	tristate "S3 Savage support"
 	depends on FB && PCI && EXPERIMENTAL
-	select FB_DDC if FB_SAVAGE_I2C
 	select FB_MODE_HELPERS
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
@@ -1142,6 +1139,7 @@ config FB_SAVAGE
 config FB_SAVAGE_I2C
 	bool "Enable DDC2 Support"
 	depends on FB_SAVAGE
+	select FB_DDC
 	help
 	  This enables I2C support for S3 Savage Chipsets. This is used
 	  only for getting EDID information from the attached display
@@ -1618,8 +1616,7 @@ config FB_IBM_GXT4500
 
 config FB_PS3
 	bool "PS3 GPU framebuffer driver"
-	depends on FB && PPC_PS3
-	select PS3_PS3AV
+	depends on FB && PS3_PS3AV
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 8726c36697..e86d7e0c98 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -357,6 +357,12 @@ static int default_lcd_on __devinitdata = 1;
 static int mtrr = 1;
 #endif
 
+#ifdef CONFIG_PMAC_BACKLIGHT
+static int backlight __devinitdata = 1;
+#else
+static int backlight __devinitdata = 0;
+#endif
+
 /* PLL constants */
 struct aty128_constants {
 	u32 ref_clk;
@@ -1652,6 +1658,9 @@ static int __devinit aty128fb_setup(char *options)
 		} else if (!strncmp(this_opt, "crt:", 4)) {
 			default_crt_on = simple_strtoul(this_opt+4, NULL, 0);
 			continue;
+		} else if (!strncmp(this_opt, "backlight:", 10)) {
+			backlight = simple_strtoul(this_opt+10, NULL, 0);
+			continue;
 		}
 #ifdef CONFIG_MTRR
 		if(!strncmp(this_opt, "nomtrr", 6)) {
@@ -1985,7 +1994,8 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i
 	par->lock_blank = 0;
 
 #ifdef CONFIG_FB_ATY128_BACKLIGHT
-	aty128_bl_init(par);
+	if (backlight)
+		aty128_bl_init(par);
 #endif
 
 	if (register_framebuffer(info) < 0)
diff --git a/drivers/video/aty/atyfb.h b/drivers/video/aty/atyfb.h
index f72faff33c..dc62f8e282 100644
--- a/drivers/video/aty/atyfb.h
+++ b/drivers/video/aty/atyfb.h
@@ -284,7 +284,8 @@ static inline void aty_st_8(int regindex, u8 val, const struct atyfb_par *par)
 #endif
 }
 
-#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD)
+#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \
+defined (CONFIG_FB_ATY_GENERIC_LCD) || defined (CONFIG_FB_ATY_BACKLIGHT)
 extern void aty_st_lcd(int index, u32 val, const struct atyfb_par *par);
 extern u32 aty_ld_lcd(int index, const struct atyfb_par *par);
 #endif
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index a7e0062233..d7627fc4f1 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -131,7 +131,8 @@
 #define PRINTKI(fmt, args...) printk(KERN_INFO "atyfb: " fmt, ## args)
 #define PRINTKE(fmt, args...) printk(KERN_ERR "atyfb: " fmt, ## args)
 
-#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD)
+#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \
+defined (CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_FB_ATY_BACKLIGHT)
 static const u32 lt_lcd_regs[] = {
 	CONFIG_PANEL_LG,
 	LCD_GEN_CNTL_LG,
@@ -308,6 +309,12 @@ static int xclk;
 static int comp_sync __devinitdata = -1;
 static char *mode;
 
+#ifdef CONFIG_PMAC_BACKLIGHT
+static int backlight __devinitdata = 1;
+#else
+static int backlight __devinitdata = 0;
+#endif
+
 #ifdef CONFIG_PPC
 static int default_vmode __devinitdata = VMODE_CHOOSE;
 static int default_cmode __devinitdata = CMODE_CHOOSE;
@@ -2575,7 +2582,7 @@ static int __devinit aty_init(struct fb_info *info)
 			  | (USE_F32KHZ | TRISTATE_MEM_EN), par);
 	} else
 #endif
-	if (M64_HAS(MOBIL_BUS)) {
+	if (M64_HAS(MOBIL_BUS) && backlight) {
 #ifdef CONFIG_FB_ATY_BACKLIGHT
 		aty_bl_init (par);
 #endif
@@ -3757,6 +3764,8 @@ static int __init atyfb_setup(char *options)
 			xclk = simple_strtoul(this_opt+5, NULL, 0);
 		else if (!strncmp(this_opt, "comp_sync:", 10))
 			comp_sync = simple_strtoul(this_opt+10, NULL, 0);
+		else if (!strncmp(this_opt, "backlight:", 10))
+			backlight = simple_strtoul(this_opt+10, NULL, 0);
 #ifdef CONFIG_PPC
 		else if (!strncmp(this_opt, "vmode:", 6)) {
 			unsigned int vmode =
diff --git a/drivers/video/aty/mach64_ct.c b/drivers/video/aty/mach64_ct.c
index f3b487b871..1fdcfdbf66 100644
--- a/drivers/video/aty/mach64_ct.c
+++ b/drivers/video/aty/mach64_ct.c
@@ -598,7 +598,6 @@ static void aty_resume_pll_ct(const struct fb_info *info,
 	struct atyfb_par *par = info->par;
 
 	if (par->mclk_per != par->xclk_per) {
-		int i;
 		/*
 		* This disables the sclk, crashes the computer as reported:
 		* aty_st_pll_ct(SPLL_CNTL2, 3, info);
@@ -614,7 +613,7 @@ static void aty_resume_pll_ct(const struct fb_info *info,
 		* helps for Rage Mobilities that sometimes crash when
 		* we switch to sclk. (Daniel Mantione, 13-05-2003)
 		*/
-		for (i=0;i<=0x1ffff;i++);
+		udelay(500);
 	}
 
 	aty_st_pll_ct(PLL_REF_DIV, pll->ct.pll_ref_div, par);
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 7e228aded4..1bf6f42eb4 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -268,6 +268,11 @@ static int nomtrr = 0;
 #endif
 static int force_sleep;
 static int ignore_devlist;
+#ifdef CONFIG_PMAC_BACKLIGHT
+static int backlight = 1;
+#else
+static int backlight = 0;
+#endif
 
 /*
  * prototypes
@@ -1026,8 +1031,7 @@ int radeon_screen_blank(struct radeonfb_info *rinfo, int blank, int mode_switch)
 		break;
 	}
 
-	/* let fbcon do a soft blank for us */
-	return (blank == FB_BLANK_NORMAL) ? -EINVAL : 0;
+	return 0;
 }
 
 static int radeonfb_blank (int blank, struct fb_info *info)
@@ -2349,7 +2353,8 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
 				MTRR_TYPE_WRCOMB, 1);
 #endif
 
-	radeonfb_bl_init(rinfo);
+	if (backlight)
+		radeonfb_bl_init(rinfo);
 
 	printk ("radeonfb (%s): %s\n", pci_name(rinfo->pdev), rinfo->name);
 
@@ -2470,6 +2475,8 @@ static int __init radeonfb_setup (char *options)
 			force_dfp = 1;
 		} else if (!strncmp(this_opt, "panel_yres:", 11)) {
 			panel_yres = simple_strtoul((this_opt+11), NULL, 0);
+		} else if (!strncmp(this_opt, "backlight:", 10)) {
+			backlight = simple_strtoul(this_opt+10, NULL, 0);
 #ifdef CONFIG_MTRR
 		} else if (!strncmp(this_opt, "nomtrr", 6)) {
 			nomtrr = 1;
diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
index b7016e9b9e..43f62d8ee4 100644
--- a/drivers/video/nvidia/nv_backlight.c
+++ b/drivers/video/nvidia/nv_backlight.c
@@ -12,6 +12,11 @@
 #include <linux/backlight.h>
 #include <linux/fb.h>
 #include <linux/pci.h>
+
+#ifdef CONFIG_PMAC_BACKLIGHT
+#include <asm/backlight.h>
+#endif
+
 #include "nv_local.h"
 #include "nv_type.h"
 #include "nv_proto.h"
@@ -23,8 +28,6 @@
 #define MAX_LEVEL 0x534
 #define LEVEL_STEP ((MAX_LEVEL - MIN_LEVEL) / FB_BACKLIGHT_MAX)
 
-static struct backlight_properties nvidia_bl_data;
-
 static int nvidia_bl_get_level_brightness(struct nvidia_par *par,
 		int level)
 {
@@ -119,7 +122,7 @@ void nvidia_bl_init(struct nvidia_par *par)
 			    0x534 * FB_BACKLIGHT_MAX / MAX_LEVEL);
 
 	bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
-	bd->props.brightness = nvidia_bl_data.max_brightness;
+	bd->props.brightness = bd->props.max_brightness;
 	bd->props.power = FB_BLANK_UNBLANK;
 	backlight_update_status(bd);
 
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index c18e9557ca..b97ec69012 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -83,6 +83,11 @@ static int bpp __devinitdata = 8;
 #ifdef CONFIG_MTRR
 static int nomtrr __devinitdata = 0;
 #endif
+#ifdef CONFIG_PMAC_BACKLIGHT
+static int backlight __devinitdata = 1;
+#else
+static int backlight __devinitdata = 0;
+#endif
 
 static char *mode_option __devinitdata = NULL;
 
@@ -1311,7 +1316,10 @@ static int __devinit nvidiafb_probe(struct pci_dev *pd,
 	nvidia_save_vga(par, &par->SavedReg);
 
 	pci_set_drvdata(pd, info);
-	nvidia_bl_init(par);
+
+	if (backlight)
+		nvidia_bl_init(par);
+
 	if (register_framebuffer(info) < 0) {
 		printk(KERN_ERR PFX "error registering nVidia framebuffer\n");
 		goto err_out_iounmap_fb;
@@ -1408,6 +1416,8 @@ static int __devinit nvidiafb_setup(char *options)
 			paneltweak = simple_strtoul(this_opt+11, NULL, 0);
 		} else if (!strncmp(this_opt, "vram:", 5)) {
 			vram = simple_strtoul(this_opt+5, NULL, 0);
+		} else if (!strncmp(this_opt, "backlight:", 10)) {
+			backlight = simple_strtoul(this_opt+10, NULL, 0);
 #ifdef CONFIG_MTRR
 		} else if (!strncmp(this_opt, "nomtrr", 6)) {
 			nomtrr = 1;
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index f8a3d608b2..1d1c7c624d 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -215,6 +215,11 @@ static int noaccel __devinitdata = 0;
 #ifdef CONFIG_MTRR
 static int nomtrr __devinitdata = 0;
 #endif
+#ifdef CONFIG_PMAC_BACKLIGHT
+static int backlight __devinitdata = 1;
+#else
+static int backlight __devinitdata = 0;
+#endif
 
 static char *mode_option __devinitdata = NULL;
 static int strictmode = 0;
@@ -2059,7 +2064,10 @@ static int __devinit rivafb_probe(struct pci_dev *pd,
 	info->monspecs.modedb = NULL;
 
 	pci_set_drvdata(pd, info);
-	riva_bl_init(info->par);
+
+	if (backlight)
+		riva_bl_init(info->par);
+
 	ret = register_framebuffer(info);
 	if (ret < 0) {
 		printk(KERN_ERR PFX
@@ -2157,6 +2165,8 @@ static int __init rivafb_setup(char *options)
 			forceCRTC = -1;
 		} else if (!strncmp(this_opt, "flatpanel", 9)) {
 			flatpanel = 1;
+		} else if (!strncmp(this_opt, "backlight:", 10)) {
+			backlight = simple_strtoul(this_opt+10, NULL, 0);
 #ifdef CONFIG_MTRR
 		} else if (!strncmp(this_opt, "nomtrr", 6)) {
 			nomtrr = 1;
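
The aty128fb, atyfb, radeonfb, nvidiafb and rivafb hunks all add the same boot option: a backlight:<0|1> token parsed out of the driver's video= option string with simple_strtoul(). A stand-alone sketch of that parsing, in user-space C purely for illustration; the kernel drivers do the equivalent inside their *_setup() loops:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Scan a comma-separated option string for "backlight:<n>",
 * returning the default when the token is absent. */
static int parse_backlight(const char *options, int deflt)
{
	int backlight = deflt;
	char *dup = strdup(options), *opt;

	for (opt = strtok(dup, ","); opt; opt = strtok(NULL, ","))
		if (!strncmp(opt, "backlight:", 10))
			backlight = (int)strtoul(opt + 10, NULL, 0);

	free(dup);
	return backlight;
}

int main(void)
{
	/* e.g. booting with video=rivafb:flatpanel,backlight:0 */
	printf("backlight = %d\n", parse_backlight("flatpanel,backlight:0", 1));
	return 0;
}
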
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index 02b290ca01..0a44c44672 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -136,8 +136,8 @@ static inline void sm501fb_sync_regs(struct sm501fb_info *info)
 #define SM501_MEMF_CRT (4)
 #define SM501_MEMF_ACCEL (8)
 
-int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
-		    unsigned int why, size_t size)
+static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
+			   unsigned int why, size_t size)
 {
 	unsigned int ptr = 0;
 
@@ -926,7 +926,7 @@ static int sm501fb_blank_crt(int blank_mode, struct fb_info *info)
  * set or change the hardware cursor parameters
 */
 
-int sm501fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
+static int sm501fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
 {
 	struct sm501fb_par *par = info->par;
 	struct sm501fb_info *fbi = par->info;
@@ -1074,9 +1074,9 @@ static ssize_t sm501fb_crtsrc_store(struct device *dev,
 	if (len < 1)
 		return -EINVAL;
 
-	if (strnicmp(buf, "crt", sizeof("crt")) == 0)
+	if (strnicmp(buf, "crt", 3) == 0)
 		head = HEAD_CRT;
-	else if (strnicmp(buf, "panel", sizeof("panel")) == 0)
+	else if (strnicmp(buf, "panel", 5) == 0)
 		head = HEAD_PANEL;
 	else
 		return -EINVAL;
@@ -1098,7 +1098,7 @@ static ssize_t sm501fb_crtsrc_store(struct device *dev,
 	writel(ctrl, info->regs + SM501_DC_CRT_CONTROL);
 	sm501fb_sync_regs(info);
 
-	return (head == HEAD_CRT) ? 3 : 5;
+	return len;
 }
 
 /* Prepare the device_attr for registration with sysfs later */
@@ -1225,7 +1225,7 @@ static struct sm501fb_info *sm501fb_info_alloc(struct fb_info *fbinfo_crt,
  * initialise hw cursor parameters
 */
 
-int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base)
+static int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base)
 {
 	struct sm501fb_par *par = fbi->par;
 	struct sm501fb_info *info = par->info;
@@ -1768,7 +1768,7 @@ static struct platform_driver sm501fb_driver = {
 	},
 };
 
-int __devinit sm501fb_init(void)
+static int __devinit sm501fb_init(void)
 {
 	return platform_driver_register(&sm501fb_driver);
 }