Diffstat (limited to 'drivers')
-rw-r--r--  drivers/accessibility/Kconfig                 |  12
-rw-r--r--  drivers/ata/Kconfig                           |  13
-rw-r--r--  drivers/ata/Makefile                          |   1
-rw-r--r--  drivers/ata/ahci.c                            |   4
-rw-r--r--  drivers/ata/ata_generic.c                     |   6
-rw-r--r--  drivers/ata/ata_piix.c                        |  25
-rw-r--r--  drivers/ata/libata-core.c                     |   1
-rw-r--r--  drivers/ata/libata-eh.c                       |   2
-rw-r--r--  drivers/ata/libata-sff.c                      |   6
-rw-r--r--  drivers/ata/pata_acpi.c                       |   6
-rw-r--r--  drivers/ata/pata_sch.c                        | 206
-rw-r--r--  drivers/ata/sata_inic162x.c                   | 646
-rw-r--r--  drivers/ata/sata_mv.c                         | 690
-rw-r--r--  drivers/base/sys.c                            |   3
-rw-r--r--  drivers/block/aoe/aoecmd.c                    |  10
-rw-r--r--  drivers/char/sx.c                             |   9
-rw-r--r--  drivers/char/vt.c                             |   6
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c        |  90
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.h        |   8
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_resource.c   |  36
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_mem.c        |  75
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c   |  68
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.h   |   8
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h     |   2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c         |   4
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c          |   5
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c    | 138
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_file_ops.c  |  72
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba7220.c   |  26
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_init_chip.c |  95
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_intr.c      |  80
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_kernel.h    |   8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c        |   6
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ruc.c       |   7
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sdma.c      |  44
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c     |   2
-rw-r--r--  drivers/md/raid10.c                           |   2
-rw-r--r--  drivers/media/Makefile                        |   2
-rw-r--r--  drivers/media/video/cx18/cx18-driver.c        |   2
-rw-r--r--  drivers/media/video/saa7134/saa7134-video.c   |   2
-rw-r--r--  drivers/net/irda/nsc-ircc.c                   |   6
-rw-r--r--  drivers/net/irda/smsc-ircc2.c                 |   5
-rw-r--r--  drivers/net/niu.c                             |  11
-rw-r--r--  drivers/net/usb/asix.c                        |   4
-rw-r--r--  drivers/net/wan/lapbether.c                   |   1
-rw-r--r--  drivers/net/wireless/iwlwifi/Kconfig          |   3
-rw-r--r--  drivers/rtc/rtc-ds1511.c                      |   4
-rw-r--r--  drivers/s390/char/tty3270.c                   |  15
-rw-r--r--  drivers/s390/cio/blacklist.c                  | 323
-rw-r--r--  drivers/s390/cio/cio.c                        |  39
-rw-r--r--  drivers/s390/cio/cio.h                        |   2
-rw-r--r--  drivers/s390/cio/cio_debug.h                  |   6
-rw-r--r--  drivers/s390/cio/css.c                        |   4
-rw-r--r--  drivers/s390/cio/device.c                     |  25
-rw-r--r--  drivers/s390/cio/device_fsm.c                 |  44
-rw-r--r--  drivers/s390/cio/device_id.c                  |   4
-rw-r--r--  drivers/s390/cio/device_pgid.c                |  12
-rw-r--r--  drivers/s390/s390mach.c                       |   3
-rw-r--r--  drivers/sbus/char/bpp.c                       |   2
-rw-r--r--  drivers/scsi/dpt_i2o.c                        |  78
-rw-r--r--  drivers/scsi/dpti.h                           |  13
-rw-r--r--  drivers/serial/serial_core.c                  |   3
-rw-r--r--  drivers/usb/host/Kconfig                      |   2
-rw-r--r--  drivers/usb/serial/iuu_phoenix.c              |   6
-rw-r--r--  drivers/video/bw2.c                           |   2
-rw-r--r--  drivers/video/cg3.c                           |   2
-rw-r--r--  drivers/video/cg6.c                           |   2
-rw-r--r--  drivers/video/ffb.c                           |   2
-rw-r--r--  drivers/video/leo.c                           |   2
-rw-r--r--  drivers/video/p9100.c                         |   2
-rw-r--r--  drivers/video/tcx.c                           |  20
71 files changed, 2012 insertions(+), 1063 deletions(-)
diff --git a/drivers/accessibility/Kconfig b/drivers/accessibility/Kconfig
index 1264c4b9809..ef3b65bfdd0 100644
--- a/drivers/accessibility/Kconfig
+++ b/drivers/accessibility/Kconfig
@@ -1,7 +1,17 @@
 menuconfig ACCESSIBILITY
 	bool "Accessibility support"
 	---help---
-	  Enable a submenu where accessibility items may be enabled.
+	  Accessibility handles all special kinds of hardware devices or
+	  software adapters which help people with disabilities (e.g.
+	  blindness) to use computers.
+
+	  That includes braille devices, speech synthesis, keyboard
+	  remapping, etc.
+
+	  Say Y here to get to see options for accessibility.
+	  This option alone does not add any kernel code.
+
+	  If you say N, all options in this submenu will be skipped and disabled.
 
 	  If unsure, say N.
 
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 1c11df9a5f3..9bf2986a278 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -205,8 +205,8 @@ config SATA_VITESSE
 	  If unsure, say N.
 
 config SATA_INIC162X
-	tristate "Initio 162x SATA support (HIGHLY EXPERIMENTAL)"
-	depends on PCI && EXPERIMENTAL
+	tristate "Initio 162x SATA support"
+	depends on PCI
 	help
 	  This option enables support for Initio 162x Serial ATA.
 
@@ -697,6 +697,15 @@ config PATA_SCC
 
 	  If unsure, say N.
 
+config PATA_SCH
+	tristate "Intel SCH PATA support"
+	depends on PCI
+	help
+	  This option enables support for Intel SCH PATA on the Intel
+	  SCH (US15W, US15L, UL11L) series host controllers.
+
+	  If unsure, say N.
+
 config PATA_BF54X
 	tristate "Blackfin 54x ATAPI support"
 	depends on BF542 || BF548 || BF549
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index b693d829383..674965fa326 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_PATA_SIS) += pata_sis.o
 obj-$(CONFIG_PATA_TRIFLEX)	+= pata_triflex.o
 obj-$(CONFIG_PATA_IXP4XX_CF)	+= pata_ixp4xx_cf.o
 obj-$(CONFIG_PATA_SCC)		+= pata_scc.o
+obj-$(CONFIG_PATA_SCH)		+= pata_sch.o
 obj-$(CONFIG_PATA_BF54X)	+= pata_bf54x.o
 obj-$(CONFIG_PATA_PLATFORM)	+= pata_platform.o
 obj-$(CONFIG_PATA_OF_PLATFORM)	+= pata_of_platform.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 8cace9aa9c0..97f83fb2ee2 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1267,9 +1267,7 @@ static int ahci_check_ready(struct ata_link *link)
 	void __iomem *port_mmio = ahci_port_base(link->ap);
 	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
 
-	if (!(status & ATA_BUSY))
-		return 1;
-	return 0;
+	return ata_check_ready(status);
 }
 
 static int ahci_softreset(struct ata_link *link, unsigned int *class,
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 47aeccd52fa..75a406f5e69 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -152,6 +152,12 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
 	if (dev->vendor == PCI_VENDOR_ID_AL)
 		ata_pci_bmdma_clear_simplex(dev);
 
+	if (dev->vendor == PCI_VENDOR_ID_ATI) {
+		int rc = pcim_enable_device(dev);
+		if (rc < 0)
+			return rc;
+		pcim_pin_device(dev);
+	}
 	return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL);
 }
 
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index ea2c7649d39..a9027b8fbdd 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1348,6 +1348,8 @@ static void __devinit piix_init_sidpr(struct ata_host *host)
 {
 	struct pci_dev *pdev = to_pci_dev(host->dev);
 	struct piix_host_priv *hpriv = host->private_data;
+	struct ata_device *dev0 = &host->ports[0]->link.device[0];
+	u32 scontrol;
 	int i;
 
 	/* check for availability */
@@ -1366,6 +1368,29 @@ static void __devinit piix_init_sidpr(struct ata_host *host)
 		return;
 
 	hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR];
+
+	/* SCR access via SIDPR doesn't work on some configurations.
+	 * Give it a test drive by inhibiting power save modes which
+	 * we'll do anyway.
+	 */
+	scontrol = piix_sidpr_read(dev0, SCR_CONTROL);
+
+	/* if IPM is already 3, SCR access is probably working.  Don't
+	 * un-inhibit power save modes as BIOS might have inhibited
+	 * them for a reason.
+	 */
+	if ((scontrol & 0xf00) != 0x300) {
+		scontrol |= 0x300;
+		piix_sidpr_write(dev0, SCR_CONTROL, scontrol);
+		scontrol = piix_sidpr_read(dev0, SCR_CONTROL);
+
+		if ((scontrol & 0xf00) != 0x300) {
+			dev_printk(KERN_INFO, host->dev, "SCR access via "
+				   "SIDPR is available but doesn't work\n");
+			return;
+		}
+	}
+
 	host->ports[0]->ops = &piix_sidpr_sata_ops;
 	host->ports[1]->ops = &piix_sidpr_sata_ops;
 }
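
Editor's note: the probe added above hinges on SControl's IPM field. A minimal user-space sketch of the field decoding involved (the bit layout — DET in bits 3:0, SPD in 7:4, IPM in 11:8 — is from the SATA spec; nothing in this sketch is part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t scontrol = 0x300;	/* the value the probe writes */

	/* IPM == 0x3 disallows transitions to both PARTIAL and
	 * SLUMBER; the hunk above writes it and checks it sticks. */
	printf("det=%u spd=%u ipm=%u\n",
	       scontrol & 0xf,		/* device detection control */
	       (scontrol >> 4) & 0xf,	/* speed limit */
	       (scontrol >> 8) & 0xf);	/* disallowed power states */
	return 0;
}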
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 3bc48853820..927b692d723 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6292,6 +6292,7 @@ EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
+EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
 EXPORT_SYMBOL_GPL(ata_do_eh);
 EXPORT_SYMBOL_GPL(ata_std_error_handler);
 
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 61dcd0026c6..62e033146be 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1357,7 +1357,7 @@ static void ata_eh_analyze_serror(struct ata_link *link)
  * LOCKING:
  *	Kernel thread context (may sleep).
  */
-static void ata_eh_analyze_ncq_error(struct ata_link *link)
+void ata_eh_analyze_ncq_error(struct ata_link *link)
 {
 	struct ata_port *ap = link->ap;
 	struct ata_eh_context *ehc = &link->eh_context;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 2ec65a8fda7..3c2d2289f85 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -314,11 +314,7 @@ static int ata_sff_check_ready(struct ata_link *link)
 {
 	u8 status = link->ap->ops->sff_check_status(link->ap);
 
-	if (!(status & ATA_BUSY))
-		return 1;
-	if (status == 0xff)
-		return -ENODEV;
-	return 0;
+	return ata_check_ready(status);
 }
 
 /**
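
Editor's note: this hunk and the ahci.c one above fold the open-coded BSY/0xff checks into a shared ata_check_ready() helper whose definition is not shown in this diff. A stand-alone sketch of the presumed behaviour, reconstructed from the lines removed here:

#include <stdio.h>

#define ATA_BUSY 0x80	/* BSY bit of the ATA status register */

/* Sketch of ata_check_ready(), inferred from the removed code:
 * 1 = ready (BSY clear), negative = no device (status floats at
 * 0xff), 0 = still busy.  -19 stands in for the kernel's -ENODEV. */
static int check_ready_sketch(unsigned char status)
{
	if (!(status & ATA_BUSY))
		return 1;	/* ready */
	if (status == 0xff)
		return -19;	/* floating bus, no device */
	return 0;		/* still busy */
}

int main(void)
{
	printf("%d %d %d\n",
	       check_ready_sketch(0x50),	/* DRDY|DSC: ready */
	       check_ready_sketch(0xff),	/* no device */
	       check_ready_sketch(0x80));	/* BSY set */
	return 0;
}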
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index c5f91e62994..fbe60571155 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -259,6 +259,12 @@ static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
 		.port_ops = &pacpi_ops,
 	};
 	const struct ata_port_info *ppi[] = { &info, NULL };
+	if (pdev->vendor == PCI_VENDOR_ID_ATI) {
+		int rc = pcim_enable_device(pdev);
+		if (rc < 0)
+			return rc;
+		pcim_pin_device(pdev);
+	}
 	return ata_pci_sff_init_one(pdev, ppi, &pacpi_sht, NULL);
 }
 
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
new file mode 100644
index 00000000000..c8cc027789f
--- /dev/null
+++ b/drivers/ata/pata_sch.c
@@ -0,0 +1,206 @@
+/*
+ *  pata_sch.c - Intel SCH PATA controllers
+ *
+ *  Copyright (c) 2008 Alek Du <alek.du@intel.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+/*
+ *  Supports:
+ *    Intel SCH (AF82US15W, AF82US15L, AF82UL11L) chipsets -- see spec at:
+ *    http://download.intel.com/design/chipsets/embedded/datashts/319537.pdf
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME	"pata_sch"
+#define DRV_VERSION	"0.2"
+
+/* see SCH datasheet page 351 */
+enum {
+	D0TIM	= 0x80,		/* Device 0 Timing Register */
+	D1TIM	= 0x84,		/* Device 1 Timing Register */
+	PM	= 0x07,		/* PIO Mode Bit Mask */
+	MDM	= (0x03 << 8),	/* Multi-word DMA Mode Bit Mask */
+	UDM	= (0x07 << 16),	/* Ultra DMA Mode Bit Mask */
+	PPE	= (1 << 30),	/* Prefetch/Post Enable */
+	USD	= (1 << 31),	/* Use Synchronous DMA */
+};
+
+static int sch_init_one(struct pci_dev *pdev,
+			const struct pci_device_id *ent);
+static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev);
+static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev);
+
+static const struct pci_device_id sch_pci_tbl[] = {
+	/* Intel SCH PATA Controller */
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SCH_IDE), 0 },
+	{ }	/* terminate list */
+};
+
+static struct pci_driver sch_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= sch_pci_tbl,
+	.probe			= sch_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+static struct scsi_host_template sch_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations sch_pata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.cable_detect		= ata_cable_unknown,
+	.set_piomode		= sch_set_piomode,
+	.set_dmamode		= sch_set_dmamode,
+};
+
+static struct ata_port_info sch_port_info = {
+	.flags		= 0,
+	.pio_mask	= ATA_PIO4,   /* pio0-4 */
+	.mwdma_mask	= ATA_MWDMA2, /* mwdma0-2 */
+	.udma_mask	= ATA_UDMA5,  /* udma0-5 */
+	.port_ops	= &sch_pata_ops,
+};
+
+MODULE_AUTHOR("Alek Du <alek.du@intel.com>");
+MODULE_DESCRIPTION("SCSI low-level driver for Intel SCH PATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sch_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+/**
+ *	sch_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: ATA device
+ *
+ *	Set PIO mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio	= adev->pio_mode - XFER_PIO_0;
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	unsigned int port	= adev->devno ? D1TIM : D0TIM;
+	unsigned int data;
+
+	pci_read_config_dword(dev, port, &data);
+	/* see SCH datasheet page 351 */
+	/* set PIO mode */
+	data &= ~(PM | PPE);
+	data |= pio;
+	/* enable PPE for block device */
+	if (adev->class == ATA_DEV_ATA)
+		data |= PPE;
+	pci_write_config_dword(dev, port, data);
+}
+
+/**
+ *	sch_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: ATA device
+ *
+ *	Set MW/UDMA mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int dma_mode	= adev->dma_mode;
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	unsigned int port	= adev->devno ? D1TIM : D0TIM;
+	unsigned int data;
+
+	pci_read_config_dword(dev, port, &data);
+	/* see SCH datasheet page 351 */
+	if (dma_mode >= XFER_UDMA_0) {
+		/* enable Synchronous DMA mode */
+		data |= USD;
+		data &= ~UDM;
+		data |= (dma_mode - XFER_UDMA_0) << 16;
+	} else { /* must be MWDMA mode, since we masked SWDMA already */
+		data &= ~(USD | MDM);
+		data |= (dma_mode - XFER_MW_DMA_0) << 8;
+	}
+	pci_write_config_dword(dev, port, data);
+}
+
+/**
+ *	sch_init_one - Register SCH ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in sch_pci_tbl matching with @pdev
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int __devinit sch_init_one(struct pci_dev *pdev,
+				  const struct pci_device_id *ent)
+{
+	static int printed_version;
+	const struct ata_port_info *ppi[] = { &sch_port_info, NULL };
+	struct ata_host *host;
+	int rc;
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev,
+			   "version " DRV_VERSION "\n");
+
+	/* enable device and prepare host */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+	if (rc)
+		return rc;
+	pci_set_master(pdev);
+	return ata_pci_sff_activate_host(host, ata_sff_interrupt, &sch_sht);
+}
+
+static int __init sch_init(void)
+{
+	return pci_register_driver(&sch_pci_driver);
+}
+
+static void __exit sch_exit(void)
+{
+	pci_unregister_driver(&sch_pci_driver);
+}
+
+module_init(sch_init);
+module_exit(sch_exit);
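
Editor's note: to make the timing-register arithmetic in sch_set_dmamode() above concrete, here is a worked user-space example composing the D0TIM dword for UDMA5 (the USD/UDM layout is copied from the driver's enum; the rest is illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

#define UDM	(0x07u << 16)	/* Ultra DMA Mode Bit Mask, per the enum */
#define USD	(1u << 31)	/* Use Synchronous DMA, per the enum */

int main(void)
{
	uint32_t data = 0;		/* stand-in for the config-space read */
	unsigned int udma_mode = 5;	/* XFER_UDMA_5 - XFER_UDMA_0 */

	data |= USD;			/* enable synchronous DMA */
	data &= ~UDM;			/* clear any previous UDMA mode */
	data |= udma_mode << 16;	/* select UDMA5 */

	printf("D0TIM = 0x%08x\n", data);	/* prints 0x80050000 */
	return 0;
}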
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index d27bb9a2568..3ead02fe379 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -10,13 +10,33 @@
  * right.  Documentation is available at initio's website but it only
  * documents registers (not programming model).
  *
- * - ATA disks work.
- * - Hotplug works.
- * - ATAPI read works but burning doesn't.  This thing is really
- *   peculiar about ATAPI and I couldn't figure out how ATAPI PIO and
- *   ATAPI DMA WRITE should be programmed.  If you've got a clue, be
- *   my guest.
- * - Both STR and STD work.
+ * This driver has interesting history.  The first version was written
+ * from the documentation and a 2.4 IDE driver posted by a Taiwan
+ * company, which didn't use any IDMA features and couldn't handle
+ * LBA48.  The resulting driver couldn't handle LBA48 devices either,
+ * making it pretty useless.
+ *
+ * After a while, initio picked the driver up, renamed it to
+ * sata_initio162x, updated it to use IDMA for ATA DMA commands and
+ * posted it on their website.  It only used ATA_PROT_DMA for IDMA and
+ * attaching both devices and issuing IDMA and !IDMA commands
+ * simultaneously broke it due to PIRQ masking interaction but it did
+ * show how to use the IDMA (ADMA + some initio specific twists)
+ * engine.
+
+ * Then, I picked up their changes again and here's the usable driver
+ * which uses IDMA for everything.  Everything works now including
+ * LBA48, CD/DVD burning, suspend/resume and hotplug.  There are some
+ * issues though.  Result TF is not reported properly, NCQ isn't
+ * supported yet and CD/DVD writing works with DMA assisted PIO
+ * protocol (which, for native SATA devices, shouldn't cause any
+ * noticeable difference).
+ *
+ * Anyways, so, here's finally a working driver for inic162x.  Enjoy!
+ *
+ * initio: If you guys wanna improve the driver regarding result TF
+ * access and other stuff, please feel free to contact me.  I'll be
+ * happy to assist.
  */
21 41
22#include <linux/kernel.h> 42#include <linux/kernel.h>
@@ -28,13 +48,19 @@
 #include <scsi/scsi_device.h>
 
 #define DRV_NAME	"sata_inic162x"
-#define DRV_VERSION	"0.3"
+#define DRV_VERSION	"0.4"
 
 enum {
-	MMIO_BAR		= 5,
+	MMIO_BAR_PCI		= 5,
+	MMIO_BAR_CARDBUS	= 1,
 
 	NR_PORTS		= 2,
 
+	IDMA_CPB_TBL_SIZE	= 4 * 32,
+
+	INIC_DMA_BOUNDARY	= 0xffffff,
+
+	HOST_ACTRL		= 0x08,
 	HOST_CTL		= 0x7c,
 	HOST_STAT		= 0x7e,
 	HOST_IRQ_STAT		= 0xbc,
@@ -43,22 +69,37 @@ enum {
 	PORT_SIZE		= 0x40,
 
 	/* registers for ATA TF operation */
-	PORT_TF			= 0x00,
-	PORT_ALT_STAT		= 0x08,
+	PORT_TF_DATA		= 0x00,
+	PORT_TF_FEATURE		= 0x01,
+	PORT_TF_NSECT		= 0x02,
+	PORT_TF_LBAL		= 0x03,
+	PORT_TF_LBAM		= 0x04,
+	PORT_TF_LBAH		= 0x05,
+	PORT_TF_DEVICE		= 0x06,
+	PORT_TF_COMMAND		= 0x07,
+	PORT_TF_ALT_STAT	= 0x08,
 	PORT_IRQ_STAT		= 0x09,
 	PORT_IRQ_MASK		= 0x0a,
 	PORT_PRD_CTL		= 0x0b,
 	PORT_PRD_ADDR		= 0x0c,
 	PORT_PRD_XFERLEN	= 0x10,
+	PORT_CPB_CPBLAR		= 0x18,
+	PORT_CPB_PTQFIFO	= 0x1c,
 
 	/* IDMA register */
 	PORT_IDMA_CTL		= 0x14,
+	PORT_IDMA_STAT		= 0x16,
+
+	PORT_RPQ_FIFO		= 0x1e,
+	PORT_RPQ_CNT		= 0x1f,
 
 	PORT_SCR		= 0x20,
 
 	/* HOST_CTL bits */
 	HCTL_IRQOFF		= (1 << 8),  /* global IRQ off */
-	HCTL_PWRDWN		= (1 << 13), /* power down PHYs */
+	HCTL_FTHD0		= (1 << 10), /* fifo threshold 0 */
+	HCTL_FTHD1		= (1 << 11), /* fifo threshold 1 */
+	HCTL_PWRDWN		= (1 << 12), /* power down PHYs */
 	HCTL_SOFTRST		= (1 << 13), /* global reset (no phy reset) */
 	HCTL_RPGSEL		= (1 << 15), /* register page select */
 
@@ -81,9 +122,7 @@ enum {
 	PIRQ_PENDING		= (1 << 7),  /* port IRQ pending (STAT only) */
 
 	PIRQ_ERR		= PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,
-
-	PIRQ_MASK_DMA_READ	= PIRQ_REPLY | PIRQ_ATA,
-	PIRQ_MASK_OTHER		= PIRQ_REPLY | PIRQ_COMPLETE,
+	PIRQ_MASK_DEFAULT	= PIRQ_REPLY | PIRQ_ATA,
 	PIRQ_MASK_FREEZE	= 0xff,
 
 	/* PORT_PRD_CTL bits */
@@ -96,20 +135,104 @@ enum {
 	IDMA_CTL_RST_IDMA	= (1 << 5),  /* reset IDMA machinery */
 	IDMA_CTL_GO		= (1 << 7),  /* IDMA mode go */
 	IDMA_CTL_ATA_NIEN	= (1 << 8),  /* ATA IRQ disable */
+
+	/* PORT_IDMA_STAT bits */
+	IDMA_STAT_PERR		= (1 << 0),  /* PCI ERROR MODE */
+	IDMA_STAT_CPBERR	= (1 << 1),  /* ADMA CPB error */
+	IDMA_STAT_LGCY		= (1 << 3),  /* ADMA legacy */
+	IDMA_STAT_UIRQ		= (1 << 4),  /* ADMA unsolicited irq */
+	IDMA_STAT_STPD		= (1 << 5),  /* ADMA stopped */
+	IDMA_STAT_PSD		= (1 << 6),  /* ADMA pause */
+	IDMA_STAT_DONE		= (1 << 7),  /* ADMA done */
+
+	IDMA_STAT_ERR		= IDMA_STAT_PERR | IDMA_STAT_CPBERR,
+
+	/* CPB Control Flags */
+	CPB_CTL_VALID		= (1 << 0),  /* CPB valid */
+	CPB_CTL_QUEUED		= (1 << 1),  /* queued command */
+	CPB_CTL_DATA		= (1 << 2),  /* data, rsvd in datasheet */
+	CPB_CTL_IEN		= (1 << 3),  /* PCI interrupt enable */
+	CPB_CTL_DEVDIR		= (1 << 4),  /* device direction control */
+
+	/* CPB Response Flags */
+	CPB_RESP_DONE		= (1 << 0),  /* ATA command complete */
+	CPB_RESP_REL		= (1 << 1),  /* ATA release */
+	CPB_RESP_IGNORED	= (1 << 2),  /* CPB ignored */
+	CPB_RESP_ATA_ERR	= (1 << 3),  /* ATA command error */
+	CPB_RESP_SPURIOUS	= (1 << 4),  /* ATA spurious interrupt error */
+	CPB_RESP_UNDERFLOW	= (1 << 5),  /* APRD deficiency length error */
+	CPB_RESP_OVERFLOW	= (1 << 6),  /* APRD excess length error */
+	CPB_RESP_CPB_ERR	= (1 << 7),  /* CPB error flag */
+
+	/* PRD Control Flags */
+	PRD_DRAIN		= (1 << 1),  /* ignore data excess */
+	PRD_CDB			= (1 << 2),  /* atapi packet command pointer */
+	PRD_DIRECT_INTR		= (1 << 3),  /* direct interrupt */
+	PRD_DMA			= (1 << 4),  /* data transfer method */
+	PRD_WRITE		= (1 << 5),  /* data dir, rsvd in datasheet */
+	PRD_IOM			= (1 << 6),  /* io/memory transfer */
+	PRD_END			= (1 << 7),  /* APRD chain end */
 };
 
+/* Command Parameter Block */
+struct inic_cpb {
+	u8		resp_flags;	/* Response Flags */
+	u8		error;		/* ATA Error */
+	u8		status;		/* ATA Status */
+	u8		ctl_flags;	/* Control Flags */
+	__le32		len;		/* Total Transfer Length */
+	__le32		prd;		/* First PRD pointer */
+	u8		rsvd[4];
+	/* 16 bytes */
+	u8		feature;	/* ATA Feature */
+	u8		hob_feature;	/* ATA Ex. Feature */
+	u8		device;		/* ATA Device/Head */
+	u8		mirctl;		/* Mirror Control */
+	u8		nsect;		/* ATA Sector Count */
+	u8		hob_nsect;	/* ATA Ex. Sector Count */
+	u8		lbal;		/* ATA Sector Number */
+	u8		hob_lbal;	/* ATA Ex. Sector Number */
+	u8		lbam;		/* ATA Cylinder Low */
+	u8		hob_lbam;	/* ATA Ex. Cylinder Low */
+	u8		lbah;		/* ATA Cylinder High */
+	u8		hob_lbah;	/* ATA Ex. Cylinder High */
+	u8		command;	/* ATA Command */
+	u8		ctl;		/* ATA Control */
+	u8		slave_error;	/* Slave ATA Error */
+	u8		slave_status;	/* Slave ATA Status */
+	/* 32 bytes */
+} __packed;
+
+/* Physical Region Descriptor */
+struct inic_prd {
+	__le32		mad;		/* Physical Memory Address */
+	__le16		len;		/* Transfer Length */
+	u8		rsvd;
+	u8		flags;		/* Control Flags */
+} __packed;
+
+struct inic_pkt {
+	struct inic_cpb	cpb;
+	struct inic_prd	prd[LIBATA_MAX_PRD + 1];	/* + 1 for cdb */
+	u8		cdb[ATAPI_CDB_LEN];
+} __packed;
+
 struct inic_host_priv {
-	u16		cached_hctl;
+	void __iomem	*mmio_base;
+	u16		cached_hctl;
 };
 
 struct inic_port_priv {
-	u8		dfl_prdctl;
-	u8		cached_prdctl;
-	u8		cached_pirq_mask;
+	struct inic_pkt	*pkt;
+	dma_addr_t	pkt_dma;
+	u32		*cpb_tbl;
+	dma_addr_t	cpb_tbl_dma;
 };
 
 static struct scsi_host_template inic_sht = {
-	ATA_BMDMA_SHT(DRV_NAME),
+	ATA_BASE_SHT(DRV_NAME),
+	.sg_tablesize	= LIBATA_MAX_PRD,	/* maybe it can be larger? */
+	.dma_boundary	= INIC_DMA_BOUNDARY,
 };
 
 static const int scr_map[] = {
@@ -120,54 +243,34 @@ static const int scr_map[] = {
 
 static void __iomem *inic_port_base(struct ata_port *ap)
 {
-	return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE;
-}
-
-static void __inic_set_pirq_mask(struct ata_port *ap, u8 mask)
-{
-	void __iomem *port_base = inic_port_base(ap);
-	struct inic_port_priv *pp = ap->private_data;
+	struct inic_host_priv *hpriv = ap->host->private_data;
 
-	writeb(mask, port_base + PORT_IRQ_MASK);
-	pp->cached_pirq_mask = mask;
-}
-
-static void inic_set_pirq_mask(struct ata_port *ap, u8 mask)
-{
-	struct inic_port_priv *pp = ap->private_data;
-
-	if (pp->cached_pirq_mask != mask)
-		__inic_set_pirq_mask(ap, mask);
+	return hpriv->mmio_base + ap->port_no * PORT_SIZE;
 }
 
 static void inic_reset_port(void __iomem *port_base)
 {
 	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
-	u16 ctl;
 
-	ctl = readw(idma_ctl);
-	ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO);
+	/* stop IDMA engine */
+	readw(idma_ctl); /* flush */
+	msleep(1);
 
 	/* mask IRQ and assert reset */
-	writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl);
+	writew(IDMA_CTL_RST_IDMA, idma_ctl);
 	readw(idma_ctl); /* flush */
-
-	/* give it some time */
 	msleep(1);
 
 	/* release reset */
-	writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl);
+	writew(0, idma_ctl);
 
 	/* clear irq */
 	writeb(0xff, port_base + PORT_IRQ_STAT);
-
-	/* reenable ATA IRQ, turn off IDMA mode */
-	writew(ctl, idma_ctl);
 }
 
 static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
 {
-	void __iomem *scr_addr = ap->ioaddr.scr_addr;
+	void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR;
 	void __iomem *addr;
 
 	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
@@ -184,120 +287,126 @@ static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
 
 static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
 {
-	void __iomem *scr_addr = ap->ioaddr.scr_addr;
-	void __iomem *addr;
+	void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR;
 
 	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
 		return -EINVAL;
 
-	addr = scr_addr + scr_map[sc_reg] * 4;
 	writel(val, scr_addr + scr_map[sc_reg] * 4);
 	return 0;
 }
 
-/*
- * In TF mode, inic162x is very similar to SFF device.  TF registers
- * function the same.  DMA engine behaves similary using the same PRD
- * format as BMDMA but different command register, interrupt and event
- * notification methods are used.  The following inic_bmdma_*()
- * functions do the impedance matching.
- */
-static void inic_bmdma_setup(struct ata_queued_cmd *qc)
+static void inic_stop_idma(struct ata_port *ap)
 {
-	struct ata_port *ap = qc->ap;
-	struct inic_port_priv *pp = ap->private_data;
 	void __iomem *port_base = inic_port_base(ap);
-	int rw = qc->tf.flags & ATA_TFLAG_WRITE;
-
-	/* make sure device sees PRD table writes */
-	wmb();
-
-	/* load transfer length */
-	writel(qc->nbytes, port_base + PORT_PRD_XFERLEN);
-
-	/* turn on DMA and specify data direction */
-	pp->cached_prdctl = pp->dfl_prdctl | PRD_CTL_DMAEN;
-	if (!rw)
-		pp->cached_prdctl |= PRD_CTL_WR;
-	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
 
-	/* issue r/w command */
-	ap->ops->sff_exec_command(ap, &qc->tf);
+	readb(port_base + PORT_RPQ_FIFO);
+	readb(port_base + PORT_RPQ_CNT);
+	writew(0, port_base + PORT_IDMA_CTL);
 }
 
-static void inic_bmdma_start(struct ata_queued_cmd *qc)
+static void inic_host_err_intr(struct ata_port *ap, u8 irq_stat, u16 idma_stat)
 {
-	struct ata_port *ap = qc->ap;
-	struct inic_port_priv *pp = ap->private_data;
-	void __iomem *port_base = inic_port_base(ap);
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	struct inic_port_priv *pp = ap->private_data;
+	struct inic_cpb *cpb = &pp->pkt->cpb;
+	bool freeze = false;
 
-	/* start host DMA transaction */
-	pp->cached_prdctl |= PRD_CTL_START;
-	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
-}
+	ata_ehi_clear_desc(ehi);
+	ata_ehi_push_desc(ehi, "irq_stat=0x%x idma_stat=0x%x",
+			  irq_stat, idma_stat);
 
-static void inic_bmdma_stop(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	struct inic_port_priv *pp = ap->private_data;
-	void __iomem *port_base = inic_port_base(ap);
+	inic_stop_idma(ap);
 
-	/* stop DMA engine */
-	writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
-}
+	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
+		ata_ehi_push_desc(ehi, "hotplug");
+		ata_ehi_hotplugged(ehi);
+		freeze = true;
+	}
 
-static u8 inic_bmdma_status(struct ata_port *ap)
-{
-	/* event is already verified by the interrupt handler */
-	return ATA_DMA_INTR;
+	if (idma_stat & IDMA_STAT_PERR) {
+		ata_ehi_push_desc(ehi, "PCI error");
+		freeze = true;
+	}
+
+	if (idma_stat & IDMA_STAT_CPBERR) {
+		ata_ehi_push_desc(ehi, "CPB error");
+
+		if (cpb->resp_flags & CPB_RESP_IGNORED) {
+			__ata_ehi_push_desc(ehi, " ignored");
+			ehi->err_mask |= AC_ERR_INVALID;
+			freeze = true;
+		}
+
+		if (cpb->resp_flags & CPB_RESP_ATA_ERR)
+			ehi->err_mask |= AC_ERR_DEV;
+
+		if (cpb->resp_flags & CPB_RESP_SPURIOUS) {
+			__ata_ehi_push_desc(ehi, " spurious-intr");
+			ehi->err_mask |= AC_ERR_HSM;
+			freeze = true;
+		}
+
+		if (cpb->resp_flags &
+		    (CPB_RESP_UNDERFLOW | CPB_RESP_OVERFLOW)) {
+			__ata_ehi_push_desc(ehi, " data-over/underflow");
+			ehi->err_mask |= AC_ERR_HSM;
+			freeze = true;
+		}
+	}
+
+	if (freeze)
+		ata_port_freeze(ap);
+	else
+		ata_port_abort(ap);
 }
 
 static void inic_host_intr(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
-	struct ata_eh_info *ehi = &ap->link.eh_info;
+	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
 	u8 irq_stat;
+	u16 idma_stat;
 
-	/* fetch and clear irq */
+	/* read and clear IRQ status */
 	irq_stat = readb(port_base + PORT_IRQ_STAT);
 	writeb(irq_stat, port_base + PORT_IRQ_STAT);
+	idma_stat = readw(port_base + PORT_IDMA_STAT);
 
-	if (likely(!(irq_stat & PIRQ_ERR))) {
-		struct ata_queued_cmd *qc =
-			ata_qc_from_tag(ap, ap->link.active_tag);
+	if (unlikely((irq_stat & PIRQ_ERR) || (idma_stat & IDMA_STAT_ERR)))
+		inic_host_err_intr(ap, irq_stat, idma_stat);
 
-		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
-			ap->ops->sff_check_status(ap); /* clear ATA interrupt */
-			return;
-		}
+	if (unlikely(!qc))
+		goto spurious;
 
-		if (likely(ata_sff_host_intr(ap, qc)))
-			return;
+	if (likely(idma_stat & IDMA_STAT_DONE)) {
+		inic_stop_idma(ap);
 
-		ap->ops->sff_check_status(ap); /* clear ATA interrupt */
-		ata_port_printk(ap, KERN_WARNING, "unhandled "
-				"interrupt, irq_stat=%x\n", irq_stat);
+		/* Depending on circumstances, device error
+		 * isn't reported by IDMA, check it explicitly.
+		 */
+		if (unlikely(readb(port_base + PORT_TF_COMMAND) &
+			     (ATA_DF | ATA_ERR)))
+			qc->err_mask |= AC_ERR_DEV;
+
+		ata_qc_complete(qc);
 		return;
 	}
 
-	/* error */
-	ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat);
-
-	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
-		ata_ehi_hotplugged(ehi);
-		ata_port_freeze(ap);
-	} else
-		ata_port_abort(ap);
+ spurious:
+	ata_port_printk(ap, KERN_WARNING, "unhandled interrupt: "
+			"cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n",
+			qc ? qc->tf.command : 0xff, irq_stat, idma_stat);
 }
 
 static irqreturn_t inic_interrupt(int irq, void *dev_instance)
 {
 	struct ata_host *host = dev_instance;
-	void __iomem *mmio_base = host->iomap[MMIO_BAR];
+	struct inic_host_priv *hpriv = host->private_data;
 	u16 host_irq_stat;
 	int i, handled = 0;
 
-	host_irq_stat = readw(mmio_base + HOST_IRQ_STAT);
+	host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT);
 
 	if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
 		goto out;
@@ -327,60 +436,173 @@ static irqreturn_t inic_interrupt(int irq, void *dev_instance)
 	return IRQ_RETVAL(handled);
 }
 
+static int inic_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	/* For some reason ATAPI_PROT_DMA doesn't work for some
+	 * commands including writes and other misc ops.  Use PIO
+	 * protocol instead, which BTW is driven by the DMA engine
+	 * anyway, so it shouldn't make much difference for native
+	 * SATA devices.
+	 */
+	if (atapi_cmd_type(qc->cdb[0]) == READ)
+		return 0;
+	return 1;
+}
+
+static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
+{
+	struct scatterlist *sg;
+	unsigned int si;
+	u8 flags = 0;
+
+	if (qc->tf.flags & ATA_TFLAG_WRITE)
+		flags |= PRD_WRITE;
+
+	if (ata_is_dma(qc->tf.protocol))
+		flags |= PRD_DMA;
+
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		prd->mad = cpu_to_le32(sg_dma_address(sg));
+		prd->len = cpu_to_le16(sg_dma_len(sg));
+		prd->flags = flags;
+		prd++;
+	}
+
+	WARN_ON(!si);
+	prd[-1].flags |= PRD_END;
+}
+
+static void inic_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct inic_port_priv *pp = qc->ap->private_data;
+	struct inic_pkt *pkt = pp->pkt;
+	struct inic_cpb *cpb = &pkt->cpb;
+	struct inic_prd *prd = pkt->prd;
+	bool is_atapi = ata_is_atapi(qc->tf.protocol);
+	bool is_data = ata_is_data(qc->tf.protocol);
+	unsigned int cdb_len = 0;
+
+	VPRINTK("ENTER\n");
+
+	if (is_atapi)
+		cdb_len = qc->dev->cdb_len;
+
+	/* prepare packet, based on initio driver */
+	memset(pkt, 0, sizeof(struct inic_pkt));
+
+	cpb->ctl_flags = CPB_CTL_VALID | CPB_CTL_IEN;
+	if (is_atapi || is_data)
+		cpb->ctl_flags |= CPB_CTL_DATA;
+
+	cpb->len = cpu_to_le32(qc->nbytes + cdb_len);
+	cpb->prd = cpu_to_le32(pp->pkt_dma + offsetof(struct inic_pkt, prd));
+
+	cpb->device = qc->tf.device;
+	cpb->feature = qc->tf.feature;
+	cpb->nsect = qc->tf.nsect;
+	cpb->lbal = qc->tf.lbal;
+	cpb->lbam = qc->tf.lbam;
+	cpb->lbah = qc->tf.lbah;
+
+	if (qc->tf.flags & ATA_TFLAG_LBA48) {
+		cpb->hob_feature = qc->tf.hob_feature;
+		cpb->hob_nsect = qc->tf.hob_nsect;
+		cpb->hob_lbal = qc->tf.hob_lbal;
+		cpb->hob_lbam = qc->tf.hob_lbam;
+		cpb->hob_lbah = qc->tf.hob_lbah;
+	}
+
+	cpb->command = qc->tf.command;
+	/* don't load ctl - dunno why.  it's like that in the initio driver */
+
+	/* setup PRD for CDB */
+	if (is_atapi) {
+		memcpy(pkt->cdb, qc->cdb, ATAPI_CDB_LEN);
+		prd->mad = cpu_to_le32(pp->pkt_dma +
+				       offsetof(struct inic_pkt, cdb));
+		prd->len = cpu_to_le16(cdb_len);
+		prd->flags = PRD_CDB | PRD_WRITE;
+		if (!is_data)
+			prd->flags |= PRD_END;
+		prd++;
+	}
+
+	/* setup sg table */
+	if (is_data)
+		inic_fill_sg(prd, qc);
+
+	pp->cpb_tbl[0] = pp->pkt_dma;
+}
+
 static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
+	void __iomem *port_base = inic_port_base(ap);
 
-	/* ATA IRQ doesn't wait for DMA transfer completion and vice
-	 * versa.  Mask IRQ selectively to detect command completion.
-	 * Without it, ATA DMA read command can cause data corruption.
-	 *
-	 * Something similar might be needed for ATAPI writes.  I
-	 * tried a lot of combinations but couldn't find the solution.
-	 */
-	if (qc->tf.protocol == ATA_PROT_DMA &&
-	    !(qc->tf.flags & ATA_TFLAG_WRITE))
-		inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ);
-	else
-		inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);
-
-	/* Issuing a command to yet uninitialized port locks up the
-	 * controller.  Most of the time, this happens for the first
-	 * command after reset which are ATA and ATAPI IDENTIFYs.
-	 * Fast fail if stat is 0x7f or 0xff for those commands.
-	 */
-	if (unlikely(qc->tf.command == ATA_CMD_ID_ATA ||
-		     qc->tf.command == ATA_CMD_ID_ATAPI)) {
-		u8 stat = ap->ops->sff_check_status(ap);
-		if (stat == 0x7f || stat == 0xff)
-			return AC_ERR_HSM;
-	}
+	/* fire up the ADMA engine */
+	writew(HCTL_FTHD0, port_base + HOST_CTL);
+	writew(IDMA_CTL_GO, port_base + PORT_IDMA_CTL);
+	writeb(0, port_base + PORT_CPB_PTQFIFO);
+
+	return 0;
+}
+
+static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	void __iomem *port_base = inic_port_base(ap);
+
+	tf->feature	= readb(port_base + PORT_TF_FEATURE);
+	tf->nsect	= readb(port_base + PORT_TF_NSECT);
+	tf->lbal	= readb(port_base + PORT_TF_LBAL);
+	tf->lbam	= readb(port_base + PORT_TF_LBAM);
+	tf->lbah	= readb(port_base + PORT_TF_LBAH);
+	tf->device	= readb(port_base + PORT_TF_DEVICE);
+	tf->command	= readb(port_base + PORT_TF_COMMAND);
+}
 
-	return ata_sff_qc_issue(qc);
+static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+	struct ata_taskfile *rtf = &qc->result_tf;
+	struct ata_taskfile tf;
+
+	/* FIXME: Except for status and error, result TF access
+	 * doesn't work.  I tried reading from BAR0/2, CPB and BAR5.
+	 * None works regardless of which command interface is used.
+	 * For now return true iff status indicates device error.
+	 * This means that we're reporting bogus sector for RW
+	 * failures.  Eeekk....
+	 */
+	inic_tf_read(qc->ap, &tf);
+
+	if (!(tf.command & ATA_ERR))
+		return false;
+
+	rtf->command = tf.command;
+	rtf->feature = tf.feature;
+	return true;
 }
 
 static void inic_freeze(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
 
-	__inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE);
-
-	ap->ops->sff_check_status(ap);
+	writeb(PIRQ_MASK_FREEZE, port_base + PORT_IRQ_MASK);
 	writeb(0xff, port_base + PORT_IRQ_STAT);
-
-	readb(port_base + PORT_IRQ_STAT); /* flush */
 }
 
 static void inic_thaw(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
 
-	ap->ops->sff_check_status(ap);
 	writeb(0xff, port_base + PORT_IRQ_STAT);
+	writeb(PIRQ_MASK_DEFAULT, port_base + PORT_IRQ_MASK);
+}
 
-	__inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);
+static int inic_check_ready(struct ata_link *link)
+{
+	void __iomem *port_base = inic_port_base(link->ap);
 
-	readb(port_base + PORT_IRQ_STAT); /* flush */
+	return ata_check_ready(readb(port_base + PORT_TF_COMMAND));
 }
 
 /*
@@ -394,17 +616,15 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
 	void __iomem *port_base = inic_port_base(ap);
 	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
-	u16 val;
 	int rc;
 
 	/* hammer it into sane state */
 	inic_reset_port(port_base);
 
-	val = readw(idma_ctl);
-	writew(val | IDMA_CTL_RST_ATA, idma_ctl);
+	writew(IDMA_CTL_RST_ATA, idma_ctl);
 	readw(idma_ctl);	/* flush */
 	msleep(1);
-	writew(val & ~IDMA_CTL_RST_ATA, idma_ctl);
+	writew(0, idma_ctl);
 
 	rc = sata_link_resume(link, timing, deadline);
 	if (rc) {
@@ -418,7 +638,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
 		struct ata_taskfile tf;
 
 		/* wait for link to become ready */
-		rc = ata_sff_wait_after_reset(link, 1, deadline);
+		rc = ata_wait_after_reset(link, deadline, inic_check_ready);
 		/* link occupied, -ENODEV too is an error */
 		if (rc) {
 			ata_link_printk(link, KERN_WARNING, "device not ready "
@@ -426,7 +646,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
 			return rc;
 		}
 
-		ata_sff_tf_read(ap, &tf);
+		inic_tf_read(ap, &tf);
 		*class = ata_dev_classify(&tf);
 	}
 
@@ -436,18 +656,8 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
 static void inic_error_handler(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
-	struct inic_port_priv *pp = ap->private_data;
-	unsigned long flags;
 
-	/* reset PIO HSM and stop DMA engine */
 	inic_reset_port(port_base);
-
-	spin_lock_irqsave(ap->lock, flags);
-	ap->hsm_task_state = HSM_ST_IDLE;
-	writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
-	spin_unlock_irqrestore(ap->lock, flags);
-
-	/* PIO and DMA engines have been stopped, perform recovery */
 	ata_std_error_handler(ap);
 }
 
@@ -458,26 +668,18 @@ static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
 	inic_reset_port(inic_port_base(qc->ap));
 }
 
-static void inic_dev_config(struct ata_device *dev)
-{
-	/* inic can only handle upto LBA28 max sectors */
-	if (dev->max_sectors > ATA_MAX_SECTORS)
-		dev->max_sectors = ATA_MAX_SECTORS;
-
-	if (dev->n_sectors >= 1 << 28) {
-		ata_dev_printk(dev, KERN_ERR,
-	"ERROR: This driver doesn't support LBA48 yet and may cause\n"
-	"                data corruption on such devices.  Disabling.\n");
-		ata_dev_disable(dev);
-	}
-}
-
 static void init_port(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
+	struct inic_port_priv *pp = ap->private_data;
 
-	/* Setup PRD address */
+	/* clear packet and CPB table */
+	memset(pp->pkt, 0, sizeof(struct inic_pkt));
+	memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE);
+
+	/* setup PRD and CPB lookup table addresses */
 	writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
+	writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR);
 }
 
 static int inic_port_resume(struct ata_port *ap)
@@ -488,28 +690,30 @@ static int inic_port_resume(struct ata_port *ap)
 
 static int inic_port_start(struct ata_port *ap)
 {
-	void __iomem *port_base = inic_port_base(ap);
+	struct device *dev = ap->host->dev;
 	struct inic_port_priv *pp;
-	u8 tmp;
 	int rc;
 
 	/* alloc and initialize private data */
-	pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL);
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 	ap->private_data = pp;
 
-	/* default PRD_CTL value, DMAEN, WR and START off */
-	tmp = readb(port_base + PORT_PRD_CTL);
-	tmp &= ~(PRD_CTL_DMAEN | PRD_CTL_WR | PRD_CTL_START);
-	pp->dfl_prdctl = tmp;
-
 	/* Alloc resources */
 	rc = ata_port_start(ap);
-	if (rc) {
-		kfree(pp);
+	if (rc)
 		return rc;
-	}
+
+	pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
+				      &pp->pkt_dma, GFP_KERNEL);
+	if (!pp->pkt)
+		return -ENOMEM;
+
+	pp->cpb_tbl = dmam_alloc_coherent(dev, IDMA_CPB_TBL_SIZE,
+					  &pp->cpb_tbl_dma, GFP_KERNEL);
+	if (!pp->cpb_tbl)
+		return -ENOMEM;
 
 	init_port(ap);
 
@@ -517,21 +721,18 @@ static int inic_port_start(struct ata_port *ap)
 }
 
 static struct ata_port_operations inic_port_ops = {
-	.inherits		= &ata_sff_port_ops,
+	.inherits		= &sata_port_ops,
 
-	.bmdma_setup		= inic_bmdma_setup,
-	.bmdma_start		= inic_bmdma_start,
-	.bmdma_stop		= inic_bmdma_stop,
-	.bmdma_status		= inic_bmdma_status,
+	.check_atapi_dma	= inic_check_atapi_dma,
+	.qc_prep		= inic_qc_prep,
 	.qc_issue		= inic_qc_issue,
+	.qc_fill_rtf		= inic_qc_fill_rtf,
 
 	.freeze			= inic_freeze,
 	.thaw			= inic_thaw,
-	.softreset		= ATA_OP_NULL,	/* softreset is broken */
 	.hardreset		= inic_hardreset,
 	.error_handler		= inic_error_handler,
 	.post_internal_cmd	= inic_post_internal_cmd,
-	.dev_config		= inic_dev_config,
 
 	.scr_read		= inic_scr_read,
 	.scr_write		= inic_scr_write,
@@ -541,12 +742,6 @@ static struct ata_port_operations inic_port_ops = {
 };
 
 static struct ata_port_info inic_port_info = {
-	/* For some reason, ATAPI_PROT_PIO is broken on this
-	 * controller, and no, PIO_POLLING does't fix it.  It somehow
-	 * manages to report the wrong ireason and ignoring ireason
-	 * results in machine lock up.  Tell libata to always prefer
-	 * DMA.
-	 */
 	.flags			= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
 	.pio_mask		= 0x1f,	/* pio0-4 */
 	.mwdma_mask		= 0x07,	/* mwdma0-2 */
@@ -599,7 +794,6 @@ static int inic_pci_device_resume(struct pci_dev *pdev)
 {
 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
 	struct inic_host_priv *hpriv = host->private_data;
-	void __iomem *mmio_base = host->iomap[MMIO_BAR];
 	int rc;
 
 	rc = ata_pci_device_do_resume(pdev);
@@ -607,7 +801,7 @@ static int inic_pci_device_resume(struct pci_dev *pdev)
 		return rc;
 
 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
-		rc = init_controller(mmio_base, hpriv->cached_hctl);
+		rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
 		if (rc)
 			return rc;
 	}
@@ -625,6 +819,7 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct ata_host *host;
 	struct inic_host_priv *hpriv;
 	void __iomem * const *iomap;
+	int mmio_bar;
 	int i, rc;
 
 	if (!printed_version++)
@@ -638,38 +833,31 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	host->private_data = hpriv;
 
-	/* acquire resources and fill host */
+	/* Acquire resources and fill host.  Note that PCI and cardbus
+	 * use different BARs.
+	 */
 	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
 
-	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
+	if (pci_resource_flags(pdev, MMIO_BAR_PCI) & IORESOURCE_MEM)
+		mmio_bar = MMIO_BAR_PCI;
+	else
+		mmio_bar = MMIO_BAR_CARDBUS;
+
+	rc = pcim_iomap_regions(pdev, 1 << mmio_bar, DRV_NAME);
 	if (rc)
 		return rc;
 	host->iomap = iomap = pcim_iomap_table(pdev);
+	hpriv->mmio_base = iomap[mmio_bar];
+	hpriv->cached_hctl = readw(hpriv->mmio_base + HOST_CTL);
 
 	for (i = 0; i < NR_PORTS; i++) {
 		struct ata_port *ap = host->ports[i];
-		struct ata_ioports *port = &ap->ioaddr;
-		unsigned int offset = i * PORT_SIZE;
-
-		port->cmd_addr = iomap[2 * i];
-		port->altstatus_addr =
-		port->ctl_addr = (void __iomem *)
-			((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
-		port->scr_addr = iomap[MMIO_BAR] + offset + PORT_SCR;
-
-		ata_sff_std_ports(port);
-
-		ata_port_pbar_desc(ap, MMIO_BAR, -1, "mmio");
-		ata_port_pbar_desc(ap, MMIO_BAR, offset, "port");
-		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
-		  (unsigned long long)pci_resource_start(pdev, 2 * i),
-		  (unsigned long long)pci_resource_start(pdev, (2 * i + 1)) |
-				      ATA_PCI_CTL_OFS);
-	}
 
-	hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);
+		ata_port_pbar_desc(ap, mmio_bar, -1, "mmio");
+		ata_port_pbar_desc(ap, mmio_bar, i * PORT_SIZE, "port");
+	}
 
 	/* Set dma_mask.  This device doesn't support 64bit addressing. */
 	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
@@ -698,7 +886,7 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return rc;
 	}
 
-	rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl);
+	rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
 	if (rc) {
 		dev_printk(KERN_ERR, &pdev->dev,
 			   "failed to initialize controller\n");
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 842b1a15b78..bb73b222262 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -65,6 +65,7 @@
 #include <linux/platform_device.h>
 #include <linux/ata_platform.h>
 #include <linux/mbus.h>
+#include <linux/bitops.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -91,9 +92,9 @@ enum {
 	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),
 
 	MV_SATAHC0_REG_BASE	= 0x20000,
-	MV_FLASH_CTL		= 0x1046c,
-	MV_GPIO_PORT_CTL	= 0x104f0,
-	MV_RESET_CFG		= 0x180d8,
+	MV_FLASH_CTL_OFS	= 0x1046c,
+	MV_GPIO_PORT_CTL_OFS	= 0x104f0,
+	MV_RESET_CFG_OFS	= 0x180d8,
 
 	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
 	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
@@ -147,18 +148,21 @@ enum {
 	/* PCI interface registers */
 
 	PCI_COMMAND_OFS		= 0xc00,
+	PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */
 
 	PCI_MAIN_CMD_STS_OFS	= 0xd30,
 	STOP_PCI_MASTER		= (1 << 2),
 	PCI_MASTER_EMPTY	= (1 << 3),
 	GLOB_SFT_RST		= (1 << 4),
 
-	MV_PCI_MODE		= 0xd00,
+	MV_PCI_MODE_OFS		= 0xd00,
+	MV_PCI_MODE_MASK	= 0x30,
+
 	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
 	MV_PCI_DISC_TIMER	= 0xd04,
 	MV_PCI_MSI_TRIGGER	= 0xc38,
 	MV_PCI_SERR_MASK	= 0xc28,
-	MV_PCI_XBAR_TMOUT	= 0x1d04,
+	MV_PCI_XBAR_TMOUT_OFS	= 0x1d04,
 	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
 	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
 	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
@@ -225,16 +229,18 @@ enum {
225 PHY_MODE4 = 0x314, 229 PHY_MODE4 = 0x314,
226 PHY_MODE2 = 0x330, 230 PHY_MODE2 = 0x330,
227 SATA_IFCTL_OFS = 0x344, 231 SATA_IFCTL_OFS = 0x344,
232 SATA_TESTCTL_OFS = 0x348,
228 SATA_IFSTAT_OFS = 0x34c, 233 SATA_IFSTAT_OFS = 0x34c,
229 VENDOR_UNIQUE_FIS_OFS = 0x35c, 234 VENDOR_UNIQUE_FIS_OFS = 0x35c,
230 235
231 FIS_CFG_OFS = 0x360, 236 FISCFG_OFS = 0x360,
232 FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ 237 FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */
238 FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
233 239
234 MV5_PHY_MODE = 0x74, 240 MV5_PHY_MODE = 0x74,
235 MV5_LT_MODE = 0x30, 241 MV5_LTMODE_OFS = 0x30,
236 MV5_PHY_CTL = 0x0C, 242 MV5_PHY_CTL_OFS = 0x0C,
237 SATA_INTERFACE_CFG = 0x050, 243 SATA_INTERFACE_CFG_OFS = 0x050,
238 244
239 MV_M2_PREAMP_MASK = 0x7e0, 245 MV_M2_PREAMP_MASK = 0x7e0,
240 246
@@ -332,10 +338,16 @@ enum {
332 EDMA_CMD_OFS = 0x28, /* EDMA command register */ 338 EDMA_CMD_OFS = 0x28, /* EDMA command register */
333 EDMA_EN = (1 << 0), /* enable EDMA */ 339 EDMA_EN = (1 << 0), /* enable EDMA */
334 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */ 340 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
335 ATA_RST = (1 << 2), /* reset trans/link/phy */ 341 EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */
342
343 EDMA_STATUS_OFS = 0x30, /* EDMA engine status */
344 EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */
345 EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */
336 346
337 EDMA_IORDY_TMOUT = 0x34, 347 EDMA_IORDY_TMOUT_OFS = 0x34,
338 EDMA_ARB_CFG = 0x38, 348 EDMA_ARB_CFG_OFS = 0x38,
349
350 EDMA_HALTCOND_OFS = 0x60, /* GenIIe halt conditions */
339 351
340 GEN_II_NCQ_MAX_SECTORS = 256, /* max sects/io on Gen2 w/NCQ */ 352 GEN_II_NCQ_MAX_SECTORS = 256, /* max sects/io on Gen2 w/NCQ */
341 353
@@ -350,15 +362,19 @@ enum {
350 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ 362 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
351 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ 363 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
352 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ 364 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
365 MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */
353 366
354 /* Port private flags (pp_flags) */ 367 /* Port private flags (pp_flags) */
355 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ 368 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
356 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ 369 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
370 MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */
371 MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */
357}; 372};
358 373
359#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) 374#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
360#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) 375#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
361#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) 376#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
377#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
362#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC)) 378#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
363 379
364#define WINDOW_CTRL(i) (0x20030 + ((i) << 4)) 380#define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
@@ -433,6 +449,7 @@ struct mv_port_priv {
433 unsigned int resp_idx; 449 unsigned int resp_idx;
434 450
435 u32 pp_flags; 451 u32 pp_flags;
452 unsigned int delayed_eh_pmp_map;
436}; 453};
437 454
438struct mv_port_signal { 455struct mv_port_signal {
@@ -479,6 +496,7 @@ static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
479static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); 496static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
480static int mv_port_start(struct ata_port *ap); 497static int mv_port_start(struct ata_port *ap);
481static void mv_port_stop(struct ata_port *ap); 498static void mv_port_stop(struct ata_port *ap);
499static int mv_qc_defer(struct ata_queued_cmd *qc);
482static void mv_qc_prep(struct ata_queued_cmd *qc); 500static void mv_qc_prep(struct ata_queued_cmd *qc);
483static void mv_qc_prep_iie(struct ata_queued_cmd *qc); 501static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
484static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); 502static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
@@ -527,6 +545,9 @@ static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
527 unsigned long deadline); 545 unsigned long deadline);
528static int mv_softreset(struct ata_link *link, unsigned int *class, 546static int mv_softreset(struct ata_link *link, unsigned int *class,
529 unsigned long deadline); 547 unsigned long deadline);
548static void mv_pmp_error_handler(struct ata_port *ap);
549static void mv_process_crpb_entries(struct ata_port *ap,
550 struct mv_port_priv *pp);
530 551
531/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below 552/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
532 * because we have to allow room for worst case splitting of 553 * because we have to allow room for worst case splitting of
@@ -548,6 +569,7 @@ static struct scsi_host_template mv6_sht = {
548static struct ata_port_operations mv5_ops = { 569static struct ata_port_operations mv5_ops = {
549 .inherits = &ata_sff_port_ops, 570 .inherits = &ata_sff_port_ops,
550 571
572 .qc_defer = mv_qc_defer,
551 .qc_prep = mv_qc_prep, 573 .qc_prep = mv_qc_prep,
552 .qc_issue = mv_qc_issue, 574 .qc_issue = mv_qc_issue,
553 575
@@ -566,7 +588,6 @@ static struct ata_port_operations mv5_ops = {
566 588
567static struct ata_port_operations mv6_ops = { 589static struct ata_port_operations mv6_ops = {
568 .inherits = &mv5_ops, 590 .inherits = &mv5_ops,
569 .qc_defer = sata_pmp_qc_defer_cmd_switch,
570 .dev_config = mv6_dev_config, 591 .dev_config = mv6_dev_config,
571 .scr_read = mv_scr_read, 592 .scr_read = mv_scr_read,
572 .scr_write = mv_scr_write, 593 .scr_write = mv_scr_write,
@@ -574,12 +595,11 @@ static struct ata_port_operations mv6_ops = {
574 .pmp_hardreset = mv_pmp_hardreset, 595 .pmp_hardreset = mv_pmp_hardreset,
575 .pmp_softreset = mv_softreset, 596 .pmp_softreset = mv_softreset,
576 .softreset = mv_softreset, 597 .softreset = mv_softreset,
577 .error_handler = sata_pmp_error_handler, 598 .error_handler = mv_pmp_error_handler,
578}; 599};
579 600
580static struct ata_port_operations mv_iie_ops = { 601static struct ata_port_operations mv_iie_ops = {
581 .inherits = &mv6_ops, 602 .inherits = &mv6_ops,
582 .qc_defer = ata_std_qc_defer, /* FIS-based switching */
583 .dev_config = ATA_OP_NULL, 603 .dev_config = ATA_OP_NULL,
584 .qc_prep = mv_qc_prep_iie, 604 .qc_prep = mv_qc_prep_iie,
585}; 605};
@@ -875,6 +895,29 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
875 } 895 }
876} 896}
877 897
898static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
899{
900 void __iomem *port_mmio = mv_ap_base(ap);
901 const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
902 const int per_loop = 5, timeout = (15 * 1000 / per_loop);
903 int i;
904
905 /*
906 * Wait for the EDMA engine to finish transactions in progress.
907 * No idea what a good "timeout" value might be, but measurements
908 * indicate that it often requires hundreds of microseconds
909 * with two drives in-use. So we use the 15msec value above
910 * as a rough guess at what even more drives might require.
911 */
912 for (i = 0; i < timeout; ++i) {
913 u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
914 if ((edma_stat & empty_idle) == empty_idle)
915 break;
916 udelay(per_loop);
917 }
918 /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
919}
920
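The wait loop above is a standard bounded-polling idiom: pick a small per-iteration delay, derive the iteration count from the total time budget, and break out early once the condition holds. A minimal user-space sketch of the same pattern (the fake status register and bit names are illustrative stand-ins, not driver code):

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

#define STATUS_CACHE_EMPTY (1u << 6)   /* stand-ins for the EDMA_STATUS_* bits */
#define STATUS_IDLE        (1u << 7)

static volatile uint32_t fake_status;  /* pretend MMIO register */

/* Simulate hardware that goes idle after a while. */
static uint32_t read_status(int iteration)
{
    if (iteration > 40)
        fake_status = STATUS_CACHE_EMPTY | STATUS_IDLE;
    return fake_status;
}

int main(void)
{
    const uint32_t empty_idle = STATUS_CACHE_EMPTY | STATUS_IDLE;
    const int per_loop_us = 5;                    /* delay per poll */
    const int timeout = 15 * 1000 / per_loop_us;  /* 15 ms total budget */
    int i;

    for (i = 0; i < timeout; ++i) {
        if ((read_status(i) & empty_idle) == empty_idle)
            break;                                /* done early */
        usleep(per_loop_us);
    }
    printf("condition %s after %d polls (~%d us)\n",
           i < timeout ? "met" : "timed out", i, i * per_loop_us);
    return 0;
}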
878/** 921/**
879 * mv_stop_edma_engine - Disable eDMA engine 922 * mv_stop_edma_engine - Disable eDMA engine
880 * @port_mmio: io base address 923 * @port_mmio: io base address
@@ -907,6 +950,7 @@ static int mv_stop_edma(struct ata_port *ap)
907 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) 950 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
908 return 0; 951 return 0;
909 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 952 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
953 mv_wait_for_edma_empty_idle(ap);
910 if (mv_stop_edma_engine(port_mmio)) { 954 if (mv_stop_edma_engine(port_mmio)) {
911 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); 955 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
912 return -EIO; 956 return -EIO;
@@ -1057,26 +1101,95 @@ static void mv6_dev_config(struct ata_device *adev)
1057 } 1101 }
1058} 1102}
1059 1103
1060static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs) 1104static int mv_qc_defer(struct ata_queued_cmd *qc)
1061{ 1105{
1062 u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode; 1106 struct ata_link *link = qc->dev->link;
1107 struct ata_port *ap = link->ap;
1108 struct mv_port_priv *pp = ap->private_data;
1109
1110 /*
1111 * Don't allow new commands if we're in a delayed EH state
1112 * for NCQ and/or FIS-based switching.
1113 */
1114 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
1115 return ATA_DEFER_PORT;
1063 /* 1116 /*
1064 * Various bit settings required for operation 1117 * If the port is completely idle, then allow the new qc.
1065 * in FIS-based switching (fbs) mode on GenIIe:
1066 */ 1118 */
1067 old_fcfg = readl(port_mmio + FIS_CFG_OFS); 1119 if (ap->nr_active_links == 0)
1068 old_ltmode = readl(port_mmio + LTMODE_OFS); 1120 return 0;
1069 if (enable_fbs) { 1121
1070 new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC; 1122 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1071 new_ltmode = old_ltmode | LTMODE_BIT8; 1123 /*
1072 } else { /* disable fbs */ 1124 * The port is operating in host queuing mode (EDMA).
1073 new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC; 1125 * It can accommodate a new qc if the qc protocol
1074 new_ltmode = old_ltmode & ~LTMODE_BIT8; 1126 * is compatible with the current host queue mode.
1075 } 1127 */
1076 if (new_fcfg != old_fcfg) 1128 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
1077 writelfl(new_fcfg, port_mmio + FIS_CFG_OFS); 1129 /*
1130 * The host queue (EDMA) is in NCQ mode.
1131 * If the new qc is also an NCQ command,
1132 * then allow the new qc.
1133 */
1134 if (qc->tf.protocol == ATA_PROT_NCQ)
1135 return 0;
1136 } else {
1137 /*
1138 * The host queue (EDMA) is in non-NCQ, DMA mode.
1139 * If the new qc is also a non-NCQ, DMA command,
1140 * then allow the new qc.
1141 */
1142 if (qc->tf.protocol == ATA_PROT_DMA)
1143 return 0;
1144 }
1145 }
1146 return ATA_DEFER_PORT;
1147}
1148
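mv_qc_defer() above is essentially a small decision table: defer everything while delayed error handling is pending, accept anything on an idle port, and otherwise admit only commands whose protocol matches the active EDMA queue mode. A hedged restatement as a pure function (the struct and names are invented for illustration, not driver types):

#include <stdio.h>

enum proto { PROT_NCQ, PROT_DMA, PROT_PIO };

struct port_state {
    int delayed_eh;      /* error handling pending */
    int active_links;    /* commands in flight */
    int edma_enabled;    /* host queuing (EDMA) active */
    int ncq_mode;        /* EDMA configured for NCQ */
};

/* 0 = issue now, nonzero = defer (mirrors ATA_DEFER_PORT) */
static int qc_defer(const struct port_state *p, enum proto proto)
{
    if (p->delayed_eh)
        return 1;                 /* drain errors first */
    if (p->active_links == 0)
        return 0;                 /* idle port accepts anything */
    if (p->edma_enabled) {
        if (p->ncq_mode)
            return proto != PROT_NCQ;   /* NCQ queue: NCQ only */
        return proto != PROT_DMA;       /* DMA queue: DMA only */
    }
    return 1;                     /* busy in non-EDMA mode */
}

int main(void)
{
    struct port_state busy_ncq = { 0, 2, 1, 1 };

    printf("NCQ qc on NCQ queue: %s\n",
           qc_defer(&busy_ncq, PROT_NCQ) ? "defer" : "issue");
    printf("PIO qc on NCQ queue: %s\n",
           qc_defer(&busy_ncq, PROT_PIO) ? "defer" : "issue");
    return 0;
}

Keeping the policy side-effect-free like this makes each branch easy to exercise in isolation.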
1149static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs)
1150{
1151 u32 new_fiscfg, old_fiscfg;
1152 u32 new_ltmode, old_ltmode;
1153 u32 new_haltcond, old_haltcond;
1154
1155 old_fiscfg = readl(port_mmio + FISCFG_OFS);
1156 old_ltmode = readl(port_mmio + LTMODE_OFS);
1157 old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);
1158
1159 new_fiscfg = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
1160 new_ltmode = old_ltmode & ~LTMODE_BIT8;
1161 new_haltcond = old_haltcond | EDMA_ERR_DEV;
1162
1163 if (want_fbs) {
1164 new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC;
1165 new_ltmode = old_ltmode | LTMODE_BIT8;
1166 if (want_ncq)
1167 new_haltcond &= ~EDMA_ERR_DEV;
1168 else
1169 new_fiscfg |= FISCFG_WAIT_DEV_ERR;
1170 }
1171
1172 if (new_fiscfg != old_fiscfg)
1173 writelfl(new_fiscfg, port_mmio + FISCFG_OFS);
1078 if (new_ltmode != old_ltmode) 1174 if (new_ltmode != old_ltmode)
1079 writelfl(new_ltmode, port_mmio + LTMODE_OFS); 1175 writelfl(new_ltmode, port_mmio + LTMODE_OFS);
1176 if (new_haltcond != old_haltcond)
1177 writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS);
1178}
1179
1180static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
1181{
1182 struct mv_host_priv *hpriv = ap->host->private_data;
1183 u32 old, new;
1184
1185 /* workaround for 88SX60x1 FEr SATA#25 (part 1) */
1186 old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS);
1187 if (want_ncq)
1188 new = old | (1 << 22);
1189 else
1190 new = old & ~(1 << 22);
1191 if (new != old)
1192 writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
1080} 1193}
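Both mv_config_fbs() and the errata helper above follow the same read-modify-write shape: read the register once, compute the desired value, and write back only if it differs, sparing redundant MMIO writes. A small sketch of that idiom against a fake register (all names assumed):

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_reg = 0x00400000;   /* stand-in for a port-control register */
static int writes;

static uint32_t reg_read(void)    { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; writes++; }

/* Set or clear one feature bit, touching the "bus" only on real changes. */
static void set_feature(int enable, uint32_t bit)
{
    uint32_t old = reg_read();
    uint32_t new = enable ? (old | bit) : (old & ~bit);

    if (new != old)
        reg_write(new);
}

int main(void)
{
    const uint32_t BIT22 = 1u << 22;

    set_feature(1, BIT22);   /* already set: no write */
    set_feature(0, BIT22);   /* clears the bit: one write */
    set_feature(0, BIT22);   /* already clear: no write */
    printf("reg=0x%08x after %d write(s)\n", fake_reg, writes);
    return 0;
}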
1081 1194
1082static void mv_edma_cfg(struct ata_port *ap, int want_ncq) 1195static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
@@ -1088,25 +1201,40 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
1088 1201
1089 /* set up non-NCQ EDMA configuration */ 1202 /* set up non-NCQ EDMA configuration */
1090 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ 1203 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1204 pp->pp_flags &= ~MV_PP_FLAG_FBS_EN;
1091 1205
1092 if (IS_GEN_I(hpriv)) 1206 if (IS_GEN_I(hpriv))
1093 cfg |= (1 << 8); /* enab config burst size mask */ 1207 cfg |= (1 << 8); /* enab config burst size mask */
1094 1208
1095 else if (IS_GEN_II(hpriv)) 1209 else if (IS_GEN_II(hpriv)) {
1096 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; 1210 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1211 mv_60x1_errata_sata25(ap, want_ncq);
1097 1212
1098 else if (IS_GEN_IIE(hpriv)) { 1213 } else if (IS_GEN_IIE(hpriv)) {
1099 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ 1214 int want_fbs = sata_pmp_attached(ap);
1100 cfg |= (1 << 22); /* enab 4-entry host queue cache */ 1215 /*
1101 cfg |= (1 << 18); /* enab early completion */ 1216 * Possible future enhancement:
1102 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */ 1217 *
1218 * The chip can use FBS with non-NCQ, if we allow it,
1219 * But first we need to have the error handling in place
1220 * for this mode (datasheet section 7.3.15.4.2.3).
1221 * So disallow non-NCQ FBS for now.
1222 */
1223 want_fbs &= want_ncq;
1224
1225 mv_config_fbs(port_mmio, want_ncq, want_fbs);
1103 1226
1104 if (want_ncq && sata_pmp_attached(ap)) { 1227 if (want_fbs) {
1228 pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1105 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */ 1229 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1106 mv_config_fbs(port_mmio, 1);
1107 } else {
1108 mv_config_fbs(port_mmio, 0);
1109 } 1230 }
1231
1232 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1233 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1234 if (HAS_PCI(ap->host))
1235 cfg |= (1 << 18); /* enab early completion */
1236 if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1237 cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
1110 } 1238 }
1111 1239
1112 if (want_ncq) { 1240 if (want_ncq) {
@@ -1483,25 +1611,186 @@ static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
1483 return qc; 1611 return qc;
1484} 1612}
1485 1613
1486static void mv_unexpected_intr(struct ata_port *ap) 1614static void mv_pmp_error_handler(struct ata_port *ap)
1487{ 1615{
1616 unsigned int pmp, pmp_map;
1488 struct mv_port_priv *pp = ap->private_data; 1617 struct mv_port_priv *pp = ap->private_data;
1489 struct ata_eh_info *ehi = &ap->link.eh_info;
1490 char *when = "";
1491 1618
1619 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
1620 /*
1621 * Perform NCQ error analysis on failed PMPs
1622 * before we freeze the port entirely.
1623 *
1624 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
1625 */
1626 pmp_map = pp->delayed_eh_pmp_map;
1627 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
1628 for (pmp = 0; pmp_map != 0; pmp++) {
1629 unsigned int this_pmp = (1 << pmp);
1630 if (pmp_map & this_pmp) {
1631 struct ata_link *link = &ap->pmp_link[pmp];
1632 pmp_map &= ~this_pmp;
1633 ata_eh_analyze_ncq_error(link);
1634 }
1635 }
1636 ata_port_freeze(ap);
1637 }
1638 sata_pmp_error_handler(ap);
1639}
1640
1641static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
1642{
1643 void __iomem *port_mmio = mv_ap_base(ap);
1644
1645 return readl(port_mmio + SATA_TESTCTL_OFS) >> 16;
1646}
1647
1648static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
1649{
1650 struct ata_eh_info *ehi;
1651 unsigned int pmp;
1652
1653 /*
1654 * Initialize EH info for PMPs which saw device errors
1655 */
1656 ehi = &ap->link.eh_info;
1657 for (pmp = 0; pmp_map != 0; pmp++) {
1658 unsigned int this_pmp = (1 << pmp);
1659 if (pmp_map & this_pmp) {
1660 struct ata_link *link = &ap->pmp_link[pmp];
1661
1662 pmp_map &= ~this_pmp;
1663 ehi = &link->eh_info;
1664 ata_ehi_clear_desc(ehi);
1665 ata_ehi_push_desc(ehi, "dev err");
1666 ehi->err_mask |= AC_ERR_DEV;
1667 ehi->action |= ATA_EH_RESET;
1668 ata_link_abort(link);
1669 }
1670 }
1671}
1672
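The loop in mv_pmp_eh_prep() visits each set bit of the PMP map, clearing bits as it goes so the walk terminates as soon as the map runs empty. The same traversal in a self-contained sketch (printing stands in for the per-link EH setup):

#include <stdio.h>

/* Visit each set bit of a 16-bit map, clearing as we go,
 * in the same style as the mv_pmp_eh_prep() loop. */
static void for_each_pmp(unsigned int pmp_map)
{
    unsigned int pmp;

    for (pmp = 0; pmp_map != 0; pmp++) {
        unsigned int this_pmp = 1u << pmp;

        if (pmp_map & this_pmp) {
            pmp_map &= ~this_pmp;
            printf("handling PMP link %u\n", pmp);
        }
    }
}

int main(void)
{
    for_each_pmp(0x0015);   /* links 0, 2, 4 */
    return 0;
}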
1673static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
1674{
1675 struct mv_port_priv *pp = ap->private_data;
1676 int failed_links;
1677 unsigned int old_map, new_map;
1678
1679 /*
1680 * Device error during FBS+NCQ operation:
1681 *
1682 * Set a port flag to prevent further I/O being enqueued.
1683 * Leave the EDMA running to drain outstanding commands from this port.
1684 * Perform the post-mortem/EH only when all responses are complete.
1685 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
1686 */
1687 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
1688 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
1689 pp->delayed_eh_pmp_map = 0;
1690 }
1691 old_map = pp->delayed_eh_pmp_map;
1692 new_map = old_map | mv_get_err_pmp_map(ap);
1693
1694 if (old_map != new_map) {
1695 pp->delayed_eh_pmp_map = new_map;
1696 mv_pmp_eh_prep(ap, new_map & ~old_map);
1697 }
1698 failed_links = hweight16(new_map);
1699
1700 ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
1701 "failed_links=%d nr_active_links=%d\n",
1702 __func__, pp->delayed_eh_pmp_map,
1703 ap->qc_active, failed_links,
1704 ap->nr_active_links);
1705
1706 if (ap->nr_active_links <= failed_links) {
1707 mv_process_crpb_entries(ap, pp);
1708 mv_stop_edma(ap);
1709 mv_eh_freeze(ap);
1710 ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
1711 return 1; /* handled */
1712 }
1713 ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
1714 return 1; /* handled */
1715}
1716
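The delayed-EH bookkeeping above rests on two small bit tricks: new_map & ~old_map isolates links that failed since the previous pass, and a population count (hweight16() in the kernel) decides whether every active link has now failed. A user-space sketch using the GCC/Clang __builtin_popcount() as a stand-in:

#include <stdio.h>

int main(void)
{
    unsigned int old_map = 0x0003;            /* links 0,1 already failed */
    unsigned int new_map = old_map | 0x0014;  /* links 2,4 fail now */
    unsigned int newly_failed = new_map & ~old_map;
    int failed_links = __builtin_popcount(new_map); /* kernel: hweight16() */
    int nr_active_links = 4;

    printf("newly failed map: 0x%04x\n", newly_failed);
    printf("%d of %d active links failed -> %s\n",
           failed_links, nr_active_links,
           nr_active_links <= failed_links ? "run EH now" : "keep waiting");
    return 0;
}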
1717static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
1718{
1492 /* 1719 /*
1493 * We got a device interrupt from something that 1720 * Possible future enhancement:
1494 * was supposed to be using EDMA or polling. 1721 *
1722 * FBS+non-NCQ operation is not yet implemented.
1723 * See related notes in mv_edma_cfg().
1724 *
1725 * Device error during FBS+non-NCQ operation:
1726 *
1727 * We need to snapshot the shadow registers for each failed command.
1728 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
1495 */ 1729 */
1730 return 0; /* not handled */
1731}
1732
1733static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
1734{
1735 struct mv_port_priv *pp = ap->private_data;
1736
1737 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
1738 return 0; /* EDMA was not active: not handled */
1739 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
1740 return 0; /* FBS was not active: not handled */
1741
1742 if (!(edma_err_cause & EDMA_ERR_DEV))
1743 return 0; /* non DEV error: not handled */
1744 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
1745 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
1746 return 0; /* other problems: not handled */
1747
1748 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
1749 /*
1750 * EDMA should NOT have self-disabled for this case.
1751 * If it did, then something is wrong elsewhere,
1752 * and we cannot handle it here.
1753 */
1754 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1755 ata_port_printk(ap, KERN_WARNING,
1756 "%s: err_cause=0x%x pp_flags=0x%x\n",
1757 __func__, edma_err_cause, pp->pp_flags);
1758 return 0; /* not handled */
1759 }
1760 return mv_handle_fbs_ncq_dev_err(ap);
1761 } else {
1762 /*
1763 * EDMA should have self-disabled for this case.
1764 * If it did not, then something is wrong elsewhere,
1765 * and we cannot handle it here.
1766 */
1767 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
1768 ata_port_printk(ap, KERN_WARNING,
1769 "%s: err_cause=0x%x pp_flags=0x%x\n",
1770 __func__, edma_err_cause, pp->pp_flags);
1771 return 0; /* not handled */
1772 }
1773 return mv_handle_fbs_non_ncq_dev_err(ap);
1774 }
1775 return 0; /* not handled */
1776}
1777
1778static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
1779{
1780 struct ata_eh_info *ehi = &ap->link.eh_info;
1781 char *when = "idle";
1782
1496 ata_ehi_clear_desc(ehi); 1783 ata_ehi_clear_desc(ehi);
1497 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { 1784 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
1498 when = " while EDMA enabled"; 1785 when = "disabled";
1786 } else if (edma_was_enabled) {
1787 when = "EDMA enabled";
1499 } else { 1788 } else {
1500 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); 1789 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
1501 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) 1790 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1502 when = " while polling"; 1791 when = "polling";
1503 } 1792 }
1504 ata_ehi_push_desc(ehi, "unexpected device interrupt%s", when); 1793 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
1505 ehi->err_mask |= AC_ERR_OTHER; 1794 ehi->err_mask |= AC_ERR_OTHER;
1506 ehi->action |= ATA_EH_RESET; 1795 ehi->action |= ATA_EH_RESET;
1507 ata_port_freeze(ap); 1796 ata_port_freeze(ap);
@@ -1519,7 +1808,7 @@ static void mv_unexpected_intr(struct ata_port *ap)
1519 * LOCKING: 1808 * LOCKING:
1520 * Inherited from caller. 1809 * Inherited from caller.
1521 */ 1810 */
1522static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) 1811static void mv_err_intr(struct ata_port *ap)
1523{ 1812{
1524 void __iomem *port_mmio = mv_ap_base(ap); 1813 void __iomem *port_mmio = mv_ap_base(ap);
1525 u32 edma_err_cause, eh_freeze_mask, serr = 0; 1814 u32 edma_err_cause, eh_freeze_mask, serr = 0;
@@ -1527,24 +1816,42 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1527 struct mv_host_priv *hpriv = ap->host->private_data; 1816 struct mv_host_priv *hpriv = ap->host->private_data;
1528 unsigned int action = 0, err_mask = 0; 1817 unsigned int action = 0, err_mask = 0;
1529 struct ata_eh_info *ehi = &ap->link.eh_info; 1818 struct ata_eh_info *ehi = &ap->link.eh_info;
1530 1819 struct ata_queued_cmd *qc;
1531 ata_ehi_clear_desc(ehi); 1820 int abort = 0;
1532 1821
1533 /* 1822 /*
1534 * Read and clear the err_cause bits. This won't actually 1823 * Read and clear the SError and err_cause bits.
1535 * clear for some errors (eg. SError), but we will be doing
1536 * a hard reset in those cases regardless, which *will* clear it.
1537 */ 1824 */
1825 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1826 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1827
1538 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1828 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1539 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1829 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1540 1830
1541 ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause); 1831 ata_port_printk(ap, KERN_INFO, "%s: err_cause=%08x pp_flags=0x%x\n",
1832 __func__, edma_err_cause, pp->pp_flags);
1833
1834 if (edma_err_cause & EDMA_ERR_DEV) {
1835 /*
1836 * Device errors during FIS-based switching operation
1837 * require special handling.
1838 */
1839 if (mv_handle_dev_err(ap, edma_err_cause))
1840 return;
1841 }
1542 1842
1843 qc = mv_get_active_qc(ap);
1844 ata_ehi_clear_desc(ehi);
1845 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
1846 edma_err_cause, pp->pp_flags);
1543 /* 1847 /*
1544 * All generations share these EDMA error cause bits: 1848 * All generations share these EDMA error cause bits:
1545 */ 1849 */
1546 if (edma_err_cause & EDMA_ERR_DEV) 1850 if (edma_err_cause & EDMA_ERR_DEV) {
1547 err_mask |= AC_ERR_DEV; 1851 err_mask |= AC_ERR_DEV;
1852 action |= ATA_EH_RESET;
1853 ata_ehi_push_desc(ehi, "dev error");
1854 }
1548 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | 1855 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1549 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | 1856 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1550 EDMA_ERR_INTRL_PAR)) { 1857 EDMA_ERR_INTRL_PAR)) {
@@ -1576,13 +1883,6 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1576 ata_ehi_push_desc(ehi, "EDMA self-disable"); 1883 ata_ehi_push_desc(ehi, "EDMA self-disable");
1577 } 1884 }
1578 if (edma_err_cause & EDMA_ERR_SERR) { 1885 if (edma_err_cause & EDMA_ERR_SERR) {
1579 /*
1580 * Ensure that we read our own SCR, not a pmp link SCR:
1581 */
1582 ap->ops->scr_read(ap, SCR_ERROR, &serr);
1583 /*
1584 * Don't clear SError here; leave it for libata-eh:
1585 */
1586 ata_ehi_push_desc(ehi, "SError=%08x", serr); 1886 ata_ehi_push_desc(ehi, "SError=%08x", serr);
1587 err_mask |= AC_ERR_ATA_BUS; 1887 err_mask |= AC_ERR_ATA_BUS;
1588 action |= ATA_EH_RESET; 1888 action |= ATA_EH_RESET;
@@ -1602,10 +1902,29 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1602 else 1902 else
1603 ehi->err_mask |= err_mask; 1903 ehi->err_mask |= err_mask;
1604 1904
1605 if (edma_err_cause & eh_freeze_mask) 1905 if (err_mask == AC_ERR_DEV) {
1906 /*
1907 * Cannot do ata_port_freeze() here,
1908 * because it would kill PIO access,
1909 * which is needed for further diagnosis.
1910 */
1911 mv_eh_freeze(ap);
1912 abort = 1;
1913 } else if (edma_err_cause & eh_freeze_mask) {
1914 /*
1915 * Note to self: ata_port_freeze() calls ata_port_abort()
1916 */
1606 ata_port_freeze(ap); 1917 ata_port_freeze(ap);
1607 else 1918 } else {
1608 ata_port_abort(ap); 1919 abort = 1;
1920 }
1921
1922 if (abort) {
1923 if (qc)
1924 ata_link_abort(qc->dev->link);
1925 else
1926 ata_port_abort(ap);
1927 }
1609} 1928}
1610 1929
1611static void mv_process_crpb_response(struct ata_port *ap, 1930static void mv_process_crpb_response(struct ata_port *ap,
@@ -1632,8 +1951,9 @@ static void mv_process_crpb_response(struct ata_port *ap,
1632 } 1951 }
1633 } 1952 }
1634 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; 1953 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
1635 qc->err_mask |= ac_err_mask(ata_status); 1954 if (!ac_err_mask(ata_status))
1636 ata_qc_complete(qc); 1955 ata_qc_complete(qc);
1956 /* else: leave it for mv_err_intr() */
1637 } else { 1957 } else {
1638 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n", 1958 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
1639 __func__, tag); 1959 __func__, tag);
@@ -1677,6 +1997,44 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
1677 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 1997 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1678} 1998}
1679 1999
2000static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2001{
2002 struct mv_port_priv *pp;
2003 int edma_was_enabled;
2004
2005 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
2006 mv_unexpected_intr(ap, 0);
2007 return;
2008 }
2009 /*
2010 * Grab a snapshot of the EDMA_EN flag setting,
2011 * so that we have a consistent view for this port,
 2012 * even if one of the routines we call changes it.
2013 */
2014 pp = ap->private_data;
2015 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2016 /*
2017 * Process completed CRPB response(s) before other events.
2018 */
2019 if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2020 mv_process_crpb_entries(ap, pp);
2021 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2022 mv_handle_fbs_ncq_dev_err(ap);
2023 }
2024 /*
2025 * Handle chip-reported errors, or continue on to handle PIO.
2026 */
2027 if (unlikely(port_cause & ERR_IRQ)) {
2028 mv_err_intr(ap);
2029 } else if (!edma_was_enabled) {
2030 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2031 if (qc)
2032 ata_sff_host_intr(ap, qc);
2033 else
2034 mv_unexpected_intr(ap, edma_was_enabled);
2035 }
2036}
2037
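Note how mv_port_intr() snapshots EDMA_EN once at entry, so the later routing decisions stay consistent even if a callee (such as the CRPB processing) clears the flag underneath it. A toy illustration of why the snapshot matters (all types and bit names invented):

#include <stdio.h>

#define DONE_IRQ (1u << 0)
#define ERR_IRQ  (1u << 1)
#define FLAG_EDMA_EN (1u << 0)

struct port { unsigned int flags; };

static void process_responses(struct port *p)
{
    /* a callee may legitimately change state... */
    p->flags &= ~FLAG_EDMA_EN;
    printf("  drained response queue\n");
}

static void port_intr(struct port *p, unsigned int cause)
{
    /* snapshot once; without it, the callee's change above would
     * wrongly route us into the PIO completion path below */
    int edma_was_enabled = !!(p->flags & FLAG_EDMA_EN);

    if (edma_was_enabled && (cause & DONE_IRQ))
        process_responses(p);
    if (cause & ERR_IRQ)
        printf("  error path\n");
    else if (!edma_was_enabled)
        printf("  PIO completion path\n");
}

int main(void)
{
    struct port p = { FLAG_EDMA_EN };

    port_intr(&p, DONE_IRQ);
    return 0;
}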
1680/** 2038/**
1681 * mv_host_intr - Handle all interrupts on the given host controller 2039 * mv_host_intr - Handle all interrupts on the given host controller
1682 * @host: host specific structure 2040 * @host: host specific structure
@@ -1688,66 +2046,58 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
1688static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) 2046static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
1689{ 2047{
1690 struct mv_host_priv *hpriv = host->private_data; 2048 struct mv_host_priv *hpriv = host->private_data;
1691 void __iomem *mmio = hpriv->base, *hc_mmio = NULL; 2049 void __iomem *mmio = hpriv->base, *hc_mmio;
1692 u32 hc_irq_cause = 0;
1693 unsigned int handled = 0, port; 2050 unsigned int handled = 0, port;
1694 2051
1695 for (port = 0; port < hpriv->n_ports; port++) { 2052 for (port = 0; port < hpriv->n_ports; port++) {
1696 struct ata_port *ap = host->ports[port]; 2053 struct ata_port *ap = host->ports[port];
1697 struct mv_port_priv *pp; 2054 unsigned int p, shift, hardport, port_cause;
1698 unsigned int shift, hardport, port_cause; 2055
1699 /*
1700 * When we move to the second hc, flag our cached
1701 * copies of hc_mmio (and hc_irq_cause) as invalid again.
1702 */
1703 if (port == MV_PORTS_PER_HC)
1704 hc_mmio = NULL;
1705 /*
1706 * Do nothing if port is not interrupting or is disabled:
1707 */
1708 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); 2056 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
1709 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
1710 if (!port_cause || !ap || (ap->flags & ATA_FLAG_DISABLED))
1711 continue;
1712 /* 2057 /*
1713 * Each hc within the host has its own hc_irq_cause register. 2058 * Each hc within the host has its own hc_irq_cause register,
1714 * We defer reading it until we know we need it, right now: 2059 * where the interrupting ports bits get ack'd.
1715 *
1716 * FIXME later: we don't really need to read this register
1717 * (some logic changes required below if we go that way),
1718 * because it doesn't tell us anything new. But we do need
1719 * to write to it, outside the top of this loop,
1720 * to reset the interrupt triggers for next time.
1721 */ 2060 */
1722 if (!hc_mmio) { 2061 if (hardport == 0) { /* first port on this hc ? */
2062 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2063 u32 port_mask, ack_irqs;
2064 /*
2065 * Skip this entire hc if nothing pending for any ports
2066 */
2067 if (!hc_cause) {
2068 port += MV_PORTS_PER_HC - 1;
2069 continue;
2070 }
2071 /*
2072 * We don't need/want to read the hc_irq_cause register,
2073 * because doing so hurts performance, and
2074 * main_irq_cause already gives us everything we need.
2075 *
2076 * But we do have to *write* to the hc_irq_cause to ack
2077 * the ports that we are handling this time through.
2078 *
2079 * This requires that we create a bitmap for those
2080 * ports which interrupted us, and use that bitmap
2081 * to ack (only) those ports via hc_irq_cause.
2082 */
2083 ack_irqs = 0;
2084 for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2085 if ((port + p) >= hpriv->n_ports)
2086 break;
2087 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2088 if (hc_cause & port_mask)
2089 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2090 }
1723 hc_mmio = mv_hc_base_from_port(mmio, port); 2091 hc_mmio = mv_hc_base_from_port(mmio, port);
1724 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); 2092 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS);
1725 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1726 handled = 1; 2093 handled = 1;
1727 } 2094 }
1728 /* 2095 /*
1729 * Process completed CRPB response(s) before other events. 2096 * Handle interrupts signalled for this port:
1730 */
1731 pp = ap->private_data;
1732 if (hc_irq_cause & (DMA_IRQ << hardport)) {
1733 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN)
1734 mv_process_crpb_entries(ap, pp);
1735 }
1736 /*
1737 * Handle chip-reported errors, or continue on to handle PIO.
1738 */ 2097 */
1739 if (unlikely(port_cause & ERR_IRQ)) { 2098 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
1740 mv_err_intr(ap, mv_get_active_qc(ap)); 2099 if (port_cause)
1741 } else if (hc_irq_cause & (DEV_IRQ << hardport)) { 2100 mv_port_intr(ap, port_cause);
1742 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
1743 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
1744 if (qc) {
1745 ata_sff_host_intr(ap, qc);
1746 continue;
1747 }
1748 }
1749 mv_unexpected_intr(ap);
1750 }
1751 } 2101 }
1752 return handled; 2102 return handled;
1753} 2103}
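The rewritten loop above acks interrupts per host controller by translating each port's DONE/ERR cause bits (at position p * 2 in the main cause register) into DMA/DEV ack bits (at position p in hc_irq_cause). A sketch of that mask translation, with bit positions taken from the hunk but otherwise unverified:

#include <stdio.h>

#define PORTS_PER_HC 4
#define DONE_IRQ (1u << 0)
#define ERR_IRQ  (1u << 1)
#define DMA_IRQ  (1u << 0)
#define DEV_IRQ  (1u << 8)

static unsigned int build_ack_mask(unsigned int hc_cause, int nports)
{
    unsigned int ack = 0;
    int p;

    for (p = 0; p < PORTS_PER_HC && p < nports; ++p) {
        unsigned int port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);

        if (hc_cause & port_mask)
            ack |= (DMA_IRQ | DEV_IRQ) << p;
    }
    return ack;
}

int main(void)
{
    /* ports 0 and 2 interrupting */
    unsigned int hc_cause = (DONE_IRQ << 0) | (ERR_IRQ << 4);

    printf("ack mask: 0x%08x\n", build_ack_mask(hc_cause, 4));
    return 0;
}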
@@ -1894,7 +2244,7 @@ static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1894 2244
1895static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) 2245static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1896{ 2246{
1897 writel(0x0fcfffff, mmio + MV_FLASH_CTL); 2247 writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS);
1898} 2248}
1899 2249
1900static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, 2250static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
@@ -1913,7 +2263,7 @@ static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1913{ 2263{
1914 u32 tmp; 2264 u32 tmp;
1915 2265
1916 writel(0, mmio + MV_GPIO_PORT_CTL); 2266 writel(0, mmio + MV_GPIO_PORT_CTL_OFS);
1917 2267
1918 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ 2268 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1919 2269
@@ -1931,14 +2281,14 @@ static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1931 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); 2281 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1932 2282
1933 if (fix_apm_sq) { 2283 if (fix_apm_sq) {
1934 tmp = readl(phy_mmio + MV5_LT_MODE); 2284 tmp = readl(phy_mmio + MV5_LTMODE_OFS);
1935 tmp |= (1 << 19); 2285 tmp |= (1 << 19);
1936 writel(tmp, phy_mmio + MV5_LT_MODE); 2286 writel(tmp, phy_mmio + MV5_LTMODE_OFS);
1937 2287
1938 tmp = readl(phy_mmio + MV5_PHY_CTL); 2288 tmp = readl(phy_mmio + MV5_PHY_CTL_OFS);
1939 tmp &= ~0x3; 2289 tmp &= ~0x3;
1940 tmp |= 0x1; 2290 tmp |= 0x1;
1941 writel(tmp, phy_mmio + MV5_PHY_CTL); 2291 writel(tmp, phy_mmio + MV5_PHY_CTL_OFS);
1942 } 2292 }
1943 2293
1944 tmp = readl(phy_mmio + MV5_PHY_MODE); 2294 tmp = readl(phy_mmio + MV5_PHY_MODE);
@@ -1956,11 +2306,6 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1956{ 2306{
1957 void __iomem *port_mmio = mv_port_base(mmio, port); 2307 void __iomem *port_mmio = mv_port_base(mmio, port);
1958 2308
1959 /*
1960 * The datasheet warns against setting ATA_RST when EDMA is active
1961 * (but doesn't say what the problem might be). So we first try
1962 * to disable the EDMA engine before doing the ATA_RST operation.
1963 */
1964 mv_reset_channel(hpriv, mmio, port); 2309 mv_reset_channel(hpriv, mmio, port);
1965 2310
1966 ZERO(0x028); /* command */ 2311 ZERO(0x028); /* command */
@@ -1975,7 +2320,7 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1975 ZERO(0x024); /* respq outp */ 2320 ZERO(0x024); /* respq outp */
1976 ZERO(0x020); /* respq inp */ 2321 ZERO(0x020); /* respq inp */
1977 ZERO(0x02c); /* test control */ 2322 ZERO(0x02c); /* test control */
1978 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT); 2323 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
1979} 2324}
1980#undef ZERO 2325#undef ZERO
1981 2326
@@ -2021,13 +2366,13 @@ static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
2021 struct mv_host_priv *hpriv = host->private_data; 2366 struct mv_host_priv *hpriv = host->private_data;
2022 u32 tmp; 2367 u32 tmp;
2023 2368
2024 tmp = readl(mmio + MV_PCI_MODE); 2369 tmp = readl(mmio + MV_PCI_MODE_OFS);
2025 tmp &= 0xff00ffff; 2370 tmp &= 0xff00ffff;
2026 writel(tmp, mmio + MV_PCI_MODE); 2371 writel(tmp, mmio + MV_PCI_MODE_OFS);
2027 2372
2028 ZERO(MV_PCI_DISC_TIMER); 2373 ZERO(MV_PCI_DISC_TIMER);
2029 ZERO(MV_PCI_MSI_TRIGGER); 2374 ZERO(MV_PCI_MSI_TRIGGER);
2030 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT); 2375 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
2031 ZERO(PCI_HC_MAIN_IRQ_MASK_OFS); 2376 ZERO(PCI_HC_MAIN_IRQ_MASK_OFS);
2032 ZERO(MV_PCI_SERR_MASK); 2377 ZERO(MV_PCI_SERR_MASK);
2033 ZERO(hpriv->irq_cause_ofs); 2378 ZERO(hpriv->irq_cause_ofs);
@@ -2045,10 +2390,10 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2045 2390
2046 mv5_reset_flash(hpriv, mmio); 2391 mv5_reset_flash(hpriv, mmio);
2047 2392
2048 tmp = readl(mmio + MV_GPIO_PORT_CTL); 2393 tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS);
2049 tmp &= 0x3; 2394 tmp &= 0x3;
2050 tmp |= (1 << 5) | (1 << 6); 2395 tmp |= (1 << 5) | (1 << 6);
2051 writel(tmp, mmio + MV_GPIO_PORT_CTL); 2396 writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS);
2052} 2397}
2053 2398
2054/** 2399/**
@@ -2121,7 +2466,7 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2121 void __iomem *port_mmio; 2466 void __iomem *port_mmio;
2122 u32 tmp; 2467 u32 tmp;
2123 2468
2124 tmp = readl(mmio + MV_RESET_CFG); 2469 tmp = readl(mmio + MV_RESET_CFG_OFS);
2125 if ((tmp & (1 << 0)) == 0) { 2470 if ((tmp & (1 << 0)) == 0) {
2126 hpriv->signal[idx].amps = 0x7 << 8; 2471 hpriv->signal[idx].amps = 0x7 << 8;
2127 hpriv->signal[idx].pre = 0x1 << 5; 2472 hpriv->signal[idx].pre = 0x1 << 5;
@@ -2137,7 +2482,7 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2137 2482
2138static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) 2483static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2139{ 2484{
2140 writel(0x00000060, mmio + MV_GPIO_PORT_CTL); 2485 writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS);
2141} 2486}
2142 2487
2143static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 2488static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
@@ -2235,11 +2580,6 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2235{ 2580{
2236 void __iomem *port_mmio = mv_port_base(mmio, port); 2581 void __iomem *port_mmio = mv_port_base(mmio, port);
2237 2582
2238 /*
2239 * The datasheet warns against setting ATA_RST when EDMA is active
2240 * (but doesn't say what the problem might be). So we first try
2241 * to disable the EDMA engine before doing the ATA_RST operation.
2242 */
2243 mv_reset_channel(hpriv, mmio, port); 2583 mv_reset_channel(hpriv, mmio, port);
2244 2584
2245 ZERO(0x028); /* command */ 2585 ZERO(0x028); /* command */
@@ -2254,7 +2594,7 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2254 ZERO(0x024); /* respq outp */ 2594 ZERO(0x024); /* respq outp */
2255 ZERO(0x020); /* respq inp */ 2595 ZERO(0x020); /* respq inp */
2256 ZERO(0x02c); /* test control */ 2596 ZERO(0x02c); /* test control */
2257 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT); 2597 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
2258} 2598}
2259 2599
2260#undef ZERO 2600#undef ZERO
@@ -2297,38 +2637,39 @@ static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2297 return; 2637 return;
2298} 2638}
2299 2639
2300static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i) 2640static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
2301{ 2641{
2302 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG); 2642 u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS);
2303 2643
2304 ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */ 2644 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
2305 if (want_gen2i) 2645 if (want_gen2i)
2306 ifctl |= (1 << 7); /* enable gen2i speed */ 2646 ifcfg |= (1 << 7); /* enable gen2i speed */
2307 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG); 2647 writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS);
2308} 2648}
2309 2649
2310/*
2311 * Caller must ensure that EDMA is not active,
2312 * by first doing mv_stop_edma() where needed.
2313 */
2314static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, 2650static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
2315 unsigned int port_no) 2651 unsigned int port_no)
2316{ 2652{
2317 void __iomem *port_mmio = mv_port_base(mmio, port_no); 2653 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2318 2654
2655 /*
2656 * The datasheet warns against setting EDMA_RESET when EDMA is active
2657 * (but doesn't say what the problem might be). So we first try
2658 * to disable the EDMA engine before doing the EDMA_RESET operation.
2659 */
2319 mv_stop_edma_engine(port_mmio); 2660 mv_stop_edma_engine(port_mmio);
2320 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); 2661 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
2321 2662
2322 if (!IS_GEN_I(hpriv)) { 2663 if (!IS_GEN_I(hpriv)) {
2323 /* Enable 3.0gb/s link speed */ 2664 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */
2324 mv_setup_ifctl(port_mmio, 1); 2665 mv_setup_ifcfg(port_mmio, 1);
2325 } 2666 }
2326 /* 2667 /*
2327 * Strobing ATA_RST here causes a hard reset of the SATA transport, 2668 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
2328 * link, and physical layers. It resets all SATA interface registers 2669 * link, and physical layers. It resets all SATA interface registers
2329 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev. 2670 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
2330 */ 2671 */
2331 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); 2672 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
2332 udelay(25); /* allow reset propagation */ 2673 udelay(25); /* allow reset propagation */
2333 writelfl(0, port_mmio + EDMA_CMD_OFS); 2674 writelfl(0, port_mmio + EDMA_CMD_OFS);
2334 2675
@@ -2392,7 +2733,7 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
2392 sata_scr_read(link, SCR_STATUS, &sstatus); 2733 sata_scr_read(link, SCR_STATUS, &sstatus);
2393 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { 2734 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
2394 /* Force 1.5gb/s link speed and try again */ 2735 /* Force 1.5gb/s link speed and try again */
2395 mv_setup_ifctl(mv_ap_base(ap), 0); 2736 mv_setup_ifcfg(mv_ap_base(ap), 0);
2396 if (time_after(jiffies + HZ, deadline)) 2737 if (time_after(jiffies + HZ, deadline))
2397 extra = HZ; /* only extend it once, max */ 2738 extra = HZ; /* only extend it once, max */
2398 } 2739 }
@@ -2493,6 +2834,34 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2493 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); 2834 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2494} 2835}
2495 2836
2837static unsigned int mv_in_pcix_mode(struct ata_host *host)
2838{
2839 struct mv_host_priv *hpriv = host->private_data;
2840 void __iomem *mmio = hpriv->base;
2841 u32 reg;
2842
2843 if (!HAS_PCI(host) || !IS_PCIE(hpriv))
2844 return 0; /* not PCI-X capable */
2845 reg = readl(mmio + MV_PCI_MODE_OFS);
2846 if ((reg & MV_PCI_MODE_MASK) == 0)
2847 return 0; /* conventional PCI mode */
2848 return 1; /* chip is in PCI-X mode */
2849}
2850
2851static int mv_pci_cut_through_okay(struct ata_host *host)
2852{
2853 struct mv_host_priv *hpriv = host->private_data;
2854 void __iomem *mmio = hpriv->base;
2855 u32 reg;
2856
2857 if (!mv_in_pcix_mode(host)) {
2858 reg = readl(mmio + PCI_COMMAND_OFS);
2859 if (reg & PCI_COMMAND_MRDTRIG)
2860 return 0; /* not okay */
2861 }
2862 return 1; /* okay */
2863}
2864
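mv_pci_cut_through_okay() boils down to two register tests: cut-through is assumed safe in PCI-X mode, and in conventional PCI mode only when the Master Read Trigger bit is clear. Restated as pure predicates over sampled register values (masks copied from the enums above; behaviour otherwise assumed):

#include <stdio.h>

#define PCI_MODE_MASK 0x30        /* stand-in for MV_PCI_MODE_MASK */
#define CMD_MRDTRIG   (1u << 7)   /* PCI Master Read Trigger */

static int in_pcix_mode(unsigned int mode_reg)
{
    return (mode_reg & PCI_MODE_MASK) != 0;
}

static int cut_through_okay(unsigned int mode_reg, unsigned int cmd_reg)
{
    if (!in_pcix_mode(mode_reg) && (cmd_reg & CMD_MRDTRIG))
        return 0;   /* conventional PCI with read-trigger set: not okay */
    return 1;
}

int main(void)
{
    printf("PCI-X:            %s\n", cut_through_okay(0x10, 0) ? "okay" : "no");
    printf("PCI, MRDTRIG set: %s\n", cut_through_okay(0x00, CMD_MRDTRIG) ? "okay" : "no");
    printf("PCI, MRDTRIG clr: %s\n", cut_through_okay(0x00, 0) ? "okay" : "no");
    return 0;
}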
2496static int mv_chip_id(struct ata_host *host, unsigned int board_idx) 2865static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2497{ 2866{
2498 struct pci_dev *pdev = to_pci_dev(host->dev); 2867 struct pci_dev *pdev = to_pci_dev(host->dev);
@@ -2560,7 +2929,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2560 break; 2929 break;
2561 2930
2562 case chip_7042: 2931 case chip_7042:
2563 hp_flags |= MV_HP_PCIE; 2932 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
2564 if (pdev->vendor == PCI_VENDOR_ID_TTI && 2933 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2565 (pdev->device == 0x2300 || pdev->device == 0x2310)) 2934 (pdev->device == 0x2300 || pdev->device == 0x2310))
2566 { 2935 {
@@ -2590,9 +2959,12 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2590 " and avoid the final two gigabytes on" 2959 " and avoid the final two gigabytes on"
2591 " all RocketRAID BIOS initialized drives.\n"); 2960 " all RocketRAID BIOS initialized drives.\n");
2592 } 2961 }
2962 /* drop through */
2593 case chip_6042: 2963 case chip_6042:
2594 hpriv->ops = &mv6xxx_ops; 2964 hpriv->ops = &mv6xxx_ops;
2595 hp_flags |= MV_HP_GEN_IIE; 2965 hp_flags |= MV_HP_GEN_IIE;
2966 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
2967 hp_flags |= MV_HP_CUT_THROUGH;
2596 2968
2597 switch (pdev->revision) { 2969 switch (pdev->revision) {
2598 case 0x0: 2970 case 0x0:
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 4fbb56bcb1e..358bb0be3c0 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -175,8 +175,7 @@ int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv)
175 } 175 }
176 176
177 /* Check whether this driver has already been added to a class. */ 177 /* Check whether this driver has already been added to a class. */
178 if ((drv->entry.next != drv->entry.prev) || 178 if (drv->entry.next && !list_empty(&drv->entry)) {
179 (drv->entry.next != NULL)) {
180 printk(KERN_WARNING "sysdev: class %s: driver (%p) has already" 179 printk(KERN_WARNING "sysdev: class %s: driver (%p) has already"
181 " been registered to a class, something is wrong, but " 180 " been registered to a class, something is wrong, but "
182 "will forge on!\n", cls->name, drv); 181 "will forge on!\n", cls->name, drv);
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 8fc429cf82b..41f818be2f7 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -755,11 +755,13 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
755{ 755{
756 unsigned long n_sect = bio->bi_size >> 9; 756 unsigned long n_sect = bio->bi_size >> 9;
757 const int rw = bio_data_dir(bio); 757 const int rw = bio_data_dir(bio);
758 struct hd_struct *part;
758 759
759 all_stat_inc(disk, ios[rw], sector); 760 part = get_part(disk, sector);
760 all_stat_add(disk, ticks[rw], duration, sector); 761 all_stat_inc(disk, part, ios[rw], sector);
761 all_stat_add(disk, sectors[rw], n_sect, sector); 762 all_stat_add(disk, part, ticks[rw], duration, sector);
762 all_stat_add(disk, io_ticks, duration, sector); 763 all_stat_add(disk, part, sectors[rw], n_sect, sector);
764 all_stat_add(disk, part, io_ticks, duration, sector);
763} 765}
764 766
765void 767void
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index f39f6fd8935..b1a7a8cb65e 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -970,7 +970,8 @@ static int sx_set_real_termios(void *ptr)
970 sx_write_channel_byte(port, hi_mask, 0x1f); 970 sx_write_channel_byte(port, hi_mask, 0x1f);
971 break; 971 break;
972 default: 972 default:
973 printk(KERN_INFO "sx: Invalid wordsize: %u\n", CFLAG & CSIZE); 973 printk(KERN_INFO "sx: Invalid wordsize: %u\n",
974 (unsigned int)CFLAG & CSIZE);
974 break; 975 break;
975 } 976 }
976 977
@@ -997,7 +998,8 @@ static int sx_set_real_termios(void *ptr)
997 set_bit(TTY_HW_COOK_IN, &port->gs.tty->flags); 998 set_bit(TTY_HW_COOK_IN, &port->gs.tty->flags);
998 } 999 }
999 sx_dprintk(SX_DEBUG_TERMIOS, "iflags: %x(%d) ", 1000 sx_dprintk(SX_DEBUG_TERMIOS, "iflags: %x(%d) ",
1000 port->gs.tty->termios->c_iflag, I_OTHER(port->gs.tty)); 1001 (unsigned int)port->gs.tty->termios->c_iflag,
1002 I_OTHER(port->gs.tty));
1001 1003
1002/* Tell line discipline whether we will do output cooking. 1004/* Tell line discipline whether we will do output cooking.
1003 * If OPOST is set and no other output flags are set then we can do output 1005 * If OPOST is set and no other output flags are set then we can do output
@@ -1010,7 +1012,8 @@ static int sx_set_real_termios(void *ptr)
1010 clear_bit(TTY_HW_COOK_OUT, &port->gs.tty->flags); 1012 clear_bit(TTY_HW_COOK_OUT, &port->gs.tty->flags);
1011 } 1013 }
1012 sx_dprintk(SX_DEBUG_TERMIOS, "oflags: %x(%d)\n", 1014 sx_dprintk(SX_DEBUG_TERMIOS, "oflags: %x(%d)\n",
1013 port->gs.tty->termios->c_oflag, O_OTHER(port->gs.tty)); 1015 (unsigned int)port->gs.tty->termios->c_oflag,
1016 O_OTHER(port->gs.tty));
1014 /* port->c_dcd = sx_get_CD (port); */ 1017 /* port->c_dcd = sx_get_CD (port); */
1015 func_exit(); 1018 func_exit();
1016 return 0; 1019 return 0;
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index e458b08139a..fa1ffbf2c62 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -2742,6 +2742,10 @@ static int con_open(struct tty_struct *tty, struct file *filp)
2742 tty->winsize.ws_row = vc_cons[currcons].d->vc_rows; 2742 tty->winsize.ws_row = vc_cons[currcons].d->vc_rows;
2743 tty->winsize.ws_col = vc_cons[currcons].d->vc_cols; 2743 tty->winsize.ws_col = vc_cons[currcons].d->vc_cols;
2744 } 2744 }
2745 if (vc->vc_utf)
2746 tty->termios->c_iflag |= IUTF8;
2747 else
2748 tty->termios->c_iflag &= ~IUTF8;
2745 release_console_sem(); 2749 release_console_sem();
2746 vcs_make_sysfs(tty); 2750 vcs_make_sysfs(tty);
2747 return ret; 2751 return ret;
@@ -2918,6 +2922,8 @@ int __init vty_init(void)
2918 console_driver->minor_start = 1; 2922 console_driver->minor_start = 1;
2919 console_driver->type = TTY_DRIVER_TYPE_CONSOLE; 2923 console_driver->type = TTY_DRIVER_TYPE_CONSOLE;
2920 console_driver->init_termios = tty_std_termios; 2924 console_driver->init_termios = tty_std_termios;
2925 if (default_utf8)
2926 console_driver->init_termios.c_iflag |= IUTF8;
2921 console_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS; 2927 console_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS;
2922 tty_set_operations(console_driver, &con_ops); 2928 tty_set_operations(console_driver, &con_ops);
2923 if (tty_register_driver(console_driver)) 2929 if (tty_register_driver(console_driver))
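The vt changes propagate the console's UTF-8 state into termios by setting or clearing IUTF8 in c_iflag. The same toggle can be exercised from user space on Linux; a short sketch (the utf8_mode flag stands in for vc->vc_utf):

#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
    struct termios tio;
    int utf8_mode = 1;   /* would come from the console's UTF-8 state */

    if (tcgetattr(STDIN_FILENO, &tio) != 0) {
        perror("tcgetattr");
        return 1;
    }
#ifdef IUTF8
    if (utf8_mode)
        tio.c_iflag |= IUTF8;
    else
        tio.c_iflag &= ~IUTF8;
    if (tcsetattr(STDIN_FILENO, TCSANOW, &tio) != 0)
        perror("tcsetattr");
    printf("IUTF8 is now %s\n", (tio.c_iflag & IUTF8) ? "set" : "clear");
#else
    printf("IUTF8 not available on this platform\n");
#endif
    return 0;
}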
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 5fd8506a865..ebf9d3043f8 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -588,7 +588,7 @@ static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
588 * caller acquires the ctrl_qp lock before the call 588
589 */ 589 */
590static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr, 590static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
591 u32 len, void *data, int completion) 591 u32 len, void *data)
592{ 592{
593 u32 i, nr_wqe, copy_len; 593 u32 i, nr_wqe, copy_len;
594 u8 *copy_data; 594 u8 *copy_data;
@@ -624,7 +624,7 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
624 flag = 0; 624 flag = 0;
625 if (i == (nr_wqe - 1)) { 625 if (i == (nr_wqe - 1)) {
626 /* last WQE */ 626 /* last WQE */
627 flag = completion ? T3_COMPLETION_FLAG : 0; 627 flag = T3_COMPLETION_FLAG;
628 if (len % 32) 628 if (len % 32)
629 utx_len = len / 32 + 1; 629 utx_len = len / 32 + 1;
630 else 630 else
@@ -683,21 +683,20 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
683 return 0; 683 return 0;
684} 684}
685 685
686/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl, and pbl_size 686/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl_size and pbl_addr
687 * OUT: stag index, actual pbl_size, pbl_addr allocated. 687 * OUT: stag index
688 * TBD: shared memory region support 688 * TBD: shared memory region support
689 */ 689 */
690static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry, 690static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
691 u32 *stag, u8 stag_state, u32 pdid, 691 u32 *stag, u8 stag_state, u32 pdid,
692 enum tpt_mem_type type, enum tpt_mem_perm perm, 692 enum tpt_mem_type type, enum tpt_mem_perm perm,
693 u32 zbva, u64 to, u32 len, u8 page_size, __be64 *pbl, 693 u32 zbva, u64 to, u32 len, u8 page_size,
694 u32 *pbl_size, u32 *pbl_addr) 694 u32 pbl_size, u32 pbl_addr)
695{ 695{
696 int err; 696 int err;
697 struct tpt_entry tpt; 697 struct tpt_entry tpt;
698 u32 stag_idx; 698 u32 stag_idx;
699 u32 wptr; 699 u32 wptr;
700 int rereg = (*stag != T3_STAG_UNSET);
701 700
702 stag_state = stag_state > 0; 701 stag_state = stag_state > 0;
703 stag_idx = (*stag) >> 8; 702 stag_idx = (*stag) >> 8;
@@ -711,30 +710,8 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
711 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n", 710 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
712 __func__, stag_state, type, pdid, stag_idx); 711 __func__, stag_state, type, pdid, stag_idx);
713 712
714 if (reset_tpt_entry)
715 cxio_hal_pblpool_free(rdev_p, *pbl_addr, *pbl_size << 3);
716 else if (!rereg) {
717 *pbl_addr = cxio_hal_pblpool_alloc(rdev_p, *pbl_size << 3);
718 if (!*pbl_addr) {
719 return -ENOMEM;
720 }
721 }
722
723 mutex_lock(&rdev_p->ctrl_qp.lock); 713 mutex_lock(&rdev_p->ctrl_qp.lock);
724 714
725 /* write PBL first if any - update pbl only if pbl list exist */
726 if (pbl) {
727
728 PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
729 __func__, *pbl_addr, rdev_p->rnic_info.pbl_base,
730 *pbl_size);
731 err = cxio_hal_ctrl_qp_write_mem(rdev_p,
732 (*pbl_addr >> 5),
733 (*pbl_size << 3), pbl, 0);
734 if (err)
735 goto ret;
736 }
737
738 /* write TPT entry */ 715 /* write TPT entry */
739 if (reset_tpt_entry) 716 if (reset_tpt_entry)
740 memset(&tpt, 0, sizeof(tpt)); 717 memset(&tpt, 0, sizeof(tpt));
@@ -749,23 +726,23 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
749 V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) | 726 V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
750 V_TPT_PAGE_SIZE(page_size)); 727 V_TPT_PAGE_SIZE(page_size));
751 tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 : 728 tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :
752 cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, *pbl_addr)>>3)); 729 cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
753 tpt.len = cpu_to_be32(len); 730 tpt.len = cpu_to_be32(len);
754 tpt.va_hi = cpu_to_be32((u32) (to >> 32)); 731 tpt.va_hi = cpu_to_be32((u32) (to >> 32));
755 tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL)); 732 tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
756 tpt.rsvd_bind_cnt_or_pstag = 0; 733 tpt.rsvd_bind_cnt_or_pstag = 0;
757 tpt.rsvd_pbl_size = reset_tpt_entry ? 0 : 734 tpt.rsvd_pbl_size = reset_tpt_entry ? 0 :
758 cpu_to_be32(V_TPT_PBL_SIZE((*pbl_size) >> 2)); 735 cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2));
759 } 736 }
760 err = cxio_hal_ctrl_qp_write_mem(rdev_p, 737 err = cxio_hal_ctrl_qp_write_mem(rdev_p,
761 stag_idx + 738 stag_idx +
762 (rdev_p->rnic_info.tpt_base >> 5), 739 (rdev_p->rnic_info.tpt_base >> 5),
763 sizeof(tpt), &tpt, 1); 740 sizeof(tpt), &tpt);
764 741
765 /* release the stag index to free pool */ 742 /* release the stag index to free pool */
766 if (reset_tpt_entry) 743 if (reset_tpt_entry)
767 cxio_hal_put_stag(rdev_p->rscp, stag_idx); 744 cxio_hal_put_stag(rdev_p->rscp, stag_idx);
768ret: 745
769 wptr = rdev_p->ctrl_qp.wptr; 746 wptr = rdev_p->ctrl_qp.wptr;
770 mutex_unlock(&rdev_p->ctrl_qp.lock); 747 mutex_unlock(&rdev_p->ctrl_qp.lock);
771 if (!err) 748 if (!err)
@@ -776,44 +753,67 @@ ret:
776 return err; 753 return err;
777} 754}
778 755
756int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
757 u32 pbl_addr, u32 pbl_size)
758{
759 u32 wptr;
760 int err;
761
762 PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
763 __func__, pbl_addr, rdev_p->rnic_info.pbl_base,
764 pbl_size);
765
766 mutex_lock(&rdev_p->ctrl_qp.lock);
767 err = cxio_hal_ctrl_qp_write_mem(rdev_p, pbl_addr >> 5, pbl_size << 3,
768 pbl);
769 wptr = rdev_p->ctrl_qp.wptr;
770 mutex_unlock(&rdev_p->ctrl_qp.lock);
771 if (err)
772 return err;
773
774 if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
775 SEQ32_GE(rdev_p->ctrl_qp.rptr,
776 wptr)))
777 return -ERESTARTSYS;
778
779 return 0;
780}
781
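cxio_write_pbl() above sleeps until the control QP's read pointer catches up with the write pointer, compared via SEQ32_GE(). With 32-bit sequence counters the usual trick is to compare the signed difference, which stays correct across wraparound; the sketch below assumes SEQ32_GE has those semantics (check the macro in cxio_hal.h):

#include <stdio.h>
#include <stdint.h>

/* Wraparound-safe "a >= b" for 32-bit sequence counters (assumed
 * equivalent to the driver's SEQ32_GE; verify against cxio_hal.h). */
static int seq32_ge(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) >= 0;
}

int main(void)
{
    printf("%d\n", seq32_ge(10, 5));           /* 1 */
    printf("%d\n", seq32_ge(5, 10));           /* 0 */
    printf("%d\n", seq32_ge(3, 0xfffffff0u));  /* 1: counter wrapped past */
    return 0;
}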
779int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid, 782int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
780 enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len, 783 enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
781 u8 page_size, __be64 *pbl, u32 *pbl_size, 784 u8 page_size, u32 pbl_size, u32 pbl_addr)
782 u32 *pbl_addr)
783{ 785{
784 *stag = T3_STAG_UNSET; 786 *stag = T3_STAG_UNSET;
785 return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm, 787 return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
786 zbva, to, len, page_size, pbl, pbl_size, pbl_addr); 788 zbva, to, len, page_size, pbl_size, pbl_addr);
787} 789}
788 790
789int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid, 791int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
790 enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len, 792 enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
791 u8 page_size, __be64 *pbl, u32 *pbl_size, 793 u8 page_size, u32 pbl_size, u32 pbl_addr)
792 u32 *pbl_addr)
793{ 794{
794 return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm, 795 return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
795 zbva, to, len, page_size, pbl, pbl_size, pbl_addr); 796 zbva, to, len, page_size, pbl_size, pbl_addr);
796} 797}
797 798
798int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size, 799int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size,
799 u32 pbl_addr) 800 u32 pbl_addr)
800{ 801{
801 return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL, 802 return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
802 &pbl_size, &pbl_addr); 803 pbl_size, pbl_addr);
803} 804}
804 805
805int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid) 806int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid)
806{ 807{
807 u32 pbl_size = 0;
808 *stag = T3_STAG_UNSET; 808 *stag = T3_STAG_UNSET;
809 return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0, 809 return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0,
810 NULL, &pbl_size, NULL); 810 0, 0);
811} 811}
812 812
813int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag) 813int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag)
814{ 814{
815 return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL, 815 return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
816 NULL, NULL); 816 0, 0);
817} 817}
818 818
819int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr) 819int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
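
A hedged sketch of how a caller is expected to use the split API after this change: the page list is now streamed separately through cxio_write_pbl(), and the TPT operation receives pbl_size/pbl_addr by value instead of a __be64 *pbl. example_register() and its perm/len choices are illustrative assumptions, not driver code.

/* minimal sketch, assuming pbl_addr was already reserved from the PBL pool */
static int example_register(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
			    __be64 *pages, u32 npages, u32 pbl_addr)
{
	int err;

	/* step 1: stream the page list into adapter PBL memory */
	err = cxio_write_pbl(rdev_p, pages, pbl_addr, npages);
	if (err)
		return err;

	/* step 2: program a TPT entry pointing at that PBL region */
	return cxio_register_phys_mem(rdev_p, stag, pdid, TPT_LOCAL_WRITE,
				      0, 0, npages * PAGE_SIZE, 0,
				      npages, pbl_addr);
}
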
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 69ab08ebc68..6e128f6bab0 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -154,14 +154,14 @@ int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq,
154int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq, 154int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq,
155 struct cxio_ucontext *uctx); 155 struct cxio_ucontext *uctx);
156int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode); 156int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode);
157int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
158 u32 pbl_addr, u32 pbl_size);
157int cxio_register_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid, 159int cxio_register_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
158 enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len, 160 enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
159 u8 page_size, __be64 *pbl, u32 *pbl_size, 161 u8 page_size, u32 pbl_size, u32 pbl_addr);
160 u32 *pbl_addr);
161int cxio_reregister_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid, 162int cxio_reregister_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
162 enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len, 163 enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
163 u8 page_size, __be64 *pbl, u32 *pbl_size, 164 u8 page_size, u32 pbl_size, u32 pbl_addr);
164 u32 *pbl_addr);
165int cxio_dereg_mem(struct cxio_rdev *rdev, u32 stag, u32 pbl_size, 165int cxio_dereg_mem(struct cxio_rdev *rdev, u32 stag, u32 pbl_size,
166 u32 pbl_addr); 166 u32 pbl_addr);
167int cxio_allocate_window(struct cxio_rdev *rdev, u32 * stag, u32 pdid); 167int cxio_allocate_window(struct cxio_rdev *rdev, u32 * stag, u32 pdid);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
index 45ed4f25ef7..bd233c08765 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c
@@ -250,7 +250,6 @@ void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
250 */ 250 */
251 251
252#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */ 252#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
253#define PBL_CHUNK 2*1024*1024
254 253
255u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size) 254u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
256{ 255{
@@ -267,14 +266,35 @@ void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
267 266
268int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p) 267int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p)
269{ 268{
270 unsigned long i; 269 unsigned pbl_start, pbl_chunk;
270
271 rdev_p->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1); 271 rdev_p->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
272 if (rdev_p->pbl_pool) 272 if (!rdev_p->pbl_pool)
273 for (i = rdev_p->rnic_info.pbl_base; 273 return -ENOMEM;
274 i <= rdev_p->rnic_info.pbl_top - PBL_CHUNK + 1; 274
275 i += PBL_CHUNK) 275 pbl_start = rdev_p->rnic_info.pbl_base;
276 gen_pool_add(rdev_p->pbl_pool, i, PBL_CHUNK, -1); 276 pbl_chunk = rdev_p->rnic_info.pbl_top - pbl_start + 1;
277 return rdev_p->pbl_pool ? 0 : -ENOMEM; 277
278 while (pbl_start < rdev_p->rnic_info.pbl_top) {
279 pbl_chunk = min(rdev_p->rnic_info.pbl_top - pbl_start + 1,
280 pbl_chunk);
281 if (gen_pool_add(rdev_p->pbl_pool, pbl_start, pbl_chunk, -1)) {
282 PDBG("%s failed to add PBL chunk (%x/%x)\n",
283 __func__, pbl_start, pbl_chunk);
284 if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
285 printk(KERN_WARNING MOD "%s: Failed to add all PBL chunks (%x/%x)\n",
286 __func__, pbl_start, rdev_p->rnic_info.pbl_top - pbl_start);
287 return 0;
288 }
289 pbl_chunk >>= 1;
290 } else {
291 PDBG("%s added PBL chunk (%x/%x)\n",
292 __func__, pbl_start, pbl_chunk);
293 pbl_start += pbl_chunk;
294 }
295 }
296
297 return 0;
278} 298}
279 299
280void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p) 300void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p)
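
The loop above seeds the gen_pool with progressively smaller chunks when a large insertion fails. A hedged, self-contained restatement of that strategy (names are illustrative, not from the driver):

/* try the whole region as one chunk; on failure, halve and retry */
static int seed_pool_halving(struct gen_pool *pool, unsigned long start,
			     unsigned long top, unsigned long min_chunk)
{
	unsigned long chunk = top - start + 1;

	while (start < top) {
		chunk = min(top - start + 1, chunk);
		if (gen_pool_add(pool, start, chunk, -1)) {
			if (chunk <= min_chunk)
				return 0;	/* keep whatever was added */
			chunk >>= 1;		/* retry with a smaller chunk */
		} else {
			start += chunk;		/* chunk added; advance */
		}
	}
	return 0;
}

Halving bounds the retries while still preferring large chunks, which keeps gen_pool per-chunk metadata small on machines where one huge allocation for the whole region would fail.
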
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
index 58c3d61bcd1..ec49a5cbdeb 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c
@@ -35,17 +35,26 @@
35#include <rdma/ib_verbs.h> 35#include <rdma/ib_verbs.h>
36 36
37#include "cxio_hal.h" 37#include "cxio_hal.h"
38#include "cxio_resource.h"
38#include "iwch.h" 39#include "iwch.h"
39#include "iwch_provider.h" 40#include "iwch_provider.h"
40 41
41int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php, 42static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
42 struct iwch_mr *mhp,
43 int shift,
44 __be64 *page_list)
45{ 43{
46 u32 stag;
47 u32 mmid; 44 u32 mmid;
48 45
46 mhp->attr.state = 1;
47 mhp->attr.stag = stag;
48 mmid = stag >> 8;
49 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
50 insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
51 PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
52}
53
54int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
55 struct iwch_mr *mhp, int shift)
56{
57 u32 stag;
49 58
50 if (cxio_register_phys_mem(&rhp->rdev, 59 if (cxio_register_phys_mem(&rhp->rdev,
51 &stag, mhp->attr.pdid, 60 &stag, mhp->attr.pdid,
@@ -53,28 +62,21 @@ int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
53 mhp->attr.zbva, 62 mhp->attr.zbva,
54 mhp->attr.va_fbo, 63 mhp->attr.va_fbo,
55 mhp->attr.len, 64 mhp->attr.len,
56 shift-12, 65 shift - 12,
57 page_list, 66 mhp->attr.pbl_size, mhp->attr.pbl_addr))
58 &mhp->attr.pbl_size, &mhp->attr.pbl_addr))
59 return -ENOMEM; 67 return -ENOMEM;
60 mhp->attr.state = 1; 68
61 mhp->attr.stag = stag; 69 iwch_finish_mem_reg(mhp, stag);
62 mmid = stag >> 8; 70
63 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
64 insert_handle(rhp, &rhp->mmidr, mhp, mmid);
65 PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
66 return 0; 71 return 0;
67} 72}
68 73
69int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, 74int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
70 struct iwch_mr *mhp, 75 struct iwch_mr *mhp,
71 int shift, 76 int shift,
72 __be64 *page_list,
73 int npages) 77 int npages)
74{ 78{
75 u32 stag; 79 u32 stag;
76 u32 mmid;
77
78 80
79 /* We could support this... */ 81 /* We could support this... */
80 if (npages > mhp->attr.pbl_size) 82 if (npages > mhp->attr.pbl_size)
@@ -87,19 +89,40 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
87 mhp->attr.zbva, 89 mhp->attr.zbva,
88 mhp->attr.va_fbo, 90 mhp->attr.va_fbo,
89 mhp->attr.len, 91 mhp->attr.len,
90 shift-12, 92 shift - 12,
91 page_list, 93 mhp->attr.pbl_size, mhp->attr.pbl_addr))
92 &mhp->attr.pbl_size, &mhp->attr.pbl_addr))
93 return -ENOMEM; 94 return -ENOMEM;
94 mhp->attr.state = 1; 95
95 mhp->attr.stag = stag; 96 iwch_finish_mem_reg(mhp, stag);
96 mmid = stag >> 8; 97
97 mhp->ibmr.rkey = mhp->ibmr.lkey = stag; 98 return 0;
98 insert_handle(rhp, &rhp->mmidr, mhp, mmid); 99}
99 PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp); 100
101int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
102{
103 mhp->attr.pbl_addr = cxio_hal_pblpool_alloc(&mhp->rhp->rdev,
104 npages << 3);
105
106 if (!mhp->attr.pbl_addr)
107 return -ENOMEM;
108
109 mhp->attr.pbl_size = npages;
110
100 return 0; 111 return 0;
101} 112}
102 113
114void iwch_free_pbl(struct iwch_mr *mhp)
115{
116 cxio_hal_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
117 mhp->attr.pbl_size << 3);
118}
119
120int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset)
121{
122 return cxio_write_pbl(&mhp->rhp->rdev, pages,
123 mhp->attr.pbl_addr + (offset << 3), npages);
124}
125
103int build_phys_page_list(struct ib_phys_buf *buffer_list, 126int build_phys_page_list(struct ib_phys_buf *buffer_list,
104 int num_phys_buf, 127 int num_phys_buf,
105 u64 *iova_start, 128 u64 *iova_start,
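
Taken together, the new helpers split memory registration into three steps. A hedged sketch of the intended call sequence (example_mr_setup() is hypothetical):

static int example_mr_setup(struct iwch_dev *rhp, struct iwch_pd *php,
			    struct iwch_mr *mhp, __be64 *pages,
			    int npages, int shift)
{
	int err;

	err = iwch_alloc_pbl(mhp, npages);		/* reserve PBL space */
	if (err)
		return err;

	err = iwch_write_pbl(mhp, pages, npages, 0);	/* copy the page list */
	if (err)
		goto free_pbl;

	err = iwch_register_mem(rhp, php, mhp, shift);	/* write the TPT entry */
	if (err)
		goto free_pbl;
	return 0;

free_pbl:
	iwch_free_pbl(mhp);
	return err;
}
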
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index d07d3a377b5..8934178a23e 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -442,6 +442,7 @@ static int iwch_dereg_mr(struct ib_mr *ib_mr)
442 mmid = mhp->attr.stag >> 8; 442 mmid = mhp->attr.stag >> 8;
443 cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, 443 cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
444 mhp->attr.pbl_addr); 444 mhp->attr.pbl_addr);
445 iwch_free_pbl(mhp);
445 remove_handle(rhp, &rhp->mmidr, mmid); 446 remove_handle(rhp, &rhp->mmidr, mmid);
446 if (mhp->kva) 447 if (mhp->kva)
447 kfree((void *) (unsigned long) mhp->kva); 448 kfree((void *) (unsigned long) mhp->kva);
@@ -475,6 +476,8 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
475 if (!mhp) 476 if (!mhp)
476 return ERR_PTR(-ENOMEM); 477 return ERR_PTR(-ENOMEM);
477 478
479 mhp->rhp = rhp;
480
478 /* First check that we have enough alignment */ 481 /* First check that we have enough alignment */
479 if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) { 482 if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
480 ret = -EINVAL; 483 ret = -EINVAL;
@@ -492,7 +495,17 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
492 if (ret) 495 if (ret)
493 goto err; 496 goto err;
494 497
495 mhp->rhp = rhp; 498 ret = iwch_alloc_pbl(mhp, npages);
499 if (ret) {
500 kfree(page_list);
501 goto err_pbl;
502 }
503
504 ret = iwch_write_pbl(mhp, page_list, npages, 0);
505 kfree(page_list);
506 if (ret)
507 goto err_pbl;
508
496 mhp->attr.pdid = php->pdid; 509 mhp->attr.pdid = php->pdid;
497 mhp->attr.zbva = 0; 510 mhp->attr.zbva = 0;
498 511
@@ -502,12 +515,15 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
502 515
503 mhp->attr.len = (u32) total_size; 516 mhp->attr.len = (u32) total_size;
504 mhp->attr.pbl_size = npages; 517 mhp->attr.pbl_size = npages;
505 ret = iwch_register_mem(rhp, php, mhp, shift, page_list); 518 ret = iwch_register_mem(rhp, php, mhp, shift);
506 kfree(page_list); 519 if (ret)
507 if (ret) { 520 goto err_pbl;
508 goto err; 521
509 }
510 return &mhp->ibmr; 522 return &mhp->ibmr;
523
524err_pbl:
525 iwch_free_pbl(mhp);
526
511err: 527err:
512 kfree(mhp); 528 kfree(mhp);
513 return ERR_PTR(ret); 529 return ERR_PTR(ret);
@@ -560,7 +576,7 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
560 return ret; 576 return ret;
561 } 577 }
562 578
563 ret = iwch_reregister_mem(rhp, php, &mh, shift, page_list, npages); 579 ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
564 kfree(page_list); 580 kfree(page_list);
565 if (ret) { 581 if (ret) {
566 return ret; 582 return ret;
@@ -602,6 +618,8 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
602 if (!mhp) 618 if (!mhp)
603 return ERR_PTR(-ENOMEM); 619 return ERR_PTR(-ENOMEM);
604 620
621 mhp->rhp = rhp;
622
605 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); 623 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
606 if (IS_ERR(mhp->umem)) { 624 if (IS_ERR(mhp->umem)) {
607 err = PTR_ERR(mhp->umem); 625 err = PTR_ERR(mhp->umem);
@@ -615,10 +633,14 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
615 list_for_each_entry(chunk, &mhp->umem->chunk_list, list) 633 list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
616 n += chunk->nents; 634 n += chunk->nents;
617 635
618 pages = kmalloc(n * sizeof(u64), GFP_KERNEL); 636 err = iwch_alloc_pbl(mhp, n);
637 if (err)
638 goto err;
639
640 pages = (__be64 *) __get_free_page(GFP_KERNEL);
619 if (!pages) { 641 if (!pages) {
620 err = -ENOMEM; 642 err = -ENOMEM;
621 goto err; 643 goto err_pbl;
622 } 644 }
623 645
624 i = n = 0; 646 i = n = 0;
@@ -630,25 +652,38 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
630 pages[i++] = cpu_to_be64(sg_dma_address( 652 pages[i++] = cpu_to_be64(sg_dma_address(
631 &chunk->page_list[j]) + 653 &chunk->page_list[j]) +
632 mhp->umem->page_size * k); 654 mhp->umem->page_size * k);
655 if (i == PAGE_SIZE / sizeof *pages) {
656 err = iwch_write_pbl(mhp, pages, i, n);
657 if (err)
658 goto pbl_done;
659 n += i;
660 i = 0;
661 }
633 } 662 }
634 } 663 }
635 664
636 mhp->rhp = rhp; 665 if (i)
666 err = iwch_write_pbl(mhp, pages, i, n);
667
668pbl_done:
669 free_page((unsigned long) pages);
670 if (err)
671 goto err_pbl;
672
637 mhp->attr.pdid = php->pdid; 673 mhp->attr.pdid = php->pdid;
638 mhp->attr.zbva = 0; 674 mhp->attr.zbva = 0;
639 mhp->attr.perms = iwch_ib_to_tpt_access(acc); 675 mhp->attr.perms = iwch_ib_to_tpt_access(acc);
640 mhp->attr.va_fbo = virt; 676 mhp->attr.va_fbo = virt;
641 mhp->attr.page_size = shift - 12; 677 mhp->attr.page_size = shift - 12;
642 mhp->attr.len = (u32) length; 678 mhp->attr.len = (u32) length;
643 mhp->attr.pbl_size = i; 679
644 err = iwch_register_mem(rhp, php, mhp, shift, pages); 680 err = iwch_register_mem(rhp, php, mhp, shift);
645 kfree(pages);
646 if (err) 681 if (err)
647 goto err; 682 goto err_pbl;
648 683
649 if (udata && !t3a_device(rhp)) { 684 if (udata && !t3a_device(rhp)) {
650 uresp.pbl_addr = (mhp->attr.pbl_addr - 685 uresp.pbl_addr = (mhp->attr.pbl_addr -
651 rhp->rdev.rnic_info.pbl_base) >> 3; 686 rhp->rdev.rnic_info.pbl_base) >> 3;
652 PDBG("%s user resp pbl_addr 0x%x\n", __func__, 687 PDBG("%s user resp pbl_addr 0x%x\n", __func__,
653 uresp.pbl_addr); 688 uresp.pbl_addr);
654 689
@@ -661,6 +696,9 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
661 696
662 return &mhp->ibmr; 697 return &mhp->ibmr;
663 698
699err_pbl:
700 iwch_free_pbl(mhp);
701
664err: 702err:
665 ib_umem_release(mhp->umem); 703 ib_umem_release(mhp->umem);
666 kfree(mhp); 704 kfree(mhp);
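
The reworked iwch_reg_user_mr() above no longer kmallocs one array large enough for every page address; it reuses a single page-sized staging buffer and flushes it with iwch_write_pbl() each time it fills. A hedged sketch of that pattern in isolation (the per-page DMA-address source is stubbed out):

static int example_write_pbl_chunked(struct iwch_mr *mhp, int total)
{
	__be64 *pages = (__be64 *) __get_free_page(GFP_KERNEL);
	int i = 0, off = 0, err = 0;

	if (!pages)
		return -ENOMEM;

	while (off + i < total) {
		pages[i++] = cpu_to_be64(0 /* next page's DMA address */);
		if (i == PAGE_SIZE / sizeof(*pages)) {
			err = iwch_write_pbl(mhp, pages, i, off);
			if (err)
				goto out;
			off += i;	/* flushed a full staging buffer */
			i = 0;
		}
	}
	if (i)	/* flush the final partial chunk */
		err = iwch_write_pbl(mhp, pages, i, off);
out:
	free_page((unsigned long) pages);
	return err;
}

This caps the transient allocation at one page regardless of registration size, which is what removes the practical limit the old full-size kmalloc imposed on large userspace registrations.
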
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index db5100d27ca..836163fc542 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -340,14 +340,14 @@ int iwch_quiesce_qps(struct iwch_cq *chp);
340int iwch_resume_qps(struct iwch_cq *chp); 340int iwch_resume_qps(struct iwch_cq *chp);
341void stop_read_rep_timer(struct iwch_qp *qhp); 341void stop_read_rep_timer(struct iwch_qp *qhp);
342int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php, 342int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
343 struct iwch_mr *mhp, 343 struct iwch_mr *mhp, int shift);
344 int shift,
345 __be64 *page_list);
346int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, 344int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
347 struct iwch_mr *mhp, 345 struct iwch_mr *mhp,
348 int shift, 346 int shift,
349 __be64 *page_list,
350 int npages); 347 int npages);
348int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
349void iwch_free_pbl(struct iwch_mr *mhp);
350int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
351int build_phys_page_list(struct ib_phys_buf *buffer_list, 351int build_phys_page_list(struct ib_phys_buf *buffer_list,
352 int num_phys_buf, 352 int num_phys_buf,
353 u64 *iova_start, 353 u64 *iova_start,
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 00bab60f6de..1e9e99a1393 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -192,6 +192,8 @@ struct ehca_qp {
192 int mtu_shift; 192 int mtu_shift;
193 u32 message_count; 193 u32 message_count;
194 u32 packet_count; 194 u32 packet_count;
195 atomic_t nr_events; /* events seen */
196 wait_queue_head_t wait_completion;
195}; 197};
196 198
197#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ) 199#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index ca5eb0cb628..ce1ab0571be 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -204,6 +204,8 @@ static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
204 204
205 read_lock(&ehca_qp_idr_lock); 205 read_lock(&ehca_qp_idr_lock);
206 qp = idr_find(&ehca_qp_idr, token); 206 qp = idr_find(&ehca_qp_idr, token);
207 if (qp)
208 atomic_inc(&qp->nr_events);
207 read_unlock(&ehca_qp_idr_lock); 209 read_unlock(&ehca_qp_idr_lock);
208 210
209 if (!qp) 211 if (!qp)
@@ -223,6 +225,8 @@ static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
223 if (fatal && qp->ext_type == EQPT_SRQBASE) 225 if (fatal && qp->ext_type == EQPT_SRQBASE)
224 dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED); 226 dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);
225 227
228 if (atomic_dec_and_test(&qp->nr_events))
229 wake_up(&qp->wait_completion);
226 return; 230 return;
227} 231}
228 232
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 18fba92fa7a..3f59587338e 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -566,6 +566,8 @@ static struct ehca_qp *internal_create_qp(
566 return ERR_PTR(-ENOMEM); 566 return ERR_PTR(-ENOMEM);
567 } 567 }
568 568
569 atomic_set(&my_qp->nr_events, 0);
570 init_waitqueue_head(&my_qp->wait_completion);
569 spin_lock_init(&my_qp->spinlock_s); 571 spin_lock_init(&my_qp->spinlock_s);
570 spin_lock_init(&my_qp->spinlock_r); 572 spin_lock_init(&my_qp->spinlock_r);
571 my_qp->qp_type = qp_type; 573 my_qp->qp_type = qp_type;
@@ -1934,6 +1936,9 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
1934 idr_remove(&ehca_qp_idr, my_qp->token); 1936 idr_remove(&ehca_qp_idr, my_qp->token);
1935 write_unlock_irqrestore(&ehca_qp_idr_lock, flags); 1937 write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
1936 1938
1939 /* now wait until all pending events have completed */
1940 wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));
1941
1937 h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp); 1942 h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
1938 if (h_ret != H_SUCCESS) { 1943 if (h_ret != H_SUCCESS) {
1939 ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li " 1944 ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li "
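
The ehca changes implement a small pin-and-drain handshake: the IRQ path bumps nr_events while still holding the idr read lock, and destroy waits for the count to reach zero after the QP has been removed from the idr. A hedged sketch of the pattern with the locking elided:

static void example_event(struct ehca_qp *qp)
{
	/* caller did atomic_inc(&qp->nr_events) before dropping
	 * ehca_qp_idr_lock, so qp cannot be freed under us */
	/* ... dispatch the event ... */
	if (atomic_dec_and_test(&qp->nr_events))
		wake_up(&qp->wait_completion);
}

static void example_destroy(struct ehca_qp *qp)
{
	/* qp already removed from the idr: no new event can pin it,
	 * so once the count drains it stays at zero */
	wait_event(qp->wait_completion, !atomic_read(&qp->nr_events));
	/* now safe to tear the QP down */
}
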
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index acf30c06a0c..ce7b7c34360 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -1197,7 +1197,7 @@ void ipath_kreceive(struct ipath_portdata *pd)
1197 } 1197 }
1198 1198
1199reloop: 1199reloop:
1200 for (last = 0, i = 1; !last; i++) { 1200 for (last = 0, i = 1; !last; i += !last) {
1201 hdr = dd->ipath_f_get_msgheader(dd, rhf_addr); 1201 hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
1202 eflags = ipath_hdrget_err_flags(rhf_addr); 1202 eflags = ipath_hdrget_err_flags(rhf_addr);
1203 etype = ipath_hdrget_rcv_type(rhf_addr); 1203 etype = ipath_hdrget_rcv_type(rhf_addr);
@@ -1428,6 +1428,40 @@ static void ipath_update_pio_bufs(struct ipath_devdata *dd)
1428 spin_unlock_irqrestore(&ipath_pioavail_lock, flags); 1428 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1429} 1429}
1430 1430
1431/*
1432 * used to force update of pioavailshadow if we can't get a pio buffer.
1433 * Needed primarily due to exiting freeze mode after recovering
1434 * from errors. Done lazily, because it's safer (known to not
1435 * be writing pio buffers).
1436 */
1437static void ipath_reset_availshadow(struct ipath_devdata *dd)
1438{
1439 int i, im;
1440 unsigned long flags;
1441
1442 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1443 for (i = 0; i < dd->ipath_pioavregs; i++) {
1444 u64 val, oldval;
1445 /* deal with 6110 chip bug on high register #s */
1446 im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
1447 i ^ 1 : i;
1448 val = le64_to_cpu(dd->ipath_pioavailregs_dma[im]);
1449 /*
1450 * busy out the buffers not in the kernel avail list,
1451 * without changing the generation bits.
1452 */
1453 oldval = dd->ipath_pioavailshadow[i];
1454 dd->ipath_pioavailshadow[i] = val |
1455 ((~dd->ipath_pioavailkernel[i] <<
1456 INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT) &
1457 0xaaaaaaaaaaaaaaaaULL); /* All BUSY bits in qword */
1458 if (oldval != dd->ipath_pioavailshadow[i])
1459 ipath_dbg("shadow[%d] was %Lx, now %lx\n",
1460 i, oldval, dd->ipath_pioavailshadow[i]);
1461 }
1462 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1463}
1464
1431/** 1465/**
1432 * ipath_setrcvhdrsize - set the receive header size 1466 * ipath_setrcvhdrsize - set the receive header size
1433 * @dd: the infinipath device 1467 * @dd: the infinipath device
@@ -1482,9 +1516,12 @@ static noinline void no_pio_bufs(struct ipath_devdata *dd)
1482 */ 1516 */
1483 ipath_stats.sps_nopiobufs++; 1517 ipath_stats.sps_nopiobufs++;
1484 if (!(++dd->ipath_consec_nopiobuf % 100000)) { 1518 if (!(++dd->ipath_consec_nopiobuf % 100000)) {
1485 ipath_dbg("%u pio sends with no bufavail; dmacopy: " 1519 ipath_force_pio_avail_update(dd); /* at start */
1486 "%llx %llx %llx %llx; shadow: %lx %lx %lx %lx\n", 1520 ipath_dbg("%u tries no piobufavail ts%lx; dmacopy: "
1521 "%llx %llx %llx %llx\n"
1522 "ipath shadow: %lx %lx %lx %lx\n",
1487 dd->ipath_consec_nopiobuf, 1523 dd->ipath_consec_nopiobuf,
1524 (unsigned long)get_cycles(),
1488 (unsigned long long) le64_to_cpu(dma[0]), 1525 (unsigned long long) le64_to_cpu(dma[0]),
1489 (unsigned long long) le64_to_cpu(dma[1]), 1526 (unsigned long long) le64_to_cpu(dma[1]),
1490 (unsigned long long) le64_to_cpu(dma[2]), 1527 (unsigned long long) le64_to_cpu(dma[2]),
@@ -1496,14 +1533,17 @@ static noinline void no_pio_bufs(struct ipath_devdata *dd)
1496 */ 1533 */
1497 if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 1534 if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
1498 (sizeof(shadow[0]) * 4 * 4)) 1535 (sizeof(shadow[0]) * 4 * 4))
1499 ipath_dbg("2nd group: dmacopy: %llx %llx " 1536 ipath_dbg("2nd group: dmacopy: "
1500 "%llx %llx; shadow: %lx %lx %lx %lx\n", 1537 "%llx %llx %llx %llx\n"
1538 "ipath shadow: %lx %lx %lx %lx\n",
1501 (unsigned long long)le64_to_cpu(dma[4]), 1539 (unsigned long long)le64_to_cpu(dma[4]),
1502 (unsigned long long)le64_to_cpu(dma[5]), 1540 (unsigned long long)le64_to_cpu(dma[5]),
1503 (unsigned long long)le64_to_cpu(dma[6]), 1541 (unsigned long long)le64_to_cpu(dma[6]),
1504 (unsigned long long)le64_to_cpu(dma[7]), 1542 (unsigned long long)le64_to_cpu(dma[7]),
1505 shadow[4], shadow[5], shadow[6], 1543 shadow[4], shadow[5], shadow[6], shadow[7]);
1506 shadow[7]); 1544
1545 /* at end, so update likely happened */
1546 ipath_reset_availshadow(dd);
1507 } 1547 }
1508} 1548}
1509 1549
@@ -1652,19 +1692,46 @@ void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
1652 unsigned len, int avail) 1692 unsigned len, int avail)
1653{ 1693{
1654 unsigned long flags; 1694 unsigned long flags;
1655 unsigned end; 1695 unsigned end, cnt = 0, next;
1656 1696
1657 /* There are two bits per send buffer (busy and generation) */ 1697 /* There are two bits per send buffer (busy and generation) */
1658 start *= 2; 1698 start *= 2;
1659 len *= 2; 1699 end = start + len * 2;
1660 end = start + len;
1661 1700
1662 /* Set or clear the generation bits. */
1663 spin_lock_irqsave(&ipath_pioavail_lock, flags); 1701 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1702 /* Set or clear the busy bit in the shadow. */
1664 while (start < end) { 1703 while (start < end) {
1665 if (avail) { 1704 if (avail) {
1666 __clear_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT, 1705 unsigned long dma;
1667 dd->ipath_pioavailshadow); 1706 int i, im;
1707 /*
1708 * the BUSY bit will never be set, because we disarm
1709 * the user buffers before we hand them back to the
1710 * kernel. We do have to make sure the generation
1711 * bit is set correctly in shadow, since it could
1712 * have changed many times while allocated to user.
1713 * We can't use the bitmap functions on the full
1714 * dma array because it is always little-endian, so
1715 * we have to flip to host-order first.
1716 * BITS_PER_LONG is slightly wrong, since it's
1717 * always 64 bits per register in chip...
1718 * We only work on 64 bit kernels, so that's OK.
1719 */
1720 /* deal with 6110 chip bug on high register #s */
1721 i = start / BITS_PER_LONG;
1722 im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
1723 i ^ 1 : i;
1724 __clear_bit(INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT
1725 + start, dd->ipath_pioavailshadow);
1726 dma = (unsigned long) le64_to_cpu(
1727 dd->ipath_pioavailregs_dma[im]);
1728 if (test_bit((INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
1729 + start) % BITS_PER_LONG, &dma))
1730 __set_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
1731 + start, dd->ipath_pioavailshadow);
1732 else
1733 __clear_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
1734 + start, dd->ipath_pioavailshadow);
1668 __set_bit(start, dd->ipath_pioavailkernel); 1735 __set_bit(start, dd->ipath_pioavailkernel);
1669 } else { 1736 } else {
1670 __set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT, 1737 __set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
@@ -1673,7 +1740,44 @@ void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
1673 } 1740 }
1674 start += 2; 1741 start += 2;
1675 } 1742 }
1743
1744 if (dd->ipath_pioupd_thresh) {
1745 end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
1746 next = find_first_bit(dd->ipath_pioavailkernel, end);
1747 while (next < end) {
1748 cnt++;
1749 next = find_next_bit(dd->ipath_pioavailkernel, end,
1750 next + 1);
1751 }
1752 }
1676 spin_unlock_irqrestore(&ipath_pioavail_lock, flags); 1753 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1754
1755 /*
1756 * When moving buffers from kernel to user, if number assigned to
1757 * the user is less than the pio update threshold, and threshold
1758 * is supported (cnt was computed > 0), drop the update threshold
1759 * so we update at least once per allocated number of buffers.
1760 * In any case, if the kernel buffers are less than the threshold,
1761 * drop the threshold. We don't bother increasing it, having once
1762 * decreased it, since it would typically just cycle back and forth.
1763 * If we don't decrease below buffers in use, we can wait a long
1764 * time for an update, until some other context uses PIO buffers.
1765 */
1766 if (!avail && len < cnt)
1767 cnt = len;
1768 if (cnt < dd->ipath_pioupd_thresh) {
1769 dd->ipath_pioupd_thresh = cnt;
1770 ipath_dbg("Decreased pio update threshold to %u\n",
1771 dd->ipath_pioupd_thresh);
1772 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1773 dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
1774 << INFINIPATH_S_UPDTHRESH_SHIFT);
1775 dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
1776 << INFINIPATH_S_UPDTHRESH_SHIFT;
1777 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1778 dd->ipath_sendctrl);
1779 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1780 }
1677} 1781}
1678 1782
1679/** 1783/**
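
A hedged restatement of the ratchet described in the comment above, factored into a hypothetical helper (to_user corresponds to !avail in the driver): the threshold only ever moves down, toward the smaller of the buffers just handed out and the kernel buffers still available.

static unsigned example_new_thresh(unsigned thresh, unsigned len,
				   unsigned kern_cnt, int to_user)
{
	unsigned cnt = kern_cnt;

	if (to_user && len < cnt)
		cnt = len;		/* at least one update per handout */
	return cnt < thresh ? cnt : thresh;
}
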
@@ -1794,8 +1898,8 @@ void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
1794 1898
1795 spin_lock_irqsave(&dd->ipath_sdma_lock, flags); 1899 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
1796 skip_cancel = 1900 skip_cancel =
1797 !test_bit(IPATH_SDMA_DISABLED, statp) && 1901 test_and_set_bit(IPATH_SDMA_ABORTING, statp)
1798 test_and_set_bit(IPATH_SDMA_ABORTING, statp); 1902 && !test_bit(IPATH_SDMA_DISABLED, statp);
1799 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); 1903 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
1800 if (skip_cancel) 1904 if (skip_cancel)
1801 goto bail; 1905 goto bail;
@@ -1826,6 +1930,9 @@ void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
1826 ipath_disarm_piobufs(dd, 0, 1930 ipath_disarm_piobufs(dd, 0,
1827 dd->ipath_piobcnt2k + dd->ipath_piobcnt4k); 1931 dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
1828 1932
1933 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
1934 set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
1935
1829 if (restore_sendctrl) { 1936 if (restore_sendctrl) {
1830 /* else done by caller later if needed */ 1937 /* else done by caller later if needed */
1831 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); 1938 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
@@ -1845,7 +1952,6 @@ void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
1845 /* only wait so long for intr */ 1952 /* only wait so long for intr */
1846 dd->ipath_sdma_abort_intr_timeout = jiffies + HZ; 1953 dd->ipath_sdma_abort_intr_timeout = jiffies + HZ;
1847 dd->ipath_sdma_reset_wait = 200; 1954 dd->ipath_sdma_reset_wait = 200;
1848 __set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
1849 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status)) 1955 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
1850 tasklet_hi_schedule(&dd->ipath_sdma_abort_task); 1956 tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
1851 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); 1957 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
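
The 0xaaaa... constant in ipath_reset_availshadow() above selects every BUSY bit, since each send buffer owns two adjacent bits (generation and busy) in the shadow qword. A hedged one-line helper showing just the masking step (busy_out_nonkernel() is illustrative):

static inline u64 busy_out_nonkernel(u64 dma_val, unsigned long kern_avail)
{
	/* busy out buffers not in the kernel-avail bitmap, leaving
	 * the generation bits from the DMA copy untouched */
	return dma_val |
	       ((~kern_avail << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT) &
		0xaaaaaaaaaaaaaaaaULL);	/* all BUSY bits in the qword */
}
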
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 8b1752202e7..3295177c937 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -173,47 +173,25 @@ static int ipath_get_base_info(struct file *fp,
173 (void *) dd->ipath_statusp - 173 (void *) dd->ipath_statusp -
174 (void *) dd->ipath_pioavailregs_dma; 174 (void *) dd->ipath_pioavailregs_dma;
175 if (!shared) { 175 if (!shared) {
176 kinfo->spi_piocnt = dd->ipath_pbufsport; 176 kinfo->spi_piocnt = pd->port_piocnt;
177 kinfo->spi_piobufbase = (u64) pd->port_piobufs; 177 kinfo->spi_piobufbase = (u64) pd->port_piobufs;
178 kinfo->__spi_uregbase = (u64) dd->ipath_uregbase + 178 kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
179 dd->ipath_ureg_align * pd->port_port; 179 dd->ipath_ureg_align * pd->port_port;
180 } else if (master) { 180 } else if (master) {
181 kinfo->spi_piocnt = (dd->ipath_pbufsport / subport_cnt) + 181 kinfo->spi_piocnt = (pd->port_piocnt / subport_cnt) +
182 (dd->ipath_pbufsport % subport_cnt); 182 (pd->port_piocnt % subport_cnt);
183 /* Master's PIO buffers are after all the slave's */ 183 /* Master's PIO buffers are after all the slave's */
184 kinfo->spi_piobufbase = (u64) pd->port_piobufs + 184 kinfo->spi_piobufbase = (u64) pd->port_piobufs +
185 dd->ipath_palign * 185 dd->ipath_palign *
186 (dd->ipath_pbufsport - kinfo->spi_piocnt); 186 (pd->port_piocnt - kinfo->spi_piocnt);
187 } else { 187 } else {
188 unsigned slave = subport_fp(fp) - 1; 188 unsigned slave = subport_fp(fp) - 1;
189 189
190 kinfo->spi_piocnt = dd->ipath_pbufsport / subport_cnt; 190 kinfo->spi_piocnt = pd->port_piocnt / subport_cnt;
191 kinfo->spi_piobufbase = (u64) pd->port_piobufs + 191 kinfo->spi_piobufbase = (u64) pd->port_piobufs +
192 dd->ipath_palign * kinfo->spi_piocnt * slave; 192 dd->ipath_palign * kinfo->spi_piocnt * slave;
193 } 193 }
194 194
195 /*
196 * Set the PIO avail update threshold to no larger
197 * than the number of buffers per process. Note that
198 * we decrease it here, but won't ever increase it.
199 */
200 if (dd->ipath_pioupd_thresh &&
201 kinfo->spi_piocnt < dd->ipath_pioupd_thresh) {
202 unsigned long flags;
203
204 dd->ipath_pioupd_thresh = kinfo->spi_piocnt;
205 ipath_dbg("Decreased pio update threshold to %u\n",
206 dd->ipath_pioupd_thresh);
207 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
208 dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
209 << INFINIPATH_S_UPDTHRESH_SHIFT);
210 dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
211 << INFINIPATH_S_UPDTHRESH_SHIFT;
212 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
213 dd->ipath_sendctrl);
214 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
215 }
216
217 if (shared) { 195 if (shared) {
218 kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase + 196 kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
219 dd->ipath_ureg_align * pd->port_port; 197 dd->ipath_ureg_align * pd->port_port;
@@ -1309,19 +1287,19 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
1309 ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port; 1287 ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port;
1310 if (!pd->port_subport_cnt) { 1288 if (!pd->port_subport_cnt) {
1311 /* port is not shared */ 1289 /* port is not shared */
1312 piocnt = dd->ipath_pbufsport; 1290 piocnt = pd->port_piocnt;
1313 piobufs = pd->port_piobufs; 1291 piobufs = pd->port_piobufs;
1314 } else if (!subport_fp(fp)) { 1292 } else if (!subport_fp(fp)) {
1315 /* caller is the master */ 1293 /* caller is the master */
1316 piocnt = (dd->ipath_pbufsport / pd->port_subport_cnt) + 1294 piocnt = (pd->port_piocnt / pd->port_subport_cnt) +
1317 (dd->ipath_pbufsport % pd->port_subport_cnt); 1295 (pd->port_piocnt % pd->port_subport_cnt);
1318 piobufs = pd->port_piobufs + 1296 piobufs = pd->port_piobufs +
1319 dd->ipath_palign * (dd->ipath_pbufsport - piocnt); 1297 dd->ipath_palign * (pd->port_piocnt - piocnt);
1320 } else { 1298 } else {
1321 unsigned slave = subport_fp(fp) - 1; 1299 unsigned slave = subport_fp(fp) - 1;
1322 1300
1323 /* caller is a slave */ 1301 /* caller is a slave */
1324 piocnt = dd->ipath_pbufsport / pd->port_subport_cnt; 1302 piocnt = pd->port_piocnt / pd->port_subport_cnt;
1325 piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave; 1303 piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave;
1326 } 1304 }
1327 1305
@@ -1633,9 +1611,6 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
1633 port_fp(fp) = pd; 1611 port_fp(fp) = pd;
1634 pd->port_pid = current->pid; 1612 pd->port_pid = current->pid;
1635 strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm)); 1613 strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
1636 ipath_chg_pioavailkernel(dd,
1637 dd->ipath_pbufsport * (pd->port_port - 1),
1638 dd->ipath_pbufsport, 0);
1639 ipath_stats.sps_ports++; 1614 ipath_stats.sps_ports++;
1640 ret = 0; 1615 ret = 0;
1641 } else 1616 } else
@@ -1938,11 +1913,25 @@ static int ipath_do_user_init(struct file *fp,
1938 1913
1939 /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */ 1914 /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */
1940 1915
1916 /* some ports may get extra buffers, calculate that here */
1917 if (pd->port_port <= dd->ipath_ports_extrabuf)
1918 pd->port_piocnt = dd->ipath_pbufsport + 1;
1919 else
1920 pd->port_piocnt = dd->ipath_pbufsport;
1921
1941 /* for right now, kernel piobufs are at end, so port 1 is at 0 */ 1922 /* for right now, kernel piobufs are at end, so port 1 is at 0 */
1923 if (pd->port_port <= dd->ipath_ports_extrabuf)
1924 pd->port_pio_base = (dd->ipath_pbufsport + 1)
1925 * (pd->port_port - 1);
1926 else
1927 pd->port_pio_base = dd->ipath_ports_extrabuf +
1928 dd->ipath_pbufsport * (pd->port_port - 1);
1942 pd->port_piobufs = dd->ipath_piobufbase + 1929 pd->port_piobufs = dd->ipath_piobufbase +
1943 dd->ipath_pbufsport * (pd->port_port - 1) * dd->ipath_palign; 1930 pd->port_pio_base * dd->ipath_palign;
1944 ipath_cdbg(VERBOSE, "Set base of piobufs for port %u to 0x%x\n", 1931 ipath_cdbg(VERBOSE, "piobuf base for port %u is 0x%x, piocnt %u,"
1945 pd->port_port, pd->port_piobufs); 1932 " first pio %u\n", pd->port_port, pd->port_piobufs,
1933 pd->port_piocnt, pd->port_pio_base);
1934 ipath_chg_pioavailkernel(dd, pd->port_pio_base, pd->port_piocnt, 0);
1946 1935
1947 /* 1936 /*
1948 * Now allocate the rcvhdr Q and eager TIDs; skip the TID 1937 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
@@ -2107,7 +2096,6 @@ static int ipath_close(struct inode *in, struct file *fp)
2107 } 2096 }
2108 2097
2109 if (dd->ipath_kregbase) { 2098 if (dd->ipath_kregbase) {
2110 int i;
2111 /* atomically clear receive enable port and intr avail. */ 2099 /* atomically clear receive enable port and intr avail. */
2112 clear_bit(dd->ipath_r_portenable_shift + port, 2100 clear_bit(dd->ipath_r_portenable_shift + port,
2113 &dd->ipath_rcvctrl); 2101 &dd->ipath_rcvctrl);
@@ -2136,9 +2124,9 @@ static int ipath_close(struct inode *in, struct file *fp)
2136 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr, 2124 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
2137 pd->port_port, dd->ipath_dummy_hdrq_phys); 2125 pd->port_port, dd->ipath_dummy_hdrq_phys);
2138 2126
2139 i = dd->ipath_pbufsport * (port - 1); 2127 ipath_disarm_piobufs(dd, pd->port_pio_base, pd->port_piocnt);
2140 ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport); 2128 ipath_chg_pioavailkernel(dd, pd->port_pio_base,
2141 ipath_chg_pioavailkernel(dd, i, dd->ipath_pbufsport, 1); 2129 pd->port_piocnt, 1);
2142 2130
2143 dd->ipath_f_clear_tids(dd, pd->port_port); 2131 dd->ipath_f_clear_tids(dd, pd->port_port);
2144 2132
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
index e3ec0d1bdf5..8eee7830f04 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba7220.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c
@@ -595,7 +595,7 @@ static void ipath_7220_txe_recover(struct ipath_devdata *dd)
595 595
596 dev_info(&dd->pcidev->dev, 596 dev_info(&dd->pcidev->dev,
597 "Recovering from TXE PIO parity error\n"); 597 "Recovering from TXE PIO parity error\n");
598 ipath_disarm_senderrbufs(dd, 1); 598 ipath_disarm_senderrbufs(dd);
599} 599}
600 600
601 601
@@ -675,10 +675,8 @@ static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg,
675 ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control); 675 ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
676 if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) { 676 if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
677 /* 677 /*
678 * Parity errors in send memory are recoverable, 678 * Parity errors in send memory are recoverable by h/w;
679 * just cancel the send (if indicated in sendbuffererror), 679 * just do housekeeping, exit freeze mode and continue.
680 * count the occurrence, unfreeze (if no other handled
681 * hardware error bits are set), and continue.
682 */ 680 */
683 if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | 681 if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
684 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) 682 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
@@ -687,13 +685,6 @@ static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg,
687 hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | 685 hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
688 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) 686 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
689 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT); 687 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT);
690 if (!hwerrs) {
691 /* else leave in freeze mode */
692 ipath_write_kreg(dd,
693 dd->ipath_kregs->kr_control,
694 dd->ipath_control);
695 goto bail;
696 }
697 } 688 }
698 if (hwerrs) { 689 if (hwerrs) {
699 /* 690 /*
@@ -723,8 +714,8 @@ static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg,
723 *dd->ipath_statusp |= IPATH_STATUS_HWERROR; 714 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
724 dd->ipath_flags &= ~IPATH_INITTED; 715 dd->ipath_flags &= ~IPATH_INITTED;
725 } else { 716 } else {
726 ipath_dbg("Clearing freezemode on ignored hardware " 717 ipath_dbg("Clearing freezemode on ignored or "
727 "error\n"); 718 "recovered hardware error\n");
728 ipath_clear_freeze(dd); 719 ipath_clear_freeze(dd);
729 } 720 }
730 } 721 }
@@ -870,8 +861,9 @@ static int ipath_7220_boardname(struct ipath_devdata *dd, char *name,
870 "revision %u.%u!\n", 861 "revision %u.%u!\n",
871 dd->ipath_majrev, dd->ipath_minrev); 862 dd->ipath_majrev, dd->ipath_minrev);
872 ret = 1; 863 ret = 1;
873 } else if (dd->ipath_minrev == 1) { 864 } else if (dd->ipath_minrev == 1 &&
874 /* Rev1 chips are prototypes. Complain, but allow use */ 865 !(dd->ipath_flags & IPATH_INITTED)) {
866 /* Rev1 chips are prototypes. Complain at init, but allow use */
875 ipath_dev_err(dd, "Unsupported hardware " 867 ipath_dev_err(dd, "Unsupported hardware "
876 "revision %u.%u, Contact support@qlogic.com\n", 868 "revision %u.%u, Contact support@qlogic.com\n",
877 dd->ipath_majrev, dd->ipath_minrev); 869 dd->ipath_majrev, dd->ipath_minrev);
@@ -1966,7 +1958,7 @@ static void ipath_7220_config_ports(struct ipath_devdata *dd, ushort cfgports)
1966 dd->ipath_rcvctrl); 1958 dd->ipath_rcvctrl);
1967 dd->ipath_p0_rcvegrcnt = 2048; /* always */ 1959 dd->ipath_p0_rcvegrcnt = 2048; /* always */
1968 if (dd->ipath_flags & IPATH_HAS_SEND_DMA) 1960 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
1969 dd->ipath_pioreserved = 1; /* reserve a buffer */ 1961 dd->ipath_pioreserved = 3; /* kpiobufs used for PIO */
1970} 1962}
1971 1963
1972 1964
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 27dd8947666..3e5baa43fc8 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -41,7 +41,7 @@
41/* 41/*
42 * min buffers we want to have per port, after driver 42 * min buffers we want to have per port, after driver
43 */ 43 */
44#define IPATH_MIN_USER_PORT_BUFCNT 8 44#define IPATH_MIN_USER_PORT_BUFCNT 7
45 45
46/* 46/*
47 * Number of ports we are configured to use (to allow for more pio 47 * Number of ports we are configured to use (to allow for more pio
@@ -54,13 +54,9 @@ MODULE_PARM_DESC(cfgports, "Set max number of ports to use");
54 54
55/* 55/*
56 * Number of buffers reserved for driver (verbs and layered drivers.) 56 * Number of buffers reserved for driver (verbs and layered drivers.)
57 * Reserved at end of buffer list. Initialized based on 57 * Initialized based on number of PIO buffers if not set via module interface.
58 * number of PIO buffers if not set via module interface.
59 * The problem with this is that it's global, but we'll use different 58 * The problem with this is that it's global, but we'll use different
60 * numbers for different chip types. So the default value is not 59 * numbers for different chip types.
61 * very useful. I've redefined it for the 1.3 release so that it's
62 * zero unless set by the user to something else, in which case we
63 * try to respect it.
64 */ 60 */
65static ushort ipath_kpiobufs; 61static ushort ipath_kpiobufs;
66 62
@@ -546,9 +542,12 @@ static void enable_chip(struct ipath_devdata *dd, int reinit)
546 pioavail = dd->ipath_pioavailregs_dma[i ^ 1]; 542 pioavail = dd->ipath_pioavailregs_dma[i ^ 1];
547 else 543 else
548 pioavail = dd->ipath_pioavailregs_dma[i]; 544 pioavail = dd->ipath_pioavailregs_dma[i];
549 dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail) | 545 /*
550 (~dd->ipath_pioavailkernel[i] << 546 * don't need to worry about ipath_pioavailkernel here
551 INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT); 547 * because we will call ipath_chg_pioavailkernel() later
548 * in initialization, to busy out buffers as needed
549 */
550 dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail);
552 } 551 }
553 /* can get counters, stats, etc. */ 552 /* can get counters, stats, etc. */
554 dd->ipath_flags |= IPATH_PRESENT; 553 dd->ipath_flags |= IPATH_PRESENT;
@@ -708,12 +707,11 @@ static void verify_interrupt(unsigned long opaque)
708int ipath_init_chip(struct ipath_devdata *dd, int reinit) 707int ipath_init_chip(struct ipath_devdata *dd, int reinit)
709{ 708{
710 int ret = 0; 709 int ret = 0;
711 u32 val32, kpiobufs; 710 u32 kpiobufs, defkbufs;
712 u32 piobufs, uports; 711 u32 piobufs, uports;
713 u64 val; 712 u64 val;
714 struct ipath_portdata *pd; 713 struct ipath_portdata *pd;
715 gfp_t gfp_flags = GFP_USER | __GFP_COMP; 714 gfp_t gfp_flags = GFP_USER | __GFP_COMP;
716 unsigned long flags;
717 715
718 ret = init_housekeeping(dd, reinit); 716 ret = init_housekeeping(dd, reinit);
719 if (ret) 717 if (ret)
@@ -753,56 +751,46 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
753 dd->ipath_pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) 751 dd->ipath_pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2)
754 / (sizeof(u64) * BITS_PER_BYTE / 2); 752 / (sizeof(u64) * BITS_PER_BYTE / 2);
755 uports = dd->ipath_cfgports ? dd->ipath_cfgports - 1 : 0; 753 uports = dd->ipath_cfgports ? dd->ipath_cfgports - 1 : 0;
756 if (ipath_kpiobufs == 0) { 754 if (piobufs > 144)
757 /* not set by user (this is default) */ 755 defkbufs = 32 + dd->ipath_pioreserved;
758 if (piobufs > 144)
759 kpiobufs = 32;
760 else
761 kpiobufs = 16;
762 }
763 else 756 else
764 kpiobufs = ipath_kpiobufs; 757 defkbufs = 16 + dd->ipath_pioreserved;
765 758
766 if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) { 759 if (ipath_kpiobufs && (ipath_kpiobufs +
760 (uports * IPATH_MIN_USER_PORT_BUFCNT)) > piobufs) {
767 int i = (int) piobufs - 761 int i = (int) piobufs -
768 (int) (uports * IPATH_MIN_USER_PORT_BUFCNT); 762 (int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
769 if (i < 1) 763 if (i < 1)
770 i = 1; 764 i = 1;
771 dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of " 765 dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of "
772 "%d for kernel leaves too few for %d user ports " 766 "%d for kernel leaves too few for %d user ports "
773 "(%d each); using %u\n", kpiobufs, 767 "(%d each); using %u\n", ipath_kpiobufs,
774 piobufs, uports, IPATH_MIN_USER_PORT_BUFCNT, i); 768 piobufs, uports, IPATH_MIN_USER_PORT_BUFCNT, i);
775 /* 769 /*
776 * shouldn't change ipath_kpiobufs, because could be 770 * shouldn't change ipath_kpiobufs, because could be
777 * different for different devices... 771 * different for different devices...
778 */ 772 */
779 kpiobufs = i; 773 kpiobufs = i;
780 } 774 } else if (ipath_kpiobufs)
775 kpiobufs = ipath_kpiobufs;
776 else
777 kpiobufs = defkbufs;
781 dd->ipath_lastport_piobuf = piobufs - kpiobufs; 778 dd->ipath_lastport_piobuf = piobufs - kpiobufs;
782 dd->ipath_pbufsport = 779 dd->ipath_pbufsport =
783 uports ? dd->ipath_lastport_piobuf / uports : 0; 780 uports ? dd->ipath_lastport_piobuf / uports : 0;
784 val32 = dd->ipath_lastport_piobuf - (dd->ipath_pbufsport * uports); 781 /* if not an even divisor, some user ports get extra buffers */
785 if (val32 > 0) { 782 dd->ipath_ports_extrabuf = dd->ipath_lastport_piobuf -
786 ipath_dbg("allocating %u pbufs/port leaves %u unused, " 783 (dd->ipath_pbufsport * uports);
787 "add to kernel\n", dd->ipath_pbufsport, val32); 784 if (dd->ipath_ports_extrabuf)
788 dd->ipath_lastport_piobuf -= val32; 785 ipath_dbg("%u pbufs/port leaves some unused, add 1 buffer to "
789 kpiobufs += val32; 786 "ports <= %u\n", dd->ipath_pbufsport,
790 ipath_dbg("%u pbufs/port leaves %u unused, add to kernel\n", 787 dd->ipath_ports_extrabuf);
791 dd->ipath_pbufsport, val32);
792 }
793 dd->ipath_lastpioindex = 0; 788 dd->ipath_lastpioindex = 0;
794 dd->ipath_lastpioindexl = dd->ipath_piobcnt2k; 789 dd->ipath_lastpioindexl = dd->ipath_piobcnt2k;
795 ipath_chg_pioavailkernel(dd, 0, piobufs, 1); 790 /* ipath_pioavailshadow initialized earlier */
796 ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u " 791 ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
797 "each for %u user ports\n", kpiobufs, 792 "each for %u user ports\n", kpiobufs,
798 piobufs, dd->ipath_pbufsport, uports); 793 piobufs, dd->ipath_pbufsport, uports);
799 if (dd->ipath_pioupd_thresh) {
800 if (dd->ipath_pbufsport < dd->ipath_pioupd_thresh)
801 dd->ipath_pioupd_thresh = dd->ipath_pbufsport;
802 if (kpiobufs < dd->ipath_pioupd_thresh)
803 dd->ipath_pioupd_thresh = kpiobufs;
804 }
805
806 ret = dd->ipath_f_early_init(dd); 794 ret = dd->ipath_f_early_init(dd);
807 if (ret) { 795 if (ret) {
808 ipath_dev_err(dd, "Early initialization failure\n"); 796 ipath_dev_err(dd, "Early initialization failure\n");
@@ -810,13 +798,6 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
810 } 798 }
811 799
812 /* 800 /*
813 * Cancel any possible active sends from early driver load.
814 * Follows early_init because some chips have to initialize
815 * PIO buffers in early_init to avoid false parity errors.
816 */
817 ipath_cancel_sends(dd, 0);
818
819 /*
820 * Early_init sets rcvhdrentsize and rcvhdrsize, so this must be 801 * Early_init sets rcvhdrentsize and rcvhdrsize, so this must be
821 * done after early_init. 802 * done after early_init.
822 */ 803 */
@@ -836,6 +817,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
836 817
837 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr, 818 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr,
838 dd->ipath_pioavailregs_phys); 819 dd->ipath_pioavailregs_phys);
820
839 /* 821 /*
840 * this is to detect s/w errors, which the h/w works around by 822 * this is to detect s/w errors, which the h/w works around by
841 * ignoring the low 6 bits of address, if it wasn't aligned. 823 * ignoring the low 6 bits of address, if it wasn't aligned.
@@ -862,12 +844,6 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
862 ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED); 844 ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
863 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL); 845 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
864 846
865 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
866 dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE;
867 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
868 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
869 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
870
871 /* 847 /*
872 * before error clears, since we expect serdes pll errors during 848 * before error clears, since we expect serdes pll errors during
873 * this, the first time after reset 849 * this, the first time after reset
@@ -940,6 +916,19 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
940 else 916 else
941 enable_chip(dd, reinit); 917 enable_chip(dd, reinit);
942 918
919 /* after enable_chip, so pioavailshadow setup */
920 ipath_chg_pioavailkernel(dd, 0, piobufs, 1);
921
922 /*
923 * Cancel any possible active sends from early driver load.
924 * Follows early_init because some chips have to initialize
925 * PIO buffers in early_init to avoid false parity errors.
926 * After enable and ipath_chg_pioavailkernel so we can safely
927 * enable pioavail updates and PIOENABLE; packets are now
928 * ready to go out.
929 */
930 ipath_cancel_sends(dd, 1);
931
943 if (!reinit) { 932 if (!reinit) {
944 /* 933 /*
945 * Used when we close a port, for DMA already in flight 934 * Used when we close a port, for DMA already in flight
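
The buffer accounting in ipath_init_chip() above condenses to a few rules: use the module parameter if set, else a size-based default plus the reserved count; clamp so each user port keeps IPATH_MIN_USER_PORT_BUFCNT; and hand any remainder out one buffer apiece to the lowest-numbered ports. A hedged restatement as a single hypothetical helper:

static u32 example_kpiobufs(u32 piobufs, u32 uports, u32 reserved,
			    u32 requested, u32 *extra_ports)
{
	u32 kbufs;

	if (requested &&
	    requested + uports * IPATH_MIN_USER_PORT_BUFCNT > piobufs)
		/* request too large; leave each user port its minimum */
		kbufs = max_t(int, 1, (int) piobufs -
			      (int) (uports * IPATH_MIN_USER_PORT_BUFCNT));
	else if (requested)
		kbufs = requested;
	else
		kbufs = (piobufs > 144 ? 32 : 16) + reserved;

	/* ports 1..*extra_ports each receive one extra buffer */
	*extra_ports = uports ? (piobufs - kbufs) % uports : 0;
	return kbufs;
}
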
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 1b58f4737c7..26900b3b7a4 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -38,42 +38,12 @@
38#include "ipath_verbs.h" 38#include "ipath_verbs.h"
39#include "ipath_common.h" 39#include "ipath_common.h"
40 40
41/*
42 * clear (write) a pio buffer, to clear a parity error. This routine
43 * should only be called when in freeze mode, and the buffer should be
44 * canceled afterwards.
45 */
46static void ipath_clrpiobuf(struct ipath_devdata *dd, u32 pnum)
47{
48 u32 __iomem *pbuf;
49 u32 dwcnt; /* dword count to write */
50 if (pnum < dd->ipath_piobcnt2k) {
51 pbuf = (u32 __iomem *) (dd->ipath_pio2kbase + pnum *
52 dd->ipath_palign);
53 dwcnt = dd->ipath_piosize2k >> 2;
54 }
55 else {
56 pbuf = (u32 __iomem *) (dd->ipath_pio4kbase +
57 (pnum - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
58 dwcnt = dd->ipath_piosize4k >> 2;
59 }
60 dev_info(&dd->pcidev->dev,
61 "Rewrite PIO buffer %u, to recover from parity error\n",
62 pnum);
63
64 /* no flush required, since already in freeze */
65 writel(dwcnt + 1, pbuf);
66 while (--dwcnt)
67 writel(0, pbuf++);
68}
69 41
70/* 42/*
71 * Called when we might have an error that is specific to a particular 43 * Called when we might have an error that is specific to a particular
72 * PIO buffer, and may need to cancel that buffer, so it can be re-used. 44 * PIO buffer, and may need to cancel that buffer, so it can be re-used.
73 * If rewrite is true, and bits are set in the sendbufferror registers,
74 * we'll write to the buffer, for error recovery on parity errors.
75 */ 45 */
76void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite) 46void ipath_disarm_senderrbufs(struct ipath_devdata *dd)
77{ 47{
78 u32 piobcnt; 48 u32 piobcnt;
79 unsigned long sbuf[4]; 49 unsigned long sbuf[4];
@@ -109,11 +79,8 @@ void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
109 } 79 }
110 80
111 for (i = 0; i < piobcnt; i++) 81 for (i = 0; i < piobcnt; i++)
112 if (test_bit(i, sbuf)) { 82 if (test_bit(i, sbuf))
113 if (rewrite)
114 ipath_clrpiobuf(dd, i);
115 ipath_disarm_piobufs(dd, i, 1); 83 ipath_disarm_piobufs(dd, i, 1);
116 }
117 /* ignore armlaunch errs for a bit */ 84 /* ignore armlaunch errs for a bit */
118 dd->ipath_lastcancel = jiffies+3; 85 dd->ipath_lastcancel = jiffies+3;
119 } 86 }
@@ -164,7 +131,7 @@ static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
164{ 131{
165 u64 ignore_this_time = 0; 132 u64 ignore_this_time = 0;
166 133
167 ipath_disarm_senderrbufs(dd, 0); 134 ipath_disarm_senderrbufs(dd);
168 if ((errs & E_SUM_LINK_PKTERRS) && 135 if ((errs & E_SUM_LINK_PKTERRS) &&
169 !(dd->ipath_flags & IPATH_LINKACTIVE)) { 136 !(dd->ipath_flags & IPATH_LINKACTIVE)) {
170 /* 137 /*
@@ -909,8 +876,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
909 * processes (causing armlaunch), send errors due to going into freeze mode, 876 * processes (causing armlaunch), send errors due to going into freeze mode,
910 * etc., and try to avoid causing extra interrupts while doing so. 877 * etc., and try to avoid causing extra interrupts while doing so.
911 * Forcibly update the in-memory pioavail register copies after cleanup 878 * Forcibly update the in-memory pioavail register copies after cleanup
912 * because the chip won't do it for anything changing while in freeze mode 879 * because the chip won't do it while in freeze mode (the register values
913 * (we don't want to wait for the next pio buffer state change). 880 * themselves are kept correct).
914 * Make sure that we don't lose any important interrupts by using the chip 881 * Make sure that we don't lose any important interrupts by using the chip
915 * feature that says that writing 0 to a bit in *clear that is set in 882 * feature that says that writing 0 to a bit in *clear that is set in
916 * *status will cause an interrupt to be generated again (if allowed by 883 * *status will cause an interrupt to be generated again (if allowed by
@@ -918,44 +885,23 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
918 */ 885 */
919void ipath_clear_freeze(struct ipath_devdata *dd) 886void ipath_clear_freeze(struct ipath_devdata *dd)
920{ 887{
921 int i, im;
922 u64 val;
923
924 /* disable error interrupts, to avoid confusion */ 888 /* disable error interrupts, to avoid confusion */
925 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL); 889 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
926 890
927 /* also disable interrupts; errormask is sometimes overwritten */ 891 /* also disable interrupts; errormask is sometimes overwritten */
928 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL); 892 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
929 893
930 /* 894 ipath_cancel_sends(dd, 1);
931 * clear all sends, because they have may been 895
932 * completed by usercode while in freeze mode, and 896 /* clear the freeze, and be sure chip saw it */
933 * therefore would not be sent, and eventually
934 * might cause the process to run out of bufs
935 */
936 ipath_cancel_sends(dd, 0);
937 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 897 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
938 dd->ipath_control); 898 dd->ipath_control);
899 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
939 900
940 /* ensure pio avail updates continue */ 901 /* force in-memory update now we are out of freeze */
941 ipath_force_pio_avail_update(dd); 902 ipath_force_pio_avail_update(dd);
942 903
943 /* 904 /*
944 * We just enabled pioavailupdate, so dma copy is almost certainly
945 * not yet right, so read the registers directly. Similar to init
946 */
947 for (i = 0; i < dd->ipath_pioavregs; i++) {
948 /* deal with 6110 chip bug */
949 im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
950 i ^ 1 : i;
951 val = ipath_read_kreg64(dd, (0x1000 / sizeof(u64)) + im);
952 dd->ipath_pioavailregs_dma[i] = cpu_to_le64(val);
953 dd->ipath_pioavailshadow[i] = val |
954 (~dd->ipath_pioavailkernel[i] <<
955 INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT);
956 }
957
958 /*
959 * force new interrupt if any hwerr, error or interrupt bits are 905 * force new interrupt if any hwerr, error or interrupt bits are
960 * still set, and clear "safe" send packet errors related to freeze 906 * still set, and clear "safe" send packet errors related to freeze
961 * and cancelling sends. Re-enable error interrupts before possible 907 * and cancelling sends. Re-enable error interrupts before possible
@@ -1312,10 +1258,8 @@ irqreturn_t ipath_intr(int irq, void *data)
1312 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 1258 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1313 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); 1259 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1314 1260
1315 if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA)) 1261 /* always process; sdma verbs uses PIO for acks and VL15 */
1316 handle_layer_pioavail(dd); 1262 handle_layer_pioavail(dd);
1317 else
1318 ipath_dbg("unexpected BUFAVAIL intr\n");
1319 } 1263 }
1320 1264
1321 ret = IRQ_HANDLED; 1265 ret = IRQ_HANDLED;
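[Note] The ipath_intr.c hunks above drop the parity-error rewrite path from ipath_disarm_senderrbufs(), simplify ipath_clear_freeze() (the chip keeps the pioavail register values correct across freeze, so the hand-rebuild of the shadow copies goes away), and make BUFAVAIL interrupts unconditional, since even SDMA-capable chips still send ACKs and VL15 MADs over PIO. The freeze-exit ordering relies on a posted-write flush; a minimal sketch, using only the register names visible in the diff:

    /* clear the freeze bit, then read a scratch register so the write
     * is known to have reached the chip before forcing the in-memory
     * pioavail copies to refresh */
    ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control);
    ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);   /* flush */
    ipath_force_pio_avail_update(dd);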
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 202337ae90d..02b24a34059 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -117,6 +117,10 @@ struct ipath_portdata {
117 u16 port_subport_cnt; 117 u16 port_subport_cnt;
118 /* non-zero if port is being shared. */ 118 /* non-zero if port is being shared. */
119 u16 port_subport_id; 119 u16 port_subport_id;
120 /* number of pio bufs for this port (all procs, if shared) */
121 u32 port_piocnt;
122 /* first pio buffer for this port */
123 u32 port_pio_base;
120 /* chip offset of PIO buffers for this port */ 124 /* chip offset of PIO buffers for this port */
121 u32 port_piobufs; 125 u32 port_piobufs;
122 /* how many alloc_pages() chunks in port_rcvegrbuf_pages */ 126 /* how many alloc_pages() chunks in port_rcvegrbuf_pages */
@@ -384,6 +388,8 @@ struct ipath_devdata {
384 u32 ipath_lastrpkts; 388 u32 ipath_lastrpkts;
385 /* pio bufs allocated per port */ 389 /* pio bufs allocated per port */
386 u32 ipath_pbufsport; 390 u32 ipath_pbufsport;
391 /* if remainder on bufs/port, ports < extrabuf get 1 extra */
392 u32 ipath_ports_extrabuf;
387 u32 ipath_pioupd_thresh; /* update threshold, some chips */ 393 u32 ipath_pioupd_thresh; /* update threshold, some chips */
388 /* 394 /*
389 * number of ports configured as max; zero is set to number chip 395 * number of ports configured as max; zero is set to number chip
@@ -1011,7 +1017,7 @@ void ipath_get_eeprom_info(struct ipath_devdata *);
1011int ipath_update_eeprom_log(struct ipath_devdata *dd); 1017int ipath_update_eeprom_log(struct ipath_devdata *dd);
1012void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr); 1018void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
1013u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); 1019u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
1014void ipath_disarm_senderrbufs(struct ipath_devdata *, int); 1020void ipath_disarm_senderrbufs(struct ipath_devdata *);
1015void ipath_force_pio_avail_update(struct ipath_devdata *); 1021void ipath_force_pio_avail_update(struct ipath_devdata *);
1016void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev); 1022void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);
1017 1023
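[Note] The two new ipath_portdata fields give each port an explicit buffer count and starting index instead of a uniform per-port count, and ipath_ports_extrabuf records how a non-divisible buffer pool is spread: per the new comment, ports numbered below it get one extra buffer. A standalone sketch of that arithmetic (example values assumed; the driver's real assignment lives in the init code, which is outside this diff):

    #include <stdio.h>

    int main(void)
    {
            unsigned nbufs = 118, nports = 4;      /* assumed example */
            unsigned pbufsport = nbufs / nports;   /* 29 per port */
            unsigned extrabuf  = nbufs % nports;   /* ports 0,1 get +1 */
            unsigned base = 0;

            for (unsigned port = 0; port < nports; port++) {
                    unsigned cnt = pbufsport + (port < extrabuf ? 1 : 0);
                    printf("port %u: pio_base %u, piocnt %u\n",
                           port, base, cnt);
                    base += cnt;   /* next port starts where this ends */
            }
            return 0;
    }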
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index c405dfba553..08b11b56761 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -1746,7 +1746,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1746 qp->r_wrid_valid = 0; 1746 qp->r_wrid_valid = 0;
1747 wc.wr_id = qp->r_wr_id; 1747 wc.wr_id = qp->r_wr_id;
1748 wc.status = IB_WC_SUCCESS; 1748 wc.status = IB_WC_SUCCESS;
1749 wc.opcode = IB_WC_RECV; 1749 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
1750 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
1751 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
1752 else
1753 wc.opcode = IB_WC_RECV;
1750 wc.vendor_err = 0; 1754 wc.vendor_err = 0;
1751 wc.qp = &qp->ibqp; 1755 wc.qp = &qp->ibqp;
1752 wc.src_qp = qp->remote_qpn; 1756 wc.src_qp = qp->remote_qpn;
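[Note] Before this ipath_rc.c hunk, the receive completion always claimed IB_WC_RECV, even when the receive WQE was consumed by an RDMA WRITE with immediate, so consumers could not tell whether the payload arrived in the posted buffer or was written directly to registered memory. A sketch of how a verbs consumer depends on the distinction (standard ib_poll_cq() usage; cq is assumed to be a completion queue the consumer created, and error handling is elided):

    struct ib_wc wc;

    while (ib_poll_cq(cq, 1, &wc) > 0) {
            if (wc.status != IB_WC_SUCCESS)
                    continue;
            if (wc.opcode == IB_WC_RECV_RDMA_WITH_IMM) {
                    /* data was RDMA-written straight to memory; the
                     * receive buffer only "carried" wc.imm_data */
            } else if (wc.opcode == IB_WC_RECV) {
                    /* an ordinary SEND landed in the posted buffer */
            }
    }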
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index 8ac5c1d82cc..9e3fe61cbd0 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -481,9 +481,10 @@ done:
481 wake_up(&qp->wait); 481 wake_up(&qp->wait);
482} 482}
483 483
484static void want_buffer(struct ipath_devdata *dd) 484static void want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp)
485{ 485{
486 if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA)) { 486 if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA) ||
487 qp->ibqp.qp_type == IB_QPT_SMI) {
487 unsigned long flags; 488 unsigned long flags;
488 489
489 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); 490 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
@@ -519,7 +520,7 @@ static void ipath_no_bufs_available(struct ipath_qp *qp,
519 spin_lock_irqsave(&dev->pending_lock, flags); 520 spin_lock_irqsave(&dev->pending_lock, flags);
520 list_add_tail(&qp->piowait, &dev->piowait); 521 list_add_tail(&qp->piowait, &dev->piowait);
521 spin_unlock_irqrestore(&dev->pending_lock, flags); 522 spin_unlock_irqrestore(&dev->pending_lock, flags);
522 want_buffer(dev->dd); 523 want_buffer(dev->dd, qp);
523 dev->n_piowait++; 524 dev->n_piowait++;
524} 525}
525 526
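[Note] want_buffer() arms the PIO-buffer-available interrupt; passing the QP lets it do so not only when the chip lacks send DMA but also for the SMI QP, whose VL15 MADs always go out via PIO (see the matching ipath_intr.c change above). A sketch of what the body guarded by the new predicate does, assuming the sendctrl bit name used elsewhere in the driver:

    if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA) ||
        qp->ibqp.qp_type == IB_QPT_SMI) {
            unsigned long flags;

            spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
            dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL; /* assumed bit */
            ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                             dd->ipath_sendctrl);
            spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
    }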
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
index 1974df7a9f7..3697449c1ba 100644
--- a/drivers/infiniband/hw/ipath/ipath_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -308,13 +308,15 @@ static void sdma_abort_task(unsigned long opaque)
308 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); 308 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
309 309
310 /* 310 /*
311 * Don't restart sdma here. Wait until link is up to ACTIVE. 311 * Don't restart sdma here (with the exception
312 * VL15 MADs used to bring the link up use PIO, and multiple 312 * below). Wait until link is up to ACTIVE. VL15 MADs
313 * link transitions otherwise cause the sdma engine to be 313 * used to bring the link up use PIO, and multiple link
314 * transitions otherwise cause the sdma engine to be
314 * stopped and started multiple times. 315 * stopped and started multiple times.
315 * The disable is done here, including the shadow, so the 316 * The disable is done here, including the shadow,
316 * state is kept consistent. 317 * so the state is kept consistent.
317 * See ipath_restart_sdma() for the actual starting of sdma. 318 * See ipath_restart_sdma() for the actual starting
319 * of sdma.
318 */ 320 */
319 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); 321 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
320 dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE; 322 dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
@@ -326,6 +328,13 @@ static void sdma_abort_task(unsigned long opaque)
326 /* make sure I see next message */ 328 /* make sure I see next message */
327 dd->ipath_sdma_abort_jiffies = 0; 329 dd->ipath_sdma_abort_jiffies = 0;
328 330
331 /*
332 * Not everything that takes SDMA offline is a link
333 * status change. If the link was up, restart SDMA.
334 */
335 if (dd->ipath_flags & IPATH_LINKACTIVE)
336 ipath_restart_sdma(dd);
337
329 goto done; 338 goto done;
330 } 339 }
331 340
@@ -427,7 +436,12 @@ int setup_sdma(struct ipath_devdata *dd)
427 goto done; 436 goto done;
428 } 437 }
429 438
430 dd->ipath_sdma_status = 0; 439 /*
440 * Set initial status as if we had been up, then gone down.
441 * This lets initial start on transition to ACTIVE be the
442 * same as restart after link flap.
443 */
444 dd->ipath_sdma_status = IPATH_SDMA_ABORT_ABORTED;
431 dd->ipath_sdma_abort_jiffies = 0; 445 dd->ipath_sdma_abort_jiffies = 0;
432 dd->ipath_sdma_generation = 0; 446 dd->ipath_sdma_generation = 0;
433 dd->ipath_sdma_descq_tail = 0; 447 dd->ipath_sdma_descq_tail = 0;
@@ -449,16 +463,19 @@ int setup_sdma(struct ipath_devdata *dd)
449 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 463 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
450 dd->ipath_sdma_head_phys); 464 dd->ipath_sdma_head_phys);
451 465
452 /* Reserve all the former "kernel" piobufs */ 466 /*
453 n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - dd->ipath_pioreserved; 467 * Reserve all the former "kernel" piobufs, using high number range
454 for (i = dd->ipath_lastport_piobuf; i < n; ++i) { 468 * so we get as many 4K buffers as possible
469 */
470 n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
471 i = dd->ipath_lastport_piobuf + dd->ipath_pioreserved;
 472 ipath_chg_pioavailkernel(dd, i, n - i, 0);
473 for (; i < n; ++i) {
455 unsigned word = i / 64; 474 unsigned word = i / 64;
456 unsigned bit = i & 63; 475 unsigned bit = i & 63;
457 BUG_ON(word >= 3); 476 BUG_ON(word >= 3);
458 senddmabufmask[word] |= 1ULL << bit; 477 senddmabufmask[word] |= 1ULL << bit;
459 } 478 }
460 ipath_chg_pioavailkernel(dd, dd->ipath_lastport_piobuf,
461 n - dd->ipath_lastport_piobuf, 0);
462 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 479 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
463 senddmabufmask[0]); 480 senddmabufmask[0]);
464 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 481 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
@@ -615,6 +632,9 @@ void ipath_restart_sdma(struct ipath_devdata *dd)
615 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 632 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
616 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); 633 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
617 634
635 /* notify upper layers */
636 ipath_ib_piobufavail(dd->verbs_dev);
637
618bail: 638bail:
619 return; 639 return;
620} 640}
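[Note] Three related fixes in ipath_sdma.c: the abort path now restarts SDMA immediately when the link is still ACTIVE (not everything that takes SDMA offline is a link transition), setup seeds ipath_sdma_status with IPATH_SDMA_ABORT_ABORTED so first start and post-flap restart share one code path, and the reserved buffers move to the high end of the range so 4K buffers stay available. The senddmabufmask built in the reservation loop packs buffer indices into three 64-bit registers; a runnable sketch of just that packing, with illustrative bounds:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t mask[3] = { 0, 0, 0 };   /* senddmabufmask0..2 */
            unsigned first = 80, n = 150;     /* assumed buffer range */

            for (unsigned i = first; i < n; i++)
                    mask[i / 64] |= 1ULL << (i & 63);  /* word, bit */

            printf("%016llx %016llx %016llx\n",
                   (unsigned long long)mask[2],
                   (unsigned long long)mask[1],
                   (unsigned long long)mask[0]);
            return 0;
    }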
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index e63927cce5b..5015cd2e57b 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -396,7 +396,6 @@ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
396 396
397 wqe = get_swqe_ptr(qp, qp->s_head); 397 wqe = get_swqe_ptr(qp, qp->s_head);
398 wqe->wr = *wr; 398 wqe->wr = *wr;
399 wqe->ssn = qp->s_ssn++;
400 wqe->length = 0; 399 wqe->length = 0;
401 if (wr->num_sge) { 400 if (wr->num_sge) {
402 acc = wr->opcode >= IB_WR_RDMA_READ ? 401 acc = wr->opcode >= IB_WR_RDMA_READ ?
@@ -422,6 +421,7 @@ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
422 goto bail_inval; 421 goto bail_inval;
423 } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu) 422 } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
424 goto bail_inval; 423 goto bail_inval;
424 wqe->ssn = qp->s_ssn++;
425 qp->s_head = next; 425 qp->s_head = next;
426 426
427 ret = 0; 427 ret = 0;
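[Note] The ipath_verbs.c change moves the send-sequence-number assignment past all validation, so a work request that fails the SGE or MTU checks no longer consumes an SSN. The resulting order, as a pattern:

    wqe->wr = *wr;
    wqe->length = 0;
    /* ... validate SGEs and total length; any failure exits via
     *     bail_inval with qp->s_ssn untouched ... */
    wqe->ssn = qp->s_ssn++;   /* only a validated WR takes a number */
    qp->s_head = next;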
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5938fa96292..faf3d891297 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -886,7 +886,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
886 */ 886 */
887 raid10_find_phys(conf, r10_bio); 887 raid10_find_phys(conf, r10_bio);
888 retry_write: 888 retry_write:
889 blocked_rdev = 0; 889 blocked_rdev = NULL;
890 rcu_read_lock(); 890 rcu_read_lock();
891 for (i = 0; i < conf->copies; i++) { 891 for (i = 0; i < conf->copies; i++) {
892 int d = r10_bio->devs[i].devnum; 892 int d = r10_bio->devs[i].devnum;
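[Note] blocked_rdev is a pointer (mdk_rdev_t * in this era of raid10.c), so resetting it with NULL rather than 0 states the intent and quiets sparse's "using plain integer as NULL pointer" warning; behaviour is unchanged:

    mdk_rdev_t *blocked_rdev = NULL;   /* not: = 0 */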
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 73f742c7e81..cc11c4c0e7e 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -2,6 +2,8 @@
2# Makefile for the kernel multimedia device drivers. 2# Makefile for the kernel multimedia device drivers.
3# 3#
4 4
5obj-y := common/
6
5obj-$(CONFIG_VIDEO_MEDIA) += common/ 7obj-$(CONFIG_VIDEO_MEDIA) += common/
6 8
7# Since hybrid devices are here, should be compiled if DVB and/or V4L 9# Since hybrid devices are here, should be compiled if DVB and/or V4L
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 8f5ed9b4bf8..3f55d47bc4b 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -613,7 +613,7 @@ static int __devinit cx18_probe(struct pci_dev *dev,
613 } 613 }
614 614
615 cx = kzalloc(sizeof(struct cx18), GFP_ATOMIC); 615 cx = kzalloc(sizeof(struct cx18), GFP_ATOMIC);
616 if (cx == 0) { 616 if (!cx) {
617 spin_unlock(&cx18_cards_lock); 617 spin_unlock(&cx18_cards_lock);
618 return -ENOMEM; 618 return -ENOMEM;
619 } 619 }
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index a0baf2d0ba7..48e1a01718e 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1634,7 +1634,7 @@ static int saa7134_s_fmt_overlay(struct file *file, void *priv,
1634 struct saa7134_fh *fh = priv; 1634 struct saa7134_fh *fh = priv;
1635 struct saa7134_dev *dev = fh->dev; 1635 struct saa7134_dev *dev = fh->dev;
1636 int err; 1636 int err;
1637 unsigned int flags; 1637 unsigned long flags;
1638 1638
1639 if (saa7134_no_overlay > 0) { 1639 if (saa7134_no_overlay > 0) {
1640 printk(KERN_ERR "V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n"); 1640 printk(KERN_ERR "V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n");
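[Note] The flags argument of spin_lock_irqsave() must be unsigned long: the macro stores the architecture's saved interrupt state there, and a 32-bit int can truncate it on 64-bit platforms. A minimal kernel-style sketch of the corrected pattern (the rtc-ds1511.c hunks further down make the same fix twice):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    static void demo(void)
    {
            unsigned long flags;            /* never unsigned int */

            spin_lock_irqsave(&demo_lock, flags);
            /* ... critical section ... */
            spin_unlock_irqrestore(&demo_lock, flags);
    }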
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index a873d2b315c..a7714da7c28 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -100,7 +100,9 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info);
100static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info); 100static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info);
101static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info); 101static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info);
102static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info); 102static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info);
103#ifdef CONFIG_PNP
103static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id); 104static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id);
105#endif
104 106
105/* These are the known NSC chips */ 107/* These are the known NSC chips */
106static nsc_chip_t chips[] = { 108static nsc_chip_t chips[] = {
@@ -156,9 +158,11 @@ static const struct pnp_device_id nsc_ircc_pnp_table[] = {
156MODULE_DEVICE_TABLE(pnp, nsc_ircc_pnp_table); 158MODULE_DEVICE_TABLE(pnp, nsc_ircc_pnp_table);
157 159
158static struct pnp_driver nsc_ircc_pnp_driver = { 160static struct pnp_driver nsc_ircc_pnp_driver = {
161#ifdef CONFIG_PNP
159 .name = "nsc-ircc", 162 .name = "nsc-ircc",
160 .id_table = nsc_ircc_pnp_table, 163 .id_table = nsc_ircc_pnp_table,
161 .probe = nsc_ircc_pnp_probe, 164 .probe = nsc_ircc_pnp_probe,
165#endif
162}; 166};
163 167
164/* Some prototypes */ 168/* Some prototypes */
@@ -916,6 +920,7 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
916 return 0; 920 return 0;
917} 921}
918 922
923#ifdef CONFIG_PNP
919/* PNP probing */ 924/* PNP probing */
920static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id) 925static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
921{ 926{
@@ -952,6 +957,7 @@ static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *i
952 957
953 return 0; 958 return 0;
954} 959}
960#endif
955 961
956/* 962/*
957 * Function nsc_ircc_setup (info) 963 * Function nsc_ircc_setup (info)
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 1f26da761e9..cfe0194fef7 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -376,6 +376,7 @@ MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table);
376 376
377static int pnp_driver_registered; 377static int pnp_driver_registered;
378 378
379#ifdef CONFIG_PNP
379static int __init smsc_ircc_pnp_probe(struct pnp_dev *dev, 380static int __init smsc_ircc_pnp_probe(struct pnp_dev *dev,
380 const struct pnp_device_id *dev_id) 381 const struct pnp_device_id *dev_id)
381{ 382{
@@ -402,7 +403,9 @@ static struct pnp_driver smsc_ircc_pnp_driver = {
402 .id_table = smsc_ircc_pnp_table, 403 .id_table = smsc_ircc_pnp_table,
403 .probe = smsc_ircc_pnp_probe, 404 .probe = smsc_ircc_pnp_probe,
404}; 405};
405 406#else /* CONFIG_PNP */
407static struct pnp_driver smsc_ircc_pnp_driver;
408#endif
406 409
407/******************************************************************************* 410/*******************************************************************************
408 * 411 *
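[Note] Both IrDA drivers wrap their PnP probe code in #ifdef CONFIG_PNP so that !CONFIG_PNP builds do not trip over PnP-only symbols; smsc-ircc2 additionally keeps an empty pnp_driver object so code that merely names the struct still compiles. The shape of the pattern (the demo_* names and the id table are hypothetical):

    #ifdef CONFIG_PNP
    static int demo_pnp_probe(struct pnp_dev *dev,
                              const struct pnp_device_id *id)
    {
            return 0;       /* resource discovery would go here */
    }

    static struct pnp_driver demo_pnp_driver = {
            .name     = "demo",
            .id_table = demo_pnp_table,
            .probe    = demo_pnp_probe,
    };
    #else
    static struct pnp_driver demo_pnp_driver; /* stub keeps references valid */
    #endif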
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 4009c4ce96b..57cfd72ffdf 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -1,6 +1,6 @@
1/* niu.c: Neptune ethernet driver. 1/* niu.c: Neptune ethernet driver.
2 * 2 *
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
4 */ 4 */
5 5
6#include <linux/module.h> 6#include <linux/module.h>
@@ -33,8 +33,8 @@
33 33
34#define DRV_MODULE_NAME "niu" 34#define DRV_MODULE_NAME "niu"
35#define PFX DRV_MODULE_NAME ": " 35#define PFX DRV_MODULE_NAME ": "
36#define DRV_MODULE_VERSION "0.8" 36#define DRV_MODULE_VERSION "0.9"
37#define DRV_MODULE_RELDATE "April 24, 2008" 37#define DRV_MODULE_RELDATE "May 4, 2008"
38 38
39static char version[] __devinitdata = 39static char version[] __devinitdata =
40 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 40 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -7264,8 +7264,11 @@ static int __devinit niu_get_and_validate_port(struct niu *np)
7264 parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & 7264 parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
7265 ESPC_NUM_PORTS_MACS_VAL; 7265 ESPC_NUM_PORTS_MACS_VAL;
7266 7266
7267 /* All of the current probing methods fail on
7268 * Maramba on-board parts.
7269 */
7267 if (!parent->num_ports) 7270 if (!parent->num_ports)
7268 return -ENODEV; 7271 parent->num_ports = 4;
7269 } 7272 }
7270 } 7273 }
7271 } 7274 }
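[Note] The niu.c probe change turns a zero port count read back from the SPROM into a fallback instead of a fatal -ENODEV; per the new comment, the known probing methods all fail on Maramba on-board parts, so the driver assumes the full complement of 4 ports (an assumption about those boards, not something derivable from the register read):

    if (!parent->num_ports)
            parent->num_ports = 4;   /* probing failed; assume maximum */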
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 6f245cfb662..dc6f097062d 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1381,6 +1381,10 @@ static const struct usb_device_id products [] = {
1381 USB_DEVICE (0x0411, 0x003d), 1381 USB_DEVICE (0x0411, 0x003d),
1382 .driver_info = (unsigned long) &ax8817x_info, 1382 .driver_info = (unsigned long) &ax8817x_info,
1383}, { 1383}, {
1384 // Buffalo LUA-U2-GT 10/100/1000
1385 USB_DEVICE (0x0411, 0x006e),
1386 .driver_info = (unsigned long) &ax88178_info,
1387}, {
1384 // Sitecom LN-029 "USB 2.0 10/100 Ethernet adapter" 1388 // Sitecom LN-029 "USB 2.0 10/100 Ethernet adapter"
1385 USB_DEVICE (0x6189, 0x182d), 1389 USB_DEVICE (0x6189, 0x182d),
1386 .driver_info = (unsigned long) &ax8817x_info, 1390 .driver_info = (unsigned long) &ax8817x_info,
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index b5860b97a93..24fd613466b 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -459,6 +459,7 @@ static void __exit lapbeth_cleanup_driver(void)
459 list_for_each_safe(entry, tmp, &lapbeth_devices) { 459 list_for_each_safe(entry, tmp, &lapbeth_devices) {
460 lapbeth = list_entry(entry, struct lapbethdev, node); 460 lapbeth = list_entry(entry, struct lapbethdev, node);
461 461
462 dev_put(lapbeth->ethdev);
462 unregister_netdevice(lapbeth->axdev); 463 unregister_netdevice(lapbeth->axdev);
463 } 464 }
464 rtnl_unlock(); 465 rtnl_unlock();
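[Note] The added dev_put() releases the reference on the underlying Ethernet device at module cleanup; the matching dev_hold() is taken when the lapbeth device is created (outside this diff, assumed from the API's usual pairing). Without it the ethernet device's refcount never drops, which typically shows up as unregister hangs at teardown:

    dev_hold(ethdev);                 /* at bind time (assumed) */
    /* ... lapbeth device lifetime ... */
    dev_put(lapbeth->ethdev);         /* added here: drop it at cleanup */
    unregister_netdevice(lapbeth->axdev);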
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index d5b7a76fcaa..62fb89d8231 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,6 +1,5 @@
1config IWLWIFI 1config IWLWIFI
2 bool 2 tristate
3 default n
4 3
5config IWLCORE 4config IWLCORE
6 tristate "Intel Wireless Wifi Core" 5 tristate "Intel Wireless Wifi Core"
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index a83a40b3eba..0f0d27d1c4c 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -184,7 +184,7 @@ ds1511_wdog_disable(void)
184static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm) 184static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
185{ 185{
186 u8 mon, day, dow, hrs, min, sec, yrs, cen; 186 u8 mon, day, dow, hrs, min, sec, yrs, cen;
187 unsigned int flags; 187 unsigned long flags;
188 188
189 /* 189 /*
190 * won't have to change this for a while 190 * won't have to change this for a while
@@ -247,7 +247,7 @@ static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
247static int ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm) 247static int ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
248{ 248{
249 unsigned int century; 249 unsigned int century;
250 unsigned int flags; 250 unsigned long flags;
251 251
252 spin_lock_irqsave(&ds1511_lock, flags); 252 spin_lock_irqsave(&ds1511_lock, flags);
253 rtc_disable_update(); 253 rtc_disable_update();
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index c1f2adefad4..5043150019a 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -965,8 +965,7 @@ tty3270_write_room(struct tty_struct *tty)
965 * Insert character into the screen at the current position with the 965 * Insert character into the screen at the current position with the
966 * current color and highlight. This function does NOT do cursor movement. 966 * current color and highlight. This function does NOT do cursor movement.
967 */ 967 */
968static int 968static void tty3270_put_character(struct tty3270 *tp, char ch)
969tty3270_put_character(struct tty3270 *tp, char ch)
970{ 969{
971 struct tty3270_line *line; 970 struct tty3270_line *line;
972 struct tty3270_cell *cell; 971 struct tty3270_cell *cell;
@@ -986,7 +985,6 @@ tty3270_put_character(struct tty3270 *tp, char ch)
986 cell->character = tp->view.ascebc[(unsigned int) ch]; 985 cell->character = tp->view.ascebc[(unsigned int) ch];
987 cell->highlight = tp->highlight; 986 cell->highlight = tp->highlight;
988 cell->f_color = tp->f_color; 987 cell->f_color = tp->f_color;
989 return 1;
990} 988}
991 989
992/* 990/*
@@ -1612,16 +1610,15 @@ tty3270_write(struct tty_struct * tty,
1612/* 1610/*
1613 * Put single characters to the ttys character buffer 1611 * Put single characters to the ttys character buffer
1614 */ 1612 */
1615static void 1613static int tty3270_put_char(struct tty_struct *tty, unsigned char ch)
1616tty3270_put_char(struct tty_struct *tty, unsigned char ch)
1617{ 1614{
1618 struct tty3270 *tp; 1615 struct tty3270 *tp;
1619 1616
1620 tp = tty->driver_data; 1617 tp = tty->driver_data;
1621 if (!tp) 1618 if (!tp || tp->char_count >= TTY3270_CHAR_BUF_SIZE)
1622 return; 1619 return 0;
1623 if (tp->char_count < TTY3270_CHAR_BUF_SIZE) 1620 tp->char_buf[tp->char_count++] = ch;
1624 tp->char_buf[tp->char_count++] = ch; 1621 return 1;
1625} 1622}
1626 1623
1627/* 1624/*
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 40ef948fcb3..9c21b8f43f9 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -19,6 +19,7 @@
19 19
20#include <asm/cio.h> 20#include <asm/cio.h>
21#include <asm/uaccess.h> 21#include <asm/uaccess.h>
22#include <asm/cio.h>
22 23
23#include "blacklist.h" 24#include "blacklist.h"
24#include "cio.h" 25#include "cio.h"
@@ -43,164 +44,169 @@ typedef enum {add, free} range_action;
43 * Function: blacklist_range 44 * Function: blacklist_range
44 * (Un-)blacklist the devices from-to 45 * (Un-)blacklist the devices from-to
45 */ 46 */
46static void 47static int blacklist_range(range_action action, unsigned int from_ssid,
47blacklist_range (range_action action, unsigned int from, unsigned int to, 48 unsigned int to_ssid, unsigned int from,
48 unsigned int ssid) 49 unsigned int to, int msgtrigger)
49{ 50{
50 if (!to) 51 if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) {
51 to = from; 52 if (msgtrigger)
52 53 printk(KERN_WARNING "cio: Invalid cio_ignore range "
53 if (from > to || to > __MAX_SUBCHANNEL || ssid > __MAX_SSID) { 54 "0.%x.%04x-0.%x.%04x\n", from_ssid, from,
54 printk (KERN_WARNING "cio: Invalid blacklist range " 55 to_ssid, to);
55 "0.%x.%04x to 0.%x.%04x, skipping\n", 56 return 1;
56 ssid, from, ssid, to);
57 return;
58 } 57 }
59 for (; from <= to; from++) { 58
59 while ((from_ssid < to_ssid) || ((from_ssid == to_ssid) &&
60 (from <= to))) {
60 if (action == add) 61 if (action == add)
61 set_bit (from, bl_dev[ssid]); 62 set_bit(from, bl_dev[from_ssid]);
62 else 63 else
63 clear_bit (from, bl_dev[ssid]); 64 clear_bit(from, bl_dev[from_ssid]);
65 from++;
66 if (from > __MAX_SUBCHANNEL) {
67 from_ssid++;
68 from = 0;
69 }
64 } 70 }
71
72 return 0;
65} 73}
66 74
67/* 75static int pure_hex(char **cp, unsigned int *val, int min_digit,
68 * Function: blacklist_busid 76 int max_digit, int max_val)
69 * Get devno/busid from given string.
70 * Shamelessly grabbed from dasd_devmap.c.
71 */
72static int
73blacklist_busid(char **str, int *id0, int *ssid, int *devno)
74{ 77{
75 int val, old_style; 78 int diff;
76 char *sav; 79 unsigned int value;
77 80
78 sav = *str; 81 diff = 0;
82 *val = 0;
79 83
80 /* check for leading '0x' */ 84 while (isxdigit(**cp) && (diff <= max_digit)) {
81 old_style = 0; 85
82 if ((*str)[0] == '0' && (*str)[1] == 'x') { 86 if (isdigit(**cp))
83 *str += 2; 87 value = **cp - '0';
84 old_style = 1; 88 else
85 } 89 value = tolower(**cp) - 'a' + 10;
86 if (!isxdigit((*str)[0])) /* We require at least one hex digit */ 90 *val = *val * 16 + value;
87 goto confused; 91 (*cp)++;
88 val = simple_strtoul(*str, str, 16); 92 diff++;
89 if (old_style || (*str)[0] != '.') {
90 *id0 = *ssid = 0;
91 if (val < 0 || val > 0xffff)
92 goto confused;
93 *devno = val;
94 if ((*str)[0] != ',' && (*str)[0] != '-' &&
95 (*str)[0] != '\n' && (*str)[0] != '\0')
96 goto confused;
97 return 0;
98 } 93 }
99 /* New style x.y.z busid */ 94
100 if (val < 0 || val > 0xff) 95 if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
101 goto confused; 96 return 1;
102 *id0 = val; 97
103 (*str)++;
104 if (!isxdigit((*str)[0])) /* We require at least one hex digit */
105 goto confused;
106 val = simple_strtoul(*str, str, 16);
107 if (val < 0 || val > 0xff || (*str)++[0] != '.')
108 goto confused;
109 *ssid = val;
110 if (!isxdigit((*str)[0])) /* We require at least one hex digit */
111 goto confused;
112 val = simple_strtoul(*str, str, 16);
113 if (val < 0 || val > 0xffff)
114 goto confused;
115 *devno = val;
116 if ((*str)[0] != ',' && (*str)[0] != '-' &&
117 (*str)[0] != '\n' && (*str)[0] != '\0')
118 goto confused;
119 return 0; 98 return 0;
120confused:
121 strsep(str, ",\n");
122 printk(KERN_WARNING "cio: Invalid cio_ignore parameter '%s'\n", sav);
123 return 1;
124} 99}
125 100
126static int 101static int parse_busid(char *str, int *cssid, int *ssid, int *devno,
127blacklist_parse_parameters (char *str, range_action action) 102 int msgtrigger)
128{ 103{
129 int from, to, from_id0, to_id0, from_ssid, to_ssid; 104 char *str_work;
130 105 int val, rc, ret;
131 while (*str != 0 && *str != '\n') { 106
132 range_action ra = action; 107 rc = 1;
133 while(*str == ',') 108
134 str++; 109 if (*str == '\0')
135 if (*str == '!') { 110 goto out;
136 ra = !action; 111
137 ++str; 112 /* old style */
113 str_work = str;
114 val = simple_strtoul(str, &str_work, 16);
115
116 if (*str_work == '\0') {
117 if (val <= __MAX_SUBCHANNEL) {
118 *devno = val;
119 *ssid = 0;
120 *cssid = 0;
121 rc = 0;
138 } 122 }
123 goto out;
124 }
139 125
140 /* 126 /* new style */
141 * Since we have to parse the proc commands and the 127 str_work = str;
142 * kernel arguments we have to check four cases 128 ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
143 */ 129 if (ret || (str_work[0] != '.'))
144 if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 || 130 goto out;
145 strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) { 131 str_work++;
146 int j; 132 ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
147 133 if (ret || (str_work[0] != '.'))
148 str += 3; 134 goto out;
149 for (j=0; j <= __MAX_SSID; j++) 135 str_work++;
150 blacklist_range(ra, 0, __MAX_SUBCHANNEL, j); 136 ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
151 } else { 137 if (ret || (str_work[0] != '\0'))
152 int rc; 138 goto out;
139
140 rc = 0;
141out:
142 if (rc && msgtrigger)
143 printk(KERN_WARNING "cio: Invalid cio_ignore device '%s'\n",
144 str);
145
146 return rc;
147}
153 148
154 rc = blacklist_busid(&str, &from_id0, 149static int blacklist_parse_parameters(char *str, range_action action,
155 &from_ssid, &from); 150 int msgtrigger)
156 if (rc) 151{
157 continue; 152 int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
158 to = from; 153 int rc, totalrc;
159 to_id0 = from_id0; 154 char *parm;
160 to_ssid = from_ssid; 155 range_action ra;
161 if (*str == '-') { 156
162 str++; 157 totalrc = 0;
163 rc = blacklist_busid(&str, &to_id0, 158
164 &to_ssid, &to); 159 while ((parm = strsep(&str, ","))) {
165 if (rc) 160 rc = 0;
166 continue; 161 ra = action;
167 } 162 if (*parm == '!') {
168 if (*str == '-') { 163 if (ra == add)
169 printk(KERN_WARNING "cio: invalid cio_ignore " 164 ra = free;
170 "parameter '%s'\n", 165 else
171 strsep(&str, ",\n")); 166 ra = add;
172 continue; 167 parm++;
173 } 168 }
174 if ((from_id0 != to_id0) || 169 if (strcmp(parm, "all") == 0) {
175 (from_ssid != to_ssid)) { 170 from_cssid = 0;
176 printk(KERN_WARNING "cio: invalid cio_ignore " 171 from_ssid = 0;
177 "range %x.%x.%04x-%x.%x.%04x\n", 172 from = 0;
178 from_id0, from_ssid, from, 173 to_cssid = __MAX_CSSID;
179 to_id0, to_ssid, to); 174 to_ssid = __MAX_SSID;
180 continue; 175 to = __MAX_SUBCHANNEL;
176 } else {
177 rc = parse_busid(strsep(&parm, "-"), &from_cssid,
178 &from_ssid, &from, msgtrigger);
179 if (!rc) {
180 if (parm != NULL)
181 rc = parse_busid(parm, &to_cssid,
182 &to_ssid, &to,
183 msgtrigger);
184 else {
185 to_cssid = from_cssid;
186 to_ssid = from_ssid;
187 to = from;
188 }
181 } 189 }
182 blacklist_range (ra, from, to, to_ssid);
183 } 190 }
191 if (!rc) {
192 rc = blacklist_range(ra, from_ssid, to_ssid, from, to,
193 msgtrigger);
194 if (rc)
195 totalrc = 1;
196 } else
197 totalrc = 1;
184 } 198 }
185 return 1; 199
200 return totalrc;
186} 201}
187 202
188/* Parsing the commandline for blacklist parameters, e.g. to blacklist
189 * bus ids 0.0.1234, 0.0.1235 and 0.0.1236, you could use any of:
190 * - cio_ignore=1234-1236
191 * - cio_ignore=0x1234-0x1235,1236
192 * - cio_ignore=0x1234,1235-1236
193 * - cio_ignore=1236 cio_ignore=1234-0x1236
194 * - cio_ignore=1234 cio_ignore=1236 cio_ignore=0x1235
195 * - cio_ignore=0.0.1234-0.0.1236
196 * - cio_ignore=0.0.1234,0x1235,1236
197 * - ...
198 */
199static int __init 203static int __init
200blacklist_setup (char *str) 204blacklist_setup (char *str)
201{ 205{
202 CIO_MSG_EVENT(6, "Reading blacklist parameters\n"); 206 CIO_MSG_EVENT(6, "Reading blacklist parameters\n");
203 return blacklist_parse_parameters (str, add); 207 if (blacklist_parse_parameters(str, add, 1))
208 return 0;
209 return 1;
204} 210}
205 211
206__setup ("cio_ignore=", blacklist_setup); 212__setup ("cio_ignore=", blacklist_setup);
@@ -224,27 +230,23 @@ is_blacklisted (int ssid, int devno)
224 * Function: blacklist_parse_proc_parameters 230 * Function: blacklist_parse_proc_parameters
225 * parse the stuff which is piped to /proc/cio_ignore 231 * parse the stuff which is piped to /proc/cio_ignore
226 */ 232 */
227static void 233static int blacklist_parse_proc_parameters(char *buf)
228blacklist_parse_proc_parameters (char *buf)
229{ 234{
230 if (strncmp (buf, "free ", 5) == 0) { 235 int rc;
231 blacklist_parse_parameters (buf + 5, free); 236 char *parm;
232 } else if (strncmp (buf, "add ", 4) == 0) { 237
233 /* 238 parm = strsep(&buf, " ");
234 * We don't need to check for known devices since 239
235 * css_probe_device will handle this correctly. 240 if (strcmp("free", parm) == 0)
236 */ 241 rc = blacklist_parse_parameters(buf, free, 0);
237 blacklist_parse_parameters (buf + 4, add); 242 else if (strcmp("add", parm) == 0)
238 } else { 243 rc = blacklist_parse_parameters(buf, add, 0);
239 printk (KERN_WARNING "cio: cio_ignore: Parse error; \n" 244 else
240 KERN_WARNING "try using 'free all|<devno-range>," 245 return 1;
241 "<devno-range>,...'\n"
242 KERN_WARNING "or 'add <devno-range>,"
243 "<devno-range>,...'\n");
244 return;
245 }
246 246
247 css_schedule_reprobe(); 247 css_schedule_reprobe();
248
249 return rc;
248} 250}
249 251
250/* Iterator struct for all devices. */ 252/* Iterator struct for all devices. */
@@ -328,6 +330,8 @@ cio_ignore_write(struct file *file, const char __user *user_buf,
328 size_t user_len, loff_t *offset) 330 size_t user_len, loff_t *offset)
329{ 331{
330 char *buf; 332 char *buf;
333 size_t i;
334 ssize_t rc, ret;
331 335
332 if (*offset) 336 if (*offset)
333 return -EINVAL; 337 return -EINVAL;
@@ -336,16 +340,27 @@ cio_ignore_write(struct file *file, const char __user *user_buf,
336 buf = vmalloc (user_len + 1); /* maybe better use the stack? */ 340 buf = vmalloc (user_len + 1); /* maybe better use the stack? */
337 if (buf == NULL) 341 if (buf == NULL)
338 return -ENOMEM; 342 return -ENOMEM;
343 memset(buf, 0, user_len + 1);
344
339 if (strncpy_from_user (buf, user_buf, user_len) < 0) { 345 if (strncpy_from_user (buf, user_buf, user_len) < 0) {
340 vfree (buf); 346 rc = -EFAULT;
341 return -EFAULT; 347 goto out_free;
342 } 348 }
343 buf[user_len] = '\0';
344 349
345 blacklist_parse_proc_parameters (buf); 350 i = user_len - 1;
351 while ((i >= 0) && (isspace(buf[i]) || (buf[i] == 0))) {
352 buf[i] = '\0';
353 i--;
354 }
355 ret = blacklist_parse_proc_parameters(buf);
356 if (ret)
357 rc = -EINVAL;
358 else
359 rc = user_len;
346 360
361out_free:
347 vfree (buf); 362 vfree (buf);
348 return user_len; 363 return rc;
349} 364}
350 365
351static const struct seq_operations cio_ignore_proc_seq_ops = { 366static const struct seq_operations cio_ignore_proc_seq_ops = {
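[Note] The blacklist.c rewrite replaces the hand-rolled busid scanner with strsep()-based tokenizing, a strict pure_hex() digit reader, and ranges that may now span subchannel sets (blacklist_range() walks from_ssid up to to_ssid, wrapping the device number at __MAX_SUBCHANNEL); parse errors from the kernel command line are reported via msgtrigger, while /proc writes fail quietly with -EINVAL. The first hunk also adds a second #include <asm/cio.h> that is already present two lines earlier, redundant but harmless. Below is a userspace re-implementation of pure_hex(), runnable for experimentation, with semantics read off the diff (consume hex digits, require min_digit..max_digit of them, bound the value):

    #include <ctype.h>
    #include <stdio.h>

    static int pure_hex(const char **cp, unsigned int *val,
                        int min_digit, int max_digit, unsigned int max_val)
    {
            int digits = 0;

            *val = 0;
            while (isxdigit((unsigned char)**cp) && digits <= max_digit) {
                    int c = tolower((unsigned char)**cp);

                    *val = *val * 16 + (isdigit(c) ? c - '0' : c - 'a' + 10);
                    (*cp)++;
                    digits++;
            }
            if (digits < min_digit || digits > max_digit || *val > max_val)
                    return 1;       /* reject, same convention as above */
            return 0;
    }

    int main(void)
    {
            const char *s = "1f.0.0044";
            unsigned int cssid;
            int rc = pure_hex(&s, &cssid, 1, 2, 0xff);

            printf("rc=%d cssid=%#x rest=\"%s\"\n", rc, cssid, s);
            return 0;
    }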
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 08a57816130..82c6a2d4512 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -39,23 +39,6 @@ debug_info_t *cio_debug_msg_id;
39debug_info_t *cio_debug_trace_id; 39debug_info_t *cio_debug_trace_id;
40debug_info_t *cio_debug_crw_id; 40debug_info_t *cio_debug_crw_id;
41 41
42int cio_show_msg;
43
44static int __init
45cio_setup (char *parm)
46{
47 if (!strcmp (parm, "yes"))
48 cio_show_msg = 1;
49 else if (!strcmp (parm, "no"))
50 cio_show_msg = 0;
51 else
52 printk(KERN_ERR "cio: cio_setup: "
53 "invalid cio_msg parameter '%s'", parm);
54 return 1;
55}
56
57__setup ("cio_msg=", cio_setup);
58
59/* 42/*
60 * Function: cio_debug_init 43 * Function: cio_debug_init
61 * Initializes three debug logs for common I/O: 44 * Initializes three debug logs for common I/O:
@@ -166,7 +149,7 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
166 149
167 stsch (sch->schid, &sch->schib); 150 stsch (sch->schid, &sch->schib);
168 151
169 CIO_MSG_EVENT(0, "cio_start: 'not oper' status for " 152 CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
170 "subchannel 0.%x.%04x!\n", sch->schid.ssid, 153 "subchannel 0.%x.%04x!\n", sch->schid.ssid,
171 sch->schid.sch_no); 154 sch->schid.sch_no);
172 sprintf(dbf_text, "no%s", sch->dev.bus_id); 155 sprintf(dbf_text, "no%s", sch->dev.bus_id);
@@ -567,10 +550,9 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
567 * ... just being curious we check for non I/O subchannels 550 * ... just being curious we check for non I/O subchannels
568 */ 551 */
569 if (sch->st != 0) { 552 if (sch->st != 0) {
570 CIO_DEBUG(KERN_INFO, 0, 553 CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports "
571 "Subchannel 0.%x.%04x reports " 554 "non-I/O subchannel type %04X\n",
572 "non-I/O subchannel type %04X\n", 555 sch->schid.ssid, sch->schid.sch_no, sch->st);
573 sch->schid.ssid, sch->schid.sch_no, sch->st);
574 /* We stop here for non-io subchannels. */ 556 /* We stop here for non-io subchannels. */
575 err = sch->st; 557 err = sch->st;
576 goto out; 558 goto out;
@@ -588,7 +570,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
588 * This device must not be known to Linux. So we simply 570 * This device must not be known to Linux. So we simply
589 * say that there is no device and return ENODEV. 571 * say that there is no device and return ENODEV.
590 */ 572 */
591 CIO_MSG_EVENT(4, "Blacklisted device detected " 573 CIO_MSG_EVENT(6, "Blacklisted device detected "
592 "at devno %04X, subchannel set %x\n", 574 "at devno %04X, subchannel set %x\n",
593 sch->schib.pmcw.dev, sch->schid.ssid); 575 sch->schib.pmcw.dev, sch->schid.ssid);
594 err = -ENODEV; 576 err = -ENODEV;
@@ -601,12 +583,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
601 sch->lpm = sch->schib.pmcw.pam & sch->opm; 583 sch->lpm = sch->schib.pmcw.pam & sch->opm;
602 sch->isc = 3; 584 sch->isc = 3;
603 585
604 CIO_DEBUG(KERN_INFO, 0, 586 CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X "
605 "Detected device %04x on subchannel 0.%x.%04X" 587 "- PIM = %02X, PAM = %02X, POM = %02X\n",
606 " - PIM = %02X, PAM = %02X, POM = %02X\n", 588 sch->schib.pmcw.dev, sch->schid.ssid,
607 sch->schib.pmcw.dev, sch->schid.ssid, 589 sch->schid.sch_no, sch->schib.pmcw.pim,
608 sch->schid.sch_no, sch->schib.pmcw.pim, 590 sch->schib.pmcw.pam, sch->schib.pmcw.pom);
609 sch->schib.pmcw.pam, sch->schib.pmcw.pom);
610 591
611 /* 592 /*
612 * We now have to initially ... 593 * We now have to initially ...
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 3c75412904d..6e933aebe01 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -118,6 +118,4 @@ extern void *cio_get_console_priv(void);
118#define cio_get_console_priv() NULL 118#define cio_get_console_priv() NULL
119#endif 119#endif
120 120
121extern int cio_show_msg;
122
123#endif 121#endif
diff --git a/drivers/s390/cio/cio_debug.h b/drivers/s390/cio/cio_debug.h
index d7429ef6c66..e64e8278c42 100644
--- a/drivers/s390/cio/cio_debug.h
+++ b/drivers/s390/cio/cio_debug.h
@@ -31,10 +31,4 @@ static inline void CIO_HEX_EVENT(int level, void *data, int length)
31 } 31 }
32} 32}
33 33
34#define CIO_DEBUG(printk_level, event_level, msg...) do { \
35 if (cio_show_msg) \
36 printk(printk_level "cio: " msg); \
37 CIO_MSG_EVENT(event_level, msg); \
38 } while (0)
39
40#endif 34#endif
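[Note] With CIO_DEBUG gone, common I/O messages no longer reach the console via printk at all; everything goes through CIO_MSG_EVENT into the s390 debug feature, where the level argument alone expresses importance (lower levels are recorded even at low verbosity settings, which is why the rest of this diff promotes real failures to level 0 and demotes chatter to 4 or 6). Usage stays as in the surrounding hunks (ssid/devno stand in for real values):

    CIO_MSG_EVENT(0, "Couldn't unregister orphan 0.%x.%04x\n",
                  ssid, devno);            /* failure: always recorded */
    CIO_MSG_EVENT(4, "reprobe start\n");   /* noise: high level only */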
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 595e327d2f7..a76956512b2 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -570,7 +570,7 @@ static void reprobe_all(struct work_struct *unused)
570{ 570{
571 int ret; 571 int ret;
572 572
573 CIO_MSG_EVENT(2, "reprobe start\n"); 573 CIO_MSG_EVENT(4, "reprobe start\n");
574 574
575 need_reprobe = 0; 575 need_reprobe = 0;
576 /* Make sure initial subchannel scan is done. */ 576 /* Make sure initial subchannel scan is done. */
@@ -578,7 +578,7 @@ static void reprobe_all(struct work_struct *unused)
578 atomic_read(&ccw_device_init_count) == 0); 578 atomic_read(&ccw_device_init_count) == 0);
579 ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL); 579 ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
580 580
581 CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret, 581 CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
582 need_reprobe); 582 need_reprobe);
583} 583}
584 584
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index abfd601d237..e22813db74a 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -341,7 +341,7 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
341 rc = device_schedule_callback(&cdev->dev, 341 rc = device_schedule_callback(&cdev->dev,
342 ccw_device_remove_orphan_cb); 342 ccw_device_remove_orphan_cb);
343 if (rc) 343 if (rc)
344 CIO_MSG_EVENT(2, "Couldn't unregister orphan " 344 CIO_MSG_EVENT(0, "Couldn't unregister orphan "
345 "0.%x.%04x\n", 345 "0.%x.%04x\n",
346 cdev->private->dev_id.ssid, 346 cdev->private->dev_id.ssid,
347 cdev->private->dev_id.devno); 347 cdev->private->dev_id.devno);
@@ -351,7 +351,7 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
351 rc = device_schedule_callback(cdev->dev.parent, 351 rc = device_schedule_callback(cdev->dev.parent,
352 ccw_device_remove_sch_cb); 352 ccw_device_remove_sch_cb);
353 if (rc) 353 if (rc)
354 CIO_MSG_EVENT(2, "Couldn't unregister disconnected device " 354 CIO_MSG_EVENT(0, "Couldn't unregister disconnected device "
355 "0.%x.%04x\n", 355 "0.%x.%04x\n",
356 cdev->private->dev_id.ssid, 356 cdev->private->dev_id.ssid,
357 cdev->private->dev_id.devno); 357 cdev->private->dev_id.devno);
@@ -397,7 +397,7 @@ int ccw_device_set_offline(struct ccw_device *cdev)
397 if (ret == 0) 397 if (ret == 0)
398 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); 398 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
399 else { 399 else {
400 CIO_MSG_EVENT(2, "ccw_device_offline returned %d, " 400 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
401 "device 0.%x.%04x\n", 401 "device 0.%x.%04x\n",
402 ret, cdev->private->dev_id.ssid, 402 ret, cdev->private->dev_id.ssid,
403 cdev->private->dev_id.devno); 403 cdev->private->dev_id.devno);
@@ -433,7 +433,7 @@ int ccw_device_set_online(struct ccw_device *cdev)
433 if (ret == 0) 433 if (ret == 0)
434 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); 434 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
435 else { 435 else {
436 CIO_MSG_EVENT(2, "ccw_device_online returned %d, " 436 CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
437 "device 0.%x.%04x\n", 437 "device 0.%x.%04x\n",
438 ret, cdev->private->dev_id.ssid, 438 ret, cdev->private->dev_id.ssid,
439 cdev->private->dev_id.devno); 439 cdev->private->dev_id.devno);
@@ -451,7 +451,7 @@ int ccw_device_set_online(struct ccw_device *cdev)
451 if (ret == 0) 451 if (ret == 0)
452 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); 452 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
453 else 453 else
454 CIO_MSG_EVENT(2, "ccw_device_offline returned %d, " 454 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
455 "device 0.%x.%04x\n", 455 "device 0.%x.%04x\n",
456 ret, cdev->private->dev_id.ssid, 456 ret, cdev->private->dev_id.ssid,
457 cdev->private->dev_id.devno); 457 cdev->private->dev_id.devno);
@@ -803,7 +803,7 @@ static void sch_attach_disconnected_device(struct subchannel *sch,
803 other_sch = to_subchannel(get_device(cdev->dev.parent)); 803 other_sch = to_subchannel(get_device(cdev->dev.parent));
804 ret = device_move(&cdev->dev, &sch->dev); 804 ret = device_move(&cdev->dev, &sch->dev);
805 if (ret) { 805 if (ret) {
806 CIO_MSG_EVENT(2, "Moving disconnected device 0.%x.%04x failed " 806 CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed "
807 "(ret=%d)!\n", cdev->private->dev_id.ssid, 807 "(ret=%d)!\n", cdev->private->dev_id.ssid,
808 cdev->private->dev_id.devno, ret); 808 cdev->private->dev_id.devno, ret);
809 put_device(&other_sch->dev); 809 put_device(&other_sch->dev);
@@ -933,7 +933,7 @@ io_subchannel_register(struct work_struct *work)
933 ret = device_reprobe(&cdev->dev); 933 ret = device_reprobe(&cdev->dev);
934 if (ret) 934 if (ret)
935 /* We can't do much here. */ 935 /* We can't do much here. */
936 CIO_MSG_EVENT(2, "device_reprobe() returned" 936 CIO_MSG_EVENT(0, "device_reprobe() returned"
937 " %d for 0.%x.%04x\n", ret, 937 " %d for 0.%x.%04x\n", ret,
938 cdev->private->dev_id.ssid, 938 cdev->private->dev_id.ssid,
939 cdev->private->dev_id.devno); 939 cdev->private->dev_id.devno);
@@ -1086,7 +1086,7 @@ static void ccw_device_move_to_sch(struct work_struct *work)
1086 rc = device_move(&cdev->dev, &sch->dev); 1086 rc = device_move(&cdev->dev, &sch->dev);
1087 mutex_unlock(&sch->reg_mutex); 1087 mutex_unlock(&sch->reg_mutex);
1088 if (rc) { 1088 if (rc) {
1089 CIO_MSG_EVENT(2, "Moving device 0.%x.%04x to subchannel " 1089 CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to subchannel "
1090 "0.%x.%04x failed (ret=%d)!\n", 1090 "0.%x.%04x failed (ret=%d)!\n",
1091 cdev->private->dev_id.ssid, 1091 cdev->private->dev_id.ssid,
1092 cdev->private->dev_id.devno, sch->schid.ssid, 1092 cdev->private->dev_id.devno, sch->schid.ssid,
@@ -1446,8 +1446,7 @@ ccw_device_remove (struct device *dev)
1446 wait_event(cdev->private->wait_q, 1446 wait_event(cdev->private->wait_q,
1447 dev_fsm_final_state(cdev)); 1447 dev_fsm_final_state(cdev));
1448 else 1448 else
1449 //FIXME: we can't fail! 1449 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
1450 CIO_MSG_EVENT(2, "ccw_device_offline returned %d, "
1451 "device 0.%x.%04x\n", 1450 "device 0.%x.%04x\n",
1452 ret, cdev->private->dev_id.ssid, 1451 ret, cdev->private->dev_id.ssid,
1453 cdev->private->dev_id.devno); 1452 cdev->private->dev_id.devno);
@@ -1524,7 +1523,7 @@ static int recovery_check(struct device *dev, void *data)
1524 spin_lock_irq(cdev->ccwlock); 1523 spin_lock_irq(cdev->ccwlock);
1525 switch (cdev->private->state) { 1524 switch (cdev->private->state) {
1526 case DEV_STATE_DISCONNECTED: 1525 case DEV_STATE_DISCONNECTED:
1527 CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", 1526 CIO_MSG_EVENT(4, "recovery: trigger 0.%x.%04x\n",
1528 cdev->private->dev_id.ssid, 1527 cdev->private->dev_id.ssid,
1529 cdev->private->dev_id.devno); 1528 cdev->private->dev_id.devno);
1530 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 1529 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
@@ -1554,7 +1553,7 @@ static void recovery_work_func(struct work_struct *unused)
1554 } 1553 }
1555 spin_unlock_irq(&recovery_lock); 1554 spin_unlock_irq(&recovery_lock);
1556 } else 1555 } else
1557 CIO_MSG_EVENT(2, "recovery: end\n"); 1556 CIO_MSG_EVENT(4, "recovery: end\n");
1558} 1557}
1559 1558
1560static DECLARE_WORK(recovery_work, recovery_work_func); 1559static DECLARE_WORK(recovery_work, recovery_work_func);
@@ -1572,7 +1571,7 @@ void ccw_device_schedule_recovery(void)
1572{ 1571{
1573 unsigned long flags; 1572 unsigned long flags;
1574 1573
1575 CIO_MSG_EVENT(2, "recovery: schedule\n"); 1574 CIO_MSG_EVENT(4, "recovery: schedule\n");
1576 spin_lock_irqsave(&recovery_lock, flags); 1575 spin_lock_irqsave(&recovery_lock, flags);
1577 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { 1576 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1578 recovery_phase = 0; 1577 recovery_phase = 0;
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 99403b0a97a..e268d5a77c1 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -322,10 +322,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
322 same_dev = 0; /* Keep the compiler quiet... */ 322 same_dev = 0; /* Keep the compiler quiet... */
323 switch (state) { 323 switch (state) {
324 case DEV_STATE_NOT_OPER: 324 case DEV_STATE_NOT_OPER:
325 CIO_DEBUG(KERN_WARNING, 2, 325 CIO_MSG_EVENT(2, "SenseID : unknown device %04x on "
326 "SenseID : unknown device %04x on subchannel " 326 "subchannel 0.%x.%04x\n",
327 "0.%x.%04x\n", cdev->private->dev_id.devno, 327 cdev->private->dev_id.devno,
328 sch->schid.ssid, sch->schid.sch_no); 328 sch->schid.ssid, sch->schid.sch_no);
329 break; 329 break;
330 case DEV_STATE_OFFLINE: 330 case DEV_STATE_OFFLINE:
331 if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) { 331 if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
@@ -348,20 +348,19 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
348 return; 348 return;
349 } 349 }
350 /* Issue device info message. */ 350 /* Issue device info message. */
351 CIO_DEBUG(KERN_INFO, 2, 351 CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
352 "SenseID : device 0.%x.%04x reports: " 352 "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
353 "CU Type/Mod = %04X/%02X, Dev Type/Mod = " 353 "%04X/%02X\n",
354 "%04X/%02X\n", 354 cdev->private->dev_id.ssid,
355 cdev->private->dev_id.ssid, 355 cdev->private->dev_id.devno,
356 cdev->private->dev_id.devno, 356 cdev->id.cu_type, cdev->id.cu_model,
357 cdev->id.cu_type, cdev->id.cu_model, 357 cdev->id.dev_type, cdev->id.dev_model);
358 cdev->id.dev_type, cdev->id.dev_model);
359 break; 358 break;
360 case DEV_STATE_BOXED: 359 case DEV_STATE_BOXED:
361 CIO_DEBUG(KERN_WARNING, 2, 360 CIO_MSG_EVENT(0, "SenseID : boxed device %04x on "
362 "SenseID : boxed device %04x on subchannel " 361 " subchannel 0.%x.%04x\n",
363 "0.%x.%04x\n", cdev->private->dev_id.devno, 362 cdev->private->dev_id.devno,
364 sch->schid.ssid, sch->schid.sch_no); 363 sch->schid.ssid, sch->schid.sch_no);
365 break; 364 break;
366 } 365 }
367 cdev->private->state = state; 366 cdev->private->state = state;
@@ -443,9 +442,8 @@ ccw_device_done(struct ccw_device *cdev, int state)
443 442
444 443
445 if (state == DEV_STATE_BOXED) 444 if (state == DEV_STATE_BOXED)
446 CIO_DEBUG(KERN_WARNING, 2, 445 CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
447 "Boxed device %04x on subchannel %04x\n", 446 cdev->private->dev_id.devno, sch->schid.sch_no);
448 cdev->private->dev_id.devno, sch->schid.sch_no);
449 447
450 if (cdev->private->flags.donotify) { 448 if (cdev->private->flags.donotify) {
451 cdev->private->flags.donotify = 0; 449 cdev->private->flags.donotify = 0;
@@ -900,7 +898,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
900 /* Basic sense hasn't started. Try again. */ 898 /* Basic sense hasn't started. Try again. */
901 ccw_device_do_sense(cdev, irb); 899 ccw_device_do_sense(cdev, irb);
902 else { 900 else {
903 CIO_MSG_EVENT(2, "Huh? 0.%x.%04x: unsolicited " 901 CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
904 "interrupt during w4sense...\n", 902 "interrupt during w4sense...\n",
905 cdev->private->dev_id.ssid, 903 cdev->private->dev_id.ssid,
906 cdev->private->dev_id.devno); 904 cdev->private->dev_id.devno);
@@ -1169,8 +1167,10 @@ ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
1169static void 1167static void
1170ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event) 1168ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
1171{ 1169{
1172 CIO_MSG_EVENT(0, "dev_jumptable[%i][%i] == NULL\n", 1170 CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device "
1173 cdev->private->state, dev_event); 1171 "0.%x.%04x\n", cdev->private->state, dev_event,
1172 cdev->private->dev_id.ssid,
1173 cdev->private->dev_id.devno);
1174 BUG(); 1174 BUG();
1175} 1175}
1176 1176
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index dc4d87f77f6..cba7020517e 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -214,7 +214,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
214 * sense id information. So, for intervention required, 214 * sense id information. So, for intervention required,
215 * we use the "whack it until it talks" strategy... 215 * we use the "whack it until it talks" strategy...
216 */ 216 */
217 CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel " 217 CIO_MSG_EVENT(0, "SenseID : device %04x on Subchannel "
218 "0.%x.%04x reports cmd reject\n", 218 "0.%x.%04x reports cmd reject\n",
219 cdev->private->dev_id.devno, sch->schid.ssid, 219 cdev->private->dev_id.devno, sch->schid.ssid,
220 sch->schid.sch_no); 220 sch->schid.sch_no);
@@ -239,7 +239,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
239 239
240 lpm = to_io_private(sch)->orb.lpm; 240 lpm = to_io_private(sch)->orb.lpm;
241 if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) 241 if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
242 CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x " 242 CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x "
243 "on subchannel 0.%x.%04x is " 243 "on subchannel 0.%x.%04x is "
244 "'not operational'\n", lpm, 244 "'not operational'\n", lpm,
245 cdev->private->dev_id.devno, 245 cdev->private->dev_id.devno,
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index c52449a1f9f..ba559053402 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -79,7 +79,7 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev)
79 /* ret is 0, -EBUSY, -EACCES or -ENODEV */ 79 /* ret is 0, -EBUSY, -EACCES or -ENODEV */
80 if (ret != -EACCES) 80 if (ret != -EACCES)
81 return ret; 81 return ret;
82 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel " 82 CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel "
83 "0.%x.%04x, lpm %02X, became 'not " 83 "0.%x.%04x, lpm %02X, became 'not "
84 "operational'\n", 84 "operational'\n",
85 cdev->private->dev_id.devno, 85 cdev->private->dev_id.devno,
@@ -159,7 +159,7 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
159 u8 lpm; 159 u8 lpm;
160 160
161 lpm = to_io_private(sch)->orb.lpm; 161 lpm = to_io_private(sch)->orb.lpm;
162 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x," 162 CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x,"
163 " lpm %02X, became 'not operational'\n", 163 " lpm %02X, became 'not operational'\n",
164 cdev->private->dev_id.devno, sch->schid.ssid, 164 cdev->private->dev_id.devno, sch->schid.ssid,
165 sch->schid.sch_no, lpm); 165 sch->schid.sch_no, lpm);
@@ -275,7 +275,7 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
275 return ret; 275 return ret;
276 } 276 }
277 /* PGID command failed on this path. */ 277 /* PGID command failed on this path. */
278 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " 278 CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel "
279 "0.%x.%04x, lpm %02X, became 'not operational'\n", 279 "0.%x.%04x, lpm %02X, became 'not operational'\n",
280 cdev->private->dev_id.devno, sch->schid.ssid, 280 cdev->private->dev_id.devno, sch->schid.ssid,
281 sch->schid.sch_no, cdev->private->imask); 281 sch->schid.sch_no, cdev->private->imask);
@@ -317,7 +317,7 @@ static int __ccw_device_do_nop(struct ccw_device *cdev)
317 return ret; 317 return ret;
318 } 318 }
319 /* nop command failed on this path. */ 319 /* nop command failed on this path. */
320 CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel " 320 CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel "
321 "0.%x.%04x, lpm %02X, became 'not operational'\n", 321 "0.%x.%04x, lpm %02X, became 'not operational'\n",
322 cdev->private->dev_id.devno, sch->schid.ssid, 322 cdev->private->dev_id.devno, sch->schid.ssid,
323 sch->schid.sch_no, cdev->private->imask); 323 sch->schid.sch_no, cdev->private->imask);
@@ -362,7 +362,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
362 return -EAGAIN; 362 return -EAGAIN;
363 } 363 }
364 if (irb->scsw.cc == 3) { 364 if (irb->scsw.cc == 3) {
365 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel 0.%x.%04x," 365 CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x,"
366 " lpm %02X, became 'not operational'\n", 366 " lpm %02X, became 'not operational'\n",
367 cdev->private->dev_id.devno, sch->schid.ssid, 367 cdev->private->dev_id.devno, sch->schid.ssid,
368 sch->schid.sch_no, cdev->private->imask); 368 sch->schid.sch_no, cdev->private->imask);
@@ -391,7 +391,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev)
 		return -ETIME;
 	}
 	if (irb->scsw.cc == 3) {
-		CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel 0.%x.%04x,"
+		CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x,"
 			      " lpm %02X, became 'not operational'\n",
 			      cdev->private->dev_id.devno, sch->schid.ssid,
 			      sch->schid.sch_no, cdev->private->imask);
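The CIO_MSG_EVENT() bumps above demote these routine 'not operational' notices
from debug level 2 to levels 3 and 4, so they are recorded only when the cio
message debug area is set to a higher verbosity. As a rough sketch of what the
macro expands to (simplified; the real definition lives in
drivers/s390/cio/cio_debug.h):

	/* Simplified sketch -- a higher 'level' marks a less important
	 * event, logged only when the debug area's current level is at
	 * least that high. */
	#define CIO_MSG_EVENT(level, args...) \
		debug_sprintf_event(cio_debug_msg_id, level, ##args)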
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 4d4b54277c4..5080f343ad7 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -48,10 +48,11 @@ s390_collect_crw_info(void *param)
 	int ccode;
 	struct semaphore *sem;
 	unsigned int chain;
+	int ignore;
 
 	sem = (struct semaphore *)param;
 repeat:
-	down_interruptible(sem);
+	ignore = down_interruptible(sem);
 	chain = 0;
 	while (1) {
 		if (unlikely(chain > 1)) {
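down_interruptible() returns 0 on success and -EINTR when the sleep is broken
by a signal; assigning the result to a throwaway variable here silences the
__must_check warning without changing behaviour. A caller that actually wanted
to handle the interruption might instead loop, along these lines (a minimal
sketch, not part of this patch):

	/* Retry until the semaphore is really taken. */
	while (down_interruptible(sem) == -EINTR)
		;	/* interrupted by a signal: wait again */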
diff --git a/drivers/sbus/char/bpp.c b/drivers/sbus/char/bpp.c
index 4fab0c23814..b87037ec980 100644
--- a/drivers/sbus/char/bpp.c
+++ b/drivers/sbus/char/bpp.c
@@ -41,7 +41,7 @@
 #define BPP_DELAY 100
 
 static const unsigned BPP_MAJOR = LP_MAJOR;
-static const char* dev_name = "bpp";
+static const char *bpp_dev_name = "bpp";
 
 /* When switching from compatibility to a mode where I can read, try
    the following mode first. */
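The rename avoids shadowing dev_name(), the generic device-name accessor
provided by linux/device.h, which a file-scope variable of the same identifier
would collide with. Schematically, the accessor of this era looked like the
following (shown for illustration, not part of this patch):

	static inline const char *dev_name(struct device *dev)
	{
		return dev->bus_id;	/* later kernels use the kobject name */
	}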
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 0fb5bf4c43a..8508816f303 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -1967,45 +1967,6 @@ cleanup:
 	return rcode;
 }
 
-
-/*
- * This routine returns information about the system. This does not effect
- * any logic and if the info is wrong - it doesn't matter.
- */
-
-/* Get all the info we can not get from kernel services */
-static int adpt_system_info(void __user *buffer)
-{
-	sysInfo_S si;
-
-	memset(&si, 0, sizeof(si));
-
-	si.osType = OS_LINUX;
-	si.osMajorVersion = 0;
-	si.osMinorVersion = 0;
-	si.osRevision = 0;
-	si.busType = SI_PCI_BUS;
-	si.processorFamily = DPTI_sig.dsProcessorFamily;
-
-#if defined __i386__
-	adpt_i386_info(&si);
-#elif defined (__ia64__)
-	adpt_ia64_info(&si);
-#elif defined(__sparc__)
-	adpt_sparc_info(&si);
-#elif defined (__alpha__)
-	adpt_alpha_info(&si);
-#else
-	si.processorType = 0xff ;
-#endif
-	if(copy_to_user(buffer, &si, sizeof(si))){
-		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
-		return -EFAULT;
-	}
-
-	return 0;
-}
-
 #if defined __ia64__
 static void adpt_ia64_info(sysInfo_S* si)
 {
@@ -2016,7 +1977,6 @@ static void adpt_ia64_info(sysInfo_S* si)
 }
 #endif
 
-
 #if defined __sparc__
 static void adpt_sparc_info(sysInfo_S* si)
 {
@@ -2026,7 +1986,6 @@ static void adpt_sparc_info(sysInfo_S* si)
 	si->processorType = PROC_ULTRASPARC;
 }
 #endif
-
 #if defined __alpha__
 static void adpt_alpha_info(sysInfo_S* si)
 {
@@ -2038,7 +1997,6 @@ static void adpt_alpha_info(sysInfo_S* si)
 #endif
 
 #if defined __i386__
-
 static void adpt_i386_info(sysInfo_S* si)
 {
 	// This is all the info we need for now
@@ -2059,9 +2017,45 @@ static void adpt_i386_info(sysInfo_S* si)
 		break;
 	}
 }
+#endif
+
+/*
+ * This routine returns information about the system. This does not effect
+ * any logic and if the info is wrong - it doesn't matter.
+ */
 
+/* Get all the info we can not get from kernel services */
+static int adpt_system_info(void __user *buffer)
+{
+	sysInfo_S si;
+
+	memset(&si, 0, sizeof(si));
+
+	si.osType = OS_LINUX;
+	si.osMajorVersion = 0;
+	si.osMinorVersion = 0;
+	si.osRevision = 0;
+	si.busType = SI_PCI_BUS;
+	si.processorFamily = DPTI_sig.dsProcessorFamily;
+
+#if defined __i386__
+	adpt_i386_info(&si);
+#elif defined (__ia64__)
+	adpt_ia64_info(&si);
+#elif defined(__sparc__)
+	adpt_sparc_info(&si);
+#elif defined (__alpha__)
+	adpt_alpha_info(&si);
+#else
+	si.processorType = 0xff ;
 #endif
+	if (copy_to_user(buffer, &si, sizeof(si))){
+		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
+		return -EFAULT;
+	}
 
+	return 0;
+}
 
 static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
 		      ulong arg)
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
index 924cd5a5167..337746d4604 100644
--- a/drivers/scsi/dpti.h
+++ b/drivers/scsi/dpti.h
@@ -316,19 +316,6 @@ static int adpt_close(struct inode *inode, struct file *file);
 static void adpt_delay(int millisec);
 #endif
 
-#if defined __ia64__
-static void adpt_ia64_info(sysInfo_S* si);
-#endif
-#if defined __sparc__
-static void adpt_sparc_info(sysInfo_S* si);
-#endif
-#if defined __alpha__
-static void adpt_sparc_info(sysInfo_S* si);
-#endif
-#if defined __i386__
-static void adpt_i386_info(sysInfo_S* si);
-#endif
-
 #define PRINT_BUFFER_SIZE 512
 
 #define HBA_FLAGS_DBG_FLAGS_MASK 0xffff0000 // Mask for debug flags
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 1e2b9d826f6..eab03273379 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -556,7 +556,7 @@ static int uart_chars_in_buffer(struct tty_struct *tty)
 static void uart_flush_buffer(struct tty_struct *tty)
 {
 	struct uart_state *state = tty->driver_data;
-	struct uart_port *port = state->port;
+	struct uart_port *port;
 	unsigned long flags;
 
 	/*
@@ -568,6 +568,7 @@ static void uart_flush_buffer(struct tty_struct *tty)
 		return;
 	}
 
+	port = state->port;
 	pr_debug("uart_flush_buffer(%d) called\n", tty->index);
 
 	spin_lock_irqsave(&port->lock, flags);
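Taken together, the two hunks above move the state->port dereference past the
early-return guard, since a hung-up tty can reach uart_flush_buffer() with its
driver_data already torn down. The resulting entry of the function looks
roughly like this (abridged; the guard condition is paraphrased from the
surrounding code, not quoted from it):

	struct uart_state *state = tty->driver_data;
	struct uart_port *port;
	unsigned long flags;

	if (!state || !state->info)	/* bail out before touching state */
		return;

	port = state->port;		/* safe only after the guard */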
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 33b467a8352..1ef6df395e0 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -129,7 +129,7 @@ config USB_ISP1760_PCI
 
 config USB_ISP1760_OF
 	bool "Support for the OF platform bus"
-	depends on USB_ISP1760_HCD && OF
+	depends on USB_ISP1760_HCD && PPC_OF
 	---help---
 	  Enables support for the device present on the PowerPC
 	  OpenFirmware platform bus.
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 8a217648b25..a01e987c7d3 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -643,7 +643,7 @@ static void read_buf_callback(struct urb *urb)
 static int iuu_bulk_write(struct usb_serial_port *port)
 {
 	struct iuu_private *priv = usb_get_serial_port_data(port);
-	unsigned int flags;
+	unsigned long flags;
 	int result;
 	int i;
 	char *buf_ptr = port->write_urb->transfer_buffer;
@@ -694,7 +694,7 @@ static void iuu_uart_read_callback(struct urb *urb)
 {
 	struct usb_serial_port *port = urb->context;
 	struct iuu_private *priv = usb_get_serial_port_data(port);
-	unsigned int flags;
+	unsigned long flags;
 	int status;
 	int error = 0;
 	int len = 0;
@@ -759,7 +759,7 @@ static int iuu_uart_write(struct usb_serial_port *port, const u8 *buf,
 			  int count)
 {
 	struct iuu_private *priv = usb_get_serial_port_data(port);
-	unsigned int flags;
+	unsigned long flags;
 	dbg("%s - enter", __func__);
 
 	if (count > 256)
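spin_lock_irqsave() takes an unsigned long for its flags argument because it
stashes the arch-specific interrupt state there; an unsigned int happens to
work on 32-bit builds but can truncate that state on 64-bit targets. The
corrected declarations pair with the usual save/restore sequence (a generic
sketch; the lock member name is illustrative):

	unsigned long flags;	/* never int: holds arch IRQ state */

	spin_lock_irqsave(&priv->lock, flags);	/* disable IRQs, save state */
	/* ... touch data shared with the URB completion callbacks ... */
	spin_unlock_irqrestore(&priv->lock, flags);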
diff --git a/drivers/video/bw2.c b/drivers/video/bw2.c
index 275d9dab0c6..79f85dc402d 100644
--- a/drivers/video/bw2.c
+++ b/drivers/video/bw2.c
@@ -329,7 +329,7 @@ static int __devinit bw2_probe(struct of_device *op, const struct of_device_id *
 	if (!info->screen_base)
 		goto out_unmap_regs;
 
-	bw2_blank(0, info);
+	bw2_blank(FB_BLANK_UNBLANK, info);
 
 	bw2_init_fix(info, linebytes);
 
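This and the matching changes to the other SBUS framebuffer drivers below are
behaviour-preserving: FB_BLANK_UNBLANK resolves to 0, so only the spelling
changes, and the symbolic name makes clear that probe is unblanking the
display rather than passing a magic number. For reference, the first two
constants as they resolve from the VESA blanking levels in include/linux/fb.h
(comments abridged):

	#define FB_BLANK_UNBLANK	0	/* screen: unblanked */
	#define FB_BLANK_NORMAL		1	/* screen: blanked */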
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c
index 010ea53978f..e31e26a6bb7 100644
--- a/drivers/video/cg3.c
+++ b/drivers/video/cg3.c
@@ -398,7 +398,7 @@ static int __devinit cg3_probe(struct of_device *op,
 	if (!info->screen_base)
 		goto out_unmap_regs;
 
-	cg3_blank(0, info);
+	cg3_blank(FB_BLANK_UNBLANK, info);
 
 	if (!of_find_property(dp, "width", NULL)) {
 		err = cg3_do_default_mode(par);
diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c
index fc90db6da65..8000bccecdc 100644
--- a/drivers/video/cg6.c
+++ b/drivers/video/cg6.c
@@ -767,7 +767,7 @@ static int __devinit cg6_probe(struct of_device *op,
 
 	cg6_bt_init(par);
 	cg6_chip_init(info);
-	cg6_blank(0, info);
+	cg6_blank(FB_BLANK_UNBLANK, info);
 
 	if (fb_alloc_cmap(&info->cmap, 256, 0))
 		goto out_unmap_regs;
diff --git a/drivers/video/ffb.c b/drivers/video/ffb.c
index 93dca3e2aa5..0f42a696d17 100644
--- a/drivers/video/ffb.c
+++ b/drivers/video/ffb.c
@@ -987,7 +987,7 @@ static int __devinit ffb_probe(struct of_device *op,
 	 * chosen console, it will have video outputs off in
 	 * the DAC.
 	 */
-	ffb_blank(0, info);
+	ffb_blank(FB_BLANK_UNBLANK, info);
 
 	if (fb_alloc_cmap(&info->cmap, 256, 0))
 		goto out_unmap_dac;
diff --git a/drivers/video/leo.c b/drivers/video/leo.c
index f3160fc2979..fb129928d5d 100644
--- a/drivers/video/leo.c
+++ b/drivers/video/leo.c
@@ -601,7 +601,7 @@ static int __devinit leo_probe(struct of_device *op, const struct of_device_id *
 	leo_init_wids(info);
 	leo_init_hw(info);
 
-	leo_blank(0, info);
+	leo_blank(FB_BLANK_UNBLANK, info);
 
 	if (fb_alloc_cmap(&info->cmap, 256, 0))
 		goto out_unmap_regs;
diff --git a/drivers/video/p9100.c b/drivers/video/p9100.c
index c95874fe907..676ffb06d1c 100644
--- a/drivers/video/p9100.c
+++ b/drivers/video/p9100.c
@@ -295,7 +295,7 @@ static int __devinit p9100_probe(struct of_device *op, const struct of_device_id
 	if (!info->screen_base)
 		goto out_unmap_regs;
 
-	p9100_blank(0, info);
+	p9100_blank(FB_BLANK_UNBLANK, info);
 
 	if (fb_alloc_cmap(&info->cmap, 256, 0))
 		goto out_unmap_screen;
diff --git a/drivers/video/tcx.c b/drivers/video/tcx.c
index a7177430577..44e8c27ed0f 100644
--- a/drivers/video/tcx.c
+++ b/drivers/video/tcx.c
@@ -84,7 +84,7 @@ struct tcx_tec {
 
 struct tcx_thc {
 	u32 thc_rev;
-        u32 thc_pad0[511];
+	u32 thc_pad0[511];
 	u32 thc_hs;		/* hsync timing */
 	u32 thc_hsdvs;
 	u32 thc_hd;
@@ -126,10 +126,10 @@ struct tcx_par {
 };
 
 /* Reset control plane so that WID is 8-bit plane. */
-static void __tcx_set_control_plane (struct tcx_par *par)
+static void __tcx_set_control_plane(struct tcx_par *par)
 {
 	u32 __iomem *p, *pend;
 
 	if (par->lowdepth)
 		return;
 
@@ -143,8 +143,8 @@ static void __tcx_set_control_plane(struct tcx_par *par)
 		sbus_writel(tmp, p);
 	}
 }
 
-static void tcx_reset (struct fb_info *info)
+static void tcx_reset(struct fb_info *info)
 {
 	struct tcx_par *par = (struct tcx_par *) info->par;
 	unsigned long flags;
@@ -365,7 +365,8 @@ static void tcx_unmap_regs(struct of_device *op, struct fb_info *info,
 			   info->screen_base, par->fbsize);
 }
 
-static int __devinit tcx_init_one(struct of_device *op)
+static int __devinit tcx_probe(struct of_device *op,
+			       const struct of_device_id *match)
 {
 	struct device_node *dp = op->node;
 	struct fb_info *info;
@@ -488,13 +489,6 @@ out_err:
 	return err;
 }
 
-static int __devinit tcx_probe(struct of_device *dev, const struct of_device_id *match)
-{
-	struct of_device *op = to_of_device(&dev->dev);
-
-	return tcx_init_one(op);
-}
-
 static int __devexit tcx_remove(struct of_device *op)
 {
 	struct fb_info *info = dev_get_drvdata(&op->dev);
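Folding tcx_init_one() into tcx_probe() works because the probe routine now
carries exactly the signature the of_platform bus expects, so the old
wrapper's to_of_device(&dev->dev) round trip was pure overhead. The
registration it plugs into looks roughly like this (member names per the
of_platform_driver API of this era; abridged sketch, not quoted from the
patch):

	static struct of_platform_driver tcx_driver = {
		.name		= "tcx",
		.match_table	= tcx_match,
		.probe		= tcx_probe,	/* (struct of_device *,
						   const struct of_device_id *) */
		.remove		= __devexit_p(tcx_remove),
	};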