Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/numa.c | 9
-rw-r--r-- drivers/acpi/osl.c | 6
-rw-r--r-- drivers/ata/Kconfig | 18
-rw-r--r-- drivers/ata/Makefile | 1
-rw-r--r-- drivers/ata/ahci.c | 8
-rw-r--r-- drivers/ata/ahci.h | 6
-rw-r--r-- drivers/ata/ata_generic.c | 2
-rw-r--r-- drivers/ata/ata_piix.c | 2
-rw-r--r-- drivers/ata/libata-acpi.c | 3
-rw-r--r-- drivers/ata/libata-core.c | 54
-rw-r--r-- drivers/ata/libata-eh.c | 60
-rw-r--r-- drivers/ata/libata-scsi.c | 15
-rw-r--r-- drivers/ata/libata-sff.c | 17
-rw-r--r-- drivers/ata/libata.h | 1
-rw-r--r-- drivers/ata/pata_acpi.c | 2
-rw-r--r-- drivers/ata/pata_arasan_cf.c | 983
-rw-r--r-- drivers/ata/pata_at32.c | 2
-rw-r--r-- drivers/ata/pata_bf54x.c | 4
-rw-r--r-- drivers/ata/pata_hpt366.c | 7
-rw-r--r-- drivers/ata/pata_hpt37x.c | 23
-rw-r--r-- drivers/ata/pata_hpt3x2n.c | 13
-rw-r--r-- drivers/ata/pata_hpt3x3.c | 2
-rw-r--r-- drivers/ata/pata_it821x.c | 4
-rw-r--r-- drivers/ata/pata_ixp4xx_cf.c | 2
-rw-r--r-- drivers/ata/pata_macio.c | 3
-rw-r--r-- drivers/ata/pata_marvell.c | 2
-rw-r--r-- drivers/ata/pata_ninja32.c | 2
-rw-r--r-- drivers/ata/pata_octeon_cf.c | 3
-rw-r--r-- drivers/ata/pata_palmld.c | 2
-rw-r--r-- drivers/ata/pata_pcmcia.c | 2
-rw-r--r-- drivers/ata/pata_pdc2027x.c | 6
-rw-r--r-- drivers/ata/pata_pxa.c | 1
-rw-r--r-- drivers/ata/pata_rb532_cf.c | 1
-rw-r--r-- drivers/ata/pata_samsung_cf.c | 1
-rw-r--r-- drivers/ata/pata_scc.c | 2
-rw-r--r-- drivers/ata/pata_sis.c | 2
-rw-r--r-- drivers/ata/pdc_adma.c | 4
-rw-r--r-- drivers/ata/sata_dwc_460ex.c | 75
-rw-r--r-- drivers/ata/sata_fsl.c | 22
-rw-r--r-- drivers/ata/sata_mv.c | 3
-rw-r--r-- drivers/ata/sata_nv.c | 14
-rw-r--r-- drivers/ata/sata_promise.c | 4
-rw-r--r-- drivers/ata/sata_qstor.c | 3
-rw-r--r-- drivers/ata/sata_sil.c | 3
-rw-r--r-- drivers/ata/sata_sil24.c | 3
-rw-r--r-- drivers/ata/sata_sis.c | 2
-rw-r--r-- drivers/ata/sata_svw.c | 12
-rw-r--r-- drivers/ata/sata_sx4.c | 5
-rw-r--r-- drivers/ata/sata_uli.c | 3
-rw-r--r-- drivers/ata/sata_via.c | 9
-rw-r--r-- drivers/ata/sata_vsc.c | 3
-rw-r--r-- drivers/block/xen-blkfront.c | 87
-rw-r--r-- drivers/char/mmtimer.c | 30
-rw-r--r-- drivers/cpufreq/cpufreq_conservative.c | 22
-rw-r--r-- drivers/cpufreq/cpufreq_ondemand.c | 20
-rw-r--r-- drivers/i2c/busses/i2c-ocores.c | 14
-rw-r--r-- drivers/i2c/i2c-core.c | 2
-rw-r--r-- drivers/input/touchscreen/tps6507x-ts.c | 12
-rw-r--r-- drivers/md/md.c | 2
-rw-r--r-- drivers/message/i2o/driver.c | 3
-rw-r--r-- drivers/misc/iwmc3200top/iwmc3200top.h | 4
-rw-r--r-- drivers/misc/iwmc3200top/main.c | 14
-rw-r--r-- drivers/mmc/host/mmc_spi.c | 4
-rw-r--r-- drivers/net/ethoc.c | 8
-rw-r--r-- drivers/net/wireless/ipw2x00/ipw2100.c | 70
-rw-r--r-- drivers/net/wireless/ipw2x00/ipw2100.h | 1
-rw-r--r-- drivers/net/wireless/ipw2x00/ipw2200.c | 196
-rw-r--r-- drivers/net/wireless/ipw2x00/ipw2200.h | 2
-rw-r--r-- drivers/of/Kconfig | 6
-rw-r--r-- drivers/of/Makefile | 1
-rw-r--r-- drivers/of/of_pci.c | 92
-rw-r--r-- drivers/pci/xen-pcifront.c | 31
-rw-r--r-- drivers/rtc/class.c | 7
-rw-r--r-- drivers/rtc/interface.c | 180
-rw-r--r-- drivers/rtc/rtc-at91rm9200.c | 28
-rw-r--r-- drivers/rtc/rtc-at91sam9.c | 28
-rw-r--r-- drivers/rtc/rtc-bfin.c | 27
-rw-r--r-- drivers/rtc/rtc-cmos.c | 111
-rw-r--r-- drivers/rtc/rtc-davinci.c | 55
-rw-r--r-- drivers/rtc/rtc-ds1511.c | 17
-rw-r--r-- drivers/rtc/rtc-ds1553.c | 17
-rw-r--r-- drivers/rtc/rtc-ds3232.c | 18
-rw-r--r-- drivers/rtc/rtc-jz4740.c | 7
-rw-r--r-- drivers/rtc/rtc-mc13xxx.c | 7
-rw-r--r-- drivers/rtc/rtc-mpc5121.c | 20
-rw-r--r-- drivers/rtc/rtc-mrst.c | 33
-rw-r--r-- drivers/rtc/rtc-mxc.c | 7
-rw-r--r-- drivers/rtc/rtc-nuc900.c | 15
-rw-r--r-- drivers/rtc/rtc-omap.c | 39
-rw-r--r-- drivers/rtc/rtc-pcap.c | 6
-rw-r--r-- drivers/rtc/rtc-pcf50633.c | 22
-rw-r--r-- drivers/rtc/rtc-pl030.c | 6
-rw-r--r-- drivers/rtc/rtc-pl031.c | 55
-rw-r--r-- drivers/rtc/rtc-proc.c | 8
-rw-r--r-- drivers/rtc/rtc-pxa.c | 44
-rw-r--r-- drivers/rtc/rtc-rs5c372.c | 52
-rw-r--r-- drivers/rtc/rtc-rx8025.c | 25
-rw-r--r-- drivers/rtc/rtc-s3c.c | 33
-rw-r--r-- drivers/rtc/rtc-sa1100.c | 160
-rw-r--r-- drivers/rtc/rtc-sh.c | 24
-rw-r--r-- drivers/rtc/rtc-stmp3xxx.c | 15
-rw-r--r-- drivers/rtc/rtc-test.c | 13
-rw-r--r-- drivers/rtc/rtc-twl.c | 13
-rw-r--r-- drivers/rtc/rtc-vr41xx.c | 32
-rw-r--r-- drivers/rtc/rtc-wm831x.c | 16
-rw-r--r-- drivers/rtc/rtc-wm8350.c | 21
-rw-r--r-- drivers/scsi/be2iscsi/be_main.c | 2
-rw-r--r-- drivers/scsi/ipr.c | 9
-rw-r--r-- drivers/scsi/libsas/sas_ata.c | 94
-rw-r--r-- drivers/scsi/libsas/sas_scsi_host.c | 14
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 2
-rw-r--r-- drivers/scsi/scsi_tgt_lib.c | 2
-rw-r--r-- drivers/spi/pxa2xx_spi.c | 2
-rw-r--r-- drivers/spi/pxa2xx_spi_pci.c | 2
-rw-r--r-- drivers/spi/xilinx_spi.c | 6
-rw-r--r-- drivers/xen/balloon.c | 16
-rw-r--r-- drivers/xen/events.c | 342
-rw-r--r-- drivers/xen/manage.c | 153
-rw-r--r-- drivers/xen/platform-pci.c | 3
119 files changed, 2216 insertions, 1649 deletions
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 5eb25eb3ea4..3b5c3189fd9 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -274,7 +274,7 @@ acpi_table_parse_srat(enum acpi_srat_type id,
274 274
275int __init acpi_numa_init(void) 275int __init acpi_numa_init(void)
276{ 276{
277 int ret = 0; 277 int cnt = 0;
278 278
279 /* 279 /*
280 * Should not limit number with cpu num that is from NR_CPUS or nr_cpus= 280 * Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
@@ -288,7 +288,7 @@ int __init acpi_numa_init(void)
288 acpi_parse_x2apic_affinity, 0); 288 acpi_parse_x2apic_affinity, 0);
289 acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY, 289 acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
290 acpi_parse_processor_affinity, 0); 290 acpi_parse_processor_affinity, 0);
291 ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY, 291 cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
292 acpi_parse_memory_affinity, 292 acpi_parse_memory_affinity,
293 NR_NODE_MEMBLKS); 293 NR_NODE_MEMBLKS);
294 } 294 }
@@ -297,7 +297,10 @@ int __init acpi_numa_init(void)
297 acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit); 297 acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
298 298
299 acpi_numa_arch_fixup(); 299 acpi_numa_arch_fixup();
300 return ret; 300
301 if (cnt <= 0)
302 return cnt ?: -ENOENT;
303 return 0;
301} 304}
302 305
303int acpi_get_pxm(acpi_handle h) 306int acpi_get_pxm(acpi_handle h)
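For context, the hunk above changes acpi_numa_init() so that it returns 0 only when at least one SRAT memory affinity entry was parsed, and a negative value otherwise (a parser error, or -ENOENT when the count is zero). A minimal, hedged sketch of how a caller might consume the new convention; the function name example_numa_setup is hypothetical and not part of this patch:

#include <linux/acpi.h>
#include <linux/init.h>

static int __init example_numa_setup(void)
{
	int ret = acpi_numa_init();

	/* With this patch, ret < 0 covers both parser failures and the
	 * "no SRAT memory affinity entries" case (0 becomes -ENOENT). */
	if (ret < 0)
		return ret;	/* caller falls back to non-NUMA setup */

	return 0;		/* SRAT described at least one memory range */
}
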
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c90c76aa7f8..4a6753009d7 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1589,9 +1589,9 @@ acpi_status __init acpi_os_initialize(void)
1589 1589
1590acpi_status __init acpi_os_initialize1(void) 1590acpi_status __init acpi_os_initialize1(void)
1591{ 1591{
1592 kacpid_wq = create_workqueue("kacpid"); 1592 kacpid_wq = alloc_workqueue("kacpid", 0, 1);
1593 kacpi_notify_wq = create_workqueue("kacpi_notify"); 1593 kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
1594 kacpi_hotplug_wq = create_workqueue("kacpi_hotplug"); 1594 kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);
1595 BUG_ON(!kacpid_wq); 1595 BUG_ON(!kacpid_wq);
1596 BUG_ON(!kacpi_notify_wq); 1596 BUG_ON(!kacpi_notify_wq);
1597 BUG_ON(!kacpi_hotplug_wq); 1597 BUG_ON(!kacpi_hotplug_wq);
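The conversion above moves the ACPI work queues from create_workqueue() to alloc_workqueue(name, flags, max_active), here with no WQ_* flags and max_active limited to one work item. A hedged sketch of the same pattern in a hypothetical module; all example_* names are placeholders:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_work_fn(struct work_struct *work)
{
	/* handle one deferred event */
}

static DECLARE_WORK(example_work, example_work_fn);

static int __init example_init(void)
{
	/* same pattern as the ACPI queues: no flags, max_active = 1 */
	example_wq = alloc_workqueue("example_wq", 0, 1);
	if (!example_wq)
		return -ENOMEM;

	queue_work(example_wq, &example_work);
	return 0;
}

static void __exit example_exit(void)
{
	/* drains any pending work before freeing the queue */
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
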
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index c2328aed083..75afa75a515 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -202,6 +202,18 @@ config SATA_DWC
202 202
203 If unsure, say N. 203 If unsure, say N.
204 204
205config SATA_DWC_DEBUG
206 bool "Debugging driver version"
207 depends on SATA_DWC
208 help
209 This option enables debugging output in the driver.
210
211config SATA_DWC_VDEBUG
212 bool "Verbose debug output"
213 depends on SATA_DWC_DEBUG
214 help
215 This option enables the taskfile dumping and NCQ debugging.
216
205config SATA_MV 217config SATA_MV
206 tristate "Marvell SATA support" 218 tristate "Marvell SATA support"
207 help 219 help
@@ -299,6 +311,12 @@ config PATA_AMD
299 311
300 If unsure, say N. 312 If unsure, say N.
301 313
314config PATA_ARASAN_CF
315 tristate "ARASAN CompactFlash PATA Controller Support"
316 select DMA_ENGINE
317 help
318 Say Y here to support the ARASAN CompactFlash PATA controller
319
302config PATA_ARTOP 320config PATA_ARTOP
303 tristate "ARTOP 6210/6260 PATA support" 321 tristate "ARTOP 6210/6260 PATA support"
304 depends on PCI 322 depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 27291aad6ca..8ac64e1aa05 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
12 12
13# SFF w/ custom DMA 13# SFF w/ custom DMA
14obj-$(CONFIG_PDC_ADMA) += pdc_adma.o 14obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
15obj-$(CONFIG_PATA_ARASAN_CF) += pata_arasan_cf.o
15obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o 16obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o
16obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o 17obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
17obj-$(CONFIG_SATA_SX4) += sata_sx4.o 18obj-$(CONFIG_SATA_SX4) += sata_sx4.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index b8d96ce37fc..e62f693be8e 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -175,8 +175,7 @@ static const struct ata_port_info ahci_port_info[] = {
175 { 175 {
176 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI | 176 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
177 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP), 177 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
178 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 178 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
179 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
180 .pio_mask = ATA_PIO4, 179 .pio_mask = ATA_PIO4,
181 .udma_mask = ATA_UDMA6, 180 .udma_mask = ATA_UDMA6,
182 .port_ops = &ahci_ops, 181 .port_ops = &ahci_ops,
@@ -260,6 +259,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
260 { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ 259 { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
261 { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */ 260 { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
262 { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */ 261 { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
262 { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
263 { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ 263 { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
264 264
265 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 265 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
@@ -383,6 +383,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
383 .class = PCI_CLASS_STORAGE_SATA_AHCI, 383 .class = PCI_CLASS_STORAGE_SATA_AHCI,
384 .class_mask = 0xffffff, 384 .class_mask = 0xffffff,
385 .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ 385 .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
386 { PCI_DEVICE(0x1b4b, 0x9125),
387 .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
388 { PCI_DEVICE(0x1b4b, 0x91a3),
389 .driver_data = board_ahci_yes_fbs },
386 390
387 /* Promise */ 391 /* Promise */
388 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ 392 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
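The new entries above (the Intel 0x2826 PBG RAID id and the Marvell 88se9125/91a3 parts) follow the usual libata pattern of stashing a board index in the PCI id table's driver_data. A hedged, stand-alone sketch of that pattern; the example_* names are not from this patch:

#include <linux/module.h>
#include <linux/pci.h>

enum { board_example };	/* index into a per-board ata_port_info table */

static const struct pci_device_id example_pci_tbl[] = {
	/* PCI_VDEVICE() fills every field before .driver_data, so the bare
	 * value that follows becomes the board index */
	{ PCI_VDEVICE(INTEL, 0x2826), board_example },
	/* explicit form with .driver_data named */
	{ PCI_DEVICE(0x1b4b, 0x9125), .driver_data = board_example },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned long board_id = ent->driver_data;

	/* select the matching ata_port_info / host flags from board_id */
	(void)board_id;
	return 0;
}
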
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 3e606c34f57..ccaf0812205 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -213,10 +213,8 @@ enum {
213 213
214 /* ap->flags bits */ 214 /* ap->flags bits */
215 215
216 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 216 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
217 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | 217 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN,
218 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
219 ATA_FLAG_LPM,
220 218
221 ICH_MAP = 0x90, /* ICH MAP register */ 219 ICH_MAP = 0x90, /* ICH MAP register */
222 220
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 6981f7680a0..721d38bfa33 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -237,7 +237,7 @@ static struct pci_device_id ata_generic[] = {
237#endif 237#endif
238 /* Intel, IDE class device */ 238 /* Intel, IDE class device */
239 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 239 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
240 PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 240 PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL,
241 .driver_data = ATA_GEN_INTEL_IDER }, 241 .driver_data = ATA_GEN_INTEL_IDER },
242 /* Must come last. If you add entries adjust this table appropriately */ 242 /* Must come last. If you add entries adjust this table appropriately */
243 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL), 243 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL),
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 6cb14ca8ee8..cdec4ab3b15 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -230,7 +230,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
230 { 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 230 { 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
231 231
232 /* SATA ports */ 232 /* SATA ports */
233 233
234 /* 82801EB (ICH5) */ 234 /* 82801EB (ICH5) */
235 { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, 235 { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
236 /* 82801EB (ICH5) */ 236 /* 82801EB (ICH5) */
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 8b5ea399a4f..a791b8ce629 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -660,8 +660,7 @@ static int ata_acpi_filter_tf(struct ata_device *dev,
660 * @dev: target ATA device 660 * @dev: target ATA device
661 * @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7) 661 * @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7)
662 * 662 *
663 * Outputs ATA taskfile to standard ATA host controller using MMIO 663 * Outputs ATA taskfile to standard ATA host controller.
664 * or PIO as indicated by the ATA_FLAG_MMIO flag.
665 * Writes the control, feature, nsect, lbal, lbam, and lbah registers. 664 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
666 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect, 665 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
667 * hob_lbal, hob_lbam, and hob_lbah. 666 * hob_lbal, hob_lbam, and hob_lbah.
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index d4e52e21485..b91e19cab10 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4210,7 +4210,7 @@ static int glob_match (const char *text, const char *pattern)
4210 return 0; /* End of both strings: match */ 4210 return 0; /* End of both strings: match */
4211 return 1; /* No match */ 4211 return 1; /* No match */
4212} 4212}
4213 4213
4214static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 4214static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4215{ 4215{
4216 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 4216 unsigned char model_num[ATA_ID_PROD_LEN + 1];
@@ -5479,7 +5479,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
5479 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 5479 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5480 if (!ap) 5480 if (!ap)
5481 return NULL; 5481 return NULL;
5482 5482
5483 ap->pflags |= ATA_PFLAG_INITIALIZING; 5483 ap->pflags |= ATA_PFLAG_INITIALIZING;
5484 ap->lock = &host->lock; 5484 ap->lock = &host->lock;
5485 ap->print_id = -1; 5485 ap->print_id = -1;
@@ -5887,21 +5887,9 @@ void ata_host_init(struct ata_host *host, struct device *dev,
5887 host->ops = ops; 5887 host->ops = ops;
5888} 5888}
5889 5889
5890 5890int ata_port_probe(struct ata_port *ap)
5891static void async_port_probe(void *data, async_cookie_t cookie)
5892{ 5891{
5893 int rc; 5892 int rc = 0;
5894 struct ata_port *ap = data;
5895
5896 /*
5897 * If we're not allowed to scan this host in parallel,
5898 * we need to wait until all previous scans have completed
5899 * before going further.
5900 * Jeff Garzik says this is only within a controller, so we
5901 * don't need to wait for port 0, only for later ports.
5902 */
5903 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
5904 async_synchronize_cookie(cookie);
5905 5893
5906 /* probe */ 5894 /* probe */
5907 if (ap->ops->error_handler) { 5895 if (ap->ops->error_handler) {
@@ -5927,23 +5915,33 @@ static void async_port_probe(void *data, async_cookie_t cookie)
5927 DPRINTK("ata%u: bus probe begin\n", ap->print_id); 5915 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5928 rc = ata_bus_probe(ap); 5916 rc = ata_bus_probe(ap);
5929 DPRINTK("ata%u: bus probe end\n", ap->print_id); 5917 DPRINTK("ata%u: bus probe end\n", ap->print_id);
5930
5931 if (rc) {
5932 /* FIXME: do something useful here?
5933 * Current libata behavior will
5934 * tear down everything when
5935 * the module is removed
5936 * or the h/w is unplugged.
5937 */
5938 }
5939 } 5918 }
5919 return rc;
5920}
5921
5922
5923static void async_port_probe(void *data, async_cookie_t cookie)
5924{
5925 struct ata_port *ap = data;
5926
5927 /*
5928 * If we're not allowed to scan this host in parallel,
5929 * we need to wait until all previous scans have completed
5930 * before going further.
5931 * Jeff Garzik says this is only within a controller, so we
5932 * don't need to wait for port 0, only for later ports.
5933 */
5934 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
5935 async_synchronize_cookie(cookie);
5936
5937 (void)ata_port_probe(ap);
5940 5938
5941 /* in order to keep device order, we need to synchronize at this point */ 5939 /* in order to keep device order, we need to synchronize at this point */
5942 async_synchronize_cookie(cookie); 5940 async_synchronize_cookie(cookie);
5943 5941
5944 ata_scsi_scan_host(ap, 1); 5942 ata_scsi_scan_host(ap, 1);
5945
5946} 5943}
5944
5947/** 5945/**
5948 * ata_host_register - register initialized ATA host 5946 * ata_host_register - register initialized ATA host
5949 * @host: ATA host to register 5947 * @host: ATA host to register
@@ -5983,7 +5981,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5983 for (i = 0; i < host->n_ports; i++) 5981 for (i = 0; i < host->n_ports; i++)
5984 host->ports[i]->print_id = ata_print_id++; 5982 host->ports[i]->print_id = ata_print_id++;
5985 5983
5986 5984
5987 /* Create associated sysfs transport objects */ 5985 /* Create associated sysfs transport objects */
5988 for (i = 0; i < host->n_ports; i++) { 5986 for (i = 0; i < host->n_ports; i++) {
5989 rc = ata_tport_add(host->dev,host->ports[i]); 5987 rc = ata_tport_add(host->dev,host->ports[i]);
@@ -6471,7 +6469,7 @@ static int __init ata_init(void)
6471 ata_sff_exit(); 6469 ata_sff_exit();
6472 rc = -ENOMEM; 6470 rc = -ENOMEM;
6473 goto err_out; 6471 goto err_out;
6474 } 6472 }
6475 6473
6476 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 6474 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6477 return 0; 6475 return 0;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 17a637877d0..df3f3140c9c 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -587,11 +587,43 @@ static void ata_eh_unload(struct ata_port *ap)
587void ata_scsi_error(struct Scsi_Host *host) 587void ata_scsi_error(struct Scsi_Host *host)
588{ 588{
589 struct ata_port *ap = ata_shost_to_port(host); 589 struct ata_port *ap = ata_shost_to_port(host);
590 int i;
591 unsigned long flags; 590 unsigned long flags;
591 LIST_HEAD(eh_work_q);
592 592
593 DPRINTK("ENTER\n"); 593 DPRINTK("ENTER\n");
594 594
595 spin_lock_irqsave(host->host_lock, flags);
596 list_splice_init(&host->eh_cmd_q, &eh_work_q);
597 spin_unlock_irqrestore(host->host_lock, flags);
598
599 ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
600
601 /* If we timed raced normal completion and there is nothing to
602 recover nr_timedout == 0 why exactly are we doing error recovery ? */
603 ata_scsi_port_error_handler(host, ap);
604
605 /* finish or retry handled scmd's and clean up */
606 WARN_ON(host->host_failed || !list_empty(&eh_work_q));
607
608 DPRINTK("EXIT\n");
609}
610
611/**
612 * ata_scsi_cmd_error_handler - error callback for a list of commands
613 * @host: scsi host containing the port
614 * @ap: ATA port within the host
615 * @eh_work_q: list of commands to process
616 *
617 * process the given list of commands and return those finished to the
618 * ap->eh_done_q. This function is the first part of the libata error
619 * handler which processes a given list of failed commands.
620 */
621void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
622 struct list_head *eh_work_q)
623{
624 int i;
625 unsigned long flags;
626
595 /* make sure sff pio task is not running */ 627 /* make sure sff pio task is not running */
596 ata_sff_flush_pio_task(ap); 628 ata_sff_flush_pio_task(ap);
597 629
@@ -627,7 +659,7 @@ void ata_scsi_error(struct Scsi_Host *host)
627 if (ap->ops->lost_interrupt) 659 if (ap->ops->lost_interrupt)
628 ap->ops->lost_interrupt(ap); 660 ap->ops->lost_interrupt(ap);
629 661
630 list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) { 662 list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
631 struct ata_queued_cmd *qc; 663 struct ata_queued_cmd *qc;
632 664
633 for (i = 0; i < ATA_MAX_QUEUE; i++) { 665 for (i = 0; i < ATA_MAX_QUEUE; i++) {
@@ -671,8 +703,20 @@ void ata_scsi_error(struct Scsi_Host *host)
671 } else 703 } else
672 spin_unlock_wait(ap->lock); 704 spin_unlock_wait(ap->lock);
673 705
674 /* If we timed raced normal completion and there is nothing to 706}
675 recover nr_timedout == 0 why exactly are we doing error recovery ? */ 707EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
708
709/**
710 * ata_scsi_port_error_handler - recover the port after the commands
711 * @host: SCSI host containing the port
712 * @ap: the ATA port
713 *
714 * Handle the recovery of the port @ap after all the commands
715 * have been recovered.
716 */
717void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
718{
719 unsigned long flags;
676 720
677 /* invoke error handler */ 721 /* invoke error handler */
678 if (ap->ops->error_handler) { 722 if (ap->ops->error_handler) {
@@ -761,9 +805,6 @@ void ata_scsi_error(struct Scsi_Host *host)
761 ap->ops->eng_timeout(ap); 805 ap->ops->eng_timeout(ap);
762 } 806 }
763 807
764 /* finish or retry handled scmd's and clean up */
765 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
766
767 scsi_eh_flush_done_q(&ap->eh_done_q); 808 scsi_eh_flush_done_q(&ap->eh_done_q);
768 809
769 /* clean up */ 810 /* clean up */
@@ -784,9 +825,8 @@ void ata_scsi_error(struct Scsi_Host *host)
784 wake_up_all(&ap->eh_wait_q); 825 wake_up_all(&ap->eh_wait_q);
785 826
786 spin_unlock_irqrestore(ap->lock, flags); 827 spin_unlock_irqrestore(ap->lock, flags);
787
788 DPRINTK("EXIT\n");
789} 828}
829EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
790 830
791/** 831/**
792 * ata_port_wait_eh - Wait for the currently pending EH to complete 832 * ata_port_wait_eh - Wait for the currently pending EH to complete
@@ -1618,7 +1658,7 @@ static void ata_eh_analyze_serror(struct ata_link *link)
1618 * host links. For disabled PMP links, only N bit is 1658 * host links. For disabled PMP links, only N bit is
1619 * considered as X bit is left at 1 for link plugging. 1659 * considered as X bit is left at 1 for link plugging.
1620 */ 1660 */
1621 if (link->lpm_policy != ATA_LPM_MAX_POWER) 1661 if (link->lpm_policy > ATA_LPM_MAX_POWER)
1622 hotplug_mask = 0; /* hotplug doesn't work w/ LPM */ 1662 hotplug_mask = 0; /* hotplug doesn't work w/ LPM */
1623 else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) 1663 else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1624 hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; 1664 hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
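The split above turns the monolithic ata_scsi_error() into two exported halves so a transport such as libsas can run them around its own recovery steps. A hedged sketch that mirrors the restructured ata_scsi_error() body shown in the hunk; example_host_eh_strategy is a hypothetical caller:

#include <linux/libata.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <scsi/scsi_host.h>

static void example_host_eh_strategy(struct Scsi_Host *shost,
				     struct ata_port *ap)
{
	unsigned long flags;
	LIST_HEAD(eh_work_q);

	/* take over the failed commands queued by the SCSI midlayer */
	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* first half: triage the failed commands onto ap->eh_done_q */
	ata_scsi_cmd_error_handler(shost, ap, &eh_work_q);

	/* ...a transport could run its own link recovery here... */

	/* second half: recover the port and flush the done queue */
	ata_scsi_port_error_handler(shost, ap);
}
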
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 600f6353ecf..a8341999135 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2056,6 +2056,17 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
2056 ATA_ID_SERNO_LEN); 2056 ATA_ID_SERNO_LEN);
2057 num += ATA_ID_SERNO_LEN; 2057 num += ATA_ID_SERNO_LEN;
2058 2058
2059 if (ata_id_has_wwn(args->id)) {
2060 /* SAT defined lu world wide name */
2061 /* piv=0, assoc=lu, code_set=binary, designator=NAA */
2062 rbuf[num + 0] = 1;
2063 rbuf[num + 1] = 3;
2064 rbuf[num + 3] = ATA_ID_WWN_LEN;
2065 num += 4;
2066 ata_id_string(args->id, (unsigned char *) rbuf + num,
2067 ATA_ID_WWN, ATA_ID_WWN_LEN);
2068 num += ATA_ID_WWN_LEN;
2069 }
2059 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */ 2070 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */
2060 return 0; 2071 return 0;
2061} 2072}
@@ -3759,7 +3770,7 @@ struct ata_port *ata_sas_port_alloc(struct ata_host *host,
3759 return NULL; 3770 return NULL;
3760 3771
3761 ap->port_no = 0; 3772 ap->port_no = 0;
3762 ap->lock = shost->host_lock; 3773 ap->lock = &host->lock;
3763 ap->pio_mask = port_info->pio_mask; 3774 ap->pio_mask = port_info->pio_mask;
3764 ap->mwdma_mask = port_info->mwdma_mask; 3775 ap->mwdma_mask = port_info->mwdma_mask;
3765 ap->udma_mask = port_info->udma_mask; 3776 ap->udma_mask = port_info->udma_mask;
@@ -3821,7 +3832,7 @@ int ata_sas_port_init(struct ata_port *ap)
3821 3832
3822 if (!rc) { 3833 if (!rc) {
3823 ap->print_id = ata_print_id++; 3834 ap->print_id = ata_print_id++;
3824 rc = ata_bus_probe(ap); 3835 rc = ata_port_probe(ap);
3825 } 3836 }
3826 3837
3827 return rc; 3838 return rc;
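For reference, the INQUIRY VPD page 0x83 hunk earlier in this file appends an SPC designation descriptor built from the IDENTIFY DEVICE world wide name. A hedged sketch of the bytes it emits, shown only for illustration (layout per SPC; the struct name is hypothetical):

#include <linux/ata.h>
#include <linux/types.h>

/* One NAA designator as emitted by the ata_scsiop_inq_83() change above. */
struct example_naa_designator {
	u8 code_set;		/* 0x01: binary code set */
	u8 type;		/* 0x03: piv = 0, assoc = logical unit, type = NAA */
	u8 reserved;
	u8 len;			/* ATA_ID_WWN_LEN */
	u8 wwn[ATA_ID_WWN_LEN];	/* copied from the IDENTIFY WWN words */
} __packed;
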
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index af6141bb1ba..cf7acbc0cfc 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1302,6 +1302,18 @@ fsm_start:
1302} 1302}
1303EXPORT_SYMBOL_GPL(ata_sff_hsm_move); 1303EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
1304 1304
1305void ata_sff_queue_work(struct work_struct *work)
1306{
1307 queue_work(ata_sff_wq, work);
1308}
1309EXPORT_SYMBOL_GPL(ata_sff_queue_work);
1310
1311void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
1312{
1313 queue_delayed_work(ata_sff_wq, dwork, delay);
1314}
1315EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);
1316
1305void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay) 1317void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
1306{ 1318{
1307 struct ata_port *ap = link->ap; 1319 struct ata_port *ap = link->ap;
@@ -1311,8 +1323,7 @@ void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
1311 ap->sff_pio_task_link = link; 1323 ap->sff_pio_task_link = link;
1312 1324
1313 /* may fail if ata_sff_flush_pio_task() in progress */ 1325 /* may fail if ata_sff_flush_pio_task() in progress */
1314 queue_delayed_work(ata_sff_wq, &ap->sff_pio_task, 1326 ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
1315 msecs_to_jiffies(delay));
1316} 1327}
1317EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task); 1328EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
1318 1329
@@ -1336,7 +1347,7 @@ static void ata_sff_pio_task(struct work_struct *work)
1336 u8 status; 1347 u8 status;
1337 int poll_next; 1348 int poll_next;
1338 1349
1339 BUG_ON(ap->sff_pio_task_link == NULL); 1350 BUG_ON(ap->sff_pio_task_link == NULL);
1340 /* qc can be NULL if timeout occurred */ 1351 /* qc can be NULL if timeout occurred */
1341 qc = ata_qc_from_tag(ap, link->active_tag); 1352 qc = ata_qc_from_tag(ap, link->active_tag);
1342 if (!qc) { 1353 if (!qc) {
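The two wrappers added above let library users queue work on libata's private SFF workqueue without touching ata_sff_wq directly; the new pata_arasan_cf driver below is their first user. A hedged fragment showing the intended usage; the example_* names are placeholders:

#include <linux/libata.h>
#include <linux/workqueue.h>

struct example_host_priv {
	struct work_struct xfer_work;	/* heavy data transfer, process context */
	struct delayed_work poll_work;	/* re-poll status a jiffy later */
};

static void example_start_transfer(struct example_host_priv *priv)
{
	/* runs the transfer handler on libata's SFF workqueue, not in irq context */
	ata_sff_queue_work(&priv->xfer_work);
}

static void example_check_again_later(struct example_host_priv *priv)
{
	/* matches the pata_arasan_cf pattern of re-checking BSY/DRQ later */
	ata_sff_queue_delayed_work(&priv->poll_work, 1);
}
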
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index a9be110dbf5..773de97988a 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -103,6 +103,7 @@ extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
103extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg); 103extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
104extern struct ata_port *ata_port_alloc(struct ata_host *host); 104extern struct ata_port *ata_port_alloc(struct ata_host *host);
105extern const char *sata_spd_string(unsigned int spd); 105extern const char *sata_spd_string(unsigned int spd);
106extern int ata_port_probe(struct ata_port *ap);
106 107
107/* libata-acpi.c */ 108/* libata-acpi.c */
108#ifdef CONFIG_ATA_ACPI 109#ifdef CONFIG_ATA_ACPI
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index c8d47034d5e..91949d99755 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -245,7 +245,7 @@ static struct ata_port_operations pacpi_ops = {
245static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id) 245static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
246{ 246{
247 static const struct ata_port_info info = { 247 static const struct ata_port_info info = {
248 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 248 .flags = ATA_FLAG_SLAVE_POSS,
249 249
250 .pio_mask = ATA_PIO4, 250 .pio_mask = ATA_PIO4,
251 .mwdma_mask = ATA_MWDMA2, 251 .mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
new file mode 100644
index 00000000000..65cee74605b
--- /dev/null
+++ b/drivers/ata/pata_arasan_cf.c
@@ -0,0 +1,983 @@
1/*
2 * drivers/ata/pata_arasan_cf.c
3 *
4 * Arasan Compact Flash host controller source file
5 *
6 * Copyright (C) 2011 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14/*
15 * The Arasan CompactFlash Device Controller IP core has three basic modes of
16 * operation: PC card ATA using I/O mode, PC card ATA using memory mode, PC card
17 * ATA using true IDE modes. This driver supports only True IDE mode currently.
18 *
19 * Arasan CF Controller shares global irq register with Arasan XD Controller.
20 *
21 * Tested on arch/arm/mach-spear13xx
22 */
23
24#include <linux/ata.h>
25#include <linux/clk.h>
26#include <linux/completion.h>
27#include <linux/delay.h>
28#include <linux/dmaengine.h>
29#include <linux/io.h>
30#include <linux/irq.h>
31#include <linux/kernel.h>
32#include <linux/libata.h>
33#include <linux/module.h>
34#include <linux/pata_arasan_cf_data.h>
35#include <linux/platform_device.h>
36#include <linux/pm.h>
37#include <linux/slab.h>
38#include <linux/spinlock.h>
39#include <linux/types.h>
40#include <linux/workqueue.h>
41
42#define DRIVER_NAME "arasan_cf"
43#define TIMEOUT msecs_to_jiffies(3000)
44
45/* Registers */
46/* CompactFlash Interface Status */
47#define CFI_STS 0x000
48 #define STS_CHG (1)
49 #define BIN_AUDIO_OUT (1 << 1)
50 #define CARD_DETECT1 (1 << 2)
51 #define CARD_DETECT2 (1 << 3)
52 #define INP_ACK (1 << 4)
53 #define CARD_READY (1 << 5)
54 #define IO_READY (1 << 6)
55 #define B16_IO_PORT_SEL (1 << 7)
56/* IRQ */
57#define IRQ_STS 0x004
58/* Interrupt Enable */
59#define IRQ_EN 0x008
60 #define CARD_DETECT_IRQ (1)
61 #define STATUS_CHNG_IRQ (1 << 1)
62 #define MEM_MODE_IRQ (1 << 2)
63 #define IO_MODE_IRQ (1 << 3)
64 #define TRUE_IDE_MODE_IRQ (1 << 8)
65 #define PIO_XFER_ERR_IRQ (1 << 9)
66 #define BUF_AVAIL_IRQ (1 << 10)
67 #define XFER_DONE_IRQ (1 << 11)
68 #define IGNORED_IRQS (STATUS_CHNG_IRQ | MEM_MODE_IRQ | IO_MODE_IRQ |\
69 TRUE_IDE_MODE_IRQ)
70 #define TRUE_IDE_IRQS (CARD_DETECT_IRQ | PIO_XFER_ERR_IRQ |\
71 BUF_AVAIL_IRQ | XFER_DONE_IRQ)
72/* Operation Mode */
73#define OP_MODE 0x00C
74 #define CARD_MODE_MASK (0x3)
75 #define MEM_MODE (0x0)
76 #define IO_MODE (0x1)
77 #define TRUE_IDE_MODE (0x2)
78
79 #define CARD_TYPE_MASK (1 << 2)
80 #define CF_CARD (0)
81 #define CF_PLUS_CARD (1 << 2)
82
83 #define CARD_RESET (1 << 3)
84 #define CFHOST_ENB (1 << 4)
85 #define OUTPUTS_TRISTATE (1 << 5)
86 #define ULTRA_DMA_ENB (1 << 8)
87 #define MULTI_WORD_DMA_ENB (1 << 9)
88 #define DRQ_BLOCK_SIZE_MASK (0x3 << 11)
89 #define DRQ_BLOCK_SIZE_512 (0)
90 #define DRQ_BLOCK_SIZE_1024 (1 << 11)
91 #define DRQ_BLOCK_SIZE_2048 (2 << 11)
92 #define DRQ_BLOCK_SIZE_4096 (3 << 11)
93/* CF Interface Clock Configuration */
94#define CLK_CFG 0x010
95 #define CF_IF_CLK_MASK (0XF)
96/* CF Timing Mode Configuration */
97#define TM_CFG 0x014
98 #define MEM_MODE_TIMING_MASK (0x3)
99 #define MEM_MODE_TIMING_250NS (0x0)
100 #define MEM_MODE_TIMING_120NS (0x1)
101 #define MEM_MODE_TIMING_100NS (0x2)
102 #define MEM_MODE_TIMING_80NS (0x3)
103
104 #define IO_MODE_TIMING_MASK (0x3 << 2)
105 #define IO_MODE_TIMING_250NS (0x0 << 2)
106 #define IO_MODE_TIMING_120NS (0x1 << 2)
107 #define IO_MODE_TIMING_100NS (0x2 << 2)
108 #define IO_MODE_TIMING_80NS (0x3 << 2)
109
110 #define TRUEIDE_PIO_TIMING_MASK (0x7 << 4)
111 #define TRUEIDE_PIO_TIMING_SHIFT 4
112
113 #define TRUEIDE_MWORD_DMA_TIMING_MASK (0x7 << 7)
114 #define TRUEIDE_MWORD_DMA_TIMING_SHIFT 7
115
116 #define ULTRA_DMA_TIMING_MASK (0x7 << 10)
117 #define ULTRA_DMA_TIMING_SHIFT 10
118/* CF Transfer Address */
119#define XFER_ADDR 0x014
120 #define XFER_ADDR_MASK (0x7FF)
121 #define MAX_XFER_COUNT 0x20000u
122/* Transfer Control */
123#define XFER_CTR 0x01C
124 #define XFER_COUNT_MASK (0x3FFFF)
125 #define ADDR_INC_DISABLE (1 << 24)
126 #define XFER_WIDTH_MASK (1 << 25)
127 #define XFER_WIDTH_8B (0)
128 #define XFER_WIDTH_16B (1 << 25)
129
130 #define MEM_TYPE_MASK (1 << 26)
131 #define MEM_TYPE_COMMON (0)
132 #define MEM_TYPE_ATTRIBUTE (1 << 26)
133
134 #define MEM_IO_XFER_MASK (1 << 27)
135 #define MEM_XFER (0)
136 #define IO_XFER (1 << 27)
137
138 #define DMA_XFER_MODE (1 << 28)
139
140 #define AHB_BUS_NORMAL_PIO_OPRTN (~(1 << 29))
141 #define XFER_DIR_MASK (1 << 30)
142 #define XFER_READ (0)
143 #define XFER_WRITE (1 << 30)
144
145 #define XFER_START (1 << 31)
146/* Write Data Port */
147#define WRITE_PORT 0x024
148/* Read Data Port */
149#define READ_PORT 0x028
150/* ATA Data Port */
151#define ATA_DATA_PORT 0x030
152 #define ATA_DATA_PORT_MASK (0xFFFF)
153/* ATA Error/Features */
154#define ATA_ERR_FTR 0x034
155/* ATA Sector Count */
156#define ATA_SC 0x038
157/* ATA Sector Number */
158#define ATA_SN 0x03C
159/* ATA Cylinder Low */
160#define ATA_CL 0x040
161/* ATA Cylinder High */
162#define ATA_CH 0x044
163/* ATA Select Card/Head */
164#define ATA_SH 0x048
165/* ATA Status-Command */
166#define ATA_STS_CMD 0x04C
167/* ATA Alternate Status/Device Control */
168#define ATA_ASTS_DCTR 0x050
169/* Extended Write Data Port 0x200-0x3FC */
170#define EXT_WRITE_PORT 0x200
171/* Extended Read Data Port 0x400-0x5FC */
172#define EXT_READ_PORT 0x400
173 #define FIFO_SIZE 0x200u
174/* Global Interrupt Status */
175#define GIRQ_STS 0x800
176/* Global Interrupt Status enable */
177#define GIRQ_STS_EN 0x804
178/* Global Interrupt Signal enable */
179#define GIRQ_SGN_EN 0x808
180 #define GIRQ_CF (1)
181 #define GIRQ_XD (1 << 1)
182
183/* Compact Flash Controller Dev Structure */
184struct arasan_cf_dev {
185 /* pointer to ata_host structure */
186 struct ata_host *host;
187 /* clk structure, only if HAVE_CLK is defined */
188#ifdef CONFIG_HAVE_CLK
189 struct clk *clk;
190#endif
191
192 /* physical base address of controller */
193 dma_addr_t pbase;
194 /* virtual base address of controller */
195 void __iomem *vbase;
196 /* irq number*/
197 int irq;
198
199 /* status to be updated to framework regarding DMA transfer */
200 u8 dma_status;
201 /* Card is present or Not */
202 u8 card_present;
203
204 /* dma specific */
205 /* Completion for transfer complete interrupt from controller */
206 struct completion cf_completion;
207 /* Completion for DMA transfer complete. */
208 struct completion dma_completion;
209 /* Dma channel allocated */
210 struct dma_chan *dma_chan;
211 /* Mask for DMA transfers */
212 dma_cap_mask_t mask;
213 /* dma channel private data */
214 void *dma_priv;
215 /* DMA transfer work */
216 struct work_struct work;
217 /* DMA delayed finish work */
218 struct delayed_work dwork;
219 /* qc to be transferred using DMA */
220 struct ata_queued_cmd *qc;
221};
222
223static struct scsi_host_template arasan_cf_sht = {
224 ATA_BASE_SHT(DRIVER_NAME),
225 .sg_tablesize = SG_NONE,
226 .dma_boundary = 0xFFFFFFFFUL,
227};
228
229static void cf_dumpregs(struct arasan_cf_dev *acdev)
230{
231 struct device *dev = acdev->host->dev;
232
233 dev_dbg(dev, ": =========== REGISTER DUMP ===========");
234 dev_dbg(dev, ": CFI_STS: %x", readl(acdev->vbase + CFI_STS));
235 dev_dbg(dev, ": IRQ_STS: %x", readl(acdev->vbase + IRQ_STS));
236 dev_dbg(dev, ": IRQ_EN: %x", readl(acdev->vbase + IRQ_EN));
237 dev_dbg(dev, ": OP_MODE: %x", readl(acdev->vbase + OP_MODE));
238 dev_dbg(dev, ": CLK_CFG: %x", readl(acdev->vbase + CLK_CFG));
239 dev_dbg(dev, ": TM_CFG: %x", readl(acdev->vbase + TM_CFG));
240 dev_dbg(dev, ": XFER_CTR: %x", readl(acdev->vbase + XFER_CTR));
241 dev_dbg(dev, ": GIRQ_STS: %x", readl(acdev->vbase + GIRQ_STS));
242 dev_dbg(dev, ": GIRQ_STS_EN: %x", readl(acdev->vbase + GIRQ_STS_EN));
243 dev_dbg(dev, ": GIRQ_SGN_EN: %x", readl(acdev->vbase + GIRQ_SGN_EN));
244 dev_dbg(dev, ": =====================================");
245}
246
247/* Enable/Disable global interrupts shared between CF and XD ctrlr. */
248static void cf_ginterrupt_enable(struct arasan_cf_dev *acdev, bool enable)
249{
250 /* enable should be 0 or 1 */
251 writel(enable, acdev->vbase + GIRQ_STS_EN);
252 writel(enable, acdev->vbase + GIRQ_SGN_EN);
253}
254
255/* Enable/Disable CF interrupts */
256static inline void
257cf_interrupt_enable(struct arasan_cf_dev *acdev, u32 mask, bool enable)
258{
259 u32 val = readl(acdev->vbase + IRQ_EN);
260 /* clear & enable/disable irqs */
261 if (enable) {
262 writel(mask, acdev->vbase + IRQ_STS);
263 writel(val | mask, acdev->vbase + IRQ_EN);
264 } else
265 writel(val & ~mask, acdev->vbase + IRQ_EN);
266}
267
268static inline void cf_card_reset(struct arasan_cf_dev *acdev)
269{
270 u32 val = readl(acdev->vbase + OP_MODE);
271
272 writel(val | CARD_RESET, acdev->vbase + OP_MODE);
273 udelay(200);
274 writel(val & ~CARD_RESET, acdev->vbase + OP_MODE);
275}
276
277static inline void cf_ctrl_reset(struct arasan_cf_dev *acdev)
278{
279 writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
280 acdev->vbase + OP_MODE);
281 writel(readl(acdev->vbase + OP_MODE) | CFHOST_ENB,
282 acdev->vbase + OP_MODE);
283}
284
285static void cf_card_detect(struct arasan_cf_dev *acdev, bool hotplugged)
286{
287 struct ata_port *ap = acdev->host->ports[0];
288 struct ata_eh_info *ehi = &ap->link.eh_info;
289 u32 val = readl(acdev->vbase + CFI_STS);
290
291 /* Both CD1 & CD2 should be low if card inserted completely */
292 if (!(val & (CARD_DETECT1 | CARD_DETECT2))) {
293 if (acdev->card_present)
294 return;
295 acdev->card_present = 1;
296 cf_card_reset(acdev);
297 } else {
298 if (!acdev->card_present)
299 return;
300 acdev->card_present = 0;
301 }
302
303 if (hotplugged) {
304 ata_ehi_hotplugged(ehi);
305 ata_port_freeze(ap);
306 }
307}
308
309static int cf_init(struct arasan_cf_dev *acdev)
310{
311 struct arasan_cf_pdata *pdata = dev_get_platdata(acdev->host->dev);
312 unsigned long flags;
313 int ret = 0;
314
315#ifdef CONFIG_HAVE_CLK
316 ret = clk_enable(acdev->clk);
317 if (ret) {
318 dev_dbg(acdev->host->dev, "clock enable failed");
319 return ret;
320 }
321#endif
322
323 spin_lock_irqsave(&acdev->host->lock, flags);
324 /* configure CF interface clock */
325 writel((pdata->cf_if_clk <= CF_IF_CLK_200M) ? pdata->cf_if_clk :
326 CF_IF_CLK_166M, acdev->vbase + CLK_CFG);
327
328 writel(TRUE_IDE_MODE | CFHOST_ENB, acdev->vbase + OP_MODE);
329 cf_interrupt_enable(acdev, CARD_DETECT_IRQ, 1);
330 cf_ginterrupt_enable(acdev, 1);
331 spin_unlock_irqrestore(&acdev->host->lock, flags);
332
333 return ret;
334}
335
336static void cf_exit(struct arasan_cf_dev *acdev)
337{
338 unsigned long flags;
339
340 spin_lock_irqsave(&acdev->host->lock, flags);
341 cf_ginterrupt_enable(acdev, 0);
342 cf_interrupt_enable(acdev, TRUE_IDE_IRQS, 0);
343 cf_card_reset(acdev);
344 writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
345 acdev->vbase + OP_MODE);
346 spin_unlock_irqrestore(&acdev->host->lock, flags);
347#ifdef CONFIG_HAVE_CLK
348 clk_disable(acdev->clk);
349#endif
350}
351
352static void dma_callback(void *dev)
353{
354 struct arasan_cf_dev *acdev = (struct arasan_cf_dev *) dev;
355
356 complete(&acdev->dma_completion);
357}
358
359static bool filter(struct dma_chan *chan, void *slave)
360{
361 chan->private = slave;
362 return true;
363}
364
365static inline void dma_complete(struct arasan_cf_dev *acdev)
366{
367 struct ata_queued_cmd *qc = acdev->qc;
368 unsigned long flags;
369
370 acdev->qc = NULL;
371 ata_sff_interrupt(acdev->irq, acdev->host);
372
373 spin_lock_irqsave(&acdev->host->lock, flags);
374 if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
375 ata_ehi_push_desc(&qc->ap->link.eh_info, "DMA Failed: Timeout");
376 spin_unlock_irqrestore(&acdev->host->lock, flags);
377}
378
379static inline int wait4buf(struct arasan_cf_dev *acdev)
380{
381 if (!wait_for_completion_timeout(&acdev->cf_completion, TIMEOUT)) {
382 u32 rw = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
383
384 dev_err(acdev->host->dev, "%s TimeOut", rw ? "write" : "read");
385 return -ETIMEDOUT;
386 }
387
 388 /* Check if PIO Error interrupt has occurred */
389 if (acdev->dma_status & ATA_DMA_ERR)
390 return -EAGAIN;
391
392 return 0;
393}
394
395static int
396dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
397{
398 struct dma_async_tx_descriptor *tx;
399 struct dma_chan *chan = acdev->dma_chan;
400 dma_cookie_t cookie;
401 unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
402 DMA_COMPL_SKIP_DEST_UNMAP;
403 int ret = 0;
404
405 tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
406 if (!tx) {
407 dev_err(acdev->host->dev, "device_prep_dma_memcpy failed\n");
408 return -EAGAIN;
409 }
410
411 tx->callback = dma_callback;
412 tx->callback_param = acdev;
413 cookie = tx->tx_submit(tx);
414
415 ret = dma_submit_error(cookie);
416 if (ret) {
417 dev_err(acdev->host->dev, "dma_submit_error\n");
418 return ret;
419 }
420
421 chan->device->device_issue_pending(chan);
422
423 /* Wait for DMA to complete */
424 if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) {
425 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
426 dev_err(acdev->host->dev, "wait_for_completion_timeout\n");
427 return -ETIMEDOUT;
428 }
429
430 return ret;
431}
432
433static int sg_xfer(struct arasan_cf_dev *acdev, struct scatterlist *sg)
434{
435 dma_addr_t dest = 0, src = 0;
436 u32 xfer_cnt, sglen, dma_len, xfer_ctr;
437 u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
438 unsigned long flags;
439 int ret = 0;
440
441 sglen = sg_dma_len(sg);
442 if (write) {
443 src = sg_dma_address(sg);
444 dest = acdev->pbase + EXT_WRITE_PORT;
445 } else {
446 dest = sg_dma_address(sg);
447 src = acdev->pbase + EXT_READ_PORT;
448 }
449
450 /*
451 * For each sg:
452 * MAX_XFER_COUNT data will be transferred before we get transfer
 453 * complete interrupt. In between, after each FIFO_SIZE of data, a
454 * buffer available interrupt will be generated. At this time we will
455 * fill FIFO again: max FIFO_SIZE data.
456 */
457 while (sglen) {
458 xfer_cnt = min(sglen, MAX_XFER_COUNT);
459 spin_lock_irqsave(&acdev->host->lock, flags);
460 xfer_ctr = readl(acdev->vbase + XFER_CTR) &
461 ~XFER_COUNT_MASK;
462 writel(xfer_ctr | xfer_cnt | XFER_START,
463 acdev->vbase + XFER_CTR);
464 spin_unlock_irqrestore(&acdev->host->lock, flags);
465
 466 /* continue dma xfers until current sg is completed */
467 while (xfer_cnt) {
468 /* wait for read to complete */
469 if (!write) {
470 ret = wait4buf(acdev);
471 if (ret)
472 goto fail;
473 }
474
475 /* read/write FIFO in chunk of FIFO_SIZE */
476 dma_len = min(xfer_cnt, FIFO_SIZE);
477 ret = dma_xfer(acdev, src, dest, dma_len);
478 if (ret) {
479 dev_err(acdev->host->dev, "dma failed");
480 goto fail;
481 }
482
483 if (write)
484 src += dma_len;
485 else
486 dest += dma_len;
487
488 sglen -= dma_len;
489 xfer_cnt -= dma_len;
490
491 /* wait for write to complete */
492 if (write) {
493 ret = wait4buf(acdev);
494 if (ret)
495 goto fail;
496 }
497 }
498 }
499
500fail:
501 spin_lock_irqsave(&acdev->host->lock, flags);
502 writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
503 acdev->vbase + XFER_CTR);
504 spin_unlock_irqrestore(&acdev->host->lock, flags);
505
506 return ret;
507}
508
509/*
510 * This routine uses External DMA controller to read/write data to FIFO of CF
511 * controller. There are two xfer related interrupt supported by CF controller:
512 * - buf_avail: This interrupt is generated as soon as we have buffer of 512
513 * bytes available for reading or empty buffer available for writing.
514 * - xfer_done: This interrupt is generated on transfer of "xfer_size" amount of
515 * data to/from FIFO. xfer_size is programmed in XFER_CTR register.
516 *
517 * Max buffer size = FIFO_SIZE = 512 Bytes.
518 * Max xfer_size = MAX_XFER_COUNT = 256 KB.
519 */
520static void data_xfer(struct work_struct *work)
521{
522 struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
523 work);
524 struct ata_queued_cmd *qc = acdev->qc;
525 struct scatterlist *sg;
526 unsigned long flags;
527 u32 temp;
528 int ret = 0;
529
530 /* request dma channels */
531 /* dma_request_channel may sleep, so calling from process context */
532 acdev->dma_chan = dma_request_channel(acdev->mask, filter,
533 acdev->dma_priv);
534 if (!acdev->dma_chan) {
535 dev_err(acdev->host->dev, "Unable to get dma_chan\n");
536 goto chan_request_fail;
537 }
538
539 for_each_sg(qc->sg, sg, qc->n_elem, temp) {
540 ret = sg_xfer(acdev, sg);
541 if (ret)
542 break;
543 }
544
545 dma_release_channel(acdev->dma_chan);
546
547 /* data xferred successfully */
548 if (!ret) {
549 u32 status;
550
551 spin_lock_irqsave(&acdev->host->lock, flags);
552 status = ioread8(qc->ap->ioaddr.altstatus_addr);
553 spin_unlock_irqrestore(&acdev->host->lock, flags);
554 if (status & (ATA_BUSY | ATA_DRQ)) {
555 ata_sff_queue_delayed_work(&acdev->dwork, 1);
556 return;
557 }
558
559 goto sff_intr;
560 }
561
562 cf_dumpregs(acdev);
563
564chan_request_fail:
565 spin_lock_irqsave(&acdev->host->lock, flags);
 566 /* error when transferring data to/from memory */
567 qc->err_mask |= AC_ERR_HOST_BUS;
568 qc->ap->hsm_task_state = HSM_ST_ERR;
569
570 cf_ctrl_reset(acdev);
571 spin_unlock_irqrestore(qc->ap->lock, flags);
572sff_intr:
573 dma_complete(acdev);
574}
575
576static void delayed_finish(struct work_struct *work)
577{
578 struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
579 dwork.work);
580 struct ata_queued_cmd *qc = acdev->qc;
581 unsigned long flags;
582 u8 status;
583
584 spin_lock_irqsave(&acdev->host->lock, flags);
585 status = ioread8(qc->ap->ioaddr.altstatus_addr);
586 spin_unlock_irqrestore(&acdev->host->lock, flags);
587
588 if (status & (ATA_BUSY | ATA_DRQ))
589 ata_sff_queue_delayed_work(&acdev->dwork, 1);
590 else
591 dma_complete(acdev);
592}
593
594static irqreturn_t arasan_cf_interrupt(int irq, void *dev)
595{
596 struct arasan_cf_dev *acdev = ((struct ata_host *)dev)->private_data;
597 unsigned long flags;
598 u32 irqsts;
599
600 irqsts = readl(acdev->vbase + GIRQ_STS);
601 if (!(irqsts & GIRQ_CF))
602 return IRQ_NONE;
603
604 spin_lock_irqsave(&acdev->host->lock, flags);
605 irqsts = readl(acdev->vbase + IRQ_STS);
606 writel(irqsts, acdev->vbase + IRQ_STS); /* clear irqs */
607 writel(GIRQ_CF, acdev->vbase + GIRQ_STS); /* clear girqs */
608
609 /* handle only relevant interrupts */
610 irqsts &= ~IGNORED_IRQS;
611
612 if (irqsts & CARD_DETECT_IRQ) {
613 cf_card_detect(acdev, 1);
614 spin_unlock_irqrestore(&acdev->host->lock, flags);
615 return IRQ_HANDLED;
616 }
617
618 if (irqsts & PIO_XFER_ERR_IRQ) {
619 acdev->dma_status = ATA_DMA_ERR;
620 writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
621 acdev->vbase + XFER_CTR);
622 spin_unlock_irqrestore(&acdev->host->lock, flags);
623 complete(&acdev->cf_completion);
624 dev_err(acdev->host->dev, "pio xfer err irq\n");
625 return IRQ_HANDLED;
626 }
627
628 spin_unlock_irqrestore(&acdev->host->lock, flags);
629
630 if (irqsts & BUF_AVAIL_IRQ) {
631 complete(&acdev->cf_completion);
632 return IRQ_HANDLED;
633 }
634
635 if (irqsts & XFER_DONE_IRQ) {
636 struct ata_queued_cmd *qc = acdev->qc;
637
638 /* Send Complete only for write */
639 if (qc->tf.flags & ATA_TFLAG_WRITE)
640 complete(&acdev->cf_completion);
641 }
642
643 return IRQ_HANDLED;
644}
645
646static void arasan_cf_freeze(struct ata_port *ap)
647{
648 struct arasan_cf_dev *acdev = ap->host->private_data;
649
650 /* stop transfer and reset controller */
651 writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
652 acdev->vbase + XFER_CTR);
653 cf_ctrl_reset(acdev);
654 acdev->dma_status = ATA_DMA_ERR;
655
656 ata_sff_dma_pause(ap);
657 ata_sff_freeze(ap);
658}
659
660void arasan_cf_error_handler(struct ata_port *ap)
661{
662 struct arasan_cf_dev *acdev = ap->host->private_data;
663
664 /*
665 * DMA transfers using an external DMA controller may be scheduled.
666 * Abort them before handling error. Refer data_xfer() for further
667 * details.
668 */
669 cancel_work_sync(&acdev->work);
670 cancel_delayed_work_sync(&acdev->dwork);
671 return ata_sff_error_handler(ap);
672}
673
674static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
675{
676 u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;
677 u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
678
679 xfer_ctr |= write ? XFER_WRITE : XFER_READ;
680 writel(xfer_ctr, acdev->vbase + XFER_CTR);
681
682 acdev->qc->ap->ops->sff_exec_command(acdev->qc->ap, &acdev->qc->tf);
683 ata_sff_queue_work(&acdev->work);
684}
685
686unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc)
687{
688 struct ata_port *ap = qc->ap;
689 struct arasan_cf_dev *acdev = ap->host->private_data;
690
691 /* defer PIO handling to sff_qc_issue */
692 if (!ata_is_dma(qc->tf.protocol))
693 return ata_sff_qc_issue(qc);
694
695 /* select the device */
696 ata_wait_idle(ap);
697 ata_sff_dev_select(ap, qc->dev->devno);
698 ata_wait_idle(ap);
699
700 /* start the command */
701 switch (qc->tf.protocol) {
702 case ATA_PROT_DMA:
703 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
704
705 ap->ops->sff_tf_load(ap, &qc->tf);
706 acdev->dma_status = 0;
707 acdev->qc = qc;
708 arasan_cf_dma_start(acdev);
709 ap->hsm_task_state = HSM_ST_LAST;
710 break;
711
712 default:
713 WARN_ON(1);
714 return AC_ERR_SYSTEM;
715 }
716
717 return 0;
718}
719
720static void arasan_cf_set_piomode(struct ata_port *ap, struct ata_device *adev)
721{
722 struct arasan_cf_dev *acdev = ap->host->private_data;
723 u8 pio = adev->pio_mode - XFER_PIO_0;
724 unsigned long flags;
725 u32 val;
726
727 /* Arasan ctrl supports Mode0 -> Mode6 */
728 if (pio > 6) {
729 dev_err(ap->dev, "Unknown PIO mode\n");
730 return;
731 }
732
733 spin_lock_irqsave(&acdev->host->lock, flags);
734 val = readl(acdev->vbase + OP_MODE) &
735 ~(ULTRA_DMA_ENB | MULTI_WORD_DMA_ENB | DRQ_BLOCK_SIZE_MASK);
736 writel(val, acdev->vbase + OP_MODE);
737 val = readl(acdev->vbase + TM_CFG) & ~TRUEIDE_PIO_TIMING_MASK;
738 val |= pio << TRUEIDE_PIO_TIMING_SHIFT;
739 writel(val, acdev->vbase + TM_CFG);
740
741 cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 0);
742 cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 1);
743 spin_unlock_irqrestore(&acdev->host->lock, flags);
744}
745
746static void arasan_cf_set_dmamode(struct ata_port *ap, struct ata_device *adev)
747{
748 struct arasan_cf_dev *acdev = ap->host->private_data;
749 u32 opmode, tmcfg, dma_mode = adev->dma_mode;
750 unsigned long flags;
751
752 spin_lock_irqsave(&acdev->host->lock, flags);
753 opmode = readl(acdev->vbase + OP_MODE) &
754 ~(MULTI_WORD_DMA_ENB | ULTRA_DMA_ENB);
755 tmcfg = readl(acdev->vbase + TM_CFG);
756
757 if ((dma_mode >= XFER_UDMA_0) && (dma_mode <= XFER_UDMA_6)) {
758 opmode |= ULTRA_DMA_ENB;
759 tmcfg &= ~ULTRA_DMA_TIMING_MASK;
760 tmcfg |= (dma_mode - XFER_UDMA_0) << ULTRA_DMA_TIMING_SHIFT;
761 } else if ((dma_mode >= XFER_MW_DMA_0) && (dma_mode <= XFER_MW_DMA_4)) {
762 opmode |= MULTI_WORD_DMA_ENB;
763 tmcfg &= ~TRUEIDE_MWORD_DMA_TIMING_MASK;
764 tmcfg |= (dma_mode - XFER_MW_DMA_0) <<
765 TRUEIDE_MWORD_DMA_TIMING_SHIFT;
766 } else {
767 dev_err(ap->dev, "Unknown DMA mode\n");
768 spin_unlock_irqrestore(&acdev->host->lock, flags);
769 return;
770 }
771
772 writel(opmode, acdev->vbase + OP_MODE);
773 writel(tmcfg, acdev->vbase + TM_CFG);
774 writel(DMA_XFER_MODE, acdev->vbase + XFER_CTR);
775
776 cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 0);
777 cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 1);
778 spin_unlock_irqrestore(&acdev->host->lock, flags);
779}
780
781static struct ata_port_operations arasan_cf_ops = {
782 .inherits = &ata_sff_port_ops,
783 .freeze = arasan_cf_freeze,
784 .error_handler = arasan_cf_error_handler,
785 .qc_issue = arasan_cf_qc_issue,
786 .set_piomode = arasan_cf_set_piomode,
787 .set_dmamode = arasan_cf_set_dmamode,
788};
789
790static int __devinit arasan_cf_probe(struct platform_device *pdev)
791{
792 struct arasan_cf_dev *acdev;
793 struct arasan_cf_pdata *pdata = dev_get_platdata(&pdev->dev);
794 struct ata_host *host;
795 struct ata_port *ap;
796 struct resource *res;
797 irq_handler_t irq_handler = NULL;
798 int ret = 0;
799
800 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
801 if (!res)
802 return -EINVAL;
803
804 if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
805 DRIVER_NAME)) {
806 dev_warn(&pdev->dev, "Failed to get memory region resource\n");
807 return -ENOENT;
808 }
809
810 acdev = devm_kzalloc(&pdev->dev, sizeof(*acdev), GFP_KERNEL);
811 if (!acdev) {
812 dev_warn(&pdev->dev, "kzalloc fail\n");
813 return -ENOMEM;
814 }
815
816 /* if irq is 0, support only PIO */
817 acdev->irq = platform_get_irq(pdev, 0);
818 if (acdev->irq)
819 irq_handler = arasan_cf_interrupt;
820 else
821 pdata->quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
822
823 acdev->pbase = res->start;
824 acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start,
825 resource_size(res));
826 if (!acdev->vbase) {
827 dev_warn(&pdev->dev, "ioremap fail\n");
828 return -ENOMEM;
829 }
830
831#ifdef CONFIG_HAVE_CLK
832 acdev->clk = clk_get(&pdev->dev, NULL);
833 if (IS_ERR(acdev->clk)) {
834 dev_warn(&pdev->dev, "Clock not found\n");
835 return PTR_ERR(acdev->clk);
836 }
837#endif
838
839 /* allocate host */
840 host = ata_host_alloc(&pdev->dev, 1);
841 if (!host) {
842 ret = -ENOMEM;
843 dev_warn(&pdev->dev, "alloc host fail\n");
844 goto free_clk;
845 }
846
847 ap = host->ports[0];
848 host->private_data = acdev;
849 acdev->host = host;
850 ap->ops = &arasan_cf_ops;
851 ap->pio_mask = ATA_PIO6;
852 ap->mwdma_mask = ATA_MWDMA4;
853 ap->udma_mask = ATA_UDMA6;
854
855 init_completion(&acdev->cf_completion);
856 init_completion(&acdev->dma_completion);
857 INIT_WORK(&acdev->work, data_xfer);
858 INIT_DELAYED_WORK(&acdev->dwork, delayed_finish);
859 dma_cap_set(DMA_MEMCPY, acdev->mask);
860 acdev->dma_priv = pdata->dma_priv;
861
862 /* Handle platform specific quirks */
863 if (pdata->quirk) {
864 if (pdata->quirk & CF_BROKEN_PIO) {
865 ap->ops->set_piomode = NULL;
866 ap->pio_mask = 0;
867 }
868 if (pdata->quirk & CF_BROKEN_MWDMA)
869 ap->mwdma_mask = 0;
870 if (pdata->quirk & CF_BROKEN_UDMA)
871 ap->udma_mask = 0;
872 }
873 ap->flags |= ATA_FLAG_PIO_POLLING | ATA_FLAG_NO_ATAPI;
874
875 ap->ioaddr.cmd_addr = acdev->vbase + ATA_DATA_PORT;
876 ap->ioaddr.data_addr = acdev->vbase + ATA_DATA_PORT;
877 ap->ioaddr.error_addr = acdev->vbase + ATA_ERR_FTR;
878 ap->ioaddr.feature_addr = acdev->vbase + ATA_ERR_FTR;
879 ap->ioaddr.nsect_addr = acdev->vbase + ATA_SC;
880 ap->ioaddr.lbal_addr = acdev->vbase + ATA_SN;
881 ap->ioaddr.lbam_addr = acdev->vbase + ATA_CL;
882 ap->ioaddr.lbah_addr = acdev->vbase + ATA_CH;
883 ap->ioaddr.device_addr = acdev->vbase + ATA_SH;
884 ap->ioaddr.status_addr = acdev->vbase + ATA_STS_CMD;
885 ap->ioaddr.command_addr = acdev->vbase + ATA_STS_CMD;
886 ap->ioaddr.altstatus_addr = acdev->vbase + ATA_ASTS_DCTR;
887 ap->ioaddr.ctl_addr = acdev->vbase + ATA_ASTS_DCTR;
888
889 ata_port_desc(ap, "phy_addr %llx virt_addr %p",
890 (unsigned long long) res->start, acdev->vbase);
891
892 ret = cf_init(acdev);
893 if (ret)
894 goto free_clk;
895
896 cf_card_detect(acdev, 0);
897
898 return ata_host_activate(host, acdev->irq, irq_handler, 0,
899 &arasan_cf_sht);
900
901free_clk:
902#ifdef CONFIG_HAVE_CLK
903 clk_put(acdev->clk);
904#endif
905 return ret;
906}
907
908static int __devexit arasan_cf_remove(struct platform_device *pdev)
909{
910 struct ata_host *host = dev_get_drvdata(&pdev->dev);
911 struct arasan_cf_dev *acdev = host->ports[0]->private_data;
912
913 ata_host_detach(host);
914 cf_exit(acdev);
915#ifdef CONFIG_HAVE_CLK
916 clk_put(acdev->clk);
917#endif
918
919 return 0;
920}
921
922#ifdef CONFIG_PM
923static int arasan_cf_suspend(struct device *dev)
924{
925 struct platform_device *pdev = to_platform_device(dev);
926 struct ata_host *host = dev_get_drvdata(&pdev->dev);
927 struct arasan_cf_dev *acdev = host->ports[0]->private_data;
928
929 if (acdev->dma_chan) {
930 acdev->dma_chan->device->device_control(acdev->dma_chan,
931 DMA_TERMINATE_ALL, 0);
932 dma_release_channel(acdev->dma_chan);
933 }
934 cf_exit(acdev);
935 return ata_host_suspend(host, PMSG_SUSPEND);
936}
937
938static int arasan_cf_resume(struct device *dev)
939{
940 struct platform_device *pdev = to_platform_device(dev);
941 struct ata_host *host = dev_get_drvdata(&pdev->dev);
942 struct arasan_cf_dev *acdev = host->ports[0]->private_data;
943
944 cf_init(acdev);
945 ata_host_resume(host);
946
947 return 0;
948}
949
950static const struct dev_pm_ops arasan_cf_pm_ops = {
951 .suspend = arasan_cf_suspend,
952 .resume = arasan_cf_resume,
953};
954#endif
955
956static struct platform_driver arasan_cf_driver = {
957 .probe = arasan_cf_probe,
958 .remove = __devexit_p(arasan_cf_remove),
959 .driver = {
960 .name = DRIVER_NAME,
961 .owner = THIS_MODULE,
962#ifdef CONFIG_PM
963 .pm = &arasan_cf_pm_ops,
964#endif
965 },
966};
967
968static int __init arasan_cf_init(void)
969{
970 return platform_driver_register(&arasan_cf_driver);
971}
972module_init(arasan_cf_init);
973
974static void __exit arasan_cf_exit(void)
975{
976 platform_driver_unregister(&arasan_cf_driver);
977}
978module_exit(arasan_cf_exit);
979
980MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
981MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
982MODULE_LICENSE("GPL");
983MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
index 66ce6a526f2..36f189c7ee8 100644
--- a/drivers/ata/pata_at32.c
+++ b/drivers/ata/pata_at32.c
@@ -194,7 +194,7 @@ static int __init pata_at32_init_one(struct device *dev,
194 /* Setup ATA bindings */ 194 /* Setup ATA bindings */
195 ap->ops = &at32_port_ops; 195 ap->ops = &at32_port_ops;
196 ap->pio_mask = PIO_MASK; 196 ap->pio_mask = PIO_MASK;
197 ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_SLAVE_POSS; 197 ap->flags |= ATA_FLAG_SLAVE_POSS;
198 198
199 /* 199 /*
200 * Since all 8-bit taskfile transfers has to go on the lower 200 * Since all 8-bit taskfile transfers has to go on the lower
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 7aed5c79259..e0b58b8dfe6 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1454,9 +1454,7 @@ static struct ata_port_operations bfin_pata_ops = {
1454 1454
1455static struct ata_port_info bfin_port_info[] = { 1455static struct ata_port_info bfin_port_info[] = {
1456 { 1456 {
1457 .flags = ATA_FLAG_SLAVE_POSS 1457 .flags = ATA_FLAG_SLAVE_POSS,
1458 | ATA_FLAG_MMIO
1459 | ATA_FLAG_NO_LEGACY,
1460 .pio_mask = ATA_PIO4, 1458 .pio_mask = ATA_PIO4,
1461 .mwdma_mask = 0, 1459 .mwdma_mask = 0,
1462 .udma_mask = 0, 1460 .udma_mask = 0,
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 538ec38ba99..6c77d68dbd0 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -14,6 +14,7 @@
14 * Look into engine reset on timeout errors. Should not be required. 14 * Look into engine reset on timeout errors. Should not be required.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 18
18#include <linux/kernel.h> 19#include <linux/kernel.h>
19#include <linux/module.h> 20#include <linux/module.h>
@@ -25,7 +26,7 @@
25#include <linux/libata.h> 26#include <linux/libata.h>
26 27
27#define DRV_NAME "pata_hpt366" 28#define DRV_NAME "pata_hpt366"
28#define DRV_VERSION "0.6.10" 29#define DRV_VERSION "0.6.11"
29 30
30struct hpt_clock { 31struct hpt_clock {
31 u8 xfer_mode; 32 u8 xfer_mode;
@@ -160,8 +161,8 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
160 161
161 while (list[i] != NULL) { 162 while (list[i] != NULL) {
162 if (!strcmp(list[i], model_num)) { 163 if (!strcmp(list[i], model_num)) {
163 pr_warning(DRV_NAME ": %s is not supported for %s.\n", 164 pr_warn("%s is not supported for %s\n",
164 modestr, list[i]); 165 modestr, list[i]);
165 return 1; 166 return 1;
166 } 167 }
167 i++; 168 i++;
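
The pr_fmt() define added above is what allows the pr_warn()/pr_err() calls in the HPT drivers to drop the explicit DRV_NAME prefix: the kernel's pr_* macros expand pr_fmt() around the format string, so the define has to appear before the header includes. As a rough illustration of the mechanism only, here is a minimal userspace sketch (the fprintf-based pr_warn and the example strings are invented stand-ins, not kernel code):

#include <stdio.h>

/* Userspace sketch of the kernel's pr_fmt() convention: define the prefix
 * macro before the logging helper, and every call site picks it up through
 * string-literal concatenation. Build with gcc (##__VA_ARGS__ is a GNU
 * extension). */
#define KBUILD_MODNAME "pata_hpt366"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define pr_warn(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* Prints: "pata_hpt366: UDMA is not supported for an example model" */
	pr_warn("%s is not supported for %s\n", "UDMA", "an example model");
	return 0;
}
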
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 4c5b5183225..9620636aa40 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -14,6 +14,8 @@
14 * Look into engine reset on timeout errors. Should not be required. 14 * Look into engine reset on timeout errors. Should not be required.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/kernel.h> 19#include <linux/kernel.h>
18#include <linux/module.h> 20#include <linux/module.h>
19#include <linux/pci.h> 21#include <linux/pci.h>
@@ -24,7 +26,7 @@
24#include <linux/libata.h> 26#include <linux/libata.h>
25 27
26#define DRV_NAME "pata_hpt37x" 28#define DRV_NAME "pata_hpt37x"
27#define DRV_VERSION "0.6.22" 29#define DRV_VERSION "0.6.23"
28 30
29struct hpt_clock { 31struct hpt_clock {
30 u8 xfer_speed; 32 u8 xfer_speed;
@@ -229,8 +231,8 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
229 231
230 while (list[i] != NULL) { 232 while (list[i] != NULL) {
231 if (!strcmp(list[i], model_num)) { 233 if (!strcmp(list[i], model_num)) {
232 pr_warning(DRV_NAME ": %s is not supported for %s.\n", 234 pr_warn("%s is not supported for %s\n",
233 modestr, list[i]); 235 modestr, list[i]);
234 return 1; 236 return 1;
235 } 237 }
236 i++; 238 i++;
@@ -863,8 +865,8 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
863 chip_table = &hpt372; 865 chip_table = &hpt372;
864 break; 866 break;
865 default: 867 default:
866 pr_err(DRV_NAME ": Unknown HPT366 subtype, " 868 pr_err("Unknown HPT366 subtype, please report (%d)\n",
867 "please report (%d).\n", rev); 869 rev);
868 return -ENODEV; 870 return -ENODEV;
869 } 871 }
870 break; 872 break;
@@ -904,8 +906,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
904 *ppi = &info_hpt374_fn1; 906 *ppi = &info_hpt374_fn1;
905 break; 907 break;
906 default: 908 default:
907 pr_err(DRV_NAME ": PCI table is bogus, please report (%d).\n", 909 pr_err("PCI table is bogus, please report (%d)\n", dev->device);
908 dev->device);
909 return -ENODEV; 910 return -ENODEV;
910 } 911 }
911 /* Ok so this is a chip we support */ 912 /* Ok so this is a chip we support */
@@ -953,7 +954,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
953 u8 sr; 954 u8 sr;
954 u32 total = 0; 955 u32 total = 0;
955 956
956 pr_warning(DRV_NAME ": BIOS has not set timing clocks.\n"); 957 pr_warn("BIOS has not set timing clocks\n");
957 958
958 /* This is the process the HPT371 BIOS is reported to use */ 959 /* This is the process the HPT371 BIOS is reported to use */
959 for (i = 0; i < 128; i++) { 960 for (i = 0; i < 128; i++) {
@@ -1009,7 +1010,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
1009 (f_high << 16) | f_low | 0x100); 1010 (f_high << 16) | f_low | 0x100);
1010 } 1011 }
1011 if (adjust == 8) { 1012 if (adjust == 8) {
1012 pr_err(DRV_NAME ": DPLL did not stabilize!\n"); 1013 pr_err("DPLL did not stabilize!\n");
1013 return -ENODEV; 1014 return -ENODEV;
1014 } 1015 }
1015 if (dpll == 3) 1016 if (dpll == 3)
@@ -1017,7 +1018,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
1017 else 1018 else
1018 private_data = (void *)hpt37x_timings_50; 1019 private_data = (void *)hpt37x_timings_50;
1019 1020
1020 pr_info(DRV_NAME ": bus clock %dMHz, using %dMHz DPLL.\n", 1021 pr_info("bus clock %dMHz, using %dMHz DPLL\n",
1021 MHz[clock_slot], MHz[dpll]); 1022 MHz[clock_slot], MHz[dpll]);
1022 } else { 1023 } else {
1023 private_data = (void *)chip_table->clocks[clock_slot]; 1024 private_data = (void *)chip_table->clocks[clock_slot];
@@ -1032,7 +1033,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
1032 if (clock_slot < 2 && ppi[0] == &info_hpt370a) 1033 if (clock_slot < 2 && ppi[0] == &info_hpt370a)
1033 ppi[0] = &info_hpt370a_33; 1034 ppi[0] = &info_hpt370a_33;
1034 1035
1035 pr_info(DRV_NAME ": %s using %dMHz bus clock.\n", 1036 pr_info("%s using %dMHz bus clock\n",
1036 chip_table->name, MHz[clock_slot]); 1037 chip_table->name, MHz[clock_slot]);
1037 } 1038 }
1038 1039
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index eca68caf5f4..765f136d8cd 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -15,6 +15,8 @@
15 * Work out best PLL policy 15 * Work out best PLL policy
16 */ 16 */
17 17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
18#include <linux/kernel.h> 20#include <linux/kernel.h>
19#include <linux/module.h> 21#include <linux/module.h>
20#include <linux/pci.h> 22#include <linux/pci.h>
@@ -25,7 +27,7 @@
25#include <linux/libata.h> 27#include <linux/libata.h>
26 28
27#define DRV_NAME "pata_hpt3x2n" 29#define DRV_NAME "pata_hpt3x2n"
28#define DRV_VERSION "0.3.14" 30#define DRV_VERSION "0.3.15"
29 31
30enum { 32enum {
31 HPT_PCI_FAST = (1 << 31), 33 HPT_PCI_FAST = (1 << 31),
@@ -418,7 +420,7 @@ static int hpt3x2n_pci_clock(struct pci_dev *pdev)
418 u16 sr; 420 u16 sr;
419 u32 total = 0; 421 u32 total = 0;
420 422
421 pr_warning(DRV_NAME ": BIOS clock data not set.\n"); 423 pr_warn("BIOS clock data not set\n");
422 424
423 /* This is the process the HPT371 BIOS is reported to use */ 425 /* This is the process the HPT371 BIOS is reported to use */
424 for (i = 0; i < 128; i++) { 426 for (i = 0; i < 128; i++) {
@@ -528,8 +530,7 @@ hpt372n:
528 ppi[0] = &info_hpt372n; 530 ppi[0] = &info_hpt372n;
529 break; 531 break;
530 default: 532 default:
531 pr_err(DRV_NAME ": PCI table is bogus, please report (%d).\n", 533 pr_err("PCI table is bogus, please report (%d)\n", dev->device);
532 dev->device);
533 return -ENODEV; 534 return -ENODEV;
534 } 535 }
535 536
@@ -578,11 +579,11 @@ hpt372n:
578 pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low); 579 pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
579 } 580 }
580 if (adjust == 8) { 581 if (adjust == 8) {
581 pr_err(DRV_NAME ": DPLL did not stabilize!\n"); 582 pr_err("DPLL did not stabilize!\n");
582 return -ENODEV; 583 return -ENODEV;
583 } 584 }
584 585
585 pr_info(DRV_NAME ": bus clock %dMHz, using 66MHz DPLL.\n", pci_mhz); 586 pr_info("bus clock %dMHz, using 66MHz DPLL\n", pci_mhz);
586 587
587 /* 588 /*
588 * Set our private data up. We only need a few flags 589 * Set our private data up. We only need a few flags
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index b63d5e2d462..24d7df81546 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -151,7 +151,7 @@ static struct ata_port_operations hpt3x3_port_ops = {
151 .check_atapi_dma= hpt3x3_atapi_dma, 151 .check_atapi_dma= hpt3x3_atapi_dma,
152 .freeze = hpt3x3_freeze, 152 .freeze = hpt3x3_freeze,
153#endif 153#endif
154 154
155}; 155};
156 156
157/** 157/**
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index aa0e0c51cc0..2d15f2548a1 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -616,7 +616,7 @@ static void it821x_display_disk(int n, u8 *buf)
616 if (buf[52] > 4) /* No Disk */ 616 if (buf[52] > 4) /* No Disk */
617 return; 617 return;
618 618
619 ata_id_c_string((u16 *)buf, id, 0, 41); 619 ata_id_c_string((u16 *)buf, id, 0, 41);
620 620
621 if (buf[51]) { 621 if (buf[51]) {
622 mode = ffs(buf[51]); 622 mode = ffs(buf[51]);
@@ -910,7 +910,7 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
910 rc = pcim_enable_device(pdev); 910 rc = pcim_enable_device(pdev);
911 if (rc) 911 if (rc)
912 return rc; 912 return rc;
913 913
914 if (pdev->vendor == PCI_VENDOR_ID_RDC) { 914 if (pdev->vendor == PCI_VENDOR_ID_RDC) {
915 /* Deal with Vortex86SX */ 915 /* Deal with Vortex86SX */
916 if (pdev->revision == 0x11) 916 if (pdev->revision == 0x11)
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index ba54b089f98..5253b271b3f 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -177,7 +177,7 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
177 177
178 ap->ops = &ixp4xx_port_ops; 178 ap->ops = &ixp4xx_port_ops;
179 ap->pio_mask = ATA_PIO4; 179 ap->pio_mask = ATA_PIO4;
180 ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_NO_ATAPI; 180 ap->flags |= ATA_FLAG_NO_ATAPI;
181 181
182 ixp4xx_setup_port(ap, data, cs0->start, cs1->start); 182 ixp4xx_setup_port(ap, data, cs0->start, cs1->start);
183 183
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 75b49d01780..46f589edccd 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -1053,8 +1053,7 @@ static int __devinit pata_macio_common_init(struct pata_macio_priv *priv,
1053 /* Allocate libata host for 1 port */ 1053 /* Allocate libata host for 1 port */
1054 memset(&pinfo, 0, sizeof(struct ata_port_info)); 1054 memset(&pinfo, 0, sizeof(struct ata_port_info));
1055 pmac_macio_calc_timing_masks(priv, &pinfo); 1055 pmac_macio_calc_timing_masks(priv, &pinfo);
1056 pinfo.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_MMIO | 1056 pinfo.flags = ATA_FLAG_SLAVE_POSS;
1057 ATA_FLAG_NO_LEGACY;
1058 pinfo.port_ops = &pata_macio_ops; 1057 pinfo.port_ops = &pata_macio_ops;
1059 pinfo.private_data = priv; 1058 pinfo.private_data = priv;
1060 1059
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index dd38083dcbe..75a6a0c0094 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -38,7 +38,7 @@ static int marvell_pata_active(struct pci_dev *pdev)
38 38
39 /* We don't yet know how to do this for other devices */ 39 /* We don't yet know how to do this for other devices */
40 if (pdev->device != 0x6145) 40 if (pdev->device != 0x6145)
41 return 1; 41 return 1;
42 42
43 barp = pci_iomap(pdev, 5, 0x10); 43 barp = pci_iomap(pdev, 5, 0x10);
44 if (barp == NULL) 44 if (barp == NULL)
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index cc50bd09aa2..e277a142138 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -165,7 +165,7 @@ static int ninja32_reinit_one(struct pci_dev *pdev)
165 return rc; 165 return rc;
166 ninja32_program(host->iomap[0]); 166 ninja32_program(host->iomap[0]);
167 ata_host_resume(host); 167 ata_host_resume(host);
168 return 0; 168 return 0;
169} 169}
170#endif 170#endif
171 171
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index fa1b95a9a7f..220ddc90608 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -848,8 +848,7 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
848 cf_port->ap = ap; 848 cf_port->ap = ap;
849 ap->ops = &octeon_cf_ops; 849 ap->ops = &octeon_cf_ops;
850 ap->pio_mask = ATA_PIO6; 850 ap->pio_mask = ATA_PIO6;
851 ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY 851 ap->flags |= ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;
852 | ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;
853 852
854 base = cs0 + ocd->base_region_bias; 853 base = cs0 + ocd->base_region_bias;
855 if (!ocd->is16bit) { 854 if (!ocd->is16bit) {
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
index 11fb4ccc74b..a2a73d95384 100644
--- a/drivers/ata/pata_palmld.c
+++ b/drivers/ata/pata_palmld.c
@@ -85,7 +85,7 @@ static __devinit int palmld_pata_probe(struct platform_device *pdev)
85 ap = host->ports[0]; 85 ap = host->ports[0];
86 ap->ops = &palmld_port_ops; 86 ap->ops = &palmld_port_ops;
87 ap->pio_mask = ATA_PIO4; 87 ap->pio_mask = ATA_PIO4;
88 ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_PIO_POLLING; 88 ap->flags |= ATA_FLAG_PIO_POLLING;
89 89
90 /* memory mapping voodoo */ 90 /* memory mapping voodoo */
91 ap->ioaddr.cmd_addr = mem + 0x10; 91 ap->ioaddr.cmd_addr = mem + 0x10;
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 806292160b3..29af660d968 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -124,7 +124,7 @@ static unsigned int ata_data_xfer_8bit(struct ata_device *dev,
124 * reset will recover the device. 124 * reset will recover the device.
125 * 125 *
126 */ 126 */
127 127
128static void pcmcia_8bit_drain_fifo(struct ata_queued_cmd *qc) 128static void pcmcia_8bit_drain_fifo(struct ata_queued_cmd *qc)
129{ 129{
130 int count; 130 int count;
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index b1835112252..9765ace1692 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -150,8 +150,7 @@ static struct ata_port_operations pdc2027x_pata133_ops = {
150static struct ata_port_info pdc2027x_port_info[] = { 150static struct ata_port_info pdc2027x_port_info[] = {
151 /* PDC_UDMA_100 */ 151 /* PDC_UDMA_100 */
152 { 152 {
153 .flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS | 153 .flags = ATA_FLAG_SLAVE_POSS,
154 ATA_FLAG_MMIO,
155 .pio_mask = ATA_PIO4, 154 .pio_mask = ATA_PIO4,
156 .mwdma_mask = ATA_MWDMA2, 155 .mwdma_mask = ATA_MWDMA2,
157 .udma_mask = ATA_UDMA5, 156 .udma_mask = ATA_UDMA5,
@@ -159,8 +158,7 @@ static struct ata_port_info pdc2027x_port_info[] = {
159 }, 158 },
160 /* PDC_UDMA_133 */ 159 /* PDC_UDMA_133 */
161 { 160 {
162 .flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS | 161 .flags = ATA_FLAG_SLAVE_POSS,
163 ATA_FLAG_MMIO,
164 .pio_mask = ATA_PIO4, 162 .pio_mask = ATA_PIO4,
165 .mwdma_mask = ATA_MWDMA2, 163 .mwdma_mask = ATA_MWDMA2,
166 .udma_mask = ATA_UDMA6, 164 .udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 1898c6ed4b4..b4ede40f8ae 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -292,7 +292,6 @@ static int __devinit pxa_ata_probe(struct platform_device *pdev)
292 ap->ops = &pxa_ata_port_ops; 292 ap->ops = &pxa_ata_port_ops;
293 ap->pio_mask = ATA_PIO4; 293 ap->pio_mask = ATA_PIO4;
294 ap->mwdma_mask = ATA_MWDMA2; 294 ap->mwdma_mask = ATA_MWDMA2;
295 ap->flags = ATA_FLAG_MMIO;
296 295
297 ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start, 296 ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
298 resource_size(cmd_res)); 297 resource_size(cmd_res));
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 0ffd631000b..baeaf938d55 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -91,7 +91,6 @@ static void rb532_pata_setup_ports(struct ata_host *ah)
91 91
92 ap->ops = &rb532_pata_port_ops; 92 ap->ops = &rb532_pata_port_ops;
93 ap->pio_mask = ATA_PIO4; 93 ap->pio_mask = ATA_PIO4;
94 ap->flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO;
95 94
96 ap->ioaddr.cmd_addr = info->iobase + RB500_CF_REG_BASE; 95 ap->ioaddr.cmd_addr = info->iobase + RB500_CF_REG_BASE;
97 ap->ioaddr.ctl_addr = info->iobase + RB500_CF_REG_CTRL; 96 ap->ioaddr.ctl_addr = info->iobase + RB500_CF_REG_CTRL;
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index 8a51d673e5b..c446ae6055a 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -531,7 +531,6 @@ static int __init pata_s3c_probe(struct platform_device *pdev)
531 } 531 }
532 532
533 ap = host->ports[0]; 533 ap = host->ports[0];
534 ap->flags |= ATA_FLAG_MMIO;
535 ap->pio_mask = ATA_PIO4; 534 ap->pio_mask = ATA_PIO4;
536 535
537 if (cpu_type == TYPE_S3C64XX) { 536 if (cpu_type == TYPE_S3C64XX) {
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 093715c3273..88ea9b677b4 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -959,7 +959,7 @@ static struct ata_port_operations scc_pata_ops = {
959 959
960static struct ata_port_info scc_port_info[] = { 960static struct ata_port_info scc_port_info[] = {
961 { 961 {
962 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY, 962 .flags = ATA_FLAG_SLAVE_POSS,
963 .pio_mask = ATA_PIO4, 963 .pio_mask = ATA_PIO4,
964 /* No MWDMA */ 964 /* No MWDMA */
965 .udma_mask = ATA_UDMA6, 965 .udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 60cea13cccc..c04abc393fc 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -593,7 +593,7 @@ static const struct ata_port_info sis_info133 = {
593 .port_ops = &sis_133_ops, 593 .port_ops = &sis_133_ops,
594}; 594};
595const struct ata_port_info sis_info133_for_sata = { 595const struct ata_port_info sis_info133_for_sata = {
596 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 596 .flags = ATA_FLAG_SLAVE_POSS,
597 .pio_mask = ATA_PIO4, 597 .pio_mask = ATA_PIO4,
598 /* No MWDMA */ 598 /* No MWDMA */
599 .udma_mask = ATA_UDMA6, 599 .udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index adbe0426c8f..1111712b3d7 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -166,9 +166,7 @@ static struct ata_port_operations adma_ata_ops = {
166static struct ata_port_info adma_port_info[] = { 166static struct ata_port_info adma_port_info[] = {
167 /* board_1841_idx */ 167 /* board_1841_idx */
168 { 168 {
169 .flags = ATA_FLAG_SLAVE_POSS | 169 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_POLLING,
170 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
171 ATA_FLAG_PIO_POLLING,
172 .pio_mask = ATA_PIO4_ONLY, 170 .pio_mask = ATA_PIO4_ONLY,
173 .udma_mask = ATA_UDMA4, 171 .udma_mask = ATA_UDMA4,
174 .port_ops = &adma_ata_ops, 172 .port_ops = &adma_ata_ops,
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 6cf57c5c2b5..712ab5a4922 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -40,8 +40,11 @@
40#include <scsi/scsi_host.h> 40#include <scsi/scsi_host.h>
41#include <scsi/scsi_cmnd.h> 41#include <scsi/scsi_cmnd.h>
42 42
43/* These two are defined in "libata.h" */
44#undef DRV_NAME
45#undef DRV_VERSION
43#define DRV_NAME "sata-dwc" 46#define DRV_NAME "sata-dwc"
44#define DRV_VERSION "1.0" 47#define DRV_VERSION "1.3"
45 48
46/* SATA DMA driver Globals */ 49/* SATA DMA driver Globals */
47#define DMA_NUM_CHANS 1 50#define DMA_NUM_CHANS 1
@@ -333,11 +336,47 @@ static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
333 void __iomem *addr, int dir); 336 void __iomem *addr, int dir);
334static void dma_dwc_xfer_start(int dma_ch); 337static void dma_dwc_xfer_start(int dma_ch);
335 338
339static const char *get_prot_descript(u8 protocol)
340{
341 switch ((enum ata_tf_protocols)protocol) {
342 case ATA_PROT_NODATA:
343 return "ATA no data";
344 case ATA_PROT_PIO:
345 return "ATA PIO";
346 case ATA_PROT_DMA:
347 return "ATA DMA";
348 case ATA_PROT_NCQ:
349 return "ATA NCQ";
350 case ATAPI_PROT_NODATA:
351 return "ATAPI no data";
352 case ATAPI_PROT_PIO:
353 return "ATAPI PIO";
354 case ATAPI_PROT_DMA:
355 return "ATAPI DMA";
356 default:
357 return "unknown";
358 }
359}
360
361static const char *get_dma_dir_descript(int dma_dir)
362{
363 switch ((enum dma_data_direction)dma_dir) {
364 case DMA_BIDIRECTIONAL:
365 return "bidirectional";
366 case DMA_TO_DEVICE:
367 return "to device";
368 case DMA_FROM_DEVICE:
369 return "from device";
370 default:
371 return "none";
372 }
373}
374
336static void sata_dwc_tf_dump(struct ata_taskfile *tf) 375static void sata_dwc_tf_dump(struct ata_taskfile *tf)
337{ 376{
338 dev_vdbg(host_pvt.dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags:" 377 dev_vdbg(host_pvt.dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags:"
339 "0x%lx device: %x\n", tf->command, ata_get_cmd_descript\ 378 "0x%lx device: %x\n", tf->command,
340 (tf->protocol), tf->flags, tf->device); 379 get_prot_descript(tf->protocol), tf->flags, tf->device);
341 dev_vdbg(host_pvt.dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x " 380 dev_vdbg(host_pvt.dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x "
342 "lbam: 0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal, 381 "lbam: 0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal,
343 tf->lbam, tf->lbah); 382 tf->lbam, tf->lbah);
@@ -715,7 +754,7 @@ static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
715 /* Program the CTL register with src enable / dst enable */ 754 /* Program the CTL register with src enable / dst enable */
716 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].ctl.low), 755 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].ctl.low),
717 DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN); 756 DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN);
718 return 0; 757 return dma_ch;
719} 758}
720 759
721/* 760/*
@@ -967,7 +1006,7 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
967 } 1006 }
968 1007
969 dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n", 1008 dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
970 __func__, ata_get_cmd_descript(qc->tf.protocol)); 1009 __func__, get_prot_descript(qc->tf.protocol));
971DRVSTILLBUSY: 1010DRVSTILLBUSY:
972 if (ata_is_dma(qc->tf.protocol)) { 1011 if (ata_is_dma(qc->tf.protocol)) {
973 /* 1012 /*
@@ -1057,7 +1096,7 @@ DRVSTILLBUSY:
1057 1096
1058 /* Process completed command */ 1097 /* Process completed command */
1059 dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__, 1098 dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
1060 ata_get_cmd_descript(qc->tf.protocol)); 1099 get_prot_descript(qc->tf.protocol));
1061 if (ata_is_dma(qc->tf.protocol)) { 1100 if (ata_is_dma(qc->tf.protocol)) {
1062 host_pvt.dma_interrupt_count++; 1101 host_pvt.dma_interrupt_count++;
1063 if (hsdevp->dma_pending[tag] == \ 1102 if (hsdevp->dma_pending[tag] == \
@@ -1142,8 +1181,8 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
1142 if (tag > 0) { 1181 if (tag > 0) {
1143 dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s " 1182 dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s "
1144 "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command, 1183 "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command,
1145 ata_get_cmd_descript(qc->dma_dir), 1184 get_dma_dir_descript(qc->dma_dir),
1146 ata_get_cmd_descript(qc->tf.protocol), 1185 get_prot_descript(qc->tf.protocol),
1147 in_le32(&(hsdev->sata_dwc_regs->dmacr))); 1186 in_le32(&(hsdev->sata_dwc_regs->dmacr)));
1148 } 1187 }
1149#endif 1188#endif
@@ -1354,7 +1393,7 @@ static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
1354 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 1393 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1355 1394
1356 dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command, 1395 dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
1357 ata_get_cmd_descript(tf), tag); 1396 ata_get_cmd_descript(tf->command), tag);
1358 1397
1359 spin_lock_irqsave(&ap->host->lock, flags); 1398 spin_lock_irqsave(&ap->host->lock, flags);
1360 hsdevp->cmd_issued[tag] = cmd_issued; 1399 hsdevp->cmd_issued[tag] = cmd_issued;
@@ -1413,7 +1452,7 @@ static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
1413 1452
1414 dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s " 1453 dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s "
1415 "start_dma? %x\n", __func__, qc, tag, qc->tf.command, 1454 "start_dma? %x\n", __func__, qc, tag, qc->tf.command,
1416 ata_get_cmd_descript(qc->dma_dir), start_dma); 1455 get_dma_dir_descript(qc->dma_dir), start_dma);
1417 sata_dwc_tf_dump(&(qc->tf)); 1456 sata_dwc_tf_dump(&(qc->tf));
1418 1457
1419 if (start_dma) { 1458 if (start_dma) {
@@ -1462,10 +1501,9 @@ static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
1462 int dma_chan; 1501 int dma_chan;
1463 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); 1502 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
1464 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 1503 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1465 int err;
1466 1504
1467 dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n", 1505 dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
1468 __func__, ap->port_no, ata_get_cmd_descript(qc->dma_dir), 1506 __func__, ap->port_no, get_dma_dir_descript(qc->dma_dir),
1469 qc->n_elem); 1507 qc->n_elem);
1470 1508
1471 dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag], 1509 dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag],
@@ -1474,7 +1512,7 @@ static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
1474 dmadr), qc->dma_dir); 1512 dmadr), qc->dma_dir);
1475 if (dma_chan < 0) { 1513 if (dma_chan < 0) {
1476 dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n", 1514 dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n",
1477 __func__, err); 1515 __func__, dma_chan);
1478 return; 1516 return;
1479 } 1517 }
1480 hsdevp->dma_chan[tag] = dma_chan; 1518 hsdevp->dma_chan[tag] = dma_chan;
@@ -1491,8 +1529,8 @@ static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
1491 dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d " 1529 dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d "
1492 "prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n", 1530 "prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n",
1493 __func__, ap->print_id, qc->tf.command, 1531 __func__, ap->print_id, qc->tf.command,
1494 ata_get_cmd_descript(&qc->tf), 1532 ata_get_cmd_descript(qc->tf.command),
1495 qc->tag, ata_get_cmd_descript(qc->tf.protocol), 1533 qc->tag, get_prot_descript(qc->tf.protocol),
1496 ap->link.active_tag, ap->link.sactive); 1534 ap->link.active_tag, ap->link.sactive);
1497#endif 1535#endif
1498 1536
@@ -1533,7 +1571,7 @@ static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
1533#ifdef DEBUG_NCQ 1571#ifdef DEBUG_NCQ
1534 if (qc->tag > 0) 1572 if (qc->tag > 0)
1535 dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n", 1573 dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
1536 __func__, tag, qc->ap->link.active_tag); 1574 __func__, qc->tag, qc->ap->link.active_tag);
1537 1575
1538 return ; 1576 return ;
1539#endif 1577#endif
@@ -1580,9 +1618,8 @@ static struct ata_port_operations sata_dwc_ops = {
1580 1618
1581static const struct ata_port_info sata_dwc_port_info[] = { 1619static const struct ata_port_info sata_dwc_port_info[] = {
1582 { 1620 {
1583 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 1621 .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
1584 ATA_FLAG_MMIO | ATA_FLAG_NCQ, 1622 .pio_mask = ATA_PIO4,
1585 .pio_mask = 0x1f, /* pio 0-4 */
1586 .udma_mask = ATA_UDMA6, 1623 .udma_mask = ATA_UDMA6,
1587 .port_ops = &sata_dwc_ops, 1624 .port_ops = &sata_dwc_ops,
1588 }, 1625 },
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index b0214d00d50..7f9eab34a38 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -33,8 +33,7 @@ enum {
33 SATA_FSL_MAX_PRD_USABLE = SATA_FSL_MAX_PRD - 1, 33 SATA_FSL_MAX_PRD_USABLE = SATA_FSL_MAX_PRD - 1,
34 SATA_FSL_MAX_PRD_DIRECT = 16, /* Direct PRDT entries */ 34 SATA_FSL_MAX_PRD_DIRECT = 16, /* Direct PRDT entries */
35 35
36 SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 36 SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
37 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
38 ATA_FLAG_PMP | ATA_FLAG_NCQ | ATA_FLAG_AN), 37 ATA_FLAG_PMP | ATA_FLAG_NCQ | ATA_FLAG_AN),
39 38
40 SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH, 39 SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH,
@@ -186,6 +185,11 @@ enum {
186 COMMANDSTAT = 0x20, 185 COMMANDSTAT = 0x20,
187}; 186};
188 187
188/* TRANSCFG (transport-layer) configuration control */
189enum {
190 TRANSCFG_RX_WATER_MARK = (1 << 4),
191};
192
189/* PHY (link-layer) configuration control */ 193/* PHY (link-layer) configuration control */
190enum { 194enum {
191 PHY_BIST_ENABLE = 0x01, 195 PHY_BIST_ENABLE = 0x01,
@@ -1040,12 +1044,15 @@ static void sata_fsl_error_intr(struct ata_port *ap)
1040 1044
1041 /* find out the offending link and qc */ 1045 /* find out the offending link and qc */
1042 if (ap->nr_pmp_links) { 1046 if (ap->nr_pmp_links) {
1047 unsigned int dev_num;
1048
1043 dereg = ioread32(hcr_base + DE); 1049 dereg = ioread32(hcr_base + DE);
1044 iowrite32(dereg, hcr_base + DE); 1050 iowrite32(dereg, hcr_base + DE);
1045 iowrite32(cereg, hcr_base + CE); 1051 iowrite32(cereg, hcr_base + CE);
1046 1052
1047 if (dereg < ap->nr_pmp_links) { 1053 dev_num = ffs(dereg) - 1;
1048 link = &ap->pmp_link[dereg]; 1054 if (dev_num < ap->nr_pmp_links && dereg != 0) {
1055 link = &ap->pmp_link[dev_num];
1049 ehi = &link->eh_info; 1056 ehi = &link->eh_info;
1050 qc = ata_qc_from_tag(ap, link->active_tag); 1057 qc = ata_qc_from_tag(ap, link->active_tag);
1051 /* 1058 /*
@@ -1303,6 +1310,7 @@ static int sata_fsl_probe(struct platform_device *ofdev,
1303 struct sata_fsl_host_priv *host_priv = NULL; 1310 struct sata_fsl_host_priv *host_priv = NULL;
1304 int irq; 1311 int irq;
1305 struct ata_host *host; 1312 struct ata_host *host;
1313 u32 temp;
1306 1314
1307 struct ata_port_info pi = sata_fsl_port_info[0]; 1315 struct ata_port_info pi = sata_fsl_port_info[0];
1308 const struct ata_port_info *ppi[] = { &pi, NULL }; 1316 const struct ata_port_info *ppi[] = { &pi, NULL };
@@ -1317,6 +1325,12 @@ static int sata_fsl_probe(struct platform_device *ofdev,
1317 ssr_base = hcr_base + 0x100; 1325 ssr_base = hcr_base + 0x100;
1318 csr_base = hcr_base + 0x140; 1326 csr_base = hcr_base + 0x140;
1319 1327
1328 if (!of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc8315-sata")) {
1329 temp = ioread32(csr_base + TRANSCFG);
1330 temp = temp & 0xffffffe0;
1331 iowrite32(temp | TRANSCFG_RX_WATER_MARK, csr_base + TRANSCFG);
1332 }
1333
1320 DPRINTK("@reset i/o = 0x%x\n", ioread32(csr_base + TRANSCFG)); 1334 DPRINTK("@reset i/o = 0x%x\n", ioread32(csr_base + TRANSCFG));
1321 DPRINTK("sizeof(cmd_desc) = %d\n", sizeof(struct command_desc)); 1335 DPRINTK("sizeof(cmd_desc) = %d\n", sizeof(struct command_desc));
1322 DPRINTK("sizeof(#define cmd_desc) = %d\n", SATA_FSL_CMD_DESC_SIZE); 1336 DPRINTK("sizeof(#define cmd_desc) = %d\n", SATA_FSL_CMD_DESC_SIZE);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index bf74a36d3cc..cd40651e9b7 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -160,8 +160,7 @@ enum {
160 /* Host Flags */ 160 /* Host Flags */
161 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ 161 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
162 162
163 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 163 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
164 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
165 164
166 MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI, 165 MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
167 166
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 7254e255fd7..42344e3c686 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -539,7 +539,7 @@ struct nv_pi_priv {
539static const struct ata_port_info nv_port_info[] = { 539static const struct ata_port_info nv_port_info[] = {
540 /* generic */ 540 /* generic */
541 { 541 {
542 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 542 .flags = ATA_FLAG_SATA,
543 .pio_mask = NV_PIO_MASK, 543 .pio_mask = NV_PIO_MASK,
544 .mwdma_mask = NV_MWDMA_MASK, 544 .mwdma_mask = NV_MWDMA_MASK,
545 .udma_mask = NV_UDMA_MASK, 545 .udma_mask = NV_UDMA_MASK,
@@ -548,7 +548,7 @@ static const struct ata_port_info nv_port_info[] = {
548 }, 548 },
549 /* nforce2/3 */ 549 /* nforce2/3 */
550 { 550 {
551 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 551 .flags = ATA_FLAG_SATA,
552 .pio_mask = NV_PIO_MASK, 552 .pio_mask = NV_PIO_MASK,
553 .mwdma_mask = NV_MWDMA_MASK, 553 .mwdma_mask = NV_MWDMA_MASK,
554 .udma_mask = NV_UDMA_MASK, 554 .udma_mask = NV_UDMA_MASK,
@@ -557,7 +557,7 @@ static const struct ata_port_info nv_port_info[] = {
557 }, 557 },
558 /* ck804 */ 558 /* ck804 */
559 { 559 {
560 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 560 .flags = ATA_FLAG_SATA,
561 .pio_mask = NV_PIO_MASK, 561 .pio_mask = NV_PIO_MASK,
562 .mwdma_mask = NV_MWDMA_MASK, 562 .mwdma_mask = NV_MWDMA_MASK,
563 .udma_mask = NV_UDMA_MASK, 563 .udma_mask = NV_UDMA_MASK,
@@ -566,8 +566,7 @@ static const struct ata_port_info nv_port_info[] = {
566 }, 566 },
567 /* ADMA */ 567 /* ADMA */
568 { 568 {
569 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 569 .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
570 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
571 .pio_mask = NV_PIO_MASK, 570 .pio_mask = NV_PIO_MASK,
572 .mwdma_mask = NV_MWDMA_MASK, 571 .mwdma_mask = NV_MWDMA_MASK,
573 .udma_mask = NV_UDMA_MASK, 572 .udma_mask = NV_UDMA_MASK,
@@ -576,7 +575,7 @@ static const struct ata_port_info nv_port_info[] = {
576 }, 575 },
577 /* MCP5x */ 576 /* MCP5x */
578 { 577 {
579 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 578 .flags = ATA_FLAG_SATA,
580 .pio_mask = NV_PIO_MASK, 579 .pio_mask = NV_PIO_MASK,
581 .mwdma_mask = NV_MWDMA_MASK, 580 .mwdma_mask = NV_MWDMA_MASK,
582 .udma_mask = NV_UDMA_MASK, 581 .udma_mask = NV_UDMA_MASK,
@@ -585,8 +584,7 @@ static const struct ata_port_info nv_port_info[] = {
585 }, 584 },
586 /* SWNCQ */ 585 /* SWNCQ */
587 { 586 {
588 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 587 .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
589 ATA_FLAG_NCQ,
590 .pio_mask = NV_PIO_MASK, 588 .pio_mask = NV_PIO_MASK,
591 .mwdma_mask = NV_MWDMA_MASK, 589 .mwdma_mask = NV_MWDMA_MASK,
592 .udma_mask = NV_UDMA_MASK, 590 .udma_mask = NV_UDMA_MASK,
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index f03ad48273f..a004b1e0ea6 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -134,9 +134,7 @@ enum {
134 PDC_IRQ_DISABLE = (1 << 10), 134 PDC_IRQ_DISABLE = (1 << 10),
135 PDC_RESET = (1 << 11), /* HDMA reset */ 135 PDC_RESET = (1 << 11), /* HDMA reset */
136 136
137 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | 137 PDC_COMMON_FLAGS = ATA_FLAG_PIO_POLLING,
138 ATA_FLAG_MMIO |
139 ATA_FLAG_PIO_POLLING,
140 138
141 /* ap->flags bits */ 139 /* ap->flags bits */
142 PDC_FLAG_GEN_II = (1 << 24), 140 PDC_FLAG_GEN_II = (1 << 24),
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index daeebf19a6a..c5603265fa5 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -155,8 +155,7 @@ static struct ata_port_operations qs_ata_ops = {
155static const struct ata_port_info qs_port_info[] = { 155static const struct ata_port_info qs_port_info[] = {
156 /* board_2068_idx */ 156 /* board_2068_idx */
157 { 157 {
158 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 158 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
159 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
160 .pio_mask = ATA_PIO4_ONLY, 159 .pio_mask = ATA_PIO4_ONLY,
161 .udma_mask = ATA_UDMA6, 160 .udma_mask = ATA_UDMA6,
162 .port_ops = &qs_ata_ops, 161 .port_ops = &qs_ata_ops,
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 3a4f8421971..b42edaaf3a5 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -61,8 +61,7 @@ enum {
61 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29), 61 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
62 SIL_FLAG_MOD15WRITE = (1 << 30), 62 SIL_FLAG_MOD15WRITE = (1 << 30),
63 63
64 SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 64 SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA,
65 ATA_FLAG_MMIO,
66 65
67 /* 66 /*
68 * Controller IDs 67 * Controller IDs
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index af41c6fd125..06c564e5505 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -244,8 +244,7 @@ enum {
244 BID_SIL3131 = 2, 244 BID_SIL3131 = 2,
245 245
246 /* host flags */ 246 /* host flags */
247 SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 247 SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
248 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
249 ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA | 248 ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
250 ATA_FLAG_AN | ATA_FLAG_PMP, 249 ATA_FLAG_AN | ATA_FLAG_PMP,
251 SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */ 250 SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 2bfe3ae0397..cdcc13e9cf5 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -96,7 +96,7 @@ static struct ata_port_operations sis_ops = {
96}; 96};
97 97
98static const struct ata_port_info sis_port_info = { 98static const struct ata_port_info sis_port_info = {
99 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 99 .flags = ATA_FLAG_SATA,
100 .pio_mask = ATA_PIO4, 100 .pio_mask = ATA_PIO4,
101 .mwdma_mask = ATA_MWDMA2, 101 .mwdma_mask = ATA_MWDMA2,
102 .udma_mask = ATA_UDMA6, 102 .udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 7d9db4aaf07..35eabcf3456 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -359,8 +359,7 @@ static struct ata_port_operations k2_sata_ops = {
359static const struct ata_port_info k2_port_info[] = { 359static const struct ata_port_info k2_port_info[] = {
360 /* chip_svw4 */ 360 /* chip_svw4 */
361 { 361 {
362 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 362 .flags = ATA_FLAG_SATA | K2_FLAG_NO_ATAPI_DMA,
363 ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA,
364 .pio_mask = ATA_PIO4, 363 .pio_mask = ATA_PIO4,
365 .mwdma_mask = ATA_MWDMA2, 364 .mwdma_mask = ATA_MWDMA2,
366 .udma_mask = ATA_UDMA6, 365 .udma_mask = ATA_UDMA6,
@@ -368,8 +367,7 @@ static const struct ata_port_info k2_port_info[] = {
368 }, 367 },
369 /* chip_svw8 */ 368 /* chip_svw8 */
370 { 369 {
371 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 370 .flags = ATA_FLAG_SATA | K2_FLAG_NO_ATAPI_DMA |
372 ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA |
373 K2_FLAG_SATA_8_PORTS, 371 K2_FLAG_SATA_8_PORTS,
374 .pio_mask = ATA_PIO4, 372 .pio_mask = ATA_PIO4,
375 .mwdma_mask = ATA_MWDMA2, 373 .mwdma_mask = ATA_MWDMA2,
@@ -378,8 +376,7 @@ static const struct ata_port_info k2_port_info[] = {
378 }, 376 },
379 /* chip_svw42 */ 377 /* chip_svw42 */
380 { 378 {
381 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 379 .flags = ATA_FLAG_SATA | K2_FLAG_BAR_POS_3,
382 ATA_FLAG_MMIO | K2_FLAG_BAR_POS_3,
383 .pio_mask = ATA_PIO4, 380 .pio_mask = ATA_PIO4,
384 .mwdma_mask = ATA_MWDMA2, 381 .mwdma_mask = ATA_MWDMA2,
385 .udma_mask = ATA_UDMA6, 382 .udma_mask = ATA_UDMA6,
@@ -387,8 +384,7 @@ static const struct ata_port_info k2_port_info[] = {
387 }, 384 },
388 /* chip_svw43 */ 385 /* chip_svw43 */
389 { 386 {
390 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 387 .flags = ATA_FLAG_SATA,
391 ATA_FLAG_MMIO,
392 .pio_mask = ATA_PIO4, 388 .pio_mask = ATA_PIO4,
393 .mwdma_mask = ATA_MWDMA2, 389 .mwdma_mask = ATA_MWDMA2,
394 .udma_mask = ATA_UDMA6, 390 .udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index bedd5188e5b..8fd3b7252bd 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -273,9 +273,8 @@ static struct ata_port_operations pdc_20621_ops = {
273static const struct ata_port_info pdc_port_info[] = { 273static const struct ata_port_info pdc_port_info[] = {
274 /* board_20621 */ 274 /* board_20621 */
275 { 275 {
276 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 276 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
277 ATA_FLAG_SRST | ATA_FLAG_MMIO | 277 ATA_FLAG_PIO_POLLING,
278 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
279 .pio_mask = ATA_PIO4, 278 .pio_mask = ATA_PIO4,
280 .mwdma_mask = ATA_MWDMA2, 279 .mwdma_mask = ATA_MWDMA2,
281 .udma_mask = ATA_UDMA6, 280 .udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index b8578c32d34..235be717a71 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -88,8 +88,7 @@ static struct ata_port_operations uli_ops = {
88}; 88};
89 89
90static const struct ata_port_info uli_port_info = { 90static const struct ata_port_info uli_port_info = {
91 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 91 .flags = ATA_FLAG_SATA | ATA_FLAG_IGN_SIMPLEX,
92 ATA_FLAG_IGN_SIMPLEX,
93 .pio_mask = ATA_PIO4, 92 .pio_mask = ATA_PIO4,
94 .udma_mask = ATA_UDMA6, 93 .udma_mask = ATA_UDMA6,
95 .port_ops = &uli_ops, 94 .port_ops = &uli_ops,
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 8b677bbf2d3..21242c5709a 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -148,7 +148,7 @@ static struct ata_port_operations vt8251_ops = {
148}; 148};
149 149
150static const struct ata_port_info vt6420_port_info = { 150static const struct ata_port_info vt6420_port_info = {
151 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 151 .flags = ATA_FLAG_SATA,
152 .pio_mask = ATA_PIO4, 152 .pio_mask = ATA_PIO4,
153 .mwdma_mask = ATA_MWDMA2, 153 .mwdma_mask = ATA_MWDMA2,
154 .udma_mask = ATA_UDMA6, 154 .udma_mask = ATA_UDMA6,
@@ -156,7 +156,7 @@ static const struct ata_port_info vt6420_port_info = {
156}; 156};
157 157
158static struct ata_port_info vt6421_sport_info = { 158static struct ata_port_info vt6421_sport_info = {
159 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 159 .flags = ATA_FLAG_SATA,
160 .pio_mask = ATA_PIO4, 160 .pio_mask = ATA_PIO4,
161 .mwdma_mask = ATA_MWDMA2, 161 .mwdma_mask = ATA_MWDMA2,
162 .udma_mask = ATA_UDMA6, 162 .udma_mask = ATA_UDMA6,
@@ -164,7 +164,7 @@ static struct ata_port_info vt6421_sport_info = {
164}; 164};
165 165
166static struct ata_port_info vt6421_pport_info = { 166static struct ata_port_info vt6421_pport_info = {
167 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_LEGACY, 167 .flags = ATA_FLAG_SLAVE_POSS,
168 .pio_mask = ATA_PIO4, 168 .pio_mask = ATA_PIO4,
169 /* No MWDMA */ 169 /* No MWDMA */
170 .udma_mask = ATA_UDMA6, 170 .udma_mask = ATA_UDMA6,
@@ -172,8 +172,7 @@ static struct ata_port_info vt6421_pport_info = {
172}; 172};
173 173
174static struct ata_port_info vt8251_port_info = { 174static struct ata_port_info vt8251_port_info = {
175 .flags = ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS | 175 .flags = ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS,
176 ATA_FLAG_NO_LEGACY,
177 .pio_mask = ATA_PIO4, 176 .pio_mask = ATA_PIO4,
178 .mwdma_mask = ATA_MWDMA2, 177 .mwdma_mask = ATA_MWDMA2,
179 .udma_mask = ATA_UDMA6, 178 .udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index e079cf29ed5..7c987371136 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -340,8 +340,7 @@ static int __devinit vsc_sata_init_one(struct pci_dev *pdev,
340 const struct pci_device_id *ent) 340 const struct pci_device_id *ent)
341{ 341{
342 static const struct ata_port_info pi = { 342 static const struct ata_port_info pi = {
343 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 343 .flags = ATA_FLAG_SATA,
344 ATA_FLAG_MMIO,
345 .pio_mask = ATA_PIO4, 344 .pio_mask = ATA_PIO4,
346 .mwdma_mask = ATA_MWDMA2, 345 .mwdma_mask = ATA_MWDMA2,
347 .udma_mask = ATA_UDMA6, 346 .udma_mask = ATA_UDMA6,
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index d7aa39e349a..9cb8668ff5f 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -120,6 +120,10 @@ static DEFINE_SPINLOCK(minor_lock);
120#define EXTENDED (1<<EXT_SHIFT) 120#define EXTENDED (1<<EXT_SHIFT)
121#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED)) 121#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
122#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED)) 122#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
123#define EMULATED_HD_DISK_MINOR_OFFSET (0)
124#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
125#define EMULATED_SD_DISK_MINOR_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET + (4 * 16))
126#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_HD_DISK_NAME_OFFSET + 4)
123 127
124#define DEV_NAME "xvd" /* name in /dev */ 128#define DEV_NAME "xvd" /* name in /dev */
125 129
@@ -281,7 +285,7 @@ static int blkif_queue_request(struct request *req)
281 info->shadow[id].request = req; 285 info->shadow[id].request = req;
282 286
283 ring_req->id = id; 287 ring_req->id = id;
284 ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req); 288 ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
285 ring_req->handle = info->handle; 289 ring_req->handle = info->handle;
286 290
287 ring_req->operation = rq_data_dir(req) ? 291 ring_req->operation = rq_data_dir(req) ?
@@ -317,7 +321,7 @@ static int blkif_queue_request(struct request *req)
317 rq_data_dir(req) ); 321 rq_data_dir(req) );
318 322
319 info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); 323 info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
320 ring_req->seg[i] = 324 ring_req->u.rw.seg[i] =
321 (struct blkif_request_segment) { 325 (struct blkif_request_segment) {
322 .gref = ref, 326 .gref = ref,
323 .first_sect = fsect, 327 .first_sect = fsect,
@@ -434,6 +438,65 @@ static void xlvbd_flush(struct blkfront_info *info)
434 info->feature_flush ? "enabled" : "disabled"); 438 info->feature_flush ? "enabled" : "disabled");
435} 439}
436 440
441static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
442{
443 int major;
444 major = BLKIF_MAJOR(vdevice);
445 *minor = BLKIF_MINOR(vdevice);
446 switch (major) {
447 case XEN_IDE0_MAJOR:
448 *offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
449 *minor = ((*minor / 64) * PARTS_PER_DISK) +
450 EMULATED_HD_DISK_MINOR_OFFSET;
451 break;
452 case XEN_IDE1_MAJOR:
453 *offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
454 *minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
455 EMULATED_HD_DISK_MINOR_OFFSET;
456 break;
457 case XEN_SCSI_DISK0_MAJOR:
458 *offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
459 *minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
460 break;
461 case XEN_SCSI_DISK1_MAJOR:
462 case XEN_SCSI_DISK2_MAJOR:
463 case XEN_SCSI_DISK3_MAJOR:
464 case XEN_SCSI_DISK4_MAJOR:
465 case XEN_SCSI_DISK5_MAJOR:
466 case XEN_SCSI_DISK6_MAJOR:
467 case XEN_SCSI_DISK7_MAJOR:
468 *offset = (*minor / PARTS_PER_DISK) +
469 ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
470 EMULATED_SD_DISK_NAME_OFFSET;
471 *minor = *minor +
472 ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
473 EMULATED_SD_DISK_MINOR_OFFSET;
474 break;
475 case XEN_SCSI_DISK8_MAJOR:
476 case XEN_SCSI_DISK9_MAJOR:
477 case XEN_SCSI_DISK10_MAJOR:
478 case XEN_SCSI_DISK11_MAJOR:
479 case XEN_SCSI_DISK12_MAJOR:
480 case XEN_SCSI_DISK13_MAJOR:
481 case XEN_SCSI_DISK14_MAJOR:
482 case XEN_SCSI_DISK15_MAJOR:
483 *offset = (*minor / PARTS_PER_DISK) +
484 ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
485 EMULATED_SD_DISK_NAME_OFFSET;
486 *minor = *minor +
487 ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
488 EMULATED_SD_DISK_MINOR_OFFSET;
489 break;
490 case XENVBD_MAJOR:
491 *offset = *minor / PARTS_PER_DISK;
492 break;
493 default:
494 printk(KERN_WARNING "blkfront: your disk configuration is "
495 "incorrect, please use an xvd device instead\n");
496 return -ENODEV;
497 }
498 return 0;
499}
437 500
438static int xlvbd_alloc_gendisk(blkif_sector_t capacity, 501static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
439 struct blkfront_info *info, 502 struct blkfront_info *info,
@@ -441,7 +504,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
441{ 504{
442 struct gendisk *gd; 505 struct gendisk *gd;
443 int nr_minors = 1; 506 int nr_minors = 1;
444 int err = -ENODEV; 507 int err;
445 unsigned int offset; 508 unsigned int offset;
446 int minor; 509 int minor;
447 int nr_parts; 510 int nr_parts;
@@ -456,12 +519,20 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
456 } 519 }
457 520
458 if (!VDEV_IS_EXTENDED(info->vdevice)) { 521 if (!VDEV_IS_EXTENDED(info->vdevice)) {
459 minor = BLKIF_MINOR(info->vdevice); 522 err = xen_translate_vdev(info->vdevice, &minor, &offset);
460 nr_parts = PARTS_PER_DISK; 523 if (err)
524 return err;
525 nr_parts = PARTS_PER_DISK;
461 } else { 526 } else {
462 minor = BLKIF_MINOR_EXT(info->vdevice); 527 minor = BLKIF_MINOR_EXT(info->vdevice);
463 nr_parts = PARTS_PER_EXT_DISK; 528 nr_parts = PARTS_PER_EXT_DISK;
529 offset = minor / nr_parts;
530 if (xen_hvm_domain() && offset <= EMULATED_HD_DISK_NAME_OFFSET + 4)
531 printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
532 "emulated IDE disks,\n\t choose an xvd device name"
533 "from xvde on\n", info->vdevice);
464 } 534 }
535 err = -ENODEV;
465 536
466 if ((minor % nr_parts) == 0) 537 if ((minor % nr_parts) == 0)
467 nr_minors = nr_parts; 538 nr_minors = nr_parts;
@@ -475,8 +546,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
475 if (gd == NULL) 546 if (gd == NULL)
476 goto release; 547 goto release;
477 548
478 offset = minor / nr_parts;
479
480 if (nr_minors > 1) { 549 if (nr_minors > 1) {
481 if (offset < 26) 550 if (offset < 26)
482 sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset); 551 sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
@@ -615,7 +684,7 @@ static void blkif_completion(struct blk_shadow *s)
615{ 684{
616 int i; 685 int i;
617 for (i = 0; i < s->req.nr_segments; i++) 686 for (i = 0; i < s->req.nr_segments; i++)
618 gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL); 687 gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
619} 688}
620 689
621static irqreturn_t blkif_interrupt(int irq, void *dev_id) 690static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@@ -932,7 +1001,7 @@ static int blkif_recover(struct blkfront_info *info)
932 /* Rewrite any grant references invalidated by susp/resume. */ 1001 /* Rewrite any grant references invalidated by susp/resume. */
933 for (j = 0; j < req->nr_segments; j++) 1002 for (j = 0; j < req->nr_segments; j++)
934 gnttab_grant_foreign_access_ref( 1003 gnttab_grant_foreign_access_ref(
935 req->seg[j].gref, 1004 req->u.rw.seg[j].gref,
936 info->xbdev->otherend_id, 1005 info->xbdev->otherend_id,
937 pfn_to_mfn(info->shadow[req->id].frame[j]), 1006 pfn_to_mfn(info->shadow[req->id].frame[j]),
938 rq_data_dir(info->shadow[req->id].request)); 1007 rq_data_dir(info->shadow[req->id].request));
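
To make the xen_translate_vdev() arithmetic above concrete, here is a small, hedged userspace sketch of just the XEN_IDE0_MAJOR branch; the two macros are taken from the hunk above, while PARTS_PER_DISK = 16 is an assumed value (the per-disk minor range xen-blkfront uses for non-extended devices). An emulated hdb (IDE0 major, minor 64) comes out as name offset 1, i.e. xvdb, with its xvd minors starting at 16:

#include <stdio.h>

/* Assumed value: xen-blkfront's per-disk minor range for non-extended devices. */
#define PARTS_PER_DISK			16
#define EMULATED_HD_DISK_MINOR_OFFSET	(0)
#define EMULATED_HD_DISK_NAME_OFFSET	(EMULATED_HD_DISK_MINOR_OFFSET / 256)

int main(void)
{
	int minor = 64;			/* emulated hdb: IDE0 major, minor 64 */
	unsigned int offset;

	/* Same arithmetic as the XEN_IDE0_MAJOR case above. */
	offset = (minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
	minor = ((minor / 64) * PARTS_PER_DISK) + EMULATED_HD_DISK_MINOR_OFFSET;

	/* Mirrors the later sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset). */
	printf("xvd%c, first minor %d\n", 'a' + offset, minor);	/* xvdb, 16 */
	return 0;
}
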
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index e6d75627c6c..33dc2298af7 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -53,6 +53,8 @@ MODULE_LICENSE("GPL");
53 53
54#define RTC_BITS 55 /* 55 bits for this implementation */ 54#define RTC_BITS 55 /* 55 bits for this implementation */
55 55
56static struct k_clock sgi_clock;
57
56extern unsigned long sn_rtc_cycles_per_second; 58extern unsigned long sn_rtc_cycles_per_second;
57 59
58#define RTC_COUNTER_ADDR ((long *)LOCAL_MMR_ADDR(SH_RTC)) 60#define RTC_COUNTER_ADDR ((long *)LOCAL_MMR_ADDR(SH_RTC))
@@ -487,7 +489,7 @@ static int sgi_clock_get(clockid_t clockid, struct timespec *tp)
487 return 0; 489 return 0;
488}; 490};
489 491
490static int sgi_clock_set(clockid_t clockid, struct timespec *tp) 492static int sgi_clock_set(const clockid_t clockid, const struct timespec *tp)
491{ 493{
492 494
493 u64 nsec; 495 u64 nsec;
@@ -763,15 +765,21 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
763 return err; 765 return err;
764} 766}
765 767
768static int sgi_clock_getres(const clockid_t which_clock, struct timespec *tp)
769{
770 tp->tv_sec = 0;
771 tp->tv_nsec = sgi_clock_period;
772 return 0;
773}
774
766static struct k_clock sgi_clock = { 775static struct k_clock sgi_clock = {
767 .res = 0, 776 .clock_set = sgi_clock_set,
768 .clock_set = sgi_clock_set, 777 .clock_get = sgi_clock_get,
769 .clock_get = sgi_clock_get, 778 .clock_getres = sgi_clock_getres,
770 .timer_create = sgi_timer_create, 779 .timer_create = sgi_timer_create,
771 .nsleep = do_posix_clock_nonanosleep, 780 .timer_set = sgi_timer_set,
772 .timer_set = sgi_timer_set, 781 .timer_del = sgi_timer_del,
773 .timer_del = sgi_timer_del, 782 .timer_get = sgi_timer_get
774 .timer_get = sgi_timer_get
775}; 783};
776 784
777/** 785/**
@@ -831,8 +839,8 @@ static int __init mmtimer_init(void)
831 (unsigned long) node); 839 (unsigned long) node);
832 } 840 }
833 841
834 sgi_clock_period = sgi_clock.res = NSEC_PER_SEC / sn_rtc_cycles_per_second; 842 sgi_clock_period = NSEC_PER_SEC / sn_rtc_cycles_per_second;
835 register_posix_clock(CLOCK_SGI_CYCLE, &sgi_clock); 843 posix_timers_register_clock(CLOCK_SGI_CYCLE, &sgi_clock);
836 844
837 printk(KERN_INFO "%s: v%s, %ld MHz\n", MMTIMER_DESC, MMTIMER_VERSION, 845 printk(KERN_INFO "%s: v%s, %ld MHz\n", MMTIMER_DESC, MMTIMER_VERSION,
838 sn_rtc_cycles_per_second/(unsigned long)1E6); 846 sn_rtc_cycles_per_second/(unsigned long)1E6);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 526bfbf6961..94284c8473b 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -81,8 +81,6 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
81 */ 81 */
82static DEFINE_MUTEX(dbs_mutex); 82static DEFINE_MUTEX(dbs_mutex);
83 83
84static struct workqueue_struct *kconservative_wq;
85
86static struct dbs_tuners { 84static struct dbs_tuners {
87 unsigned int sampling_rate; 85 unsigned int sampling_rate;
88 unsigned int sampling_down_factor; 86 unsigned int sampling_down_factor;
@@ -560,7 +558,7 @@ static void do_dbs_timer(struct work_struct *work)
560 558
561 dbs_check_cpu(dbs_info); 559 dbs_check_cpu(dbs_info);
562 560
563 queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay); 561 schedule_delayed_work_on(cpu, &dbs_info->work, delay);
564 mutex_unlock(&dbs_info->timer_mutex); 562 mutex_unlock(&dbs_info->timer_mutex);
565} 563}
566 564
@@ -572,8 +570,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
572 570
573 dbs_info->enable = 1; 571 dbs_info->enable = 1;
574 INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); 572 INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
575 queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work, 573 schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
576 delay);
577} 574}
578 575
579static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) 576static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -716,25 +713,12 @@ struct cpufreq_governor cpufreq_gov_conservative = {
716 713
717static int __init cpufreq_gov_dbs_init(void) 714static int __init cpufreq_gov_dbs_init(void)
718{ 715{
719 int err; 716 return cpufreq_register_governor(&cpufreq_gov_conservative);
720
721 kconservative_wq = create_workqueue("kconservative");
722 if (!kconservative_wq) {
723 printk(KERN_ERR "Creation of kconservative failed\n");
724 return -EFAULT;
725 }
726
727 err = cpufreq_register_governor(&cpufreq_gov_conservative);
728 if (err)
729 destroy_workqueue(kconservative_wq);
730
731 return err;
732} 717}
733 718
734static void __exit cpufreq_gov_dbs_exit(void) 719static void __exit cpufreq_gov_dbs_exit(void)
735{ 720{
736 cpufreq_unregister_governor(&cpufreq_gov_conservative); 721 cpufreq_unregister_governor(&cpufreq_gov_conservative);
737 destroy_workqueue(kconservative_wq);
738} 722}
739 723
740 724
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index c631f27a3dc..58aa85ea5ec 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -104,8 +104,6 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
104 */ 104 */
105static DEFINE_MUTEX(dbs_mutex); 105static DEFINE_MUTEX(dbs_mutex);
106 106
107static struct workqueue_struct *kondemand_wq;
108
109static struct dbs_tuners { 107static struct dbs_tuners {
110 unsigned int sampling_rate; 108 unsigned int sampling_rate;
111 unsigned int up_threshold; 109 unsigned int up_threshold;
@@ -667,7 +665,7 @@ static void do_dbs_timer(struct work_struct *work)
667 __cpufreq_driver_target(dbs_info->cur_policy, 665 __cpufreq_driver_target(dbs_info->cur_policy,
668 dbs_info->freq_lo, CPUFREQ_RELATION_H); 666 dbs_info->freq_lo, CPUFREQ_RELATION_H);
669 } 667 }
670 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); 668 schedule_delayed_work_on(cpu, &dbs_info->work, delay);
671 mutex_unlock(&dbs_info->timer_mutex); 669 mutex_unlock(&dbs_info->timer_mutex);
672} 670}
673 671
@@ -681,8 +679,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
681 679
682 dbs_info->sample_type = DBS_NORMAL_SAMPLE; 680 dbs_info->sample_type = DBS_NORMAL_SAMPLE;
683 INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); 681 INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
684 queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work, 682 schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
685 delay);
686} 683}
687 684
688static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) 685static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -814,7 +811,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
814 811
815static int __init cpufreq_gov_dbs_init(void) 812static int __init cpufreq_gov_dbs_init(void)
816{ 813{
817 int err;
818 cputime64_t wall; 814 cputime64_t wall;
819 u64 idle_time; 815 u64 idle_time;
820 int cpu = get_cpu(); 816 int cpu = get_cpu();
@@ -838,22 +834,12 @@ static int __init cpufreq_gov_dbs_init(void)
838 MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); 834 MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
839 } 835 }
840 836
841 kondemand_wq = create_workqueue("kondemand"); 837 return cpufreq_register_governor(&cpufreq_gov_ondemand);
842 if (!kondemand_wq) {
843 printk(KERN_ERR "Creation of kondemand failed\n");
844 return -EFAULT;
845 }
846 err = cpufreq_register_governor(&cpufreq_gov_ondemand);
847 if (err)
848 destroy_workqueue(kondemand_wq);
849
850 return err;
851} 838}
852 839
853static void __exit cpufreq_gov_dbs_exit(void) 840static void __exit cpufreq_gov_dbs_exit(void)
854{ 841{
855 cpufreq_unregister_governor(&cpufreq_gov_ondemand); 842 cpufreq_unregister_governor(&cpufreq_gov_ondemand);
856 destroy_workqueue(kondemand_wq);
857} 843}
858 844
859 845
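Both governor diffs follow the same conversion: the private kconservative/kondemand workqueues go away, the deferrable delayed work is queued on the system workqueue with schedule_delayed_work_on(), and module init/exit shrink to a bare register/unregister of the governor. A rough sketch of that shape follows; the dbs-style names are placeholders, not the governors' real structures.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_dbs_info {
        int cpu;
        struct delayed_work work;
};

static void demo_dbs_timer(struct work_struct *work)
{
        struct demo_dbs_info *info = container_of(work, struct demo_dbs_info,
                                                  work.work);
        unsigned long delay = usecs_to_jiffies(10000);

        /* ... sample the CPU load and pick a target frequency ... */

        /* Re-arm on the same CPU, but on the shared system workqueue. */
        schedule_delayed_work_on(info->cpu, &info->work, delay);
}

static void demo_dbs_timer_init(struct demo_dbs_info *info)
{
        unsigned long delay = usecs_to_jiffies(10000);

        INIT_DELAYED_WORK_DEFERRABLE(&info->work, demo_dbs_timer);
        schedule_delayed_work_on(info->cpu, &info->work, delay);
}

static void demo_dbs_timer_exit(struct demo_dbs_info *info)
{
        cancel_delayed_work_sync(&info->work);
}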
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 61653f07967..1b46a9d9f90 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -330,9 +330,7 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
330 i2c->adap = ocores_adapter; 330 i2c->adap = ocores_adapter;
331 i2c_set_adapdata(&i2c->adap, i2c); 331 i2c_set_adapdata(&i2c->adap, i2c);
332 i2c->adap.dev.parent = &pdev->dev; 332 i2c->adap.dev.parent = &pdev->dev;
333#ifdef CONFIG_OF
334 i2c->adap.dev.of_node = pdev->dev.of_node; 333 i2c->adap.dev.of_node = pdev->dev.of_node;
335#endif
336 334
337 /* add i2c adapter to i2c tree */ 335 /* add i2c adapter to i2c tree */
338 ret = i2c_add_adapter(&i2c->adap); 336 ret = i2c_add_adapter(&i2c->adap);
@@ -390,15 +388,11 @@ static int ocores_i2c_resume(struct platform_device *pdev)
390#define ocores_i2c_resume NULL 388#define ocores_i2c_resume NULL
391#endif 389#endif
392 390
393#ifdef CONFIG_OF
394static struct of_device_id ocores_i2c_match[] = { 391static struct of_device_id ocores_i2c_match[] = {
395 { 392 { .compatible = "opencores,i2c-ocores", },
396 .compatible = "opencores,i2c-ocores", 393 {},
397 },
398 {},
399}; 394};
400MODULE_DEVICE_TABLE(of, ocores_i2c_match); 395MODULE_DEVICE_TABLE(of, ocores_i2c_match);
401#endif
402 396
403/* work with hotplug and coldplug */ 397/* work with hotplug and coldplug */
404MODULE_ALIAS("platform:ocores-i2c"); 398MODULE_ALIAS("platform:ocores-i2c");
@@ -411,9 +405,7 @@ static struct platform_driver ocores_i2c_driver = {
411 .driver = { 405 .driver = {
412 .owner = THIS_MODULE, 406 .owner = THIS_MODULE,
413 .name = "ocores-i2c", 407 .name = "ocores-i2c",
414#ifdef CONFIG_OF 408 .of_match_table = ocores_i2c_match,
415 .of_match_table = ocores_i2c_match,
416#endif
417 }, 409 },
418}; 410};
419 411
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index f0bd5bcdf56..045ba6efea4 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -537,9 +537,7 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
537 client->dev.parent = &client->adapter->dev; 537 client->dev.parent = &client->adapter->dev;
538 client->dev.bus = &i2c_bus_type; 538 client->dev.bus = &i2c_bus_type;
539 client->dev.type = &i2c_client_type; 539 client->dev.type = &i2c_client_type;
540#ifdef CONFIG_OF
541 client->dev.of_node = info->of_node; 540 client->dev.of_node = info->of_node;
542#endif
543 541
544 dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), 542 dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
545 client->addr); 543 client->addr);
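The i2c-ocores and i2c-core hunks drop the CONFIG_OF guards around the of_node assignments, relying on the field being present unconditionally in this kernel series (it is simply NULL when there is no device tree). A sketch of the resulting probe-side propagation, with everything except the of_node handling invented for illustration:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_i2c {
        struct i2c_adapter adap;
};

static int demo_i2c_probe(struct platform_device *pdev)
{
        struct demo_i2c *i2c;

        i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
        if (!i2c)
                return -ENOMEM;

        strlcpy(i2c->adap.name, "demo-i2c", sizeof(i2c->adap.name));
        i2c->adap.owner = THIS_MODULE;
        i2c->adap.dev.parent = &pdev->dev;
        /* No #ifdef CONFIG_OF: dev.of_node always exists here, and child
         * i2c clients instantiated from the tree inherit it. */
        i2c->adap.dev.of_node = pdev->dev.of_node;

        return i2c_add_adapter(&i2c->adap);
}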
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
index c8c136cf7bb..43031492d73 100644
--- a/drivers/input/touchscreen/tps6507x-ts.c
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -43,7 +43,6 @@ struct tps6507x_ts {
43 struct input_dev *input_dev; 43 struct input_dev *input_dev;
44 struct device *dev; 44 struct device *dev;
45 char phys[32]; 45 char phys[32];
46 struct workqueue_struct *wq;
47 struct delayed_work work; 46 struct delayed_work work;
48 unsigned polling; /* polling is active */ 47 unsigned polling; /* polling is active */
49 struct ts_event tc; 48 struct ts_event tc;
@@ -220,8 +219,8 @@ done:
220 poll = 1; 219 poll = 1;
221 220
222 if (poll) { 221 if (poll) {
223 schd = queue_delayed_work(tsc->wq, &tsc->work, 222 schd = schedule_delayed_work(&tsc->work,
224 msecs_to_jiffies(tsc->poll_period)); 223 msecs_to_jiffies(tsc->poll_period));
225 if (schd) 224 if (schd)
226 tsc->polling = 1; 225 tsc->polling = 1;
227 else { 226 else {
@@ -303,7 +302,6 @@ static int tps6507x_ts_probe(struct platform_device *pdev)
303 tsc->input_dev = input_dev; 302 tsc->input_dev = input_dev;
304 303
305 INIT_DELAYED_WORK(&tsc->work, tps6507x_ts_handler); 304 INIT_DELAYED_WORK(&tsc->work, tps6507x_ts_handler);
306 tsc->wq = create_workqueue("TPS6507x Touchscreen");
307 305
308 if (init_data) { 306 if (init_data) {
309 tsc->poll_period = init_data->poll_period; 307 tsc->poll_period = init_data->poll_period;
@@ -325,8 +323,8 @@ static int tps6507x_ts_probe(struct platform_device *pdev)
325 if (error) 323 if (error)
326 goto err2; 324 goto err2;
327 325
328 schd = queue_delayed_work(tsc->wq, &tsc->work, 326 schd = schedule_delayed_work(&tsc->work,
329 msecs_to_jiffies(tsc->poll_period)); 327 msecs_to_jiffies(tsc->poll_period));
330 328
331 if (schd) 329 if (schd)
332 tsc->polling = 1; 330 tsc->polling = 1;
@@ -341,7 +339,6 @@ static int tps6507x_ts_probe(struct platform_device *pdev)
341 339
342err2: 340err2:
343 cancel_delayed_work_sync(&tsc->work); 341 cancel_delayed_work_sync(&tsc->work);
344 destroy_workqueue(tsc->wq);
345 input_free_device(input_dev); 342 input_free_device(input_dev);
346err1: 343err1:
347 kfree(tsc); 344 kfree(tsc);
@@ -357,7 +354,6 @@ static int __devexit tps6507x_ts_remove(struct platform_device *pdev)
357 struct input_dev *input_dev = tsc->input_dev; 354 struct input_dev *input_dev = tsc->input_dev;
358 355
359 cancel_delayed_work_sync(&tsc->work); 356 cancel_delayed_work_sync(&tsc->work);
360 destroy_workqueue(tsc->wq);
361 357
362 input_unregister_device(input_dev); 358 input_unregister_device(input_dev);
363 359
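The touchscreen conversion is the same workqueue removal seen elsewhere in this series, and it also shows the teardown side: once the poll re-arms itself with schedule_delayed_work(), remove() only needs cancel_delayed_work_sync() and there is no private queue left to destroy. A self-rearming poller with that teardown looks roughly like the sketch below (assumed names, not the TPS6507x driver itself):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_poller {
        struct delayed_work work;
        unsigned int period_ms;
};

static void demo_poll(struct work_struct *work)
{
        struct demo_poller *p = container_of(work, struct demo_poller,
                                             work.work);

        /* ... read the hardware, report input events ... */

        schedule_delayed_work(&p->work, msecs_to_jiffies(p->period_ms));
}

static void demo_poller_start(struct demo_poller *p)
{
        INIT_DELAYED_WORK(&p->work, demo_poll);
        schedule_delayed_work(&p->work, msecs_to_jiffies(p->period_ms));
}

static void demo_poller_stop(struct demo_poller *p)
{
        /* cancel_delayed_work_sync() also copes with work that re-queues
         * itself, so no dedicated workqueue has to be flushed or destroyed. */
        cancel_delayed_work_sync(&p->work);
}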
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 818313e277e..d5ad7723b17 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7361,7 +7361,7 @@ static int __init md_init(void)
7361{ 7361{
7362 int ret = -ENOMEM; 7362 int ret = -ENOMEM;
7363 7363
7364 md_wq = alloc_workqueue("md", WQ_RESCUER, 0); 7364 md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
7365 if (!md_wq) 7365 if (!md_wq)
7366 goto err_wq; 7366 goto err_wq;
7367 7367
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index a0421efe04c..8a5b2d8f4da 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -84,7 +84,8 @@ int i2o_driver_register(struct i2o_driver *drv)
84 osm_debug("Register driver %s\n", drv->name); 84 osm_debug("Register driver %s\n", drv->name);
85 85
86 if (drv->event) { 86 if (drv->event) {
87 drv->event_queue = create_workqueue(drv->name); 87 drv->event_queue = alloc_workqueue(drv->name,
88 WQ_MEM_RECLAIM, 1);
88 if (!drv->event_queue) { 89 if (!drv->event_queue) {
89 osm_err("Could not initialize event queue for driver " 90 osm_err("Could not initialize event queue for driver "
90 "%s\n", drv->name); 91 "%s\n", drv->name);
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h
index 740ff0738ea..620973ed8bf 100644
--- a/drivers/misc/iwmc3200top/iwmc3200top.h
+++ b/drivers/misc/iwmc3200top/iwmc3200top.h
@@ -183,9 +183,7 @@ struct iwmct_priv {
183 u32 barker; 183 u32 barker;
184 struct iwmct_dbg dbg; 184 struct iwmct_dbg dbg;
185 185
186 /* drivers work queue */ 186 /* drivers work items */
187 struct workqueue_struct *wq;
188 struct workqueue_struct *bus_rescan_wq;
189 struct work_struct bus_rescan_worker; 187 struct work_struct bus_rescan_worker;
190 struct work_struct isr_worker; 188 struct work_struct isr_worker;
191 189
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
index c73cef2c3c5..727af07f1fb 100644
--- a/drivers/misc/iwmc3200top/main.c
+++ b/drivers/misc/iwmc3200top/main.c
@@ -89,7 +89,7 @@ static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
89 switch (msg->hdr.opcode) { 89 switch (msg->hdr.opcode) {
90 case OP_OPR_ALIVE: 90 case OP_OPR_ALIVE:
91 LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n"); 91 LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n");
92 queue_work(priv->bus_rescan_wq, &priv->bus_rescan_worker); 92 schedule_work(&priv->bus_rescan_worker);
93 break; 93 break;
94 default: 94 default:
95 LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n", 95 LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n",
@@ -360,7 +360,7 @@ static void iwmct_irq(struct sdio_func *func)
360 /* clear the function's interrupt request bit (write 1 to clear) */ 360 /* clear the function's interrupt request bit (write 1 to clear) */
361 sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret); 361 sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
362 362
363 queue_work(priv->wq, &priv->isr_worker); 363 schedule_work(&priv->isr_worker);
364 364
365 LOG_TRACE(priv, IRQ, "exit iwmct_irq\n"); 365 LOG_TRACE(priv, IRQ, "exit iwmct_irq\n");
366 366
@@ -506,10 +506,6 @@ static int iwmct_probe(struct sdio_func *func,
506 priv->func = func; 506 priv->func = func;
507 sdio_set_drvdata(func, priv); 507 sdio_set_drvdata(func, priv);
508 508
509
510 /* create drivers work queue */
511 priv->wq = create_workqueue(DRV_NAME "_wq");
512 priv->bus_rescan_wq = create_workqueue(DRV_NAME "_rescan_wq");
513 INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker); 509 INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker);
514 INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker); 510 INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker);
515 511
@@ -604,9 +600,9 @@ static void iwmct_remove(struct sdio_func *func)
604 sdio_release_irq(func); 600 sdio_release_irq(func);
605 sdio_release_host(func); 601 sdio_release_host(func);
606 602
607 /* Safely destroy osc workqueue */ 603 /* Make sure works are finished */
608 destroy_workqueue(priv->bus_rescan_wq); 604 flush_work_sync(&priv->bus_rescan_worker);
609 destroy_workqueue(priv->wq); 605 flush_work_sync(&priv->isr_worker);
610 606
611 sdio_claim_host(func); 607 sdio_claim_host(func);
612 sdio_disable_func(func); 608 sdio_disable_func(func);
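For iwmc3200top the two private queues disappear entirely: the ISR and rescan handlers are queued on the system workqueue with schedule_work(), and remove() only has to wait for them with flush_work_sync(). The lifecycle reduces to something like the sketch below (handler names invented, the flush pattern taken from the hunk above):

#include <linux/workqueue.h>

struct demo_priv {
        struct work_struct isr_work;
        struct work_struct rescan_work;
};

static void demo_isr_work(struct work_struct *work)
{
        /* ... drain the device's interrupt status ... */
}

static void demo_rescan_work(struct work_struct *work)
{
        /* ... re-enumerate the bus ... */
}

static void demo_probe_setup(struct demo_priv *priv)
{
        INIT_WORK(&priv->isr_work, demo_isr_work);
        INIT_WORK(&priv->rescan_work, demo_rescan_work);
}

static void demo_irq(struct demo_priv *priv)
{
        schedule_work(&priv->isr_work);  /* system workqueue, no private queue */
}

static void demo_remove(struct demo_priv *priv)
{
        /* Wait for any queued or running instances instead of destroying
         * a dedicated workqueue. */
        flush_work_sync(&priv->isr_work);
        flush_work_sync(&priv->rescan_work);
}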
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index fd877f633dd..2f7fc0c5146 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1516,21 +1516,17 @@ static int __devexit mmc_spi_remove(struct spi_device *spi)
1516 return 0; 1516 return 0;
1517} 1517}
1518 1518
1519#if defined(CONFIG_OF)
1520static struct of_device_id mmc_spi_of_match_table[] __devinitdata = { 1519static struct of_device_id mmc_spi_of_match_table[] __devinitdata = {
1521 { .compatible = "mmc-spi-slot", }, 1520 { .compatible = "mmc-spi-slot", },
1522 {}, 1521 {},
1523}; 1522};
1524#endif
1525 1523
1526static struct spi_driver mmc_spi_driver = { 1524static struct spi_driver mmc_spi_driver = {
1527 .driver = { 1525 .driver = {
1528 .name = "mmc_spi", 1526 .name = "mmc_spi",
1529 .bus = &spi_bus_type, 1527 .bus = &spi_bus_type,
1530 .owner = THIS_MODULE, 1528 .owner = THIS_MODULE,
1531#if defined(CONFIG_OF)
1532 .of_match_table = mmc_spi_of_match_table, 1529 .of_match_table = mmc_spi_of_match_table,
1533#endif
1534 }, 1530 },
1535 .probe = mmc_spi_probe, 1531 .probe = mmc_spi_probe,
1536 .remove = __devexit_p(mmc_spi_remove), 1532 .remove = __devexit_p(mmc_spi_remove),
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index b79d7e1555d..db0290f05bd 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -1163,15 +1163,11 @@ static int ethoc_resume(struct platform_device *pdev)
1163# define ethoc_resume NULL 1163# define ethoc_resume NULL
1164#endif 1164#endif
1165 1165
1166#ifdef CONFIG_OF
1167static struct of_device_id ethoc_match[] = { 1166static struct of_device_id ethoc_match[] = {
1168 { 1167 { .compatible = "opencores,ethoc", },
1169 .compatible = "opencores,ethoc",
1170 },
1171 {}, 1168 {},
1172}; 1169};
1173MODULE_DEVICE_TABLE(of, ethoc_match); 1170MODULE_DEVICE_TABLE(of, ethoc_match);
1174#endif
1175 1171
1176static struct platform_driver ethoc_driver = { 1172static struct platform_driver ethoc_driver = {
1177 .probe = ethoc_probe, 1173 .probe = ethoc_probe,
@@ -1181,9 +1177,7 @@ static struct platform_driver ethoc_driver = {
1181 .driver = { 1177 .driver = {
1182 .name = "ethoc", 1178 .name = "ethoc",
1183 .owner = THIS_MODULE, 1179 .owner = THIS_MODULE,
1184#ifdef CONFIG_OF
1185 .of_match_table = ethoc_match, 1180 .of_match_table = ethoc_match,
1186#endif
1187 }, 1181 },
1188}; 1182};
1189 1183
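mmc_spi and ethoc make the matching simplification on the driver side: the of_device_id table and the .of_match_table pointer no longer need CONFIG_OF guards in this series, and an unused table is just a little dead data on non-DT builds. A sketch of a platform driver written that way, with a placeholder compatible string and names:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>

static int demo_probe(struct platform_device *pdev)
{
        return 0;
}

static int demo_remove(struct platform_device *pdev)
{
        return 0;
}

static struct of_device_id demo_match[] = {
        { .compatible = "vendor,demo-device", },
        {},
};
MODULE_DEVICE_TABLE(of, demo_match);

static struct platform_driver demo_driver = {
        .probe  = demo_probe,
        .remove = demo_remove,
        .driver = {
                .name           = "demo-device",
                .owner          = THIS_MODULE,
                /* Builds the same with or without CONFIG_OF. */
                .of_match_table = demo_match,
        },
};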
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 61915f37141..471a52a2f8d 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -706,11 +706,10 @@ static void schedule_reset(struct ipw2100_priv *priv)
706 netif_stop_queue(priv->net_dev); 706 netif_stop_queue(priv->net_dev);
707 priv->status |= STATUS_RESET_PENDING; 707 priv->status |= STATUS_RESET_PENDING;
708 if (priv->reset_backoff) 708 if (priv->reset_backoff)
709 queue_delayed_work(priv->workqueue, &priv->reset_work, 709 schedule_delayed_work(&priv->reset_work,
710 priv->reset_backoff * HZ); 710 priv->reset_backoff * HZ);
711 else 711 else
712 queue_delayed_work(priv->workqueue, &priv->reset_work, 712 schedule_delayed_work(&priv->reset_work, 0);
713 0);
714 713
715 if (priv->reset_backoff < MAX_RESET_BACKOFF) 714 if (priv->reset_backoff < MAX_RESET_BACKOFF)
716 priv->reset_backoff++; 715 priv->reset_backoff++;
@@ -1474,7 +1473,7 @@ static int ipw2100_enable_adapter(struct ipw2100_priv *priv)
1474 1473
1475 if (priv->stop_hang_check) { 1474 if (priv->stop_hang_check) {
1476 priv->stop_hang_check = 0; 1475 priv->stop_hang_check = 0;
1477 queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2); 1476 schedule_delayed_work(&priv->hang_check, HZ / 2);
1478 } 1477 }
1479 1478
1480 fail_up: 1479 fail_up:
@@ -1808,8 +1807,8 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
1808 1807
1809 if (priv->stop_rf_kill) { 1808 if (priv->stop_rf_kill) {
1810 priv->stop_rf_kill = 0; 1809 priv->stop_rf_kill = 0;
1811 queue_delayed_work(priv->workqueue, &priv->rf_kill, 1810 schedule_delayed_work(&priv->rf_kill,
1812 round_jiffies_relative(HZ)); 1811 round_jiffies_relative(HZ));
1813 } 1812 }
1814 1813
1815 deferred = 1; 1814 deferred = 1;
@@ -2086,7 +2085,7 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
2086 priv->status |= STATUS_ASSOCIATING; 2085 priv->status |= STATUS_ASSOCIATING;
2087 priv->connect_start = get_seconds(); 2086 priv->connect_start = get_seconds();
2088 2087
2089 queue_delayed_work(priv->workqueue, &priv->wx_event_work, HZ / 10); 2088 schedule_delayed_work(&priv->wx_event_work, HZ / 10);
2090} 2089}
2091 2090
2092static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid, 2091static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
@@ -2166,9 +2165,9 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
2166 return; 2165 return;
2167 2166
2168 if (priv->status & STATUS_SECURITY_UPDATED) 2167 if (priv->status & STATUS_SECURITY_UPDATED)
2169 queue_delayed_work(priv->workqueue, &priv->security_work, 0); 2168 schedule_delayed_work(&priv->security_work, 0);
2170 2169
2171 queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0); 2170 schedule_delayed_work(&priv->wx_event_work, 0);
2172} 2171}
2173 2172
2174static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) 2173static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
@@ -2183,8 +2182,7 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
2183 /* Make sure the RF Kill check timer is running */ 2182 /* Make sure the RF Kill check timer is running */
2184 priv->stop_rf_kill = 0; 2183 priv->stop_rf_kill = 0;
2185 cancel_delayed_work(&priv->rf_kill); 2184 cancel_delayed_work(&priv->rf_kill);
2186 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2185 schedule_delayed_work(&priv->rf_kill, round_jiffies_relative(HZ));
2187 round_jiffies_relative(HZ));
2188} 2186}
2189 2187
2190static void send_scan_event(void *data) 2188static void send_scan_event(void *data)
@@ -2219,13 +2217,12 @@ static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
2219 /* Only userspace-requested scan completion events go out immediately */ 2217 /* Only userspace-requested scan completion events go out immediately */
2220 if (!priv->user_requested_scan) { 2218 if (!priv->user_requested_scan) {
2221 if (!delayed_work_pending(&priv->scan_event_later)) 2219 if (!delayed_work_pending(&priv->scan_event_later))
2222 queue_delayed_work(priv->workqueue, 2220 schedule_delayed_work(&priv->scan_event_later,
2223 &priv->scan_event_later, 2221 round_jiffies_relative(msecs_to_jiffies(4000)));
2224 round_jiffies_relative(msecs_to_jiffies(4000)));
2225 } else { 2222 } else {
2226 priv->user_requested_scan = 0; 2223 priv->user_requested_scan = 0;
2227 cancel_delayed_work(&priv->scan_event_later); 2224 cancel_delayed_work(&priv->scan_event_later);
2228 queue_work(priv->workqueue, &priv->scan_event_now); 2225 schedule_work(&priv->scan_event_now);
2229 } 2226 }
2230} 2227}
2231 2228
@@ -4329,8 +4326,8 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
4329 /* Make sure the RF_KILL check timer is running */ 4326 /* Make sure the RF_KILL check timer is running */
4330 priv->stop_rf_kill = 0; 4327 priv->stop_rf_kill = 0;
4331 cancel_delayed_work(&priv->rf_kill); 4328 cancel_delayed_work(&priv->rf_kill);
4332 queue_delayed_work(priv->workqueue, &priv->rf_kill, 4329 schedule_delayed_work(&priv->rf_kill,
4333 round_jiffies_relative(HZ)); 4330 round_jiffies_relative(HZ));
4334 } else 4331 } else
4335 schedule_reset(priv); 4332 schedule_reset(priv);
4336 } 4333 }
@@ -4461,20 +4458,17 @@ static void bd_queue_initialize(struct ipw2100_priv *priv,
4461 IPW_DEBUG_INFO("exit\n"); 4458 IPW_DEBUG_INFO("exit\n");
4462} 4459}
4463 4460
4464static void ipw2100_kill_workqueue(struct ipw2100_priv *priv) 4461static void ipw2100_kill_works(struct ipw2100_priv *priv)
4465{ 4462{
4466 if (priv->workqueue) { 4463 priv->stop_rf_kill = 1;
4467 priv->stop_rf_kill = 1; 4464 priv->stop_hang_check = 1;
4468 priv->stop_hang_check = 1; 4465 cancel_delayed_work_sync(&priv->reset_work);
4469 cancel_delayed_work(&priv->reset_work); 4466 cancel_delayed_work_sync(&priv->security_work);
4470 cancel_delayed_work(&priv->security_work); 4467 cancel_delayed_work_sync(&priv->wx_event_work);
4471 cancel_delayed_work(&priv->wx_event_work); 4468 cancel_delayed_work_sync(&priv->hang_check);
4472 cancel_delayed_work(&priv->hang_check); 4469 cancel_delayed_work_sync(&priv->rf_kill);
4473 cancel_delayed_work(&priv->rf_kill); 4470 cancel_work_sync(&priv->scan_event_now);
4474 cancel_delayed_work(&priv->scan_event_later); 4471 cancel_delayed_work_sync(&priv->scan_event_later);
4475 destroy_workqueue(priv->workqueue);
4476 priv->workqueue = NULL;
4477 }
4478} 4472}
4479 4473
4480static int ipw2100_tx_allocate(struct ipw2100_priv *priv) 4474static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
@@ -6046,7 +6040,7 @@ static void ipw2100_hang_check(struct work_struct *work)
6046 priv->last_rtc = rtc; 6040 priv->last_rtc = rtc;
6047 6041
6048 if (!priv->stop_hang_check) 6042 if (!priv->stop_hang_check)
6049 queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2); 6043 schedule_delayed_work(&priv->hang_check, HZ / 2);
6050 6044
6051 spin_unlock_irqrestore(&priv->low_lock, flags); 6045 spin_unlock_irqrestore(&priv->low_lock, flags);
6052} 6046}
@@ -6062,8 +6056,8 @@ static void ipw2100_rf_kill(struct work_struct *work)
6062 if (rf_kill_active(priv)) { 6056 if (rf_kill_active(priv)) {
6063 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); 6057 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
6064 if (!priv->stop_rf_kill) 6058 if (!priv->stop_rf_kill)
6065 queue_delayed_work(priv->workqueue, &priv->rf_kill, 6059 schedule_delayed_work(&priv->rf_kill,
6066 round_jiffies_relative(HZ)); 6060 round_jiffies_relative(HZ));
6067 goto exit_unlock; 6061 goto exit_unlock;
6068 } 6062 }
6069 6063
@@ -6209,8 +6203,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6209 INIT_LIST_HEAD(&priv->fw_pend_list); 6203 INIT_LIST_HEAD(&priv->fw_pend_list);
6210 INIT_STAT(&priv->fw_pend_stat); 6204 INIT_STAT(&priv->fw_pend_stat);
6211 6205
6212 priv->workqueue = create_workqueue(DRV_NAME);
6213
6214 INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter); 6206 INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter);
6215 INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work); 6207 INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work);
6216 INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work); 6208 INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
@@ -6410,7 +6402,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6410 if (dev->irq) 6402 if (dev->irq)
6411 free_irq(dev->irq, priv); 6403 free_irq(dev->irq, priv);
6412 6404
6413 ipw2100_kill_workqueue(priv); 6405 ipw2100_kill_works(priv);
6414 6406
6415 /* These are safe to call even if they weren't allocated */ 6407 /* These are safe to call even if they weren't allocated */
6416 ipw2100_queues_free(priv); 6408 ipw2100_queues_free(priv);
@@ -6460,9 +6452,7 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
6460 * first, then close() will crash. */ 6452 * first, then close() will crash. */
6461 unregister_netdev(dev); 6453 unregister_netdev(dev);
6462 6454
6463 /* ipw2100_down will ensure that there is no more pending work 6455 ipw2100_kill_works(priv);
6464 * in the workqueue's, so we can safely remove them now. */
6465 ipw2100_kill_workqueue(priv);
6466 6456
6467 ipw2100_queues_free(priv); 6457 ipw2100_queues_free(priv);
6468 6458
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 838002b4881..99cba968aa5 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -580,7 +580,6 @@ struct ipw2100_priv {
580 580
581 struct tasklet_struct irq_tasklet; 581 struct tasklet_struct irq_tasklet;
582 582
583 struct workqueue_struct *workqueue;
584 struct delayed_work reset_work; 583 struct delayed_work reset_work;
585 struct delayed_work security_work; 584 struct delayed_work security_work;
586 struct delayed_work wx_event_work; 585 struct delayed_work wx_event_work;
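The ipw2100 change swaps "destroy the driver workqueue" for "cancel every work item synchronously", which is why ipw2100_kill_works() can be shared by the probe error path and remove(). The idiom, reduced to a sketch with shortened field names (the stop flags mirror the driver's own stop_* fields):

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct demo_wifi_priv {
        int stop_hang_check;
        struct delayed_work reset_work;
        struct delayed_work hang_check;
        struct work_struct scan_event_now;
};

static void demo_hang_check(struct work_struct *work)
{
        struct demo_wifi_priv *priv =
                container_of(work, struct demo_wifi_priv, hang_check.work);

        /* ... poke the firmware, schedule a reset if it stopped responding ... */

        if (!priv->stop_hang_check)
                schedule_delayed_work(&priv->hang_check, HZ / 2);
}

static void demo_kill_works(struct demo_wifi_priv *priv)
{
        /* Raise the stop flags first so running handlers stop re-arming ... */
        priv->stop_hang_check = 1;
        /* ... then the _sync cancels wait for anything still executing. */
        cancel_delayed_work_sync(&priv->reset_work);
        cancel_delayed_work_sync(&priv->hang_check);
        cancel_work_sync(&priv->scan_event_now);
}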
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index ae438ed80c2..160881f234c 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -894,9 +894,8 @@ static void ipw_led_link_on(struct ipw_priv *priv)
894 894
895 /* If we aren't associated, schedule turning the LED off */ 895 /* If we aren't associated, schedule turning the LED off */
896 if (!(priv->status & STATUS_ASSOCIATED)) 896 if (!(priv->status & STATUS_ASSOCIATED))
897 queue_delayed_work(priv->workqueue, 897 schedule_delayed_work(&priv->led_link_off,
898 &priv->led_link_off, 898 LD_TIME_LINK_ON);
899 LD_TIME_LINK_ON);
900 } 899 }
901 900
902 spin_unlock_irqrestore(&priv->lock, flags); 901 spin_unlock_irqrestore(&priv->lock, flags);
@@ -939,8 +938,8 @@ static void ipw_led_link_off(struct ipw_priv *priv)
939 * turning the LED on (blink while unassociated) */ 938 * turning the LED on (blink while unassociated) */
940 if (!(priv->status & STATUS_RF_KILL_MASK) && 939 if (!(priv->status & STATUS_RF_KILL_MASK) &&
941 !(priv->status & STATUS_ASSOCIATED)) 940 !(priv->status & STATUS_ASSOCIATED))
942 queue_delayed_work(priv->workqueue, &priv->led_link_on, 941 schedule_delayed_work(&priv->led_link_on,
943 LD_TIME_LINK_OFF); 942 LD_TIME_LINK_OFF);
944 943
945 } 944 }
946 945
@@ -980,13 +979,11 @@ static void __ipw_led_activity_on(struct ipw_priv *priv)
980 priv->status |= STATUS_LED_ACT_ON; 979 priv->status |= STATUS_LED_ACT_ON;
981 980
982 cancel_delayed_work(&priv->led_act_off); 981 cancel_delayed_work(&priv->led_act_off);
983 queue_delayed_work(priv->workqueue, &priv->led_act_off, 982 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
984 LD_TIME_ACT_ON);
985 } else { 983 } else {
986 /* Reschedule LED off for full time period */ 984 /* Reschedule LED off for full time period */
987 cancel_delayed_work(&priv->led_act_off); 985 cancel_delayed_work(&priv->led_act_off);
988 queue_delayed_work(priv->workqueue, &priv->led_act_off, 986 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
989 LD_TIME_ACT_ON);
990 } 987 }
991} 988}
992 989
@@ -1795,13 +1792,11 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1795 if (disable_radio) { 1792 if (disable_radio) {
1796 priv->status |= STATUS_RF_KILL_SW; 1793 priv->status |= STATUS_RF_KILL_SW;
1797 1794
1798 if (priv->workqueue) { 1795 cancel_delayed_work(&priv->request_scan);
1799 cancel_delayed_work(&priv->request_scan); 1796 cancel_delayed_work(&priv->request_direct_scan);
1800 cancel_delayed_work(&priv->request_direct_scan); 1797 cancel_delayed_work(&priv->request_passive_scan);
1801 cancel_delayed_work(&priv->request_passive_scan); 1798 cancel_delayed_work(&priv->scan_event);
1802 cancel_delayed_work(&priv->scan_event); 1799 schedule_work(&priv->down);
1803 }
1804 queue_work(priv->workqueue, &priv->down);
1805 } else { 1800 } else {
1806 priv->status &= ~STATUS_RF_KILL_SW; 1801 priv->status &= ~STATUS_RF_KILL_SW;
1807 if (rf_kill_active(priv)) { 1802 if (rf_kill_active(priv)) {
@@ -1809,10 +1804,10 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1809 "disabled by HW switch\n"); 1804 "disabled by HW switch\n");
1810 /* Make sure the RF_KILL check timer is running */ 1805 /* Make sure the RF_KILL check timer is running */
1811 cancel_delayed_work(&priv->rf_kill); 1806 cancel_delayed_work(&priv->rf_kill);
1812 queue_delayed_work(priv->workqueue, &priv->rf_kill, 1807 schedule_delayed_work(&priv->rf_kill,
1813 round_jiffies_relative(2 * HZ)); 1808 round_jiffies_relative(2 * HZ));
1814 } else 1809 } else
1815 queue_work(priv->workqueue, &priv->up); 1810 schedule_work(&priv->up);
1816 } 1811 }
1817 1812
1818 return 1; 1813 return 1;
@@ -2063,7 +2058,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
2063 cancel_delayed_work(&priv->request_passive_scan); 2058 cancel_delayed_work(&priv->request_passive_scan);
2064 cancel_delayed_work(&priv->scan_event); 2059 cancel_delayed_work(&priv->scan_event);
2065 schedule_work(&priv->link_down); 2060 schedule_work(&priv->link_down);
2066 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ); 2061 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
2067 handled |= IPW_INTA_BIT_RF_KILL_DONE; 2062 handled |= IPW_INTA_BIT_RF_KILL_DONE;
2068 } 2063 }
2069 2064
@@ -2103,7 +2098,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
2103 priv->status &= ~STATUS_HCMD_ACTIVE; 2098 priv->status &= ~STATUS_HCMD_ACTIVE;
2104 wake_up_interruptible(&priv->wait_command_queue); 2099 wake_up_interruptible(&priv->wait_command_queue);
2105 2100
2106 queue_work(priv->workqueue, &priv->adapter_restart); 2101 schedule_work(&priv->adapter_restart);
2107 handled |= IPW_INTA_BIT_FATAL_ERROR; 2102 handled |= IPW_INTA_BIT_FATAL_ERROR;
2108 } 2103 }
2109 2104
@@ -2323,11 +2318,6 @@ static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2323 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac); 2318 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2324} 2319}
2325 2320
2326/*
2327 * NOTE: This must be executed from our workqueue as it results in udelay
2328 * being called which may corrupt the keyboard if executed on default
2329 * workqueue
2330 */
2331static void ipw_adapter_restart(void *adapter) 2321static void ipw_adapter_restart(void *adapter)
2332{ 2322{
2333 struct ipw_priv *priv = adapter; 2323 struct ipw_priv *priv = adapter;
@@ -2368,13 +2358,13 @@ static void ipw_scan_check(void *data)
2368 IPW_DEBUG_SCAN("Scan completion watchdog resetting " 2358 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2369 "adapter after (%dms).\n", 2359 "adapter after (%dms).\n",
2370 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG)); 2360 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2371 queue_work(priv->workqueue, &priv->adapter_restart); 2361 schedule_work(&priv->adapter_restart);
2372 } else if (priv->status & STATUS_SCANNING) { 2362 } else if (priv->status & STATUS_SCANNING) {
2373 IPW_DEBUG_SCAN("Scan completion watchdog aborting scan " 2363 IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2374 "after (%dms).\n", 2364 "after (%dms).\n",
2375 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG)); 2365 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2376 ipw_abort_scan(priv); 2366 ipw_abort_scan(priv);
2377 queue_delayed_work(priv->workqueue, &priv->scan_check, HZ); 2367 schedule_delayed_work(&priv->scan_check, HZ);
2378 } 2368 }
2379} 2369}
2380 2370
@@ -3943,7 +3933,7 @@ static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3943 3933
3944 if (priv->status & STATUS_ASSOCIATING) { 3934 if (priv->status & STATUS_ASSOCIATING) {
3945 IPW_DEBUG_ASSOC("Disassociating while associating.\n"); 3935 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3946 queue_work(priv->workqueue, &priv->disassociate); 3936 schedule_work(&priv->disassociate);
3947 return; 3937 return;
3948 } 3938 }
3949 3939
@@ -4360,8 +4350,7 @@ static void ipw_gather_stats(struct ipw_priv *priv)
4360 4350
4361 priv->quality = quality; 4351 priv->quality = quality;
4362 4352
4363 queue_delayed_work(priv->workqueue, &priv->gather_stats, 4353 schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
4364 IPW_STATS_INTERVAL);
4365} 4354}
4366 4355
4367static void ipw_bg_gather_stats(struct work_struct *work) 4356static void ipw_bg_gather_stats(struct work_struct *work)
@@ -4396,10 +4385,10 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4396 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | 4385 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4397 IPW_DL_STATE, 4386 IPW_DL_STATE,
4398 "Aborting scan with missed beacon.\n"); 4387 "Aborting scan with missed beacon.\n");
4399 queue_work(priv->workqueue, &priv->abort_scan); 4388 schedule_work(&priv->abort_scan);
4400 } 4389 }
4401 4390
4402 queue_work(priv->workqueue, &priv->disassociate); 4391 schedule_work(&priv->disassociate);
4403 return; 4392 return;
4404 } 4393 }
4405 4394
@@ -4425,8 +4414,7 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4425 if (!(priv->status & STATUS_ROAMING)) { 4414 if (!(priv->status & STATUS_ROAMING)) {
4426 priv->status |= STATUS_ROAMING; 4415 priv->status |= STATUS_ROAMING;
4427 if (!(priv->status & STATUS_SCANNING)) 4416 if (!(priv->status & STATUS_SCANNING))
4428 queue_delayed_work(priv->workqueue, 4417 schedule_delayed_work(&priv->request_scan, 0);
4429 &priv->request_scan, 0);
4430 } 4418 }
4431 return; 4419 return;
4432 } 4420 }
@@ -4439,7 +4427,7 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4439 * channels..) */ 4427 * channels..) */
4440 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE, 4428 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4441 "Aborting scan with missed beacon.\n"); 4429 "Aborting scan with missed beacon.\n");
4442 queue_work(priv->workqueue, &priv->abort_scan); 4430 schedule_work(&priv->abort_scan);
4443 } 4431 }
4444 4432
4445 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count); 4433 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
@@ -4462,8 +4450,8 @@ static void handle_scan_event(struct ipw_priv *priv)
4462 /* Only userspace-requested scan completion events go out immediately */ 4450 /* Only userspace-requested scan completion events go out immediately */
4463 if (!priv->user_requested_scan) { 4451 if (!priv->user_requested_scan) {
4464 if (!delayed_work_pending(&priv->scan_event)) 4452 if (!delayed_work_pending(&priv->scan_event))
4465 queue_delayed_work(priv->workqueue, &priv->scan_event, 4453 schedule_delayed_work(&priv->scan_event,
4466 round_jiffies_relative(msecs_to_jiffies(4000))); 4454 round_jiffies_relative(msecs_to_jiffies(4000)));
4467 } else { 4455 } else {
4468 union iwreq_data wrqu; 4456 union iwreq_data wrqu;
4469 4457
@@ -4516,20 +4504,17 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4516 4504
4517 IPW_DEBUG_ASSOC 4505 IPW_DEBUG_ASSOC
4518 ("queueing adhoc check\n"); 4506 ("queueing adhoc check\n");
4519 queue_delayed_work(priv-> 4507 schedule_delayed_work(
4520 workqueue, 4508 &priv->adhoc_check,
4521 &priv-> 4509 le16_to_cpu(priv->
4522 adhoc_check, 4510 assoc_request.
4523 le16_to_cpu(priv-> 4511 beacon_interval));
4524 assoc_request.
4525 beacon_interval));
4526 break; 4512 break;
4527 } 4513 }
4528 4514
4529 priv->status &= ~STATUS_ASSOCIATING; 4515 priv->status &= ~STATUS_ASSOCIATING;
4530 priv->status |= STATUS_ASSOCIATED; 4516 priv->status |= STATUS_ASSOCIATED;
4531 queue_work(priv->workqueue, 4517 schedule_work(&priv->system_config);
4532 &priv->system_config);
4533 4518
4534#ifdef CONFIG_IPW2200_QOS 4519#ifdef CONFIG_IPW2200_QOS
4535#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \ 4520#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
@@ -4792,43 +4777,37 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4792#ifdef CONFIG_IPW2200_MONITOR 4777#ifdef CONFIG_IPW2200_MONITOR
4793 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 4778 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4794 priv->status |= STATUS_SCAN_FORCED; 4779 priv->status |= STATUS_SCAN_FORCED;
4795 queue_delayed_work(priv->workqueue, 4780 schedule_delayed_work(&priv->request_scan, 0);
4796 &priv->request_scan, 0);
4797 break; 4781 break;
4798 } 4782 }
4799 priv->status &= ~STATUS_SCAN_FORCED; 4783 priv->status &= ~STATUS_SCAN_FORCED;
4800#endif /* CONFIG_IPW2200_MONITOR */ 4784#endif /* CONFIG_IPW2200_MONITOR */
4801 4785
4802 /* Do queued direct scans first */ 4786 /* Do queued direct scans first */
4803 if (priv->status & STATUS_DIRECT_SCAN_PENDING) { 4787 if (priv->status & STATUS_DIRECT_SCAN_PENDING)
4804 queue_delayed_work(priv->workqueue, 4788 schedule_delayed_work(&priv->request_direct_scan, 0);
4805 &priv->request_direct_scan, 0);
4806 }
4807 4789
4808 if (!(priv->status & (STATUS_ASSOCIATED | 4790 if (!(priv->status & (STATUS_ASSOCIATED |
4809 STATUS_ASSOCIATING | 4791 STATUS_ASSOCIATING |
4810 STATUS_ROAMING | 4792 STATUS_ROAMING |
4811 STATUS_DISASSOCIATING))) 4793 STATUS_DISASSOCIATING)))
4812 queue_work(priv->workqueue, &priv->associate); 4794 schedule_work(&priv->associate);
4813 else if (priv->status & STATUS_ROAMING) { 4795 else if (priv->status & STATUS_ROAMING) {
4814 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) 4796 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4815 /* If a scan completed and we are in roam mode, then 4797 /* If a scan completed and we are in roam mode, then
4816 * the scan that completed was the one requested as a 4798 * the scan that completed was the one requested as a
4817 * result of entering roam... so, schedule the 4799 * result of entering roam... so, schedule the
4818 * roam work */ 4800 * roam work */
4819 queue_work(priv->workqueue, 4801 schedule_work(&priv->roam);
4820 &priv->roam);
4821 else 4802 else
4822 /* Don't schedule if we aborted the scan */ 4803 /* Don't schedule if we aborted the scan */
4823 priv->status &= ~STATUS_ROAMING; 4804 priv->status &= ~STATUS_ROAMING;
4824 } else if (priv->status & STATUS_SCAN_PENDING) 4805 } else if (priv->status & STATUS_SCAN_PENDING)
4825 queue_delayed_work(priv->workqueue, 4806 schedule_delayed_work(&priv->request_scan, 0);
4826 &priv->request_scan, 0);
4827 else if (priv->config & CFG_BACKGROUND_SCAN 4807 else if (priv->config & CFG_BACKGROUND_SCAN
4828 && priv->status & STATUS_ASSOCIATED) 4808 && priv->status & STATUS_ASSOCIATED)
4829 queue_delayed_work(priv->workqueue, 4809 schedule_delayed_work(&priv->request_scan,
4830 &priv->request_scan, 4810 round_jiffies_relative(HZ));
4831 round_jiffies_relative(HZ));
4832 4811
4833 /* Send an empty event to user space. 4812 /* Send an empty event to user space.
4834 * We don't send the received data on the event because 4813 * We don't send the received data on the event because
@@ -5192,7 +5171,7 @@ static void ipw_rx_queue_restock(struct ipw_priv *priv)
5192 /* If the pre-allocated buffer pool is dropping low, schedule to 5171 /* If the pre-allocated buffer pool is dropping low, schedule to
5193 * refill it */ 5172 * refill it */
5194 if (rxq->free_count <= RX_LOW_WATERMARK) 5173 if (rxq->free_count <= RX_LOW_WATERMARK)
5195 queue_work(priv->workqueue, &priv->rx_replenish); 5174 schedule_work(&priv->rx_replenish);
5196 5175
5197 /* If we've added more space for the firmware to place data, tell it */ 5176 /* If we've added more space for the firmware to place data, tell it */
5198 if (write != rxq->write) 5177 if (write != rxq->write)
@@ -6133,8 +6112,8 @@ static void ipw_adhoc_check(void *data)
6133 return; 6112 return;
6134 } 6113 }
6135 6114
6136 queue_delayed_work(priv->workqueue, &priv->adhoc_check, 6115 schedule_delayed_work(&priv->adhoc_check,
6137 le16_to_cpu(priv->assoc_request.beacon_interval)); 6116 le16_to_cpu(priv->assoc_request.beacon_interval));
6138} 6117}
6139 6118
6140static void ipw_bg_adhoc_check(struct work_struct *work) 6119static void ipw_bg_adhoc_check(struct work_struct *work)
@@ -6523,8 +6502,7 @@ send_request:
6523 } else 6502 } else
6524 priv->status &= ~STATUS_SCAN_PENDING; 6503 priv->status &= ~STATUS_SCAN_PENDING;
6525 6504
6526 queue_delayed_work(priv->workqueue, &priv->scan_check, 6505 schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
6527 IPW_SCAN_CHECK_WATCHDOG);
6528done: 6506done:
6529 mutex_unlock(&priv->mutex); 6507 mutex_unlock(&priv->mutex);
6530 return err; 6508 return err;
@@ -6994,8 +6972,7 @@ static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6994 !memcmp(network->ssid, 6972 !memcmp(network->ssid,
6995 priv->assoc_network->ssid, 6973 priv->assoc_network->ssid,
6996 network->ssid_len)) { 6974 network->ssid_len)) {
6997 queue_work(priv->workqueue, 6975 schedule_work(&priv->merge_networks);
6998 &priv->merge_networks);
6999 } 6976 }
7000 } 6977 }
7001 6978
@@ -7663,7 +7640,7 @@ static int ipw_associate(void *data)
7663 if (priv->status & STATUS_DISASSOCIATING) { 7640 if (priv->status & STATUS_DISASSOCIATING) {
7664 IPW_DEBUG_ASSOC("Not attempting association (in " 7641 IPW_DEBUG_ASSOC("Not attempting association (in "
7665 "disassociating)\n "); 7642 "disassociating)\n ");
7666 queue_work(priv->workqueue, &priv->associate); 7643 schedule_work(&priv->associate);
7667 return 0; 7644 return 0;
7668 } 7645 }
7669 7646
@@ -7731,12 +7708,10 @@ static int ipw_associate(void *data)
7731 7708
7732 if (!(priv->status & STATUS_SCANNING)) { 7709 if (!(priv->status & STATUS_SCANNING)) {
7733 if (!(priv->config & CFG_SPEED_SCAN)) 7710 if (!(priv->config & CFG_SPEED_SCAN))
7734 queue_delayed_work(priv->workqueue, 7711 schedule_delayed_work(&priv->request_scan,
7735 &priv->request_scan, 7712 SCAN_INTERVAL);
7736 SCAN_INTERVAL);
7737 else 7713 else
7738 queue_delayed_work(priv->workqueue, 7714 schedule_delayed_work(&priv->request_scan, 0);
7739 &priv->request_scan, 0);
7740 } 7715 }
7741 7716
7742 return 0; 7717 return 0;
@@ -8899,7 +8874,7 @@ static int ipw_wx_set_mode(struct net_device *dev,
8899 8874
8900 priv->ieee->iw_mode = wrqu->mode; 8875 priv->ieee->iw_mode = wrqu->mode;
8901 8876
8902 queue_work(priv->workqueue, &priv->adapter_restart); 8877 schedule_work(&priv->adapter_restart);
8903 mutex_unlock(&priv->mutex); 8878 mutex_unlock(&priv->mutex);
8904 return err; 8879 return err;
8905} 8880}
@@ -9598,7 +9573,7 @@ static int ipw_wx_set_scan(struct net_device *dev,
9598 9573
9599 IPW_DEBUG_WX("Start scan\n"); 9574 IPW_DEBUG_WX("Start scan\n");
9600 9575
9601 queue_delayed_work(priv->workqueue, work, 0); 9576 schedule_delayed_work(work, 0);
9602 9577
9603 return 0; 9578 return 0;
9604} 9579}
@@ -9937,7 +9912,7 @@ static int ipw_wx_set_monitor(struct net_device *dev,
9937#else 9912#else
9938 priv->net_dev->type = ARPHRD_IEEE80211; 9913 priv->net_dev->type = ARPHRD_IEEE80211;
9939#endif 9914#endif
9940 queue_work(priv->workqueue, &priv->adapter_restart); 9915 schedule_work(&priv->adapter_restart);
9941 } 9916 }
9942 9917
9943 ipw_set_channel(priv, parms[1]); 9918 ipw_set_channel(priv, parms[1]);
@@ -9947,7 +9922,7 @@ static int ipw_wx_set_monitor(struct net_device *dev,
9947 return 0; 9922 return 0;
9948 } 9923 }
9949 priv->net_dev->type = ARPHRD_ETHER; 9924 priv->net_dev->type = ARPHRD_ETHER;
9950 queue_work(priv->workqueue, &priv->adapter_restart); 9925 schedule_work(&priv->adapter_restart);
9951 } 9926 }
9952 mutex_unlock(&priv->mutex); 9927 mutex_unlock(&priv->mutex);
9953 return 0; 9928 return 0;
@@ -9961,7 +9936,7 @@ static int ipw_wx_reset(struct net_device *dev,
9961{ 9936{
9962 struct ipw_priv *priv = libipw_priv(dev); 9937 struct ipw_priv *priv = libipw_priv(dev);
9963 IPW_DEBUG_WX("RESET\n"); 9938 IPW_DEBUG_WX("RESET\n");
9964 queue_work(priv->workqueue, &priv->adapter_restart); 9939 schedule_work(&priv->adapter_restart);
9965 return 0; 9940 return 0;
9966} 9941}
9967 9942
@@ -10551,7 +10526,7 @@ static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10551 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); 10526 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10552 printk(KERN_INFO "%s: Setting MAC to %pM\n", 10527 printk(KERN_INFO "%s: Setting MAC to %pM\n",
10553 priv->net_dev->name, priv->mac_addr); 10528 priv->net_dev->name, priv->mac_addr);
10554 queue_work(priv->workqueue, &priv->adapter_restart); 10529 schedule_work(&priv->adapter_restart);
10555 mutex_unlock(&priv->mutex); 10530 mutex_unlock(&priv->mutex);
10556 return 0; 10531 return 0;
10557} 10532}
@@ -10684,9 +10659,7 @@ static void ipw_rf_kill(void *adapter)
10684 10659
10685 if (rf_kill_active(priv)) { 10660 if (rf_kill_active(priv)) {
10686 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); 10661 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10687 if (priv->workqueue) 10662 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
10688 queue_delayed_work(priv->workqueue,
10689 &priv->rf_kill, 2 * HZ);
10690 goto exit_unlock; 10663 goto exit_unlock;
10691 } 10664 }
10692 10665
@@ -10697,7 +10670,7 @@ static void ipw_rf_kill(void *adapter)
10697 "device\n"); 10670 "device\n");
10698 10671
10699 /* we can not do an adapter restart while inside an irq lock */ 10672 /* we can not do an adapter restart while inside an irq lock */
10700 queue_work(priv->workqueue, &priv->adapter_restart); 10673 schedule_work(&priv->adapter_restart);
10701 } else 10674 } else
10702 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still " 10675 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10703 "enabled\n"); 10676 "enabled\n");
@@ -10735,7 +10708,7 @@ static void ipw_link_up(struct ipw_priv *priv)
10735 notify_wx_assoc_event(priv); 10708 notify_wx_assoc_event(priv);
10736 10709
10737 if (priv->config & CFG_BACKGROUND_SCAN) 10710 if (priv->config & CFG_BACKGROUND_SCAN)
10738 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ); 10711 schedule_delayed_work(&priv->request_scan, HZ);
10739} 10712}
10740 10713
10741static void ipw_bg_link_up(struct work_struct *work) 10714static void ipw_bg_link_up(struct work_struct *work)
@@ -10764,7 +10737,7 @@ static void ipw_link_down(struct ipw_priv *priv)
10764 10737
10765 if (!(priv->status & STATUS_EXIT_PENDING)) { 10738 if (!(priv->status & STATUS_EXIT_PENDING)) {
10766 /* Queue up another scan... */ 10739 /* Queue up another scan... */
10767 queue_delayed_work(priv->workqueue, &priv->request_scan, 0); 10740 schedule_delayed_work(&priv->request_scan, 0);
10768 } else 10741 } else
10769 cancel_delayed_work(&priv->scan_event); 10742 cancel_delayed_work(&priv->scan_event);
10770} 10743}
@@ -10782,7 +10755,6 @@ static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
10782{ 10755{
10783 int ret = 0; 10756 int ret = 0;
10784 10757
10785 priv->workqueue = create_workqueue(DRV_NAME);
10786 init_waitqueue_head(&priv->wait_command_queue); 10758 init_waitqueue_head(&priv->wait_command_queue);
10787 init_waitqueue_head(&priv->wait_state); 10759 init_waitqueue_head(&priv->wait_state);
10788 10760
@@ -11339,8 +11311,7 @@ static int ipw_up(struct ipw_priv *priv)
11339 IPW_WARNING("Radio Frequency Kill Switch is On:\n" 11311 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11340 "Kill switch must be turned off for " 11312 "Kill switch must be turned off for "
11341 "wireless networking to work.\n"); 11313 "wireless networking to work.\n");
11342 queue_delayed_work(priv->workqueue, &priv->rf_kill, 11314 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
11343 2 * HZ);
11344 return 0; 11315 return 0;
11345 } 11316 }
11346 11317
@@ -11350,8 +11321,7 @@ static int ipw_up(struct ipw_priv *priv)
11350 11321
11351 /* If configure to try and auto-associate, kick 11322 /* If configure to try and auto-associate, kick
11352 * off a scan. */ 11323 * off a scan. */
11353 queue_delayed_work(priv->workqueue, 11324 schedule_delayed_work(&priv->request_scan, 0);
11354 &priv->request_scan, 0);
11355 11325
11356 return 0; 11326 return 0;
11357 } 11327 }
@@ -11817,7 +11787,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11817 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv); 11787 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11818 if (err) { 11788 if (err) {
11819 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq); 11789 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11820 goto out_destroy_workqueue; 11790 goto out_iounmap;
11821 } 11791 }
11822 11792
11823 SET_NETDEV_DEV(net_dev, &pdev->dev); 11793 SET_NETDEV_DEV(net_dev, &pdev->dev);
@@ -11885,9 +11855,6 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11885 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); 11855 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11886 out_release_irq: 11856 out_release_irq:
11887 free_irq(pdev->irq, priv); 11857 free_irq(pdev->irq, priv);
11888 out_destroy_workqueue:
11889 destroy_workqueue(priv->workqueue);
11890 priv->workqueue = NULL;
11891 out_iounmap: 11858 out_iounmap:
11892 iounmap(priv->hw_base); 11859 iounmap(priv->hw_base);
11893 out_pci_release_regions: 11860 out_pci_release_regions:
@@ -11930,18 +11897,31 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11930 kfree(priv->cmdlog); 11897 kfree(priv->cmdlog);
11931 priv->cmdlog = NULL; 11898 priv->cmdlog = NULL;
11932 } 11899 }
11933 /* ipw_down will ensure that there is no more pending work 11900
11934 * in the workqueue's, so we can safely remove them now. */ 11901 /* make sure all works are inactive */
11935 cancel_delayed_work(&priv->adhoc_check); 11902 cancel_delayed_work_sync(&priv->adhoc_check);
11936 cancel_delayed_work(&priv->gather_stats); 11903 cancel_work_sync(&priv->associate);
11937 cancel_delayed_work(&priv->request_scan); 11904 cancel_work_sync(&priv->disassociate);
11938 cancel_delayed_work(&priv->request_direct_scan); 11905 cancel_work_sync(&priv->system_config);
11939 cancel_delayed_work(&priv->request_passive_scan); 11906 cancel_work_sync(&priv->rx_replenish);
11940 cancel_delayed_work(&priv->scan_event); 11907 cancel_work_sync(&priv->adapter_restart);
11941 cancel_delayed_work(&priv->rf_kill); 11908 cancel_delayed_work_sync(&priv->rf_kill);
11942 cancel_delayed_work(&priv->scan_check); 11909 cancel_work_sync(&priv->up);
11943 destroy_workqueue(priv->workqueue); 11910 cancel_work_sync(&priv->down);
11944 priv->workqueue = NULL; 11911 cancel_delayed_work_sync(&priv->request_scan);
11912 cancel_delayed_work_sync(&priv->request_direct_scan);
11913 cancel_delayed_work_sync(&priv->request_passive_scan);
11914 cancel_delayed_work_sync(&priv->scan_event);
11915 cancel_delayed_work_sync(&priv->gather_stats);
11916 cancel_work_sync(&priv->abort_scan);
11917 cancel_work_sync(&priv->roam);
11918 cancel_delayed_work_sync(&priv->scan_check);
11919 cancel_work_sync(&priv->link_up);
11920 cancel_work_sync(&priv->link_down);
11921 cancel_delayed_work_sync(&priv->led_link_on);
11922 cancel_delayed_work_sync(&priv->led_link_off);
11923 cancel_delayed_work_sync(&priv->led_act_off);
11924 cancel_work_sync(&priv->merge_networks);
11945 11925
11946 /* Free MAC hash list for ADHOC */ 11926 /* Free MAC hash list for ADHOC */
11947 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) { 11927 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
@@ -12029,7 +12009,7 @@ static int ipw_pci_resume(struct pci_dev *pdev)
12029 priv->suspend_time = get_seconds() - priv->suspend_at; 12009 priv->suspend_time = get_seconds() - priv->suspend_at;
12030 12010
12031 /* Bring the device back up */ 12011 /* Bring the device back up */
12032 queue_work(priv->workqueue, &priv->up); 12012 schedule_work(&priv->up);
12033 12013
12034 return 0; 12014 return 0;
12035} 12015}
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h
index d7d049c7a4f..0441445b8bf 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/ipw2x00/ipw2200.h
@@ -1299,8 +1299,6 @@ struct ipw_priv {
1299 u8 direct_scan_ssid[IW_ESSID_MAX_SIZE]; 1299 u8 direct_scan_ssid[IW_ESSID_MAX_SIZE];
1300 u8 direct_scan_ssid_len; 1300 u8 direct_scan_ssid_len;
1301 1301
1302 struct workqueue_struct *workqueue;
1303
1304 struct delayed_work adhoc_check; 1302 struct delayed_work adhoc_check;
1305 struct work_struct associate; 1303 struct work_struct associate;
1306 struct work_struct disassociate; 1304 struct work_struct disassociate;
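ipw2200 follows the same conversion, and its remove path is where the two cancel flavours matter: cancel_delayed_work() in the tasklet and under the priv spinlock only un-queues a pending item and never sleeps, while the cancel_*_sync() calls at teardown also wait for a handler that is already running. A brief illustration of when each applies (names invented, the locking mirrors the driver's style):

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_dev {
        spinlock_t lock;
        struct delayed_work rf_kill;
        struct delayed_work scan_event;
};

static void demo_rf_kill_toggled(struct demo_dev *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);
        /* Atomic context: only the non-sleeping cancel is allowed here. */
        cancel_delayed_work(&dev->rf_kill);
        schedule_delayed_work(&dev->rf_kill, 2 * HZ);
        spin_unlock_irqrestore(&dev->lock, flags);
}

static void demo_remove(struct demo_dev *dev)
{
        /* Process context at teardown: wait until the handlers have finished. */
        cancel_delayed_work_sync(&dev->rf_kill);
        cancel_delayed_work_sync(&dev->scan_event);
}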
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 3c6e100a3ad..d06a6374ed6 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -69,4 +69,10 @@ config OF_MDIO
69 help 69 help
70 OpenFirmware MDIO bus (Ethernet PHY) accessors 70 OpenFirmware MDIO bus (Ethernet PHY) accessors
71 71
72config OF_PCI
73 def_tristate PCI
74 depends on PCI && (PPC || MICROBLAZE || X86)
75 help
76 OpenFirmware PCI bus accessors
77
72endmenu # OF 78endmenu # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 3ab21a0a490..f7861ed2f28 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_OF_I2C) += of_i2c.o
9obj-$(CONFIG_OF_NET) += of_net.o 9obj-$(CONFIG_OF_NET) += of_net.o
10obj-$(CONFIG_OF_SPI) += of_spi.o 10obj-$(CONFIG_OF_SPI) += of_spi.o
11obj-$(CONFIG_OF_MDIO) += of_mdio.o 11obj-$(CONFIG_OF_MDIO) += of_mdio.o
12obj-$(CONFIG_OF_PCI) += of_pci.o
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
new file mode 100644
index 00000000000..ac1ec54e4fd
--- /dev/null
+++ b/drivers/of/of_pci.c
@@ -0,0 +1,92 @@
1#include <linux/kernel.h>
2#include <linux/of_pci.h>
3#include <linux/of_irq.h>
4#include <asm/prom.h>
5
6/**
7 * of_irq_map_pci - Resolve the interrupt for a PCI device
8 * @pdev: the device whose interrupt is to be resolved
9 * @out_irq: structure of_irq filled by this function
10 *
11 * This function resolves the PCI interrupt for a given PCI device. If a
12 * device-node exists for a given pci_dev, it will use normal OF tree
13 * walking. If not, it will implement standard swizzling and walk up the
14 * PCI tree until an device-node is found, at which point it will finish
15 * resolving using the OF tree walking.
16 */
17int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
18{
19 struct device_node *dn, *ppnode;
20 struct pci_dev *ppdev;
21 u32 lspec;
22 __be32 lspec_be;
23 __be32 laddr[3];
24 u8 pin;
25 int rc;
26
27 /* Check if we have a device node, if yes, fallback to standard
28 * device tree parsing
29 */
30 dn = pci_device_to_OF_node(pdev);
31 if (dn) {
32 rc = of_irq_map_one(dn, 0, out_irq);
33 if (!rc)
34 return rc;
35 }
36
37 /* Ok, we don't, time to have fun. Let's start by building up an
38 * interrupt spec. we assume #interrupt-cells is 1, which is standard
39 * for PCI. If you do different, then don't use that routine.
40 */
41 rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
42 if (rc != 0)
43 return rc;
44 /* No pin, exit */
45 if (pin == 0)
46 return -ENODEV;
47
48 /* Now we walk up the PCI tree */
49 lspec = pin;
50 for (;;) {
51 /* Get the pci_dev of our parent */
52 ppdev = pdev->bus->self;
53
54 /* Ouch, it's a host bridge... */
55 if (ppdev == NULL) {
56 ppnode = pci_bus_to_OF_node(pdev->bus);
57
58 /* No node for host bridge ? give up */
59 if (ppnode == NULL)
60 return -EINVAL;
61 } else {
62 /* We found a P2P bridge, check if it has a node */
63 ppnode = pci_device_to_OF_node(ppdev);
64 }
65
66 /* Ok, we have found a parent with a device-node, hand over to
67 * the OF parsing code.
68 * We build a unit address from the linux device to be used for
69 * resolution. Note that we use the linux bus number which may
70 * not match your firmware bus numbering.
71 * Fortunately, in most cases, interrupt-map-mask doesn't
72 * include the bus number as part of the matching.
73 * You should still be careful about that though if you intend
74 * to rely on this function (you ship a firmware that doesn't
75 * create device nodes for all PCI devices).
76 */
77 if (ppnode)
78 break;
79
80 /* We can only get here if we hit a P2P bridge with no node,
81 * let's do standard swizzling and try again
82 */
83 lspec = pci_swizzle_interrupt_pin(pdev, lspec);
84 pdev = ppdev;
85 }
86
87 lspec_be = cpu_to_be32(lspec);
88 laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
89 laddr[1] = laddr[2] = cpu_to_be32(0);
90 return of_irq_map_raw(ppnode, &lspec_be, 1, laddr, out_irq);
91}
92EXPORT_SYMBOL_GPL(of_irq_map_pci);
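
For illustration, a minimal sketch of how a caller might use the new helper; the fixup function name and the final IRQ-mapping step are assumptions, only of_irq_map_pci() and struct of_irq come from the code above.

#include <linux/pci.h>
#include <linux/of_pci.h>
#include <linux/of_irq.h>

/* Hypothetical fixup: resolve a PCI device's legacy interrupt via the OF tree. */
static void example_map_pci_irq(struct pci_dev *pdev)
{
	struct of_irq oirq;

	if (of_irq_map_pci(pdev, &oirq)) {
		dev_warn(&pdev->dev, "no OF interrupt mapping found\n");
		return;
	}

	/*
	 * oirq.controller is the interrupt parent node and
	 * oirq.specifier[0..oirq.size-1] is the interrupt specifier; an
	 * architecture would normally hand these to its OF IRQ mapping
	 * routine (irq_create_of_mapping() on powerpc, for example) to
	 * obtain the Linux IRQ number for pdev->irq.
	 */
	dev_info(&pdev->dev, "interrupt parent %s, %u specifier cell(s)\n",
		 oirq.controller->full_name, oirq.size);
}
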
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 3a5a6fcc0ea..492b7d807fe 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -243,7 +243,7 @@ struct pci_ops pcifront_bus_ops = {
243 243
244#ifdef CONFIG_PCI_MSI 244#ifdef CONFIG_PCI_MSI
245static int pci_frontend_enable_msix(struct pci_dev *dev, 245static int pci_frontend_enable_msix(struct pci_dev *dev,
246 int **vector, int nvec) 246 int vector[], int nvec)
247{ 247{
248 int err; 248 int err;
249 int i; 249 int i;
@@ -277,18 +277,24 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
277 if (likely(!err)) { 277 if (likely(!err)) {
278 if (likely(!op.value)) { 278 if (likely(!op.value)) {
279 /* we get the result */ 279 /* we get the result */
280 for (i = 0; i < nvec; i++) 280 for (i = 0; i < nvec; i++) {
281 *(*vector+i) = op.msix_entries[i].vector; 281 if (op.msix_entries[i].vector <= 0) {
282 return 0; 282 dev_warn(&dev->dev, "MSI-X entry %d is invalid: %d!\n",
283 i, op.msix_entries[i].vector);
284 err = -EINVAL;
285 vector[i] = -1;
286 continue;
287 }
288 vector[i] = op.msix_entries[i].vector;
289 }
283 } else { 290 } else {
284 printk(KERN_DEBUG "enable msix get value %x\n", 291 printk(KERN_DEBUG "enable msix get value %x\n",
285 op.value); 292 op.value);
286 return op.value;
287 } 293 }
288 } else { 294 } else {
289 dev_err(&dev->dev, "enable msix get err %x\n", err); 295 dev_err(&dev->dev, "enable msix get err %x\n", err);
290 return err;
291 } 296 }
297 return err;
292} 298}
293 299
294static void pci_frontend_disable_msix(struct pci_dev *dev) 300static void pci_frontend_disable_msix(struct pci_dev *dev)
@@ -310,7 +316,7 @@ static void pci_frontend_disable_msix(struct pci_dev *dev)
310 dev_err(&dev->dev, "pci_disable_msix get err %x\n", err); 316 dev_err(&dev->dev, "pci_disable_msix get err %x\n", err);
311} 317}
312 318
313static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector) 319static int pci_frontend_enable_msi(struct pci_dev *dev, int vector[])
314{ 320{
315 int err; 321 int err;
316 struct xen_pci_op op = { 322 struct xen_pci_op op = {
@@ -324,7 +330,13 @@ static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector)
324 330
325 err = do_pci_op(pdev, &op); 331 err = do_pci_op(pdev, &op);
326 if (likely(!err)) { 332 if (likely(!err)) {
327 *(*vector) = op.value; 333 vector[0] = op.value;
334 if (op.value <= 0) {
335 dev_warn(&dev->dev, "MSI entry is invalid: %d!\n",
336 op.value);
337 err = -EINVAL;
338 vector[0] = -1;
339 }
328 } else { 340 } else {
329 dev_err(&dev->dev, "pci frontend enable msi failed for dev " 341 dev_err(&dev->dev, "pci frontend enable msi failed for dev "
330 "%x:%x\n", op.bus, op.devfn); 342 "%x:%x\n", op.bus, op.devfn);
@@ -733,8 +745,7 @@ static void free_pdev(struct pcifront_device *pdev)
733 745
734 pcifront_free_roots(pdev); 746 pcifront_free_roots(pdev);
735 747
736 /*For PCIE_AER error handling job*/ 748 cancel_work_sync(&pdev->op_work);
737 flush_scheduled_work();
738 749
739 if (pdev->irq >= 0) 750 if (pdev->irq >= 0)
740 unbind_from_irqhandler(pdev->irq, pdev); 751 unbind_from_irqhandler(pdev->irq, pdev);
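
The free_pdev() hunk above swaps a global flush_scheduled_work() for cancel_work_sync() on the device's own work item, which waits for a running instance and removes a queued one without touching unrelated work. A hedged sketch of that teardown pattern, with placeholder names:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct example_dev {
	struct work_struct op_work;
	/* ... per-device state ... */
};

static void example_free(struct example_dev *d)
{
	/* Wait for op_work if it is running, drop it if still queued. */
	cancel_work_sync(&d->op_work);
	kfree(d);
}
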
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index c404b61386b..09b4437b3e6 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -117,6 +117,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
117 struct module *owner) 117 struct module *owner)
118{ 118{
119 struct rtc_device *rtc; 119 struct rtc_device *rtc;
120 struct rtc_wkalrm alrm;
120 int id, err; 121 int id, err;
121 122
122 if (idr_pre_get(&rtc_idr, GFP_KERNEL) == 0) { 123 if (idr_pre_get(&rtc_idr, GFP_KERNEL) == 0) {
@@ -166,6 +167,12 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
166 rtc->pie_timer.function = rtc_pie_update_irq; 167 rtc->pie_timer.function = rtc_pie_update_irq;
167 rtc->pie_enabled = 0; 168 rtc->pie_enabled = 0;
168 169
170 /* Check to see if there is an ALARM already set in hw */
171 err = __rtc_read_alarm(rtc, &alrm);
172
173 if (!err && !rtc_valid_tm(&alrm.time))
174 rtc_set_alarm(rtc, &alrm);
175
169 strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE); 176 strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
170 dev_set_name(&rtc->dev, "rtc%d", id); 177 dev_set_name(&rtc->dev, "rtc%d", id);
171 178
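
With the class.c change above, an alarm that firmware or a previous kernel left programmed in the hardware is read back at registration and handed to rtc_set_alarm(), so it should remain visible through the RTC character device. A hedged userspace check (the /dev/rtc0 node name is assumed):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_wkalrm alrm;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0 || ioctl(fd, RTC_WKALM_RD, &alrm) < 0) {
		perror("rtc");
		return 1;
	}
	printf("alarm %02d:%02d:%02d enabled=%d pending=%d\n",
	       alrm.time.tm_hour, alrm.time.tm_min, alrm.time.tm_sec,
	       alrm.enabled, alrm.pending);
	close(fd);
	return 0;
}
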
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index cb2f0728fd7..8ec6b069a7f 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -116,6 +116,186 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
116} 116}
117EXPORT_SYMBOL_GPL(rtc_set_mmss); 117EXPORT_SYMBOL_GPL(rtc_set_mmss);
118 118
119static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
120{
121 int err;
122
123 err = mutex_lock_interruptible(&rtc->ops_lock);
124 if (err)
125 return err;
126
127 if (rtc->ops == NULL)
128 err = -ENODEV;
129 else if (!rtc->ops->read_alarm)
130 err = -EINVAL;
131 else {
132 memset(alarm, 0, sizeof(struct rtc_wkalrm));
133 err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
134 }
135
136 mutex_unlock(&rtc->ops_lock);
137 return err;
138}
139
140int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
141{
142 int err;
143 struct rtc_time before, now;
144 int first_time = 1;
145 unsigned long t_now, t_alm;
146 enum { none, day, month, year } missing = none;
147 unsigned days;
148
149 /* The lower level RTC driver may return -1 in some fields,
150 * creating invalid alarm->time values, for reasons like:
151 *
152 * - The hardware may not be capable of filling them in;
153 * many alarms match only on time-of-day fields, not
154 * day/month/year calendar data.
155 *
156 * - Some hardware uses illegal values as "wildcard" match
157 * values, which non-Linux firmware (like a BIOS) may try
158 * to set up as e.g. "alarm 15 minutes after each hour".
159 * Linux uses only oneshot alarms.
160 *
161 * When we see that here, we deal with it by using values from
162 * a current RTC timestamp for any missing (-1) values. The
163 * RTC driver prevents "periodic alarm" modes.
164 *
165 * But this can be racy, because some fields of the RTC timestamp
166 * may have wrapped in the interval since we read the RTC alarm,
167 * which would lead to us inserting inconsistent values in place
168 * of the -1 fields.
169 *
170 * Reading the alarm and timestamp in the reverse sequence
171 * would have the same race condition, and not solve the issue.
172 *
173 * So, we must first read the RTC timestamp,
174 * then read the RTC alarm value,
175 * and then read a second RTC timestamp.
176 *
177 * If any fields of the second timestamp have changed
178 * when compared with the first timestamp, then we know
179 * our timestamp may be inconsistent with that used by
180 * the low-level rtc_read_alarm_internal() function.
181 *
182 * So, when the two timestamps disagree, we just loop and do
183 * the process again to get a fully consistent set of values.
184 *
185 * This could all instead be done in the lower level driver,
186 * but since more than one lower level RTC implementation needs it,
187 * then it's probably best to do it here instead of there.
188 */
189
190 /* Get the "before" timestamp */
191 err = rtc_read_time(rtc, &before);
192 if (err < 0)
193 return err;
194 do {
195 if (!first_time)
196 memcpy(&before, &now, sizeof(struct rtc_time));
197 first_time = 0;
198
199 /* get the RTC alarm values, which may be incomplete */
200 err = rtc_read_alarm_internal(rtc, alarm);
201 if (err)
202 return err;
203
204 /* full-function RTCs won't have such missing fields */
205 if (rtc_valid_tm(&alarm->time) == 0)
206 return 0;
207
208 /* get the "after" timestamp, to detect wrapped fields */
209 err = rtc_read_time(rtc, &now);
210 if (err < 0)
211 return err;
212
213 /* note that tm_sec is a "don't care" value here: */
214 } while ( before.tm_min != now.tm_min
215 || before.tm_hour != now.tm_hour
216 || before.tm_mon != now.tm_mon
217 || before.tm_year != now.tm_year);
218
219 /* Fill in the missing alarm fields using the timestamp; we
220 * know there's at least one since alarm->time is invalid.
221 */
222 if (alarm->time.tm_sec == -1)
223 alarm->time.tm_sec = now.tm_sec;
224 if (alarm->time.tm_min == -1)
225 alarm->time.tm_min = now.tm_min;
226 if (alarm->time.tm_hour == -1)
227 alarm->time.tm_hour = now.tm_hour;
228
229 /* For simplicity, only support date rollover for now */
230 if (alarm->time.tm_mday == -1) {
231 alarm->time.tm_mday = now.tm_mday;
232 missing = day;
233 }
234 if (alarm->time.tm_mon == -1) {
235 alarm->time.tm_mon = now.tm_mon;
236 if (missing == none)
237 missing = month;
238 }
239 if (alarm->time.tm_year == -1) {
240 alarm->time.tm_year = now.tm_year;
241 if (missing == none)
242 missing = year;
243 }
244
245 /* with luck, no rollover is needed */
246 rtc_tm_to_time(&now, &t_now);
247 rtc_tm_to_time(&alarm->time, &t_alm);
248 if (t_now < t_alm)
249 goto done;
250
251 switch (missing) {
252
253 /* 24 hour rollover ... if it's now 10am Monday, an alarm
254 * that will trigger at 5am will do so at 5am Tuesday, which
255 * could also be in the next month or year. This is a common
256 * case, especially for PCs.
257 */
258 case day:
259 dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
260 t_alm += 24 * 60 * 60;
261 rtc_time_to_tm(t_alm, &alarm->time);
262 break;
263
264 /* Month rollover ... if it's the 31st, an alarm on the 3rd will
265 * be next month. An alarm matching on the 30th, 29th, or 28th
266 * may end up in the month after that! Many newer PCs support
267 * this type of alarm.
268 */
269 case month:
270 dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
271 do {
272 if (alarm->time.tm_mon < 11)
273 alarm->time.tm_mon++;
274 else {
275 alarm->time.tm_mon = 0;
276 alarm->time.tm_year++;
277 }
278 days = rtc_month_days(alarm->time.tm_mon,
279 alarm->time.tm_year);
280 } while (days < alarm->time.tm_mday);
281 break;
282
283 /* Year rollover ... easy except for leap years! */
284 case year:
285 dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
286 do {
287 alarm->time.tm_year++;
288 } while (rtc_valid_tm(&alarm->time) != 0);
289 break;
290
291 default:
292 dev_warn(&rtc->dev, "alarm rollover not handled\n");
293 }
294
295done:
296 return 0;
297}
298
119int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 299int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
120{ 300{
121 int err; 301 int err;
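
A worked example of the day-rollover case described in the comments above, done host-side with plain libc (dates are illustrative, this is not kernel code): the hardware reports an alarm of 05:00 with no date fields while the current time is 10:00 on Monday 2011-01-31, so the effective alarm becomes 05:00 on Tuesday 2011-02-01.

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct tm now = { .tm_year = 111, .tm_mon = 0, .tm_mday = 31,
			  .tm_hour = 10, .tm_isdst = -1 };
	struct tm alarm = now;		/* missing date fields filled from "now" */
	time_t t_now, t_alm;

	alarm.tm_hour = 5;		/* only time-of-day came from hardware */

	t_now = mktime(&now);
	t_alm = mktime(&alarm);
	if (t_alm <= t_now)
		t_alm += 24 * 60 * 60;	/* same 24h bump as the "day" case above */

	printf("%s", asctime(localtime(&t_alm)));  /* Tue Feb  1 05:00:00 2011 */
	return 0;
}
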
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 26d1cf5d19a..518a76ec71c 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -183,33 +183,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
183 return 0; 183 return 0;
184} 184}
185 185
186/*
187 * Handle commands from user-space
188 */
189static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
190 unsigned long arg)
191{
192 int ret = 0;
193
194 pr_debug("%s(): cmd=%08x, arg=%08lx.\n", __func__, cmd, arg);
195
196 /* important: scrub old status before enabling IRQs */
197 switch (cmd) {
198 case RTC_UIE_OFF: /* update off */
199 at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV);
200 break;
201 case RTC_UIE_ON: /* update on */
202 at91_sys_write(AT91_RTC_SCCR, AT91_RTC_SECEV);
203 at91_sys_write(AT91_RTC_IER, AT91_RTC_SECEV);
204 break;
205 default:
206 ret = -ENOIOCTLCMD;
207 break;
208 }
209
210 return ret;
211}
212
213static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) 186static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
214{ 187{
215 pr_debug("%s(): cmd=%08x\n", __func__, enabled); 188 pr_debug("%s(): cmd=%08x\n", __func__, enabled);
@@ -269,7 +242,6 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
269} 242}
270 243
271static const struct rtc_class_ops at91_rtc_ops = { 244static const struct rtc_class_ops at91_rtc_ops = {
272 .ioctl = at91_rtc_ioctl,
273 .read_time = at91_rtc_readtime, 245 .read_time = at91_rtc_readtime,
274 .set_time = at91_rtc_settime, 246 .set_time = at91_rtc_settime,
275 .read_alarm = at91_rtc_readalarm, 247 .read_alarm = at91_rtc_readalarm,
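
This and the driver hunks that follow all drop the per-driver ioctl (RTC_UIE_ON/OFF), update_irq_enable, irq_set_state and irq_set_freq callbacks; judging by the pie_timer fields set up in the class.c hunk above and the uie_rtctimer reported by the rtc-proc.c hunk later in this diff, update and periodic interrupts appear to be emulated by the RTC core from here on. A hedged sketch of the reduced ops table a driver is left with (the foo_* names and stub bodies are placeholders, not a real driver):

#include <linux/rtc.h>

static int foo_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	return -EINVAL;		/* a real driver reads the hardware here */
}

static int foo_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	return -EINVAL;
}

static int foo_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	return -EINVAL;
}

static int foo_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	return -EINVAL;
}

static int foo_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
	return -EINVAL;
}

static const struct rtc_class_ops foo_rtc_ops = {
	.read_time	  = foo_rtc_read_time,
	.set_time	  = foo_rtc_set_time,
	.read_alarm	  = foo_rtc_read_alarm,
	.set_alarm	  = foo_rtc_set_alarm,
	.alarm_irq_enable = foo_rtc_alarm_irq_enable,
};
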
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 5469c52cba3..a3ad957507d 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -216,33 +216,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
216 return 0; 216 return 0;
217} 217}
218 218
219/*
220 * Handle commands from user-space
221 */
222static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
223 unsigned long arg)
224{
225 struct sam9_rtc *rtc = dev_get_drvdata(dev);
226 int ret = 0;
227 u32 mr = rtt_readl(rtc, MR);
228
229 dev_dbg(dev, "ioctl: cmd=%08x, arg=%08lx, mr %08x\n", cmd, arg, mr);
230
231 switch (cmd) {
232 case RTC_UIE_OFF: /* update off */
233 rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
234 break;
235 case RTC_UIE_ON: /* update on */
236 rtt_writel(rtc, MR, mr | AT91_RTT_RTTINCIEN);
237 break;
238 default:
239 ret = -ENOIOCTLCMD;
240 break;
241 }
242
243 return ret;
244}
245
246static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) 219static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
247{ 220{
248 struct sam9_rtc *rtc = dev_get_drvdata(dev); 221 struct sam9_rtc *rtc = dev_get_drvdata(dev);
@@ -303,7 +276,6 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
303} 276}
304 277
305static const struct rtc_class_ops at91_rtc_ops = { 278static const struct rtc_class_ops at91_rtc_ops = {
306 .ioctl = at91_rtc_ioctl,
307 .read_time = at91_rtc_readtime, 279 .read_time = at91_rtc_readtime,
308 .set_time = at91_rtc_settime, 280 .set_time = at91_rtc_settime,
309 .read_alarm = at91_rtc_readalarm, 281 .read_alarm = at91_rtc_readalarm,
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 17971d93354..ca9cff85ab8 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -240,32 +240,6 @@ static void bfin_rtc_int_set_alarm(struct bfin_rtc *rtc)
240 */ 240 */
241 bfin_rtc_int_set(rtc->rtc_alarm.tm_yday == -1 ? RTC_ISTAT_ALARM : RTC_ISTAT_ALARM_DAY); 241 bfin_rtc_int_set(rtc->rtc_alarm.tm_yday == -1 ? RTC_ISTAT_ALARM : RTC_ISTAT_ALARM_DAY);
242} 242}
243static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
244{
245 struct bfin_rtc *rtc = dev_get_drvdata(dev);
246 int ret = 0;
247
248 dev_dbg_stamp(dev);
249
250 bfin_rtc_sync_pending(dev);
251
252 switch (cmd) {
253 case RTC_UIE_ON:
254 dev_dbg_stamp(dev);
255 bfin_rtc_int_set(RTC_ISTAT_SEC);
256 break;
257 case RTC_UIE_OFF:
258 dev_dbg_stamp(dev);
259 bfin_rtc_int_clear(~RTC_ISTAT_SEC);
260 break;
261
262 default:
263 dev_dbg_stamp(dev);
264 ret = -ENOIOCTLCMD;
265 }
266
267 return ret;
268}
269 243
270static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) 244static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
271{ 245{
@@ -358,7 +332,6 @@ static int bfin_rtc_proc(struct device *dev, struct seq_file *seq)
358} 332}
359 333
360static struct rtc_class_ops bfin_rtc_ops = { 334static struct rtc_class_ops bfin_rtc_ops = {
361 .ioctl = bfin_rtc_ioctl,
362 .read_time = bfin_rtc_read_time, 335 .read_time = bfin_rtc_read_time,
363 .set_time = bfin_rtc_set_time, 336 .set_time = bfin_rtc_set_time,
364 .read_alarm = bfin_rtc_read_alarm, 337 .read_alarm = bfin_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index c7ff8df347e..911e75cdc12 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -37,6 +37,8 @@
37#include <linux/mod_devicetable.h> 37#include <linux/mod_devicetable.h>
38#include <linux/log2.h> 38#include <linux/log2.h>
39#include <linux/pm.h> 39#include <linux/pm.h>
40#include <linux/of.h>
41#include <linux/of_platform.h>
40 42
41/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */ 43/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
42#include <asm-generic/rtc.h> 44#include <asm-generic/rtc.h>
@@ -375,50 +377,6 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
375 return 0; 377 return 0;
376} 378}
377 379
378static int cmos_irq_set_freq(struct device *dev, int freq)
379{
380 struct cmos_rtc *cmos = dev_get_drvdata(dev);
381 int f;
382 unsigned long flags;
383
384 if (!is_valid_irq(cmos->irq))
385 return -ENXIO;
386
387 if (!is_power_of_2(freq))
388 return -EINVAL;
389 /* 0 = no irqs; 1 = 2^15 Hz ... 15 = 2^0 Hz */
390 f = ffs(freq);
391 if (f-- > 16)
392 return -EINVAL;
393 f = 16 - f;
394
395 spin_lock_irqsave(&rtc_lock, flags);
396 hpet_set_periodic_freq(freq);
397 CMOS_WRITE(RTC_REF_CLCK_32KHZ | f, RTC_FREQ_SELECT);
398 spin_unlock_irqrestore(&rtc_lock, flags);
399
400 return 0;
401}
402
403static int cmos_irq_set_state(struct device *dev, int enabled)
404{
405 struct cmos_rtc *cmos = dev_get_drvdata(dev);
406 unsigned long flags;
407
408 if (!is_valid_irq(cmos->irq))
409 return -ENXIO;
410
411 spin_lock_irqsave(&rtc_lock, flags);
412
413 if (enabled)
414 cmos_irq_enable(cmos, RTC_PIE);
415 else
416 cmos_irq_disable(cmos, RTC_PIE);
417
418 spin_unlock_irqrestore(&rtc_lock, flags);
419 return 0;
420}
421
422static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled) 380static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
423{ 381{
424 struct cmos_rtc *cmos = dev_get_drvdata(dev); 382 struct cmos_rtc *cmos = dev_get_drvdata(dev);
@@ -438,25 +396,6 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
438 return 0; 396 return 0;
439} 397}
440 398
441static int cmos_update_irq_enable(struct device *dev, unsigned int enabled)
442{
443 struct cmos_rtc *cmos = dev_get_drvdata(dev);
444 unsigned long flags;
445
446 if (!is_valid_irq(cmos->irq))
447 return -EINVAL;
448
449 spin_lock_irqsave(&rtc_lock, flags);
450
451 if (enabled)
452 cmos_irq_enable(cmos, RTC_UIE);
453 else
454 cmos_irq_disable(cmos, RTC_UIE);
455
456 spin_unlock_irqrestore(&rtc_lock, flags);
457 return 0;
458}
459
460#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE) 399#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
461 400
462static int cmos_procfs(struct device *dev, struct seq_file *seq) 401static int cmos_procfs(struct device *dev, struct seq_file *seq)
@@ -501,10 +440,7 @@ static const struct rtc_class_ops cmos_rtc_ops = {
501 .read_alarm = cmos_read_alarm, 440 .read_alarm = cmos_read_alarm,
502 .set_alarm = cmos_set_alarm, 441 .set_alarm = cmos_set_alarm,
503 .proc = cmos_procfs, 442 .proc = cmos_procfs,
504 .irq_set_freq = cmos_irq_set_freq,
505 .irq_set_state = cmos_irq_set_state,
506 .alarm_irq_enable = cmos_alarm_irq_enable, 443 .alarm_irq_enable = cmos_alarm_irq_enable,
507 .update_irq_enable = cmos_update_irq_enable,
508}; 444};
509 445
510/*----------------------------------------------------------------*/ 446/*----------------------------------------------------------------*/
@@ -1123,6 +1059,47 @@ static struct pnp_driver cmos_pnp_driver = {
1123 1059
1124#endif /* CONFIG_PNP */ 1060#endif /* CONFIG_PNP */
1125 1061
1062#ifdef CONFIG_OF
1063static const struct of_device_id of_cmos_match[] = {
1064 {
1065 .compatible = "motorola,mc146818",
1066 },
1067 { },
1068};
1069MODULE_DEVICE_TABLE(of, of_cmos_match);
1070
1071static __init void cmos_of_init(struct platform_device *pdev)
1072{
1073 struct device_node *node = pdev->dev.of_node;
1074 struct rtc_time time;
1075 int ret;
1076 const __be32 *val;
1077
1078 if (!node)
1079 return;
1080
1081 val = of_get_property(node, "ctrl-reg", NULL);
1082 if (val)
1083 CMOS_WRITE(be32_to_cpup(val), RTC_CONTROL);
1084
1085 val = of_get_property(node, "freq-reg", NULL);
1086 if (val)
1087 CMOS_WRITE(be32_to_cpup(val), RTC_FREQ_SELECT);
1088
1089 get_rtc_time(&time);
1090 ret = rtc_valid_tm(&time);
1091 if (ret) {
1092 struct rtc_time def_time = {
1093 .tm_year = 1,
1094 .tm_mday = 1,
1095 };
1096 set_rtc_time(&def_time);
1097 }
1098}
1099#else
1100static inline void cmos_of_init(struct platform_device *pdev) {}
1101#define of_cmos_match NULL
1102#endif
1126/*----------------------------------------------------------------*/ 1103/*----------------------------------------------------------------*/
1127 1104
1128/* Platform setup should have set up an RTC device, when PNP is 1105/* Platform setup should have set up an RTC device, when PNP is
@@ -1131,6 +1108,7 @@ static struct pnp_driver cmos_pnp_driver = {
1131 1108
1132static int __init cmos_platform_probe(struct platform_device *pdev) 1109static int __init cmos_platform_probe(struct platform_device *pdev)
1133{ 1110{
1111 cmos_of_init(pdev);
1134 cmos_wake_setup(&pdev->dev); 1112 cmos_wake_setup(&pdev->dev);
1135 return cmos_do_probe(&pdev->dev, 1113 return cmos_do_probe(&pdev->dev,
1136 platform_get_resource(pdev, IORESOURCE_IO, 0), 1114 platform_get_resource(pdev, IORESOURCE_IO, 0),
@@ -1162,6 +1140,7 @@ static struct platform_driver cmos_platform_driver = {
1162#ifdef CONFIG_PM 1140#ifdef CONFIG_PM
1163 .pm = &cmos_pm_ops, 1141 .pm = &cmos_pm_ops,
1164#endif 1142#endif
1143 .of_match_table = of_cmos_match,
1165 } 1144 }
1166}; 1145};
1167 1146
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index 34647fc1ee9..8d46838dff8 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -231,10 +231,6 @@ davinci_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
231 case RTC_WIE_OFF: 231 case RTC_WIE_OFF:
232 rtc_ctrl &= ~PRTCSS_RTC_CTRL_WEN; 232 rtc_ctrl &= ~PRTCSS_RTC_CTRL_WEN;
233 break; 233 break;
234 case RTC_UIE_OFF:
235 case RTC_UIE_ON:
236 ret = -ENOTTY;
237 break;
238 default: 234 default:
239 ret = -ENOIOCTLCMD; 235 ret = -ENOIOCTLCMD;
240 } 236 }
@@ -473,55 +469,6 @@ static int davinci_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
473 return 0; 469 return 0;
474} 470}
475 471
476static int davinci_rtc_irq_set_state(struct device *dev, int enabled)
477{
478 struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
479 unsigned long flags;
480 u8 rtc_ctrl;
481
482 spin_lock_irqsave(&davinci_rtc_lock, flags);
483
484 rtc_ctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL);
485
486 if (enabled) {
487 while (rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL)
488 & PRTCSS_RTC_CTRL_WDTBUS)
489 cpu_relax();
490
491 rtc_ctrl |= PRTCSS_RTC_CTRL_TE;
492 rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
493
494 rtcss_write(davinci_rtc, 0x0, PRTCSS_RTC_CLKC_CNT);
495
496 rtc_ctrl |= PRTCSS_RTC_CTRL_TIEN |
497 PRTCSS_RTC_CTRL_TMMD |
498 PRTCSS_RTC_CTRL_TMRFLG;
499 } else
500 rtc_ctrl &= ~PRTCSS_RTC_CTRL_TIEN;
501
502 rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
503
504 spin_unlock_irqrestore(&davinci_rtc_lock, flags);
505
506 return 0;
507}
508
509static int davinci_rtc_irq_set_freq(struct device *dev, int freq)
510{
511 struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
512 unsigned long flags;
513 u16 tmr_counter = (0x8000 >> (ffs(freq) - 1));
514
515 spin_lock_irqsave(&davinci_rtc_lock, flags);
516
517 rtcss_write(davinci_rtc, tmr_counter & 0xFF, PRTCSS_RTC_TMR0);
518 rtcss_write(davinci_rtc, (tmr_counter & 0xFF00) >> 8, PRTCSS_RTC_TMR1);
519
520 spin_unlock_irqrestore(&davinci_rtc_lock, flags);
521
522 return 0;
523}
524
525static struct rtc_class_ops davinci_rtc_ops = { 472static struct rtc_class_ops davinci_rtc_ops = {
526 .ioctl = davinci_rtc_ioctl, 473 .ioctl = davinci_rtc_ioctl,
527 .read_time = davinci_rtc_read_time, 474 .read_time = davinci_rtc_read_time,
@@ -529,8 +476,6 @@ static struct rtc_class_ops davinci_rtc_ops = {
529 .alarm_irq_enable = davinci_rtc_alarm_irq_enable, 476 .alarm_irq_enable = davinci_rtc_alarm_irq_enable,
530 .read_alarm = davinci_rtc_read_alarm, 477 .read_alarm = davinci_rtc_read_alarm,
531 .set_alarm = davinci_rtc_set_alarm, 478 .set_alarm = davinci_rtc_set_alarm,
532 .irq_set_state = davinci_rtc_irq_set_state,
533 .irq_set_freq = davinci_rtc_irq_set_freq,
534}; 479};
535 480
536static int __init davinci_rtc_probe(struct platform_device *pdev) 481static int __init davinci_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 37268e97de4..3fffd708711 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -397,29 +397,12 @@ static int ds1511_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
397 return 0; 397 return 0;
398} 398}
399 399
400static int ds1511_rtc_update_irq_enable(struct device *dev,
401 unsigned int enabled)
402{
403 struct platform_device *pdev = to_platform_device(dev);
404 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
405
406 if (pdata->irq <= 0)
407 return -EINVAL;
408 if (enabled)
409 pdata->irqen |= RTC_UF;
410 else
411 pdata->irqen &= ~RTC_UF;
412 ds1511_rtc_update_alarm(pdata);
413 return 0;
414}
415
416static const struct rtc_class_ops ds1511_rtc_ops = { 400static const struct rtc_class_ops ds1511_rtc_ops = {
417 .read_time = ds1511_rtc_read_time, 401 .read_time = ds1511_rtc_read_time,
418 .set_time = ds1511_rtc_set_time, 402 .set_time = ds1511_rtc_set_time,
419 .read_alarm = ds1511_rtc_read_alarm, 403 .read_alarm = ds1511_rtc_read_alarm,
420 .set_alarm = ds1511_rtc_set_alarm, 404 .set_alarm = ds1511_rtc_set_alarm,
421 .alarm_irq_enable = ds1511_rtc_alarm_irq_enable, 405 .alarm_irq_enable = ds1511_rtc_alarm_irq_enable,
422 .update_irq_enable = ds1511_rtc_update_irq_enable,
423}; 406};
424 407
425 static ssize_t 408 static ssize_t
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index ff432e2ca27..fee41b97c9e 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -227,29 +227,12 @@ static int ds1553_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
227 return 0; 227 return 0;
228} 228}
229 229
230static int ds1553_rtc_update_irq_enable(struct device *dev,
231 unsigned int enabled)
232{
233 struct platform_device *pdev = to_platform_device(dev);
234 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
235
236 if (pdata->irq <= 0)
237 return -EINVAL;
238 if (enabled)
239 pdata->irqen |= RTC_UF;
240 else
241 pdata->irqen &= ~RTC_UF;
242 ds1553_rtc_update_alarm(pdata);
243 return 0;
244}
245
246static const struct rtc_class_ops ds1553_rtc_ops = { 230static const struct rtc_class_ops ds1553_rtc_ops = {
247 .read_time = ds1553_rtc_read_time, 231 .read_time = ds1553_rtc_read_time,
248 .set_time = ds1553_rtc_set_time, 232 .set_time = ds1553_rtc_set_time,
249 .read_alarm = ds1553_rtc_read_alarm, 233 .read_alarm = ds1553_rtc_read_alarm,
250 .set_alarm = ds1553_rtc_set_alarm, 234 .set_alarm = ds1553_rtc_set_alarm,
251 .alarm_irq_enable = ds1553_rtc_alarm_irq_enable, 235 .alarm_irq_enable = ds1553_rtc_alarm_irq_enable,
252 .update_irq_enable = ds1553_rtc_update_irq_enable,
253}; 236};
254 237
255static ssize_t ds1553_nvram_read(struct file *filp, struct kobject *kobj, 238static ssize_t ds1553_nvram_read(struct file *filp, struct kobject *kobj,
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 950735415a7..27b7bf672ac 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -339,23 +339,6 @@ static int ds3232_alarm_irq_enable(struct device *dev, unsigned int enabled)
339 return 0; 339 return 0;
340} 340}
341 341
342static int ds3232_update_irq_enable(struct device *dev, unsigned int enabled)
343{
344 struct i2c_client *client = to_i2c_client(dev);
345 struct ds3232 *ds3232 = i2c_get_clientdata(client);
346
347 if (client->irq <= 0)
348 return -EINVAL;
349
350 if (enabled)
351 ds3232->rtc->irq_data |= RTC_UF;
352 else
353 ds3232->rtc->irq_data &= ~RTC_UF;
354
355 ds3232_update_alarm(client);
356 return 0;
357}
358
359static irqreturn_t ds3232_irq(int irq, void *dev_id) 342static irqreturn_t ds3232_irq(int irq, void *dev_id)
360{ 343{
361 struct i2c_client *client = dev_id; 344 struct i2c_client *client = dev_id;
@@ -406,7 +389,6 @@ static const struct rtc_class_ops ds3232_rtc_ops = {
406 .read_alarm = ds3232_read_alarm, 389 .read_alarm = ds3232_read_alarm,
407 .set_alarm = ds3232_set_alarm, 390 .set_alarm = ds3232_set_alarm,
408 .alarm_irq_enable = ds3232_alarm_irq_enable, 391 .alarm_irq_enable = ds3232_alarm_irq_enable,
409 .update_irq_enable = ds3232_update_irq_enable,
410}; 392};
411 393
412static int __devinit ds3232_probe(struct i2c_client *client, 394static int __devinit ds3232_probe(struct i2c_client *client,
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 2e16f72c905..b6473631d18 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -168,12 +168,6 @@ static int jz4740_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
168 return ret; 168 return ret;
169} 169}
170 170
171static int jz4740_rtc_update_irq_enable(struct device *dev, unsigned int enable)
172{
173 struct jz4740_rtc *rtc = dev_get_drvdata(dev);
174 return jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_1HZ_IRQ, enable);
175}
176
177static int jz4740_rtc_alarm_irq_enable(struct device *dev, unsigned int enable) 171static int jz4740_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
178{ 172{
179 struct jz4740_rtc *rtc = dev_get_drvdata(dev); 173 struct jz4740_rtc *rtc = dev_get_drvdata(dev);
@@ -185,7 +179,6 @@ static struct rtc_class_ops jz4740_rtc_ops = {
185 .set_mmss = jz4740_rtc_set_mmss, 179 .set_mmss = jz4740_rtc_set_mmss,
186 .read_alarm = jz4740_rtc_read_alarm, 180 .read_alarm = jz4740_rtc_read_alarm,
187 .set_alarm = jz4740_rtc_set_alarm, 181 .set_alarm = jz4740_rtc_set_alarm,
188 .update_irq_enable = jz4740_rtc_update_irq_enable,
189 .alarm_irq_enable = jz4740_rtc_alarm_irq_enable, 182 .alarm_irq_enable = jz4740_rtc_alarm_irq_enable,
190}; 183};
191 184
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index 5314b153bfb..c4200646955 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -282,12 +282,6 @@ static irqreturn_t mc13xxx_rtc_update_handler(int irq, void *dev)
282 return IRQ_HANDLED; 282 return IRQ_HANDLED;
283} 283}
284 284
285static int mc13xxx_rtc_update_irq_enable(struct device *dev,
286 unsigned int enabled)
287{
288 return mc13xxx_rtc_irq_enable(dev, enabled, MC13XXX_IRQ_1HZ);
289}
290
291static int mc13xxx_rtc_alarm_irq_enable(struct device *dev, 285static int mc13xxx_rtc_alarm_irq_enable(struct device *dev,
292 unsigned int enabled) 286 unsigned int enabled)
293{ 287{
@@ -300,7 +294,6 @@ static const struct rtc_class_ops mc13xxx_rtc_ops = {
300 .read_alarm = mc13xxx_rtc_read_alarm, 294 .read_alarm = mc13xxx_rtc_read_alarm,
301 .set_alarm = mc13xxx_rtc_set_alarm, 295 .set_alarm = mc13xxx_rtc_set_alarm,
302 .alarm_irq_enable = mc13xxx_rtc_alarm_irq_enable, 296 .alarm_irq_enable = mc13xxx_rtc_alarm_irq_enable,
303 .update_irq_enable = mc13xxx_rtc_update_irq_enable,
304}; 297};
305 298
306static irqreturn_t mc13xxx_rtc_reset_handler(int irq, void *dev) 299static irqreturn_t mc13xxx_rtc_reset_handler(int irq, void *dev)
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index dfcdf0901d2..b40c1ff1ebc 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -240,32 +240,12 @@ static int mpc5121_rtc_alarm_irq_enable(struct device *dev,
240 return 0; 240 return 0;
241} 241}
242 242
243static int mpc5121_rtc_update_irq_enable(struct device *dev,
244 unsigned int enabled)
245{
246 struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
247 struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
248 int val;
249
250 val = in_8(&regs->int_enable);
251
252 if (enabled)
253 val = (val & ~0x8) | 0x1;
254 else
255 val &= ~0x1;
256
257 out_8(&regs->int_enable, val);
258
259 return 0;
260}
261
262static const struct rtc_class_ops mpc5121_rtc_ops = { 243static const struct rtc_class_ops mpc5121_rtc_ops = {
263 .read_time = mpc5121_rtc_read_time, 244 .read_time = mpc5121_rtc_read_time,
264 .set_time = mpc5121_rtc_set_time, 245 .set_time = mpc5121_rtc_set_time,
265 .read_alarm = mpc5121_rtc_read_alarm, 246 .read_alarm = mpc5121_rtc_read_alarm,
266 .set_alarm = mpc5121_rtc_set_alarm, 247 .set_alarm = mpc5121_rtc_set_alarm,
267 .alarm_irq_enable = mpc5121_rtc_alarm_irq_enable, 248 .alarm_irq_enable = mpc5121_rtc_alarm_irq_enable,
268 .update_irq_enable = mpc5121_rtc_update_irq_enable,
269}; 249};
270 250
271static int __devinit mpc5121_rtc_probe(struct platform_device *op, 251static int __devinit mpc5121_rtc_probe(struct platform_device *op,
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index 1db62db8469..b86bc328463 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -62,6 +62,17 @@ static inline int is_intr(u8 rtc_intr)
62 return rtc_intr & RTC_IRQMASK; 62 return rtc_intr & RTC_IRQMASK;
63} 63}
64 64
65static inline unsigned char vrtc_is_updating(void)
66{
67 unsigned char uip;
68 unsigned long flags;
69
70 spin_lock_irqsave(&rtc_lock, flags);
71 uip = (vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP);
72 spin_unlock_irqrestore(&rtc_lock, flags);
73 return uip;
74}
75
65/* 76/*
66 * rtc_time's year contains the increment over 1900, but vRTC's YEAR 77 * rtc_time's year contains the increment over 1900, but vRTC's YEAR
67 * register can't be programmed to value larger than 0x64, so vRTC 78 * register can't be programmed to value larger than 0x64, so vRTC
@@ -76,7 +87,7 @@ static int mrst_read_time(struct device *dev, struct rtc_time *time)
76{ 87{
77 unsigned long flags; 88 unsigned long flags;
78 89
79 if (rtc_is_updating()) 90 if (vrtc_is_updating())
80 mdelay(20); 91 mdelay(20);
81 92
82 spin_lock_irqsave(&rtc_lock, flags); 93 spin_lock_irqsave(&rtc_lock, flags);
@@ -236,25 +247,6 @@ static int mrst_set_alarm(struct device *dev, struct rtc_wkalrm *t)
236 return 0; 247 return 0;
237} 248}
238 249
239static int mrst_irq_set_state(struct device *dev, int enabled)
240{
241 struct mrst_rtc *mrst = dev_get_drvdata(dev);
242 unsigned long flags;
243
244 if (!mrst->irq)
245 return -ENXIO;
246
247 spin_lock_irqsave(&rtc_lock, flags);
248
249 if (enabled)
250 mrst_irq_enable(mrst, RTC_PIE);
251 else
252 mrst_irq_disable(mrst, RTC_PIE);
253
254 spin_unlock_irqrestore(&rtc_lock, flags);
255 return 0;
256}
257
258/* Currently, the vRTC doesn't support UIE ON/OFF */ 250/* Currently, the vRTC doesn't support UIE ON/OFF */
259static int mrst_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) 251static int mrst_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
260{ 252{
@@ -301,7 +293,6 @@ static const struct rtc_class_ops mrst_rtc_ops = {
301 .read_alarm = mrst_read_alarm, 293 .read_alarm = mrst_read_alarm,
302 .set_alarm = mrst_set_alarm, 294 .set_alarm = mrst_set_alarm,
303 .proc = mrst_procfs, 295 .proc = mrst_procfs,
304 .irq_set_state = mrst_irq_set_state,
305 .alarm_irq_enable = mrst_rtc_alarm_irq_enable, 296 .alarm_irq_enable = mrst_rtc_alarm_irq_enable,
306}; 297};
307 298
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 0b06c1e03fd..826ab64a8fa 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -274,12 +274,6 @@ static int mxc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
274 return 0; 274 return 0;
275} 275}
276 276
277static int mxc_rtc_update_irq_enable(struct device *dev, unsigned int enabled)
278{
279 mxc_rtc_irq_enable(dev, RTC_1HZ_BIT, enabled);
280 return 0;
281}
282
283/* 277/*
284 * This function reads the current RTC time into tm in Gregorian date. 278 * This function reads the current RTC time into tm in Gregorian date.
285 */ 279 */
@@ -368,7 +362,6 @@ static struct rtc_class_ops mxc_rtc_ops = {
368 .read_alarm = mxc_rtc_read_alarm, 362 .read_alarm = mxc_rtc_read_alarm,
369 .set_alarm = mxc_rtc_set_alarm, 363 .set_alarm = mxc_rtc_set_alarm,
370 .alarm_irq_enable = mxc_rtc_alarm_irq_enable, 364 .alarm_irq_enable = mxc_rtc_alarm_irq_enable,
371 .update_irq_enable = mxc_rtc_update_irq_enable,
372}; 365};
373 366
374static int __init mxc_rtc_probe(struct platform_device *pdev) 367static int __init mxc_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c
index ddb0857e15a..781068d62f2 100644
--- a/drivers/rtc/rtc-nuc900.c
+++ b/drivers/rtc/rtc-nuc900.c
@@ -134,20 +134,6 @@ static void nuc900_rtc_bin2bcd(struct device *dev, struct rtc_time *settm,
134 gettm->bcd_hour = bin2bcd(settm->tm_hour) << 16; 134 gettm->bcd_hour = bin2bcd(settm->tm_hour) << 16;
135} 135}
136 136
137static int nuc900_update_irq_enable(struct device *dev, unsigned int enabled)
138{
139 struct nuc900_rtc *rtc = dev_get_drvdata(dev);
140
141 if (enabled)
142 __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)|
143 (TICKINTENB), rtc->rtc_reg + REG_RTC_RIER);
144 else
145 __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)&
146 (~TICKINTENB), rtc->rtc_reg + REG_RTC_RIER);
147
148 return 0;
149}
150
151static int nuc900_alarm_irq_enable(struct device *dev, unsigned int enabled) 137static int nuc900_alarm_irq_enable(struct device *dev, unsigned int enabled)
152{ 138{
153 struct nuc900_rtc *rtc = dev_get_drvdata(dev); 139 struct nuc900_rtc *rtc = dev_get_drvdata(dev);
@@ -234,7 +220,6 @@ static struct rtc_class_ops nuc900_rtc_ops = {
234 .read_alarm = nuc900_rtc_read_alarm, 220 .read_alarm = nuc900_rtc_read_alarm,
235 .set_alarm = nuc900_rtc_set_alarm, 221 .set_alarm = nuc900_rtc_set_alarm,
236 .alarm_irq_enable = nuc900_alarm_irq_enable, 222 .alarm_irq_enable = nuc900_alarm_irq_enable,
237 .update_irq_enable = nuc900_update_irq_enable,
238}; 223};
239 224
240static int __devinit nuc900_rtc_probe(struct platform_device *pdev) 225static int __devinit nuc900_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index b4dbf3a319b..de0dd7b1f14 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -135,44 +135,6 @@ static irqreturn_t rtc_irq(int irq, void *rtc)
135 return IRQ_HANDLED; 135 return IRQ_HANDLED;
136} 136}
137 137
138#ifdef CONFIG_RTC_INTF_DEV
139
140static int
141omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
142{
143 u8 reg;
144
145 switch (cmd) {
146 case RTC_UIE_OFF:
147 case RTC_UIE_ON:
148 break;
149 default:
150 return -ENOIOCTLCMD;
151 }
152
153 local_irq_disable();
154 rtc_wait_not_busy();
155 reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
156 switch (cmd) {
157 /* UIE = Update Interrupt Enable (1/second) */
158 case RTC_UIE_OFF:
159 reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER;
160 break;
161 case RTC_UIE_ON:
162 reg |= OMAP_RTC_INTERRUPTS_IT_TIMER;
163 break;
164 }
165 rtc_wait_not_busy();
166 rtc_write(reg, OMAP_RTC_INTERRUPTS_REG);
167 local_irq_enable();
168
169 return 0;
170}
171
172#else
173#define omap_rtc_ioctl NULL
174#endif
175
176static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) 138static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
177{ 139{
178 u8 reg; 140 u8 reg;
@@ -313,7 +275,6 @@ static int omap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
313} 275}
314 276
315static struct rtc_class_ops omap_rtc_ops = { 277static struct rtc_class_ops omap_rtc_ops = {
316 .ioctl = omap_rtc_ioctl,
317 .read_time = omap_rtc_read_time, 278 .read_time = omap_rtc_read_time,
318 .set_time = omap_rtc_set_time, 279 .set_time = omap_rtc_set_time,
319 .read_alarm = omap_rtc_read_alarm, 280 .read_alarm = omap_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-pcap.c b/drivers/rtc/rtc-pcap.c
index 25c0b3fd44f..a633abc4289 100644
--- a/drivers/rtc/rtc-pcap.c
+++ b/drivers/rtc/rtc-pcap.c
@@ -131,18 +131,12 @@ static int pcap_rtc_alarm_irq_enable(struct device *dev, unsigned int en)
131 return pcap_rtc_irq_enable(dev, PCAP_IRQ_TODA, en); 131 return pcap_rtc_irq_enable(dev, PCAP_IRQ_TODA, en);
132} 132}
133 133
134static int pcap_rtc_update_irq_enable(struct device *dev, unsigned int en)
135{
136 return pcap_rtc_irq_enable(dev, PCAP_IRQ_1HZ, en);
137}
138
139static const struct rtc_class_ops pcap_rtc_ops = { 134static const struct rtc_class_ops pcap_rtc_ops = {
140 .read_time = pcap_rtc_read_time, 135 .read_time = pcap_rtc_read_time,
141 .read_alarm = pcap_rtc_read_alarm, 136 .read_alarm = pcap_rtc_read_alarm,
142 .set_alarm = pcap_rtc_set_alarm, 137 .set_alarm = pcap_rtc_set_alarm,
143 .set_mmss = pcap_rtc_set_mmss, 138 .set_mmss = pcap_rtc_set_mmss,
144 .alarm_irq_enable = pcap_rtc_alarm_irq_enable, 139 .alarm_irq_enable = pcap_rtc_alarm_irq_enable,
145 .update_irq_enable = pcap_rtc_update_irq_enable,
146}; 140};
147 141
148static int __devinit pcap_rtc_probe(struct platform_device *pdev) 142static int __devinit pcap_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
index 16edf94ab42..f90c574f9d0 100644
--- a/drivers/rtc/rtc-pcf50633.c
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -106,25 +106,6 @@ pcf50633_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
106 return 0; 106 return 0;
107} 107}
108 108
109static int
110pcf50633_rtc_update_irq_enable(struct device *dev, unsigned int enabled)
111{
112 struct pcf50633_rtc *rtc = dev_get_drvdata(dev);
113 int err;
114
115 if (enabled)
116 err = pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_SECOND);
117 else
118 err = pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_SECOND);
119
120 if (err < 0)
121 return err;
122
123 rtc->second_enabled = enabled;
124
125 return 0;
126}
127
128static int pcf50633_rtc_read_time(struct device *dev, struct rtc_time *tm) 109static int pcf50633_rtc_read_time(struct device *dev, struct rtc_time *tm)
129{ 110{
130 struct pcf50633_rtc *rtc; 111 struct pcf50633_rtc *rtc;
@@ -262,8 +243,7 @@ static struct rtc_class_ops pcf50633_rtc_ops = {
262 .set_time = pcf50633_rtc_set_time, 243 .set_time = pcf50633_rtc_set_time,
263 .read_alarm = pcf50633_rtc_read_alarm, 244 .read_alarm = pcf50633_rtc_read_alarm,
264 .set_alarm = pcf50633_rtc_set_alarm, 245 .set_alarm = pcf50633_rtc_set_alarm,
265 .alarm_irq_enable = pcf50633_rtc_alarm_irq_enable, 246 .alarm_irq_enable = pcf50633_rtc_alarm_irq_enable,
266 .update_irq_enable = pcf50633_rtc_update_irq_enable,
267}; 247};
268 248
269static void pcf50633_rtc_irq(int irq, void *data) 249static void pcf50633_rtc_irq(int irq, void *data)
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index bbdb2f02798..d554368c9f5 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -35,11 +35,6 @@ static irqreturn_t pl030_interrupt(int irq, void *dev_id)
35 return IRQ_HANDLED; 35 return IRQ_HANDLED;
36} 36}
37 37
38static int pl030_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
39{
40 return -ENOIOCTLCMD;
41}
42
43static int pl030_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) 38static int pl030_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
44{ 39{
45 struct pl030_rtc *rtc = dev_get_drvdata(dev); 40 struct pl030_rtc *rtc = dev_get_drvdata(dev);
@@ -96,7 +91,6 @@ static int pl030_set_time(struct device *dev, struct rtc_time *tm)
96} 91}
97 92
98static const struct rtc_class_ops pl030_ops = { 93static const struct rtc_class_ops pl030_ops = {
99 .ioctl = pl030_ioctl,
100 .read_time = pl030_read_time, 94 .read_time = pl030_read_time,
101 .set_time = pl030_set_time, 95 .set_time = pl030_set_time,
102 .read_alarm = pl030_read_alarm, 96 .read_alarm = pl030_read_alarm,
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index b7a6690e5b3..d829ea63c4f 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -293,57 +293,6 @@ static int pl031_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
293 return ret; 293 return ret;
294} 294}
295 295
296/* Periodic interrupt is only available in ST variants. */
297static int pl031_irq_set_state(struct device *dev, int enabled)
298{
299 struct pl031_local *ldata = dev_get_drvdata(dev);
300
301 if (enabled == 1) {
302 /* Clear any pending timer interrupt. */
303 writel(RTC_BIT_PI, ldata->base + RTC_ICR);
304
305 writel(readl(ldata->base + RTC_IMSC) | RTC_BIT_PI,
306 ldata->base + RTC_IMSC);
307
308 /* Now start the timer */
309 writel(readl(ldata->base + RTC_TCR) | RTC_TCR_EN,
310 ldata->base + RTC_TCR);
311
312 } else {
313 writel(readl(ldata->base + RTC_IMSC) & (~RTC_BIT_PI),
314 ldata->base + RTC_IMSC);
315
316 /* Also stop the timer */
317 writel(readl(ldata->base + RTC_TCR) & (~RTC_TCR_EN),
318 ldata->base + RTC_TCR);
319 }
320 /* Wait at least 1 RTC32 clock cycle to ensure next access
321 * to RTC_TCR will succeed.
322 */
323 udelay(40);
324
325 return 0;
326}
327
328static int pl031_irq_set_freq(struct device *dev, int freq)
329{
330 struct pl031_local *ldata = dev_get_drvdata(dev);
331
332 /* Cant set timer if it is already enabled */
333 if (readl(ldata->base + RTC_TCR) & RTC_TCR_EN) {
334 dev_err(dev, "can't change frequency while timer enabled\n");
335 return -EINVAL;
336 }
337
338 /* If self start bit in RTC_TCR is set timer will start here,
339 * but we never set that bit. Instead we start the timer when
340 * set_state is called with enabled == 1.
341 */
342 writel(RTC_TIMER_FREQ / freq, ldata->base + RTC_TLR);
343
344 return 0;
345}
346
347static int pl031_remove(struct amba_device *adev) 296static int pl031_remove(struct amba_device *adev)
348{ 297{
349 struct pl031_local *ldata = dev_get_drvdata(&adev->dev); 298 struct pl031_local *ldata = dev_get_drvdata(&adev->dev);
@@ -440,8 +389,6 @@ static struct rtc_class_ops stv1_pl031_ops = {
440 .read_alarm = pl031_read_alarm, 389 .read_alarm = pl031_read_alarm,
441 .set_alarm = pl031_set_alarm, 390 .set_alarm = pl031_set_alarm,
442 .alarm_irq_enable = pl031_alarm_irq_enable, 391 .alarm_irq_enable = pl031_alarm_irq_enable,
443 .irq_set_state = pl031_irq_set_state,
444 .irq_set_freq = pl031_irq_set_freq,
445}; 392};
446 393
447/* And the second ST derivative */ 394/* And the second ST derivative */
@@ -451,8 +398,6 @@ static struct rtc_class_ops stv2_pl031_ops = {
451 .read_alarm = pl031_stv2_read_alarm, 398 .read_alarm = pl031_stv2_read_alarm,
452 .set_alarm = pl031_stv2_set_alarm, 399 .set_alarm = pl031_stv2_set_alarm,
453 .alarm_irq_enable = pl031_alarm_irq_enable, 400 .alarm_irq_enable = pl031_alarm_irq_enable,
454 .irq_set_state = pl031_irq_set_state,
455 .irq_set_freq = pl031_irq_set_freq,
456}; 401};
457 402
458static struct amba_id pl031_ids[] = { 403static struct amba_id pl031_ids[] = {
diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c
index 242bbf86c74..0a59fda5c09 100644
--- a/drivers/rtc/rtc-proc.c
+++ b/drivers/rtc/rtc-proc.c
@@ -69,6 +69,14 @@ static int rtc_proc_show(struct seq_file *seq, void *offset)
69 alrm.enabled ? "yes" : "no"); 69 alrm.enabled ? "yes" : "no");
70 seq_printf(seq, "alrm_pending\t: %s\n", 70 seq_printf(seq, "alrm_pending\t: %s\n",
71 alrm.pending ? "yes" : "no"); 71 alrm.pending ? "yes" : "no");
72 seq_printf(seq, "update IRQ enabled\t: %s\n",
73 (rtc->uie_rtctimer.enabled) ? "yes" : "no");
74 seq_printf(seq, "periodic IRQ enabled\t: %s\n",
75 (rtc->pie_enabled) ? "yes" : "no");
76 seq_printf(seq, "periodic IRQ frequency\t: %d\n",
77 rtc->irq_freq);
78 seq_printf(seq, "max user IRQ frequency\t: %d\n",
79 rtc->max_user_freq);
72 } 80 }
73 81
74 seq_printf(seq, "24hr\t\t: yes\n"); 82 seq_printf(seq, "24hr\t\t: yes\n");
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index 29e867a1aaa..fc9f4991574 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -209,32 +209,6 @@ static void pxa_rtc_release(struct device *dev)
209 free_irq(pxa_rtc->irq_1Hz, dev); 209 free_irq(pxa_rtc->irq_1Hz, dev);
210} 210}
211 211
212static int pxa_periodic_irq_set_freq(struct device *dev, int freq)
213{
214 struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
215 int period_ms;
216
217 if (freq < 1 || freq > MAXFREQ_PERIODIC)
218 return -EINVAL;
219
220 period_ms = 1000 / freq;
221 rtc_writel(pxa_rtc, PIAR, period_ms);
222
223 return 0;
224}
225
226static int pxa_periodic_irq_set_state(struct device *dev, int enabled)
227{
228 struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
229
230 if (enabled)
231 rtsr_set_bits(pxa_rtc, RTSR_PIALE | RTSR_PICE);
232 else
233 rtsr_clear_bits(pxa_rtc, RTSR_PIALE | RTSR_PICE);
234
235 return 0;
236}
237
238static int pxa_alarm_irq_enable(struct device *dev, unsigned int enabled) 212static int pxa_alarm_irq_enable(struct device *dev, unsigned int enabled)
239{ 213{
240 struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev); 214 struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
@@ -250,21 +224,6 @@ static int pxa_alarm_irq_enable(struct device *dev, unsigned int enabled)
250 return 0; 224 return 0;
251} 225}
252 226
253static int pxa_update_irq_enable(struct device *dev, unsigned int enabled)
254{
255 struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
256
257 spin_lock_irq(&pxa_rtc->lock);
258
259 if (enabled)
260 rtsr_set_bits(pxa_rtc, RTSR_HZE);
261 else
262 rtsr_clear_bits(pxa_rtc, RTSR_HZE);
263
264 spin_unlock_irq(&pxa_rtc->lock);
265 return 0;
266}
267
268static int pxa_rtc_read_time(struct device *dev, struct rtc_time *tm) 227static int pxa_rtc_read_time(struct device *dev, struct rtc_time *tm)
269{ 228{
270 struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev); 229 struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
@@ -346,10 +305,7 @@ static const struct rtc_class_ops pxa_rtc_ops = {
346 .read_alarm = pxa_rtc_read_alarm, 305 .read_alarm = pxa_rtc_read_alarm,
347 .set_alarm = pxa_rtc_set_alarm, 306 .set_alarm = pxa_rtc_set_alarm,
348 .alarm_irq_enable = pxa_alarm_irq_enable, 307 .alarm_irq_enable = pxa_alarm_irq_enable,
349 .update_irq_enable = pxa_update_irq_enable,
350 .proc = pxa_rtc_proc, 308 .proc = pxa_rtc_proc,
351 .irq_set_state = pxa_periodic_irq_set_state,
352 .irq_set_freq = pxa_periodic_irq_set_freq,
353}; 309};
354 310
355static int __init pxa_rtc_probe(struct platform_device *pdev) 311static int __init pxa_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 6aaa1550e3b..85c1b848dd7 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -281,57 +281,6 @@ static int rs5c372_rtc_set_time(struct device *dev, struct rtc_time *tm)
281 return rs5c372_set_datetime(to_i2c_client(dev), tm); 281 return rs5c372_set_datetime(to_i2c_client(dev), tm);
282} 282}
283 283
284#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
285
286static int
287rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
288{
289 struct i2c_client *client = to_i2c_client(dev);
290 struct rs5c372 *rs5c = i2c_get_clientdata(client);
291 unsigned char buf;
292 int status, addr;
293
294 buf = rs5c->regs[RS5C_REG_CTRL1];
295 switch (cmd) {
296 case RTC_UIE_OFF:
297 case RTC_UIE_ON:
298 /* some 327a modes use a different IRQ pin for 1Hz irqs */
299 if (rs5c->type == rtc_rs5c372a
300 && (buf & RS5C372A_CTRL1_SL1))
301 return -ENOIOCTLCMD;
302 default:
303 return -ENOIOCTLCMD;
304 }
305
306 status = rs5c_get_regs(rs5c);
307 if (status < 0)
308 return status;
309
310 addr = RS5C_ADDR(RS5C_REG_CTRL1);
311 switch (cmd) {
312 case RTC_UIE_OFF: /* update off */
313 buf &= ~RS5C_CTRL1_CT_MASK;
314 break;
315 case RTC_UIE_ON: /* update on */
316 buf &= ~RS5C_CTRL1_CT_MASK;
317 buf |= RS5C_CTRL1_CT4;
318 break;
319 }
320
321 if (i2c_smbus_write_byte_data(client, addr, buf) < 0) {
322 printk(KERN_WARNING "%s: can't update alarm\n",
323 rs5c->rtc->name);
324 status = -EIO;
325 } else
326 rs5c->regs[RS5C_REG_CTRL1] = buf;
327
328 return status;
329}
330
331#else
332#define rs5c_rtc_ioctl NULL
333#endif
334
335 284
336static int rs5c_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) 285static int rs5c_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
337{ 286{
@@ -480,7 +429,6 @@ static int rs5c372_rtc_proc(struct device *dev, struct seq_file *seq)
480 429
481static const struct rtc_class_ops rs5c372_rtc_ops = { 430static const struct rtc_class_ops rs5c372_rtc_ops = {
482 .proc = rs5c372_rtc_proc, 431 .proc = rs5c372_rtc_proc,
483 .ioctl = rs5c_rtc_ioctl,
484 .read_time = rs5c372_rtc_read_time, 432 .read_time = rs5c372_rtc_read_time,
485 .set_time = rs5c372_rtc_set_time, 433 .set_time = rs5c372_rtc_set_time,
486 .read_alarm = rs5c_read_alarm, 434 .read_alarm = rs5c_read_alarm,
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index af32a62e12a..fde172fb2ab 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -424,37 +424,12 @@ static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled)
424 return 0; 424 return 0;
425} 425}
426 426
427static int rx8025_irq_set_state(struct device *dev, int enabled)
428{
429 struct i2c_client *client = to_i2c_client(dev);
430 struct rx8025_data *rx8025 = i2c_get_clientdata(client);
431 int ctrl1;
432 int err;
433
434 if (client->irq <= 0)
435 return -ENXIO;
436
437 ctrl1 = rx8025->ctrl1 & ~RX8025_BIT_CTRL1_CT;
438 if (enabled)
439 ctrl1 |= RX8025_BIT_CTRL1_CT_1HZ;
440 if (ctrl1 != rx8025->ctrl1) {
441 rx8025->ctrl1 = ctrl1;
442 err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
443 rx8025->ctrl1);
444 if (err)
445 return err;
446 }
447
448 return 0;
449}
450
451static struct rtc_class_ops rx8025_rtc_ops = { 427static struct rtc_class_ops rx8025_rtc_ops = {
452 .read_time = rx8025_get_time, 428 .read_time = rx8025_get_time,
453 .set_time = rx8025_set_time, 429 .set_time = rx8025_set_time,
454 .read_alarm = rx8025_read_alarm, 430 .read_alarm = rx8025_read_alarm,
455 .set_alarm = rx8025_set_alarm, 431 .set_alarm = rx8025_set_alarm,
456 .alarm_irq_enable = rx8025_alarm_irq_enable, 432 .alarm_irq_enable = rx8025_alarm_irq_enable,
457 .irq_set_state = rx8025_irq_set_state,
458}; 433};
459 434
460/* 435/*
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index b80fa288240..714964913e5 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -93,37 +93,6 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
93 return 0; 93 return 0;
94} 94}
95 95
96static int s3c_rtc_setpie(struct device *dev, int enabled)
97{
98 unsigned int tmp;
99
100 pr_debug("%s: pie=%d\n", __func__, enabled);
101
102 spin_lock_irq(&s3c_rtc_pie_lock);
103
104 if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
105 tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
106 tmp &= ~S3C64XX_RTCCON_TICEN;
107
108 if (enabled)
109 tmp |= S3C64XX_RTCCON_TICEN;
110
111 writew(tmp, s3c_rtc_base + S3C2410_RTCCON);
112 } else {
113 tmp = readb(s3c_rtc_base + S3C2410_TICNT);
114 tmp &= ~S3C2410_TICNT_ENABLE;
115
116 if (enabled)
117 tmp |= S3C2410_TICNT_ENABLE;
118
119 writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
120 }
121
122 spin_unlock_irq(&s3c_rtc_pie_lock);
123
124 return 0;
125}
126
127static int s3c_rtc_setfreq(struct device *dev, int freq) 96static int s3c_rtc_setfreq(struct device *dev, int freq)
128{ 97{
129 struct platform_device *pdev = to_platform_device(dev); 98 struct platform_device *pdev = to_platform_device(dev);
@@ -379,8 +348,6 @@ static const struct rtc_class_ops s3c_rtcops = {
379 .set_time = s3c_rtc_settime, 348 .set_time = s3c_rtc_settime,
380 .read_alarm = s3c_rtc_getalarm, 349 .read_alarm = s3c_rtc_getalarm,
381 .set_alarm = s3c_rtc_setalarm, 350 .set_alarm = s3c_rtc_setalarm,
382 .irq_set_freq = s3c_rtc_setfreq,
383 .irq_set_state = s3c_rtc_setpie,
384 .proc = s3c_rtc_proc, 351 .proc = s3c_rtc_proc,
385 .alarm_irq_enable = s3c_rtc_setaie, 352 .alarm_irq_enable = s3c_rtc_setaie,
386}; 353};
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 5dfe5ffcb0d..0b40bb88a88 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -43,7 +43,6 @@
43#define RTC_DEF_TRIM 0 43#define RTC_DEF_TRIM 0
44 44
45static const unsigned long RTC_FREQ = 1024; 45static const unsigned long RTC_FREQ = 1024;
46static unsigned long timer_freq;
47static struct rtc_time rtc_alarm; 46static struct rtc_time rtc_alarm;
48static DEFINE_SPINLOCK(sa1100_rtc_lock); 47static DEFINE_SPINLOCK(sa1100_rtc_lock);
49 48
@@ -156,114 +155,11 @@ static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
156 return IRQ_HANDLED; 155 return IRQ_HANDLED;
157} 156}
158 157
159static int sa1100_irq_set_freq(struct device *dev, int freq)
160{
161 if (freq < 1 || freq > timer_freq) {
162 return -EINVAL;
163 } else {
164 struct rtc_device *rtc = (struct rtc_device *)dev;
165
166 rtc->irq_freq = freq;
167
168 return 0;
169 }
170}
171
172static int rtc_timer1_count;
173
174static int sa1100_irq_set_state(struct device *dev, int enabled)
175{
176 spin_lock_irq(&sa1100_rtc_lock);
177 if (enabled) {
178 struct rtc_device *rtc = (struct rtc_device *)dev;
179
180 OSMR1 = timer_freq / rtc->irq_freq + OSCR;
181 OIER |= OIER_E1;
182 rtc_timer1_count = 1;
183 } else {
184 OIER &= ~OIER_E1;
185 }
186 spin_unlock_irq(&sa1100_rtc_lock);
187
188 return 0;
189}
190
191static inline int sa1100_timer1_retrigger(struct rtc_device *rtc)
192{
193 unsigned long diff;
194 unsigned long period = timer_freq / rtc->irq_freq;
195
196 spin_lock_irq(&sa1100_rtc_lock);
197
198 do {
199 OSMR1 += period;
200 diff = OSMR1 - OSCR;
201 /* If OSCR > OSMR1, diff is a very large number (unsigned
202 * math). This means we have a lost interrupt. */
203 } while (diff > period);
204 OIER |= OIER_E1;
205
206 spin_unlock_irq(&sa1100_rtc_lock);
207
208 return 0;
209}
210
211static irqreturn_t timer1_interrupt(int irq, void *dev_id)
212{
213 struct platform_device *pdev = to_platform_device(dev_id);
214 struct rtc_device *rtc = platform_get_drvdata(pdev);
215
216 /*
217 * If we match for the first time, rtc_timer1_count will be 1.
218 * Otherwise, we wrapped around (very unlikely but
219 * still possible) so compute the amount of missed periods.
220 * The match reg is updated only when the data is actually retrieved
221 * to avoid unnecessary interrupts.
222 */
223 OSSR = OSSR_M1; /* clear match on timer1 */
224
225 rtc_update_irq(rtc, rtc_timer1_count, RTC_PF | RTC_IRQF);
226
227 if (rtc_timer1_count == 1)
228 rtc_timer1_count =
229 (rtc->irq_freq * ((1 << 30) / (timer_freq >> 2)));
230
231 /* retrigger. */
232 sa1100_timer1_retrigger(rtc);
233
234 return IRQ_HANDLED;
235}
236
237static int sa1100_rtc_read_callback(struct device *dev, int data)
238{
239 if (data & RTC_PF) {
240 struct rtc_device *rtc = (struct rtc_device *)dev;
241
242 /* interpolate missed periods and set match for the next */
243 unsigned long period = timer_freq / rtc->irq_freq;
244 unsigned long oscr = OSCR;
245 unsigned long osmr1 = OSMR1;
246 unsigned long missed = (oscr - osmr1)/period;
247 data += missed << 8;
248 OSSR = OSSR_M1; /* clear match on timer 1 */
249 OSMR1 = osmr1 + (missed + 1)*period;
250 /* Ensure we didn't miss another match in the mean time.
251 * Here we compare (match - OSCR) 8 instead of 0 --
252 * see comment in pxa_timer_interrupt() for explanation.
253 */
254 while ((signed long)((osmr1 = OSMR1) - OSCR) <= 8) {
255 data += 0x100;
256 OSSR = OSSR_M1; /* clear match on timer 1 */
257 OSMR1 = osmr1 + period;
258 }
259 }
260 return data;
261}
262
263static int sa1100_rtc_open(struct device *dev) 158static int sa1100_rtc_open(struct device *dev)
264{ 159{
265 int ret; 160 int ret;
266 struct rtc_device *rtc = (struct rtc_device *)dev; 161 struct platform_device *plat_dev = to_platform_device(dev);
162 struct rtc_device *rtc = platform_get_drvdata(plat_dev);
267 163
268 ret = request_irq(IRQ_RTC1Hz, sa1100_rtc_interrupt, IRQF_DISABLED, 164 ret = request_irq(IRQ_RTC1Hz, sa1100_rtc_interrupt, IRQF_DISABLED,
269 "rtc 1Hz", dev); 165 "rtc 1Hz", dev);
@@ -277,19 +173,11 @@ static int sa1100_rtc_open(struct device *dev)
277 dev_err(dev, "IRQ %d already in use.\n", IRQ_RTCAlrm); 173 dev_err(dev, "IRQ %d already in use.\n", IRQ_RTCAlrm);
278 goto fail_ai; 174 goto fail_ai;
279 } 175 }
280 ret = request_irq(IRQ_OST1, timer1_interrupt, IRQF_DISABLED,
281 "rtc timer", dev);
282 if (ret) {
283 dev_err(dev, "IRQ %d already in use.\n", IRQ_OST1);
284 goto fail_pi;
285 }
286 rtc->max_user_freq = RTC_FREQ; 176 rtc->max_user_freq = RTC_FREQ;
287 sa1100_irq_set_freq(dev, RTC_FREQ); 177 rtc_irq_set_freq(rtc, NULL, RTC_FREQ);
288 178
289 return 0; 179 return 0;
290 180
291 fail_pi:
292 free_irq(IRQ_RTCAlrm, dev);
293 fail_ai: 181 fail_ai:
294 free_irq(IRQ_RTC1Hz, dev); 182 free_irq(IRQ_RTC1Hz, dev);
295 fail_ui: 183 fail_ui:
@@ -304,30 +192,10 @@ static void sa1100_rtc_release(struct device *dev)
304 OSSR = OSSR_M1; 192 OSSR = OSSR_M1;
305 spin_unlock_irq(&sa1100_rtc_lock); 193 spin_unlock_irq(&sa1100_rtc_lock);
306 194
307 free_irq(IRQ_OST1, dev);
308 free_irq(IRQ_RTCAlrm, dev); 195 free_irq(IRQ_RTCAlrm, dev);
309 free_irq(IRQ_RTC1Hz, dev); 196 free_irq(IRQ_RTC1Hz, dev);
310} 197}
311 198
312
313static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd,
314 unsigned long arg)
315{
316 switch (cmd) {
317 case RTC_UIE_OFF:
318 spin_lock_irq(&sa1100_rtc_lock);
319 RTSR &= ~RTSR_HZE;
320 spin_unlock_irq(&sa1100_rtc_lock);
321 return 0;
322 case RTC_UIE_ON:
323 spin_lock_irq(&sa1100_rtc_lock);
324 RTSR |= RTSR_HZE;
325 spin_unlock_irq(&sa1100_rtc_lock);
326 return 0;
327 }
328 return -ENOIOCTLCMD;
329}
330
331static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) 199static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
332{ 200{
333 spin_lock_irq(&sa1100_rtc_lock); 201 spin_lock_irq(&sa1100_rtc_lock);
@@ -386,31 +254,20 @@ static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
386 254
387static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq) 255static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq)
388{ 256{
389 struct rtc_device *rtc = (struct rtc_device *)dev; 257 seq_printf(seq, "trim/divider\t\t: 0x%08x\n", (u32) RTTR);
390 258 seq_printf(seq, "RTSR\t\t\t: 0x%08x\n", (u32)RTSR);
391 seq_printf(seq, "trim/divider\t: 0x%08x\n", (u32) RTTR);
392 seq_printf(seq, "update_IRQ\t: %s\n",
393 (RTSR & RTSR_HZE) ? "yes" : "no");
394 seq_printf(seq, "periodic_IRQ\t: %s\n",
395 (OIER & OIER_E1) ? "yes" : "no");
396 seq_printf(seq, "periodic_freq\t: %d\n", rtc->irq_freq);
397 seq_printf(seq, "RTSR\t\t: 0x%08x\n", (u32)RTSR);
398 259
399 return 0; 260 return 0;
400} 261}
401 262
402static const struct rtc_class_ops sa1100_rtc_ops = { 263static const struct rtc_class_ops sa1100_rtc_ops = {
403 .open = sa1100_rtc_open, 264 .open = sa1100_rtc_open,
404 .read_callback = sa1100_rtc_read_callback,
405 .release = sa1100_rtc_release, 265 .release = sa1100_rtc_release,
406 .ioctl = sa1100_rtc_ioctl,
407 .read_time = sa1100_rtc_read_time, 266 .read_time = sa1100_rtc_read_time,
408 .set_time = sa1100_rtc_set_time, 267 .set_time = sa1100_rtc_set_time,
409 .read_alarm = sa1100_rtc_read_alarm, 268 .read_alarm = sa1100_rtc_read_alarm,
410 .set_alarm = sa1100_rtc_set_alarm, 269 .set_alarm = sa1100_rtc_set_alarm,
411 .proc = sa1100_rtc_proc, 270 .proc = sa1100_rtc_proc,
412 .irq_set_freq = sa1100_irq_set_freq,
413 .irq_set_state = sa1100_irq_set_state,
414 .alarm_irq_enable = sa1100_rtc_alarm_irq_enable, 271 .alarm_irq_enable = sa1100_rtc_alarm_irq_enable,
415}; 272};
416 273
@@ -418,8 +275,6 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
418{ 275{
419 struct rtc_device *rtc; 276 struct rtc_device *rtc;
420 277
421 timer_freq = get_clock_tick_rate();
422
423 /* 278 /*
424 * According to the manual we should be able to let RTTR be zero 279 * According to the manual we should be able to let RTTR be zero
 425	 * and then a default divisor for a 32.768KHz clock is used.	 280
@@ -445,11 +300,6 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
445 300
446 platform_set_drvdata(pdev, rtc); 301 platform_set_drvdata(pdev, rtc);
447 302
448 /* Set the irq_freq */
449 /*TODO: Find out who is messing with this value after we initialize
450 * it here.*/
451 rtc->irq_freq = RTC_FREQ;
452
 453	 /* Fix for a nasty initialization problem in the SA11xx RTSR register.	 303
454 * See also the comments in sa1100_rtc_interrupt(). 304 * See also the comments in sa1100_rtc_interrupt().
455 * 305 *
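
Editor's note: the sa1100 code removed above (sa1100_timer1_retrigger() and sa1100_rtc_read_callback()) interpolates missed periodic interrupts with unsigned arithmetic on the free-running OS timer: missed = (OSCR - OSMR1) / period, next match = OSMR1 + (missed + 1) * period, and a lost match is detected when the unsigned difference exceeds one period. A minimal user-space sketch of that arithmetic, assuming the usual 3.6864 MHz OS-timer tick; the variables stand in for the real SA11xx registers:

    #include <stdio.h>

    /* Count how many match periods were missed and pick the next match value.
     * All arithmetic is unsigned 32-bit, so wraparound behaves like the
     * free-running OS timer in the removed driver code. */
    static unsigned int next_match(unsigned int oscr, unsigned int osmr1,
                                   unsigned int period, unsigned int *missed)
    {
        *missed = (oscr - osmr1) / period;       /* whole periods already lost */
        return osmr1 + (*missed + 1) * period;   /* first match still ahead    */
    }

    int main(void)
    {
        unsigned int missed;
        unsigned int period = 3686400 / 1024;     /* 3.6864 MHz tick, 1024 Hz IRQ */
        unsigned int osmr1 = 1000;                /* programmed match             */
        unsigned int oscr  = 1000 + 5 * period + 7; /* timer ran 5 periods past it */
        unsigned int next  = next_match(oscr, osmr1, period, &missed);

        printf("missed=%u next_match=%u\n", missed, next); /* missed=5 */
        return 0;
    }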
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 93314a9e7fa..e55dc1ac83a 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -344,27 +344,6 @@ static inline void sh_rtc_setcie(struct device *dev, unsigned int enable)
344 spin_unlock_irq(&rtc->lock); 344 spin_unlock_irq(&rtc->lock);
345} 345}
346 346
347static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
348{
349 struct sh_rtc *rtc = dev_get_drvdata(dev);
350 unsigned int ret = 0;
351
352 switch (cmd) {
353 case RTC_UIE_OFF:
354 rtc->periodic_freq &= ~PF_OXS;
355 sh_rtc_setcie(dev, 0);
356 break;
357 case RTC_UIE_ON:
358 rtc->periodic_freq |= PF_OXS;
359 sh_rtc_setcie(dev, 1);
360 break;
361 default:
362 ret = -ENOIOCTLCMD;
363 }
364
365 return ret;
366}
367
368static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) 347static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
369{ 348{
370 sh_rtc_setaie(dev, enabled); 349 sh_rtc_setaie(dev, enabled);
@@ -598,13 +577,10 @@ static int sh_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
598} 577}
599 578
600static struct rtc_class_ops sh_rtc_ops = { 579static struct rtc_class_ops sh_rtc_ops = {
601 .ioctl = sh_rtc_ioctl,
602 .read_time = sh_rtc_read_time, 580 .read_time = sh_rtc_read_time,
603 .set_time = sh_rtc_set_time, 581 .set_time = sh_rtc_set_time,
604 .read_alarm = sh_rtc_read_alarm, 582 .read_alarm = sh_rtc_read_alarm,
605 .set_alarm = sh_rtc_set_alarm, 583 .set_alarm = sh_rtc_set_alarm,
606 .irq_set_state = sh_rtc_irq_set_state,
607 .irq_set_freq = sh_rtc_irq_set_freq,
608 .proc = sh_rtc_proc, 584 .proc = sh_rtc_proc,
609 .alarm_irq_enable = sh_rtc_alarm_irq_enable, 585 .alarm_irq_enable = sh_rtc_alarm_irq_enable,
610}; 586};
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 7e7d0c806f2..572e9534b59 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -115,19 +115,6 @@ static int stmp3xxx_alarm_irq_enable(struct device *dev, unsigned int enabled)
115 return 0; 115 return 0;
116} 116}
117 117
118static int stmp3xxx_update_irq_enable(struct device *dev, unsigned int enabled)
119{
120 struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
121
122 if (enabled)
123 stmp3xxx_setl(BM_RTC_CTRL_ONEMSEC_IRQ_EN,
124 rtc_data->io + HW_RTC_CTRL);
125 else
126 stmp3xxx_clearl(BM_RTC_CTRL_ONEMSEC_IRQ_EN,
127 rtc_data->io + HW_RTC_CTRL);
128 return 0;
129}
130
131static int stmp3xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) 118static int stmp3xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
132{ 119{
133 struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); 120 struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
@@ -149,8 +136,6 @@ static int stmp3xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
149static struct rtc_class_ops stmp3xxx_rtc_ops = { 136static struct rtc_class_ops stmp3xxx_rtc_ops = {
150 .alarm_irq_enable = 137 .alarm_irq_enable =
151 stmp3xxx_alarm_irq_enable, 138 stmp3xxx_alarm_irq_enable,
152 .update_irq_enable =
153 stmp3xxx_update_irq_enable,
154 .read_time = stmp3xxx_rtc_gettime, 139 .read_time = stmp3xxx_rtc_gettime,
155 .set_mmss = stmp3xxx_rtc_set_mmss, 140 .set_mmss = stmp3xxx_rtc_set_mmss,
156 .read_alarm = stmp3xxx_rtc_read_alarm, 141 .read_alarm = stmp3xxx_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index a82d6fe9707..7e96254bd36 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -78,11 +78,16 @@ static ssize_t test_irq_store(struct device *dev,
78 struct rtc_device *rtc = platform_get_drvdata(plat_dev); 78 struct rtc_device *rtc = platform_get_drvdata(plat_dev);
79 79
80 retval = count; 80 retval = count;
81 if (strncmp(buf, "tick", 4) == 0) 81 if (strncmp(buf, "tick", 4) == 0 && rtc->pie_enabled)
82 rtc_update_irq(rtc, 1, RTC_PF | RTC_IRQF); 82 rtc_update_irq(rtc, 1, RTC_PF | RTC_IRQF);
83 else if (strncmp(buf, "alarm", 5) == 0) 83 else if (strncmp(buf, "alarm", 5) == 0) {
84 rtc_update_irq(rtc, 1, RTC_AF | RTC_IRQF); 84 struct rtc_wkalrm alrm;
85 else if (strncmp(buf, "update", 6) == 0) 85 int err = rtc_read_alarm(rtc, &alrm);
86
87 if (!err && alrm.enabled)
88 rtc_update_irq(rtc, 1, RTC_AF | RTC_IRQF);
89
90 } else if (strncmp(buf, "update", 6) == 0 && rtc->uie_rtctimer.enabled)
86 rtc_update_irq(rtc, 1, RTC_UF | RTC_IRQF); 91 rtc_update_irq(rtc, 1, RTC_UF | RTC_IRQF);
87 else 92 else
88 retval = -EINVAL; 93 retval = -EINVAL;
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index ed1b8682812..f9a2799c44d 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -213,18 +213,6 @@ static int twl_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
213 return ret; 213 return ret;
214} 214}
215 215
216static int twl_rtc_update_irq_enable(struct device *dev, unsigned enabled)
217{
218 int ret;
219
220 if (enabled)
221 ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
222 else
223 ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
224
225 return ret;
226}
227
228/* 216/*
229 * Gets current TWL RTC time and date parameters. 217 * Gets current TWL RTC time and date parameters.
230 * 218 *
@@ -433,7 +421,6 @@ static struct rtc_class_ops twl_rtc_ops = {
433 .read_alarm = twl_rtc_read_alarm, 421 .read_alarm = twl_rtc_read_alarm,
434 .set_alarm = twl_rtc_set_alarm, 422 .set_alarm = twl_rtc_set_alarm,
435 .alarm_irq_enable = twl_rtc_alarm_irq_enable, 423 .alarm_irq_enable = twl_rtc_alarm_irq_enable,
436 .update_irq_enable = twl_rtc_update_irq_enable,
437}; 424};
438 425
439/*----------------------------------------------------------------------*/ 426/*----------------------------------------------------------------------*/
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index 769190ac6d1..c5698cda366 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -207,36 +207,6 @@ static int vr41xx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
207 return 0; 207 return 0;
208} 208}
209 209
210static int vr41xx_rtc_irq_set_freq(struct device *dev, int freq)
211{
212 u64 count;
213
214 if (!is_power_of_2(freq))
215 return -EINVAL;
216 count = RTC_FREQUENCY;
217 do_div(count, freq);
218
219 spin_lock_irq(&rtc_lock);
220
221 periodic_count = count;
222 rtc1_write(RTCL1LREG, periodic_count);
223 rtc1_write(RTCL1HREG, periodic_count >> 16);
224
225 spin_unlock_irq(&rtc_lock);
226
227 return 0;
228}
229
230static int vr41xx_rtc_irq_set_state(struct device *dev, int enabled)
231{
232 if (enabled)
233 enable_irq(pie_irq);
234 else
235 disable_irq(pie_irq);
236
237 return 0;
238}
239
240static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) 210static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
241{ 211{
242 switch (cmd) { 212 switch (cmd) {
@@ -308,8 +278,6 @@ static const struct rtc_class_ops vr41xx_rtc_ops = {
308 .set_time = vr41xx_rtc_set_time, 278 .set_time = vr41xx_rtc_set_time,
309 .read_alarm = vr41xx_rtc_read_alarm, 279 .read_alarm = vr41xx_rtc_read_alarm,
310 .set_alarm = vr41xx_rtc_set_alarm, 280 .set_alarm = vr41xx_rtc_set_alarm,
311 .irq_set_freq = vr41xx_rtc_irq_set_freq,
312 .irq_set_state = vr41xx_rtc_irq_set_state,
313}; 281};
314 282
315static int __devinit rtc_probe(struct platform_device *pdev) 283static int __devinit rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index 82931dc65c0..bdc909bd56d 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -315,21 +315,6 @@ static int wm831x_rtc_alarm_irq_enable(struct device *dev,
315 return wm831x_rtc_stop_alarm(wm831x_rtc); 315 return wm831x_rtc_stop_alarm(wm831x_rtc);
316} 316}
317 317
318static int wm831x_rtc_update_irq_enable(struct device *dev,
319 unsigned int enabled)
320{
321 struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
322 int val;
323
324 if (enabled)
325 val = 1 << WM831X_RTC_PINT_FREQ_SHIFT;
326 else
327 val = 0;
328
329 return wm831x_set_bits(wm831x_rtc->wm831x, WM831X_RTC_CONTROL,
330 WM831X_RTC_PINT_FREQ_MASK, val);
331}
332
333static irqreturn_t wm831x_alm_irq(int irq, void *data) 318static irqreturn_t wm831x_alm_irq(int irq, void *data)
334{ 319{
335 struct wm831x_rtc *wm831x_rtc = data; 320 struct wm831x_rtc *wm831x_rtc = data;
@@ -354,7 +339,6 @@ static const struct rtc_class_ops wm831x_rtc_ops = {
354 .read_alarm = wm831x_rtc_readalarm, 339 .read_alarm = wm831x_rtc_readalarm,
355 .set_alarm = wm831x_rtc_setalarm, 340 .set_alarm = wm831x_rtc_setalarm,
356 .alarm_irq_enable = wm831x_rtc_alarm_irq_enable, 341 .alarm_irq_enable = wm831x_rtc_alarm_irq_enable,
357 .update_irq_enable = wm831x_rtc_update_irq_enable,
358}; 342};
359 343
360#ifdef CONFIG_PM 344#ifdef CONFIG_PM
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index 3d0dc76b38a..66421426e40 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -302,26 +302,6 @@ static int wm8350_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
302 return ret; 302 return ret;
303} 303}
304 304
305static int wm8350_rtc_update_irq_enable(struct device *dev,
306 unsigned int enabled)
307{
308 struct wm8350 *wm8350 = dev_get_drvdata(dev);
309
310 /* Suppress duplicate changes since genirq nests enable and
311 * disable calls. */
312 if (enabled == wm8350->rtc.update_enabled)
313 return 0;
314
315 if (enabled)
316 wm8350_unmask_irq(wm8350, WM8350_IRQ_RTC_SEC);
317 else
318 wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
319
320 wm8350->rtc.update_enabled = enabled;
321
322 return 0;
323}
324
325static irqreturn_t wm8350_rtc_alarm_handler(int irq, void *data) 305static irqreturn_t wm8350_rtc_alarm_handler(int irq, void *data)
326{ 306{
327 struct wm8350 *wm8350 = data; 307 struct wm8350 *wm8350 = data;
@@ -357,7 +337,6 @@ static const struct rtc_class_ops wm8350_rtc_ops = {
357 .read_alarm = wm8350_rtc_readalarm, 337 .read_alarm = wm8350_rtc_readalarm,
358 .set_alarm = wm8350_rtc_setalarm, 338 .set_alarm = wm8350_rtc_setalarm,
359 .alarm_irq_enable = wm8350_rtc_alarm_irq_enable, 339 .alarm_irq_enable = wm8350_rtc_alarm_irq_enable,
360 .update_irq_enable = wm8350_rtc_update_irq_enable,
361}; 340};
362 341
363#ifdef CONFIG_PM 342#ifdef CONFIG_PM
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 79cefbe3136..638c72b7f94 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -4277,7 +4277,7 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4277 4277
4278 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u", 4278 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
4279 phba->shost->host_no); 4279 phba->shost->host_no);
4280 phba->wq = create_workqueue(phba->wq_name); 4280 phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
4281 if (!phba->wq) { 4281 if (!phba->wq) {
4282 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" 4282 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
4283 "Failed to allocate work queue\n"); 4283 "Failed to allocate work queue\n");
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 9c5c8be7223..d841e98a8bd 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6219,11 +6219,10 @@ static struct ata_port_operations ipr_sata_ops = {
6219}; 6219};
6220 6220
6221static struct ata_port_info sata_port_info = { 6221static struct ata_port_info sata_port_info = {
6222 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET | 6222 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6223 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA, 6223 .pio_mask = ATA_PIO4_ONLY,
6224 .pio_mask = 0x10, /* pio4 */ 6224 .mwdma_mask = ATA_MWDMA2,
6225 .mwdma_mask = 0x07, 6225 .udma_mask = ATA_UDMA6,
6226 .udma_mask = 0x7f, /* udma0-6 */
6227 .port_ops = &ipr_sata_ops 6226 .port_ops = &ipr_sata_ops
6228}; 6227};
6229 6228
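
Editor's note: the ipr change above swaps raw transfer-mode masks for the libata helper macros without changing any values; the old literals 0x10, 0x07 and 0x7f are what ATA_PIO4_ONLY, ATA_MWDMA2 and ATA_UDMA6 expand to, per the removed comments. The masks are simple "bit N set means mode N supported" bitmaps, reproduced below with local stand-in macros (the real definitions live in <linux/ata.h>):

    #include <stdio.h>

    /* Local stand-ins for the <linux/ata.h> transfer-mode masks:
     * bit N set means "mode N is supported". */
    #define MODE_MASK_UPTO(n)   ((1u << ((n) + 1)) - 1)   /* modes 0..n  */
    #define MODE_MASK_ONLY(n)   (1u << (n))               /* mode n only */

    int main(void)
    {
        printf("PIO0-4  : 0x%02x\n", MODE_MASK_UPTO(4)); /* 0x1f, like ATA_PIO4      */
        printf("PIO4only: 0x%02x\n", MODE_MASK_ONLY(4)); /* 0x10, like ATA_PIO4_ONLY */
        printf("MWDMA0-2: 0x%02x\n", MODE_MASK_UPTO(2)); /* 0x07, like ATA_MWDMA2    */
        printf("UDMA0-6 : 0x%02x\n", MODE_MASK_UPTO(6)); /* 0x7f, like ATA_UDMA6     */
        return 0;
    }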
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index e1a395b438e..4d3b704ede1 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -238,37 +238,43 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
238 return true; 238 return true;
239} 239}
240 240
241static void sas_ata_phy_reset(struct ata_port *ap) 241static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
242 unsigned long deadline)
242{ 243{
244 struct ata_port *ap = link->ap;
243 struct domain_device *dev = ap->private_data; 245 struct domain_device *dev = ap->private_data;
244 struct sas_internal *i = 246 struct sas_internal *i =
245 to_sas_internal(dev->port->ha->core.shost->transportt); 247 to_sas_internal(dev->port->ha->core.shost->transportt);
246 int res = TMF_RESP_FUNC_FAILED; 248 int res = TMF_RESP_FUNC_FAILED;
249 int ret = 0;
247 250
248 if (i->dft->lldd_I_T_nexus_reset) 251 if (i->dft->lldd_I_T_nexus_reset)
249 res = i->dft->lldd_I_T_nexus_reset(dev); 252 res = i->dft->lldd_I_T_nexus_reset(dev);
250 253
251 if (res != TMF_RESP_FUNC_COMPLETE) 254 if (res != TMF_RESP_FUNC_COMPLETE) {
252 SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __func__); 255 SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __func__);
256 ret = -EAGAIN;
257 }
253 258
254 switch (dev->sata_dev.command_set) { 259 switch (dev->sata_dev.command_set) {
255 case ATA_COMMAND_SET: 260 case ATA_COMMAND_SET:
256 SAS_DPRINTK("%s: Found ATA device.\n", __func__); 261 SAS_DPRINTK("%s: Found ATA device.\n", __func__);
257 ap->link.device[0].class = ATA_DEV_ATA; 262 *class = ATA_DEV_ATA;
258 break; 263 break;
259 case ATAPI_COMMAND_SET: 264 case ATAPI_COMMAND_SET:
260 SAS_DPRINTK("%s: Found ATAPI device.\n", __func__); 265 SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
261 ap->link.device[0].class = ATA_DEV_ATAPI; 266 *class = ATA_DEV_ATAPI;
262 break; 267 break;
263 default: 268 default:
264 SAS_DPRINTK("%s: Unknown SATA command set: %d.\n", 269 SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
265 __func__, 270 __func__,
266 dev->sata_dev.command_set); 271 dev->sata_dev.command_set);
267 ap->link.device[0].class = ATA_DEV_UNKNOWN; 272 *class = ATA_DEV_UNKNOWN;
268 break; 273 break;
269 } 274 }
270 275
271 ap->cbl = ATA_CBL_SATA; 276 ap->cbl = ATA_CBL_SATA;
277 return ret;
272} 278}
273 279
274static void sas_ata_post_internal(struct ata_queued_cmd *qc) 280static void sas_ata_post_internal(struct ata_queued_cmd *qc)
@@ -349,7 +355,11 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
349} 355}
350 356
351static struct ata_port_operations sas_sata_ops = { 357static struct ata_port_operations sas_sata_ops = {
352 .phy_reset = sas_ata_phy_reset, 358 .prereset = ata_std_prereset,
359 .softreset = NULL,
360 .hardreset = sas_ata_hard_reset,
361 .postreset = ata_std_postreset,
362 .error_handler = ata_std_error_handler,
353 .post_internal_cmd = sas_ata_post_internal, 363 .post_internal_cmd = sas_ata_post_internal,
354 .qc_defer = ata_std_qc_defer, 364 .qc_defer = ata_std_qc_defer,
355 .qc_prep = ata_noop_qc_prep, 365 .qc_prep = ata_noop_qc_prep,
@@ -362,10 +372,9 @@ static struct ata_port_operations sas_sata_ops = {
362}; 372};
363 373
364static struct ata_port_info sata_port_info = { 374static struct ata_port_info sata_port_info = {
365 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET | 375 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ,
366 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ, 376 .pio_mask = ATA_PIO4,
367 .pio_mask = 0x1f, /* PIO0-4 */ 377 .mwdma_mask = ATA_MWDMA2,
368 .mwdma_mask = 0x07, /* MWDMA0-2 */
369 .udma_mask = ATA_UDMA6, 378 .udma_mask = ATA_UDMA6,
370 .port_ops = &sas_sata_ops 379 .port_ops = &sas_sata_ops
371}; 380};
@@ -781,3 +790,68 @@ int sas_discover_sata(struct domain_device *dev)
781 790
782 return res; 791 return res;
783} 792}
793
794void sas_ata_strategy_handler(struct Scsi_Host *shost)
795{
796 struct scsi_device *sdev;
797
798 shost_for_each_device(sdev, shost) {
799 struct domain_device *ddev = sdev_to_domain_dev(sdev);
800 struct ata_port *ap = ddev->sata_dev.ap;
801
802 if (!dev_is_sata(ddev))
803 continue;
804
805 ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata port error handler");
806 ata_scsi_port_error_handler(shost, ap);
807 }
808}
809
810int sas_ata_timed_out(struct scsi_cmnd *cmd, struct sas_task *task,
811 enum blk_eh_timer_return *rtn)
812{
813 struct domain_device *ddev = cmd_to_domain_dev(cmd);
814
815 if (!dev_is_sata(ddev) || task)
816 return 0;
817
818 /* we're a sata device with no task, so this must be a libata
819 * eh timeout. Ideally should hook into libata timeout
820 * handling, but there's no point, it just wants to activate
821 * the eh thread */
822 *rtn = BLK_EH_NOT_HANDLED;
823 return 1;
824}
825
826int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
827 struct list_head *done_q)
828{
829 int rtn = 0;
830 struct scsi_cmnd *cmd, *n;
831 struct ata_port *ap;
832
833 do {
834 LIST_HEAD(sata_q);
835
836 ap = NULL;
837
838 list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
839 struct domain_device *ddev = cmd_to_domain_dev(cmd);
840
841 if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd))
842 continue;
 843	 if (ap && ap != ddev->sata_dev.ap)
844 continue;
845 ap = ddev->sata_dev.ap;
846 rtn = 1;
847 list_move(&cmd->eh_entry, &sata_q);
848 }
849
850 if (!list_empty(&sata_q)) {
 851	 ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata cmd error handler\n");
852 ata_scsi_cmd_error_handler(shost, ap, &sata_q);
853 }
854 } while (ap);
855
856 return rtn;
857}
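
Editor's note: sas_ata_eh() above repeatedly sweeps the failed-command list, adopts the ata_port of the first unclaimed SATA command, moves every command belonging to that same port onto a private list, hands the batch to libata's command error handler, and loops until a sweep finds nothing left. A self-contained sketch of that "group by key, one group per pass" pattern, using plain arrays instead of the kernel's list_head and an integer port id instead of a struct ata_port pointer:

    #include <stdio.h>

    struct cmd {
        int port;      /* which port the command belongs to (-1 = not SATA) */
        int handled;   /* set once the command has been moved to a batch    */
    };

    static void handle_batch(int port, struct cmd **batch, int n)
    {
        (void)batch;   /* the real code hands this list to ata_scsi_cmd_error_handler() */
        printf("port %d: error-handling %d command(s)\n", port, n);
    }

    int main(void)
    {
        struct cmd work[] = { {2, 0}, {-1, 0}, {2, 0}, {5, 0}, {5, 0} };
        const int nr = sizeof(work) / sizeof(work[0]);

        for (;;) {
            struct cmd *batch[8];
            int port = -1, n = 0, i;

            /* One sweep: claim every command that shares the first port we see. */
            for (i = 0; i < nr; i++) {
                if (work[i].handled || work[i].port < 0)
                    continue;
                if (port >= 0 && work[i].port != port)
                    continue;
                port = work[i].port;
                work[i].handled = 1;
                batch[n++] = &work[i];
            }
            if (n == 0)
                break;          /* no SATA commands left: done */
            handle_batch(port, batch, n);
        }
        return 0;
    }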
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 9a7aaf5f131..67758ea8eb7 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -663,11 +663,16 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
663 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any 663 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
664 * command we see here has no sas_task and is thus unknown to the HA. 664 * command we see here has no sas_task and is thus unknown to the HA.
665 */ 665 */
666 if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q)) 666 if (!sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q))
667 scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q); 667 if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
668 scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
668 669
669out: 670out:
671 /* now link into libata eh --- if we have any ata devices */
672 sas_ata_strategy_handler(shost);
673
670 scsi_eh_flush_done_q(&ha->eh_done_q); 674 scsi_eh_flush_done_q(&ha->eh_done_q);
675
671 SAS_DPRINTK("--- Exit %s\n", __func__); 676 SAS_DPRINTK("--- Exit %s\n", __func__);
672 return; 677 return;
673} 678}
@@ -676,6 +681,11 @@ enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
676{ 681{
677 struct sas_task *task = TO_SAS_TASK(cmd); 682 struct sas_task *task = TO_SAS_TASK(cmd);
678 unsigned long flags; 683 unsigned long flags;
684 enum blk_eh_timer_return rtn;
685
686 if (sas_ata_timed_out(cmd, task, &rtn))
687 return rtn;
688
679 689
680 if (!task) { 690 if (!task) {
681 cmd->request->timeout /= 2; 691 cmd->request->timeout /= 2;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f27724d76cf..e90f7c16b95 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -349,7 +349,7 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
349 "Can't create request queue\n"); 349 "Can't create request queue\n");
350 goto fail; 350 goto fail;
351 } 351 }
352 ha->wq = create_workqueue("qla2xxx_wq"); 352 ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
353 vha->req = ha->req_q_map[req]; 353 vha->req = ha->req_q_map[req];
354 options |= BIT_1; 354 options |= BIT_1;
355 for (ques = 1; ques < ha->max_rsp_queues; ques++) { 355 for (ques = 1; ques < ha->max_rsp_queues; ques++) {
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index c399be97992..f67282058ba 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -629,7 +629,7 @@ static int __init scsi_tgt_init(void)
629 if (!scsi_tgt_cmd_cache) 629 if (!scsi_tgt_cmd_cache)
630 return -ENOMEM; 630 return -ENOMEM;
631 631
632 scsi_tgtd = create_workqueue("scsi_tgtd"); 632 scsi_tgtd = alloc_workqueue("scsi_tgtd", 0, 1);
633 if (!scsi_tgtd) { 633 if (!scsi_tgtd) {
634 err = -ENOMEM; 634 err = -ENOMEM;
635 goto free_kmemcache; 635 goto free_kmemcache;
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 95928833855..a429b01d028 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1557,9 +1557,7 @@ static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
1557 drv_data->ssp = ssp; 1557 drv_data->ssp = ssp;
1558 1558
1559 master->dev.parent = &pdev->dev; 1559 master->dev.parent = &pdev->dev;
1560#ifdef CONFIG_OF
1561 master->dev.of_node = pdev->dev.of_node; 1560 master->dev.of_node = pdev->dev.of_node;
1562#endif
1563 /* the spi->mode bits understood by this driver: */ 1561 /* the spi->mode bits understood by this driver: */
1564 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 1562 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1565 1563
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c
index 19752b09e15..378e504f89e 100644
--- a/drivers/spi/pxa2xx_spi_pci.c
+++ b/drivers/spi/pxa2xx_spi_pci.c
@@ -89,9 +89,7 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev,
89 goto err_nomem; 89 goto err_nomem;
90 90
91 pdev->dev.parent = &dev->dev; 91 pdev->dev.parent = &dev->dev;
92#ifdef CONFIG_OF
93 pdev->dev.of_node = dev->dev.of_node; 92 pdev->dev.of_node = dev->dev.of_node;
94#endif
95 ssp = &spi_info->ssp; 93 ssp = &spi_info->ssp;
96 ssp->phys_base = pci_resource_start(dev, 0); 94 ssp->phys_base = pci_resource_start(dev, 0);
97 ssp->mmio_base = ioremap(phys_beg, phys_len); 95 ssp->mmio_base = ioremap(phys_beg, phys_len);
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 7adaef62a99..4d2c75df886 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -351,14 +351,12 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
351 return IRQ_HANDLED; 351 return IRQ_HANDLED;
352} 352}
353 353
354#ifdef CONFIG_OF
355static const struct of_device_id xilinx_spi_of_match[] = { 354static const struct of_device_id xilinx_spi_of_match[] = {
356 { .compatible = "xlnx,xps-spi-2.00.a", }, 355 { .compatible = "xlnx,xps-spi-2.00.a", },
357 { .compatible = "xlnx,xps-spi-2.00.b", }, 356 { .compatible = "xlnx,xps-spi-2.00.b", },
358 {} 357 {}
359}; 358};
360MODULE_DEVICE_TABLE(of, xilinx_spi_of_match); 359MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
361#endif
362 360
363struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, 361struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
364 u32 irq, s16 bus_num, int num_cs, int little_endian, int bits_per_word) 362 u32 irq, s16 bus_num, int num_cs, int little_endian, int bits_per_word)
@@ -394,9 +392,7 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
394 392
395 master->bus_num = bus_num; 393 master->bus_num = bus_num;
396 master->num_chipselect = num_cs; 394 master->num_chipselect = num_cs;
397#ifdef CONFIG_OF
398 master->dev.of_node = dev->of_node; 395 master->dev.of_node = dev->of_node;
399#endif
400 396
401 xspi->mem = *mem; 397 xspi->mem = *mem;
402 xspi->irq = irq; 398 xspi->irq = irq;
@@ -539,9 +535,7 @@ static struct platform_driver xilinx_spi_driver = {
539 .driver = { 535 .driver = {
540 .name = XILINX_SPI_NAME, 536 .name = XILINX_SPI_NAME,
541 .owner = THIS_MODULE, 537 .owner = THIS_MODULE,
542#ifdef CONFIG_OF
543 .of_match_table = xilinx_spi_of_match, 538 .of_match_table = xilinx_spi_of_match,
544#endif
545 }, 539 },
546}; 540};
547 541
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 43f9f02c7db..718050ace08 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -232,7 +232,7 @@ static int increase_reservation(unsigned long nr_pages)
232 set_phys_to_machine(pfn, frame_list[i]); 232 set_phys_to_machine(pfn, frame_list[i]);
233 233
234 /* Link back into the page tables if not highmem. */ 234 /* Link back into the page tables if not highmem. */
235 if (pfn < max_low_pfn) { 235 if (!xen_hvm_domain() && pfn < max_low_pfn) {
236 int ret; 236 int ret;
237 ret = HYPERVISOR_update_va_mapping( 237 ret = HYPERVISOR_update_va_mapping(
238 (unsigned long)__va(pfn << PAGE_SHIFT), 238 (unsigned long)__va(pfn << PAGE_SHIFT),
@@ -280,7 +280,7 @@ static int decrease_reservation(unsigned long nr_pages)
280 280
281 scrub_page(page); 281 scrub_page(page);
282 282
283 if (!PageHighMem(page)) { 283 if (!xen_hvm_domain() && !PageHighMem(page)) {
284 ret = HYPERVISOR_update_va_mapping( 284 ret = HYPERVISOR_update_va_mapping(
285 (unsigned long)__va(pfn << PAGE_SHIFT), 285 (unsigned long)__va(pfn << PAGE_SHIFT),
286 __pte_ma(0), 0); 286 __pte_ma(0), 0);
@@ -296,7 +296,7 @@ static int decrease_reservation(unsigned long nr_pages)
296 /* No more mappings: invalidate P2M and add to balloon. */ 296 /* No more mappings: invalidate P2M and add to balloon. */
297 for (i = 0; i < nr_pages; i++) { 297 for (i = 0; i < nr_pages; i++) {
298 pfn = mfn_to_pfn(frame_list[i]); 298 pfn = mfn_to_pfn(frame_list[i]);
299 set_phys_to_machine(pfn, INVALID_P2M_ENTRY); 299 __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
300 balloon_append(pfn_to_page(pfn)); 300 balloon_append(pfn_to_page(pfn));
301 } 301 }
302 302
@@ -392,15 +392,19 @@ static struct notifier_block xenstore_notifier;
392 392
393static int __init balloon_init(void) 393static int __init balloon_init(void)
394{ 394{
395 unsigned long pfn, extra_pfn_end; 395 unsigned long pfn, nr_pages, extra_pfn_end;
396 struct page *page; 396 struct page *page;
397 397
398 if (!xen_pv_domain()) 398 if (!xen_domain())
399 return -ENODEV; 399 return -ENODEV;
400 400
401 pr_info("xen_balloon: Initialising balloon driver.\n"); 401 pr_info("xen_balloon: Initialising balloon driver.\n");
402 402
403 balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn); 403 if (xen_pv_domain())
404 nr_pages = xen_start_info->nr_pages;
405 else
406 nr_pages = max_pfn;
407 balloon_stats.current_pages = min(nr_pages, max_pfn);
404 balloon_stats.target_pages = balloon_stats.current_pages; 408 balloon_stats.target_pages = balloon_stats.current_pages;
405 balloon_stats.balloon_low = 0; 409 balloon_stats.balloon_low = 0;
406 balloon_stats.balloon_high = 0; 410 balloon_stats.balloon_high = 0;
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 74681478100..0ad1699a1b3 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -114,7 +114,7 @@ struct cpu_evtchn_s {
114static __initdata struct cpu_evtchn_s init_evtchn_mask = { 114static __initdata struct cpu_evtchn_s init_evtchn_mask = {
115 .bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul, 115 .bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
116}; 116};
117static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask; 117static struct cpu_evtchn_s __refdata *cpu_evtchn_mask_p = &init_evtchn_mask;
118 118
119static inline unsigned long *cpu_evtchn_mask(int cpu) 119static inline unsigned long *cpu_evtchn_mask(int cpu)
120{ 120{
@@ -277,7 +277,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
277 277
278 BUG_ON(irq == -1); 278 BUG_ON(irq == -1);
279#ifdef CONFIG_SMP 279#ifdef CONFIG_SMP
280 cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu)); 280 cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
281#endif 281#endif
282 282
283 clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); 283 clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
@@ -294,7 +294,7 @@ static void init_evtchn_cpu_bindings(void)
294 294
295 /* By default all event channels notify CPU#0. */ 295 /* By default all event channels notify CPU#0. */
296 for_each_irq_desc(i, desc) { 296 for_each_irq_desc(i, desc) {
297 cpumask_copy(desc->affinity, cpumask_of(0)); 297 cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
298 } 298 }
299#endif 299#endif
300 300
@@ -376,81 +376,69 @@ static void unmask_evtchn(int port)
376 put_cpu(); 376 put_cpu();
377} 377}
378 378
379static int get_nr_hw_irqs(void) 379static int xen_allocate_irq_dynamic(void)
380{ 380{
381 int ret = 1; 381 int first = 0;
382 int irq;
382 383
383#ifdef CONFIG_X86_IO_APIC 384#ifdef CONFIG_X86_IO_APIC
384 ret = get_nr_irqs_gsi(); 385 /*
 386	 * For an HVM guest or domain 0 which sees "real" (emulated or
 387	 * actual respectively) GSIs we allocate dynamic IRQs
388 * e.g. those corresponding to event channels or MSIs
389 * etc. from the range above those "real" GSIs to avoid
390 * collisions.
391 */
392 if (xen_initial_domain() || xen_hvm_domain())
393 first = get_nr_irqs_gsi();
385#endif 394#endif
386 395
387 return ret; 396retry:
388} 397 irq = irq_alloc_desc_from(first, -1);
389 398
390static int find_unbound_pirq(int type) 399 if (irq == -ENOMEM && first > NR_IRQS_LEGACY) {
391{ 400 printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n");
392 int rc, i; 401 first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY);
393 struct physdev_get_free_pirq op_get_free_pirq; 402 goto retry;
394 op_get_free_pirq.type = type; 403 }
395 404
396 rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq); 405 if (irq < 0)
397 if (!rc) 406 panic("No available IRQ to bind to: increase nr_irqs!\n");
398 return op_get_free_pirq.pirq;
399 407
400 for (i = 0; i < nr_irqs; i++) { 408 return irq;
401 if (pirq_to_irq[i] < 0)
402 return i;
403 }
404 return -1;
405} 409}
406 410
407static int find_unbound_irq(void) 411static int xen_allocate_irq_gsi(unsigned gsi)
408{ 412{
409 struct irq_data *data; 413 int irq;
410 int irq, res;
411 int bottom = get_nr_hw_irqs();
412 int top = nr_irqs-1;
413
414 if (bottom == nr_irqs)
415 goto no_irqs;
416 414
417 /* This loop starts from the top of IRQ space and goes down. 415 /*
418 * We need this b/c if we have a PCI device in a Xen PV guest 416 * A PV guest has no concept of a GSI (since it has no ACPI
419 * we do not have an IO-APIC (though the backend might have them) 417 * nor access to/knowledge of the physical APICs). Therefore
420 * mapped in. To not have a collision of physical IRQs with the Xen 418 * all IRQs are dynamically allocated from the entire IRQ
421 * event channels start at the top of the IRQ space for virtual IRQs. 419 * space.
422 */ 420 */
423 for (irq = top; irq > bottom; irq--) { 421 if (xen_pv_domain() && !xen_initial_domain())
424 data = irq_get_irq_data(irq); 422 return xen_allocate_irq_dynamic();
425 /* only 15->0 have init'd desc; handle irq > 16 */
426 if (!data)
427 break;
428 if (data->chip == &no_irq_chip)
429 break;
430 if (data->chip != &xen_dynamic_chip)
431 continue;
432 if (irq_info[irq].type == IRQT_UNBOUND)
433 return irq;
434 }
435
436 if (irq == bottom)
437 goto no_irqs;
438 423
439 res = irq_alloc_desc_at(irq, -1); 424 /* Legacy IRQ descriptors are already allocated by the arch. */
425 if (gsi < NR_IRQS_LEGACY)
426 return gsi;
440 427
441 if (WARN_ON(res != irq)) 428 irq = irq_alloc_desc_at(gsi, -1);
442 return -1; 429 if (irq < 0)
430 panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq);
443 431
444 return irq; 432 return irq;
445
446no_irqs:
447 panic("No available IRQ to bind to: increase nr_irqs!\n");
448} 433}
449 434
450static bool identity_mapped_irq(unsigned irq) 435static void xen_free_irq(unsigned irq)
451{ 436{
452 /* identity map all the hardware irqs */ 437 /* Legacy IRQ descriptors are managed by the arch. */
453 return irq < get_nr_hw_irqs(); 438 if (irq < NR_IRQS_LEGACY)
439 return;
440
441 irq_free_desc(irq);
454} 442}
455 443
456static void pirq_unmask_notify(int irq) 444static void pirq_unmask_notify(int irq)
@@ -486,7 +474,7 @@ static bool probing_irq(int irq)
486 return desc && desc->action == NULL; 474 return desc && desc->action == NULL;
487} 475}
488 476
489static unsigned int startup_pirq(unsigned int irq) 477static unsigned int __startup_pirq(unsigned int irq)
490{ 478{
491 struct evtchn_bind_pirq bind_pirq; 479 struct evtchn_bind_pirq bind_pirq;
492 struct irq_info *info = info_for_irq(irq); 480 struct irq_info *info = info_for_irq(irq);
@@ -524,9 +512,15 @@ out:
524 return 0; 512 return 0;
525} 513}
526 514
527static void shutdown_pirq(unsigned int irq) 515static unsigned int startup_pirq(struct irq_data *data)
516{
517 return __startup_pirq(data->irq);
518}
519
520static void shutdown_pirq(struct irq_data *data)
528{ 521{
529 struct evtchn_close close; 522 struct evtchn_close close;
523 unsigned int irq = data->irq;
530 struct irq_info *info = info_for_irq(irq); 524 struct irq_info *info = info_for_irq(irq);
531 int evtchn = evtchn_from_irq(irq); 525 int evtchn = evtchn_from_irq(irq);
532 526
@@ -546,20 +540,20 @@ static void shutdown_pirq(unsigned int irq)
546 info->evtchn = 0; 540 info->evtchn = 0;
547} 541}
548 542
549static void enable_pirq(unsigned int irq) 543static void enable_pirq(struct irq_data *data)
550{ 544{
551 startup_pirq(irq); 545 startup_pirq(data);
552} 546}
553 547
554static void disable_pirq(unsigned int irq) 548static void disable_pirq(struct irq_data *data)
555{ 549{
556} 550}
557 551
558static void ack_pirq(unsigned int irq) 552static void ack_pirq(struct irq_data *data)
559{ 553{
560 int evtchn = evtchn_from_irq(irq); 554 int evtchn = evtchn_from_irq(data->irq);
561 555
562 move_native_irq(irq); 556 move_native_irq(data->irq);
563 557
564 if (VALID_EVTCHN(evtchn)) { 558 if (VALID_EVTCHN(evtchn)) {
565 mask_evtchn(evtchn); 559 mask_evtchn(evtchn);
@@ -567,23 +561,6 @@ static void ack_pirq(unsigned int irq)
567 } 561 }
568} 562}
569 563
570static void end_pirq(unsigned int irq)
571{
572 int evtchn = evtchn_from_irq(irq);
573 struct irq_desc *desc = irq_to_desc(irq);
574
575 if (WARN_ON(!desc))
576 return;
577
578 if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
579 (IRQ_DISABLED|IRQ_PENDING)) {
580 shutdown_pirq(irq);
581 } else if (VALID_EVTCHN(evtchn)) {
582 unmask_evtchn(evtchn);
583 pirq_unmask_notify(irq);
584 }
585}
586
587static int find_irq_by_gsi(unsigned gsi) 564static int find_irq_by_gsi(unsigned gsi)
588{ 565{
589 int irq; 566 int irq;
@@ -638,14 +615,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
638 goto out; /* XXX need refcount? */ 615 goto out; /* XXX need refcount? */
639 } 616 }
640 617
641 /* If we are a PV guest, we don't have GSIs (no ACPI passed). Therefore 618 irq = xen_allocate_irq_gsi(gsi);
642 * we are using the !xen_initial_domain() to drop in the function.*/
643 if (identity_mapped_irq(gsi) || (!xen_initial_domain() &&
644 xen_pv_domain())) {
645 irq = gsi;
646 irq_alloc_desc_at(irq, -1);
647 } else
648 irq = find_unbound_irq();
649 619
650 set_irq_chip_and_handler_name(irq, &xen_pirq_chip, 620 set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
651 handle_level_irq, name); 621 handle_level_irq, name);
@@ -658,7 +628,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
658 * this in the priv domain. */ 628 * this in the priv domain. */
659 if (xen_initial_domain() && 629 if (xen_initial_domain() &&
660 HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) { 630 HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
661 irq_free_desc(irq); 631 xen_free_irq(irq);
662 irq = -ENOSPC; 632 irq = -ENOSPC;
663 goto out; 633 goto out;
664 } 634 }
@@ -674,87 +644,46 @@ out:
674} 644}
675 645
676#ifdef CONFIG_PCI_MSI 646#ifdef CONFIG_PCI_MSI
677#include <linux/msi.h> 647int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
678#include "../pci/msi.h"
679
680void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc)
681{ 648{
682 spin_lock(&irq_mapping_update_lock); 649 int rc;
683 650 struct physdev_get_free_pirq op_get_free_pirq;
684 if (alloc & XEN_ALLOC_IRQ) {
685 *irq = find_unbound_irq();
686 if (*irq == -1)
687 goto out;
688 }
689
690 if (alloc & XEN_ALLOC_PIRQ) {
691 *pirq = find_unbound_pirq(MAP_PIRQ_TYPE_MSI);
692 if (*pirq == -1)
693 goto out;
694 }
695 651
696 set_irq_chip_and_handler_name(*irq, &xen_pirq_chip, 652 op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
697 handle_level_irq, name); 653 rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
698 654
699 irq_info[*irq] = mk_pirq_info(0, *pirq, 0, 0); 655 WARN_ONCE(rc == -ENOSYS,
700 pirq_to_irq[*pirq] = *irq; 656 "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");
701 657
702out: 658 return rc ? -1 : op_get_free_pirq.pirq;
703 spin_unlock(&irq_mapping_update_lock);
704} 659}
705 660
706int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type) 661int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
662 int pirq, int vector, const char *name)
707{ 663{
708 int irq = -1; 664 int irq, ret;
709 struct physdev_map_pirq map_irq;
710 int rc;
711 int pos;
712 u32 table_offset, bir;
713
714 memset(&map_irq, 0, sizeof(map_irq));
715 map_irq.domid = DOMID_SELF;
716 map_irq.type = MAP_PIRQ_TYPE_MSI;
717 map_irq.index = -1;
718 map_irq.pirq = -1;
719 map_irq.bus = dev->bus->number;
720 map_irq.devfn = dev->devfn;
721
722 if (type == PCI_CAP_ID_MSIX) {
723 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
724
725 pci_read_config_dword(dev, msix_table_offset_reg(pos),
726 &table_offset);
727 bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
728
729 map_irq.table_base = pci_resource_start(dev, bir);
730 map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
731 }
732 665
733 spin_lock(&irq_mapping_update_lock); 666 spin_lock(&irq_mapping_update_lock);
734 667
735 irq = find_unbound_irq(); 668 irq = xen_allocate_irq_dynamic();
736
737 if (irq == -1) 669 if (irq == -1)
738 goto out; 670 goto out;
739 671
740 rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
741 if (rc) {
742 printk(KERN_WARNING "xen map irq failed %d\n", rc);
743
744 irq_free_desc(irq);
745
746 irq = -1;
747 goto out;
748 }
749 irq_info[irq] = mk_pirq_info(0, map_irq.pirq, 0, map_irq.index);
750
751 set_irq_chip_and_handler_name(irq, &xen_pirq_chip, 672 set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
752 handle_level_irq, 673 handle_level_irq, name);
753 (type == PCI_CAP_ID_MSIX) ? "msi-x":"msi");
754 674
675 irq_info[irq] = mk_pirq_info(0, pirq, 0, vector);
676 pirq_to_irq[pirq] = irq;
677 ret = irq_set_msi_desc(irq, msidesc);
678 if (ret < 0)
679 goto error_irq;
755out: 680out:
756 spin_unlock(&irq_mapping_update_lock); 681 spin_unlock(&irq_mapping_update_lock);
757 return irq; 682 return irq;
683error_irq:
684 spin_unlock(&irq_mapping_update_lock);
685 xen_free_irq(irq);
686 return -1;
758} 687}
759#endif 688#endif
760 689
@@ -779,11 +708,12 @@ int xen_destroy_irq(int irq)
779 printk(KERN_WARNING "unmap irq failed %d\n", rc); 708 printk(KERN_WARNING "unmap irq failed %d\n", rc);
780 goto out; 709 goto out;
781 } 710 }
782 pirq_to_irq[info->u.pirq.pirq] = -1;
783 } 711 }
712 pirq_to_irq[info->u.pirq.pirq] = -1;
713
784 irq_info[irq] = mk_unbound_info(); 714 irq_info[irq] = mk_unbound_info();
785 715
786 irq_free_desc(irq); 716 xen_free_irq(irq);
787 717
788out: 718out:
789 spin_unlock(&irq_mapping_update_lock); 719 spin_unlock(&irq_mapping_update_lock);
@@ -814,7 +744,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
814 irq = evtchn_to_irq[evtchn]; 744 irq = evtchn_to_irq[evtchn];
815 745
816 if (irq == -1) { 746 if (irq == -1) {
817 irq = find_unbound_irq(); 747 irq = xen_allocate_irq_dynamic();
818 748
819 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, 749 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
820 handle_fasteoi_irq, "event"); 750 handle_fasteoi_irq, "event");
@@ -839,7 +769,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
839 irq = per_cpu(ipi_to_irq, cpu)[ipi]; 769 irq = per_cpu(ipi_to_irq, cpu)[ipi];
840 770
841 if (irq == -1) { 771 if (irq == -1) {
842 irq = find_unbound_irq(); 772 irq = xen_allocate_irq_dynamic();
843 if (irq < 0) 773 if (irq < 0)
844 goto out; 774 goto out;
845 775
@@ -875,7 +805,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
875 irq = per_cpu(virq_to_irq, cpu)[virq]; 805 irq = per_cpu(virq_to_irq, cpu)[virq];
876 806
877 if (irq == -1) { 807 if (irq == -1) {
878 irq = find_unbound_irq(); 808 irq = xen_allocate_irq_dynamic();
879 809
880 set_irq_chip_and_handler_name(irq, &xen_percpu_chip, 810 set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
881 handle_percpu_irq, "virq"); 811 handle_percpu_irq, "virq");
@@ -934,7 +864,7 @@ static void unbind_from_irq(unsigned int irq)
934 if (irq_info[irq].type != IRQT_UNBOUND) { 864 if (irq_info[irq].type != IRQT_UNBOUND) {
935 irq_info[irq] = mk_unbound_info(); 865 irq_info[irq] = mk_unbound_info();
936 866
937 irq_free_desc(irq); 867 xen_free_irq(irq);
938 } 868 }
939 869
940 spin_unlock(&irq_mapping_update_lock); 870 spin_unlock(&irq_mapping_update_lock);
@@ -990,7 +920,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
990 if (irq < 0) 920 if (irq < 0)
991 return irq; 921 return irq;
992 922
993 irqflags |= IRQF_NO_SUSPEND; 923 irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
994 retval = request_irq(irq, handler, irqflags, devname, dev_id); 924 retval = request_irq(irq, handler, irqflags, devname, dev_id);
995 if (retval != 0) { 925 if (retval != 0) {
996 unbind_from_irq(irq); 926 unbind_from_irq(irq);
@@ -1234,11 +1164,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
1234 return 0; 1164 return 0;
1235} 1165}
1236 1166
1237static int set_affinity_irq(unsigned irq, const struct cpumask *dest) 1167static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
1168 bool force)
1238{ 1169{
1239 unsigned tcpu = cpumask_first(dest); 1170 unsigned tcpu = cpumask_first(dest);
1240 1171
1241 return rebind_irq_to_cpu(irq, tcpu); 1172 return rebind_irq_to_cpu(data->irq, tcpu);
1242} 1173}
1243 1174
1244int resend_irq_on_evtchn(unsigned int irq) 1175int resend_irq_on_evtchn(unsigned int irq)
@@ -1257,35 +1188,35 @@ int resend_irq_on_evtchn(unsigned int irq)
1257 return 1; 1188 return 1;
1258} 1189}
1259 1190
1260static void enable_dynirq(unsigned int irq) 1191static void enable_dynirq(struct irq_data *data)
1261{ 1192{
1262 int evtchn = evtchn_from_irq(irq); 1193 int evtchn = evtchn_from_irq(data->irq);
1263 1194
1264 if (VALID_EVTCHN(evtchn)) 1195 if (VALID_EVTCHN(evtchn))
1265 unmask_evtchn(evtchn); 1196 unmask_evtchn(evtchn);
1266} 1197}
1267 1198
1268static void disable_dynirq(unsigned int irq) 1199static void disable_dynirq(struct irq_data *data)
1269{ 1200{
1270 int evtchn = evtchn_from_irq(irq); 1201 int evtchn = evtchn_from_irq(data->irq);
1271 1202
1272 if (VALID_EVTCHN(evtchn)) 1203 if (VALID_EVTCHN(evtchn))
1273 mask_evtchn(evtchn); 1204 mask_evtchn(evtchn);
1274} 1205}
1275 1206
1276static void ack_dynirq(unsigned int irq) 1207static void ack_dynirq(struct irq_data *data)
1277{ 1208{
1278 int evtchn = evtchn_from_irq(irq); 1209 int evtchn = evtchn_from_irq(data->irq);
1279 1210
1280 move_masked_irq(irq); 1211 move_masked_irq(data->irq);
1281 1212
1282 if (VALID_EVTCHN(evtchn)) 1213 if (VALID_EVTCHN(evtchn))
1283 unmask_evtchn(evtchn); 1214 unmask_evtchn(evtchn);
1284} 1215}
1285 1216
1286static int retrigger_dynirq(unsigned int irq) 1217static int retrigger_dynirq(struct irq_data *data)
1287{ 1218{
1288 int evtchn = evtchn_from_irq(irq); 1219 int evtchn = evtchn_from_irq(data->irq);
1289 struct shared_info *sh = HYPERVISOR_shared_info; 1220 struct shared_info *sh = HYPERVISOR_shared_info;
1290 int ret = 0; 1221 int ret = 0;
1291 1222
@@ -1334,7 +1265,7 @@ static void restore_cpu_pirqs(void)
1334 1265
1335 printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); 1266 printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
1336 1267
1337 startup_pirq(irq); 1268 __startup_pirq(irq);
1338 } 1269 }
1339} 1270}
1340 1271
@@ -1445,7 +1376,6 @@ void xen_poll_irq(int irq)
1445void xen_irq_resume(void) 1376void xen_irq_resume(void)
1446{ 1377{
1447 unsigned int cpu, irq, evtchn; 1378 unsigned int cpu, irq, evtchn;
1448 struct irq_desc *desc;
1449 1379
1450 init_evtchn_cpu_bindings(); 1380 init_evtchn_cpu_bindings();
1451 1381
@@ -1465,66 +1395,48 @@ void xen_irq_resume(void)
1465 restore_cpu_ipis(cpu); 1395 restore_cpu_ipis(cpu);
1466 } 1396 }
1467 1397
1468 /*
1469 * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
1470 * are not handled by the IRQ core.
1471 */
1472 for_each_irq_desc(irq, desc) {
1473 if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
1474 continue;
1475 if (desc->status & IRQ_DISABLED)
1476 continue;
1477
1478 evtchn = evtchn_from_irq(irq);
1479 if (evtchn == -1)
1480 continue;
1481
1482 unmask_evtchn(evtchn);
1483 }
1484
1485 restore_cpu_pirqs(); 1398 restore_cpu_pirqs();
1486} 1399}
1487 1400
1488static struct irq_chip xen_dynamic_chip __read_mostly = { 1401static struct irq_chip xen_dynamic_chip __read_mostly = {
1489 .name = "xen-dyn", 1402 .name = "xen-dyn",
1490 1403
1491 .disable = disable_dynirq, 1404 .irq_disable = disable_dynirq,
1492 .mask = disable_dynirq, 1405 .irq_mask = disable_dynirq,
1493 .unmask = enable_dynirq, 1406 .irq_unmask = enable_dynirq,
1494 1407
1495 .eoi = ack_dynirq, 1408 .irq_eoi = ack_dynirq,
1496 .set_affinity = set_affinity_irq, 1409 .irq_set_affinity = set_affinity_irq,
1497 .retrigger = retrigger_dynirq, 1410 .irq_retrigger = retrigger_dynirq,
1498}; 1411};
1499 1412
1500static struct irq_chip xen_pirq_chip __read_mostly = { 1413static struct irq_chip xen_pirq_chip __read_mostly = {
1501 .name = "xen-pirq", 1414 .name = "xen-pirq",
1502 1415
1503 .startup = startup_pirq, 1416 .irq_startup = startup_pirq,
1504 .shutdown = shutdown_pirq, 1417 .irq_shutdown = shutdown_pirq,
1505 1418
1506 .enable = enable_pirq, 1419 .irq_enable = enable_pirq,
1507 .unmask = enable_pirq, 1420 .irq_unmask = enable_pirq,
1508 1421
1509 .disable = disable_pirq, 1422 .irq_disable = disable_pirq,
1510 .mask = disable_pirq, 1423 .irq_mask = disable_pirq,
1511 1424
1512 .ack = ack_pirq, 1425 .irq_ack = ack_pirq,
1513 .end = end_pirq,
1514 1426
1515 .set_affinity = set_affinity_irq, 1427 .irq_set_affinity = set_affinity_irq,
1516 1428
1517 .retrigger = retrigger_dynirq, 1429 .irq_retrigger = retrigger_dynirq,
1518}; 1430};
1519 1431
1520static struct irq_chip xen_percpu_chip __read_mostly = { 1432static struct irq_chip xen_percpu_chip __read_mostly = {
1521 .name = "xen-percpu", 1433 .name = "xen-percpu",
1522 1434
1523 .disable = disable_dynirq, 1435 .irq_disable = disable_dynirq,
1524 .mask = disable_dynirq, 1436 .irq_mask = disable_dynirq,
1525 .unmask = enable_dynirq, 1437 .irq_unmask = enable_dynirq,
1526 1438
1527 .ack = ack_dynirq, 1439 .irq_ack = ack_dynirq,
1528}; 1440};
1529 1441
1530int xen_set_callback_via(uint64_t via) 1442int xen_set_callback_via(uint64_t via)
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 24177272bcb..ebb292859b5 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -34,42 +34,38 @@ enum shutdown_state {
34/* Ignore multiple shutdown requests. */ 34/* Ignore multiple shutdown requests. */
35static enum shutdown_state shutting_down = SHUTDOWN_INVALID; 35static enum shutdown_state shutting_down = SHUTDOWN_INVALID;
36 36
37#ifdef CONFIG_PM_SLEEP 37struct suspend_info {
38static int xen_hvm_suspend(void *data) 38 int cancelled;
39{ 39 unsigned long arg; /* extra hypercall argument */
40 int err; 40 void (*pre)(void);
41 struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; 41 void (*post)(int cancelled);
42 int *cancelled = data; 42};
43
44 BUG_ON(!irqs_disabled());
45
46 err = sysdev_suspend(PMSG_SUSPEND);
47 if (err) {
48 printk(KERN_ERR "xen_hvm_suspend: sysdev_suspend failed: %d\n",
49 err);
50 return err;
51 }
52
53 *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
54 43
55 xen_hvm_post_suspend(*cancelled); 44static void xen_hvm_post_suspend(int cancelled)
45{
46 xen_arch_hvm_post_suspend(cancelled);
56 gnttab_resume(); 47 gnttab_resume();
48}
57 49
58 if (!*cancelled) { 50static void xen_pre_suspend(void)
59 xen_irq_resume(); 51{
60 xen_console_resume(); 52 xen_mm_pin_all();
61 xen_timer_resume(); 53 gnttab_suspend();
62 } 54 xen_arch_pre_suspend();
63 55}
64 sysdev_resume();
65 56
66 return 0; 57static void xen_post_suspend(int cancelled)
58{
59 xen_arch_post_suspend(cancelled);
60 gnttab_resume();
61 xen_mm_unpin_all();
67} 62}
68 63
64#ifdef CONFIG_PM_SLEEP
69static int xen_suspend(void *data) 65static int xen_suspend(void *data)
70{ 66{
67 struct suspend_info *si = data;
71 int err; 68 int err;
72 int *cancelled = data;
73 69
74 BUG_ON(!irqs_disabled()); 70 BUG_ON(!irqs_disabled());
75 71
@@ -80,22 +76,20 @@ static int xen_suspend(void *data)
80 return err; 76 return err;
81 } 77 }
82 78
83 xen_mm_pin_all(); 79 if (si->pre)
84 gnttab_suspend(); 80 si->pre();
85 xen_pre_suspend();
86 81
87 /* 82 /*
88 * This hypercall returns 1 if suspend was cancelled 83 * This hypercall returns 1 if suspend was cancelled
89 * or the domain was merely checkpointed, and 0 if it 84 * or the domain was merely checkpointed, and 0 if it
90 * is resuming in a new domain. 85 * is resuming in a new domain.
91 */ 86 */
92 *cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info)); 87 si->cancelled = HYPERVISOR_suspend(si->arg);
93 88
94 xen_post_suspend(*cancelled); 89 if (si->post)
95 gnttab_resume(); 90 si->post(si->cancelled);
96 xen_mm_unpin_all();
97 91
98 if (!*cancelled) { 92 if (!si->cancelled) {
99 xen_irq_resume(); 93 xen_irq_resume();
100 xen_console_resume(); 94 xen_console_resume();
101 xen_timer_resume(); 95 xen_timer_resume();
@@ -109,7 +103,7 @@ static int xen_suspend(void *data)
109static void do_suspend(void) 103static void do_suspend(void)
110{ 104{
111 int err; 105 int err;
112 int cancelled = 1; 106 struct suspend_info si;
113 107
114 shutting_down = SHUTDOWN_SUSPEND; 108 shutting_down = SHUTDOWN_SUSPEND;
115 109
@@ -139,20 +133,29 @@ static void do_suspend(void)
 		goto out_resume;
 	}
 
-	if (xen_hvm_domain())
-		err = stop_machine(xen_hvm_suspend, &cancelled, cpumask_of(0));
-	else
-		err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
+	si.cancelled = 1;
+
+	if (xen_hvm_domain()) {
+		si.arg = 0UL;
+		si.pre = NULL;
+		si.post = &xen_hvm_post_suspend;
+	} else {
+		si.arg = virt_to_mfn(xen_start_info);
+		si.pre = &xen_pre_suspend;
+		si.post = &xen_post_suspend;
+	}
+
+	err = stop_machine(xen_suspend, &si, cpumask_of(0));
 
 	dpm_resume_noirq(PMSG_RESUME);
 
 	if (err) {
 		printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
-		cancelled = 1;
+		si.cancelled = 1;
 	}
 
 out_resume:
-	if (!cancelled) {
+	if (!si.cancelled) {
 		xen_arch_resume();
 		xs_resume();
 	} else
@@ -172,12 +175,39 @@ out:
 }
 #endif /* CONFIG_PM_SLEEP */
 
+struct shutdown_handler {
+	const char *command;
+	void (*cb)(void);
+};
+
+static void do_poweroff(void)
+{
+	shutting_down = SHUTDOWN_POWEROFF;
+	orderly_poweroff(false);
+}
+
+static void do_reboot(void)
+{
+	shutting_down = SHUTDOWN_POWEROFF; /* ? */
+	ctrl_alt_del();
+}
+
 static void shutdown_handler(struct xenbus_watch *watch,
 			     const char **vec, unsigned int len)
 {
 	char *str;
 	struct xenbus_transaction xbt;
 	int err;
+	static struct shutdown_handler handlers[] = {
+		{ "poweroff",	do_poweroff },
+		{ "halt",	do_poweroff },
+		{ "reboot",	do_reboot   },
+#ifdef CONFIG_PM_SLEEP
+		{ "suspend",	do_suspend  },
+#endif
+		{NULL, NULL},
+	};
+	static struct shutdown_handler *handler;
 
 	if (shutting_down != SHUTDOWN_INVALID)
 		return;
@@ -194,7 +224,14 @@ static void shutdown_handler(struct xenbus_watch *watch,
 		return;
 	}
 
-	xenbus_write(xbt, "control", "shutdown", "");
+	for (handler = &handlers[0]; handler->command; handler++) {
+		if (strcmp(str, handler->command) == 0)
+			break;
+	}
+
+	/* Only acknowledge commands which we are prepared to handle. */
+	if (handler->cb)
+		xenbus_write(xbt, "control", "shutdown", "");
 
 	err = xenbus_transaction_end(xbt, 0);
 	if (err == -EAGAIN) {
@@ -202,17 +239,8 @@ static void shutdown_handler(struct xenbus_watch *watch,
 		goto again;
 	}
 
-	if (strcmp(str, "poweroff") == 0 ||
-	    strcmp(str, "halt") == 0) {
-		shutting_down = SHUTDOWN_POWEROFF;
-		orderly_poweroff(false);
-	} else if (strcmp(str, "reboot") == 0) {
-		shutting_down = SHUTDOWN_POWEROFF; /* ? */
-		ctrl_alt_del();
-#ifdef CONFIG_PM_SLEEP
-	} else if (strcmp(str, "suspend") == 0) {
-		do_suspend();
-#endif
+	if (handler->cb) {
+		handler->cb();
 	} else {
 		printk(KERN_INFO "Ignoring shutdown request: %s\n", str);
 		shutting_down = SHUTDOWN_INVALID;
@@ -291,27 +319,18 @@ static int shutdown_event(struct notifier_block *notifier,
 	return NOTIFY_DONE;
 }
 
-static int __init __setup_shutdown_event(void)
-{
-	/* Delay initialization in the PV on HVM case */
-	if (xen_hvm_domain())
-		return 0;
-
-	if (!xen_pv_domain())
-		return -ENODEV;
-
-	return xen_setup_shutdown_event();
-}
-
 int xen_setup_shutdown_event(void)
 {
 	static struct notifier_block xenstore_notifier = {
 		.notifier_call = shutdown_event
 	};
+
+	if (!xen_domain())
+		return -ENODEV;
 	register_xenstore_notifier(&xenstore_notifier);
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xen_setup_shutdown_event);
 
-subsys_initcall(__setup_shutdown_event);
+subsys_initcall(xen_setup_shutdown_event);
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index afbe041f42c..319dd0a94d5 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -156,9 +156,6 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
 	if (ret)
 		goto out;
 	xenbus_probe(NULL);
-	ret = xen_setup_shutdown_event();
-	if (ret)
-		goto out;
 	return 0;
 
 out: