author    Al Viro <viro@zeniv.linux.org.uk>  2008-12-04 09:50:55 -0500
committer Al Viro <viro@zeniv.linux.org.uk>  2009-01-05 11:53:07 -0500
commit    c765d479037808532310212e9b3fa95760e975f2
tree      74c9f5570a9a5477522bdf203067303b2e9ef6d3
parent    7f5ff766a7babd72fc192125e12ef5570effff4c
affs: do not zero ->i_op
It is already set to an empty table and should never be NULL.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
fs/affs/inode.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 415d9c67ac16..3c4ec7d864c4 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -119,8 +119,7 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino)
 			goto bad_inode;
 #else
 			inode->i_mode |= S_IFDIR;
-			inode->i_op = NULL;
-			inode->i_fop = NULL;
+			/* ... and leave ->i_op and ->i_fop pointing to empty */
 			break;
 #endif
 		case ST_LINKFILE:
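
For context (an editorial sketch, not part of this commit): the VFS already points every newly allocated inode at static empty operation tables in fs/inode.c, paraphrased below from the kernel of that era, which is why zeroing ->i_op here would reintroduce a NULL that VFS callers no longer check for:

	/* fs/inode.c (paraphrased sketch): every inode starts out with
	 * empty, never-NULL operation tables; filesystems overwrite them
	 * with real ops only where needed. */
	static const struct inode_operations empty_iops;
	static const struct file_operations empty_fops;

	inode->i_op = &empty_iops;
	inode->i_fop = &empty_fops;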
-rw-r--r--drivers/media/video/s5p-tv/sii9234_drv.c12
-rw-r--r--drivers/media/video/saa7121.h132
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c82
-rw-r--r--drivers/media/video/saa7146.h112
-rw-r--r--drivers/media/video/saa7146reg.h283
-rw-r--r--drivers/media/video/saa7164/saa7164-api.c14
-rw-r--r--drivers/media/video/saa7164/saa7164-i2c.c20
-rw-r--r--drivers/media/video/saa7164/saa7164.h2
-rw-r--r--drivers/media/video/smiapp/Kconfig3
-rw-r--r--drivers/media/video/smiapp/smiapp-core.c41
-rw-r--r--drivers/media/video/sn9c102/sn9c102.h2
-rw-r--r--drivers/media/video/soc_camera.c12
-rw-r--r--drivers/media/video/soc_mediabus.c6
-rw-r--r--drivers/media/video/tlg2300/pd-main.c4
-rw-r--r--drivers/media/video/tuner-core.c17
-rw-r--r--drivers/media/video/tvaudio.c291
-rw-r--r--drivers/media/video/tvp5150.c97
-rw-r--r--drivers/media/video/tw9910.c8
-rw-r--r--drivers/media/video/uvc/Kconfig1
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c5
-rw-r--r--drivers/media/video/uvc/uvc_queue.c1
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c2
-rw-r--r--drivers/media/video/uvc/uvc_video.c8
-rw-r--r--drivers/media/video/v4l2-compat-ioctl32.c10
-rw-r--r--drivers/media/video/v4l2-ctrls.c3
-rw-r--r--drivers/media/video/v4l2-dev.c73
-rw-r--r--drivers/media/video/v4l2-ioctl.c3364
-rw-r--r--drivers/media/video/v4l2-mem2mem.c18
-rw-r--r--drivers/media/video/v4l2-subdev.c4
-rw-r--r--drivers/media/video/via-camera.c2
-rw-r--r--drivers/media/video/videobuf-core.c16
-rw-r--r--drivers/media/video/videobuf-dma-contig.c57
-rw-r--r--drivers/media/video/videobuf2-core.c417
-rw-r--r--drivers/media/video/vino.c4
-rw-r--r--drivers/media/video/vivi.c216
-rw-r--r--drivers/media/video/zoran/zoran.h4
-rw-r--r--drivers/media/video/zoran/zoran_driver.c4
-rw-r--r--drivers/media/video/zoran/zr36016.c4
-rw-r--r--drivers/media/video/zr364xx.c484
-rw-r--r--drivers/message/i2o/i2o_config.c7
-rw-r--r--drivers/message/i2o/i2o_proc.c37
-rw-r--r--drivers/mfd/88pm800.c596
-rw-r--r--drivers/mfd/88pm805.c301
-rw-r--r--drivers/mfd/88pm80x.c145
-rw-r--r--drivers/mfd/88pm860x-core.c23
-rw-r--r--drivers/mfd/Kconfig103
-rw-r--r--drivers/mfd/Makefile15
-rw-r--r--drivers/mfd/ab3100-core.c23
-rw-r--r--drivers/mfd/ab5500-core.h87
-rw-r--r--drivers/mfd/ab8500-core.c242
-rw-r--r--drivers/mfd/ab8500-debugfs.c12
-rw-r--r--drivers/mfd/ab8500-gpadc.c9
-rw-r--r--drivers/mfd/ab8500-sysctrl.c6
-rw-r--r--drivers/mfd/adp5520.c2
-rw-r--r--drivers/mfd/anatop-mfd.c2
-rw-r--r--drivers/mfd/arizona-core.c566
-rw-r--r--drivers/mfd/arizona-i2c.c97
-rw-r--r--drivers/mfd/arizona-irq.c275
-rw-r--r--drivers/mfd/arizona-spi.c97
-rw-r--r--drivers/mfd/arizona.h40
-rw-r--r--drivers/mfd/asic3.c1
-rw-r--r--drivers/mfd/da9052-core.c1
-rw-r--r--drivers/mfd/db8500-prcmu.c92
-rw-r--r--drivers/mfd/dbx500-prcmu-regs.h1
-rw-r--r--drivers/mfd/ezx-pcap.c2
-rw-r--r--drivers/mfd/max77686-irq.c319
-rw-r--r--drivers/mfd/max77686.c187
-rw-r--r--drivers/mfd/max77693.c11
-rw-r--r--drivers/mfd/max8925-core.c8
-rw-r--r--drivers/mfd/max8997-irq.c62
-rw-r--r--drivers/mfd/max8997.c9
-rw-r--r--drivers/mfd/mc13xxx-core.c4
-rw-r--r--drivers/mfd/mc13xxx-i2c.c12
-rw-r--r--drivers/mfd/mc13xxx-spi.c76
-rw-r--r--drivers/mfd/mfd-core.c30
-rw-r--r--drivers/mfd/omap-usb-host.c48
-rw-r--r--drivers/mfd/palmas.c13
-rw-r--r--drivers/mfd/pcf50633-core.c9
-rw-r--r--drivers/mfd/s5m-core.c206
-rw-r--r--drivers/mfd/s5m-irq.c495
-rw-r--r--drivers/mfd/sec-core.c216
-rw-r--r--drivers/mfd/sec-irq.c317
-rw-r--r--drivers/mfd/stmpe-i2c.c2
-rw-r--r--drivers/mfd/stmpe-spi.c4
-rw-r--r--drivers/mfd/tc3589x.c9
-rw-r--r--drivers/mfd/timberdale.c2
-rw-r--r--drivers/mfd/tps65010.c3
-rw-r--r--drivers/mfd/tps65090.c4
-rw-r--r--drivers/mfd/tps65217.c67
-rw-r--r--drivers/mfd/tps6586x.c296
-rw-r--r--drivers/mfd/tps65910.c23
-rw-r--r--drivers/mfd/twl-core.c12
-rw-r--r--drivers/mfd/twl6040-core.c24
-rw-r--r--drivers/mfd/wm5102-tables.c2399
-rw-r--r--drivers/mfd/wm5110-tables.c2281
-rw-r--r--drivers/mfd/wm831x-otp.c8
-rw-r--r--drivers/mfd/wm8350-core.c354
-rw-r--r--drivers/mfd/wm8350-i2c.c5
-rw-r--r--drivers/mfd/wm8350-irq.c8
-rw-r--r--drivers/mfd/wm8350-regmap.c3222
-rw-r--r--drivers/mfd/wm8994-core.c17
-rw-r--r--drivers/mfd/wm8994-irq.c10
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/ab8500-pwm.c6
-rw-r--r--drivers/misc/cb710/core.c2
-rw-r--r--drivers/misc/eeprom/at25.c61
-rw-r--r--drivers/misc/hpilo.c33
-rw-r--r--drivers/misc/hpilo.h4
-rw-r--r--drivers/misc/iwmc3200top/Kconfig20
-rw-r--r--drivers/misc/iwmc3200top/Makefile29
-rw-r--r--drivers/misc/iwmc3200top/debugfs.c137
-rw-r--r--drivers/misc/iwmc3200top/debugfs.h58
-rw-r--r--drivers/misc/iwmc3200top/fw-download.c358
-rw-r--r--drivers/misc/iwmc3200top/fw-msg.h113
-rw-r--r--drivers/misc/iwmc3200top/iwmc3200top.h205
-rw-r--r--drivers/misc/iwmc3200top/log.c348
-rw-r--r--drivers/misc/iwmc3200top/log.h171
-rw-r--r--drivers/misc/iwmc3200top/main.c662
-rw-r--r--drivers/misc/lkdtm.c2
-rw-r--r--drivers/misc/mei/init.c4
-rw-r--r--drivers/misc/mei/interface.c85
-rw-r--r--drivers/misc/mei/interface.h18
-rw-r--r--drivers/misc/mei/interrupt.c171
-rw-r--r--drivers/misc/mei/iorw.c8
-rw-r--r--drivers/misc/mei/main.c86
-rw-r--r--drivers/misc/mei/mei_dev.h24
-rw-r--r--drivers/misc/mei/wd.c8
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c88
-rw-r--r--drivers/misc/ti-st/st_core.c5
-rw-r--r--drivers/misc/ti-st/st_ll.c2
-rw-r--r--drivers/mmc/card/block.c50
-rw-r--r--drivers/mmc/core/Makefile2
-rw-r--r--drivers/mmc/core/cd-gpio.c83
-rw-r--r--drivers/mmc/core/core.c90
-rw-r--r--drivers/mmc/core/host.c6
-rw-r--r--drivers/mmc/core/mmc.c23
-rw-r--r--drivers/mmc/core/mmc_ops.c1
-rw-r--r--drivers/mmc/core/sd.c177
-rw-r--r--drivers/mmc/core/sdio.c13
-rw-r--r--drivers/mmc/core/sdio_cis.c2
-rw-r--r--drivers/mmc/core/slot-gpio.c188
-rw-r--r--drivers/mmc/host/atmel-mci-regs.h14
-rw-r--r--drivers/mmc/host/atmel-mci.c26
-rw-r--r--drivers/mmc/host/dw_mmc.c51
-rw-r--r--drivers/mmc/host/mmci.c19
-rw-r--r--drivers/mmc/host/mvsdio.c4
-rw-r--r--drivers/mmc/host/mxs-mmc.c28
-rw-r--r--drivers/mmc/host/omap.c386
-rw-r--r--drivers/mmc/host/omap_hsmmc.c235
-rw-r--r--drivers/mmc/host/s3cmci.c10
-rw-r--r--drivers/mmc/host/sdhci-dove.c51
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c9
-rw-r--r--drivers/mmc/host/sdhci-pci.c1
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c54
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c52
-rw-r--r--drivers/mmc/host/sdhci-s3c.c2
-rw-r--r--drivers/mmc/host/sdhci-spear.c4
-rw-r--r--drivers/mmc/host/sdhci-tegra.c11
-rw-r--r--drivers/mmc/host/sdhci.c148
-rw-r--r--drivers/mmc/host/sdhci.h1
-rw-r--r--drivers/mmc/host/sh_mmcif.c273
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c74
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c132
-rw-r--r--drivers/mtd/maps/uclinux.c5
-rw-r--r--drivers/mtd/mtdoops.c22
-rw-r--r--drivers/mtd/mtdsuper.c4
-rw-r--r--drivers/mtd/nand/Kconfig2
-rw-r--r--drivers/mtd/nand/cafe_nand.c2
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c10
-rw-r--r--drivers/mtd/nand/jz4740_nand.c228
-rw-r--r--drivers/mtd/nand/mxc_nand.c37
-rw-r--r--drivers/mtd/nand/nand_base.c7
-rw-r--r--drivers/mtd/nand/nandsim.c12
-rw-r--r--drivers/mtd/nand/omap2.c105
-rw-r--r--drivers/mtd/nand/orion_nand.c6
-rw-r--r--drivers/mtd/ubi/Kconfig2
-rw-r--r--drivers/mtd/ubi/cdev.c2
-rw-r--r--drivers/mtd/ubi/debug.c8
-rw-r--r--drivers/mtd/ubi/misc.c25
-rw-r--r--drivers/mtd/ubi/ubi.h1
-rw-r--r--drivers/mtd/ubi/vmt.c20
-rw-r--r--drivers/net/appletalk/cops.c6
-rw-r--r--drivers/net/appletalk/ltpc.c4
-rw-r--r--drivers/net/bonding/bond_3ad.c13
-rw-r--r--drivers/net/bonding/bond_3ad.h4
-rw-r--r--drivers/net/bonding/bond_alb.c26
-rw-r--r--drivers/net/bonding/bond_debugfs.c2
-rw-r--r--drivers/net/bonding/bond_main.c106
-rw-r--r--drivers/net/bonding/bond_procfs.c15
-rw-r--r--drivers/net/bonding/bond_sysfs.c10
-rw-r--r--drivers/net/bonding/bonding.h4
-rw-r--r--drivers/net/caif/caif_hsi.c551
-rw-r--r--drivers/net/caif/caif_serial.c3
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/bfin_can.c4
-rw-r--r--drivers/net/can/c_can/Kconfig20
-rw-r--r--drivers/net/can/c_can/Makefile1
-rw-r--r--drivers/net/can/c_can/c_can.c140
-rw-r--r--drivers/net/can/c_can/c_can.h164
-rw-r--r--drivers/net/can/c_can/c_can_pci.c221
-rw-r--r--drivers/net/can/c_can/c_can_platform.c82
-rw-r--r--drivers/net/can/cc770/cc770.c4
-rw-r--r--drivers/net/can/cc770/cc770_platform.c2
-rw-r--r--drivers/net/can/dev.c37
-rw-r--r--drivers/net/can/flexcan.c154
-rw-r--r--drivers/net/can/janz-ican3.c241
-rw-r--r--drivers/net/can/mcp251x.c5
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c2
-rw-r--r--drivers/net/can/mscan/mscan.c2
-rw-r--r--drivers/net/can/pch_can.c2
-rw-r--r--drivers/net/can/sja1000/sja1000.c2
-rw-r--r--drivers/net/can/softing/softing_main.c2
-rw-r--r--drivers/net/can/ti_hecc.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c2
-rw-r--r--drivers/net/can/usb/esd_usb2.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h2
-rw-r--r--drivers/net/can/vcan.c27
-rw-r--r--drivers/net/cris/eth_v10.c4
-rw-r--r--drivers/net/dummy.c23
-rw-r--r--drivers/net/ethernet/3com/3c501.c2
-rw-r--r--drivers/net/ethernet/8390/Kconfig14
-rw-r--r--drivers/net/ethernet/8390/Makefile1
-rw-r--r--drivers/net/ethernet/8390/apne.c2
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c480
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c8
-rw-r--r--drivers/net/ethernet/amd/declance.c4
-rw-r--r--drivers/net/ethernet/amd/lance.c5
-rw-r--r--drivers/net/ethernet/apple/macmace.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.c8
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.h5
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c94
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c105
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_param.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c45
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c58
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c10
-rw-r--r--drivers/net/ethernet/broadcom/b44.c100
-rw-r--r--drivers/net/ethernet/broadcom/b44.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c106
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h45
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h214
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c273
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h63
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c585
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h184
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c1286
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h53
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c385
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h168
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h132
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h42
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c70
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h2
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c63
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h13
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c362
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h55
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.c97
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cs.h34
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs.h63
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_cna.h15
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h35
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_status.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c393
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h43
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c48
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_msgq.c4
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi.h81
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_cna.h42
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_enet.h107
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_reg.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/bna.h51
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c15
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_hw_defs.h33
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c17
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h66
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c12
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/cna_fwimg.c4
-rw-r--r--drivers/net/ethernet/cadence/macb.c13
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c35
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c30
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c7
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h31
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c183
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h59
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c7
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h9
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c533
-rw-r--r--drivers/net/ethernet/ethoc.c6
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c9
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c11
-rw-r--r--drivers/net/ethernet/freescale/fec.c32
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c8
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c29
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c508
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c420
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/hp/hp100.c6
-rw-r--r--drivers/net/ethernet/i825xx/lp486e.c8
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c4
-rw-r--r--drivers/net/ethernet/intel/Kconfig10
-rw-r--r--drivers/net/ethernet/intel/e100.c40
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c1
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c27
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c12
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c42
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c160
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c43
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c8
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c18
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h9
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h25
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c80
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c185
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c12
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c29
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c1
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c5
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.c5
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ids.h5
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h104
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c48
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c78
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c26
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c159
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c395
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c840
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c815
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c23
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c200
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c223
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h46
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c172
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h114
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c1387
-rw-r--r--drivers/net/ethernet/jme.c14
-rw-r--r--drivers/net/ethernet/lantiq_etop.c3
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c25
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/sky2.c28
-rw-r--r--drivers/net/ethernet/marvell/sky2.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c382
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c648
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c274
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c223
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c180
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c527
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h100
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h49
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c119
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c285
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/sense.c14
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c35
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c10
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c24
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c8
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c23
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h3
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c5
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c10
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c40
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c312
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c12
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c11
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c112
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c21
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c37
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h13
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c315
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c101
-rw-r--r--drivers/net/ethernet/rdc/r6040.c16
-rw-r--r--drivers/net/ethernet/realtek/r8169.c1009
-rw-r--r--drivers/net/ethernet/renesas/Kconfig4
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c394
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h77
-rw-r--r--drivers/net/ethernet/seeq/seeq8005.c4
-rw-r--r--drivers/net/ethernet/sfc/efx.c16
-rw-r--r--drivers/net/ethernet/sfc/efx.h14
-rw-r--r--drivers/net/ethernet/sfc/enum.h8
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c18
-rw-r--r--drivers/net/ethernet/sfc/falcon.c35
-rw-r--r--drivers/net/ethernet/sfc/falcon_xmac.c12
-rw-r--r--drivers/net/ethernet/sfc/filter.c2
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c11
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c1
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h3
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h11
-rw-r--r--drivers/net/ethernet/sfc/nic.c11
-rw-r--r--drivers/net/ethernet/sfc/nic.h18
-rw-r--r--drivers/net/ethernet/sfc/rx.c23
-rw-r--r--drivers/net/ethernet/sfc/selftest.c64
-rw-r--r--drivers/net/ethernet/sfc/siena.c37
-rw-r--r--drivers/net/ethernet/sfc/tx.c112
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c4
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c6
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c19
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c101
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h73
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c267
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c18
-rw-r--r--drivers/net/ethernet/sun/niu.c18
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c1
-rw-r--r--drivers/net/ethernet/sun/sunhme.c3
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c177
-rw-r--r--drivers/net/ethernet/ti/cpsw.c25
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c208
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c25
-rw-r--r--drivers/net/ethernet/tile/Kconfig2
-rw-r--r--drivers/net/ethernet/tile/Makefile4
-rw-r--r--drivers/net/ethernet/tile/tilegx.c1905
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c6
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/ethernet/wiznet/Kconfig1
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c3
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c1
-rw-r--r--drivers/net/fddi/defxx.c4
-rw-r--r--drivers/net/fddi/skfp/pmf.c8
-rw-r--r--drivers/net/hamradio/mkiss.c8
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/hyperv/netvsc.c17
-rw-r--r--drivers/net/hyperv/netvsc_drv.c37
-rw-r--r--drivers/net/hyperv/rndis_filter.c101
-rw-r--r--drivers/net/irda/ali-ircc.c6
-rw-r--r--drivers/net/irda/au1k_ir.c2
-rw-r--r--drivers/net/irda/bfin_sir.c8
-rw-r--r--drivers/net/irda/ks959-sir.c1
-rw-r--r--drivers/net/irda/ksdazzle-sir.c1
-rw-r--r--drivers/net/irda/pxaficp_ir.c12
-rw-r--r--drivers/net/loopback.c4
-rw-r--r--drivers/net/macvtap.c11
-rw-r--r--drivers/net/netconsole.c6
-rw-r--r--drivers/net/phy/Kconfig5
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/amd.c8
-rw-r--r--drivers/net/phy/bcm63xx.c31
-rw-r--r--drivers/net/phy/bcm87xx.c231
-rw-r--r--drivers/net/phy/broadcom.c119
-rw-r--r--drivers/net/phy/cicada.c35
-rw-r--r--drivers/net/phy/davicom.c41
-rw-r--r--drivers/net/phy/dp83640.c23
-rw-r--r--drivers/net/phy/fixed.c4
-rw-r--r--drivers/net/phy/icplus.c38
-rw-r--r--drivers/net/phy/lxt.c47
-rw-r--r--drivers/net/phy/marvell.c22
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c1
-rw-r--r--drivers/net/phy/mdio-mux.c12
-rw-r--r--drivers/net/phy/mdio-octeon.c92
-rw-r--r--drivers/net/phy/mdio_bus.c16
-rw-r--r--drivers/net/phy/micrel.c70
-rw-r--r--drivers/net/phy/national.c8
-rw-r--r--drivers/net/phy/phy.c316
-rw-r--r--drivers/net/phy/phy_device.c139
-rw-r--r--drivers/net/phy/realtek.c6
-rw-r--r--drivers/net/phy/smsc.c66
-rw-r--r--drivers/net/phy/spi_ks8995.c4
-rw-r--r--drivers/net/phy/ste10Xp.c21
-rw-r--r--drivers/net/phy/vitesse.c52
-rw-r--r--drivers/net/ppp/pptp.c4
-rw-r--r--drivers/net/slip/slip.c4
-rw-r--r--drivers/net/team/Kconfig13
-rw-r--r--drivers/net/team/Makefile1
-rw-r--r--drivers/net/team/team.c779
-rw-r--r--drivers/net/team/team_mode_activebackup.c17
-rw-r--r--drivers/net/team/team_mode_broadcast.c87
-rw-r--r--drivers/net/team/team_mode_loadbalance.c546
-rw-r--r--drivers/net/team/team_mode_roundrobin.c13
-rw-r--r--drivers/net/tun.c160
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/asix.h218
-rw-r--r--drivers/net/usb/asix_common.c631
-rw-r--r--drivers/net/usb/asix_devices.c (renamed from drivers/net/usb/asix.c)666
-rw-r--r--drivers/net/usb/ax88172a.c414
-rw-r--r--drivers/net/usb/cdc-phonet.c7
-rw-r--r--drivers/net/usb/cdc_ncm.c88
-rw-r--r--drivers/net/usb/ipheth.c5
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/pegasus.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c584
-rw-r--r--drivers/net/usb/sierra_net.c52
-rw-r--r--drivers/net/usb/smsc75xx.c4
-rw-r--r--drivers/net/usb/smsc95xx.c36
-rw-r--r--drivers/net/usb/usbnet.c130
-rw-r--r--drivers/net/virtio_net.c21
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c4
-rw-r--r--drivers/net/wan/dscc4.c5
-rw-r--r--drivers/net/wan/x25_asy.c2
-rw-r--r--drivers/net/wimax/i2400m/Kconfig22
-rw-r--r--drivers/net/wimax/i2400m/Makefile8
-rw-r--r--drivers/net/wimax/i2400m/control.c4
-rw-r--r--drivers/net/wimax/i2400m/driver.c5
-rw-r--r--drivers/net/wimax/i2400m/fw.c9
-rw-r--r--drivers/net/wimax/i2400m/i2400m-sdio.h157
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h13
-rw-r--r--drivers/net/wimax/i2400m/sdio-debug-levels.h22
-rw-r--r--drivers/net/wimax/i2400m/sdio-fw.c210
-rw-r--r--drivers/net/wimax/i2400m/sdio-rx.c301
-rw-r--r--drivers/net/wimax/i2400m/sdio-tx.c177
-rw-r--r--drivers/net/wimax/i2400m/sdio.c602
-rw-r--r--drivers/net/wimax/i2400m/usb-fw.c2
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/adm8211.c5
-rw-r--r--drivers/net/wireless/airo.c8
-rw-r--r--drivers/net/wireless/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/ath/ath.h3
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig8
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c27
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c7
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c288
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h8
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c8
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h46
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c28
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c28
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c48
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c158
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h58
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c489
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h32
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c776
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c176
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c164
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_initvals.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c214
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c535
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c734
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.h40
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c124
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h43
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h53
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h882
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h755
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h1404
-rw-r--r--drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h1284
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h772
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h94
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c528
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h25
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c69
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c122
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c87
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c302
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h108
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c555
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c1403
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c246
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c782
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h171
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c532
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c196
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h11
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/fwdesc.h3
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c6
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c53
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c69
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h8
-rw-r--r--drivers/net/wireless/ath/key.c4
-rw-r--r--drivers/net/wireless/atmel.c4
-rw-r--r--drivers/net/wireless/b43/b43.h9
-rw-r--r--drivers/net/wireless/b43/main.c66
-rw-r--r--drivers/net/wireless/b43/phy_n.c17
-rw-r--r--drivers/net/wireless/b43/xmit.c9
-rw-r--r--drivers/net/wireless/b43legacy/dma.c4
-rw-r--r--drivers/net/wireless/b43legacy/main.c4
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c29
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c126
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h59
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c11
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c669
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c17
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c7
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c131
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.h18
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.c16
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c1220
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c17
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c24
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c127
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c22
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c142
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.c172
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/utils.c2
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/brcm80211/include/soc.h62
-rw-r--r--drivers/net/wireless/hostap/hostap_proc.c3
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c20
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c27
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c2
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c17
-rw-r--r--drivers/net/wireless/iwlegacy/common.c23
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig5
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile32
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/Makefile13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn.h)113
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-calib.c)24
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn-calib.h)4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h (renamed from drivers/net/wireless/iwlwifi/iwl-commands.h)48
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c (renamed from drivers/net/wireless/iwlwifi/iwl-debugfs.c)43
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h (renamed from drivers/net/wireless/iwlwifi/iwl-dev.h)192
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-devices.c)191
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.c (renamed from drivers/net/wireless/iwlwifi/iwl-led.c)5
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.h (renamed from drivers/net/wireless/iwlwifi/iwl-led.h)0
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-lib.c)24
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c (renamed from drivers/net/wireless/iwlwifi/iwl-mac80211.c)216
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn.c)504
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.c (renamed from drivers/net/wireless/iwlwifi/iwl-power.c)11
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.h (renamed from drivers/net/wireless/iwlwifi/iwl-power.h)2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rs.c)63
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rs.h)3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rx.c)78
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rxon.c)54
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c (renamed from drivers/net/wireless/iwlwifi/iwl-scan.c)195
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-sta.c)62
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/testmode.c471
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-tt.c)13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn-tt.h)2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-tx.c)62
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c (renamed from drivers/net/wireless/iwlwifi/iwl-ucode.c)71
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h30
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h28
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c159
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c903
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h138
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.c463
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.h70
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c1148
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h269
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c55
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.c856
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.h161
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.c1114
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h82
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/1000.c (renamed from drivers/net/wireless/iwlwifi/iwl-1000.c)19
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/2000.c (renamed from drivers/net/wireless/iwlwifi/iwl-2000.c)24
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/5000.c (renamed from drivers/net/wireless/iwlwifi/iwl-5000.c)20
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/6000.c (renamed from drivers/net/wireless/iwlwifi/iwl-6000.c)48
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/cfg.h (renamed from drivers/net/wireless/iwlwifi/iwl-cfg.h)0
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c (renamed from drivers/net/wireless/iwlwifi/iwl-pci.c)5
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h)25
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c)106
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie.c)390
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c)200
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig39
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Makefile10
-rw-r--r--drivers/net/wireless/iwmc3200wifi/bus.h57
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c882
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.h31
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c1002
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h509
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debug.h123
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debugfs.c488
-rw-r--r--drivers/net/wireless/iwmc3200wifi/eeprom.c234
-rw-r--r--drivers/net/wireless/iwmc3200wifi/eeprom.h127
-rw-r--r--drivers/net/wireless/iwmc3200wifi/fw.c416
-rw-r--r--drivers/net/wireless/iwmc3200wifi/fw.h100
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.c470
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.h237
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h367
-rw-r--r--drivers/net/wireless/iwmc3200wifi/lmac.h484
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c847
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c191
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c1701
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.h60
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c509
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.h64
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.c3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.h283
-rw-r--r--drivers/net/wireless/iwmc3200wifi/tx.c529
-rw-r--r--drivers/net/wireless/iwmc3200wifi/umac.h789
-rw-r--r--drivers/net/wireless/libertas/cfg.c47
-rw-r--r--drivers/net/wireless/libertas/cmd.c25
-rw-r--r--drivers/net/wireless/libertas/cmd.h4
-rw-r--r--drivers/net/wireless/libertas/debugfs.c4
-rw-r--r--drivers/net/wireless/libertas/dev.h2
-rw-r--r--drivers/net/wireless/libertas/firmware.c2
-rw-r--r--drivers/net/wireless/libertas/host.h1
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c1
-rw-r--r--drivers/net/wireless/libertas/if_usb.c4
-rw-r--r--drivers/net/wireless/libertas/main.c11
-rw-r--r--drivers/net/wireless/libertas/mesh.c7
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c56
-rw-r--r--drivers/net/wireless/mwifiex/11n.c14
-rw-r--r--drivers/net/wireless/mwifiex/11n.h3
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c23
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.h7
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c445
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c31
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c7
-rw-r--r--drivers/net/wireless/mwifiex/decl.h9
-rw-r--r--drivers/net/wireless/mwifiex/fw.h60
-rw-r--r--drivers/net/wireless/mwifiex/ie.c191
-rw-r--r--drivers/net/wireless/mwifiex/init.c67
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h21
-rw-r--r--drivers/net/wireless/mwifiex/join.c20
-rw-r--r--drivers/net/wireless/mwifiex/main.c11
-rw-r--r--drivers/net/wireless/mwifiex/main.h41
-rw-r--r--drivers/net/wireless/mwifiex/scan.c108
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c6
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c114
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c151
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c11
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c290
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c10
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c322
-rw-r--r--drivers/net/wireless/mwifiex/usb.c28
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c3
-rw-r--r--drivers/net/wireless/mwl8k.c5
-rw-r--r--drivers/net/wireless/orinoco/cfg.c11
-rw-r--r--drivers/net/wireless/p54/eeprom.c4
-rw-r--r--drivers/net/wireless/p54/fwio.c2
-rw-r--r--drivers/net/wireless/p54/p54usb.c2
-rw-r--r--drivers/net/wireless/p54/txrx.c6
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c2
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c13
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig8
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h181
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c456
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c15
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c9
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c23
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c20
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/leds.c2
-rw-r--r--drivers/net/wireless/rtlwifi/base.c4
-rw-r--r--drivers/net/wireless/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c7
-rw-r--r--drivers/net/wireless/rtlwifi/core.c14
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c4
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c33
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/fw.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c34
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c46
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c14
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h1
-rw-r--r--drivers/net/wireless/ti/Kconfig1
-rw-r--r--drivers/net/wireless/ti/Makefile1
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c9
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c3
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c67
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h1
-rw-r--r--drivers/net/wireless/ti/wl12xx/Makefile2
-rw-r--r--drivers/net/wireless/ti/wl12xx/acx.h237
-rw-r--r--drivers/net/wireless/ti/wl12xx/cmd.c58
-rw-r--r--drivers/net/wireless/ti/wl12xx/debugfs.c243
-rw-r--r--drivers/net/wireless/ti/wl12xx/debugfs.h28
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c621
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h22
-rw-r--r--drivers/net/wireless/ti/wl18xx/Kconfig7
-rw-r--r--drivers/net/wireless/ti/wl18xx/Makefile3
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.c111
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.h287
-rw-r--r--drivers/net/wireless/ti/wl18xx/conf.h111
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c403
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.h28
-rw-r--r--drivers/net/wireless/ti/wl18xx/io.c75
-rw-r--r--drivers/net/wireless/ti/wl18xx/io.h28
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c1610
-rw-r--r--drivers/net/wireless/ti/wl18xx/reg.h191
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c127
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.h46
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h95
-rw-r--r--drivers/net/wireless/ti/wlcore/Kconfig1
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c16
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h259
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.c184
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c173
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h40
-rw-r--r--drivers/net/wireless/ti/wlcore/conf.h99
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c643
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.h87
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c39
-rw-r--r--drivers/net/wireless/ti/wlcore/hw_ops.h90
-rw-r--r--drivers/net/wireless/ti/wlcore/ini.h22
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c62
-rw-r--r--drivers/net/wireless/ti/wlcore/io.c61
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h145
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c921
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c37
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c50
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h15
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c61
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.h19
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c91
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c20
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c112
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c282
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h53
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h119
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h (renamed from drivers/net/wireless/ti/wlcore/wl12xx.h)75
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h2
-rw-r--r--drivers/net/xen-netback/netback.c4
-rw-r--r--drivers/net/xen-netfront.c8
-rw-r--r--drivers/nfc/nfcwilink.c7
-rw-r--r--drivers/nfc/pn533.c846
-rw-r--r--drivers/nfc/pn544_hci.c47
-rw-r--r--drivers/of/base.c52
-rw-r--r--drivers/of/irq.c2
-rw-r--r--drivers/of/of_mdio.c16
-rw-r--r--drivers/of/of_mtd.c2
-rw-r--r--drivers/of/platform.c8
-rw-r--r--drivers/oprofile/oprofile_perf.c23
-rw-r--r--drivers/parisc/dino.c16
-rw-r--r--drivers/parisc/iosapic.c4
-rw-r--r--drivers/parisc/lba_pci.c26
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/access.c6
-rw-r--r--drivers/pci/bus.c2
-rw-r--r--drivers/pci/hotplug-pci.c30
-rw-r--r--drivers/pci/hotplug/acpiphp.h4
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c7
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c67
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c14
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c35
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c14
-rw-r--r--drivers/pci/hotplug/cpqphp_pci.c8
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c10
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c2
-rw-r--r--drivers/pci/hotplug/ibmphp_pci.c2
-rw-r--r--drivers/pci/hotplug/pciehp.h4
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c101
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c28
-rw-r--r--drivers/watchdog/da9052_wdt.c1
-rw-r--r--drivers/watchdog/f71808e_wdt.c4
-rw-r--r--drivers/watchdog/hpwdt.c4
-rw-r--r--drivers/watchdog/iTCO_wdt.c214
-rw-r--r--drivers/watchdog/ie6xx_wdt.c4
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c2
-rw-r--r--drivers/watchdog/omap_wdt.c24
-rw-r--r--drivers/watchdog/orion_wdt.c211
-rw-r--r--drivers/watchdog/s3c2410_wdt.c16
-rw-r--r--drivers/watchdog/sa1100_wdt.c14
-rw-r--r--drivers/watchdog/sch311x_wdt.c10
-rw-r--r--drivers/watchdog/sp805_wdt.c4
-rw-r--r--drivers/watchdog/watchdog_dev.c2
-rw-r--r--drivers/xen/Kconfig8
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/events.c9
-rw-r--r--drivers/xen/mcelog.c414
-rw-r--r--drivers/xen/pci.c2
-rw-r--r--drivers/xen/pcpu.c371
-rw-r--r--drivers/xen/platform-pci.c3
-rw-r--r--drivers/xen/xen-acpi-processor.c9
-rw-r--r--drivers/xen/xen-pciback/conf_space.c6
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c20
-rw-r--r--drivers/zorro/zorro.c2
3793 files changed, 290045 insertions, 123082 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index bfc918633fd9..ece958d3762e 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -112,6 +112,8 @@ source "drivers/auxdisplay/Kconfig"
 
 source "drivers/uio/Kconfig"
 
+source "drivers/vfio/Kconfig"
+
 source "drivers/vlynq/Kconfig"
 
 source "drivers/virtio/Kconfig"
@@ -148,4 +150,6 @@ source "drivers/iio/Kconfig"
 
 source "drivers/vme/Kconfig"
 
+source "drivers/pwm/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 2ba29ffef2cb..5b421840c48d 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -8,6 +8,7 @@
 # GPIO must come after pinctrl as gpios may need to mux pins etc
 obj-y += pinctrl/
 obj-y += gpio/
+obj-y += pwm/
 obj-$(CONFIG_PCI) += pci/
 obj-$(CONFIG_PARISC) += parisc/
 obj-$(CONFIG_RAPIDIO) += rapidio/
@@ -59,6 +60,7 @@ obj-$(CONFIG_ATM) += atm/
 obj-$(CONFIG_FUSION) += message/
 obj-y += firewire/
 obj-$(CONFIG_UIO) += uio/
+obj-$(CONFIG_VFIO) += vfio/
 obj-y += cdrom/
 obj-y += auxdisplay/
 obj-$(CONFIG_PCCARD) += pcmcia/
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 6512b20aeccd..d5fdd36190cc 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -61,7 +61,6 @@ static int acpi_ac_open_fs(struct inode *inode, struct file *file);
 
 static int acpi_ac_add(struct acpi_device *device);
 static int acpi_ac_remove(struct acpi_device *device, int type);
-static int acpi_ac_resume(struct acpi_device *device);
 static void acpi_ac_notify(struct acpi_device *device, u32 event);
 
 static const struct acpi_device_id ac_device_ids[] = {
@@ -70,6 +69,11 @@ static const struct acpi_device_id ac_device_ids[] = {
 };
 MODULE_DEVICE_TABLE(acpi, ac_device_ids);
 
+#ifdef CONFIG_PM_SLEEP
+static int acpi_ac_resume(struct device *dev);
+#endif
+static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
+
 static struct acpi_driver acpi_ac_driver = {
 	.name = "ac",
 	.class = ACPI_AC_CLASS,
@@ -78,9 +82,9 @@ static struct acpi_driver acpi_ac_driver = {
 	.ops = {
 		.add = acpi_ac_add,
 		.remove = acpi_ac_remove,
-		.resume = acpi_ac_resume,
 		.notify = acpi_ac_notify,
 		},
+	.drv.pm = &acpi_ac_pm,
 };
 
 struct acpi_ac {
@@ -292,7 +296,9 @@ static int acpi_ac_add(struct acpi_device *device)
 	ac->charger.properties = ac_props;
 	ac->charger.num_properties = ARRAY_SIZE(ac_props);
 	ac->charger.get_property = get_ac_property;
-	power_supply_register(&ac->device->dev, &ac->charger);
+	result = power_supply_register(&ac->device->dev, &ac->charger);
+	if (result)
+		goto end;
 
 	printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
 	       acpi_device_name(device), acpi_device_bid(device),
@@ -309,13 +315,19 @@ static int acpi_ac_add(struct acpi_device *device)
 	return result;
 }
 
-static int acpi_ac_resume(struct acpi_device *device)
+#ifdef CONFIG_PM_SLEEP
+static int acpi_ac_resume(struct device *dev)
 {
 	struct acpi_ac *ac;
 	unsigned old_state;
-	if (!device || !acpi_driver_data(device))
+
+	if (!dev)
 		return -EINVAL;
-	ac = acpi_driver_data(device);
+
+	ac = acpi_driver_data(to_acpi_device(dev));
+	if (!ac)
+		return -EINVAL;
+
 	old_state = ac->state;
 	if (acpi_ac_get_state(ac))
 		return 0;
@@ -323,6 +335,7 @@ static int acpi_ac_resume(struct acpi_device *device)
 	kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
 	return 0;
 }
+#endif
 
 static int acpi_ac_remove(struct acpi_device *device, int type)
 {
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index d98571385656..24c807f96636 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -341,7 +341,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
 {
 	struct acpi_memory_device *mem_device;
 	struct acpi_device *device;
-
+	u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
 
 	switch (event) {
 	case ACPI_NOTIFY_BUS_CHECK:
@@ -354,15 +354,20 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
 			"\nReceived DEVICE CHECK notification for device\n"));
 		if (acpi_memory_get_device(handle, &mem_device)) {
 			printk(KERN_ERR PREFIX "Cannot find driver data\n");
-			return;
+			break;
 		}
 
-		if (!acpi_memory_check_device(mem_device)) {
-			if (acpi_memory_enable_device(mem_device))
-				printk(KERN_ERR PREFIX
-				       "Cannot enable memory device\n");
+		if (acpi_memory_check_device(mem_device))
+			break;
+
+		if (acpi_memory_enable_device(mem_device)) {
+			printk(KERN_ERR PREFIX "Cannot enable memory device\n");
+			break;
 		}
+
+		ost_code = ACPI_OST_SC_SUCCESS;
 		break;
+
 	case ACPI_NOTIFY_EJECT_REQUEST:
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 			"\nReceived EJECT REQUEST notification for device\n"));
@@ -383,19 +388,35 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
 		 * TBD: Can also be disabled by Callback registration
 		 * with generic sysfs driver
 		 */
-		if (acpi_memory_disable_device(mem_device))
-			printk(KERN_ERR PREFIX
-			       "Disable memory device\n");
+		if (acpi_memory_disable_device(mem_device)) {
+			printk(KERN_ERR PREFIX "Disable memory device\n");
+			/*
+			 * If _EJ0 was called but failed, _OST is not
+			 * necessary.
+			 */
+			if (mem_device->state == MEMORY_INVALID_STATE)
+				return;
+
+			break;
+		}
+
 		/*
 		 * TBD: Invoke acpi_bus_remove to cleanup data structures
 		 */
-		break;
+
+		/* _EJ0 succeeded; _OST is not necessary */
+		return;
+
 	default:
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 			"Unsupported event [0x%x]\n", event));
-		break;
+
+		/* non-hotplug event; possibly handled by other handler */
+		return;
 	}
 
+	/* Inform firmware that the hotplug operation has completed */
+	(void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
 	return;
 }
 
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index a43fa1a57d57..af4aad6ee2eb 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -36,6 +36,7 @@
 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
 static DEFINE_MUTEX(isolated_cpus_lock);
+static DEFINE_MUTEX(round_robin_lock);
 
 static unsigned long power_saving_mwait_eax;
 
@@ -107,7 +108,7 @@ static void round_robin_cpu(unsigned int tsk_index)
 	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
 		return;
 
-	mutex_lock(&isolated_cpus_lock);
+	mutex_lock(&round_robin_lock);
 	cpumask_clear(tmp);
 	for_each_cpu(cpu, pad_busy_cpus)
 		cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
@@ -116,7 +117,7 @@ static void round_robin_cpu(unsigned int tsk_index)
 	if (cpumask_empty(tmp))
 		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
 	if (cpumask_empty(tmp)) {
-		mutex_unlock(&isolated_cpus_lock);
+		mutex_unlock(&round_robin_lock);
 		return;
 	}
 	for_each_cpu(cpu, tmp) {
@@ -131,7 +132,7 @@ static void round_robin_cpu(unsigned int tsk_index)
 	tsk_in_cpu[tsk_index] = preferred_cpu;
 	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
 	cpu_weight[preferred_cpu]++;
-	mutex_unlock(&isolated_cpus_lock);
+	mutex_unlock(&round_robin_lock);
 
 	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
 }
@@ -144,7 +145,7 @@ static void exit_round_robin(unsigned int tsk_index)
 }
 
 static unsigned int idle_pct = 5; /* percentage */
-static unsigned int round_robin_time = 10; /* second */
+static unsigned int round_robin_time = 1; /* second */
 static int power_saving_thread(void *data)
 {
 	struct sched_param param = {.sched_priority = 1};
@@ -234,7 +235,7 @@ static int create_power_saving_task(void)
 
 	ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
 		(void *)(unsigned long)ps_tsk_num,
-		"power_saving/%d", ps_tsk_num);
+		"acpi_pad/%d", ps_tsk_num);
 	rc = IS_ERR(ps_tsks[ps_tsk_num]) ? PTR_ERR(ps_tsks[ps_tsk_num]) : 0;
 	if (!rc)
 		ps_tsk_num++;
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 793b8cc8e256..0a1b3435f920 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -134,12 +134,14 @@ acpi-y += \
 	tbinstal.o \
 	tbutils.o \
 	tbxface.o \
+	tbxfload.o \
 	tbxfroot.o
 
 acpi-y += \
 	utaddress.o \
 	utalloc.o \
 	utcopy.o \
+	utexcep.o \
 	utdebug.o \
 	utdecode.o \
 	utdelete.o \
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index d700f63e4701..c0a43b38c6a3 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -237,7 +237,7 @@ u32 acpi_ev_install_sci_handler(void);
 
 acpi_status acpi_ev_remove_sci_handler(void);
 
-u32 acpi_ev_initialize_sCI(u32 program_sCI);
+u32 acpi_ev_initialize_SCI(u32 program_SCI);
 
 ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_ev_terminate(void))
 #endif	/* __ACEVENTS_H__ */
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 4f7d3f57d05c..ce79100fb5eb 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -278,8 +278,7 @@ ACPI_EXTERN acpi_cache_t *acpi_gbl_operand_cache;
 
 /* Global handlers */
 
-ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_device_notify;
-ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_system_notify;
+ACPI_EXTERN struct acpi_global_notify_handler acpi_gbl_global_notify[2];
 ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler;
 ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;
 ACPI_EXTERN acpi_tbl_handler acpi_gbl_table_handler;
@@ -327,14 +326,6 @@ extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
 
 #endif
 
-/* Exception codes */
-
-extern char const *acpi_gbl_exception_names_env[];
-extern char const *acpi_gbl_exception_names_pgm[];
-extern char const *acpi_gbl_exception_names_tbl[];
-extern char const *acpi_gbl_exception_names_aml[];
-extern char const *acpi_gbl_exception_names_ctrl[];
-
 /*****************************************************************************
  *
  * Namespace globals
@@ -463,4 +454,12 @@ ACPI_EXTERN u32 acpi_gbl_size_of_acpi_objects;
 
 #endif	/* ACPI_DEBUGGER */
 
+/*****************************************************************************
+ *
+ * Info/help support
+ *
+ ****************************************************************************/
+
+extern const struct ah_predefined_name asl_predefined_info[];
+
 #endif	/* __ACGLOBAL_H__ */
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 5ccb99ae3a6f..5de4ec72766d 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -83,22 +83,22 @@ acpi_status acpi_hw_clear_acpi_status(void);
 /*
  * hwsleep - sleep/wake support (Legacy sleep registers)
  */
-acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_legacy_sleep(u8 sleep_state);
 
-acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state);
 
-acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_legacy_wake(u8 sleep_state);
 
 /*
  * hwesleep - sleep/wake support (Extended FADT-V5 sleep registers)
  */
 void acpi_hw_execute_sleep_method(char *method_name, u32 integer_argument);
 
-acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_extended_sleep(u8 sleep_state);
 
-acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_extended_wake_prep(u8 sleep_state);
 
-acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_extended_wake(u8 sleep_state);
 
 /*
  * hwvalid - Port I/O with validation
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index e3922ca20e7f..cc80fe10e8ea 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -299,7 +299,7 @@ acpi_status(*ACPI_INTERNAL_METHOD) (struct acpi_walk_state * walk_state);
  * Information structure for ACPI predefined names.
  * Each entry in the table contains the following items:
  *
- * Name                   - The ACPI reserved name
+ * name                   - The ACPI reserved name
  * param_count            - Number of arguments to the method
  * expected_return_btypes - Allowed type(s) for the return value
  */
@@ -404,6 +404,13 @@ struct acpi_gpe_handler_info {
 	u8 originally_enabled;	/* True if GPE was originally enabled */
 };
 
+/* Notify info for implicit notify, multiple device objects */
+
+struct acpi_gpe_notify_info {
+	struct acpi_namespace_node *device_node;	/* Device to be notified */
+	struct acpi_gpe_notify_info *next;
+};
+
 struct acpi_gpe_notify_object {
 	struct acpi_namespace_node *node;
 	struct acpi_gpe_notify_object *next;
@@ -412,7 +419,7 @@ struct acpi_gpe_notify_object {
 union acpi_gpe_dispatch_info {
 	struct acpi_namespace_node *method_node;	/* Method node for this GPE level */
 	struct acpi_gpe_handler_info *handler;	/* Installed GPE handler */
-	struct acpi_gpe_notify_object device;	/* List of _PRW devices for implicit notify */
+	struct acpi_gpe_notify_info *notify_list;	/* List of _PRW devices for implicit notifies */
 };
 
 /*
@@ -420,7 +427,7 @@ union acpi_gpe_dispatch_info {
  * NOTE: Important to keep this struct as small as possible.
  */
 struct acpi_gpe_event_info {
-	union acpi_gpe_dispatch_info dispatch;	/* Either Method or Handler */
+	union acpi_gpe_dispatch_info dispatch;	/* Either Method, Handler, or notify_list */
 	struct acpi_gpe_register_info *register_info;	/* Backpointer to register info */
 	u8 flags;	/* Misc info about this GPE */
 	u8 gpe_number;	/* This GPE */
@@ -600,13 +607,22 @@ acpi_status(*acpi_parse_downwards) (struct acpi_walk_state * walk_state,
 
 typedef acpi_status(*acpi_parse_upwards) (struct acpi_walk_state * walk_state);
 
+/* Global handlers for AML Notifies */
+
+struct acpi_global_notify_handler {
+	acpi_notify_handler handler;
+	void *context;
+};
+
 /*
  * Notify info - used to pass info to the deferred notify
  * handler/dispatcher.
  */
 struct acpi_notify_info {
-	ACPI_STATE_COMMON struct acpi_namespace_node *node;
-	union acpi_operand_object *handler_obj;
+	ACPI_STATE_COMMON u8 handler_list_id;
+	struct acpi_namespace_node *node;
+	union acpi_operand_object *handler_list_head;
+	struct acpi_global_notify_handler *global;
 };
 
 /* Generic state is union of structs above */
@@ -718,7 +734,7 @@ struct acpi_parse_obj_named {
 	u32 name;	/* 4-byte name or zero if no name */
 };
 
-/* This version is used by the i_aSL compiler only */
+/* This version is used by the iASL compiler only */
 
 #define ACPI_MAX_PARSEOP_NAME 20
 
@@ -787,6 +803,7 @@ struct acpi_parse_state {
 #define ACPI_PARSEOP_IGNORE 0x01
 #define ACPI_PARSEOP_PARAMLIST 0x02
 #define ACPI_PARSEOP_EMPTY_TERMLIST 0x04
+#define ACPI_PARSEOP_PREDEF_CHECKED 0x08
 #define ACPI_PARSEOP_SPECIAL 0x10
 
 /*****************************************************************************
@@ -1075,4 +1092,18 @@ struct acpi_debug_mem_block {
 #define ACPI_MEM_LIST_MAX 1
 #define ACPI_NUM_MEM_LISTS 2
 
+/*****************************************************************************
+ *
+ * Info/help support
+ *
+ ****************************************************************************/
+
+struct ah_predefined_name {
+	char *name;
+	char *description;
+#ifndef ACPI_ASL_COMPILER
+	char *action;
+#endif
+};
+
 #endif	/* __ACLOCAL_H__ */
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index f119f473f71a..832b6198652e 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -62,7 +62,7 @@
  * printf() format helpers
  */
 
-/* Split 64-bit integer into two 32-bit values. Use with %8.8_x%8.8_x */
+/* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */
 
 #define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i), ACPI_LODWORD(i)
 
@@ -283,8 +283,8 @@
 #define ACPI_INSERT_BITS(target, mask, source) target = ((target & (~(mask))) | (source & mask))
 
 /*
- * A struct acpi_namespace_node can appear in some contexts
- * where a pointer to a union acpi_operand_object can also
+ * An object of type struct acpi_namespace_node can appear in some contexts
+ * where a pointer to an object of type union acpi_operand_object can also
  * appear. This macro is used to distinguish them.
 *
 * The "Descriptor" field is the first field in both structures.
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index c065078ca83b..364a1303fb8f 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -113,8 +113,8 @@ struct acpi_object_integer {
 };
 
 /*
- * Note: The String and Buffer object must be identical through the Pointer
- * and length elements. There is code that depends on this.
+ * Note: The String and Buffer object must be identical through the
+ * pointer and length elements. There is code that depends on this.
  *
  * Fields common to both Strings and Buffers
  */
@@ -206,8 +206,7 @@ struct acpi_object_method {
  * Common fields for objects that support ASL notifications
  */
 #define ACPI_COMMON_NOTIFY_INFO \
-	union acpi_operand_object *system_notify;	/* Handler for system notifies */\
-	union acpi_operand_object *device_notify;	/* Handler for driver notifies */\
+	union acpi_operand_object *notify_list[2];	/* Handlers for system/device notifies */\
 	union acpi_operand_object *handler;	/* Handler for Address space */
 
 struct acpi_object_notify_common {	/* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */
@@ -296,10 +295,10 @@ struct acpi_object_buffer_field {
 
 struct acpi_object_notify_handler {
 	ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node;	/* Parent device */
-	u32 handler_type;
-	acpi_notify_handler handler;
+	u32 handler_type;	/* Type: Device/System/Both */
+	acpi_notify_handler handler;	/* Handler address */
 	void *context;
-	struct acpi_object_notify_handler *next;
+	union acpi_operand_object *next[2];	/* Device and System handler lists */
 };
 
 struct acpi_object_addr_handler {
@@ -382,7 +381,7 @@ struct acpi_object_cache_list {
 
 /******************************************************************************
  *
- * union acpi_operand_object Descriptor - a giant union of all of the above
+ * union acpi_operand_object descriptor - a giant union of all of the above
 *
 *****************************************************************************/
 
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index bbb34c9be4e8..3080c017f5ba 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -140,7 +140,7 @@ enum acpi_return_package_types {
  *
  * The main entries in the table each contain the following items:
  *
- * Name            - The ACPI reserved name
+ * name            - The ACPI reserved name
  * param_count     - Number of arguments to the method
  * expected_btypes - Allowed type(s) for the return value.
  *                   0 means that no return value is expected.
@@ -511,14 +511,14 @@ static const union acpi_predefined_info predefined_names[] =
 	{{"_TMP", 0, ACPI_RTYPE_INTEGER}},
 	{{"_TPC", 0, ACPI_RTYPE_INTEGER}},
 	{{"_TPT", 1, 0}},
-	{{"_TRT", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Pkgs) each 2_ref/6_int */
+	{{"_TRT", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Pkgs) each 2 Ref/6 Int */
 	{{{ACPI_PTYPE2, ACPI_RTYPE_REFERENCE, 2, ACPI_RTYPE_INTEGER}, 6, 0}},
 
-	{{"_TSD", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Pkgs) each 5_int with count */
+	{{"_TSD", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Pkgs) each 5 Int with count */
 	{{{ACPI_PTYPE2_COUNT,ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
 
 	{{"_TSP", 0, ACPI_RTYPE_INTEGER}},
-	{{"_TSS", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Pkgs) each 5_int */
+	{{"_TSS", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Pkgs) each 5 Int */
 	{{{ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
 
 	{{"_TST", 0, ACPI_RTYPE_INTEGER}},
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 0404df605bc1..f196e2c9a71f 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -68,7 +68,7 @@
 #define ACPI_WALK_METHOD 0x01
 #define ACPI_WALK_METHOD_RESTART 0x02
 
-/* Flags for i_aSL compiler only */
+/* Flags for iASL compiler only */
 
 #define ACPI_WALK_CONST_REQUIRED 0x10
 #define ACPI_WALK_CONST_OPTIONAL 0x20
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 925ccf22101b..5035327ebccc 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -460,6 +460,8 @@ acpi_ut_short_divide(u64 in_dividend,
 /*
  * utmisc
  */
+void ut_convert_backslashes(char *pathname);
+
 const char *acpi_ut_validate_exception(acpi_status status);
 
 u8 acpi_ut_is_pci_root_bridge(char *id);
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 905280fec0fa..c26f8ff6c3b9 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -182,7 +182,7 @@
 
 /*
  * Combination opcodes (actually two one-byte opcodes)
- * Used by the disassembler and i_aSL compiler
+ * Used by the disassembler and iASL compiler
  */
 #define AML_LGREATEREQUAL_OP (u16) 0x9295
 #define AML_LLESSEQUAL_OP (u16) 0x9294
@@ -280,7 +280,7 @@
 
 /* Multiple/complex types */
 
-#define ARGI_DATAOBJECT 0x12	/* Buffer, String, package or reference to a Node - Used only by size_of operator */
+#define ARGI_DATAOBJECT 0x12	/* Buffer, String, package or reference to a node - Used only by size_of operator */
 #define ARGI_COMPLEXOBJ 0x13	/* Buffer, String, or package (Used by INDEX op only) */
 #define ARGI_REF_OR_STRING 0x14	/* Reference or String (Used by DEREFOF op only) */
 #define ARGI_REGION_OR_BUFFER 0x15	/* Used by LOAD op only */
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 7b2128f274e7..af4947956ec2 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -98,7 +98,7 @@
 #define ACPI_RESTAG_TRANSLATION "_TRA"
 #define ACPI_RESTAG_TRANSTYPE "_TRS"	/* Sparse(1), Dense(0) */
 #define ACPI_RESTAG_TYPE "_TTP"	/* Translation(1), Static (0) */
-#define ACPI_RESTAG_XFERTYPE "_SIZ"	/* 8(0), 8_and16(1), 16(2) */
+#define ACPI_RESTAG_XFERTYPE "_SIZ"	/* 8(0), 8And16(1), 16(2) */
 #define ACPI_RESTAG_VENDORDATA "_VEN"
 
 /* Default sizes for "small" resource descriptors */
@@ -235,7 +235,7 @@ AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_ADDRESS_COMMON};
 
 struct aml_resource_extended_address64 {
 	AML_RESOURCE_LARGE_HEADER_COMMON
-	AML_RESOURCE_ADDRESS_COMMON u8 revision_iD;
+	AML_RESOURCE_ADDRESS_COMMON u8 revision_ID;
 	u8 reserved;
 	u64 granularity;
 	u64 minimum;
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 80eb1900297f..c8b5e2565b98 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -62,7 +62,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
  *
  * FUNCTION:    acpi_ds_execute_arguments
  *
- * PARAMETERS:  Node            - Object NS node
+ * PARAMETERS:  node            - Object NS node
  *              scope_node      - Parent NS node
  *              aml_length      - Length of executable AML
  *              aml_start       - Pointer to the AML
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index effe4ca1133f..465f02134b89 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -56,7 +56,7 @@ ACPI_MODULE_NAME("dscontrol")
  * FUNCTION:    acpi_ds_exec_begin_control_op
  *
  * PARAMETERS:  walk_list       - The list that owns the walk stack
- *              Op              - The control Op
+ *              op              - The control Op
  *
  * RETURN:      Status
  *
@@ -153,7 +153,7 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
  * FUNCTION:    acpi_ds_exec_end_control_op
  *
  * PARAMETERS:  walk_list       - The list that owns the walk stack
- *              Op              - The control Op
+ *              op              - The control Op
  *
  * RETURN:      Status
  *
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index cd243cf2cab2..3da6fd8530c5 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -53,16 +53,84 @@
 ACPI_MODULE_NAME("dsfield")
 
 /* Local prototypes */
+#ifdef ACPI_ASL_COMPILER
+#include "acdisasm.h"
+static acpi_status
+acpi_ds_create_external_region(acpi_status lookup_status,
+			       union acpi_parse_object *op,
+			       char *path,
+			       struct acpi_walk_state *walk_state,
+			       struct acpi_namespace_node **node);
+#endif
+
 static acpi_status
 acpi_ds_get_field_names(struct acpi_create_field_info *info,
			struct acpi_walk_state *walk_state,
			union acpi_parse_object *arg);
 
+#ifdef ACPI_ASL_COMPILER
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_create_external_region (iASL Disassembler only)
+ *
+ * PARAMETERS:  lookup_status   - Status from ns_lookup operation
+ *              op              - Op containing the Field definition and args
+ *              path            - Pathname of the region
+ *              walk_state      - Current method state
+ *              node            - Where the new region node is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Add region to the external list if NOT_FOUND. Create a new
+ *              region node/object.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_create_external_region(acpi_status lookup_status,
+			       union acpi_parse_object *op,
+			       char *path,
+			       struct acpi_walk_state *walk_state,
+			       struct acpi_namespace_node **node)
+{
+	acpi_status status;
+	union acpi_operand_object *obj_desc;
+
+	if (lookup_status != AE_NOT_FOUND) {
+		return (lookup_status);
+	}
+
+	/*
+	 * Table disassembly:
+	 * operation_region not found. Generate an External for it, and
+	 * insert the name into the namespace.
+	 */
+	acpi_dm_add_to_external_list(op, path, ACPI_TYPE_REGION, 0);
+	status = acpi_ns_lookup(walk_state->scope_info, path, ACPI_TYPE_REGION,
+				ACPI_IMODE_LOAD_PASS1, ACPI_NS_SEARCH_PARENT,
+				walk_state, node);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* Must create and install a region object for the new node */
+
+	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_REGION);
+	if (!obj_desc) {
+		return (AE_NO_MEMORY);
+	}
+
+	obj_desc->region.node = *node;
+	status = acpi_ns_attach_object(*node, obj_desc, ACPI_TYPE_REGION);
+	return (status);
+}
+#endif
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ds_create_buffer_field
  *
- * PARAMETERS:  Op              - Current parse op (create_xXField)
+ * PARAMETERS:  op              - Current parse op (create_XXField)
  *              walk_state      - Current state
  *
  * RETURN:      Status
@@ -99,7 +167,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
 
		arg = acpi_ps_get_arg(op, 3);
	} else {
-		/* For all other create_xXXField operators, name is the 3rd argument */
+		/* For all other create_XXXField operators, name is the 3rd argument */
 
		arg = acpi_ps_get_arg(op, 2);
	}
@@ -203,9 +271,9 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
 *
 * FUNCTION:    acpi_ds_get_field_names
 *
- * PARAMETERS:  Info            - create_field info structure
+ * PARAMETERS:  info            - create_field info structure
 *              walk_state      - Current method state
- *              Arg             - First parser arg for the field name list
+ *              arg             - First parser arg for the field name list
 *
 * RETURN:      Status
 *
@@ -234,10 +302,10 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
	while (arg) {
		/*
		 * Four types of field elements are handled:
-		 * 1) Name - Enters a new named field into the namespace
-		 * 2) Offset - specifies a bit offset
+		 * 1) name - Enters a new named field into the namespace
+		 * 2) offset - specifies a bit offset
		 * 3) access_as - changes the access mode/attributes
-		 * 4) Connection - Associate a resource template with the field
+		 * 4) connection - Associate a resource template with the field
		 */
		switch (arg->common.aml_opcode) {
		case AML_INT_RESERVEDFIELD_OP:
@@ -389,7 +457,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
 *
 * FUNCTION:    acpi_ds_create_field
 *
- * PARAMETERS:  Op              - Op containing the Field definition and args
+ * PARAMETERS:  op              - Op containing the Field definition and args
 *              region_node     - Object for the containing Operation Region
 *              walk_state      - Current method state
 *
@@ -413,12 +481,19 @@ acpi_ds_create_field(union acpi_parse_object *op,
	/* First arg is the name of the parent op_region (must already exist) */
 
	arg = op->common.value.arg;
+
	if (!region_node) {
		status =
		    acpi_ns_lookup(walk_state->scope_info,
				   arg->common.value.name, ACPI_TYPE_REGION,
				   ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
				   walk_state, &region_node);
+#ifdef ACPI_ASL_COMPILER
+		status = acpi_ds_create_external_region(status, arg,
+							arg->common.value.name,
+							walk_state,
+							&region_node);
+#endif
		if (ACPI_FAILURE(status)) {
			ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
			return_ACPI_STATUS(status);
@@ -446,7 +521,7 @@ acpi_ds_create_field(union acpi_parse_object *op,
 *
 * FUNCTION:    acpi_ds_init_field_objects
 *
- * PARAMETERS:  Op              - Op containing the Field definition and args
+ * PARAMETERS:  op              - Op containing the Field definition and args
 *              walk_state      - Current method state
 *
 * RETURN:      Status
@@ -561,7 +636,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
 *
 * FUNCTION:    acpi_ds_create_bank_field
 *
- * PARAMETERS:  Op              - Op containing the Field definition and args
+ * PARAMETERS:  op              - Op containing the Field definition and args
 *              region_node     - Object for the containing Operation Region
 *              walk_state      - Current method state
 *
@@ -591,6 +666,12 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
			   arg->common.value.name, ACPI_TYPE_REGION,
			   ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
			   walk_state, &region_node);
+#ifdef ACPI_ASL_COMPILER
+	status = acpi_ds_create_external_region(status, arg,
+						arg->common.value.name,
+						walk_state,
+						&region_node);
+#endif
	if (ACPI_FAILURE(status)) {
		ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
		return_ACPI_STATUS(status);
@@ -645,7 +726,7 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
 *
 * FUNCTION:    acpi_ds_create_index_field
 *
- * PARAMETERS:  Op              - Op containing the Field definition and args
+ * PARAMETERS:  op              - Op containing the Field definition and args
 *              region_node     - Object for the containing Operation Region
 *              walk_state      - Current method state
 *
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 9e5ac7f780a7..87eff701ecfa 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -60,8 +60,8 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
  * FUNCTION:    acpi_ds_init_one_object
  *
  * PARAMETERS:  obj_handle      - Node for the object
- *              Level           - Current nesting level
- *              Context         - Points to a init info struct
+ *              level           - Current nesting level
+ *              context         - Points to a init info struct
  *              return_value    - Not used
  *
  * RETURN:      Status
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 00f5dab5bcc0..aa9a5d4e4052 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -61,7 +61,7 @@ acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);
 *
 * FUNCTION:    acpi_ds_method_error
 *
- * PARAMETERS:  Status          - Execution status
+ * PARAMETERS:  status          - Execution status
 *              walk_state      - Current state
 *
 * RETURN:      Status
@@ -306,9 +306,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
 *
 * FUNCTION:    acpi_ds_call_control_method
 *
- * PARAMETERS:  Thread          - Info for this thread
+ * PARAMETERS:  thread          - Info for this thread
 *              this_walk_state - Current walk state
- *              Op              - Current Op to be walked
+ *              op              - Current Op to be walked
 *
 * RETURN:      Status
 *
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index b40bd507be5d..8d55cebaa656 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -177,7 +177,7 @@ void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state)
 *
 * FUNCTION:    acpi_ds_method_data_init_args
 *
- * PARAMETERS:  *Params         - Pointer to a parameter list for the method
+ * PARAMETERS:  *params         - Pointer to a parameter list for the method
 *              max_param_count - The arg count for this method
 *              walk_state      - Current walk state object
 *
@@ -232,11 +232,11 @@
 *
 * FUNCTION:    acpi_ds_method_data_get_node
 *
- * PARAMETERS:  Type            - Either ACPI_REFCLASS_LOCAL or
+ * PARAMETERS:  type            - Either ACPI_REFCLASS_LOCAL or
 *                                ACPI_REFCLASS_ARG
- *              Index           - Which Local or Arg whose type to get
+ *              index           - Which Local or Arg whose type to get
 *              walk_state      - Current walk state object
- *              Node            - Where the node is returned.
+ *              node            - Where the node is returned.
 *
 * RETURN:      Status and node
 *
@@ -296,10 +296,10 @@ acpi_ds_method_data_get_node(u8 type,
 *
 * FUNCTION:    acpi_ds_method_data_set_value
 *
- * PARAMETERS:  Type            - Either ACPI_REFCLASS_LOCAL or
+ * PARAMETERS:  type            - Either ACPI_REFCLASS_LOCAL or
 *                                ACPI_REFCLASS_ARG
- *              Index           - Which Local or Arg to get
- *              Object          - Object to be inserted into the stack entry
+ *              index           - Which Local or Arg to get
+ *              object          - Object to be inserted into the stack entry
 *              walk_state      - Current walk state object
 *
 * RETURN:      Status
@@ -336,7 +336,7 @@ acpi_ds_method_data_set_value(u8 type,
	 * Increment ref count so object can't be deleted while installed.
	 * NOTE: We do not copy the object in order to preserve the call by
	 * reference semantics of ACPI Control Method invocation.
-	 * (See ACPI Specification 2.0_c)
+	 * (See ACPI Specification 2.0C)
	 */
	acpi_ut_add_reference(object);
 
@@ -350,9 +350,9 @@
 *
 * FUNCTION:    acpi_ds_method_data_get_value
 *
- * PARAMETERS:  Type            - Either ACPI_REFCLASS_LOCAL or
+ * PARAMETERS:  type            - Either ACPI_REFCLASS_LOCAL or
 *                                ACPI_REFCLASS_ARG
- *              Index           - Which local_var or argument to get
+ *              index           - Which localVar or argument to get
 *              walk_state      - Current walk state object
 *              dest_desc       - Where Arg or Local value is returned
 *
@@ -458,9 +458,9 @@ acpi_ds_method_data_get_value(u8 type,
 *
 * FUNCTION:    acpi_ds_method_data_delete_value
 *
- * PARAMETERS:  Type            - Either ACPI_REFCLASS_LOCAL or
+ * PARAMETERS:  type            - Either ACPI_REFCLASS_LOCAL or
 *                                ACPI_REFCLASS_ARG
- *              Index           - Which local_var or argument to delete
+ *              index           - Which localVar or argument to delete
 *              walk_state      - Current walk state object
 *
 * RETURN:      None
@@ -515,9 +515,9 @@ acpi_ds_method_data_delete_value(u8 type,
 *
 * FUNCTION:    acpi_ds_store_object_to_local
 *
- * PARAMETERS:  Type            - Either ACPI_REFCLASS_LOCAL or
+ * PARAMETERS:  type            - Either ACPI_REFCLASS_LOCAL or
 *                                ACPI_REFCLASS_ARG
- *              Index           - Which Local or Arg to set
+ *              index           - Which Local or Arg to set
 *              obj_desc        - Value to be stored
 *              walk_state      - Current walk state
 *
@@ -670,8 +670,8 @@ acpi_ds_store_object_to_local(u8 type,
 *
 * FUNCTION:    acpi_ds_method_data_get_type
 *
- * PARAMETERS:  Opcode          - Either AML_LOCAL_OP or AML_ARG_OP
- *              Index           - Which Local or Arg whose type to get
+ * PARAMETERS:  opcode          - Either AML_LOCAL_OP or AML_ARG_OP
+ *              index           - Which Local or Arg whose type to get
 *              walk_state      - Current walk state object
 *
 * RETURN:      Data type of current value of the selected Arg or Local
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index d7045ca3e32a..68592dd34960 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -64,7 +64,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
 * FUNCTION:    acpi_ds_build_internal_object
 *
 * PARAMETERS:  walk_state      - Current walk state
- *              Op              - Parser object to be translated
+ *              op              - Parser object to be translated
 *              obj_desc_ptr    - Where the ACPI internal object is returned
 *
 * RETURN:      Status
@@ -250,7 +250,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
 * FUNCTION:    acpi_ds_build_internal_buffer_obj
 *
 * PARAMETERS:  walk_state      - Current walk state
- *              Op              - Parser object to be translated
+ *              op              - Parser object to be translated
 *              buffer_length   - Length of the buffer
 *              obj_desc_ptr    - Where the ACPI internal object is returned
 *
@@ -354,7 +354,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
 * FUNCTION:    acpi_ds_build_internal_package_obj
 *
 * PARAMETERS:  walk_state      - Current walk state
- *              Op              - Parser object to be translated
+ *              op              - Parser object to be translated
 *              element_count   - Number of elements in the package - this is
 *                                the num_elements argument to Package()
 *              obj_desc_ptr    - Where the ACPI internal object is returned
@@ -547,8 +547,8 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
 * FUNCTION:    acpi_ds_create_node
 *
 * PARAMETERS:  walk_state      - Current walk state
- *              Node            - NS Node to be initialized
- *              Op              - Parser object to be translated
+ *              node            - NS Node to be initialized
+ *              op              - Parser object to be translated
 *
 * RETURN:      Status
 *
@@ -611,8 +611,8 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state,
 * FUNCTION:    acpi_ds_init_object_from_op
 *
 * PARAMETERS:  walk_state      - Current walk state
- *              Op              - Parser op used to init the internal object
- *              Opcode          - AML opcode associated with the object
+ *              op              - Parser op used to init the internal object
+ *              opcode          - AML opcode associated with the object
 *              ret_obj_desc    - Namespace object to be initialized
 *
 * RETURN:      Status
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index e5eff7585102..aa34d8984d34 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -286,7 +286,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
286 * FUNCTION: acpi_ds_eval_buffer_field_operands 286 * FUNCTION: acpi_ds_eval_buffer_field_operands
287 * 287 *
288 * PARAMETERS: walk_state - Current walk 288 * PARAMETERS: walk_state - Current walk
289 * Op - A valid buffer_field Op object 289 * op - A valid buffer_field Op object
290 * 290 *
291 * RETURN: Status 291 * RETURN: Status
292 * 292 *
@@ -370,7 +370,7 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
370 * FUNCTION: acpi_ds_eval_region_operands 370 * FUNCTION: acpi_ds_eval_region_operands
371 * 371 *
372 * PARAMETERS: walk_state - Current walk 372 * PARAMETERS: walk_state - Current walk
373 * Op - A valid region Op object 373 * op - A valid region Op object
374 * 374 *
375 * RETURN: Status 375 * RETURN: Status
376 * 376 *
@@ -397,7 +397,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
397 */ 397 */
398 node = op->common.node; 398 node = op->common.node;
399 399
400 /* next_op points to the op that holds the space_iD */ 400 /* next_op points to the op that holds the space_ID */
401 401
402 next_op = op->common.value.arg; 402 next_op = op->common.value.arg;
403 403
@@ -461,7 +461,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
461 * FUNCTION: acpi_ds_eval_table_region_operands 461 * FUNCTION: acpi_ds_eval_table_region_operands
462 * 462 *
463 * PARAMETERS: walk_state - Current walk 463 * PARAMETERS: walk_state - Current walk
464 * Op - A valid region Op object 464 * op - A valid region Op object
465 * 465 *
466 * RETURN: Status 466 * RETURN: Status
467 * 467 *
@@ -560,7 +560,7 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
560 * FUNCTION: acpi_ds_eval_data_object_operands 560 * FUNCTION: acpi_ds_eval_data_object_operands
561 * 561 *
562 * PARAMETERS: walk_state - Current walk 562 * PARAMETERS: walk_state - Current walk
563 * Op - A valid data_object Op object 563 * op - A valid data_object Op object
564 * obj_desc - data_object 564 * obj_desc - data_object
565 * 565 *
566 * RETURN: Status 566 * RETURN: Status
@@ -662,7 +662,7 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
662 * FUNCTION: acpi_ds_eval_bank_field_operands 662 * FUNCTION: acpi_ds_eval_bank_field_operands
663 * 663 *
664 * PARAMETERS: walk_state - Current walk 664 * PARAMETERS: walk_state - Current walk
665 * Op - A valid bank_field Op object 665 * op - A valid bank_field Op object
666 * 666 *
667 * RETURN: Status 667 * RETURN: Status
668 * 668 *
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 1abcda31037f..73a5447475f5 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -157,7 +157,7 @@ acpi_ds_do_implicit_return(union acpi_operand_object *return_desc,
157 * 157 *
158 * FUNCTION: acpi_ds_is_result_used 158 * FUNCTION: acpi_ds_is_result_used
159 * 159 *
160 * PARAMETERS: Op - Current Op 160 * PARAMETERS: op - Current Op
161 * walk_state - Current State 161 * walk_state - Current State
162 * 162 *
163 * RETURN: TRUE if result is used, FALSE otherwise 163 * RETURN: TRUE if result is used, FALSE otherwise
@@ -323,7 +323,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
323 * 323 *
324 * FUNCTION: acpi_ds_delete_result_if_not_used 324 * FUNCTION: acpi_ds_delete_result_if_not_used
325 * 325 *
326 * PARAMETERS: Op - Current parse Op 326 * PARAMETERS: op - Current parse Op
327 * result_obj - Result of the operation 327 * result_obj - Result of the operation
328 * walk_state - Current state 328 * walk_state - Current state
329 * 329 *
@@ -445,7 +445,7 @@ void acpi_ds_clear_operands(struct acpi_walk_state *walk_state)
445 * FUNCTION: acpi_ds_create_operand 445 * FUNCTION: acpi_ds_create_operand
446 * 446 *
447 * PARAMETERS: walk_state - Current walk state 447 * PARAMETERS: walk_state - Current walk state
448 * Arg - Parse object for the argument 448 * arg - Parse object for the argument
449 * arg_index - Which argument (zero based) 449 * arg_index - Which argument (zero based)
450 * 450 *
451 * RETURN: Status 451 * RETURN: Status
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 9e9490a9cbf0..f6c4295470ae 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -85,8 +85,8 @@ void acpi_ds_scope_stack_clear(struct acpi_walk_state *walk_state)
85 * 85 *
86 * FUNCTION: acpi_ds_scope_stack_push 86 * FUNCTION: acpi_ds_scope_stack_push
87 * 87 *
88 * PARAMETERS: Node - Name to be made current 88 * PARAMETERS: node - Name to be made current
89 * Type - Type of frame being pushed 89 * type - Type of frame being pushed
90 * walk_state - Current state 90 * walk_state - Current state
91 * 91 *
92 * RETURN: Status 92 * RETURN: Status
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index c9c2ac13e7cc..d0e6555061e4 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -58,7 +58,7 @@ static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *ws);
58 * 58 *
59 * FUNCTION: acpi_ds_result_pop 59 * FUNCTION: acpi_ds_result_pop
60 * 60 *
61 * PARAMETERS: Object - Where to return the popped object 61 * PARAMETERS: object - Where to return the popped object
62 * walk_state - Current Walk state 62 * walk_state - Current Walk state
63 * 63 *
64 * RETURN: Status 64 * RETURN: Status
@@ -132,7 +132,7 @@ acpi_ds_result_pop(union acpi_operand_object **object,
132 * 132 *
133 * FUNCTION: acpi_ds_result_push 133 * FUNCTION: acpi_ds_result_push
134 * 134 *
135 * PARAMETERS: Object - Where to return the popped object 135 * PARAMETERS: object - Where to return the popped object
136 * walk_state - Current Walk state 136 * walk_state - Current Walk state
137 * 137 *
138 * RETURN: Status 138 * RETURN: Status
@@ -296,7 +296,7 @@ static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *walk_state)
296 * 296 *
297 * FUNCTION: acpi_ds_obj_stack_push 297 * FUNCTION: acpi_ds_obj_stack_push
298 * 298 *
299 * PARAMETERS: Object - Object to push 299 * PARAMETERS: object - Object to push
300 * walk_state - Current Walk state 300 * walk_state - Current Walk state
301 * 301 *
302 * RETURN: Status 302 * RETURN: Status
@@ -433,7 +433,7 @@ acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
433 * 433 *
434 * FUNCTION: acpi_ds_get_current_walk_state 434 * FUNCTION: acpi_ds_get_current_walk_state
435 * 435 *
436 * PARAMETERS: Thread - Get current active state for this Thread 436 * PARAMETERS: thread - Get current active state for this Thread
437 * 437 *
438 * RETURN: Pointer to the current walk state 438 * RETURN: Pointer to the current walk state
439 * 439 *
@@ -462,7 +462,7 @@ struct acpi_walk_state *acpi_ds_get_current_walk_state(struct acpi_thread_state
462 * FUNCTION: acpi_ds_push_walk_state 462 * FUNCTION: acpi_ds_push_walk_state
463 * 463 *
464 * PARAMETERS: walk_state - State to push 464 * PARAMETERS: walk_state - State to push
465 * Thread - Thread state object 465 * thread - Thread state object
466 * 466 *
467 * RETURN: None 467 * RETURN: None
468 * 468 *
@@ -486,7 +486,7 @@ acpi_ds_push_walk_state(struct acpi_walk_state *walk_state,
486 * 486 *
487 * FUNCTION: acpi_ds_pop_walk_state 487 * FUNCTION: acpi_ds_pop_walk_state
488 * 488 *
489 * PARAMETERS: Thread - Current thread state 489 * PARAMETERS: thread - Current thread state
490 * 490 *
491 * RETURN: A walk_state object popped from the thread's stack 491 * RETURN: A walk_state object popped from the thread's stack
492 * 492 *
@@ -525,9 +525,9 @@ struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
525 * FUNCTION: acpi_ds_create_walk_state 525 * FUNCTION: acpi_ds_create_walk_state
526 * 526 *
527 * PARAMETERS: owner_id - ID for object creation 527 * PARAMETERS: owner_id - ID for object creation
528 * Origin - Starting point for this walk 528 * origin - Starting point for this walk
529 * method_desc - Method object 529 * method_desc - Method object
530 * Thread - Current thread state 530 * thread - Current thread state
531 * 531 *
532 * RETURN: Pointer to the new walk state. 532 * RETURN: Pointer to the new walk state.
533 * 533 *
@@ -578,11 +578,11 @@ struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union
578 * FUNCTION: acpi_ds_init_aml_walk 578 * FUNCTION: acpi_ds_init_aml_walk
579 * 579 *
580 * PARAMETERS: walk_state - New state to be initialized 580 * PARAMETERS: walk_state - New state to be initialized
581 * Op - Current parse op 581 * op - Current parse op
582 * method_node - Control method NS node, if any 582 * method_node - Control method NS node, if any
583 * aml_start - Start of AML 583 * aml_start - Start of AML
584 * aml_length - Length of AML 584 * aml_length - Length of AML
585 * Info - Method info block (params, etc.) 585 * info - Method info block (params, etc.)
586 * pass_number - 1, 2, or 3 586 * pass_number - 1, 2, or 3
587 * 587 *
588 * RETURN: Status 588 * RETURN: Status
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 07e4dc44f81c..d4acfbbe5b29 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -251,7 +251,7 @@ u32 acpi_ev_fixed_event_detect(void)
251 * 251 *
252 * FUNCTION: acpi_ev_fixed_event_dispatch 252 * FUNCTION: acpi_ev_fixed_event_dispatch
253 * 253 *
254 * PARAMETERS: Event - Event type 254 * PARAMETERS: event - Event type
255 * 255 *
256 * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED 256 * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
257 * 257 *
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index cfeab38795d8..af14a7137632 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -135,7 +135,7 @@ acpi_status acpi_ev_remove_global_lock_handler(void)
135 * 135 *
136 * FUNCTION: acpi_ev_global_lock_handler 136 * FUNCTION: acpi_ev_global_lock_handler
137 * 137 *
138 * PARAMETERS: Context - From thread interface, not used 138 * PARAMETERS: context - From thread interface, not used
139 * 139 *
140 * RETURN: ACPI_INTERRUPT_HANDLED 140 * RETURN: ACPI_INTERRUPT_HANDLED
141 * 141 *
@@ -182,7 +182,7 @@ static u32 acpi_ev_global_lock_handler(void *context)
182 * 182 *
183 * FUNCTION: acpi_ev_acquire_global_lock 183 * FUNCTION: acpi_ev_acquire_global_lock
184 * 184 *
185 * PARAMETERS: Timeout - Max time to wait for the lock, in millisec. 185 * PARAMETERS: timeout - Max time to wait for the lock, in millisec.
186 * 186 *
187 * RETURN: Status 187 * RETURN: Status
188 * 188 *
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 8ba0e5f17091..afbd5cb391f6 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -466,7 +466,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
466 acpi_status status; 466 acpi_status status;
467 struct acpi_gpe_event_info *local_gpe_event_info; 467 struct acpi_gpe_event_info *local_gpe_event_info;
468 struct acpi_evaluate_info *info; 468 struct acpi_evaluate_info *info;
469 struct acpi_gpe_notify_object *notify_object; 469 struct acpi_gpe_notify_info *notify;
470 470
471 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); 471 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
472 472
@@ -517,17 +517,17 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
517 * completes. The notify handlers are NOT invoked synchronously 517 * completes. The notify handlers are NOT invoked synchronously
518 * from this thread -- because handlers may in turn run other 518 * from this thread -- because handlers may in turn run other
519 * control methods. 519 * control methods.
520 *
521 * June 2012: Expand implicit notify mechanism to support
522 * notifies on multiple device objects.
520 */ 523 */
521 status = acpi_ev_queue_notify_request( 524 notify = local_gpe_event_info->dispatch.notify_list;
522 local_gpe_event_info->dispatch.device.node, 525 while (ACPI_SUCCESS(status) && notify) {
523 ACPI_NOTIFY_DEVICE_WAKE); 526 status =
524 527 acpi_ev_queue_notify_request(notify->device_node,
525 notify_object = local_gpe_event_info->dispatch.device.next; 528 ACPI_NOTIFY_DEVICE_WAKE);
526 while (ACPI_SUCCESS(status) && notify_object) { 529
527 status = acpi_ev_queue_notify_request( 530 notify = notify->next;
528 notify_object->node,
529 ACPI_NOTIFY_DEVICE_WAKE);
530 notify_object = notify_object->next;
531 } 531 }
532 532
533 break; 533 break;
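
The hunk above is the core of the implicit-notify expansion: instead of notifying a single dispatch.device.node and then chasing a separate secondary chain, the GPE now carries one uniform singly linked list of devices to notify. A minimal self-contained sketch of that walk, using simplified stand-ins for the ACPICA types and helpers (none of these names are the real definitions):

#include <stdio.h>

/* Simplified stand-in for the acpi_gpe_notify_info list in the hunk above */
struct gpe_notify_info {
	const char *device_name;	/* stands in for the NS device node */
	struct gpe_notify_info *next;	/* next device, or NULL */
};

/* Stands in for acpi_ev_queue_notify_request(); 0 means success */
static int queue_notify_request(const char *device, unsigned int value)
{
	printf("Notify(%s, 0x%02X)\n", device, value);
	return 0;
}

/* Queue a wake notify (value 0x02) for every device on the GPE's list,
 * stopping early on the first failure, exactly as the new loop does. */
static int notify_all_devices(struct gpe_notify_info *list)
{
	int status = 0;

	while (status == 0 && list) {
		status = queue_notify_request(list->device_name, 0x02);
		list = list->next;
	}
	return status;
}

int main(void)
{
	struct gpe_notify_info b = { "DEV1", NULL };
	struct gpe_notify_info a = { "DEV0", &b };

	return notify_all_devices(&a);
}

Compared with the removed code there is no special-cased first device; an empty list simply notifies nothing.
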
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 23a3ca86b2eb..8cf4c104c7b7 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -318,7 +318,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
318 * FUNCTION: acpi_ev_create_gpe_block 318 * FUNCTION: acpi_ev_create_gpe_block
319 * 319 *
320 * PARAMETERS: gpe_device - Handle to the parent GPE block 320 * PARAMETERS: gpe_device - Handle to the parent GPE block
321 * gpe_block_address - Address and space_iD 321 * gpe_block_address - Address and space_ID
322 * register_count - Number of GPE register pairs in the block 322 * register_count - Number of GPE register pairs in the block
323 * gpe_block_base_number - Starting GPE number for the block 323 * gpe_block_base_number - Starting GPE number for the block
324 * interrupt_number - H/W interrupt for the block 324 * interrupt_number - H/W interrupt for the block
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 3c43796b8361..cb50dd91bc18 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -54,7 +54,7 @@ ACPI_MODULE_NAME("evgpeutil")
54 * FUNCTION: acpi_ev_walk_gpe_list 54 * FUNCTION: acpi_ev_walk_gpe_list
55 * 55 *
56 * PARAMETERS: gpe_walk_callback - Routine called for each GPE block 56 * PARAMETERS: gpe_walk_callback - Routine called for each GPE block
57 * Context - Value passed to callback 57 * context - Value passed to callback
58 * 58 *
59 * RETURN: Status 59 * RETURN: Status
60 * 60 *
@@ -347,6 +347,8 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
347 void *context) 347 void *context)
348{ 348{
349 struct acpi_gpe_event_info *gpe_event_info; 349 struct acpi_gpe_event_info *gpe_event_info;
350 struct acpi_gpe_notify_info *notify;
351 struct acpi_gpe_notify_info *next;
350 u32 i; 352 u32 i;
351 u32 j; 353 u32 j;
352 354
@@ -365,10 +367,28 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
365 367
366 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 368 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
367 ACPI_GPE_DISPATCH_HANDLER) { 369 ACPI_GPE_DISPATCH_HANDLER) {
370
371 /* Delete an installed handler block */
372
368 ACPI_FREE(gpe_event_info->dispatch.handler); 373 ACPI_FREE(gpe_event_info->dispatch.handler);
369 gpe_event_info->dispatch.handler = NULL; 374 gpe_event_info->dispatch.handler = NULL;
370 gpe_event_info->flags &= 375 gpe_event_info->flags &=
371 ~ACPI_GPE_DISPATCH_MASK; 376 ~ACPI_GPE_DISPATCH_MASK;
377 } else if ((gpe_event_info->
378 flags & ACPI_GPE_DISPATCH_MASK) ==
379 ACPI_GPE_DISPATCH_NOTIFY) {
380
381 /* Delete the implicit notification device list */
382
383 notify = gpe_event_info->dispatch.notify_list;
384 while (notify) {
385 next = notify->next;
386 ACPI_FREE(notify);
387 notify = next;
388 }
389 gpe_event_info->dispatch.notify_list = NULL;
390 gpe_event_info->flags &=
391 ~ACPI_GPE_DISPATCH_MASK;
372 } 392 }
373 } 393 }
374 } 394 }
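
The new ACPI_GPE_DISPATCH_NOTIFY branch above frees that per-GPE device list when handlers are deleted. The pattern is the standard save-next-before-free walk, ending with the caller-visible head cleared; a self-contained sketch with illustrative names:

#include <stdlib.h>

struct notify_info {
	struct notify_info *next;
};

/* Capture 'next' before freeing each node, then NULL the stored head
 * so no dangling pointer remains (mirrors notify_list = NULL above). */
static void free_notify_list(struct notify_info **head)
{
	struct notify_info *notify = *head;
	struct notify_info *next;

	while (notify) {
		next = notify->next;
		free(notify);
		notify = next;
	}
	*head = NULL;
}

int main(void)
{
	struct notify_info *head = calloc(1, sizeof(*head));

	if (head)
		head->next = calloc(1, sizeof(*head));
	free_notify_list(&head);	/* head is NULL afterwards */
	return 0;
}
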
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 51ef9f5e002d..51f537937c1f 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -56,7 +56,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
56 * 56 *
57 * FUNCTION: acpi_ev_is_notify_object 57 * FUNCTION: acpi_ev_is_notify_object
58 * 58 *
59 * PARAMETERS: Node - Node to check 59 * PARAMETERS: node - Node to check
60 * 60 *
61 * RETURN: TRUE if notifies allowed on this object 61 * RETURN: TRUE if notifies allowed on this object
62 * 62 *
@@ -86,7 +86,7 @@ u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node)
86 * 86 *
87 * FUNCTION: acpi_ev_queue_notify_request 87 * FUNCTION: acpi_ev_queue_notify_request
88 * 88 *
89 * PARAMETERS: Node - NS node for the notified object 89 * PARAMETERS: node - NS node for the notified object
90 * notify_value - Value from the Notify() request 90 * notify_value - Value from the Notify() request
91 * 91 *
92 * RETURN: Status 92 * RETURN: Status
@@ -101,102 +101,77 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
101 u32 notify_value) 101 u32 notify_value)
102{ 102{
103 union acpi_operand_object *obj_desc; 103 union acpi_operand_object *obj_desc;
104 union acpi_operand_object *handler_obj = NULL; 104 union acpi_operand_object *handler_list_head = NULL;
105 union acpi_generic_state *notify_info; 105 union acpi_generic_state *info;
106 u8 handler_list_id = 0;
106 acpi_status status = AE_OK; 107 acpi_status status = AE_OK;
107 108
108 ACPI_FUNCTION_NAME(ev_queue_notify_request); 109 ACPI_FUNCTION_NAME(ev_queue_notify_request);
109 110
110 /* 111 /* Are Notifies allowed on this object? */
111 * For value 0x03 (Ejection Request), may need to run a device method.
112 * For value 0x02 (Device Wake), if _PRW exists, may need to run
113 * the _PS0 method.
114 * For value 0x80 (Status Change) on the power button or sleep button,
115 * initiate soft-off or sleep operation.
116 *
117 * For all cases, simply dispatch the notify to the handler.
118 */
119 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
120 "Dispatching Notify on [%4.4s] (%s) Value 0x%2.2X (%s) Node %p\n",
121 acpi_ut_get_node_name(node),
122 acpi_ut_get_type_name(node->type), notify_value,
123 acpi_ut_get_notify_name(notify_value), node));
124 112
125 /* Get the notify object attached to the NS Node */ 113 if (!acpi_ev_is_notify_object(node)) {
126 114 return (AE_TYPE);
127 obj_desc = acpi_ns_get_attached_object(node); 115 }
128 if (obj_desc) {
129
130 /* We have the notify object, Get the correct handler */
131
132 switch (node->type) {
133 116
134 /* Notify is allowed only on these types */ 117 /* Get the correct notify list type (System or Device) */
135 118
136 case ACPI_TYPE_DEVICE: 119 if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
137 case ACPI_TYPE_THERMAL: 120 handler_list_id = ACPI_SYSTEM_HANDLER_LIST;
138 case ACPI_TYPE_PROCESSOR: 121 } else {
122 handler_list_id = ACPI_DEVICE_HANDLER_LIST;
123 }
139 124
140 if (notify_value <= ACPI_MAX_SYS_NOTIFY) { 125 /* Get the notify object attached to the namespace Node */
141 handler_obj =
142 obj_desc->common_notify.system_notify;
143 } else {
144 handler_obj =
145 obj_desc->common_notify.device_notify;
146 }
147 break;
148 126
149 default: 127 obj_desc = acpi_ns_get_attached_object(node);
128 if (obj_desc) {
150 129
151 /* All other types are not supported */ 130 /* We have an attached object, Get the correct handler list */
152 131
153 return (AE_TYPE); 132 handler_list_head =
154 } 133 obj_desc->common_notify.notify_list[handler_list_id];
155 } 134 }
156 135
157 /* 136 /*
158 * If there is a handler to run, schedule the dispatcher. 137 * If there is no notify handler (Global or Local)
159 * Check for: 138 * for this object, just ignore the notify
160 * 1) Global system notify handler
161 * 2) Global device notify handler
162 * 3) Per-device notify handler
163 */ 139 */
164 if ((acpi_gbl_system_notify.handler && 140 if (!acpi_gbl_global_notify[handler_list_id].handler
165 (notify_value <= ACPI_MAX_SYS_NOTIFY)) || 141 && !handler_list_head) {
166 (acpi_gbl_device_notify.handler && 142 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
167 (notify_value > ACPI_MAX_SYS_NOTIFY)) || handler_obj) { 143 "No notify handler for Notify, ignoring (%4.4s, %X) node %p\n",
168 notify_info = acpi_ut_create_generic_state(); 144 acpi_ut_get_node_name(node), notify_value,
169 if (!notify_info) { 145 node));
170 return (AE_NO_MEMORY);
171 }
172 146
173 if (!handler_obj) { 147 return (AE_OK);
174 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 148 }
175 "Executing system notify handler for Notify (%4.4s, %X) "
176 "node %p\n",
177 acpi_ut_get_node_name(node),
178 notify_value, node));
179 }
180 149
181 notify_info->common.descriptor_type = 150 /* Setup notify info and schedule the notify dispatcher */
182 ACPI_DESC_TYPE_STATE_NOTIFY;
183 notify_info->notify.node = node;
184 notify_info->notify.value = (u16) notify_value;
185 notify_info->notify.handler_obj = handler_obj;
186 151
187 status = 152 info = acpi_ut_create_generic_state();
188 acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch, 153 if (!info) {
189 notify_info); 154 return (AE_NO_MEMORY);
190 if (ACPI_FAILURE(status)) { 155 }
191 acpi_ut_delete_generic_state(notify_info);
192 }
193 } else {
194 /* There is no notify handler (per-device or system) for this device */
195 156
196 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 157 info->common.descriptor_type = ACPI_DESC_TYPE_STATE_NOTIFY;
197 "No notify handler for Notify (%4.4s, %X) node %p\n", 158
198 acpi_ut_get_node_name(node), notify_value, 159 info->notify.node = node;
199 node)); 160 info->notify.value = (u16)notify_value;
161 info->notify.handler_list_id = handler_list_id;
162 info->notify.handler_list_head = handler_list_head;
163 info->notify.global = &acpi_gbl_global_notify[handler_list_id];
164
165 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
166 "Dispatching Notify on [%4.4s] (%s) Value 0x%2.2X (%s) Node %p\n",
167 acpi_ut_get_node_name(node),
168 acpi_ut_get_type_name(node->type), notify_value,
169 acpi_ut_get_notify_name(notify_value), node));
170
171 status = acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch,
172 info);
173 if (ACPI_FAILURE(status)) {
174 acpi_ut_delete_generic_state(info);
200 } 175 }
201 176
202 return (status); 177 return (status);
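
The rewritten acpi_ev_queue_notify_request() above collapses the old three-way handler check into one rule: the notify value selects a list (system for 0x00-0x7F, device for 0x80-0xFF), and the request is dispatched only if the global handler for that list or the object's local list head is non-NULL. A compact sketch of just that selection logic; the 0x7F boundary follows the ACPI convention, the list ids are illustrative:

#include <stdbool.h>
#include <stddef.h>

#define MAX_SYS_NOTIFY 0x7F

enum handler_list { SYSTEM_HANDLER_LIST = 0, DEVICE_HANDLER_LIST = 1 };

/* Values 0x00-0x7F are system notifies, 0x80-0xFF are device notifies */
static enum handler_list select_list(unsigned int notify_value)
{
	return (notify_value <= MAX_SYS_NOTIFY) ? SYSTEM_HANDLER_LIST
						: DEVICE_HANDLER_LIST;
}

/* Dispatch only if someone can possibly receive the notify */
static bool should_dispatch(const void *global_handler,
			    const void *local_list_head)
{
	return global_handler != NULL || local_list_head != NULL;
}

int main(void)
{
	return (select_list(0x80) == DEVICE_HANDLER_LIST &&
		!should_dispatch(NULL, NULL)) ? 0 : 1;
}
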
@@ -206,7 +181,7 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
206 * 181 *
207 * FUNCTION: acpi_ev_notify_dispatch 182 * FUNCTION: acpi_ev_notify_dispatch
208 * 183 *
209 * PARAMETERS: Context - To be passed to the notify handler 184 * PARAMETERS: context - To be passed to the notify handler
210 * 185 *
211 * RETURN: None. 186 * RETURN: None.
212 * 187 *
@@ -217,60 +192,34 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
217 192
218static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context) 193static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
219{ 194{
220 union acpi_generic_state *notify_info = 195 union acpi_generic_state *info = (union acpi_generic_state *)context;
221 (union acpi_generic_state *)context;
222 acpi_notify_handler global_handler = NULL;
223 void *global_context = NULL;
224 union acpi_operand_object *handler_obj; 196 union acpi_operand_object *handler_obj;
225 197
226 ACPI_FUNCTION_ENTRY(); 198 ACPI_FUNCTION_ENTRY();
227 199
228 /* 200 /* Invoke a global notify handler if installed */
229 * We will invoke a global notify handler if installed. This is done
230 * _before_ we invoke the per-device handler attached to the device.
231 */
232 if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) {
233
234 /* Global system notification handler */
235
236 if (acpi_gbl_system_notify.handler) {
237 global_handler = acpi_gbl_system_notify.handler;
238 global_context = acpi_gbl_system_notify.context;
239 }
240 } else {
241 /* Global driver notification handler */
242
243 if (acpi_gbl_device_notify.handler) {
244 global_handler = acpi_gbl_device_notify.handler;
245 global_context = acpi_gbl_device_notify.context;
246 }
247 }
248
249 /* Invoke the system handler first, if present */
250 201
251 if (global_handler) { 202 if (info->notify.global->handler) {
252 global_handler(notify_info->notify.node, 203 info->notify.global->handler(info->notify.node,
253 notify_info->notify.value, global_context); 204 info->notify.value,
205 info->notify.global->context);
254 } 206 }
255 207
256 /* Now invoke the per-device handler, if present */ 208 /* Now invoke the local notify handler(s) if any are installed */
257 209
258 handler_obj = notify_info->notify.handler_obj; 210 handler_obj = info->notify.handler_list_head;
259 if (handler_obj) { 211 while (handler_obj) {
260 struct acpi_object_notify_handler *notifier; 212 handler_obj->notify.handler(info->notify.node,
213 info->notify.value,
214 handler_obj->notify.context);
261 215
262 notifier = &handler_obj->notify; 216 handler_obj =
263 while (notifier) { 217 handler_obj->notify.next[info->notify.handler_list_id];
264 notifier->handler(notify_info->notify.node,
265 notify_info->notify.value,
266 notifier->context);
267 notifier = notifier->next;
268 }
269 } 218 }
270 219
271 /* All done with the info object */ 220 /* All done with the info object */
272 221
273 acpi_ut_delete_generic_state(notify_info); 222 acpi_ut_delete_generic_state(info);
274} 223}
275 224
276#if (!ACPI_REDUCED_HARDWARE) 225#if (!ACPI_REDUCED_HARDWARE)
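
With that, acpi_ev_notify_dispatch() is symmetric for both notify types: the info object already carries the matching global handler struct and local list head, so the dispatcher invokes the global handler first (if installed) and then walks the per-object chain through notify.next[handler_list_id]. A sketch of the two-stage dispatch with simplified types; only the node/value/context handler triple mirrors acpi_notify_handler, everything else is illustrative:

#include <stdio.h>
#include <stddef.h>

typedef void (*notify_handler)(void *node, unsigned int value, void *context);

struct global_notify {
	notify_handler handler;
	void *context;
};

struct local_notify {
	notify_handler handler;
	void *context;
	struct local_notify *next;	/* the real object keeps one 'next'
					 * pointer per list type */
};

static void dispatch_notify(struct global_notify *global,
			    struct local_notify *local_head,
			    void *node, unsigned int value)
{
	/* Stage 1: the global handler runs first, if installed */
	if (global->handler)
		global->handler(node, value, global->context);

	/* Stage 2: then every local handler on the object's chain */
	for (struct local_notify *h = local_head; h; h = h->next)
		h->handler(node, value, h->context);
}

static void print_handler(void *node, unsigned int value, void *context)
{
	(void)node;
	printf("%s got notify 0x%02X\n", (const char *)context, value);
}

int main(void)
{
	struct local_notify second = { print_handler, "local#2", NULL };
	struct local_notify first = { print_handler, "local#1", &second };
	struct global_notify global = { print_handler, "global" };

	dispatch_notify(&global, &first, NULL, 0x80);
	return 0;
}
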
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 1b0180a1b798..0cc6a16fedc7 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -150,7 +150,7 @@ acpi_status acpi_ev_install_region_handlers(void)
150 * 150 *
151 * FUNCTION: acpi_ev_has_default_handler 151 * FUNCTION: acpi_ev_has_default_handler
152 * 152 *
153 * PARAMETERS: Node - Namespace node for the device 153 * PARAMETERS: node - Namespace node for the device
154 * space_id - The address space ID 154 * space_id - The address space ID
155 * 155 *
156 * RETURN: TRUE if default handler is installed, FALSE otherwise 156 * RETURN: TRUE if default handler is installed, FALSE otherwise
@@ -244,7 +244,7 @@ acpi_status acpi_ev_initialize_op_regions(void)
244 * FUNCTION: acpi_ev_execute_reg_method 244 * FUNCTION: acpi_ev_execute_reg_method
245 * 245 *
246 * PARAMETERS: region_obj - Region object 246 * PARAMETERS: region_obj - Region object
247 * Function - Passed to _REG: On (1) or Off (0) 247 * function - Passed to _REG: On (1) or Off (0)
248 * 248 *
249 * RETURN: Status 249 * RETURN: Status
250 * 250 *
@@ -286,10 +286,10 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
286 /* 286 /*
287 * The _REG method has two arguments: 287 * The _REG method has two arguments:
288 * 288 *
289 * Arg0 - Integer: 289 * arg0 - Integer:
290 * Operation region space ID Same value as region_obj->Region.space_id 290 * Operation region space ID Same value as region_obj->Region.space_id
291 * 291 *
292 * Arg1 - Integer: 292 * arg1 - Integer:
293 * connection status 1 for connecting the handler, 0 for disconnecting 293 * connection status 1 for connecting the handler, 0 for disconnecting
294 * the handler (Passed as a parameter) 294 * the handler (Passed as a parameter)
295 */ 295 */
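
The comment block above documents the fixed _REG calling convention: Arg0 is the operation-region address space ID and Arg1 is 1 for connect, 0 for disconnect. For illustration only, the same two-argument call can be made from outside the interpreter through ACPICA's public evaluate interface; a hedged sketch assuming 'device' is a valid handle to an object with a _REG method:

#include <acpi/acpi.h>	/* kernel-side ACPICA header; exact path may vary */

static acpi_status run_reg_method(acpi_handle device, u32 space_id)
{
	union acpi_object args[2];
	struct acpi_object_list arg_list = { 2, args };

	args[0].type = ACPI_TYPE_INTEGER;
	args[0].integer.value = space_id;	/* Arg0: address space ID */
	args[1].type = ACPI_TYPE_INTEGER;
	args[1].integer.value = 1;		/* Arg1: 1 = connect */

	return acpi_evaluate_object(device, "_REG", &arg_list, NULL);
}
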
@@ -330,10 +330,10 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
330 * 330 *
331 * PARAMETERS: region_obj - Internal region object 331 * PARAMETERS: region_obj - Internal region object
332 * field_obj - Corresponding field. Can be NULL. 332 * field_obj - Corresponding field. Can be NULL.
333 * Function - Read or Write operation 333 * function - Read or Write operation
334 * region_offset - Where in the region to read or write 334 * region_offset - Where in the region to read or write
335 * bit_width - Field width in bits (8, 16, 32, or 64) 335 * bit_width - Field width in bits (8, 16, 32, or 64)
336 * Value - Pointer to in or out value, must be 336 * value - Pointer to in or out value, must be
337 * a full 64-bit integer 337 * a full 64-bit integer
338 * 338 *
339 * RETURN: Status 339 * RETURN: Status
@@ -840,11 +840,11 @@ acpi_ev_install_handler(acpi_handle obj_handle,
840 * 840 *
841 * FUNCTION: acpi_ev_install_space_handler 841 * FUNCTION: acpi_ev_install_space_handler
842 * 842 *
843 * PARAMETERS: Node - Namespace node for the device 843 * PARAMETERS: node - Namespace node for the device
844 * space_id - The address space ID 844 * space_id - The address space ID
845 * Handler - Address of the handler 845 * handler - Address of the handler
846 * Setup - Address of the setup function 846 * setup - Address of the setup function
847 * Context - Value passed to the handler on each access 847 * context - Value passed to the handler on each access
848 * 848 *
849 * RETURN: Status 849 * RETURN: Status
850 * 850 *
@@ -1061,7 +1061,7 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
1061 * 1061 *
1062 * FUNCTION: acpi_ev_execute_reg_methods 1062 * FUNCTION: acpi_ev_execute_reg_methods
1063 * 1063 *
1064 * PARAMETERS: Node - Namespace node for the device 1064 * PARAMETERS: node - Namespace node for the device
1065 * space_id - The address space ID 1065 * space_id - The address space ID
1066 * 1066 *
1067 * RETURN: Status 1067 * RETURN: Status
@@ -1104,7 +1104,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
1104 * 1104 *
1105 * PARAMETERS: walk_namespace callback 1105 * PARAMETERS: walk_namespace callback
1106 * 1106 *
1107 * DESCRIPTION: Run _REG method for region objects of the requested space_iD 1107 * DESCRIPTION: Run _REG method for region objects of the requested spaceID
1108 * 1108 *
1109 ******************************************************************************/ 1109 ******************************************************************************/
1110 1110
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 819c17f5897a..4c1c8261166f 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -56,8 +56,8 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
56 * 56 *
57 * FUNCTION: acpi_ev_system_memory_region_setup 57 * FUNCTION: acpi_ev_system_memory_region_setup
58 * 58 *
59 * PARAMETERS: Handle - Region we are interested in 59 * PARAMETERS: handle - Region we are interested in
60 * Function - Start or stop 60 * function - Start or stop
61 * handler_context - Address space handler context 61 * handler_context - Address space handler context
62 * region_context - Region specific context 62 * region_context - Region specific context
63 * 63 *
@@ -118,8 +118,8 @@ acpi_ev_system_memory_region_setup(acpi_handle handle,
118 * 118 *
119 * FUNCTION: acpi_ev_io_space_region_setup 119 * FUNCTION: acpi_ev_io_space_region_setup
120 * 120 *
121 * PARAMETERS: Handle - Region we are interested in 121 * PARAMETERS: handle - Region we are interested in
122 * Function - Start or stop 122 * function - Start or stop
123 * handler_context - Address space handler context 123 * handler_context - Address space handler context
124 * region_context - Region specific context 124 * region_context - Region specific context
125 * 125 *
@@ -149,8 +149,8 @@ acpi_ev_io_space_region_setup(acpi_handle handle,
149 * 149 *
150 * FUNCTION: acpi_ev_pci_config_region_setup 150 * FUNCTION: acpi_ev_pci_config_region_setup
151 * 151 *
152 * PARAMETERS: Handle - Region we are interested in 152 * PARAMETERS: handle - Region we are interested in
153 * Function - Start or stop 153 * function - Start or stop
154 * handler_context - Address space handler context 154 * handler_context - Address space handler context
155 * region_context - Region specific context 155 * region_context - Region specific context
156 * 156 *
@@ -338,7 +338,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
338 * 338 *
339 * FUNCTION: acpi_ev_is_pci_root_bridge 339 * FUNCTION: acpi_ev_is_pci_root_bridge
340 * 340 *
341 * PARAMETERS: Node - Device node being examined 341 * PARAMETERS: node - Device node being examined
342 * 342 *
343 * RETURN: TRUE if device is a PCI/PCI-Express Root Bridge 343 * RETURN: TRUE if device is a PCI/PCI-Express Root Bridge
344 * 344 *
@@ -393,14 +393,14 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
393 * 393 *
394 * FUNCTION: acpi_ev_pci_bar_region_setup 394 * FUNCTION: acpi_ev_pci_bar_region_setup
395 * 395 *
396 * PARAMETERS: Handle - Region we are interested in 396 * PARAMETERS: handle - Region we are interested in
397 * Function - Start or stop 397 * function - Start or stop
398 * handler_context - Address space handler context 398 * handler_context - Address space handler context
399 * region_context - Region specific context 399 * region_context - Region specific context
400 * 400 *
401 * RETURN: Status 401 * RETURN: Status
402 * 402 *
403 * DESCRIPTION: Setup a pci_bAR operation region 403 * DESCRIPTION: Setup a pci_BAR operation region
404 * 404 *
405 * MUTEX: Assumes namespace is not locked 405 * MUTEX: Assumes namespace is not locked
406 * 406 *
@@ -420,8 +420,8 @@ acpi_ev_pci_bar_region_setup(acpi_handle handle,
420 * 420 *
421 * FUNCTION: acpi_ev_cmos_region_setup 421 * FUNCTION: acpi_ev_cmos_region_setup
422 * 422 *
423 * PARAMETERS: Handle - Region we are interested in 423 * PARAMETERS: handle - Region we are interested in
424 * Function - Start or stop 424 * function - Start or stop
425 * handler_context - Address space handler context 425 * handler_context - Address space handler context
426 * region_context - Region specific context 426 * region_context - Region specific context
427 * 427 *
@@ -447,8 +447,8 @@ acpi_ev_cmos_region_setup(acpi_handle handle,
447 * 447 *
448 * FUNCTION: acpi_ev_default_region_setup 448 * FUNCTION: acpi_ev_default_region_setup
449 * 449 *
450 * PARAMETERS: Handle - Region we are interested in 450 * PARAMETERS: handle - Region we are interested in
451 * Function - Start or stop 451 * function - Start or stop
452 * handler_context - Address space handler context 452 * handler_context - Address space handler context
453 * region_context - Region specific context 453 * region_context - Region specific context
454 * 454 *
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 6a57aa2d70d1..f9661e2b46a9 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -56,7 +56,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context);
56 * 56 *
57 * FUNCTION: acpi_ev_sci_xrupt_handler 57 * FUNCTION: acpi_ev_sci_xrupt_handler
58 * 58 *
59 * PARAMETERS: Context - Calling Context 59 * PARAMETERS: context - Calling Context
60 * 60 *
61 * RETURN: Status code indicates whether interrupt was handled. 61 * RETURN: Status code indicates whether interrupt was handled.
62 * 62 *
@@ -96,7 +96,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
96 * 96 *
97 * FUNCTION: acpi_ev_gpe_xrupt_handler 97 * FUNCTION: acpi_ev_gpe_xrupt_handler
98 * 98 *
99 * PARAMETERS: Context - Calling Context 99 * PARAMETERS: context - Calling Context
100 * 100 *
101 * RETURN: Status code indicates whether interrupt was handled. 101 * RETURN: Status code indicates whether interrupt was handled.
102 * 102 *
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 44bef5744ebb..7587eb6c9584 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -54,86 +54,25 @@ ACPI_MODULE_NAME("evxface")
54 54
55/******************************************************************************* 55/*******************************************************************************
56 * 56 *
57 * FUNCTION: acpi_populate_handler_object
58 *
59 * PARAMETERS: handler_obj - Handler object to populate
60 * handler_type - The type of handler:
61 * ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
62 * ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
63 * ACPI_ALL_NOTIFY: both system and device
64 * handler - Address of the handler
65 * context - Value passed to the handler on each GPE
66 * next - Address of a handler object to link to
67 *
68 * RETURN: None
69 *
70 * DESCRIPTION: Populate a handler object.
71 *
72 ******************************************************************************/
73static void
74acpi_populate_handler_object(struct acpi_object_notify_handler *handler_obj,
75 u32 handler_type,
76 acpi_notify_handler handler, void *context,
77 struct acpi_object_notify_handler *next)
78{
79 handler_obj->handler_type = handler_type;
80 handler_obj->handler = handler;
81 handler_obj->context = context;
82 handler_obj->next = next;
83}
84
85/*******************************************************************************
86 *
87 * FUNCTION: acpi_add_handler_object
88 *
89 * PARAMETERS: parent_obj - Parent of the new object
90 * handler - Address of the handler
91 * context - Value passed to the handler on each GPE
92 *
93 * RETURN: Status
94 *
95 * DESCRIPTION: Create a new handler object and populate it.
96 *
97 ******************************************************************************/
98static acpi_status
99acpi_add_handler_object(struct acpi_object_notify_handler *parent_obj,
100 acpi_notify_handler handler, void *context)
101{
102 struct acpi_object_notify_handler *handler_obj;
103
104 /* The parent must not be a defice notify handler object. */
105 if (parent_obj->handler_type & ACPI_DEVICE_NOTIFY)
106 return AE_BAD_PARAMETER;
107
108 handler_obj = ACPI_ALLOCATE_ZEROED(sizeof(*handler_obj));
109 if (!handler_obj)
110 return AE_NO_MEMORY;
111
112 acpi_populate_handler_object(handler_obj,
113 ACPI_SYSTEM_NOTIFY,
114 handler, context,
115 parent_obj->next);
116 parent_obj->next = handler_obj;
117
118 return AE_OK;
119}
120
121
122/*******************************************************************************
123 *
124 * FUNCTION: acpi_install_notify_handler 57 * FUNCTION: acpi_install_notify_handler
125 * 58 *
126 * PARAMETERS: Device - The device for which notifies will be handled 59 * PARAMETERS: Device - The device for which notifies will be handled
127 * handler_type - The type of handler: 60 * handler_type - The type of handler:
128 * ACPI_SYSTEM_NOTIFY: system_handler (00-7f) 61 * ACPI_SYSTEM_NOTIFY: System Handler (00-7F)
129 * ACPI_DEVICE_NOTIFY: driver_handler (80-ff) 62 * ACPI_DEVICE_NOTIFY: Device Handler (80-FF)
130 * ACPI_ALL_NOTIFY: both system and device 63 * ACPI_ALL_NOTIFY: Both System and Device
131 * Handler - Address of the handler 64 * Handler - Address of the handler
132 * Context - Value passed to the handler on each GPE 65 * Context - Value passed to the handler on each GPE
133 * 66 *
134 * RETURN: Status 67 * RETURN: Status
135 * 68 *
136 * DESCRIPTION: Install a handler for notifies on an ACPI device 69 * DESCRIPTION: Install a handler for notifications on an ACPI Device,
70 * thermal_zone, or Processor object.
71 *
72 * NOTES: The Root namespace object may have only one handler for each
73 * type of notify (System/Device). Device/Thermal/Processor objects
74 * may have one device notify handler, and multiple system notify
75 * handlers.
137 * 76 *
138 ******************************************************************************/ 77 ******************************************************************************/
139acpi_status 78acpi_status
@@ -141,17 +80,19 @@ acpi_install_notify_handler(acpi_handle device,
141 u32 handler_type, 80 u32 handler_type,
142 acpi_notify_handler handler, void *context) 81 acpi_notify_handler handler, void *context)
143{ 82{
83 struct acpi_namespace_node *node =
84 ACPI_CAST_PTR(struct acpi_namespace_node, device);
144 union acpi_operand_object *obj_desc; 85 union acpi_operand_object *obj_desc;
145 union acpi_operand_object *notify_obj; 86 union acpi_operand_object *handler_obj;
146 struct acpi_namespace_node *node;
147 acpi_status status; 87 acpi_status status;
88 u32 i;
148 89
149 ACPI_FUNCTION_TRACE(acpi_install_notify_handler); 90 ACPI_FUNCTION_TRACE(acpi_install_notify_handler);
150 91
151 /* Parameter validation */ 92 /* Parameter validation */
152 93
153 if ((!device) || 94 if ((!device) || (!handler) || (!handler_type) ||
154 (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) { 95 (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
155 return_ACPI_STATUS(AE_BAD_PARAMETER); 96 return_ACPI_STATUS(AE_BAD_PARAMETER);
156 } 97 }
157 98
@@ -160,144 +101,112 @@ acpi_install_notify_handler(acpi_handle device,
160 return_ACPI_STATUS(status); 101 return_ACPI_STATUS(status);
161 } 102 }
162 103
163 /* Convert and validate the device handle */
164
165 node = acpi_ns_validate_handle(device);
166 if (!node) {
167 status = AE_BAD_PARAMETER;
168 goto unlock_and_exit;
169 }
170
171 /* 104 /*
172 * Root Object: 105 * Root Object:
173 * Registering a notify handler on the root object indicates that the 106 * Registering a notify handler on the root object indicates that the
174 * caller wishes to receive notifications for all objects. Note that 107 * caller wishes to receive notifications for all objects. Note that
175 * only one <external> global handler can be regsitered (per notify type). 108 * only one global handler can be registered per notify type.
109 * Ensure that a handler is not already installed.
176 */ 110 */
177 if (device == ACPI_ROOT_OBJECT) { 111 if (device == ACPI_ROOT_OBJECT) {
112 for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) {
113 if (handler_type & (i + 1)) {
114 if (acpi_gbl_global_notify[i].handler) {
115 status = AE_ALREADY_EXISTS;
116 goto unlock_and_exit;
117 }
178 118
179 /* Make sure the handler is not already installed */ 119 acpi_gbl_global_notify[i].handler = handler;
180 120 acpi_gbl_global_notify[i].context = context;
181 if (((handler_type & ACPI_SYSTEM_NOTIFY) && 121 }
182 acpi_gbl_system_notify.handler) ||
183 ((handler_type & ACPI_DEVICE_NOTIFY) &&
184 acpi_gbl_device_notify.handler)) {
185 status = AE_ALREADY_EXISTS;
186 goto unlock_and_exit;
187 }
188
189 if (handler_type & ACPI_SYSTEM_NOTIFY) {
190 acpi_gbl_system_notify.node = node;
191 acpi_gbl_system_notify.handler = handler;
192 acpi_gbl_system_notify.context = context;
193 }
194
195 if (handler_type & ACPI_DEVICE_NOTIFY) {
196 acpi_gbl_device_notify.node = node;
197 acpi_gbl_device_notify.handler = handler;
198 acpi_gbl_device_notify.context = context;
199 } 122 }
200 123
201 /* Global notify handler installed */ 124 goto unlock_and_exit; /* Global notify handler installed, all done */
202 } 125 }
203 126
204 /* 127 /*
205 * All Other Objects: 128 * All Other Objects:
206 * Caller will only receive notifications specific to the target object. 129 * Caller will only receive notifications specific to the target
207 * Note that only certain object types can receive notifications. 130 * object. Note that only certain object types are allowed to
131 * receive notifications.
208 */ 132 */
209 else {
210 /* Notifies allowed on this object? */
211 133
212 if (!acpi_ev_is_notify_object(node)) { 134 /* Are Notifies allowed on this object? */
213 status = AE_TYPE;
214 goto unlock_and_exit;
215 }
216 135
217 /* Check for an existing internal object */ 136 if (!acpi_ev_is_notify_object(node)) {
137 status = AE_TYPE;
138 goto unlock_and_exit;
139 }
218 140
219 obj_desc = acpi_ns_get_attached_object(node); 141 /* Check for an existing internal object, might not exist */
220 if (obj_desc) {
221 142
222 /* Object exists. */ 143 obj_desc = acpi_ns_get_attached_object(node);
144 if (!obj_desc) {
223 145
224 /* For a device notify, make sure there's no handler. */ 146 /* Create a new object */
225 if ((handler_type & ACPI_DEVICE_NOTIFY) &&
226 obj_desc->common_notify.device_notify) {
227 status = AE_ALREADY_EXISTS;
228 goto unlock_and_exit;
229 }
230 147
231 /* System notifies may have more handlers installed. */ 148 obj_desc = acpi_ut_create_internal_object(node->type);
232 notify_obj = obj_desc->common_notify.system_notify; 149 if (!obj_desc) {
150 status = AE_NO_MEMORY;
151 goto unlock_and_exit;
152 }
233 153
234 if ((handler_type & ACPI_SYSTEM_NOTIFY) && notify_obj) { 154 /* Attach new object to the Node, remove local reference */
235 struct acpi_object_notify_handler *parent_obj; 155
156 status = acpi_ns_attach_object(device, obj_desc, node->type);
157 acpi_ut_remove_reference(obj_desc);
158 if (ACPI_FAILURE(status)) {
159 goto unlock_and_exit;
160 }
161 }
236 162
237 if (handler_type & ACPI_DEVICE_NOTIFY) { 163 /* Ensure that the handler is not already installed in the lists */
164
165 for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) {
166 if (handler_type & (i + 1)) {
167 handler_obj = obj_desc->common_notify.notify_list[i];
168 while (handler_obj) {
169 if (handler_obj->notify.handler == handler) {
238 status = AE_ALREADY_EXISTS; 170 status = AE_ALREADY_EXISTS;
239 goto unlock_and_exit; 171 goto unlock_and_exit;
240 } 172 }
241 173
242 parent_obj = &notify_obj->notify; 174 handler_obj = handler_obj->notify.next[i];
243 status = acpi_add_handler_object(parent_obj,
244 handler,
245 context);
246 goto unlock_and_exit;
247 }
248 } else {
249 /* Create a new object */
250
251 obj_desc = acpi_ut_create_internal_object(node->type);
252 if (!obj_desc) {
253 status = AE_NO_MEMORY;
254 goto unlock_and_exit;
255 }
256
257 /* Attach new object to the Node */
258
259 status =
260 acpi_ns_attach_object(device, obj_desc, node->type);
261
262 /* Remove local reference to the object */
263
264 acpi_ut_remove_reference(obj_desc);
265 if (ACPI_FAILURE(status)) {
266 goto unlock_and_exit;
267 } 175 }
268 } 176 }
177 }
269 178
270 /* Install the handler */ 179 /* Create and populate a new notify handler object */
271 180
272 notify_obj = 181 handler_obj = acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_NOTIFY);
273 acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_NOTIFY); 182 if (!handler_obj) {
274 if (!notify_obj) { 183 status = AE_NO_MEMORY;
275 status = AE_NO_MEMORY; 184 goto unlock_and_exit;
276 goto unlock_and_exit; 185 }
277 }
278 186
279 acpi_populate_handler_object(&notify_obj->notify, 187 handler_obj->notify.node = node;
280 handler_type, 188 handler_obj->notify.handler_type = handler_type;
281 handler, context, 189 handler_obj->notify.handler = handler;
282 NULL); 190 handler_obj->notify.context = context;
283 191
284 if (handler_type & ACPI_SYSTEM_NOTIFY) { 192 /* Install the handler at the list head(s) */
285 obj_desc->common_notify.system_notify = notify_obj;
286 }
287 193
288 if (handler_type & ACPI_DEVICE_NOTIFY) { 194 for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) {
289 obj_desc->common_notify.device_notify = notify_obj; 195 if (handler_type & (i + 1)) {
290 } 196 handler_obj->notify.next[i] =
197 obj_desc->common_notify.notify_list[i];
291 198
292 if (handler_type == ACPI_ALL_NOTIFY) { 199 obj_desc->common_notify.notify_list[i] = handler_obj;
200 }
201 }
293 202
294 /* Extra ref if installed in both */ 203 /* Add an extra reference if handler was installed in both lists */
295 204
296 acpi_ut_add_reference(notify_obj); 205 if (handler_type == ACPI_ALL_NOTIFY) {
297 } 206 acpi_ut_add_reference(handler_obj);
298 } 207 }
299 208
300 unlock_and_exit: 209unlock_and_exit:
301 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 210 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
302 return_ACPI_STATUS(status); 211 return_ACPI_STATUS(status);
303} 212}
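
The install path above relies on a small bit trick: with ACPI_SYSTEM_NOTIFY == 1 and ACPI_DEVICE_NOTIFY == 2, loop index i maps to list i via the mask (i + 1), so ACPI_ALL_NOTIFY (== 3) naturally selects both lists. A standalone illustration; the constant values follow the ACPICA convention implied by the loop, and the names are local stand-ins:

#include <stdio.h>

#define SYSTEM_NOTIFY 1			/* notify values 0x00-0x7F */
#define DEVICE_NOTIFY 2			/* notify values 0x80-0xFF */
#define ALL_NOTIFY    (SYSTEM_NOTIFY | DEVICE_NOTIFY)
#define NUM_NOTIFY_TYPES 2

static void show_lists(unsigned int handler_type)
{
	for (unsigned int i = 0; i < NUM_NOTIFY_TYPES; i++) {
		/* i == 0 tests bit 1 (system), i == 1 tests bit 2 (device) */
		if (handler_type & (i + 1))
			printf("handler_type %u -> list %u\n",
			       handler_type, i);
	}
}

int main(void)
{
	show_lists(SYSTEM_NOTIFY);	/* list 0 only */
	show_lists(DEVICE_NOTIFY);	/* list 1 only */
	show_lists(ALL_NOTIFY);		/* lists 0 and 1 */
	return 0;
}

This is also why the new code adds an extra reference for ACPI_ALL_NOTIFY: the same handler object is linked into two lists and must survive removal from either one.
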
@@ -308,11 +217,11 @@ ACPI_EXPORT_SYMBOL(acpi_install_notify_handler)
308 * 217 *
309 * FUNCTION: acpi_remove_notify_handler 218 * FUNCTION: acpi_remove_notify_handler
310 * 219 *
311 * PARAMETERS: Device - The device for which notifies will be handled 220 * PARAMETERS: Device - The device for which the handler is installed
312 * handler_type - The type of handler: 221 * handler_type - The type of handler:
313 * ACPI_SYSTEM_NOTIFY: system_handler (00-7f) 222 * ACPI_SYSTEM_NOTIFY: System Handler (00-7F)
314 * ACPI_DEVICE_NOTIFY: driver_handler (80-ff) 223 * ACPI_DEVICE_NOTIFY: Device Handler (80-FF)
315 * ACPI_ALL_NOTIFY: both system and device 224 * ACPI_ALL_NOTIFY: Both System and Device
316 * Handler - Address of the handler 225 * Handler - Address of the handler
317 * 226 *
318 * RETURN: Status 227 * RETURN: Status
@@ -324,165 +233,106 @@ acpi_status
324acpi_remove_notify_handler(acpi_handle device, 233acpi_remove_notify_handler(acpi_handle device,
325 u32 handler_type, acpi_notify_handler handler) 234 u32 handler_type, acpi_notify_handler handler)
326{ 235{
327 union acpi_operand_object *notify_obj; 236 struct acpi_namespace_node *node =
237 ACPI_CAST_PTR(struct acpi_namespace_node, device);
328 union acpi_operand_object *obj_desc; 238 union acpi_operand_object *obj_desc;
329 struct acpi_namespace_node *node; 239 union acpi_operand_object *handler_obj;
240 union acpi_operand_object *previous_handler_obj;
330 acpi_status status; 241 acpi_status status;
242 u32 i;
331 243
332 ACPI_FUNCTION_TRACE(acpi_remove_notify_handler); 244 ACPI_FUNCTION_TRACE(acpi_remove_notify_handler);
333 245
334 /* Parameter validation */ 246 /* Parameter validation */
335 247
336 if ((!device) || 248 if ((!device) || (!handler) || (!handler_type) ||
337 (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) { 249 (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
338 status = AE_BAD_PARAMETER; 250 return_ACPI_STATUS(AE_BAD_PARAMETER);
339 goto exit;
340 } 251 }
341
342
343 /* Make sure all deferred tasks are completed */ 252 /* Make sure all deferred tasks are completed */
344 acpi_os_wait_events_complete(NULL); 253
254 acpi_os_wait_events_complete();
345 255
346 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); 256 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
347 if (ACPI_FAILURE(status)) { 257 if (ACPI_FAILURE(status)) {
348 goto exit; 258 return_ACPI_STATUS(status);
349 }
350
351 /* Convert and validate the device handle */
352
353 node = acpi_ns_validate_handle(device);
354 if (!node) {
355 status = AE_BAD_PARAMETER;
356 goto unlock_and_exit;
357 } 259 }
358 260
359 /* Root Object */ 261 /* Root Object. Global handlers are removed here */
360 262
361 if (device == ACPI_ROOT_OBJECT) { 263 if (device == ACPI_ROOT_OBJECT) {
362 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 264 for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) {
363 "Removing notify handler for namespace root object\n")); 265 if (handler_type & (i + 1)) {
266 if (!acpi_gbl_global_notify[i].handler ||
267 (acpi_gbl_global_notify[i].handler !=
268 handler)) {
269 status = AE_NOT_EXIST;
270 goto unlock_and_exit;
271 }
364 272
365 if (((handler_type & ACPI_SYSTEM_NOTIFY) && 273 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
366 !acpi_gbl_system_notify.handler) || 274 "Removing global notify handler\n"));
367 ((handler_type & ACPI_DEVICE_NOTIFY) &&
368 !acpi_gbl_device_notify.handler)) {
369 status = AE_NOT_EXIST;
370 goto unlock_and_exit;
371 }
372 275
373 if (handler_type & ACPI_SYSTEM_NOTIFY) { 276 acpi_gbl_global_notify[i].handler = NULL;
374 acpi_gbl_system_notify.node = NULL; 277 acpi_gbl_global_notify[i].context = NULL;
375 acpi_gbl_system_notify.handler = NULL; 278 }
376 acpi_gbl_system_notify.context = NULL;
377 } 279 }
378 280
379 if (handler_type & ACPI_DEVICE_NOTIFY) { 281 goto unlock_and_exit;
380 acpi_gbl_device_notify.node = NULL;
381 acpi_gbl_device_notify.handler = NULL;
382 acpi_gbl_device_notify.context = NULL;
383 }
384 } 282 }
385 283
386 /* All Other Objects */ 284 /* All other objects: Are Notifies allowed on this object? */
387 285
388 else { 286 if (!acpi_ev_is_notify_object(node)) {
389 /* Notifies allowed on this object? */ 287 status = AE_TYPE;
288 goto unlock_and_exit;
289 }
390 290
391 if (!acpi_ev_is_notify_object(node)) { 291 /* Must have an existing internal object */
392 status = AE_TYPE;
393 goto unlock_and_exit;
394 }
395 292
396 /* Check for an existing internal object */ 293 obj_desc = acpi_ns_get_attached_object(node);
294 if (!obj_desc) {
295 status = AE_NOT_EXIST;
296 goto unlock_and_exit;
297 }
397 298
398 obj_desc = acpi_ns_get_attached_object(node); 299 /* Internal object exists. Find the handler and remove it */
399 if (!obj_desc) {
400 status = AE_NOT_EXIST;
401 goto unlock_and_exit;
402 }
403 300
404 /* Object exists - make sure there's an existing handler */ 301 for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) {
302 if (handler_type & (i + 1)) {
303 handler_obj = obj_desc->common_notify.notify_list[i];
304 previous_handler_obj = NULL;
405 305
406 if (handler_type & ACPI_SYSTEM_NOTIFY) { 306 /* Attempt to find the handler in the handler list */
407 struct acpi_object_notify_handler *handler_obj;
408 struct acpi_object_notify_handler *parent_obj;
409 307
410 notify_obj = obj_desc->common_notify.system_notify; 308 while (handler_obj &&
411 if (!notify_obj) { 309 (handler_obj->notify.handler != handler)) {
412 status = AE_NOT_EXIST; 310 previous_handler_obj = handler_obj;
413 goto unlock_and_exit; 311 handler_obj = handler_obj->notify.next[i];
414 }
415
416 handler_obj = &notify_obj->notify;
417 parent_obj = NULL;
418 while (handler_obj->handler != handler) {
419 if (handler_obj->next) {
420 parent_obj = handler_obj;
421 handler_obj = handler_obj->next;
422 } else {
423 break;
424 }
425 } 312 }
426 313
427 if (handler_obj->handler != handler) { 314 if (!handler_obj) {
428 status = AE_BAD_PARAMETER; 315 status = AE_NOT_EXIST;
429 goto unlock_and_exit; 316 goto unlock_and_exit;
430 } 317 }
431 318
432 /* 319 /* Remove the handler object from the list */
433 * Remove the handler. There are three possible cases.
434 * First, we may need to remove a non-embedded object.
435 * Second, we may need to remove the embedded object's
436 * handler data, while non-embedded objects exist.
437 * Finally, we may need to remove the embedded object
438 * entirely along with its container.
439 */
440 if (parent_obj) {
441 /* Non-embedded object is being removed. */
442 parent_obj->next = handler_obj->next;
443 ACPI_FREE(handler_obj);
444 } else if (notify_obj->notify.next) {
445 /*
446 * The handler matches the embedded object, but
447 * there are more handler objects in the list.
448 * Replace the embedded object's data with the
449 * first next object's data and remove that
450 * object.
451 */
452 parent_obj = &notify_obj->notify;
453 handler_obj = notify_obj->notify.next;
454 *parent_obj = *handler_obj;
455 ACPI_FREE(handler_obj);
456 } else {
457 /* No more handler objects in the list. */
458 obj_desc->common_notify.system_notify = NULL;
459 acpi_ut_remove_reference(notify_obj);
460 }
461 }
462 320
463 if (handler_type & ACPI_DEVICE_NOTIFY) { 321 if (previous_handler_obj) { /* Handler is not at the list head */
464 notify_obj = obj_desc->common_notify.device_notify; 322 previous_handler_obj->notify.next[i] =
465 if (!notify_obj) { 323 handler_obj->notify.next[i];
466 status = AE_NOT_EXIST; 324 } else { /* Handler is at the list head */
467 goto unlock_and_exit;
468 }
469 325
470 if (notify_obj->notify.handler != handler) { 326 obj_desc->common_notify.notify_list[i] =
471 status = AE_BAD_PARAMETER; 327 handler_obj->notify.next[i];
472 goto unlock_and_exit;
473 } 328 }
474 329
475 /* Remove the handler */ 330 acpi_ut_remove_reference(handler_obj);
476 obj_desc->common_notify.device_notify = NULL;
477 acpi_ut_remove_reference(notify_obj);
478 } 331 }
479 } 332 }
480 333
481 unlock_and_exit: 334unlock_and_exit:
482 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 335 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
483 exit:
484 if (ACPI_FAILURE(status))
485 ACPI_EXCEPTION((AE_INFO, status, "Removing notify handler"));
486 return_ACPI_STATUS(status); 336 return_ACPI_STATUS(status);
487} 337}
488 338
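
Removal above is the matching single-pass unlink: walk the chosen list while tracking the previous node, then splice around the match, updating either the predecessor's next[i] or the list head stored on the object. The core pattern as a self-contained sketch with illustrative names:

#include <stdbool.h>
#include <stddef.h>

struct handler {
	void (*fn)(void);
	struct handler *next;
};

/* Unlink the node whose callback matches 'fn'. Returns false if the
 * handler was never installed (the AE_NOT_EXIST case in the diff). */
static bool unlink_handler(struct handler **head, void (*fn)(void))
{
	struct handler *h = *head;
	struct handler *prev = NULL;

	while (h && h->fn != fn) {
		prev = h;
		h = h->next;
	}
	if (!h)
		return false;

	if (prev)
		prev->next = h->next;	/* match is mid-list */
	else
		*head = h->next;	/* match is the list head */

	/* The real code drops a reference here instead of freeing, since
	 * an ACPI_ALL_NOTIFY handler lives on two lists at once. */
	return true;
}

static void h1(void) { }
static void h2(void) { }

int main(void)
{
	struct handler b = { h2, NULL };
	struct handler a = { h1, &b };
	struct handler *head = &a;

	return (unlink_handler(&head, h2) && head == &a) ? 0 : 1;
}
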
@@ -492,7 +342,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
492 * 342 *
493 * FUNCTION: acpi_install_exception_handler 343 * FUNCTION: acpi_install_exception_handler
494 * 344 *
495 * PARAMETERS: Handler - Pointer to the handler function for the 345 * PARAMETERS: handler - Pointer to the handler function for the
496 * event 346 * event
497 * 347 *
498 * RETURN: Status 348 * RETURN: Status
@@ -536,8 +386,8 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
536 * 386 *
537 * FUNCTION: acpi_install_global_event_handler 387 * FUNCTION: acpi_install_global_event_handler
538 * 388 *
539 * PARAMETERS: Handler - Pointer to the global event handler function 389 * PARAMETERS: handler - Pointer to the global event handler function
540 * Context - Value passed to the handler on each event 390 * context - Value passed to the handler on each event
541 * 391 *
542 * RETURN: Status 392 * RETURN: Status
543 * 393 *
@@ -586,10 +436,10 @@ ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler)
586 * 436 *
587 * FUNCTION: acpi_install_fixed_event_handler 437 * FUNCTION: acpi_install_fixed_event_handler
588 * 438 *
589 * PARAMETERS: Event - Event type to enable. 439 * PARAMETERS: event - Event type to enable.
590 * Handler - Pointer to the handler function for the 440 * handler - Pointer to the handler function for the
591 * event 441 * event
592 * Context - Value passed to the handler on each GPE 442 * context - Value passed to the handler on each GPE
593 * 443 *
594 * RETURN: Status 444 * RETURN: Status
595 * 445 *
@@ -656,8 +506,8 @@ ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler)
656 * 506 *
657 * FUNCTION: acpi_remove_fixed_event_handler 507 * FUNCTION: acpi_remove_fixed_event_handler
658 * 508 *
659 * PARAMETERS: Event - Event type to disable. 509 * PARAMETERS: event - Event type to disable.
660 * Handler - Address of the handler 510 * handler - Address of the handler
661 * 511 *
662 * RETURN: Status 512 * RETURN: Status
663 * 513 *
@@ -713,10 +563,10 @@ ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
713 * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT 563 * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
714 * defined GPEs) 564 * defined GPEs)
715 * gpe_number - The GPE number within the GPE block 565 * gpe_number - The GPE number within the GPE block
716 * Type - Whether this GPE should be treated as an 566 * type - Whether this GPE should be treated as an
717 * edge- or level-triggered interrupt. 567 * edge- or level-triggered interrupt.
718 * Address - Address of the handler 568 * address - Address of the handler
719 * Context - Value passed to the handler on each GPE 569 * context - Value passed to the handler on each GPE
720 * 570 *
721 * RETURN: Status 571 * RETURN: Status
722 * 572 *
@@ -823,7 +673,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
823 * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT 673 * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
824 * defined GPEs) 674 * defined GPEs)
825 * gpe_number - The event to remove a handler 675 * gpe_number - The event to remove a handler
826 * Address - Address of the handler 676 * address - Address of the handler
827 * 677 *
828 * RETURN: Status 678 * RETURN: Status
829 * 679 *
@@ -849,7 +699,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
849 699
850 /* Make sure all deferred tasks are completed */ 700 /* Make sure all deferred tasks are completed */
851 701
852 acpi_os_wait_events_complete(NULL); 702 acpi_os_wait_events_complete();
853 703
854 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); 704 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
855 if (ACPI_FAILURE(status)) { 705 if (ACPI_FAILURE(status)) {
@@ -919,8 +769,8 @@ ACPI_EXPORT_SYMBOL(acpi_remove_gpe_handler)
919 * 769 *
920 * FUNCTION: acpi_acquire_global_lock 770 * FUNCTION: acpi_acquire_global_lock
921 * 771 *
922 * PARAMETERS: Timeout - How long the caller is willing to wait 772 * PARAMETERS: timeout - How long the caller is willing to wait
923 * Handle - Where the handle to the lock is returned 773 * handle - Where the handle to the lock is returned
924 * (if acquired) 774 * (if acquired)
925 * 775 *
926 * RETURN: Status 776 * RETURN: Status
@@ -967,7 +817,7 @@ ACPI_EXPORT_SYMBOL(acpi_acquire_global_lock)
967 * 817 *
968 * FUNCTION: acpi_release_global_lock 818 * FUNCTION: acpi_release_global_lock
969 * 819 *
970 * PARAMETERS: Handle - Returned from acpi_acquire_global_lock 820 * PARAMETERS: handle - Returned from acpi_acquire_global_lock
971 * 821 *
972 * RETURN: Status 822 * RETURN: Status
973 * 823 *
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 77cee5a5e891..35520c6eeefb 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -153,8 +153,8 @@ ACPI_EXPORT_SYMBOL(acpi_disable)
153 * 153 *
154 * FUNCTION: acpi_enable_event 154 * FUNCTION: acpi_enable_event
155 * 155 *
156 * PARAMETERS: Event - The fixed event to be enabled 156 * PARAMETERS: event - The fixed event to be enabled
157 * Flags - Reserved 157 * flags - Reserved
158 * 158 *
159 * RETURN: Status 159 * RETURN: Status
160 * 160 *
@@ -265,7 +265,7 @@ ACPI_EXPORT_SYMBOL(acpi_disable_event)
265 * 265 *
266 * FUNCTION: acpi_clear_event 266 * FUNCTION: acpi_clear_event
267 * 267 *
268 * PARAMETERS: Event - The fixed event to be cleared 268 * PARAMETERS: event - The fixed event to be cleared
269 * 269 *
270 * RETURN: Status 270 * RETURN: Status
271 * 271 *
@@ -301,7 +301,7 @@ ACPI_EXPORT_SYMBOL(acpi_clear_event)
301 * 301 *
302 * FUNCTION: acpi_get_event_status 302 * FUNCTION: acpi_get_event_status
303 * 303 *
304 * PARAMETERS: Event - The fixed event 304 * PARAMETERS: event - The fixed event
305 * event_status - Where the current status of the event will 305 * event_status - Where the current status of the event will
306 * be returned 306 * be returned
307 * 307 *
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 86f9b343ebd4..6affbdb4b88c 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -197,12 +197,12 @@ acpi_status
197acpi_setup_gpe_for_wake(acpi_handle wake_device, 197acpi_setup_gpe_for_wake(acpi_handle wake_device,
198 acpi_handle gpe_device, u32 gpe_number) 198 acpi_handle gpe_device, u32 gpe_number)
199{ 199{
200 acpi_status status = AE_BAD_PARAMETER; 200 acpi_status status;
201 struct acpi_gpe_event_info *gpe_event_info; 201 struct acpi_gpe_event_info *gpe_event_info;
202 struct acpi_namespace_node *device_node; 202 struct acpi_namespace_node *device_node;
203 struct acpi_gpe_notify_object *notify_object; 203 struct acpi_gpe_notify_info *notify;
204 struct acpi_gpe_notify_info *new_notify;
204 acpi_cpu_flags flags; 205 acpi_cpu_flags flags;
205 u8 gpe_dispatch_mask;
206 206
207 ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake); 207 ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
208 208
@@ -216,63 +216,95 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
216 return_ACPI_STATUS(AE_BAD_PARAMETER); 216 return_ACPI_STATUS(AE_BAD_PARAMETER);
217 } 217 }
218 218
219 /* Handle root object case */
220
221 if (wake_device == ACPI_ROOT_OBJECT) {
222 device_node = acpi_gbl_root_node;
223 } else {
224 device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
225 }
226
227 /* Validate WakeDevice is of type Device */
228
229 if (device_node->type != ACPI_TYPE_DEVICE) {
230 return_ACPI_STATUS (AE_BAD_PARAMETER);
231 }
232
233 /*
234 * Allocate a new notify object up front, in case it is needed.
235 * Memory allocation while holding a spinlock is a big no-no
236 * on some hosts.
237 */
238 new_notify = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_notify_info));
239 if (!new_notify) {
240 return_ACPI_STATUS(AE_NO_MEMORY);
241 }
242
219 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 243 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
220 244
221 /* Ensure that we have a valid GPE number */ 245 /* Ensure that we have a valid GPE number */
222 246
223 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 247 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
224 if (!gpe_event_info) { 248 if (!gpe_event_info) {
249 status = AE_BAD_PARAMETER;
225 goto unlock_and_exit; 250 goto unlock_and_exit;
226 } 251 }
227 252
228 if (wake_device == ACPI_ROOT_OBJECT) {
229 goto out;
230 }
231
232 /* 253 /*
233 * If there is no method or handler for this GPE, then the 254 * If there is no method or handler for this GPE, then the
234 * wake_device will be notified whenever this GPE fires (aka 255 * wake_device will be notified whenever this GPE fires. This is
235 * "implicit notify") Note: The GPE is assumed to be 256 * known as an "implicit notify". Note: The GPE is assumed to be
236 * level-triggered (for windows compatibility). 257 * level-triggered (for windows compatibility).
237 */ 258 */
238 gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK; 259 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
239 if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE 260 ACPI_GPE_DISPATCH_NONE) {
240 && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) { 261 /*
241 goto out; 262 * This is the first device for implicit notify on this GPE.
242 } 263 * Just set the flags here, and enter the NOTIFY block below.
243 264 */
244 /* Validate wake_device is of type Device */ 265 gpe_event_info->flags =
245 266 (ACPI_GPE_DISPATCH_NOTIFY | ACPI_GPE_LEVEL_TRIGGERED);
246 device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
247 if (device_node->type != ACPI_TYPE_DEVICE) {
248 goto unlock_and_exit;
249 } 267 }
250 268
251 if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) { 269 /*
252 gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY | 270 * If we already have an implicit notify on this GPE, add
253 ACPI_GPE_LEVEL_TRIGGERED); 271 * this device to the notify list.
254 gpe_event_info->dispatch.device.node = device_node; 272 */
255 gpe_event_info->dispatch.device.next = NULL; 273 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
256 } else { 274 ACPI_GPE_DISPATCH_NOTIFY) {
257 /* There are multiple devices to notify implicitly. */ 275
258 276 /* Ensure that the device is not already in the list */
259 notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object)); 277
260 if (!notify_object) { 278 notify = gpe_event_info->dispatch.notify_list;
261 status = AE_NO_MEMORY; 279 while (notify) {
262 goto unlock_and_exit; 280 if (notify->device_node == device_node) {
281 status = AE_ALREADY_EXISTS;
282 goto unlock_and_exit;
283 }
284 notify = notify->next;
263 } 285 }
264 286
265 notify_object->node = device_node; 287 /* Add this device to the notify list for this GPE */
266 notify_object->next = gpe_event_info->dispatch.device.next; 288
267 gpe_event_info->dispatch.device.next = notify_object; 289 new_notify->device_node = device_node;
290 new_notify->next = gpe_event_info->dispatch.notify_list;
291 gpe_event_info->dispatch.notify_list = new_notify;
292 new_notify = NULL;
268 } 293 }
269 294
270 out: 295 /* Mark the GPE as a possible wake event */
296
271 gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; 297 gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
272 status = AE_OK; 298 status = AE_OK;
273 299
274 unlock_and_exit: 300unlock_and_exit:
275 acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 301 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
302
303 /* Delete the notify object if it was not used above */
304
305 if (new_notify) {
306 ACPI_FREE(new_notify);
307 }
276 return_ACPI_STATUS(status); 308 return_ACPI_STATUS(status);
277} 309}
278ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake) 310ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake)
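Editorial note: the rework above preallocates the notify object before taking the GPE spinlock, since (as the new comment says) allocating while holding a spinlock is a big no-no on some hosts. The locked section only does the duplicate check and the link-in; an unused buffer is freed after unlock. A standalone sketch of that pattern, with lock()/unlock() and the types as placeholders rather than ACPICA API:

#include <stdlib.h>

struct notify {
    const void *device_node;
    struct notify *next;
};

static void lock(void)   { /* acpi_os_acquire_lock() in the real code */ }
static void unlock(void) { /* acpi_os_release_lock() in the real code */ }

static struct notify *notify_list;

int add_implicit_notify(const void *device_node)
{
    struct notify *walk;
    struct notify *new_notify = calloc(1, sizeof(*new_notify));

    if (!new_notify)
        return -1;                       /* AE_NO_MEMORY */

    lock();                              /* no allocation from here on */

    for (walk = notify_list; walk; walk = walk->next) {
        if (walk->device_node == device_node)
            break;                       /* AE_ALREADY_EXISTS */
    }

    if (!walk) {                         /* not a duplicate: link at head */
        new_notify->device_node = device_node;
        new_notify->next = notify_list;
        notify_list = new_notify;
        new_notify = NULL;               /* ownership transferred */
    }

    unlock();

    free(new_notify);                    /* no-op if it was consumed */
    return walk ? -2 : 0;
}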
@@ -283,7 +315,7 @@ ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake)
283 * 315 *
284 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 316 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
285 * gpe_number - GPE level within the GPE block 317 * gpe_number - GPE level within the GPE block
286 * Action - Enable or Disable 318 * action - Enable or Disable
287 * 319 *
288 * RETURN: Status 320 * RETURN: Status
289 * 321 *
@@ -508,7 +540,7 @@ ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes)
508 * FUNCTION: acpi_install_gpe_block 540 * FUNCTION: acpi_install_gpe_block
509 * 541 *
510 * PARAMETERS: gpe_device - Handle to the parent GPE Block Device 542 * PARAMETERS: gpe_device - Handle to the parent GPE Block Device
511 * gpe_block_address - Address and space_iD 543 * gpe_block_address - Address and space_ID
512 * register_count - Number of GPE register pairs in the block 544 * register_count - Number of GPE register pairs in the block
513 * interrupt_number - H/W interrupt for the block 545 * interrupt_number - H/W interrupt for the block
514 * 546 *
@@ -653,7 +685,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
653 * 685 *
654 * FUNCTION: acpi_get_gpe_device 686 * FUNCTION: acpi_get_gpe_device
655 * 687 *
656 * PARAMETERS: Index - System GPE index (0-current_gpe_count) 688 * PARAMETERS: index - System GPE index (0-current_gpe_count)
657 * gpe_device - Where the parent GPE Device is returned 689 * gpe_device - Where the parent GPE Device is returned
658 * 690 *
659 * RETURN: Status 691 * RETURN: Status
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 6019208cd4b6..96b412d03950 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -55,11 +55,11 @@ ACPI_MODULE_NAME("evxfregn")
55 * 55 *
56 * FUNCTION: acpi_install_address_space_handler 56 * FUNCTION: acpi_install_address_space_handler
57 * 57 *
58 * PARAMETERS: Device - Handle for the device 58 * PARAMETERS: device - Handle for the device
59 * space_id - The address space ID 59 * space_id - The address space ID
60 * Handler - Address of the handler 60 * handler - Address of the handler
61 * Setup - Address of the setup function 61 * setup - Address of the setup function
62 * Context - Value passed to the handler on each access 62 * context - Value passed to the handler on each access
63 * 63 *
64 * RETURN: Status 64 * RETURN: Status
65 * 65 *
@@ -112,16 +112,16 @@ acpi_install_address_space_handler(acpi_handle device,
112 } 112 }
113 113
114 /* 114 /*
115 * For the default space_iDs, (the IDs for which there are default region handlers 115 * For the default space_IDs, (the IDs for which there are default region handlers
116 * installed) Only execute the _REG methods if the global initialization _REG 116 * installed) Only execute the _REG methods if the global initialization _REG
117 * methods have already been run (via acpi_initialize_objects). In other words, 117 * methods have already been run (via acpi_initialize_objects). In other words,
118 * we will defer the execution of the _REG methods for these space_iDs until 118 * we will defer the execution of the _REG methods for these space_IDs until
119 * execution of acpi_initialize_objects. This is done because we need the handlers 119 * execution of acpi_initialize_objects. This is done because we need the handlers
120 * for the default spaces (mem/io/pci/table) to be installed before we can run 120 * for the default spaces (mem/io/pci/table) to be installed before we can run
121 * any control methods (or _REG methods). There is known BIOS code that depends 121 * any control methods (or _REG methods). There is known BIOS code that depends
122 * on this. 122 * on this.
123 * 123 *
124 * For all other space_iDs, we can safely execute the _REG methods immediately. 124 * For all other space_IDs, we can safely execute the _REG methods immediately.
125 * This means that for IDs like embedded_controller, this function should be called 125 * This means that for IDs like embedded_controller, this function should be called
126 * only after acpi_enable_subsystem has been called. 126 * only after acpi_enable_subsystem has been called.
127 */ 127 */
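Editorial note: the comment above prescribes an ordering for callers: default-space _REG execution is deferred into acpi_initialize_objects, while non-default handlers (embedded controller, for example) must be installed afterwards. A hedged host-side sketch of that ordering; the ACPICA entry points named here are real, but ec_device, my_ec_handler and my_ec_setup are illustrative placeholders and error handling is elided.

#include <acpi/acpi.h>

acpi_status init_and_install_ec_handler(acpi_handle ec_device,
                                        acpi_adr_space_handler my_ec_handler,
                                        acpi_adr_space_setup my_ec_setup)
{
    /* Bring the subsystem up; the default space handlers are installed
     * and their deferred _REG methods run inside these calls. */
    acpi_initialize_subsystem();
    acpi_initialize_tables(NULL, 16, FALSE);
    acpi_load_tables();
    acpi_enable_subsystem(ACPI_FULL_INITIALIZATION);
    acpi_initialize_objects(ACPI_FULL_INITIALIZATION);

    /* Only now is it safe to install a non-default handler such as the
     * EC one; its _REG methods execute immediately at this point. */
    return acpi_install_address_space_handler(ec_device,
                                              ACPI_ADR_SPACE_EC,
                                              my_ec_handler,
                                              my_ec_setup, NULL);
}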
@@ -157,9 +157,9 @@ ACPI_EXPORT_SYMBOL(acpi_install_address_space_handler)
157 * 157 *
158 * FUNCTION: acpi_remove_address_space_handler 158 * FUNCTION: acpi_remove_address_space_handler
159 * 159 *
160 * PARAMETERS: Device - Handle for the device 160 * PARAMETERS: device - Handle for the device
161 * space_id - The address space ID 161 * space_id - The address space ID
162 * Handler - Address of the handler 162 * handler - Address of the handler
163 * 163 *
164 * RETURN: Status 164 * RETURN: Status
165 * 165 *
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index c86d44e41bc8..16219bde48da 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -66,7 +66,7 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc,
66 * 66 *
67 * FUNCTION: acpi_ex_add_table 67 * FUNCTION: acpi_ex_add_table
68 * 68 *
69 * PARAMETERS: Table - Pointer to raw table 69 * PARAMETERS: table - Pointer to raw table
70 * parent_node - Where to load the table (scope) 70 * parent_node - Where to load the table (scope)
71 * ddb_handle - Where to return the table handle. 71 * ddb_handle - Where to return the table handle.
72 * 72 *
@@ -276,8 +276,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
276 * FUNCTION: acpi_ex_region_read 276 * FUNCTION: acpi_ex_region_read
277 * 277 *
278 * PARAMETERS: obj_desc - Region descriptor 278 * PARAMETERS: obj_desc - Region descriptor
279 * Length - Number of bytes to read 279 * length - Number of bytes to read
280 * Buffer - Pointer to where to put the data 280 * buffer - Pointer to where to put the data
281 * 281 *
282 * RETURN: Status 282 * RETURN: Status
283 * 283 *
@@ -318,7 +318,7 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
318 * 318 *
319 * PARAMETERS: obj_desc - Region or Buffer/Field where the table will be 319 * PARAMETERS: obj_desc - Region or Buffer/Field where the table will be
320 * obtained 320 * obtained
321 * Target - Where a handle to the table will be stored 321 * target - Where a handle to the table will be stored
322 * walk_state - Current state 322 * walk_state - Current state
323 * 323 *
324 * RETURN: Status 324 * RETURN: Status
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index e385436bd424..bfb062e4c4b4 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -60,7 +60,7 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 max_length);
60 * PARAMETERS: obj_desc - Object to be converted. Must be an 60 * PARAMETERS: obj_desc - Object to be converted. Must be an
61 * Integer, Buffer, or String 61 * Integer, Buffer, or String
62 * result_desc - Where the new Integer object is returned 62 * result_desc - Where the new Integer object is returned
63 * Flags - Used for string conversion 63 * flags - Used for string conversion
64 * 64 *
65 * RETURN: Status 65 * RETURN: Status
66 * 66 *
@@ -272,9 +272,9 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
272 * 272 *
273 * FUNCTION: acpi_ex_convert_to_ascii 273 * FUNCTION: acpi_ex_convert_to_ascii
274 * 274 *
275 * PARAMETERS: Integer - Value to be converted 275 * PARAMETERS: integer - Value to be converted
276 * Base - ACPI_STRING_DECIMAL or ACPI_STRING_HEX 276 * base - ACPI_STRING_DECIMAL or ACPI_STRING_HEX
277 * String - Where the string is returned 277 * string - Where the string is returned
278 * data_width - Size of data item to be converted, in bytes 278 * data_width - Size of data item to be converted, in bytes
279 * 279 *
280 * RETURN: Actual string length 280 * RETURN: Actual string length
@@ -385,7 +385,7 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width)
385 * PARAMETERS: obj_desc - Object to be converted. Must be an 385 * PARAMETERS: obj_desc - Object to be converted. Must be an
386 * Integer, Buffer, or String 386 * Integer, Buffer, or String
387 * result_desc - Where the string object is returned 387 * result_desc - Where the string object is returned
388 * Type - String flags (base and conversion type) 388 * type - String flags (base and conversion type)
389 * 389 *
390 * RETURN: Status 390 * RETURN: Status
391 * 391 *
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 3f5bc998c1cb..691d4763102c 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -369,7 +369,7 @@ acpi_ex_create_region(u8 * aml_start,
369 * 369 *
370 * DESCRIPTION: Create a new processor object and populate the fields 370 * DESCRIPTION: Create a new processor object and populate the fields
371 * 371 *
372 * Processor (Name[0], cpu_iD[1], pblock_addr[2], pblock_length[3]) 372 * Processor (Name[0], cpu_ID[1], pblock_addr[2], pblock_length[3])
373 * 373 *
374 ******************************************************************************/ 374 ******************************************************************************/
375 375
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index e211e9c19215..bc5b9a6a1316 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -54,8 +54,8 @@ ACPI_MODULE_NAME("exdebug")
54 * FUNCTION: acpi_ex_do_debug_object 54 * FUNCTION: acpi_ex_do_debug_object
55 * 55 *
56 * PARAMETERS: source_desc - Object to be output to "Debug Object" 56 * PARAMETERS: source_desc - Object to be output to "Debug Object"
57 * Level - Indentation level (used for packages) 57 * level - Indentation level (used for packages)
58 * Index - Current package element, zero if not pkg 58 * index - Current package element, zero if not pkg
59 * 59 *
60 * RETURN: None 60 * RETURN: None
61 * 61 *
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 2a6ac0a3bc1e..213c081776fc 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -109,9 +109,9 @@ static struct acpi_exdump_info acpi_ex_dump_package[5] = {
109static struct acpi_exdump_info acpi_ex_dump_device[4] = { 109static struct acpi_exdump_info acpi_ex_dump_device[4] = {
110 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_device), NULL}, 110 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_device), NULL},
111 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.handler), "Handler"}, 111 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.handler), "Handler"},
112 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.system_notify), 112 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.notify_list[0]),
113 "System Notify"}, 113 "System Notify"},
114 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.device_notify), 114 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.notify_list[1]),
115 "Device Notify"} 115 "Device Notify"}
116}; 116};
117 117
@@ -158,9 +158,9 @@ static struct acpi_exdump_info acpi_ex_dump_power[5] = {
158 "System Level"}, 158 "System Level"},
159 {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(power_resource.resource_order), 159 {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(power_resource.resource_order),
160 "Resource Order"}, 160 "Resource Order"},
161 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.system_notify), 161 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.notify_list[0]),
162 "System Notify"}, 162 "System Notify"},
163 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.device_notify), 163 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.notify_list[1]),
164 "Device Notify"} 164 "Device Notify"}
165}; 165};
166 166
@@ -169,18 +169,18 @@ static struct acpi_exdump_info acpi_ex_dump_processor[7] = {
169 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.proc_id), "Processor ID"}, 169 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.proc_id), "Processor ID"},
170 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.length), "Length"}, 170 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.length), "Length"},
171 {ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(processor.address), "Address"}, 171 {ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(processor.address), "Address"},
172 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.system_notify), 172 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.notify_list[0]),
173 "System Notify"}, 173 "System Notify"},
174 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.device_notify), 174 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.notify_list[1]),
175 "Device Notify"}, 175 "Device Notify"},
176 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.handler), "Handler"} 176 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.handler), "Handler"}
177}; 177};
178 178
179static struct acpi_exdump_info acpi_ex_dump_thermal[4] = { 179static struct acpi_exdump_info acpi_ex_dump_thermal[4] = {
180 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_thermal), NULL}, 180 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_thermal), NULL},
181 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.system_notify), 181 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.notify_list[0]),
182 "System Notify"}, 182 "System Notify"},
183 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.device_notify), 183 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.notify_list[1]),
184 "Device Notify"}, 184 "Device Notify"},
185 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.handler), "Handler"} 185 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.handler), "Handler"}
186}; 186};
@@ -241,10 +241,15 @@ static struct acpi_exdump_info acpi_ex_dump_address_handler[6] = {
241 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.context), "Context"} 241 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.context), "Context"}
242}; 242};
243 243
244static struct acpi_exdump_info acpi_ex_dump_notify[3] = { 244static struct acpi_exdump_info acpi_ex_dump_notify[7] = {
245 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_notify), NULL}, 245 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_notify), NULL},
246 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.node), "Node"}, 246 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.node), "Node"},
247 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.context), "Context"} 247 {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(notify.handler_type), "Handler Type"},
248 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.handler), "Handler"},
249 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.context), "Context"},
250 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.next[0]),
251 "Next System Notify"},
252 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.next[1]), "Next Device Notify"}
248}; 253};
249 254
250/* Miscellaneous tables */ 255/* Miscellaneous tables */
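Editorial note: the acpi_ex_dump_* tables above are data-driven: each entry pairs a field-type tag with an offset into the object, and one generic loop formats every field. A standalone sketch of the technique using offsetof(); the names here are illustrative, not the ACPICA ACPI_EXD_* machinery.

#include <stdio.h>
#include <stddef.h>

struct notify_obj {
    void *node;
    unsigned int handler_type;
    void *handler;
    void *context;
};

enum field_kind { F_PTR, F_U32 };

struct dump_info {
    enum field_kind kind;
    size_t offset;
    const char *name;
};

static const struct dump_info notify_dump[] = {
    { F_PTR, offsetof(struct notify_obj, node),         "Node" },
    { F_U32, offsetof(struct notify_obj, handler_type), "Handler Type" },
    { F_PTR, offsetof(struct notify_obj, handler),      "Handler" },
    { F_PTR, offsetof(struct notify_obj, context),      "Context" },
};

static void dump_object(const void *obj)
{
    size_t i;

    for (i = 0; i < sizeof(notify_dump) / sizeof(notify_dump[0]); i++) {
        const char *base = (const char *)obj + notify_dump[i].offset;

        if (notify_dump[i].kind == F_PTR)
            printf("%-15s: %p\n", notify_dump[i].name,
                   *(void *const *)base);
        else
            printf("%-15s: %u\n", notify_dump[i].name,
                   *(const unsigned int *)base);
    }
}

int main(void)
{
    struct notify_obj obj = { &obj, 1, NULL, NULL };

    dump_object(&obj);
    return 0;
}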
@@ -318,7 +323,7 @@ static struct acpi_exdump_info *acpi_ex_dump_info[] = {
318 * FUNCTION: acpi_ex_dump_object 323 * FUNCTION: acpi_ex_dump_object
319 * 324 *
320 * PARAMETERS: obj_desc - Descriptor to dump 325 * PARAMETERS: obj_desc - Descriptor to dump
321 * Info - Info table corresponding to this object 326 * info - Info table corresponding to this object
322 * type 327 * type
323 * 328 *
324 * RETURN: None 329 * RETURN: None
@@ -444,7 +449,7 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
444 * FUNCTION: acpi_ex_dump_operand 449 * FUNCTION: acpi_ex_dump_operand
445 * 450 *
446 * PARAMETERS: *obj_desc - Pointer to entry to be dumped 451 * PARAMETERS: *obj_desc - Pointer to entry to be dumped
447 * Depth - Current nesting depth 452 * depth - Current nesting depth
448 * 453 *
449 * RETURN: None 454 * RETURN: None
450 * 455 *
@@ -726,7 +731,7 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
726 * 731 *
727 * FUNCTION: acpi_ex_dump_operands 732 * FUNCTION: acpi_ex_dump_operands
728 * 733 *
729 * PARAMETERS: Operands - A list of Operand objects 734 * PARAMETERS: operands - A list of Operand objects
730 * opcode_name - AML opcode name 735 * opcode_name - AML opcode name
731 * num_operands - Operand count for this opcode 736 * num_operands - Operand count for this opcode
732 * 737 *
@@ -769,8 +774,8 @@ acpi_ex_dump_operands(union acpi_operand_object **operands,
769 * 774 *
770 * FUNCTION: acpi_ex_out* functions 775 * FUNCTION: acpi_ex_out* functions
771 * 776 *
772 * PARAMETERS: Title - Descriptive text 777 * PARAMETERS: title - Descriptive text
773 * Value - Value to be displayed 778 * value - Value to be displayed
774 * 779 *
775 * DESCRIPTION: Object dump output formatting functions. These functions 780 * DESCRIPTION: Object dump output formatting functions. These functions
776 * reduce the number of format strings required and keeps them 781 * reduce the number of format strings required and keeps them
@@ -792,8 +797,8 @@ static void acpi_ex_out_pointer(char *title, void *value)
792 * 797 *
793 * FUNCTION: acpi_ex_dump_namespace_node 798 * FUNCTION: acpi_ex_dump_namespace_node
794 * 799 *
795 * PARAMETERS: Node - Descriptor to dump 800 * PARAMETERS: node - Descriptor to dump
796 * Flags - Force display if TRUE 801 * flags - Force display if TRUE
797 * 802 *
798 * DESCRIPTION: Dumps the members of the given Node 803
799 * 804 *
@@ -825,7 +830,7 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
825 * 830 *
826 * FUNCTION: acpi_ex_dump_reference_obj 831 * FUNCTION: acpi_ex_dump_reference_obj
827 * 832 *
828 * PARAMETERS: Object - Descriptor to dump 833 * PARAMETERS: object - Descriptor to dump
829 * 834 *
830 * DESCRIPTION: Dumps a reference object 835 * DESCRIPTION: Dumps a reference object
831 * 836 *
@@ -882,8 +887,8 @@ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
882 * FUNCTION: acpi_ex_dump_package_obj 887 * FUNCTION: acpi_ex_dump_package_obj
883 * 888 *
884 * PARAMETERS: obj_desc - Descriptor to dump 889 * PARAMETERS: obj_desc - Descriptor to dump
885 * Level - Indentation Level 890 * level - Indentation Level
886 * Index - Package index for this object 891 * index - Package index for this object
887 * 892 *
888 * DESCRIPTION: Dumps the elements of the package 893 * DESCRIPTION: Dumps the elements of the package
889 * 894 *
@@ -926,9 +931,7 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
926 case ACPI_TYPE_STRING: 931 case ACPI_TYPE_STRING:
927 932
928 acpi_os_printf("[String] Value: "); 933 acpi_os_printf("[String] Value: ");
929 for (i = 0; i < obj_desc->string.length; i++) { 934 acpi_ut_print_string(obj_desc->string.pointer, ACPI_UINT8_MAX);
930 acpi_os_printf("%c", obj_desc->string.pointer[i]);
931 }
932 acpi_os_printf("\n"); 935 acpi_os_printf("\n");
933 break; 936 break;
934 937
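Editorial note: the hunk above replaces a per-character print loop with a single acpi_ut_print_string() call, which also handles length limiting and escaping of non-printable bytes. A minimal sketch of what such a helper typically does, illustrative only:

#include <stdio.h>
#include <ctype.h>

static void print_escaped(const char *s, unsigned int max_len)
{
    unsigned int i;

    putchar('"');
    for (i = 0; s[i] && i < max_len; i++) {
        if (isprint((unsigned char)s[i]))
            putchar(s[i]);                        /* printable as-is */
        else
            printf("\\x%02X", (unsigned char)s[i]); /* escape the rest */
    }
    putchar('"');
}

int main(void)
{
    print_escaped("AB\tC", 16);
    putchar('\n');
    return 0;
}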
@@ -977,7 +980,7 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
977 * FUNCTION: acpi_ex_dump_object_descriptor 980 * FUNCTION: acpi_ex_dump_object_descriptor
978 * 981 *
979 * PARAMETERS: obj_desc - Descriptor to dump 982 * PARAMETERS: obj_desc - Descriptor to dump
980 * Flags - Force display if TRUE 983 * flags - Force display if TRUE
981 * 984 *
982 * DESCRIPTION: Dumps the members of the object descriptor given. 985 * DESCRIPTION: Dumps the members of the object descriptor given.
983 * 986 *
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 149de45fdadd..a7784152ed30 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -222,9 +222,9 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
222 * PARAMETERS: obj_desc - Field to be read 222 * PARAMETERS: obj_desc - Field to be read
223 * field_datum_byte_offset - Byte offset of this datum within the 223 * field_datum_byte_offset - Byte offset of this datum within the
224 * parent field 224 * parent field
225 * Value - Where to store value (must at least 225 * value - Where to store value (must at least
226 * 64 bits) 226 * 64 bits)
227 * Function - Read or Write flag plus other region- 227 * function - Read or Write flag plus other region-
228 * dependent flags 228 * dependent flags
229 * 229 *
230 * RETURN: Status 230 * RETURN: Status
@@ -315,7 +315,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
315 * FUNCTION: acpi_ex_register_overflow 315 * FUNCTION: acpi_ex_register_overflow
316 * 316 *
317 * PARAMETERS: obj_desc - Register(Field) to be written 317 * PARAMETERS: obj_desc - Register(Field) to be written
318 * Value - Value to be stored 318 * value - Value to be stored
319 * 319 *
320 * RETURN: TRUE if value overflows the field, FALSE otherwise 320 * RETURN: TRUE if value overflows the field, FALSE otherwise
321 * 321 *
@@ -365,7 +365,7 @@ acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
365 * PARAMETERS: obj_desc - Field to be read 365 * PARAMETERS: obj_desc - Field to be read
366 * field_datum_byte_offset - Byte offset of this datum within the 366 * field_datum_byte_offset - Byte offset of this datum within the
367 * parent field 367 * parent field
368 * Value - Where to store value (must be 64 bits) 368 * value - Where to store value (must be 64 bits)
369 * read_write - Read or Write flag 369 * read_write - Read or Write flag
370 * 370 *
371 * RETURN: Status 371 * RETURN: Status
@@ -574,7 +574,7 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
574 * FUNCTION: acpi_ex_write_with_update_rule 574 * FUNCTION: acpi_ex_write_with_update_rule
575 * 575 *
576 * PARAMETERS: obj_desc - Field to be written 576 * PARAMETERS: obj_desc - Field to be written
577 * Mask - bitmask within field datum 577 * mask - bitmask within field datum
578 * field_value - Value to write 578 * field_value - Value to write
579 * field_datum_byte_offset - Offset of datum within field 579 * field_datum_byte_offset - Offset of datum within field
580 * 580 *
@@ -678,7 +678,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
678 * FUNCTION: acpi_ex_extract_from_field 678 * FUNCTION: acpi_ex_extract_from_field
679 * 679 *
680 * PARAMETERS: obj_desc - Field to be read 680 * PARAMETERS: obj_desc - Field to be read
681 * Buffer - Where to store the field data 681 * buffer - Where to store the field data
682 * buffer_length - Length of Buffer 682 * buffer_length - Length of Buffer
683 * 683 *
684 * RETURN: Status 684 * RETURN: Status
@@ -823,7 +823,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
823 * FUNCTION: acpi_ex_insert_into_field 823 * FUNCTION: acpi_ex_insert_into_field
824 * 824 *
825 * PARAMETERS: obj_desc - Field to be written 825 * PARAMETERS: obj_desc - Field to be written
826 * Buffer - Data to be written 826 * buffer - Data to be written
827 * buffer_length - Length of Buffer 827 * buffer_length - Length of Buffer
828 * 828 *
829 * RETURN: Status 829 * RETURN: Status
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 0a0893310348..271c0c57ea10 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -144,8 +144,8 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
144 * 144 *
145 * FUNCTION: acpi_ex_concat_template 145 * FUNCTION: acpi_ex_concat_template
146 * 146 *
147 * PARAMETERS: Operand0 - First source object 147 * PARAMETERS: operand0 - First source object
148 * Operand1 - Second source object 148 * operand1 - Second source object
149 * actual_return_desc - Where to place the return object 149 * actual_return_desc - Where to place the return object
150 * walk_state - Current walk state 150 * walk_state - Current walk state
151 * 151 *
@@ -229,8 +229,8 @@ acpi_ex_concat_template(union acpi_operand_object *operand0,
229 * 229 *
230 * FUNCTION: acpi_ex_do_concatenate 230 * FUNCTION: acpi_ex_do_concatenate
231 * 231 *
232 * PARAMETERS: Operand0 - First source object 232 * PARAMETERS: operand0 - First source object
233 * Operand1 - Second source object 233 * operand1 - Second source object
234 * actual_return_desc - Where to place the return object 234 * actual_return_desc - Where to place the return object
235 * walk_state - Current walk state 235 * walk_state - Current walk state
236 * 236 *
@@ -397,9 +397,9 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
397 * 397 *
398 * FUNCTION: acpi_ex_do_math_op 398 * FUNCTION: acpi_ex_do_math_op
399 * 399 *
400 * PARAMETERS: Opcode - AML opcode 400 * PARAMETERS: opcode - AML opcode
401 * Integer0 - Integer operand #0 401 * integer0 - Integer operand #0
402 * Integer1 - Integer operand #1 402 * integer1 - Integer operand #1
403 * 403 *
404 * RETURN: Integer result of the operation 404 * RETURN: Integer result of the operation
405 * 405 *
@@ -479,9 +479,9 @@ u64 acpi_ex_do_math_op(u16 opcode, u64 integer0, u64 integer1)
479 * 479 *
480 * FUNCTION: acpi_ex_do_logical_numeric_op 480 * FUNCTION: acpi_ex_do_logical_numeric_op
481 * 481 *
482 * PARAMETERS: Opcode - AML opcode 482 * PARAMETERS: opcode - AML opcode
483 * Integer0 - Integer operand #0 483 * integer0 - Integer operand #0
484 * Integer1 - Integer operand #1 484 * integer1 - Integer operand #1
485 * logical_result - TRUE/FALSE result of the operation 485 * logical_result - TRUE/FALSE result of the operation
486 * 486 *
487 * RETURN: Status 487 * RETURN: Status
@@ -534,9 +534,9 @@ acpi_ex_do_logical_numeric_op(u16 opcode,
534 * 534 *
535 * FUNCTION: acpi_ex_do_logical_op 535 * FUNCTION: acpi_ex_do_logical_op
536 * 536 *
537 * PARAMETERS: Opcode - AML opcode 537 * PARAMETERS: opcode - AML opcode
538 * Operand0 - operand #0 538 * operand0 - operand #0
539 * Operand1 - operand #1 539 * operand1 - operand #1
540 * logical_result - TRUE/FALSE result of the operation 540 * logical_result - TRUE/FALSE result of the operation
541 * 541 *
542 * RETURN: Status 542 * RETURN: Status
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 60933e9dc3c0..bcceda5be9e3 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -102,7 +102,7 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
102 * FUNCTION: acpi_ex_link_mutex 102 * FUNCTION: acpi_ex_link_mutex
103 * 103 *
104 * PARAMETERS: obj_desc - The mutex to be linked 104 * PARAMETERS: obj_desc - The mutex to be linked
105 * Thread - Current executing thread object 105 * thread - Current executing thread object
106 * 106 *
107 * RETURN: None 107 * RETURN: None
108 * 108 *
@@ -138,7 +138,7 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
138 * 138 *
139 * FUNCTION: acpi_ex_acquire_mutex_object 139 * FUNCTION: acpi_ex_acquire_mutex_object
140 * 140 *
141 * PARAMETERS: Timeout - Timeout in milliseconds 141 * PARAMETERS: timeout - Timeout in milliseconds
142 * obj_desc - Mutex object 142 * obj_desc - Mutex object
143 * thread_id - Current thread state 143 * thread_id - Current thread state
144 * 144 *
@@ -443,7 +443,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
443 * 443 *
444 * FUNCTION: acpi_ex_release_all_mutexes 444 * FUNCTION: acpi_ex_release_all_mutexes
445 * 445 *
446 * PARAMETERS: Thread - Current executing thread object 446 * PARAMETERS: thread - Current executing thread object
447 * 447 *
448 * RETURN: Status 448 * RETURN: Status
449 * 449 *
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 30157f5a12d7..81eca60d2748 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -391,12 +391,12 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
391 * 391 *
392 * FUNCTION: acpi_ex_prep_field_value 392 * FUNCTION: acpi_ex_prep_field_value
393 * 393 *
394 * PARAMETERS: Info - Contains all field creation info 394 * PARAMETERS: info - Contains all field creation info
395 * 395 *
396 * RETURN: Status 396 * RETURN: Status
397 * 397 *
398 * DESCRIPTION: Construct a union acpi_operand_object of type def_field and 398 * DESCRIPTION: Construct an object of type union acpi_operand_object with a
399 * connect it to the parent Node. 399 * subtype of def_field and connect it to the parent Node.
400 * 400 *
401 ******************************************************************************/ 401 ******************************************************************************/
402 402
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 12d51df6d3bf..1f1ce0c3d2f8 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -53,10 +53,10 @@ ACPI_MODULE_NAME("exregion")
53 * 53 *
54 * FUNCTION: acpi_ex_system_memory_space_handler 54 * FUNCTION: acpi_ex_system_memory_space_handler
55 * 55 *
56 * PARAMETERS: Function - Read or Write operation 56 * PARAMETERS: function - Read or Write operation
57 * Address - Where in the space to read or write 57 * address - Where in the space to read or write
58 * bit_width - Field width in bits (8, 16, or 32) 58 * bit_width - Field width in bits (8, 16, or 32)
59 * Value - Pointer to in or out value 59 * value - Pointer to in or out value
60 * handler_context - Pointer to Handler's context 60 * handler_context - Pointer to Handler's context
61 * region_context - Pointer to context specific to the 61 * region_context - Pointer to context specific to the
62 * accessed region 62 * accessed region
@@ -270,10 +270,10 @@ acpi_ex_system_memory_space_handler(u32 function,
270 * 270 *
271 * FUNCTION: acpi_ex_system_io_space_handler 271 * FUNCTION: acpi_ex_system_io_space_handler
272 * 272 *
273 * PARAMETERS: Function - Read or Write operation 273 * PARAMETERS: function - Read or Write operation
274 * Address - Where in the space to read or write 274 * address - Where in the space to read or write
275 * bit_width - Field width in bits (8, 16, or 32) 275 * bit_width - Field width in bits (8, 16, or 32)
276 * Value - Pointer to in or out value 276 * value - Pointer to in or out value
277 * handler_context - Pointer to Handler's context 277 * handler_context - Pointer to Handler's context
278 * region_context - Pointer to context specific to the 278 * region_context - Pointer to context specific to the
279 * accessed region 279 * accessed region
@@ -329,10 +329,10 @@ acpi_ex_system_io_space_handler(u32 function,
329 * 329 *
330 * FUNCTION: acpi_ex_pci_config_space_handler 330 * FUNCTION: acpi_ex_pci_config_space_handler
331 * 331 *
332 * PARAMETERS: Function - Read or Write operation 332 * PARAMETERS: function - Read or Write operation
333 * Address - Where in the space to read or write 333 * address - Where in the space to read or write
334 * bit_width - Field width in bits (8, 16, or 32) 334 * bit_width - Field width in bits (8, 16, or 32)
335 * Value - Pointer to in or out value 335 * value - Pointer to in or out value
336 * handler_context - Pointer to Handler's context 336 * handler_context - Pointer to Handler's context
337 * region_context - Pointer to context specific to the 337 * region_context - Pointer to context specific to the
338 * accessed region 338 * accessed region
@@ -365,7 +365,7 @@ acpi_ex_pci_config_space_handler(u32 function,
365 * pci_function is the PCI device function number 365 * pci_function is the PCI device function number
366 * pci_register is the Config space register range 0-255 bytes 366 * pci_register is the Config space register range 0-255 bytes
367 * 367 *
368 * Value - input value for write, output address for read 368 * value - input value for write, output address for read
369 * 369 *
370 */ 370 */
371 pci_id = (struct acpi_pci_id *)region_context; 371 pci_id = (struct acpi_pci_id *)region_context;
@@ -402,10 +402,10 @@ acpi_ex_pci_config_space_handler(u32 function,
402 * 402 *
403 * FUNCTION: acpi_ex_cmos_space_handler 403 * FUNCTION: acpi_ex_cmos_space_handler
404 * 404 *
405 * PARAMETERS: Function - Read or Write operation 405 * PARAMETERS: function - Read or Write operation
406 * Address - Where in the space to read or write 406 * address - Where in the space to read or write
407 * bit_width - Field width in bits (8, 16, or 32) 407 * bit_width - Field width in bits (8, 16, or 32)
408 * Value - Pointer to in or out value 408 * value - Pointer to in or out value
409 * handler_context - Pointer to Handler's context 409 * handler_context - Pointer to Handler's context
410 * region_context - Pointer to context specific to the 410 * region_context - Pointer to context specific to the
411 * accessed region 411 * accessed region
@@ -434,10 +434,10 @@ acpi_ex_cmos_space_handler(u32 function,
434 * 434 *
435 * FUNCTION: acpi_ex_pci_bar_space_handler 435 * FUNCTION: acpi_ex_pci_bar_space_handler
436 * 436 *
437 * PARAMETERS: Function - Read or Write operation 437 * PARAMETERS: function - Read or Write operation
438 * Address - Where in the space to read or write 438 * address - Where in the space to read or write
439 * bit_width - Field width in bits (8, 16, or 32) 439 * bit_width - Field width in bits (8, 16, or 32)
440 * Value - Pointer to in or out value 440 * value - Pointer to in or out value
441 * handler_context - Pointer to Handler's context 441 * handler_context - Pointer to Handler's context
442 * region_context - Pointer to context specific to the 442 * region_context - Pointer to context specific to the
443 * accessed region 443 * accessed region
@@ -466,10 +466,10 @@ acpi_ex_pci_bar_space_handler(u32 function,
466 * 466 *
467 * FUNCTION: acpi_ex_data_table_space_handler 467 * FUNCTION: acpi_ex_data_table_space_handler
468 * 468 *
469 * PARAMETERS: Function - Read or Write operation 469 * PARAMETERS: function - Read or Write operation
470 * Address - Where in the space to read or write 470 * address - Where in the space to read or write
471 * bit_width - Field width in bits (8, 16, or 32) 471 * bit_width - Field width in bits (8, 16, or 32)
472 * Value - Pointer to in or out value 472 * value - Pointer to in or out value
473 * handler_context - Pointer to Handler's context 473 * handler_context - Pointer to Handler's context
474 * region_context - Pointer to context specific to the 474 * region_context - Pointer to context specific to the
475 * accessed region 475 * accessed region
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 6e335dc34528..bbf40ac27585 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -147,7 +147,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
147 147
148 stack_desc = *stack_ptr; 148 stack_desc = *stack_ptr;
149 149
150 /* This is a union acpi_operand_object */ 150 /* This is an object of type union acpi_operand_object */
151 151
152 switch (stack_desc->common.type) { 152 switch (stack_desc->common.type) {
153 case ACPI_TYPE_LOCAL_REFERENCE: 153 case ACPI_TYPE_LOCAL_REFERENCE:
@@ -321,7 +321,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
321 * FUNCTION: acpi_ex_resolve_multiple 321 * FUNCTION: acpi_ex_resolve_multiple
322 * 322 *
323 * PARAMETERS: walk_state - Current state (contains AML opcode) 323 * PARAMETERS: walk_state - Current state (contains AML opcode)
324 * Operand - Starting point for resolution 324 * operand - Starting point for resolution
325 * return_type - Where the object type is returned 325 * return_type - Where the object type is returned
326 * return_desc - Where the resolved object is returned 326 * return_desc - Where the resolved object is returned
327 * 327 *
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index a67b1d925ddd..f232fbabdea8 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -113,7 +113,7 @@ acpi_ex_check_object_type(acpi_object_type type_needed,
113 * 113 *
114 * FUNCTION: acpi_ex_resolve_operands 114 * FUNCTION: acpi_ex_resolve_operands
115 * 115 *
116 * PARAMETERS: Opcode - Opcode being interpreted 116 * PARAMETERS: opcode - Opcode being interpreted
117 * stack_ptr - Pointer to the operand stack to be 117 * stack_ptr - Pointer to the operand stack to be
118 * resolved 118 * resolved
119 * walk_state - Current state 119 * walk_state - Current state
@@ -307,7 +307,7 @@ acpi_ex_resolve_operands(u16 opcode,
307 case ARGI_DEVICE_REF: 307 case ARGI_DEVICE_REF:
308 case ARGI_TARGETREF: /* Allows implicit conversion rules before store */ 308 case ARGI_TARGETREF: /* Allows implicit conversion rules before store */
309 case ARGI_FIXED_TARGET: /* No implicit conversion before store to target */ 309 case ARGI_FIXED_TARGET: /* No implicit conversion before store to target */
310 case ARGI_SIMPLE_TARGET: /* Name, Local, or Arg - no implicit conversion */ 310 case ARGI_SIMPLE_TARGET: /* Name, Local, or arg - no implicit conversion */
311 311
312 /* 312 /*
313 * Need an operand of type ACPI_TYPE_LOCAL_REFERENCE 313 * Need an operand of type ACPI_TYPE_LOCAL_REFERENCE
@@ -410,7 +410,7 @@ acpi_ex_resolve_operands(u16 opcode,
410 /* 410 /*
411 * Need an operand of type ACPI_TYPE_INTEGER, 411 * Need an operand of type ACPI_TYPE_INTEGER,
412 * But we can implicitly convert from a STRING or BUFFER 412 * But we can implicitly convert from a STRING or BUFFER
413 * Aka - "Implicit Source Operand Conversion" 413 * aka - "Implicit Source Operand Conversion"
414 */ 414 */
415 status = 415 status =
416 acpi_ex_convert_to_integer(obj_desc, stack_ptr, 16); 416 acpi_ex_convert_to_integer(obj_desc, stack_ptr, 16);
@@ -437,7 +437,7 @@ acpi_ex_resolve_operands(u16 opcode,
437 /* 437 /*
438 * Need an operand of type ACPI_TYPE_BUFFER, 438 * Need an operand of type ACPI_TYPE_BUFFER,
439 * But we can implicitly convert from a STRING or INTEGER 439 * But we can implicitly convert from a STRING or INTEGER
440 * Aka - "Implicit Source Operand Conversion" 440 * aka - "Implicit Source Operand Conversion"
441 */ 441 */
442 status = acpi_ex_convert_to_buffer(obj_desc, stack_ptr); 442 status = acpi_ex_convert_to_buffer(obj_desc, stack_ptr);
443 if (ACPI_FAILURE(status)) { 443 if (ACPI_FAILURE(status)) {
@@ -463,7 +463,7 @@ acpi_ex_resolve_operands(u16 opcode,
463 /* 463 /*
464 * Need an operand of type ACPI_TYPE_STRING, 464 * Need an operand of type ACPI_TYPE_STRING,
465 * But we can implicitly convert from a BUFFER or INTEGER 465 * But we can implicitly convert from a BUFFER or INTEGER
466 * Aka - "Implicit Source Operand Conversion" 466 * aka - "Implicit Source Operand Conversion"
467 */ 467 */
468 status = acpi_ex_convert_to_string(obj_desc, stack_ptr, 468 status = acpi_ex_convert_to_string(obj_desc, stack_ptr,
469 ACPI_IMPLICIT_CONVERT_HEX); 469 ACPI_IMPLICIT_CONVERT_HEX);
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index c6cf843cc4c9..5fffe7ab5ece 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -62,8 +62,8 @@ acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
62 * FUNCTION: acpi_ex_store 62 * FUNCTION: acpi_ex_store
63 * 63 *
64 * PARAMETERS: *source_desc - Value to be stored 64 * PARAMETERS: *source_desc - Value to be stored
65 * *dest_desc - Where to store it. Must be an NS node 65 * *dest_desc - Where to store it. Must be an NS node
66 * or a union acpi_operand_object of type 66 * or union acpi_operand_object of type
67 * Reference; 67 * Reference;
68 * walk_state - Current walk state 68 * walk_state - Current walk state
69 * 69 *
@@ -361,7 +361,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
361 * FUNCTION: acpi_ex_store_object_to_node 361 * FUNCTION: acpi_ex_store_object_to_node
362 * 362 *
363 * PARAMETERS: source_desc - Value to be stored 363 * PARAMETERS: source_desc - Value to be stored
364 * Node - Named object to receive the value 364 * node - Named object to receive the value
365 * walk_state - Current walk state 365 * walk_state - Current walk state
366 * implicit_conversion - Perform implicit conversion (yes/no) 366 * implicit_conversion - Perform implicit conversion (yes/no)
367 * 367 *
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 65a45d8335c8..53c248473547 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -110,7 +110,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
110 * NOTE: ACPI versions up to 3.0 specified that the buffer must be 110 * NOTE: ACPI versions up to 3.0 specified that the buffer must be
111 * truncated if the string is smaller than the buffer. However, "other" 111 * truncated if the string is smaller than the buffer. However, "other"
112 * implementations of ACPI never did this and thus became the de facto 112 * implementations of ACPI never did this and thus became the de facto
113 * standard. ACPI 3.0_a changes this behavior such that the buffer 113 * standard. ACPI 3.0A changes this behavior such that the buffer
114 * is no longer truncated. 114 * is no longer truncated.
115 */ 115 */
116 116
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index 191a12945226..b760641e2fc6 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -53,8 +53,8 @@ ACPI_MODULE_NAME("exsystem")
53 * 53 *
54 * FUNCTION: acpi_ex_system_wait_semaphore 54 * FUNCTION: acpi_ex_system_wait_semaphore
55 * 55 *
56 * PARAMETERS: Semaphore - Semaphore to wait on 56 * PARAMETERS: semaphore - Semaphore to wait on
57 * Timeout - Max time to wait 57 * timeout - Max time to wait
58 * 58 *
59 * RETURN: Status 59 * RETURN: Status
60 * 60 *
@@ -98,8 +98,8 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
98 * 98 *
99 * FUNCTION: acpi_ex_system_wait_mutex 99 * FUNCTION: acpi_ex_system_wait_mutex
100 * 100 *
101 * PARAMETERS: Mutex - Mutex to wait on 101 * PARAMETERS: mutex - Mutex to wait on
102 * Timeout - Max time to wait 102 * timeout - Max time to wait
103 * 103 *
104 * RETURN: Status 104 * RETURN: Status
105 * 105 *
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index eb6798ba8b59..d1ab7917eed7 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -109,7 +109,7 @@ void acpi_ex_enter_interpreter(void)
109 * 109 *
110 * DESCRIPTION: Reacquire the interpreter execution region from within the 110 * DESCRIPTION: Reacquire the interpreter execution region from within the
111 * interpreter code. Failure to enter the interpreter region is a 111 * interpreter code. Failure to enter the interpreter region is a
112 * fatal system error. Used in conjunction with 112 * fatal system error. Used in conjunction with
113 * relinquish_interpreter 113 * relinquish_interpreter
114 * 114 *
115 ******************************************************************************/ 115 ******************************************************************************/
@@ -317,8 +317,8 @@ void acpi_ex_release_global_lock(u32 field_flags)
317 * 317 *
318 * FUNCTION: acpi_ex_digits_needed 318 * FUNCTION: acpi_ex_digits_needed
319 * 319 *
320 * PARAMETERS: Value - Value to be represented 320 * PARAMETERS: value - Value to be represented
321 * Base - Base of representation 321 * base - Base of representation
322 * 322 *
323 * RETURN: The number of digits. 323 * RETURN: The number of digits.
324 * 324 *
@@ -408,7 +408,7 @@ void acpi_ex_eisa_id_to_string(char *out_string, u64 compressed_id)
408 * PARAMETERS: out_string - Where to put the converted string. At least 408 * PARAMETERS: out_string - Where to put the converted string. At least
409 * 21 bytes are needed to hold the largest 409 * 21 bytes are needed to hold the largest
410 * possible 64-bit integer. 410 * possible 64-bit integer.
411 * Value - Value to be converted 411 * value - Value to be converted
412 * 412 *
413 * RETURN: None, string 413 * RETURN: None, string
414 * 414 *
@@ -443,7 +443,7 @@ void acpi_ex_integer_to_string(char *out_string, u64 value)
443 * 443 *
444 * RETURN: TRUE if valid/supported ID. 444 * RETURN: TRUE if valid/supported ID.
445 * 445 *
446 * DESCRIPTION: Validate an operation region space_iD. 446 * DESCRIPTION: Validate an operation region space_ID.
447 * 447 *
448 ******************************************************************************/ 448 ******************************************************************************/
449 449
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index d0b9ed5df97e..a1e71d0ef57b 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -53,7 +53,7 @@ ACPI_MODULE_NAME("hwacpi")
53 * 53 *
54 * FUNCTION: acpi_hw_set_mode 54 * FUNCTION: acpi_hw_set_mode
55 * 55 *
56 * PARAMETERS: Mode - SYS_MODE_ACPI or SYS_MODE_LEGACY 56 * PARAMETERS: mode - SYS_MODE_ACPI or SYS_MODE_LEGACY
57 * 57 *
58 * RETURN: Status 58 * RETURN: Status
59 * 59 *
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 29e859293edd..94996f9ae3ad 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -90,7 +90,6 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
90 * FUNCTION: acpi_hw_extended_sleep 90 * FUNCTION: acpi_hw_extended_sleep
91 * 91 *
92 * PARAMETERS: sleep_state - Which sleep state to enter 92 * PARAMETERS: sleep_state - Which sleep state to enter
93 * Flags - ACPI_EXECUTE_GTS to run optional method
94 * 93 *
95 * RETURN: Status 94 * RETURN: Status
96 * 95 *
@@ -100,7 +99,7 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
100 * 99 *
101 ******************************************************************************/ 100 ******************************************************************************/
102 101
103acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags) 102acpi_status acpi_hw_extended_sleep(u8 sleep_state)
104{ 103{
105 acpi_status status; 104 acpi_status status;
106 u8 sleep_type_value; 105 u8 sleep_type_value;
@@ -117,19 +116,14 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
117 116
118 /* Clear wake status (WAK_STS) */ 117 /* Clear wake status (WAK_STS) */
119 118
120 status = acpi_write(ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status); 119 status =
120 acpi_write((u64)ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status);
121 if (ACPI_FAILURE(status)) { 121 if (ACPI_FAILURE(status)) {
122 return_ACPI_STATUS(status); 122 return_ACPI_STATUS(status);
123 } 123 }
124 124
125 acpi_gbl_system_awake_and_running = FALSE; 125 acpi_gbl_system_awake_and_running = FALSE;
126 126
127 /* Optionally execute _GTS (Going To Sleep) */
128
129 if (flags & ACPI_EXECUTE_GTS) {
130 acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
131 }
132
133 /* Flush caches, as per ACPI specification */ 127 /* Flush caches, as per ACPI specification */
134 128
135 ACPI_FLUSH_CPU_CACHE(); 129 ACPI_FLUSH_CPU_CACHE();
@@ -147,7 +141,7 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
147 ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) & 141 ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
148 ACPI_X_SLEEP_TYPE_MASK); 142 ACPI_X_SLEEP_TYPE_MASK);
149 143
150 status = acpi_write((sleep_type_value | ACPI_X_SLEEP_ENABLE), 144 status = acpi_write((u64)(sleep_type_value | ACPI_X_SLEEP_ENABLE),
151 &acpi_gbl_FADT.sleep_control); 145 &acpi_gbl_FADT.sleep_control);
152 if (ACPI_FAILURE(status)) { 146 if (ACPI_FAILURE(status)) {
153 return_ACPI_STATUS(status); 147 return_ACPI_STATUS(status);
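The write above packs two fields into the extended (FADT) sleep control register: SLP_TYP, shifted into position and masked, and the SLP_EN enable bit. A minimal standalone sketch of that composition; the constant values here are assumed for illustration, the real ACPI_X_SLEEP_* definitions live in the ACPICA headers:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed bit layout, for illustration only */
    #define X_SLEEP_TYPE_POSITION  2
    #define X_SLEEP_TYPE_MASK      0x1C
    #define X_SLEEP_ENABLE         0x20

    int main(void)
    {
        uint8_t sleep_type_a = 5;   /* SLP_TYPa from _Sx (example value) */
        uint8_t value;

        /* Shift SLP_TYP into place, mask stray bits, OR in the enable bit */
        value = (uint8_t)((sleep_type_a << X_SLEEP_TYPE_POSITION) &
                          X_SLEEP_TYPE_MASK);
        value |= X_SLEEP_ENABLE;

        printf("sleep_control value: 0x%02X\n", value);   /* 0x34 here */
        return 0;
    }
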
@@ -171,7 +165,6 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
171 * FUNCTION: acpi_hw_extended_wake_prep 165 * FUNCTION: acpi_hw_extended_wake_prep
172 * 166 *
173 * PARAMETERS: sleep_state - Which sleep state we just exited 167 * PARAMETERS: sleep_state - Which sleep state we just exited
174 * Flags - ACPI_EXECUTE_BFS to run optional method
175 * 168 *
176 * RETURN: Status 169 * RETURN: Status
177 * 170 *
@@ -180,7 +173,7 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
180 * 173 *
181 ******************************************************************************/ 174 ******************************************************************************/
182 175
183acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags) 176acpi_status acpi_hw_extended_wake_prep(u8 sleep_state)
184{ 177{
185 acpi_status status; 178 acpi_status status;
186 u8 sleep_type_value; 179 u8 sleep_type_value;
@@ -195,15 +188,10 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
195 ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) & 188 ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
196 ACPI_X_SLEEP_TYPE_MASK); 189 ACPI_X_SLEEP_TYPE_MASK);
197 190
198 (void)acpi_write((sleep_type_value | ACPI_X_SLEEP_ENABLE), 191 (void)acpi_write((u64)(sleep_type_value | ACPI_X_SLEEP_ENABLE),
199 &acpi_gbl_FADT.sleep_control); 192 &acpi_gbl_FADT.sleep_control);
200 } 193 }
201 194
202 /* Optionally execute _BFS (Back From Sleep) */
203
204 if (flags & ACPI_EXECUTE_BFS) {
205 acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
206 }
207 return_ACPI_STATUS(AE_OK); 195 return_ACPI_STATUS(AE_OK);
208} 196}
209 197
@@ -212,7 +200,7 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
212 * FUNCTION: acpi_hw_extended_wake 200 * FUNCTION: acpi_hw_extended_wake
213 * 201 *
214 * PARAMETERS: sleep_state - Which sleep state we just exited 202 * PARAMETERS: sleep_state - Which sleep state we just exited
215 * Flags - Reserved, set to zero 203 * flags - Reserved, set to zero
216 * 204 *
217 * RETURN: Status 205 * RETURN: Status
218 * 206 *
@@ -221,7 +209,7 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
221 * 209 *
222 ******************************************************************************/ 210 ******************************************************************************/
223 211
224acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags) 212acpi_status acpi_hw_extended_wake(u8 sleep_state)
225{ 213{
226 ACPI_FUNCTION_TRACE(hw_extended_wake); 214 ACPI_FUNCTION_TRACE(hw_extended_wake);
227 215
@@ -239,7 +227,7 @@ acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags)
239 * and use it to determine whether the system is rebooting or 227 * and use it to determine whether the system is rebooting or
240 * resuming. Clear WAK_STS for compatibility. 228 * resuming. Clear WAK_STS for compatibility.
241 */ 229 */
242 (void)acpi_write(ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status); 230 (void)acpi_write((u64)ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status);
243 acpi_gbl_system_awake_and_running = TRUE; 231 acpi_gbl_system_awake_and_running = TRUE;
244 232
245 acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING); 233 acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 6b6c83b87b52..4af6d20ef077 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -69,9 +69,9 @@ acpi_hw_write_multiple(u32 value,
69 * 69 *
70 * FUNCTION: acpi_hw_validate_register 70 * FUNCTION: acpi_hw_validate_register
71 * 71 *
72 * PARAMETERS: Reg - GAS register structure 72 * PARAMETERS: reg - GAS register structure
73 * max_bit_width - Max bit_width supported (32 or 64) 73 * max_bit_width - Max bit_width supported (32 or 64)
74 * Address - Pointer to where the gas->address 74 * address - Pointer to where the gas->address
75 * is returned 75 * is returned
76 * 76 *
77 * RETURN: Status 77 * RETURN: Status
@@ -102,7 +102,7 @@ acpi_hw_validate_register(struct acpi_generic_address *reg,
102 return (AE_BAD_ADDRESS); 102 return (AE_BAD_ADDRESS);
103 } 103 }
104 104
105 /* Validate the space_iD */ 105 /* Validate the space_ID */
106 106
107 if ((reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) && 107 if ((reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
108 (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO)) { 108 (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
@@ -137,8 +137,8 @@ acpi_hw_validate_register(struct acpi_generic_address *reg,
137 * 137 *
138 * FUNCTION: acpi_hw_read 138 * FUNCTION: acpi_hw_read
139 * 139 *
140 * PARAMETERS: Value - Where the value is returned 140 * PARAMETERS: value - Where the value is returned
141 * Reg - GAS register structure 141 * reg - GAS register structure
142 * 142 *
143 * RETURN: Status 143 * RETURN: Status
144 * 144 *
@@ -148,7 +148,7 @@ acpi_hw_validate_register(struct acpi_generic_address *reg,
148 * 148 *
149 * LIMITATIONS: <These limitations also apply to acpi_hw_write> 149 * LIMITATIONS: <These limitations also apply to acpi_hw_write>
150 * bit_width must be exactly 8, 16, or 32. 150 * bit_width must be exactly 8, 16, or 32.
151 * space_iD must be system_memory or system_iO. 151 * space_ID must be system_memory or system_IO.
152 * bit_offset and access_width are currently ignored, as there has 152 * bit_offset and access_width are currently ignored, as there has
153 * not been a need to implement these. 153 * not been a need to implement these.
154 * 154 *
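These limitations are enforced by a short validation routine. A simplified sketch of the checks named above, with status codes reduced to a plain enum for illustration:

    #include <stdint.h>

    enum { AE_OK, AE_SUPPORT, AE_BAD_ADDRESS };             /* simplified */
    enum { ADR_SPACE_SYSTEM_MEMORY, ADR_SPACE_SYSTEM_IO };  /* assumed ids */

    /* Mirror of the rules above: non-zero address, bit_width of 8/16/32
     * (or the caller's max, 64 for acpi_read/acpi_write), and a space_id
     * of system memory or system I/O. */
    static int validate_gas(uint64_t address, uint8_t bit_width,
                            uint8_t space_id, uint8_t max_bit_width)
    {
        if (!address)
            return AE_BAD_ADDRESS;

        if (bit_width != 8 && bit_width != 16 &&
            bit_width != 32 && bit_width != max_bit_width)
            return AE_SUPPORT;

        if (space_id != ADR_SPACE_SYSTEM_MEMORY &&
            space_id != ADR_SPACE_SYSTEM_IO)
            return AE_SUPPORT;

        return AE_OK;
    }
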
@@ -200,8 +200,8 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
200 * 200 *
201 * FUNCTION: acpi_hw_write 201 * FUNCTION: acpi_hw_write
202 * 202 *
203 * PARAMETERS: Value - Value to be written 203 * PARAMETERS: value - Value to be written
204 * Reg - GAS register structure 204 * reg - GAS register structure
205 * 205 *
206 * RETURN: Status 206 * RETURN: Status
207 * 207 *
@@ -439,7 +439,7 @@ acpi_hw_register_read(u32 register_id, u32 * return_value)
439 * FUNCTION: acpi_hw_register_write 439 * FUNCTION: acpi_hw_register_write
440 * 440 *
441 * PARAMETERS: register_id - ACPI Register ID 441 * PARAMETERS: register_id - ACPI Register ID
442 * Value - The value to write 442 * value - The value to write
443 * 443 *
444 * RETURN: Status 444 * RETURN: Status
445 * 445 *
@@ -571,7 +571,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
571 * 571 *
572 * FUNCTION: acpi_hw_read_multiple 572 * FUNCTION: acpi_hw_read_multiple
573 * 573 *
574 * PARAMETERS: Value - Where the register value is returned 574 * PARAMETERS: value - Where the register value is returned
575 * register_a - First ACPI register (required) 575 * register_a - First ACPI register (required)
576 * register_b - Second ACPI register (optional) 576 * register_b - Second ACPI register (optional)
577 * 577 *
@@ -624,7 +624,7 @@ acpi_hw_read_multiple(u32 *value,
624 * 624 *
625 * FUNCTION: acpi_hw_write_multiple 625 * FUNCTION: acpi_hw_write_multiple
626 * 626 *
627 * PARAMETERS: Value - The value to write 627 * PARAMETERS: value - The value to write
628 * register_a - First ACPI register (required) 628 * register_a - First ACPI register (required)
629 * register_b - Second ACPI register (optional) 629 * register_b - Second ACPI register (optional)
630 * 630 *
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 0ed85cac3231..3fddde056a5e 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -56,7 +56,6 @@ ACPI_MODULE_NAME("hwsleep")
56 * FUNCTION: acpi_hw_legacy_sleep 56 * FUNCTION: acpi_hw_legacy_sleep
57 * 57 *
58 * PARAMETERS: sleep_state - Which sleep state to enter 58 * PARAMETERS: sleep_state - Which sleep state to enter
59 * Flags - ACPI_EXECUTE_GTS to run optional method
60 * 59 *
61 * RETURN: Status 60 * RETURN: Status
62 * 61 *
@@ -64,7 +63,7 @@ ACPI_MODULE_NAME("hwsleep")
64 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED 63 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
65 * 64 *
66 ******************************************************************************/ 65 ******************************************************************************/
67acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags) 66acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
68{ 67{
69 struct acpi_bit_register_info *sleep_type_reg_info; 68 struct acpi_bit_register_info *sleep_type_reg_info;
70 struct acpi_bit_register_info *sleep_enable_reg_info; 69 struct acpi_bit_register_info *sleep_enable_reg_info;
@@ -95,18 +94,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
95 return_ACPI_STATUS(status); 94 return_ACPI_STATUS(status);
96 } 95 }
97 96
98 if (sleep_state != ACPI_STATE_S5) {
99 /*
100 * Disable BM arbitration. This feature is contained within an
101 * optional register (PM2 Control), so ignore a BAD_ADDRESS
102 * exception.
103 */
104 status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
105 if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
106 return_ACPI_STATUS(status);
107 }
108 }
109
110 /* 97 /*
111 * 1) Disable/Clear all GPEs 98 * 1) Disable/Clear all GPEs
112 * 2) Enable all wakeup GPEs 99 * 2) Enable all wakeup GPEs
@@ -122,12 +109,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
122 return_ACPI_STATUS(status); 109 return_ACPI_STATUS(status);
123 } 110 }
124 111
125 /* Optionally execute _GTS (Going To Sleep) */
126
127 if (flags & ACPI_EXECUTE_GTS) {
128 acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
129 }
130
131 /* Get current value of PM1A control */ 112 /* Get current value of PM1A control */
132 113
133 status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL, 114 status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
@@ -226,7 +207,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
226 * FUNCTION: acpi_hw_legacy_wake_prep 207 * FUNCTION: acpi_hw_legacy_wake_prep
227 * 208 *
228 * PARAMETERS: sleep_state - Which sleep state we just exited 209 * PARAMETERS: sleep_state - Which sleep state we just exited
229 * Flags - ACPI_EXECUTE_BFS to run optional method
230 * 210 *
231 * RETURN: Status 211 * RETURN: Status
232 * 212 *
@@ -236,7 +216,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
236 * 216 *
237 ******************************************************************************/ 217 ******************************************************************************/
238 218
239acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags) 219acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state)
240{ 220{
241 acpi_status status; 221 acpi_status status;
242 struct acpi_bit_register_info *sleep_type_reg_info; 222 struct acpi_bit_register_info *sleep_type_reg_info;
@@ -287,11 +267,6 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
287 } 267 }
288 } 268 }
289 269
290 /* Optionally execute _BFS (Back From Sleep) */
291
292 if (flags & ACPI_EXECUTE_BFS) {
293 acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
294 }
295 return_ACPI_STATUS(status); 270 return_ACPI_STATUS(status);
296} 271}
297 272
@@ -300,7 +275,6 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
300 * FUNCTION: acpi_hw_legacy_wake 275 * FUNCTION: acpi_hw_legacy_wake
301 * 276 *
302 * PARAMETERS: sleep_state - Which sleep state we just exited 277 * PARAMETERS: sleep_state - Which sleep state we just exited
303 * Flags - Reserved, set to zero
304 * 278 *
305 * RETURN: Status 279 * RETURN: Status
306 * 280 *
@@ -309,7 +283,7 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
309 * 283 *
310 ******************************************************************************/ 284 ******************************************************************************/
311 285
312acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags) 286acpi_status acpi_hw_legacy_wake(u8 sleep_state)
313{ 287{
314 acpi_status status; 288 acpi_status status;
315 289
@@ -364,16 +338,6 @@ acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags)
364 [ACPI_EVENT_POWER_BUTTON]. 338 [ACPI_EVENT_POWER_BUTTON].
365 status_register_id, ACPI_CLEAR_STATUS); 339 status_register_id, ACPI_CLEAR_STATUS);
366 340
367 /*
368 * Enable BM arbitration. This feature is contained within an
369 * optional register (PM2 Control), so ignore a BAD_ADDRESS
370 * exception.
371 */
372 status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
373 if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
374 return_ACPI_STATUS(status);
375 }
376
377 acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING); 341 acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
378 return_ACPI_STATUS(status); 342 return_ACPI_STATUS(status);
379} 343}
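Both blocks removed from this file used the same pattern for optional hardware: write the PM2 Control bit register, but treat AE_BAD_ADDRESS as benign because the register may not exist on the platform. A standalone sketch of that tolerate-if-absent idiom, with a stub standing in for acpi_write_bit_register() and an assumed status encoding:

    #include <stdio.h>

    #define AE_OK          0
    #define AE_BAD_ADDRESS 9   /* assumed encoding, for illustration */

    /* Stub: pretend the optional PM2 Control register is absent */
    static int stub_write_bit_register(unsigned reg_id, unsigned value)
    {
        (void)reg_id; (void)value;
        return AE_BAD_ADDRESS;
    }

    int main(void)
    {
        int status = stub_write_bit_register(/* ARB_DISABLE */ 1, 0);

        /* The register is optional, so only other failures are errors */
        if (status != AE_OK && status != AE_BAD_ADDRESS)
            printf("write failed: %d\n", status);
        else
            printf("optional register handled (status %d)\n", status);
        return 0;
    }
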
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index f1b2c3b94cac..b6411f16832f 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -54,7 +54,7 @@ ACPI_MODULE_NAME("hwtimer")
54 * 54 *
55 * FUNCTION: acpi_get_timer_resolution 55 * FUNCTION: acpi_get_timer_resolution
56 * 56 *
57 * PARAMETERS: Resolution - Where the resolution is returned 57 * PARAMETERS: resolution - Where the resolution is returned
58 * 58 *
59 * RETURN: Status and timer resolution 59 * RETURN: Status and timer resolution
60 * 60 *
@@ -84,7 +84,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_timer_resolution)
84 * 84 *
85 * FUNCTION: acpi_get_timer 85 * FUNCTION: acpi_get_timer
86 * 86 *
87 * PARAMETERS: Ticks - Where the timer value is returned 87 * PARAMETERS: ticks - Where the timer value is returned
88 * 88 *
89 * RETURN: Status and current timer value (ticks) 89 * RETURN: Status and current timer value (ticks)
90 * 90 *
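Both timer interfaces follow the usual ACPICA convention of returning a status and passing the result back through a pointer. A usage sketch for an in-kernel context, assuming ACPICA is initialized and the exported prototypes acpi_get_timer(u32 *) and acpi_get_timer_resolution(u32 *):

    #include <linux/printk.h>
    #include <acpi/acpi.h>

    /* Read the PM timer and report whether it is a 24- or 32-bit counter */
    static void sample_pm_timer(void)
    {
        u32 ticks, resolution;

        if (ACPI_SUCCESS(acpi_get_timer_resolution(&resolution)) &&
            ACPI_SUCCESS(acpi_get_timer(&ticks)))
            pr_info("PM timer: %u-bit, ticks=%u\n", resolution, ticks);
    }
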
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 6e5c43a60bb7..c99d546b217f 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -58,7 +58,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width);
58 * 58 *
59 * The table is used to implement the Microsoft port access rules that 59 * The table is used to implement the Microsoft port access rules that
60 * first appeared in Windows XP. Some ports are always illegal, and some 60 * first appeared in Windows XP. Some ports are always illegal, and some
61 * ports are only illegal if the BIOS calls _OSI with a win_xP string or 61 * ports are only illegal if the BIOS calls _OSI with a win_XP string or
62 * later (meaning that the BIOS itself is post-XP.) 62 * later (meaning that the BIOS itself is post-XP.)
63 * 63 *
64 * This provides ACPICA with the desired port protections and 64 * This provides ACPICA with the desired port protections and
@@ -66,7 +66,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width);
66 * 66 *
67 * Description of port entries: 67 * Description of port entries:
68 * DMA: DMA controller 68 * DMA: DMA controller
69 * PIC0: Programmable Interrupt Controller (8259_a) 69 * PIC0: Programmable Interrupt Controller (8259A)
70 * PIT1: System Timer 1 70 * PIT1: System Timer 1
71 * PIT2: System Timer 2 failsafe 71 * PIT2: System Timer 2 failsafe
72 * RTC: Real-time clock 72 * RTC: Real-time clock
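The table maps each legacy device to a fixed port range; a request is rejected if it overlaps any protected range, with the post-XP entries additionally gated on the _OSI level. A simplified sketch of the overlap check; the ranges below are the classic PC values, used here only as examples (the real table in hwvalid.c also records the _OSI level per entry):

    #include <stdio.h>
    #include <stdint.h>

    struct port_info {
        const char *name;
        uint16_t    start;
        uint16_t    end;
    };

    static const struct port_info protected_ports[] = {
        { "DMA",  0x0000, 0x000F },
        { "PIC0", 0x0020, 0x0021 },
        { "PIT1", 0x0040, 0x0043 },
        { "RTC",  0x0070, 0x0071 },
    };

    static int io_request_is_blocked(uint16_t address, uint32_t byte_width)
    {
        uint32_t last = address + byte_width - 1;
        size_t i;

        for (i = 0; i < sizeof(protected_ports) / sizeof(protected_ports[0]); i++)
            if (address <= protected_ports[i].end &&
                last >= protected_ports[i].start)
                return 1;   /* overlaps a protected range */
        return 0;
    }

    int main(void)
    {
        printf("byte write to 0x70 blocked: %d\n",
               io_request_is_blocked(0x70, 1));
        return 0;
    }
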
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index a716fede4f25..7bfd649d1996 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -104,8 +104,8 @@ ACPI_EXPORT_SYMBOL(acpi_reset)
104 * 104 *
105 * FUNCTION: acpi_read 105 * FUNCTION: acpi_read
106 * 106 *
107 * PARAMETERS: Value - Where the value is returned 107 * PARAMETERS: value - Where the value is returned
108 * Reg - GAS register structure 108 * reg - GAS register structure
109 * 109 *
110 * RETURN: Status 110 * RETURN: Status
111 * 111 *
@@ -113,7 +113,7 @@ ACPI_EXPORT_SYMBOL(acpi_reset)
113 * 113 *
114 * LIMITATIONS: <These limitations also apply to acpi_write> 114 * LIMITATIONS: <These limitations also apply to acpi_write>
115 * bit_width must be exactly 8, 16, 32, or 64. 115 * bit_width must be exactly 8, 16, 32, or 64.
116 * space_iD must be system_memory or system_iO. 116 * space_ID must be system_memory or system_IO.
117 * bit_offset and access_width are currently ignored, as there has 117 * bit_offset and access_width are currently ignored, as there has
118 * not been a need to implement these. 118 * not been a need to implement these.
119 * 119 *
@@ -196,8 +196,8 @@ ACPI_EXPORT_SYMBOL(acpi_read)
196 * 196 *
197 * FUNCTION: acpi_write 197 * FUNCTION: acpi_write
198 * 198 *
199 * PARAMETERS: Value - Value to be written 199 * PARAMETERS: value - Value to be written
200 * Reg - GAS register structure 200 * reg - GAS register structure
201 * 201 *
202 * RETURN: Status 202 * RETURN: Status
203 * 203 *
@@ -441,7 +441,7 @@ ACPI_EXPORT_SYMBOL(acpi_write_bit_register)
441 * *sleep_type_a - Where SLP_TYPa is returned 441 * *sleep_type_a - Where SLP_TYPa is returned
442 * *sleep_type_b - Where SLP_TYPb is returned 442 * *sleep_type_b - Where SLP_TYPb is returned
443 * 443 *
444 * RETURN: Status - ACPI status 444 * RETURN: status - ACPI status
445 * 445 *
446 * DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested sleep 446 * DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested sleep
447 * state. 447 * state.
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 762d059bb508..1f165a750ae2 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -50,7 +50,7 @@ ACPI_MODULE_NAME("hwxfsleep")
50 50
51/* Local prototypes */ 51/* Local prototypes */
52static acpi_status 52static acpi_status
53acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id); 53acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
54 54
55/* 55/*
56 * Dispatch table used to efficiently branch to the various sleep 56 * Dispatch table used to efficiently branch to the various sleep
@@ -205,7 +205,7 @@ acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
205 ACPI_FLUSH_CPU_CACHE(); 205 ACPI_FLUSH_CPU_CACHE();
206 206
207 status = acpi_hw_write_port(acpi_gbl_FADT.smi_command, 207 status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
208 (u32)acpi_gbl_FADT.S4bios_request, 8); 208 (u32)acpi_gbl_FADT.s4_bios_request, 8);
209 209
210 do { 210 do {
211 acpi_os_stall(1000); 211 acpi_os_stall(1000);
@@ -235,7 +235,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
235 * 235 *
236 ******************************************************************************/ 236 ******************************************************************************/
237static acpi_status 237static acpi_status
238acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id) 238acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id)
239{ 239{
240 acpi_status status; 240 acpi_status status;
241 struct acpi_sleep_functions *sleep_functions = 241 struct acpi_sleep_functions *sleep_functions =
@@ -248,11 +248,11 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
248 * use the extended sleep registers 248 * use the extended sleep registers
249 */ 249 */
250 if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) { 250 if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) {
251 status = sleep_functions->extended_function(sleep_state, flags); 251 status = sleep_functions->extended_function(sleep_state);
252 } else { 252 } else {
253 /* Legacy sleep */ 253 /* Legacy sleep */
254 254
255 status = sleep_functions->legacy_function(sleep_state, flags); 255 status = sleep_functions->legacy_function(sleep_state);
256 } 256 }
257 257
258 return (status); 258 return (status);
@@ -262,7 +262,7 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
262 * For the case where reduced-hardware-only code is being generated, 262 * For the case where reduced-hardware-only code is being generated,
263 * we know that only the extended sleep registers are available 263 * we know that only the extended sleep registers are available
264 */ 264 */
265 status = sleep_functions->extended_function(sleep_state, flags); 265 status = sleep_functions->extended_function(sleep_state);
266 return (status); 266 return (status);
267 267
268#endif /* !ACPI_REDUCED_HARDWARE */ 268#endif /* !ACPI_REDUCED_HARDWARE */
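With the flags argument gone, the dispatch machinery reduces to two function pointers selected purely by hardware capability. A sketch of the logic in this hunk; the struct shape is assumed to match acpi_sleep_functions:

    typedef int acpi_status;
    typedef unsigned char u8;

    /* Assumed shape of one dispatch-table entry */
    struct sleep_functions {
        acpi_status (*legacy_function)(u8 sleep_state);
        acpi_status (*extended_function)(u8 sleep_state);
    };

    /* As in acpi_hw_sleep_dispatch(): reduced-hardware platforms, or any
     * FADT that provides a sleep_control address, take the extended path;
     * everything else falls back to the legacy fixed registers. */
    static acpi_status dispatch(const struct sleep_functions *funcs,
                                u8 sleep_state, int reduced_hardware,
                                unsigned long long sleep_control_address)
    {
        if (reduced_hardware || sleep_control_address)
            return funcs->extended_function(sleep_state);
        return funcs->legacy_function(sleep_state);
    }
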
@@ -349,7 +349,6 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
349 * FUNCTION: acpi_enter_sleep_state 349 * FUNCTION: acpi_enter_sleep_state
350 * 350 *
351 * PARAMETERS: sleep_state - Which sleep state to enter 351 * PARAMETERS: sleep_state - Which sleep state to enter
352 * Flags - ACPI_EXECUTE_GTS to run optional method
353 * 352 *
354 * RETURN: Status 353 * RETURN: Status
355 * 354 *
@@ -357,7 +356,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
357 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED 356 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
358 * 357 *
359 ******************************************************************************/ 358 ******************************************************************************/
360acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags) 359acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
361{ 360{
362 acpi_status status; 361 acpi_status status;
363 362
@@ -371,7 +370,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags)
371 } 370 }
372 371
373 status = 372 status =
374 acpi_hw_sleep_dispatch(sleep_state, flags, ACPI_SLEEP_FUNCTION_ID); 373 acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID);
375 return_ACPI_STATUS(status); 374 return_ACPI_STATUS(status);
376} 375}
377 376
@@ -382,7 +381,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
382 * FUNCTION: acpi_leave_sleep_state_prep 381 * FUNCTION: acpi_leave_sleep_state_prep
383 * 382 *
384 * PARAMETERS: sleep_state - Which sleep state we are exiting 383 * PARAMETERS: sleep_state - Which sleep state we are exiting
385 * Flags - ACPI_EXECUTE_BFS to run optional method 384 * flags - ACPI_EXECUTE_BFS to run optional method
386 * 385 *
387 * RETURN: Status 386 * RETURN: Status
388 * 387 *
@@ -391,14 +390,14 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
391 * Called with interrupts DISABLED. 390 * Called with interrupts DISABLED.
392 * 391 *
393 ******************************************************************************/ 392 ******************************************************************************/
394acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags) 393acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
395{ 394{
396 acpi_status status; 395 acpi_status status;
397 396
398 ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep); 397 ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
399 398
400 status = 399 status =
401 acpi_hw_sleep_dispatch(sleep_state, flags, 400 acpi_hw_sleep_dispatch(sleep_state,
402 ACPI_WAKE_PREP_FUNCTION_ID); 401 ACPI_WAKE_PREP_FUNCTION_ID);
403 return_ACPI_STATUS(status); 402 return_ACPI_STATUS(status);
404} 403}
@@ -423,8 +422,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
423 422
424 ACPI_FUNCTION_TRACE(acpi_leave_sleep_state); 423 ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
425 424
426 425 status = acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_FUNCTION_ID);
427 status = acpi_hw_sleep_dispatch(sleep_state, 0, ACPI_WAKE_FUNCTION_ID);
428 return_ACPI_STATUS(status); 426 return_ACPI_STATUS(status);
429} 427}
430 428
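For callers, the simplification is symmetric: every entry point in the suspend/resume sequence now takes only the sleep state. A sketch of a full S3 cycle after this change, assuming an in-kernel context where acpi_enter_sleep_state_prep() has already run before interrupts were disabled:

    #include <acpi/acpi.h>

    static acpi_status do_s3_cycle(void)
    {
        acpi_status status;

        /* Must run with interrupts disabled, per the header comment */
        status = acpi_enter_sleep_state(ACPI_STATE_S3);
        if (ACPI_FAILURE(status))
            return status;

        /* ...firmware resumes execution here after wake... */

        status = acpi_leave_sleep_state_prep(ACPI_STATE_S3);
        if (ACPI_FAILURE(status))
            return status;

        return acpi_leave_sleep_state(ACPI_STATE_S3);
    }
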
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 61623f3f6826..23db53ce2293 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -157,7 +157,7 @@ acpi_status acpi_ns_root_initialize(void)
157 157
158#if defined (ACPI_ASL_COMPILER) 158#if defined (ACPI_ASL_COMPILER)
159 159
160 /* Save the parameter count for the i_aSL compiler */ 160 /* Save the parameter count for the iASL compiler */
161 161
162 new_node->value = obj_desc->method.param_count; 162 new_node->value = obj_desc->method.param_count;
163#else 163#else
@@ -258,11 +258,11 @@ acpi_status acpi_ns_root_initialize(void)
258 * FUNCTION: acpi_ns_lookup 258 * FUNCTION: acpi_ns_lookup
259 * 259 *
260 * PARAMETERS: scope_info - Current scope info block 260 * PARAMETERS: scope_info - Current scope info block
261 * Pathname - Search pathname, in internal format 261 * pathname - Search pathname, in internal format
262 * (as represented in the AML stream) 262 * (as represented in the AML stream)
263 * Type - Type associated with name 263 * type - Type associated with name
264 * interpreter_mode - IMODE_LOAD_PASS2 => add name if not found 264 * interpreter_mode - IMODE_LOAD_PASS2 => add name if not found
265 * Flags - Flags describing the search restrictions 265 * flags - Flags describing the search restrictions
266 * walk_state - Current state of the walk 266 * walk_state - Current state of the walk
267 * return_node - Where the Node is placed (if found 267 * return_node - Where the Node is placed (if found
268 * or created successfully) 268 * or created successfully)
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 7c3d3ceb98b3..ac389e5bb594 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -52,7 +52,7 @@ ACPI_MODULE_NAME("nsalloc")
52 * 52 *
53 * FUNCTION: acpi_ns_create_node 53 * FUNCTION: acpi_ns_create_node
54 * 54 *
55 * PARAMETERS: Name - Name of the new node (4 char ACPI name) 55 * PARAMETERS: name - Name of the new node (4 char ACPI name)
56 * 56 *
57 * RETURN: New namespace node (Null on failure) 57 * RETURN: New namespace node (Null on failure)
58 * 58 *
@@ -92,7 +92,7 @@ struct acpi_namespace_node *acpi_ns_create_node(u32 name)
92 * 92 *
93 * FUNCTION: acpi_ns_delete_node 93 * FUNCTION: acpi_ns_delete_node
94 * 94 *
95 * PARAMETERS: Node - Node to be deleted 95 * PARAMETERS: node - Node to be deleted
96 * 96 *
97 * RETURN: None 97 * RETURN: None
98 * 98 *
@@ -143,7 +143,7 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
143 * 143 *
144 * FUNCTION: acpi_ns_remove_node 144 * FUNCTION: acpi_ns_remove_node
145 * 145 *
146 * PARAMETERS: Node - Node to be removed/deleted 146 * PARAMETERS: node - Node to be removed/deleted
147 * 147 *
148 * RETURN: None 148 * RETURN: None
149 * 149 *
@@ -196,8 +196,8 @@ void acpi_ns_remove_node(struct acpi_namespace_node *node)
196 * 196 *
197 * PARAMETERS: walk_state - Current state of the walk 197 * PARAMETERS: walk_state - Current state of the walk
198 * parent_node - The parent of the new Node 198 * parent_node - The parent of the new Node
199 * Node - The new Node to install 199 * node - The new Node to install
200 * Type - ACPI object type of the new Node 200 * type - ACPI object type of the new Node
201 * 201 *
202 * RETURN: None 202 * RETURN: None
203 * 203 *
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 3f7f3f6e7dd5..7ee4e6aeb0a2 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -63,7 +63,7 @@ acpi_ns_dump_one_device(acpi_handle obj_handle,
63 * FUNCTION: acpi_ns_print_pathname 63 * FUNCTION: acpi_ns_print_pathname
64 * 64 *
65 * PARAMETERS: num_segments - Number of ACPI name segments 65 * PARAMETERS: num_segments - Number of ACPI name segments
66 * Pathname - The compressed (internal) path 66 * pathname - The compressed (internal) path
67 * 67 *
68 * RETURN: None 68 * RETURN: None
69 * 69 *
@@ -107,10 +107,10 @@ void acpi_ns_print_pathname(u32 num_segments, char *pathname)
107 * 107 *
108 * FUNCTION: acpi_ns_dump_pathname 108 * FUNCTION: acpi_ns_dump_pathname
109 * 109 *
110 * PARAMETERS: Handle - Object 110 * PARAMETERS: handle - Object
111 * Msg - Prefix message 111 * msg - Prefix message
112 * Level - Desired debug level 112 * level - Desired debug level
113 * Component - Caller's component ID 113 * component - Caller's component ID
114 * 114 *
115 * RETURN: None 115 * RETURN: None
116 * 116 *
@@ -143,8 +143,8 @@ acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component)
143 * FUNCTION: acpi_ns_dump_one_object 143 * FUNCTION: acpi_ns_dump_one_object
144 * 144 *
145 * PARAMETERS: obj_handle - Node to be dumped 145 * PARAMETERS: obj_handle - Node to be dumped
146 * Level - Nesting level of the handle 146 * level - Nesting level of the handle
147 * Context - Passed into walk_namespace 147 * context - Passed into walk_namespace
148 * return_value - Not used 148 * return_value - Not used
149 * 149 *
150 * RETURN: Status 150 * RETURN: Status
@@ -615,7 +615,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
615 * 615 *
616 * FUNCTION: acpi_ns_dump_objects 616 * FUNCTION: acpi_ns_dump_objects
617 * 617 *
618 * PARAMETERS: Type - Object type to be dumped 618 * PARAMETERS: type - Object type to be dumped
619 * display_type - 0 or ACPI_DISPLAY_SUMMARY 619 * display_type - 0 or ACPI_DISPLAY_SUMMARY
620 * max_depth - Maximum depth of dump. Use ACPI_UINT32_MAX 620 * max_depth - Maximum depth of dump. Use ACPI_UINT32_MAX
621 * for an effectively unlimited depth. 621 * for an effectively unlimited depth.
@@ -671,7 +671,7 @@ acpi_ns_dump_objects(acpi_object_type type,
671 * 671 *
672 * FUNCTION: acpi_ns_dump_entry 672 * FUNCTION: acpi_ns_dump_entry
673 * 673 *
674 * PARAMETERS: Handle - Node to be dumped 674 * PARAMETERS: handle - Node to be dumped
675 * debug_level - Output level 675 * debug_level - Output level
676 * 676 *
677 * RETURN: None 677 * RETURN: None
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 3b5acb0eb406..944d4c8d9438 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -55,9 +55,9 @@ ACPI_MODULE_NAME("nsdumpdv")
55 * 55 *
56 * FUNCTION: acpi_ns_dump_one_device 56 * FUNCTION: acpi_ns_dump_one_device
57 * 57 *
58 * PARAMETERS: Handle - Node to be dumped 58 * PARAMETERS: handle - Node to be dumped
59 * Level - Nesting level of the handle 59 * level - Nesting level of the handle
60 * Context - Passed into walk_namespace 60 * context - Passed into walk_namespace
61 * return_value - Not used 61 * return_value - Not used
62 * 62 *
63 * RETURN: Status 63 * RETURN: Status
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index f375cb82e321..69074be498e8 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -59,11 +59,11 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
59 * 59 *
60 * FUNCTION: acpi_ns_evaluate 60 * FUNCTION: acpi_ns_evaluate
61 * 61 *
62 * PARAMETERS: Info - Evaluation info block, contains: 62 * PARAMETERS: info - Evaluation info block, contains:
63 * prefix_node - Prefix or Method/Object Node to execute 63 * prefix_node - Prefix or Method/Object Node to execute
64 * Pathname - Name of method to execute. If NULL, the 64 * pathname - Name of method to execute. If NULL, the
65 * Node is the object to execute 65 * Node is the object to execute
66 * Parameters - List of parameters to pass to the method, 66 * parameters - List of parameters to pass to the method,
67 * terminated by NULL. Params itself may be 67 * terminated by NULL. Params itself may be
68 * NULL if no parameters are being passed. 68 * NULL if no parameters are being passed.
69 * return_object - Where to put method's return value (if 69 * return_object - Where to put method's return value (if
@@ -71,7 +71,7 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
71 * parameter_type - Type of Parameter list 71 * parameter_type - Type of Parameter list
72 * return_object - Where to put method's return value (if 72 * return_object - Where to put method's return value (if
73 * any). If NULL, no value is returned. 73 * any). If NULL, no value is returned.
74 * Flags - ACPI_IGNORE_RETURN_VALUE to delete return 74 * flags - ACPI_IGNORE_RETURN_VALUE to delete return
75 * 75 *
76 * RETURN: Status 76 * RETURN: Status
77 * 77 *
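The info block mirrors what external callers supply through the exported interface. A usage sketch with the public analog acpi_evaluate_object(); the method name MTH0 is hypothetical:

    #include <acpi/acpi.h>

    /* Evaluate a hypothetical method "MTH0" with one integer argument,
     * discarding any return value (the external counterpart of
     * ACPI_IGNORE_RETURN_VALUE above). */
    static acpi_status evaluate_example(acpi_handle handle)
    {
        union acpi_object arg;
        struct acpi_object_list args;

        arg.integer.type = ACPI_TYPE_INTEGER;
        arg.integer.value = 1;

        args.count = 1;        /* parameter list, as in the info block */
        args.pointer = &arg;

        /* NULL return buffer: no return value is captured */
        return acpi_evaluate_object(handle, (acpi_string)"MTH0", &args, NULL);
    }
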
@@ -351,7 +351,7 @@ void acpi_ns_exec_module_code_list(void)
351 * FUNCTION: acpi_ns_exec_module_code 351 * FUNCTION: acpi_ns_exec_module_code
352 * 352 *
353 * PARAMETERS: method_obj - Object container for the module-level code 353 * PARAMETERS: method_obj - Object container for the module-level code
354 * Info - Info block for method evaluation 354 * info - Info block for method evaluation
355 * 355 *
356 * RETURN: None. Exceptions during method execution are ignored, since 356 * RETURN: None. Exceptions during method execution are ignored, since
357 * we cannot abort a table load. 357 * we cannot abort a table load.
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 9d84ec2f0211..95ffe8dfa1f1 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -224,8 +224,8 @@ acpi_status acpi_ns_initialize_devices(void)
224 * FUNCTION: acpi_ns_init_one_object 224 * FUNCTION: acpi_ns_init_one_object
225 * 225 *
226 * PARAMETERS: obj_handle - Node 226 * PARAMETERS: obj_handle - Node
227 * Level - Current nesting level 227 * level - Current nesting level
228 * Context - Points to a init info struct 228 * context - Points to a init info struct
229 * return_value - Not used 229 * return_value - Not used
230 * 230 *
231 * RETURN: Status 231 * RETURN: Status
@@ -530,7 +530,7 @@ acpi_ns_init_one_device(acpi_handle obj_handle,
530 * we will not run _INI, but we continue to examine the children 530 * we will not run _INI, but we continue to examine the children
531 * of this device. 531 * of this device.
532 * 532 *
533 * From the ACPI spec, description of _STA: (Note - no mention 533 * From the ACPI spec, description of _STA: (note - no mention
534 * of whether to run _INI or not on the device in question) 534 * of whether to run _INI or not on the device in question)
535 * 535 *
536 * "_STA may return bit 0 clear (not present) with bit 3 set 536 * "_STA may return bit 0 clear (not present) with bit 3 set
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 5cbf15ffe7d8..76935ff29289 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -63,7 +63,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle);
63 * FUNCTION: acpi_ns_load_table 63 * FUNCTION: acpi_ns_load_table
64 * 64 *
65 * PARAMETERS: table_index - Index for table to be loaded 65 * PARAMETERS: table_index - Index for table to be loaded
66 * Node - Owning NS node 66 * node - Owning NS node
67 * 67 *
68 * RETURN: Status 68 * RETURN: Status
69 * 69 *
@@ -278,7 +278,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle)
278 * 278 *
279 * FUNCTION: acpi_ns_unload_name_space 279 * FUNCTION: acpi_ns_unload_name_space
280 * 280 *
281 * PARAMETERS: Handle - Root of namespace subtree to be deleted 281 * PARAMETERS: handle - Root of namespace subtree to be deleted
282 * 282 *
283 * RETURN: Status 283 * RETURN: Status
284 * 284 *
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index b20e7c8c3ffb..96e0eb609bb4 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -53,8 +53,8 @@ ACPI_MODULE_NAME("nsnames")
53 * 53 *
54 * FUNCTION: acpi_ns_build_external_path 54 * FUNCTION: acpi_ns_build_external_path
55 * 55 *
56 * PARAMETERS: Node - NS node whose pathname is needed 56 * PARAMETERS: node - NS node whose pathname is needed
57 * Size - Size of the pathname 57 * size - Size of the pathname
58 * *name_buffer - Where to return the pathname 58 * *name_buffer - Where to return the pathname
59 * 59 *
60 * RETURN: Status 60 * RETURN: Status
@@ -120,7 +120,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
120 * 120 *
121 * FUNCTION: acpi_ns_get_external_pathname 121 * FUNCTION: acpi_ns_get_external_pathname
122 * 122 *
123 * PARAMETERS: Node - Namespace node whose pathname is needed 123 * PARAMETERS: node - Namespace node whose pathname is needed
124 * 124 *
125 * RETURN: Pointer to storage containing the fully qualified name of 125 * RETURN: Pointer to storage containing the fully qualified name of
126 * the node, in external format (name segments separated by path 126 * the node, in external format (name segments separated by path
@@ -168,7 +168,7 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
168 * 168 *
169 * FUNCTION: acpi_ns_get_pathname_length 169 * FUNCTION: acpi_ns_get_pathname_length
170 * 170 *
171 * PARAMETERS: Node - Namespace node 171 * PARAMETERS: node - Namespace node
172 * 172 *
173 * RETURN: Length of path, including prefix 173 * RETURN: Length of path, including prefix
174 * 174 *
@@ -214,7 +214,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
214 * 214 *
215 * PARAMETERS: target_handle - Handle of named object whose name is 215 * PARAMETERS: target_handle - Handle of named object whose name is
216 * to be found 216 * to be found
217 * Buffer - Where the pathname is returned 217 * buffer - Where the pathname is returned
218 * 218 *
219 * RETURN: Status, Buffer is filled with pathname if status is AE_OK 219 * RETURN: Status, Buffer is filled with pathname if status is AE_OK
220 * 220 *
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index dd77a3ce6e50..d6c9a3cc6716 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -53,9 +53,9 @@ ACPI_MODULE_NAME("nsobject")
53 * 53 *
54 * FUNCTION: acpi_ns_attach_object 54 * FUNCTION: acpi_ns_attach_object
55 * 55 *
56 * PARAMETERS: Node - Parent Node 56 * PARAMETERS: node - Parent Node
57 * Object - Object to be attached 57 * object - Object to be attached
58 * Type - Type of object, or ACPI_TYPE_ANY if not 58 * type - Type of object, or ACPI_TYPE_ANY if not
59 * known 59 * known
60 * 60 *
61 * RETURN: Status 61 * RETURN: Status
@@ -191,7 +191,7 @@ acpi_ns_attach_object(struct acpi_namespace_node *node,
191 * 191 *
192 * FUNCTION: acpi_ns_detach_object 192 * FUNCTION: acpi_ns_detach_object
193 * 193 *
194 * PARAMETERS: Node - A Namespace node whose object will be detached 194 * PARAMETERS: node - A Namespace node whose object will be detached
195 * 195 *
196 * RETURN: None. 196 * RETURN: None.
197 * 197 *
@@ -250,7 +250,7 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
250 * 250 *
251 * FUNCTION: acpi_ns_get_attached_object 251 * FUNCTION: acpi_ns_get_attached_object
252 * 252 *
253 * PARAMETERS: Node - Namespace node 253 * PARAMETERS: node - Namespace node
254 * 254 *
255 * RETURN: Current value of the object field from the Node whose 255 * RETURN: Current value of the object field from the Node whose
256 * handle is passed 256 * handle is passed
@@ -285,7 +285,7 @@ union acpi_operand_object *acpi_ns_get_attached_object(struct
285 * 285 *
286 * FUNCTION: acpi_ns_get_secondary_object 286 * FUNCTION: acpi_ns_get_secondary_object
287 * 287 *
288 * PARAMETERS: Node - Namespace node 288 * PARAMETERS: node - Namespace node
289 * 289 *
290 * RETURN: Current value of the object field from the Node whose 290 * RETURN: Current value of the object field from the Node whose
291 * handle is passed. 291 * handle is passed.
@@ -315,9 +315,9 @@ union acpi_operand_object *acpi_ns_get_secondary_object(union
315 * 315 *
316 * FUNCTION: acpi_ns_attach_data 316 * FUNCTION: acpi_ns_attach_data
317 * 317 *
318 * PARAMETERS: Node - Namespace node 318 * PARAMETERS: node - Namespace node
319 * Handler - Handler to be associated with the data 319 * handler - Handler to be associated with the data
320 * Data - Data to be attached 320 * data - Data to be attached
321 * 321 *
322 * RETURN: Status 322 * RETURN: Status
323 * 323 *
@@ -372,8 +372,8 @@ acpi_ns_attach_data(struct acpi_namespace_node *node,
372 * 372 *
373 * FUNCTION: acpi_ns_detach_data 373 * FUNCTION: acpi_ns_detach_data
374 * 374 *
375 * PARAMETERS: Node - Namespace node 375 * PARAMETERS: node - Namespace node
376 * Handler - Handler associated with the data 376 * handler - Handler associated with the data
377 * 377 *
378 * RETURN: Status 378 * RETURN: Status
379 * 379 *
@@ -416,9 +416,9 @@ acpi_ns_detach_data(struct acpi_namespace_node * node,
416 * 416 *
417 * FUNCTION: acpi_ns_get_attached_data 417 * FUNCTION: acpi_ns_get_attached_data
418 * 418 *
419 * PARAMETERS: Node - Namespace node 419 * PARAMETERS: node - Namespace node
420 * Handler - Handler associated with the data 420 * handler - Handler associated with the data
421 * Data - Where the data is returned 421 * data - Where the data is returned
422 * 422 *
423 * RETURN: Status 423 * RETURN: Status
424 * 424 *
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 23ce09686418..2419f417ea33 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -116,7 +116,7 @@ static const char *acpi_rtype_names[] = {
116 * 116 *
117 * FUNCTION: acpi_ns_check_predefined_names 117 * FUNCTION: acpi_ns_check_predefined_names
118 * 118 *
119 * PARAMETERS: Node - Namespace node for the method/object 119 * PARAMETERS: node - Namespace node for the method/object
120 * user_param_count - Number of parameters actually passed 120 * user_param_count - Number of parameters actually passed
121 * return_status - Status from the object evaluation 121 * return_status - Status from the object evaluation
122 * return_object_ptr - Pointer to the object returned from the 122 * return_object_ptr - Pointer to the object returned from the
@@ -275,10 +275,10 @@ cleanup:
275 * 275 *
276 * FUNCTION: acpi_ns_check_parameter_count 276 * FUNCTION: acpi_ns_check_parameter_count
277 * 277 *
278 * PARAMETERS: Pathname - Full pathname to the node (for error msgs) 278 * PARAMETERS: pathname - Full pathname to the node (for error msgs)
279 * Node - Namespace node for the method/object 279 * node - Namespace node for the method/object
280 * user_param_count - Number of args passed in by the caller 280 * user_param_count - Number of args passed in by the caller
281 * Predefined - Pointer to entry in predefined name table 281 * predefined - Pointer to entry in predefined name table
282 * 282 *
283 * RETURN: None 283 * RETURN: None
284 * 284 *
@@ -364,7 +364,7 @@ acpi_ns_check_parameter_count(char *pathname,
364 * 364 *
365 * FUNCTION: acpi_ns_check_for_predefined_name 365 * FUNCTION: acpi_ns_check_for_predefined_name
366 * 366 *
367 * PARAMETERS: Node - Namespace node for the method/object 367 * PARAMETERS: node - Namespace node for the method/object
368 * 368 *
369 * RETURN: Pointer to entry in predefined table. NULL indicates not found. 369 * RETURN: Pointer to entry in predefined table. NULL indicates not found.
370 * 370 *
@@ -410,7 +410,7 @@ const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
410 * 410 *
411 * FUNCTION: acpi_ns_check_package 411 * FUNCTION: acpi_ns_check_package
412 * 412 *
413 * PARAMETERS: Data - Pointer to validation data structure 413 * PARAMETERS: data - Pointer to validation data structure
414 * return_object_ptr - Pointer to the object returned from the 414 * return_object_ptr - Pointer to the object returned from the
415 * evaluation of a method or object 415 * evaluation of a method or object
416 * 416 *
@@ -638,7 +638,7 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
638 /* Create the new outer package and populate it */ 638 /* Create the new outer package and populate it */
639 639
640 status = 640 status =
641 acpi_ns_wrap_with_package(data, *elements, 641 acpi_ns_wrap_with_package(data, return_object,
642 return_object_ptr); 642 return_object_ptr);
643 if (ACPI_FAILURE(status)) { 643 if (ACPI_FAILURE(status)) {
644 return (status); 644 return (status);
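The functional change in this hunk: the new outer package is built around the whole return_object, not its first element. A conceptual sketch of the wrap-with-package repair using a toy object model (the real routine operates on union acpi_operand_object):

    #include <stdlib.h>

    /* Toy object model, for illustration only */
    struct obj {
        int          is_package;
        struct obj **elements;
        int          count;
    };

    /* Wrap a bare return object in a single-element outer package, as
     * acpi_ns_wrap_with_package() does when a method that should return
     * a Package returns a lone object instead. */
    static struct obj *wrap_with_package(struct obj *return_object)
    {
        struct obj *pkg = calloc(1, sizeof(*pkg));

        if (!pkg)
            return NULL;
        pkg->elements = malloc(sizeof(*pkg->elements));
        if (!pkg->elements) {
            free(pkg);
            return NULL;
        }
        pkg->is_package = 1;
        pkg->count = 1;
        pkg->elements[0] = return_object;   /* the corrected argument */
        return pkg;
    }
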
@@ -685,11 +685,11 @@ package_too_small:
685 * 685 *
686 * FUNCTION: acpi_ns_check_package_list 686 * FUNCTION: acpi_ns_check_package_list
687 * 687 *
688 * PARAMETERS: Data - Pointer to validation data structure 688 * PARAMETERS: data - Pointer to validation data structure
689 * Package - Pointer to package-specific info for method 689 * package - Pointer to package-specific info for method
690 * Elements - Element list of parent package. All elements 690 * elements - Element list of parent package. All elements
691 * of this list should be of type Package. 691 * of this list should be of type Package.
692 * Count - Count of subpackages 692 * count - Count of subpackages
693 * 693 *
694 * RETURN: Status 694 * RETURN: Status
695 * 695 *
@@ -911,12 +911,12 @@ package_too_small:
911 * 911 *
912 * FUNCTION: acpi_ns_check_package_elements 912 * FUNCTION: acpi_ns_check_package_elements
913 * 913 *
914 * PARAMETERS: Data - Pointer to validation data structure 914 * PARAMETERS: data - Pointer to validation data structure
915 * Elements - Pointer to the package elements array 915 * elements - Pointer to the package elements array
916 * Type1 - Object type for first group 916 * type1 - Object type for first group
917 * Count1 - Count for first group 917 * count1 - Count for first group
918 * Type2 - Object type for second group 918 * type2 - Object type for second group
919 * Count2 - Count for second group 919 * count2 - Count for second group
920 * start_index - Start of the first group of elements 920 * start_index - Start of the first group of elements
921 * 921 *
922 * RETURN: Status 922 * RETURN: Status
@@ -968,7 +968,7 @@ acpi_ns_check_package_elements(struct acpi_predefined_data *data,
968 * 968 *
969 * FUNCTION: acpi_ns_check_object_type 969 * FUNCTION: acpi_ns_check_object_type
970 * 970 *
971 * PARAMETERS: Data - Pointer to validation data structure 971 * PARAMETERS: data - Pointer to validation data structure
972 * return_object_ptr - Pointer to the object returned from the 972 * return_object_ptr - Pointer to the object returned from the
973 * evaluation of a method or object 973 * evaluation of a method or object
974 * expected_btypes - Bitmap of expected return type(s) 974 * expected_btypes - Bitmap of expected return type(s)
@@ -1102,7 +1102,7 @@ acpi_ns_check_object_type(struct acpi_predefined_data *data,
1102 * 1102 *
1103 * FUNCTION: acpi_ns_check_reference 1103 * FUNCTION: acpi_ns_check_reference
1104 * 1104 *
1105 * PARAMETERS: Data - Pointer to validation data structure 1105 * PARAMETERS: data - Pointer to validation data structure
1106 * return_object - Object returned from the evaluation of a 1106 * return_object - Object returned from the evaluation of a
1107 * method or object 1107 * method or object
1108 * 1108 *
@@ -1140,7 +1140,7 @@ acpi_ns_check_reference(struct acpi_predefined_data *data,
1140 * 1140 *
1141 * FUNCTION: acpi_ns_get_expected_types 1141 * FUNCTION: acpi_ns_get_expected_types
1142 * 1142 *
1143 * PARAMETERS: Buffer - Pointer to where the string is returned 1143 * PARAMETERS: buffer - Pointer to where the string is returned
1144 * expected_btypes - Bitmap of expected return type(s) 1144 * expected_btypes - Bitmap of expected return type(s)
1145 * 1145 *
1146 * RETURN: Buffer is populated with type names. 1146 * RETURN: Buffer is populated with type names.
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 5519a64a353f..8c5f292860fc 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -94,7 +94,7 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
94 * 94 *
95 * FUNCTION: acpi_ns_repair_object 95 * FUNCTION: acpi_ns_repair_object
96 * 96 *
97 * PARAMETERS: Data - Pointer to validation data structure 97 * PARAMETERS: data - Pointer to validation data structure
98 * expected_btypes - Object types expected 98 * expected_btypes - Object types expected
99 * package_index - Index of object within parent package (if 99 * package_index - Index of object within parent package (if
100 * applicable - ACPI_NOT_PACKAGE_ELEMENT 100 * applicable - ACPI_NOT_PACKAGE_ELEMENT
@@ -470,7 +470,7 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
470 * 470 *
471 * FUNCTION: acpi_ns_repair_null_element 471 * FUNCTION: acpi_ns_repair_null_element
472 * 472 *
473 * PARAMETERS: Data - Pointer to validation data structure 473 * PARAMETERS: data - Pointer to validation data structure
474 * expected_btypes - Object types expected 474 * expected_btypes - Object types expected
475 * package_index - Index of object within parent package (if 475 * package_index - Index of object within parent package (if
476 * applicable - ACPI_NOT_PACKAGE_ELEMENT 476 * applicable - ACPI_NOT_PACKAGE_ELEMENT
@@ -509,17 +509,17 @@ acpi_ns_repair_null_element(struct acpi_predefined_data *data,
509 */ 509 */
510 if (expected_btypes & ACPI_RTYPE_INTEGER) { 510 if (expected_btypes & ACPI_RTYPE_INTEGER) {
511 511
512 /* Need an Integer - create a zero-value integer */ 512 /* Need an integer - create a zero-value integer */
513 513
514 new_object = acpi_ut_create_integer_object((u64)0); 514 new_object = acpi_ut_create_integer_object((u64)0);
515 } else if (expected_btypes & ACPI_RTYPE_STRING) { 515 } else if (expected_btypes & ACPI_RTYPE_STRING) {
516 516
517 /* Need a String - create a NULL string */ 517 /* Need a string - create a NULL string */
518 518
519 new_object = acpi_ut_create_string_object(0); 519 new_object = acpi_ut_create_string_object(0);
520 } else if (expected_btypes & ACPI_RTYPE_BUFFER) { 520 } else if (expected_btypes & ACPI_RTYPE_BUFFER) {
521 521
522 /* Need a Buffer - create a zero-length buffer */ 522 /* Need a buffer - create a zero-length buffer */
523 523
524 new_object = acpi_ut_create_buffer_object(0); 524 new_object = acpi_ut_create_buffer_object(0);
525 } else { 525 } else {
@@ -552,7 +552,7 @@ acpi_ns_repair_null_element(struct acpi_predefined_data *data,
552 * 552 *
553 * FUNCTION: acpi_ns_remove_null_elements 553 * FUNCTION: acpi_ns_remove_null_elements
554 * 554 *
555 * PARAMETERS: Data - Pointer to validation data structure 555 * PARAMETERS: data - Pointer to validation data structure
556 * package_type - An acpi_return_package_types value 556 * package_type - An acpi_return_package_types value
557 * obj_desc - A Package object 557 * obj_desc - A Package object
558 * 558 *
@@ -635,7 +635,7 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
635 * 635 *
636 * FUNCTION: acpi_ns_wrap_with_package 636 * FUNCTION: acpi_ns_wrap_with_package
637 * 637 *
638 * PARAMETERS: Data - Pointer to validation data structure 638 * PARAMETERS: data - Pointer to validation data structure
639 * original_object - Pointer to the object to repair. 639 * original_object - Pointer to the object to repair.
640 * obj_desc_ptr - The new package object is returned here 640 * obj_desc_ptr - The new package object is returned here
641 * 641 *
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 726bc8e687f7..90189251cdf0 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -149,8 +149,8 @@ static const struct acpi_repair_info acpi_ns_repairable_names[] = {
149 * 149 *
150 * FUNCTION: acpi_ns_complex_repairs 150 * FUNCTION: acpi_ns_complex_repairs
151 * 151 *
152 * PARAMETERS: Data - Pointer to validation data structure 152 * PARAMETERS: data - Pointer to validation data structure
153 * Node - Namespace node for the method/object 153 * node - Namespace node for the method/object
154 * validate_status - Original status of earlier validation 154 * validate_status - Original status of earlier validation
155 * return_object_ptr - Pointer to the object returned from the 155 * return_object_ptr - Pointer to the object returned from the
156 * evaluation of a method or object 156 * evaluation of a method or object
@@ -187,7 +187,7 @@ acpi_ns_complex_repairs(struct acpi_predefined_data *data,
187 * 187 *
188 * FUNCTION: acpi_ns_match_repairable_name 188 * FUNCTION: acpi_ns_match_repairable_name
189 * 189 *
190 * PARAMETERS: Node - Namespace node for the method/object 190 * PARAMETERS: node - Namespace node for the method/object
191 * 191 *
192 * RETURN: Pointer to entry in repair table. NULL indicates not found. 192 * RETURN: Pointer to entry in repair table. NULL indicates not found.
193 * 193 *
@@ -218,7 +218,7 @@ static const struct acpi_repair_info *acpi_ns_match_repairable_name(struct
218 * 218 *
219 * FUNCTION: acpi_ns_repair_ALR 219 * FUNCTION: acpi_ns_repair_ALR
220 * 220 *
221 * PARAMETERS: Data - Pointer to validation data structure 221 * PARAMETERS: data - Pointer to validation data structure
222 * return_object_ptr - Pointer to the object returned from the 222 * return_object_ptr - Pointer to the object returned from the
223 * evaluation of a method or object 223 * evaluation of a method or object
224 * 224 *
@@ -247,7 +247,7 @@ acpi_ns_repair_ALR(struct acpi_predefined_data *data,
247 * 247 *
248 * FUNCTION: acpi_ns_repair_FDE 248 * FUNCTION: acpi_ns_repair_FDE
249 * 249 *
250 * PARAMETERS: Data - Pointer to validation data structure 250 * PARAMETERS: data - Pointer to validation data structure
251 * return_object_ptr - Pointer to the object returned from the 251 * return_object_ptr - Pointer to the object returned from the
252 * evaluation of a method or object 252 * evaluation of a method or object
253 * 253 *
@@ -335,7 +335,7 @@ acpi_ns_repair_FDE(struct acpi_predefined_data *data,
335 * 335 *
336 * FUNCTION: acpi_ns_repair_CID 336 * FUNCTION: acpi_ns_repair_CID
337 * 337 *
338 * PARAMETERS: Data - Pointer to validation data structure 338 * PARAMETERS: data - Pointer to validation data structure
339 * return_object_ptr - Pointer to the object returned from the 339 * return_object_ptr - Pointer to the object returned from the
340 * evaluation of a method or object 340 * evaluation of a method or object
341 * 341 *
@@ -405,7 +405,7 @@ acpi_ns_repair_CID(struct acpi_predefined_data *data,
405 * 405 *
406 * FUNCTION: acpi_ns_repair_HID 406 * FUNCTION: acpi_ns_repair_HID
407 * 407 *
408 * PARAMETERS: Data - Pointer to validation data structure 408 * PARAMETERS: data - Pointer to validation data structure
409 * return_object_ptr - Pointer to the object returned from the 409 * return_object_ptr - Pointer to the object returned from the
410 * evaluation of a method or object 410 * evaluation of a method or object
411 * 411 *
@@ -487,7 +487,7 @@ acpi_ns_repair_HID(struct acpi_predefined_data *data,
487 * 487 *
488 * FUNCTION: acpi_ns_repair_TSS 488 * FUNCTION: acpi_ns_repair_TSS
489 * 489 *
490 * PARAMETERS: Data - Pointer to validation data structure 490 * PARAMETERS: data - Pointer to validation data structure
491 * return_object_ptr - Pointer to the object returned from the 491 * return_object_ptr - Pointer to the object returned from the
492 * evaluation of a method or object 492 * evaluation of a method or object
493 * 493 *
@@ -531,7 +531,7 @@ acpi_ns_repair_TSS(struct acpi_predefined_data *data,
531 * 531 *
532 * FUNCTION: acpi_ns_repair_PSS 532 * FUNCTION: acpi_ns_repair_PSS
533 * 533 *
534 * PARAMETERS: Data - Pointer to validation data structure 534 * PARAMETERS: data - Pointer to validation data structure
535 * return_object_ptr - Pointer to the object returned from the 535 * return_object_ptr - Pointer to the object returned from the
536 * evaluation of a method or object 536 * evaluation of a method or object
537 * 537 *
@@ -600,7 +600,7 @@ acpi_ns_repair_PSS(struct acpi_predefined_data *data,
600 * 600 *
601 * FUNCTION: acpi_ns_check_sorted_list 601 * FUNCTION: acpi_ns_check_sorted_list
602 * 602 *
603 * PARAMETERS: Data - Pointer to validation data structure 603 * PARAMETERS: data - Pointer to validation data structure
604 * return_object - Pointer to the top-level returned object 604 * return_object - Pointer to the top-level returned object
605 * expected_count - Minimum length of each sub-package 605 * expected_count - Minimum length of each sub-package
606 * sort_index - Sub-package entry to sort on 606 * sort_index - Sub-package entry to sort on
@@ -707,9 +707,9 @@ acpi_ns_check_sorted_list(struct acpi_predefined_data *data,
707 * 707 *
708 * FUNCTION: acpi_ns_sort_list 708 * FUNCTION: acpi_ns_sort_list
709 * 709 *
710 * PARAMETERS: Elements - Package object element list 710 * PARAMETERS: elements - Package object element list
711 * Count - Element count for above 711 * count - Element count for above
712 * Index - Sort by which package element 712 * index - Sort by which package element
713 * sort_direction - Ascending or Descending sort 713 * sort_direction - Ascending or Descending sort
714 * 714 *
715 * RETURN: None 715 * RETURN: None
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 507043d66114..456cc859f869 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -65,7 +65,7 @@ acpi_ns_search_parent_tree(u32 target_name,
65 * 65 *
66 * PARAMETERS: target_name - Ascii ACPI name to search for 66 * PARAMETERS: target_name - Ascii ACPI name to search for
67 * parent_node - Starting node where search will begin 67 * parent_node - Starting node where search will begin
68 * Type - Object type to match 68 * type - Object type to match
69 * return_node - Where the matched Named obj is returned 69 * return_node - Where the matched Named obj is returned
70 * 70 *
71 * RETURN: Status 71 * RETURN: Status
@@ -175,8 +175,8 @@ acpi_ns_search_one_scope(u32 target_name,
175 * FUNCTION: acpi_ns_search_parent_tree 175 * FUNCTION: acpi_ns_search_parent_tree
176 * 176 *
177 * PARAMETERS: target_name - Ascii ACPI name to search for 177 * PARAMETERS: target_name - Ascii ACPI name to search for
178 * Node - Starting node where search will begin 178 * node - Starting node where search will begin
179 * Type - Object type to match 179 * type - Object type to match
180 * return_node - Where the matched Node is returned 180 * return_node - Where the matched Node is returned
181 * 181 *
182 * RETURN: Status 182 * RETURN: Status
@@ -264,11 +264,11 @@ acpi_ns_search_parent_tree(u32 target_name,
264 * 264 *
265 * PARAMETERS: target_name - Ascii ACPI name to search for (4 chars) 265 * PARAMETERS: target_name - Ascii ACPI name to search for (4 chars)
266 * walk_state - Current state of the walk 266 * walk_state - Current state of the walk
267 * Node - Starting node where search will begin 267 * node - Starting node where search will begin
268 * interpreter_mode - Add names only in ACPI_MODE_LOAD_PASS_x. 268 * interpreter_mode - Add names only in ACPI_MODE_LOAD_PASS_x.
269 * Otherwise, search only. 269 * Otherwise, search only.
270 * Type - Object type to match 270 * type - Object type to match
271 * Flags - Flags describing the search restrictions 271 * flags - Flags describing the search restrictions
272 * return_node - Where the Node is returned 272 * return_node - Where the Node is returned
273 * 273 *
274 * RETURN: Status 274 * RETURN: Status
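
Editor's note: the renames above sit on the classic upward scope search: try the immediate scope, then each enclosing parent until the root is exhausted. A schematic sketch with illustrative types (search_one_scope() is a hypothetical helper standing in for acpi_ns_search_one_scope):

    struct node {
            struct node *parent;
            /* name, child list, type, ... */
    };

    static struct node *search_one_scope(u32 target_name, struct node *scope);

    static struct node *search_parent_tree(u32 target_name, struct node *node)
    {
            struct node *scope = node->parent;

            while (scope) {
                    struct node *found = search_one_scope(target_name, scope);

                    if (found)
                            return found;

                    scope = scope->parent;  /* climb one scope and retry */
            }

            return NULL;                    /* not found up to the root */
    }
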
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 75113759f69d..ef753a41e087 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -62,8 +62,8 @@ acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search);
62 * 62 *
63 * FUNCTION: acpi_ns_print_node_pathname 63 * FUNCTION: acpi_ns_print_node_pathname
64 * 64 *
65 * PARAMETERS: Node - Object 65 * PARAMETERS: node - Object
66 * Message - Prefix message 66 * message - Prefix message
67 * 67 *
68 * DESCRIPTION: Print an object's full namespace pathname 68 * DESCRIPTION: Print an object's full namespace pathname
69 * Manages allocation/freeing of a pathname buffer 69 * Manages allocation/freeing of a pathname buffer
@@ -101,7 +101,7 @@ acpi_ns_print_node_pathname(struct acpi_namespace_node *node,
101 * 101 *
102 * FUNCTION: acpi_ns_valid_root_prefix 102 * FUNCTION: acpi_ns_valid_root_prefix
103 * 103 *
104 * PARAMETERS: Prefix - Character to be checked 104 * PARAMETERS: prefix - Character to be checked
105 * 105 *
106 * RETURN: TRUE if a valid prefix 106 * RETURN: TRUE if a valid prefix
107 * 107 *
@@ -119,7 +119,7 @@ u8 acpi_ns_valid_root_prefix(char prefix)
119 * 119 *
120 * FUNCTION: acpi_ns_valid_path_separator 120 * FUNCTION: acpi_ns_valid_path_separator
121 * 121 *
122 * PARAMETERS: Sep - Character to be checked 122 * PARAMETERS: sep - Character to be checked
123 * 123 *
124 * RETURN: TRUE if a valid path separator 124 * RETURN: TRUE if a valid path separator
125 * 125 *
@@ -137,7 +137,7 @@ static u8 acpi_ns_valid_path_separator(char sep)
137 * 137 *
138 * FUNCTION: acpi_ns_get_type 138 * FUNCTION: acpi_ns_get_type
139 * 139 *
140 * PARAMETERS: Node - Parent Node to be examined 140 * PARAMETERS: node - Parent Node to be examined
141 * 141 *
142 * RETURN: Type field from Node whose handle is passed 142 * RETURN: Type field from Node whose handle is passed
143 * 143 *
@@ -161,7 +161,7 @@ acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
161 * 161 *
162 * FUNCTION: acpi_ns_local 162 * FUNCTION: acpi_ns_local
163 * 163 *
164 * PARAMETERS: Type - A namespace object type 164 * PARAMETERS: type - A namespace object type
165 * 165 *
166 * RETURN: LOCAL if names must be found locally in objects of the 166 * RETURN: LOCAL if names must be found locally in objects of the
167 * passed type, 0 if enclosing scopes should be searched 167 * passed type, 0 if enclosing scopes should be searched
@@ -189,7 +189,7 @@ u32 acpi_ns_local(acpi_object_type type)
189 * 189 *
190 * FUNCTION: acpi_ns_get_internal_name_length 190 * FUNCTION: acpi_ns_get_internal_name_length
191 * 191 *
192 * PARAMETERS: Info - Info struct initialized with the 192 * PARAMETERS: info - Info struct initialized with the
193 * external name pointer. 193 * external name pointer.
194 * 194 *
195 * RETURN: None 195 * RETURN: None
@@ -260,7 +260,7 @@ void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
260 * 260 *
261 * FUNCTION: acpi_ns_build_internal_name 261 * FUNCTION: acpi_ns_build_internal_name
262 * 262 *
263 * PARAMETERS: Info - Info struct fully initialized 263 * PARAMETERS: info - Info struct fully initialized
264 * 264 *
265 * RETURN: Status 265 * RETURN: Status
266 * 266 *
@@ -371,7 +371,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
371 * FUNCTION: acpi_ns_internalize_name 371 * FUNCTION: acpi_ns_internalize_name
372 * 372 *
373 * PARAMETERS: *external_name - External representation of name 373 * PARAMETERS: *external_name - External representation of name
374 * **Converted Name - Where to return the resulting 374 * **Converted name - Where to return the resulting
 375 * internal representation of the name 375 * internal representation of the name
376 * 376 *
377 * RETURN: Status 377 * RETURN: Status
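
Editor's note: behind these headers is the ACPI rule that an internal name segment is always exactly four bytes, with short external names padded by trailing underscores (so "CPU" is stored as "CPU_"). A minimal sketch of that step, using a hypothetical pad_nameseg() helper:

    static void pad_nameseg(const char *src, char seg[4])
    {
            int i;

            for (i = 0; i < 4; i++) {
                    if (*src && *src != '.')
                            seg[i] = *src++;        /* copy up to 4 chars */
                    else
                            seg[i] = '_';           /* pad short segments */
            }
    }
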
@@ -575,7 +575,7 @@ acpi_ns_externalize_name(u32 internal_name_length,
575 * 575 *
576 * FUNCTION: acpi_ns_validate_handle 576 * FUNCTION: acpi_ns_validate_handle
577 * 577 *
578 * PARAMETERS: Handle - Handle to be validated and typecast to a 578 * PARAMETERS: handle - Handle to be validated and typecast to a
579 * namespace node. 579 * namespace node.
580 * 580 *
581 * RETURN: A pointer to a namespace node 581 * RETURN: A pointer to a namespace node
@@ -651,7 +651,7 @@ void acpi_ns_terminate(void)
651 * 651 *
652 * FUNCTION: acpi_ns_opens_scope 652 * FUNCTION: acpi_ns_opens_scope
653 * 653 *
654 * PARAMETERS: Type - A valid namespace type 654 * PARAMETERS: type - A valid namespace type
655 * 655 *
656 * RETURN: NEWSCOPE if the passed type "opens a name scope" according 656 * RETURN: NEWSCOPE if the passed type "opens a name scope" according
657 * to the ACPI specification, else 0 657 * to the ACPI specification, else 0
@@ -677,14 +677,14 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
677 * 677 *
678 * FUNCTION: acpi_ns_get_node 678 * FUNCTION: acpi_ns_get_node
679 * 679 *
680 * PARAMETERS: *Pathname - Name to be found, in external (ASL) format. The 680 * PARAMETERS: *pathname - Name to be found, in external (ASL) format. The
 681 * \ (backslash) and ^ (caret) prefixes, and the 681 * \ (backslash) and ^ (caret) prefixes, and the
682 * . (period) to separate segments are supported. 682 * . (period) to separate segments are supported.
683 * prefix_node - Root of subtree to be searched, or NS_ALL for the 683 * prefix_node - Root of subtree to be searched, or NS_ALL for the
684 * root of the name space. If Name is fully 684 * root of the name space. If Name is fully
685 * qualified (first s8 is '\'), the passed value 685 * qualified (first s8 is '\'), the passed value
686 * of Scope will not be accessed. 686 * of Scope will not be accessed.
687 * Flags - Used to indicate whether to perform upsearch or 687 * flags - Used to indicate whether to perform upsearch or
688 * not. 688 * not.
689 * return_node - Where the Node is returned 689 * return_node - Where the Node is returned
690 * 690 *
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index f69895a54895..730bccc5e7f7 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -88,7 +88,7 @@ struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
88 * 88 *
89 * FUNCTION: acpi_ns_get_next_node_typed 89 * FUNCTION: acpi_ns_get_next_node_typed
90 * 90 *
91 * PARAMETERS: Type - Type of node to be searched for 91 * PARAMETERS: type - Type of node to be searched for
92 * parent_node - Parent node whose children we are 92 * parent_node - Parent node whose children we are
93 * getting 93 * getting
94 * child_node - Previous child that was found. 94 * child_node - Previous child that was found.
@@ -151,16 +151,16 @@ struct acpi_namespace_node *acpi_ns_get_next_node_typed(acpi_object_type type,
151 * 151 *
152 * FUNCTION: acpi_ns_walk_namespace 152 * FUNCTION: acpi_ns_walk_namespace
153 * 153 *
154 * PARAMETERS: Type - acpi_object_type to search for 154 * PARAMETERS: type - acpi_object_type to search for
155 * start_node - Handle in namespace where search begins 155 * start_node - Handle in namespace where search begins
156 * max_depth - Depth to which search is to reach 156 * max_depth - Depth to which search is to reach
157 * Flags - Whether to unlock the NS before invoking 157 * flags - Whether to unlock the NS before invoking
158 * the callback routine 158 * the callback routine
159 * pre_order_visit - Called during tree pre-order visit 159 * pre_order_visit - Called during tree pre-order visit
160 * when an object of "Type" is found 160 * when an object of "Type" is found
161 * post_order_visit - Called during tree post-order visit 161 * post_order_visit - Called during tree post-order visit
162 * when an object of "Type" is found 162 * when an object of "Type" is found
163 * Context - Passed to user function(s) above 163 * context - Passed to user function(s) above
164 * return_value - from the user_function if terminated 164 * return_value - from the user_function if terminated
165 * early. Otherwise, returns NULL. 165 * early. Otherwise, returns NULL.
166 * RETURNS: Status 166 * RETURNS: Status
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 71d15f61807b..9692e6702333 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -58,8 +58,8 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info);
58 * 58 *
59 * FUNCTION: acpi_evaluate_object_typed 59 * FUNCTION: acpi_evaluate_object_typed
60 * 60 *
61 * PARAMETERS: Handle - Object handle (optional) 61 * PARAMETERS: handle - Object handle (optional)
62 * Pathname - Object pathname (optional) 62 * pathname - Object pathname (optional)
63 * external_params - List of parameters to pass to method, 63 * external_params - List of parameters to pass to method,
64 * terminated by NULL. May be NULL 64 * terminated by NULL. May be NULL
65 * if no parameters are being passed. 65 * if no parameters are being passed.
@@ -152,8 +152,8 @@ ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed)
152 * 152 *
153 * FUNCTION: acpi_evaluate_object 153 * FUNCTION: acpi_evaluate_object
154 * 154 *
155 * PARAMETERS: Handle - Object handle (optional) 155 * PARAMETERS: handle - Object handle (optional)
156 * Pathname - Object pathname (optional) 156 * pathname - Object pathname (optional)
157 * external_params - List of parameters to pass to method, 157 * external_params - List of parameters to pass to method,
158 * terminated by NULL. May be NULL 158 * terminated by NULL. May be NULL
159 * if no parameters are being passed. 159 * if no parameters are being passed.
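
Editor's note: a hedged usage sketch for the interface documented above: evaluate a method with one integer argument and release the returned buffer. "MTHD" and call_mthd() are hypothetical names; the structures are the standard ACPICA ones:

    static acpi_status call_mthd(acpi_handle handle)
    {
            union acpi_object arg = {
                    .integer = { .type = ACPI_TYPE_INTEGER, .value = 1 },
            };
            struct acpi_object_list args = { .count = 1, .pointer = &arg };
            struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
            acpi_status status;

            status = acpi_evaluate_object(handle, "MTHD", &args, &result);
            if (ACPI_SUCCESS(status))
                    kfree(result.pointer);  /* caller owns the returned buffer */

            return status;
    }
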
@@ -364,7 +364,7 @@ ACPI_EXPORT_SYMBOL(acpi_evaluate_object)
364 * 364 *
365 * FUNCTION: acpi_ns_resolve_references 365 * FUNCTION: acpi_ns_resolve_references
366 * 366 *
367 * PARAMETERS: Info - Evaluation info block 367 * PARAMETERS: info - Evaluation info block
368 * 368 *
369 * RETURN: Info->return_object is replaced with the dereferenced object 369 * RETURN: Info->return_object is replaced with the dereferenced object
370 * 370 *
@@ -431,14 +431,14 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info)
431 * 431 *
432 * FUNCTION: acpi_walk_namespace 432 * FUNCTION: acpi_walk_namespace
433 * 433 *
434 * PARAMETERS: Type - acpi_object_type to search for 434 * PARAMETERS: type - acpi_object_type to search for
435 * start_object - Handle in namespace where search begins 435 * start_object - Handle in namespace where search begins
436 * max_depth - Depth to which search is to reach 436 * max_depth - Depth to which search is to reach
437 * pre_order_visit - Called during tree pre-order visit 437 * pre_order_visit - Called during tree pre-order visit
438 * when an object of "Type" is found 438 * when an object of "Type" is found
439 * post_order_visit - Called during tree post-order visit 439 * post_order_visit - Called during tree post-order visit
440 * when an object of "Type" is found 440 * when an object of "Type" is found
441 * Context - Passed to user function(s) above 441 * context - Passed to user function(s) above
442 * return_value - Location where return value of 442 * return_value - Location where return value of
443 * user_function is put if terminated early 443 * user_function is put if terminated early
444 * 444 *
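
Editor's note: a usage sketch matching the parameter list above: a pre-order-only walk that counts device nodes. count_device() and count_acpi_devices() are illustrative names; the callback shape is the standard acpi_walk_callback:

    static acpi_status count_device(acpi_handle handle, u32 level,
                                    void *context, void **return_value)
    {
            u32 *count = context;

            (*count)++;
            return AE_OK;   /* returning AE_CTRL_TERMINATE stops the walk */
    }

    static u32 count_acpi_devices(void)
    {
            u32 count = 0;

            acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
                                ACPI_UINT32_MAX, count_device, NULL,
                                &count, NULL);
            return count;
    }
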
@@ -646,7 +646,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
646 * 646 *
647 * PARAMETERS: HID - HID to search for. Can be NULL. 647 * PARAMETERS: HID - HID to search for. Can be NULL.
648 * user_function - Called when a matching object is found 648 * user_function - Called when a matching object is found
649 * Context - Passed to user function 649 * context - Passed to user function
650 * return_value - Location where return value of 650 * return_value - Location where return value of
651 * user_function is put if terminated early 651 * user_function is put if terminated early
652 * 652 *
@@ -716,8 +716,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_devices)
716 * FUNCTION: acpi_attach_data 716 * FUNCTION: acpi_attach_data
717 * 717 *
718 * PARAMETERS: obj_handle - Namespace node 718 * PARAMETERS: obj_handle - Namespace node
719 * Handler - Handler for this attachment 719 * handler - Handler for this attachment
720 * Data - Pointer to data to be attached 720 * data - Pointer to data to be attached
721 * 721 *
722 * RETURN: Status 722 * RETURN: Status
723 * 723 *
@@ -764,7 +764,7 @@ ACPI_EXPORT_SYMBOL(acpi_attach_data)
764 * FUNCTION: acpi_detach_data 764 * FUNCTION: acpi_detach_data
765 * 765 *
766 * PARAMETERS: obj_handle - Namespace node handle 766 * PARAMETERS: obj_handle - Namespace node handle
767 * Handler - Handler used in call to acpi_attach_data 767 * handler - Handler used in call to acpi_attach_data
768 * 768 *
769 * RETURN: Status 769 * RETURN: Status
770 * 770 *
@@ -810,8 +810,8 @@ ACPI_EXPORT_SYMBOL(acpi_detach_data)
810 * FUNCTION: acpi_get_data 810 * FUNCTION: acpi_get_data
811 * 811 *
812 * PARAMETERS: obj_handle - Namespace node 812 * PARAMETERS: obj_handle - Namespace node
813 * Handler - Handler used in call to attach_data 813 * handler - Handler used in call to attach_data
814 * Data - Where the data is returned 814 * data - Where the data is returned
815 * 815 *
816 * RETURN: Status 816 * RETURN: Status
817 * 817 *
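
Editor's note: the three headers above form one small API: data is attached to a namespace node together with a deletion handler, and can only be read back by naming that same handler. A schematic pairing with illustrative names:

    static void my_data_handler(acpi_handle object, void *data)
    {
            kfree(data);    /* node is being deleted; drop the payload */
    }

    static acpi_status stash(acpi_handle handle, void *payload)
    {
            return acpi_attach_data(handle, my_data_handler, payload);
    }

    static void *lookup(acpi_handle handle)
    {
            void *data = NULL;

            if (ACPI_FAILURE(acpi_get_data(handle, my_data_handler, &data)))
                    return NULL;

            return data;    /* only data attached with this handler */
    }
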
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index af401c9c4dfc..08e9610b34ca 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -61,8 +61,8 @@ static char *acpi_ns_copy_device_id(struct acpica_device_id *dest,
61 * 61 *
62 * FUNCTION: acpi_get_handle 62 * FUNCTION: acpi_get_handle
63 * 63 *
64 * PARAMETERS: Parent - Object to search under (search scope). 64 * PARAMETERS: parent - Object to search under (search scope).
65 * Pathname - Pointer to an asciiz string containing the 65 * pathname - Pointer to an asciiz string containing the
66 * name 66 * name
67 * ret_handle - Where the return handle is returned 67 * ret_handle - Where the return handle is returned
68 * 68 *
@@ -142,9 +142,9 @@ ACPI_EXPORT_SYMBOL(acpi_get_handle)
142 * 142 *
143 * FUNCTION: acpi_get_name 143 * FUNCTION: acpi_get_name
144 * 144 *
145 * PARAMETERS: Handle - Handle to be converted to a pathname 145 * PARAMETERS: handle - Handle to be converted to a pathname
146 * name_type - Full pathname or single segment 146 * name_type - Full pathname or single segment
147 * Buffer - Buffer for returned path 147 * buffer - Buffer for returned path
148 * 148 *
149 * RETURN: Pointer to a string containing the fully qualified Name. 149 * RETURN: Pointer to a string containing the fully qualified Name.
150 * 150 *
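
Editor's note: a small sketch pairing the two lookups documented above: resolve a pathname to a handle, then turn the handle back into a full pathname. show_sb_path() is an illustrative wrapper; \_SB is the standard system-bus scope:

    static void show_sb_path(void)
    {
            acpi_handle sb;
            struct acpi_buffer path = { ACPI_ALLOCATE_BUFFER, NULL };

            if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &sb)))
                    return;

            if (ACPI_SUCCESS(acpi_get_name(sb, ACPI_FULL_PATHNAME, &path))) {
                    /* path.pointer now holds "\_SB", NUL-terminated */
                    kfree(path.pointer);
            }
    }
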
@@ -219,8 +219,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_name)
219 * 219 *
220 * FUNCTION: acpi_ns_copy_device_id 220 * FUNCTION: acpi_ns_copy_device_id
221 * 221 *
222 * PARAMETERS: Dest - Pointer to the destination DEVICE_ID 222 * PARAMETERS: dest - Pointer to the destination DEVICE_ID
223 * Source - Pointer to the source DEVICE_ID 223 * source - Pointer to the source DEVICE_ID
224 * string_area - Pointer to where to copy the dest string 224 * string_area - Pointer to where to copy the dest string
225 * 225 *
226 * RETURN: Pointer to the next string area 226 * RETURN: Pointer to the next string area
@@ -247,7 +247,7 @@ static char *acpi_ns_copy_device_id(struct acpica_device_id *dest,
247 * 247 *
248 * FUNCTION: acpi_get_object_info 248 * FUNCTION: acpi_get_object_info
249 * 249 *
250 * PARAMETERS: Handle - Object Handle 250 * PARAMETERS: handle - Object Handle
251 * return_buffer - Where the info is returned 251 * return_buffer - Where the info is returned
252 * 252 *
253 * RETURN: Status 253 * RETURN: Status
@@ -493,7 +493,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_object_info)
493 * 493 *
494 * FUNCTION: acpi_install_method 494 * FUNCTION: acpi_install_method
495 * 495 *
496 * PARAMETERS: Buffer - An ACPI table containing one control method 496 * PARAMETERS: buffer - An ACPI table containing one control method
497 * 497 *
498 * RETURN: Status 498 * RETURN: Status
499 * 499 *
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 880a605cee20..6766fc4f088f 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -98,7 +98,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_id)
98 * 98 *
99 * FUNCTION: acpi_get_type 99 * FUNCTION: acpi_get_type
100 * 100 *
101 * PARAMETERS: Handle - Handle of object whose type is desired 101 * PARAMETERS: handle - Handle of object whose type is desired
102 * ret_type - Where the type will be placed 102 * ret_type - Where the type will be placed
103 * 103 *
104 * RETURN: Status 104 * RETURN: Status
@@ -151,7 +151,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_type)
151 * 151 *
152 * FUNCTION: acpi_get_parent 152 * FUNCTION: acpi_get_parent
153 * 153 *
154 * PARAMETERS: Handle - Handle of object whose parent is desired 154 * PARAMETERS: handle - Handle of object whose parent is desired
155 * ret_handle - Where the parent handle will be placed 155 * ret_handle - Where the parent handle will be placed
156 * 156 *
157 * RETURN: Status 157 * RETURN: Status
@@ -212,8 +212,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_parent)
212 * 212 *
213 * FUNCTION: acpi_get_next_object 213 * FUNCTION: acpi_get_next_object
214 * 214 *
215 * PARAMETERS: Type - Type of object to be searched for 215 * PARAMETERS: type - Type of object to be searched for
216 * Parent - Parent object whose children we are getting 216 * parent - Parent object whose children we are getting
217 * last_child - Previous child that was found. 217 * last_child - Previous child that was found.
218 * The NEXT child will be returned 218 * The NEXT child will be returned
219 * ret_handle - Where handle to the next object is placed 219 * ret_handle - Where handle to the next object is placed
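
Editor's note: the header above describes the usual first/next enumeration idiom: a NULL last child yields the first child, and each later call yields the following sibling until AE_NOT_FOUND. A schematic loop:

    static void for_each_child_device(acpi_handle parent)
    {
            acpi_handle child = NULL;

            /* NULL on the first call returns the first child */
            while (ACPI_SUCCESS(acpi_get_next_object(ACPI_TYPE_DEVICE,
                                                     parent, child, &child))) {
                    /* process "child" here */
            }
    }
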
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 5ac36aba507c..844464c4f901 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -210,7 +210,7 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state)
210 * FUNCTION: acpi_ps_get_next_namepath 210 * FUNCTION: acpi_ps_get_next_namepath
211 * 211 *
212 * PARAMETERS: parser_state - Current parser state object 212 * PARAMETERS: parser_state - Current parser state object
213 * Arg - Where the namepath will be stored 213 * arg - Where the namepath will be stored
214 * arg_count - If the namepath points to a control method 214 * arg_count - If the namepath points to a control method
215 * the method's argument is returned here. 215 * the method's argument is returned here.
216 * possible_method_call - Whether the namepath can possibly be the 216 * possible_method_call - Whether the namepath can possibly be the
@@ -379,7 +379,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
379 * 379 *
380 * PARAMETERS: parser_state - Current parser state object 380 * PARAMETERS: parser_state - Current parser state object
381 * arg_type - The argument type (AML_*_ARG) 381 * arg_type - The argument type (AML_*_ARG)
382 * Arg - Where the argument is returned 382 * arg - Where the argument is returned
383 * 383 *
384 * RETURN: None 384 * RETURN: None
385 * 385 *
@@ -618,6 +618,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
618 618
619 arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP); 619 arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
620 if (!arg) { 620 if (!arg) {
621 acpi_ps_free_op(field);
621 return_PTR(NULL); 622 return_PTR(NULL);
622 } 623 }
623 624
@@ -662,6 +663,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
662 } else { 663 } else {
663 arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP); 664 arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
664 if (!arg) { 665 if (!arg) {
666 acpi_ps_free_op(field);
665 return_PTR(NULL); 667 return_PTR(NULL);
666 } 668 }
667 669
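
Editor's note: the two psargs.c hunks above are a memory-leak fix rather than a comment cleanup: on the allocation-failure paths, the previously allocated field op must now be freed before bailing out. A schematic of the corrected pattern (alloc_field_pair() and the opcode choices are illustrative; the ps-layer calls are ACPICA's):

    static union acpi_parse_object *alloc_field_pair(void)
    {
            union acpi_parse_object *field, *arg;

            field = acpi_ps_alloc_op(AML_INT_NAMEDFIELD_OP);
            if (!field)
                    return NULL;

            arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
            if (!arg) {
                    acpi_ps_free_op(field); /* was leaked before this fix */
                    return NULL;
            }

            acpi_ps_append_arg(field, arg);
            return field;
    }
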
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 9547ad8a620b..799162c1b6df 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -167,7 +167,7 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
167 * PARAMETERS: walk_state - Current state 167 * PARAMETERS: walk_state - Current state
168 * aml_op_start - Begin of named Op in AML 168 * aml_op_start - Begin of named Op in AML
169 * unnamed_op - Early Op (not a named Op) 169 * unnamed_op - Early Op (not a named Op)
170 * Op - Returned Op 170 * op - Returned Op
171 * 171 *
172 * RETURN: Status 172 * RETURN: Status
173 * 173 *
@@ -323,7 +323,7 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
323 323
324 if (walk_state->op_info->flags & AML_CREATE) { 324 if (walk_state->op_info->flags & AML_CREATE) {
325 /* 325 /*
326 * Backup to beginning of create_xXXfield declaration 326 * Backup to beginning of create_XXXfield declaration
327 * body_length is unknown until we parse the body 327 * body_length is unknown until we parse the body
328 */ 328 */
329 op->named.data = aml_op_start; 329 op->named.data = aml_op_start;
@@ -380,7 +380,7 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
380 * 380 *
381 * PARAMETERS: walk_state - Current state 381 * PARAMETERS: walk_state - Current state
382 * aml_op_start - Op start in AML 382 * aml_op_start - Op start in AML
383 * Op - Current Op 383 * op - Current Op
384 * 384 *
385 * RETURN: Status 385 * RETURN: Status
386 * 386 *
@@ -679,8 +679,8 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op,
679 * FUNCTION: acpi_ps_complete_op 679 * FUNCTION: acpi_ps_complete_op
680 * 680 *
681 * PARAMETERS: walk_state - Current state 681 * PARAMETERS: walk_state - Current state
682 * Op - Returned Op 682 * op - Returned Op
683 * Status - Parse status before complete Op 683 * status - Parse status before complete Op
684 * 684 *
685 * RETURN: Status 685 * RETURN: Status
686 * 686 *
@@ -853,8 +853,8 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
853 * FUNCTION: acpi_ps_complete_final_op 853 * FUNCTION: acpi_ps_complete_final_op
854 * 854 *
855 * PARAMETERS: walk_state - Current state 855 * PARAMETERS: walk_state - Current state
856 * Op - Current Op 856 * op - Current Op
857 * Status - Current parse status before complete last 857 * status - Current parse status before complete last
858 * Op 858 * Op
859 * 859 *
860 * RETURN: Status 860 * RETURN: Status
@@ -1165,7 +1165,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
1165 1165
1166 if (walk_state->op_info->flags & AML_CREATE) { 1166 if (walk_state->op_info->flags & AML_CREATE) {
1167 /* 1167 /*
1168 * Backup to beginning of create_xXXfield declaration (1 for 1168 * Backup to beginning of create_XXXfield declaration (1 for
1169 * Opcode) 1169 * Opcode)
1170 * 1170 *
1171 * body_length is unknown until we parse the body 1171 * body_length is unknown until we parse the body
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index a0226fdcf75c..ed1d457bd5ca 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -724,7 +724,7 @@ static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = {
724 * 724 *
725 * FUNCTION: acpi_ps_get_opcode_info 725 * FUNCTION: acpi_ps_get_opcode_info
726 * 726 *
727 * PARAMETERS: Opcode - The AML opcode 727 * PARAMETERS: opcode - The AML opcode
728 * 728 *
729 * RETURN: A pointer to the info about the opcode. 729 * RETURN: A pointer to the info about the opcode.
730 * 730 *
@@ -769,7 +769,7 @@ const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
769 * 769 *
770 * FUNCTION: acpi_ps_get_opcode_name 770 * FUNCTION: acpi_ps_get_opcode_name
771 * 771 *
772 * PARAMETERS: Opcode - The AML opcode 772 * PARAMETERS: opcode - The AML opcode
773 * 773 *
774 * RETURN: A pointer to the name of the opcode (ASCII String) 774 * RETURN: A pointer to the name of the opcode (ASCII String)
775 * Note: Never returns NULL. 775 * Note: Never returns NULL.
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 2ff9c35a1968..01985703bb98 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -64,7 +64,7 @@ ACPI_MODULE_NAME("psparse")
64 * 64 *
65 * FUNCTION: acpi_ps_get_opcode_size 65 * FUNCTION: acpi_ps_get_opcode_size
66 * 66 *
67 * PARAMETERS: Opcode - An AML opcode 67 * PARAMETERS: opcode - An AML opcode
68 * 68 *
69 * RETURN: Size of the opcode, in bytes (1 or 2) 69 * RETURN: Size of the opcode, in bytes (1 or 2)
70 * 70 *
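
Editor's note: the rule this function encodes: extended AML opcodes carry a one-byte 0x5B prefix, so any opcode value above 0x00FF occupies two bytes in the byte stream. An equivalent sketch:

    static u32 opcode_size(u32 opcode)
    {
            /* extended opcodes are prefixed by AML_EXTENDED_PREFIX (0x5B) */
            return (opcode > 0x00ff) ? 2 : 1;
    }
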
@@ -121,7 +121,7 @@ u16 acpi_ps_peek_opcode(struct acpi_parse_state * parser_state)
121 * FUNCTION: acpi_ps_complete_this_op 121 * FUNCTION: acpi_ps_complete_this_op
122 * 122 *
123 * PARAMETERS: walk_state - Current State 123 * PARAMETERS: walk_state - Current State
124 * Op - Op to complete 124 * op - Op to complete
125 * 125 *
126 * RETURN: Status 126 * RETURN: Status
127 * 127 *
@@ -311,7 +311,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
311 * FUNCTION: acpi_ps_next_parse_state 311 * FUNCTION: acpi_ps_next_parse_state
312 * 312 *
313 * PARAMETERS: walk_state - Current state 313 * PARAMETERS: walk_state - Current state
314 * Op - Current parse op 314 * op - Current parse op
315 * callback_status - Status from previous operation 315 * callback_status - Status from previous operation
316 * 316 *
317 * RETURN: Status 317 * RETURN: Status
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index c872aa4b926e..608dc20dc173 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -93,7 +93,7 @@ u8 acpi_ps_has_completed_scope(struct acpi_parse_state * parser_state)
93 * FUNCTION: acpi_ps_init_scope 93 * FUNCTION: acpi_ps_init_scope
94 * 94 *
95 * PARAMETERS: parser_state - Current parser state object 95 * PARAMETERS: parser_state - Current parser state object
96 * Root - the Root Node of this new scope 96 * root - the Root Node of this new scope
97 * 97 *
98 * RETURN: Status 98 * RETURN: Status
99 * 99 *
@@ -131,7 +131,7 @@ acpi_ps_init_scope(struct acpi_parse_state * parser_state,
131 * FUNCTION: acpi_ps_push_scope 131 * FUNCTION: acpi_ps_push_scope
132 * 132 *
133 * PARAMETERS: parser_state - Current parser state object 133 * PARAMETERS: parser_state - Current parser state object
134 * Op - Current op to be pushed 134 * op - Current op to be pushed
135 * remaining_args - List of args remaining 135 * remaining_args - List of args remaining
136 * arg_count - Fixed or variable number of args 136 * arg_count - Fixed or variable number of args
137 * 137 *
@@ -184,7 +184,7 @@ acpi_ps_push_scope(struct acpi_parse_state *parser_state,
184 * FUNCTION: acpi_ps_pop_scope 184 * FUNCTION: acpi_ps_pop_scope
185 * 185 *
186 * PARAMETERS: parser_state - Current parser state object 186 * PARAMETERS: parser_state - Current parser state object
187 * Op - Where the popped op is returned 187 * op - Where the popped op is returned
188 * arg_list - Where the popped "next argument" is 188 * arg_list - Where the popped "next argument" is
189 * returned 189 * returned
190 * arg_count - Count of objects in arg_list 190 * arg_count - Count of objects in arg_list
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index 2b03cdbbe1c0..fdb2e71f3046 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -58,8 +58,8 @@ union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op);
58 * 58 *
59 * FUNCTION: acpi_ps_get_arg 59 * FUNCTION: acpi_ps_get_arg
60 * 60 *
61 * PARAMETERS: Op - Get an argument for this op 61 * PARAMETERS: op - Get an argument for this op
62 * Argn - Nth argument to get 62 * argn - Nth argument to get
63 * 63 *
64 * RETURN: The argument (as an Op object). NULL if argument does not exist 64 * RETURN: The argument (as an Op object). NULL if argument does not exist
65 * 65 *
@@ -114,8 +114,8 @@ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn)
114 * 114 *
115 * FUNCTION: acpi_ps_append_arg 115 * FUNCTION: acpi_ps_append_arg
116 * 116 *
117 * PARAMETERS: Op - Append an argument to this Op. 117 * PARAMETERS: op - Append an argument to this Op.
118 * Arg - Argument Op to append 118 * arg - Argument Op to append
119 * 119 *
120 * RETURN: None. 120 * RETURN: None.
121 * 121 *
@@ -188,8 +188,8 @@ acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
188 * 188 *
189 * FUNCTION: acpi_ps_get_depth_next 189 * FUNCTION: acpi_ps_get_depth_next
190 * 190 *
191 * PARAMETERS: Origin - Root of subtree to search 191 * PARAMETERS: origin - Root of subtree to search
192 * Op - Last (previous) Op that was found 192 * op - Last (previous) Op that was found
193 * 193 *
194 * RETURN: Next Op found in the search. 194 * RETURN: Next Op found in the search.
195 * 195 *
@@ -261,7 +261,7 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin,
261 * 261 *
262 * FUNCTION: acpi_ps_get_child 262 * FUNCTION: acpi_ps_get_child
263 * 263 *
264 * PARAMETERS: Op - Get the child of this Op 264 * PARAMETERS: op - Get the child of this Op
265 * 265 *
266 * RETURN: Child Op, Null if none is found. 266 * RETURN: Child Op, Null if none is found.
267 * 267 *
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 13bb131ae125..8736ad5f04d3 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -77,8 +77,8 @@ union acpi_parse_object *acpi_ps_create_scope_op(void)
77 * 77 *
78 * FUNCTION: acpi_ps_init_op 78 * FUNCTION: acpi_ps_init_op
79 * 79 *
80 * PARAMETERS: Op - A newly allocated Op object 80 * PARAMETERS: op - A newly allocated Op object
81 * Opcode - Opcode to store in the Op 81 * opcode - Opcode to store in the Op
82 * 82 *
83 * RETURN: None 83 * RETURN: None
84 * 84 *
@@ -103,7 +103,7 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
103 * 103 *
104 * FUNCTION: acpi_ps_alloc_op 104 * FUNCTION: acpi_ps_alloc_op
105 * 105 *
106 * PARAMETERS: Opcode - Opcode that will be stored in the new Op 106 * PARAMETERS: opcode - Opcode that will be stored in the new Op
107 * 107 *
108 * RETURN: Pointer to the new Op, null on failure 108 * RETURN: Pointer to the new Op, null on failure
109 * 109 *
@@ -160,7 +160,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
160 * 160 *
161 * FUNCTION: acpi_ps_free_op 161 * FUNCTION: acpi_ps_free_op
162 * 162 *
163 * PARAMETERS: Op - Op to be freed 163 * PARAMETERS: op - Op to be freed
164 * 164 *
165 * RETURN: None. 165 * RETURN: None.
166 * 166 *
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 9d98c5ff66a5..963e16225797 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -66,7 +66,7 @@ acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
66 * PARAMETERS: method_name - Valid ACPI name string 66 * PARAMETERS: method_name - Valid ACPI name string
67 * debug_level - Optional level mask. 0 to use default 67 * debug_level - Optional level mask. 0 to use default
68 * debug_layer - Optional layer mask. 0 to use default 68 * debug_layer - Optional layer mask. 0 to use default
69 * Flags - bit 1: one shot(1) or persistent(0) 69 * flags - bit 1: one shot(1) or persistent(0)
70 * 70 *
71 * RETURN: Status 71 * RETURN: Status
72 * 72 *
@@ -105,7 +105,7 @@ acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags)
105 * 105 *
106 * FUNCTION: acpi_ps_start_trace 106 * FUNCTION: acpi_ps_start_trace
107 * 107 *
108 * PARAMETERS: Info - Method info struct 108 * PARAMETERS: info - Method info struct
109 * 109 *
110 * RETURN: None 110 * RETURN: None
111 * 111 *
@@ -150,7 +150,7 @@ static void acpi_ps_start_trace(struct acpi_evaluate_info *info)
150 * 150 *
151 * FUNCTION: acpi_ps_stop_trace 151 * FUNCTION: acpi_ps_stop_trace
152 * 152 *
153 * PARAMETERS: Info - Method info struct 153 * PARAMETERS: info - Method info struct
154 * 154 *
155 * RETURN: None 155 * RETURN: None
156 * 156 *
@@ -193,10 +193,10 @@ static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
193 * 193 *
194 * FUNCTION: acpi_ps_execute_method 194 * FUNCTION: acpi_ps_execute_method
195 * 195 *
196 * PARAMETERS: Info - Method info block, contains: 196 * PARAMETERS: info - Method info block, contains:
197 * Node - Method Node to execute 197 * node - Method Node to execute
198 * obj_desc - Method object 198 * obj_desc - Method object
199 * Parameters - List of parameters to pass to the method, 199 * parameters - List of parameters to pass to the method,
200 * terminated by NULL. Params itself may be 200 * terminated by NULL. Params itself may be
201 * NULL if no parameters are being passed. 201 * NULL if no parameters are being passed.
202 * return_object - Where to put method's return value (if 202 * return_object - Where to put method's return value (if
@@ -361,9 +361,9 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
361 * 361 *
362 * FUNCTION: acpi_ps_update_parameter_list 362 * FUNCTION: acpi_ps_update_parameter_list
363 * 363 *
364 * PARAMETERS: Info - See struct acpi_evaluate_info 364 * PARAMETERS: info - See struct acpi_evaluate_info
365 * (Used: parameter_type and Parameters) 365 * (Used: parameter_type and Parameters)
366 * Action - Add or Remove reference 366 * action - Add or Remove reference
367 * 367 *
368 * RETURN: Status 368 * RETURN: Status
369 * 369 *
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index a0305652394f..856ff075b6ab 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -182,8 +182,8 @@ struct acpi_rsconvert_info acpi_rs_convert_ext_address64[5] = {
182 182
183 /* Revision ID */ 183 /* Revision ID */
184 184
185 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.ext_address64.revision_iD), 185 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.ext_address64.revision_ID),
186 AML_OFFSET(ext_address64.revision_iD), 186 AML_OFFSET(ext_address64.revision_ID),
187 1}, 187 1},
188 /* 188 /*
189 * These fields are contiguous in both the source and destination: 189 * These fields are contiguous in both the source and destination:
@@ -215,7 +215,7 @@ static struct acpi_rsconvert_info acpi_rs_convert_general_flags[6] = {
215 AML_OFFSET(address.resource_type), 215 AML_OFFSET(address.resource_type),
216 1}, 216 1},
217 217
218 /* General Flags - Consume, Decode, min_fixed, max_fixed */ 218 /* General flags - Consume, Decode, min_fixed, max_fixed */
219 219
220 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.producer_consumer), 220 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.producer_consumer),
221 AML_OFFSET(address.flags), 221 AML_OFFSET(address.flags),
@@ -293,8 +293,8 @@ static struct acpi_rsconvert_info acpi_rs_convert_io_flags[4] = {
293 * 293 *
294 * FUNCTION: acpi_rs_get_address_common 294 * FUNCTION: acpi_rs_get_address_common
295 * 295 *
296 * PARAMETERS: Resource - Pointer to the internal resource struct 296 * PARAMETERS: resource - Pointer to the internal resource struct
297 * Aml - Pointer to the AML resource descriptor 297 * aml - Pointer to the AML resource descriptor
298 * 298 *
299 * RETURN: TRUE if the resource_type field is OK, FALSE otherwise 299 * RETURN: TRUE if the resource_type field is OK, FALSE otherwise
300 * 300 *
@@ -343,8 +343,8 @@ acpi_rs_get_address_common(struct acpi_resource *resource,
343 * 343 *
344 * FUNCTION: acpi_rs_set_address_common 344 * FUNCTION: acpi_rs_set_address_common
345 * 345 *
346 * PARAMETERS: Aml - Pointer to the AML resource descriptor 346 * PARAMETERS: aml - Pointer to the AML resource descriptor
347 * Resource - Pointer to the internal resource struct 347 * resource - Pointer to the internal resource struct
348 * 348 *
349 * RETURN: None 349 * RETURN: None
350 * 350 *
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 3c6df4b7eb2d..de12469d1c9c 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -173,7 +173,7 @@ acpi_rs_stream_option_length(u32 resource_length,
173 * 173 *
174 * FUNCTION: acpi_rs_get_aml_length 174 * FUNCTION: acpi_rs_get_aml_length
175 * 175 *
176 * PARAMETERS: Resource - Pointer to the resource linked list 176 * PARAMETERS: resource - Pointer to the resource linked list
177 * size_needed - Where the required size is returned 177 * size_needed - Where the required size is returned
178 * 178 *
179 * RETURN: Status 179 * RETURN: Status
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 46d6eb38ae66..311cbc4f05fa 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -190,8 +190,8 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
190 * 190 *
191 * FUNCTION: acpi_rs_create_pci_routing_table 191 * FUNCTION: acpi_rs_create_pci_routing_table
192 * 192 *
193 * PARAMETERS: package_object - Pointer to a union acpi_operand_object 193 * PARAMETERS: package_object - Pointer to a package containing one
 194 * package 194 * or more ACPI_OPERAND_OBJECTs
195 * output_buffer - Pointer to the user's buffer 195 * output_buffer - Pointer to the user's buffer
196 * 196 *
197 * RETURN: Status AE_OK if okay, else a valid acpi_status code. 197 * RETURN: Status AE_OK if okay, else a valid acpi_status code.
@@ -199,7 +199,7 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
199 * AE_BUFFER_OVERFLOW and output_buffer->Length will point 199 * AE_BUFFER_OVERFLOW and output_buffer->Length will point
200 * to the size buffer needed. 200 * to the size buffer needed.
201 * 201 *
202 * DESCRIPTION: Takes the union acpi_operand_object package and creates a 202 * DESCRIPTION: Takes the union acpi_operand_object package and creates a
203 * linked list of PCI interrupt descriptions 203 * linked list of PCI interrupt descriptions
204 * 204 *
205 * NOTE: It is the caller's responsibility to ensure that the start of the 205 * NOTE: It is the caller's responsibility to ensure that the start of the
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index b4c581132393..4d11b072388c 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -703,7 +703,7 @@ acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source)
703 * 703 *
704 * FUNCTION: acpi_rs_dump_address_common 704 * FUNCTION: acpi_rs_dump_address_common
705 * 705 *
706 * PARAMETERS: Resource - Pointer to an internal resource descriptor 706 * PARAMETERS: resource - Pointer to an internal resource descriptor
707 * 707 *
708 * RETURN: None 708 * RETURN: None
709 * 709 *
@@ -850,8 +850,8 @@ void acpi_rs_dump_irq_list(u8 * route_table)
850 * 850 *
851 * FUNCTION: acpi_rs_out* 851 * FUNCTION: acpi_rs_out*
852 * 852 *
853 * PARAMETERS: Title - Name of the resource field 853 * PARAMETERS: title - Name of the resource field
854 * Value - Value of the resource field 854 * value - Value of the resource field
855 * 855 *
856 * RETURN: None 856 * RETURN: None
857 * 857 *
@@ -898,8 +898,8 @@ static void acpi_rs_out_title(char *title)
898 * 898 *
899 * FUNCTION: acpi_rs_dump*List 899 * FUNCTION: acpi_rs_dump*List
900 * 900 *
901 * PARAMETERS: Length - Number of elements in the list 901 * PARAMETERS: length - Number of elements in the list
902 * Data - Start of the list 902 * data - Start of the list
903 * 903 *
904 * RETURN: None 904 * RETURN: None
905 * 905 *
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 9be129f5d6f4..46b5324b22d6 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -139,7 +139,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
139 * 139 *
140 * FUNCTION: acpi_rs_convert_resources_to_aml 140 * FUNCTION: acpi_rs_convert_resources_to_aml
141 * 141 *
142 * PARAMETERS: Resource - Pointer to the resource linked list 142 * PARAMETERS: resource - Pointer to the resource linked list
143 * aml_size_needed - Calculated size of the byte stream 143 * aml_size_needed - Calculated size of the byte stream
144 * needed from calling acpi_rs_get_aml_length() 144 * needed from calling acpi_rs_get_aml_length()
145 * The size of the output_buffer is 145 * The size of the output_buffer is
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 8073b371cc7c..c6f291c2bc83 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -57,9 +57,9 @@ ACPI_MODULE_NAME("rsmisc")
57 * 57 *
58 * FUNCTION: acpi_rs_convert_aml_to_resource 58 * FUNCTION: acpi_rs_convert_aml_to_resource
59 * 59 *
60 * PARAMETERS: Resource - Pointer to the resource descriptor 60 * PARAMETERS: resource - Pointer to the resource descriptor
61 * Aml - Where the AML descriptor is returned 61 * aml - Where the AML descriptor is returned
62 * Info - Pointer to appropriate conversion table 62 * info - Pointer to appropriate conversion table
63 * 63 *
64 * RETURN: Status 64 * RETURN: Status
65 * 65 *
@@ -406,7 +406,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
406 406
407 case ACPI_RSC_EXIT_NE: 407 case ACPI_RSC_EXIT_NE:
408 /* 408 /*
409 * Control - Exit conversion if not equal 409 * control - Exit conversion if not equal
410 */ 410 */
411 switch (info->resource_offset) { 411 switch (info->resource_offset) {
412 case ACPI_RSC_COMPARE_AML_LENGTH: 412 case ACPI_RSC_COMPARE_AML_LENGTH:
@@ -454,9 +454,9 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
454 * 454 *
455 * FUNCTION: acpi_rs_convert_resource_to_aml 455 * FUNCTION: acpi_rs_convert_resource_to_aml
456 * 456 *
457 * PARAMETERS: Resource - Pointer to the resource descriptor 457 * PARAMETERS: resource - Pointer to the resource descriptor
458 * Aml - Where the AML descriptor is returned 458 * aml - Where the AML descriptor is returned
459 * Info - Pointer to appropriate conversion table 459 * info - Pointer to appropriate conversion table
460 * 460 *
461 * RETURN: Status 461 * RETURN: Status
462 * 462 *
@@ -726,7 +726,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
726 726
727 case ACPI_RSC_EXIT_LE: 727 case ACPI_RSC_EXIT_LE:
728 /* 728 /*
729 * Control - Exit conversion if less than or equal 729 * control - Exit conversion if less than or equal
730 */ 730 */
731 if (item_count <= info->value) { 731 if (item_count <= info->value) {
732 goto exit; 732 goto exit;
@@ -735,7 +735,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
735 735
736 case ACPI_RSC_EXIT_NE: 736 case ACPI_RSC_EXIT_NE:
737 /* 737 /*
738 * Control - Exit conversion if not equal 738 * control - Exit conversion if not equal
739 */ 739 */
740 switch (COMPARE_OPCODE(info)) { 740 switch (COMPARE_OPCODE(info)) {
741 case ACPI_RSC_COMPARE_VALUE: 741 case ACPI_RSC_COMPARE_VALUE:
@@ -757,7 +757,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
757 757
758 case ACPI_RSC_EXIT_EQ: 758 case ACPI_RSC_EXIT_EQ:
759 /* 759 /*
760 * Control - Exit conversion if equal 760 * control - Exit conversion if equal
761 */ 761 */
762 if (*ACPI_ADD_PTR(u8, resource, 762 if (*ACPI_ADD_PTR(u8, resource,
763 COMPARE_TARGET(info)) == 763 COMPARE_TARGET(info)) ==
@@ -783,7 +783,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
783#if 0 783#if 0
784/* Previous resource validations */ 784/* Previous resource validations */
785 785
786if (aml->ext_address64.revision_iD != AML_RESOURCE_EXTENDED_ADDRESS_REVISION) { 786if (aml->ext_address64.revision_ID != AML_RESOURCE_EXTENDED_ADDRESS_REVISION) {
787 return_ACPI_STATUS(AE_SUPPORT); 787 return_ACPI_STATUS(AE_SUPPORT);
788} 788}
789 789
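
Editor's note: for orientation, rsmisc.c's converters are table-driven: each struct acpi_rsconvert_info entry (as initialized in the rsaddr.c hunks earlier) names a micro-op plus destination/source offsets, and a small interpreter executes entries until a control op exits. A simplified model with illustrative types and opcodes:

    enum { RSC_MOVE8, RSC_1BITFLAG, RSC_EXIT };

    struct convert_entry {
            u8 opcode;
            u8 dst_offset;
            u8 src_offset;
            u8 value;       /* byte count, or flag bit position */
    };

    static void run_convert_table(void *dst, void *src,
                                  const struct convert_entry *e)
    {
            for (;; e++) {
                    u8 *d = (u8 *)dst + e->dst_offset;
                    u8 *s = (u8 *)src + e->src_offset;

                    switch (e->opcode) {
                    case RSC_MOVE8:
                            memcpy(d, s, e->value);     /* copy e->value bytes */
                            break;
                    case RSC_1BITFLAG:
                            *d = (*s >> e->value) & 1;  /* extract one flag bit */
                            break;
                    case RSC_EXIT:
                            return;
                    }
            }
    }
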
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 433a375deb93..37d5241c0acf 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -53,8 +53,8 @@ ACPI_MODULE_NAME("rsutils")
53 * 53 *
54 * FUNCTION: acpi_rs_decode_bitmask 54 * FUNCTION: acpi_rs_decode_bitmask
55 * 55 *
56 * PARAMETERS: Mask - Bitmask to decode 56 * PARAMETERS: mask - Bitmask to decode
57 * List - Where the converted list is returned 57 * list - Where the converted list is returned
58 * 58 *
59 * RETURN: Count of bits set (length of list) 59 * RETURN: Count of bits set (length of list)
60 * 60 *
@@ -86,8 +86,8 @@ u8 acpi_rs_decode_bitmask(u16 mask, u8 * list)
86 * 86 *
87 * FUNCTION: acpi_rs_encode_bitmask 87 * FUNCTION: acpi_rs_encode_bitmask
88 * 88 *
89 * PARAMETERS: List - List of values to encode 89 * PARAMETERS: list - List of values to encode
90 * Count - Length of list 90 * count - Length of list
91 * 91 *
92 * RETURN: Encoded bitmask 92 * RETURN: Encoded bitmask
93 * 93 *
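
Editor's note: the decode/encode pair documented here is a plain bitmap transform (used, for example, on IRQ masks in resource descriptors). A minimal sketch consistent with the headers:

    static u8 decode_bitmask(u16 mask, u8 *list)
    {
            u8 i, count = 0;

            for (i = 0; mask; i++, mask >>= 1)
                    if (mask & 1)
                            list[count++] = i;  /* bit i set -> value i */

            return count;
    }

    static u16 encode_bitmask(u8 *list, u8 count)
    {
            u16 mask = 0;
            u8 i;

            for (i = 0; i < count; i++)
                    mask |= (u16)1 << list[i];

            return mask;
    }
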
@@ -115,8 +115,8 @@ u16 acpi_rs_encode_bitmask(u8 * list, u8 count)
115 * 115 *
116 * FUNCTION: acpi_rs_move_data 116 * FUNCTION: acpi_rs_move_data
117 * 117 *
118 * PARAMETERS: Destination - Pointer to the destination descriptor 118 * PARAMETERS: destination - Pointer to the destination descriptor
119 * Source - Pointer to the source descriptor 119 * source - Pointer to the source descriptor
120 * item_count - How many items to move 120 * item_count - How many items to move
121 * move_type - Byte width 121 * move_type - Byte width
122 * 122 *
@@ -183,7 +183,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
183 * 183 *
184 * PARAMETERS: total_length - Length of the AML descriptor, including 184 * PARAMETERS: total_length - Length of the AML descriptor, including
185 * the header and length fields. 185 * the header and length fields.
186 * Aml - Pointer to the raw AML descriptor 186 * aml - Pointer to the raw AML descriptor
187 * 187 *
188 * RETURN: None 188 * RETURN: None
189 * 189 *
@@ -235,7 +235,7 @@ acpi_rs_set_resource_length(acpi_rsdesc_size total_length,
235 * PARAMETERS: descriptor_type - Byte to be inserted as the type 235 * PARAMETERS: descriptor_type - Byte to be inserted as the type
236 * total_length - Length of the AML descriptor, including 236 * total_length - Length of the AML descriptor, including
237 * the header and length fields. 237 * the header and length fields.
238 * Aml - Pointer to the raw AML descriptor 238 * aml - Pointer to the raw AML descriptor
239 * 239 *
240 * RETURN: None 240 * RETURN: None
241 * 241 *
@@ -265,8 +265,8 @@ acpi_rs_set_resource_header(u8 descriptor_type,
265 * 265 *
266 * FUNCTION: acpi_rs_strcpy 266 * FUNCTION: acpi_rs_strcpy
267 * 267 *
268 * PARAMETERS: Destination - Pointer to the destination string 268 * PARAMETERS: destination - Pointer to the destination string
269 * Source - Pointer to the source string 269 * source - Pointer to the source string
270 * 270 *
271 * RETURN: String length, including NULL terminator 271 * RETURN: String length, including NULL terminator
272 * 272 *
@@ -300,7 +300,7 @@ static u16 acpi_rs_strcpy(char *destination, char *source)
300 * minimum_length - Minimum length of the descriptor (minus 300 * minimum_length - Minimum length of the descriptor (minus
301 * any optional fields) 301 * any optional fields)
302 * resource_source - Where the resource_source is returned 302 * resource_source - Where the resource_source is returned
303 * Aml - Pointer to the raw AML descriptor 303 * aml - Pointer to the raw AML descriptor
304 * string_ptr - (optional) where to store the actual 304 * string_ptr - (optional) where to store the actual
305 * resource_source string 305 * resource_source string
306 * 306 *
@@ -386,7 +386,7 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
386 * 386 *
387 * FUNCTION: acpi_rs_set_resource_source 387 * FUNCTION: acpi_rs_set_resource_source
388 * 388 *
389 * PARAMETERS: Aml - Pointer to the raw AML descriptor 389 * PARAMETERS: aml - Pointer to the raw AML descriptor
390 * minimum_length - Minimum length of the descriptor (minus 390 * minimum_length - Minimum length of the descriptor (minus
391 * any optional fields) 391 * any optional fields)
392 * resource_source - Internal resource_source 392 * resource_source - Internal resource_source
@@ -445,7 +445,7 @@ acpi_rs_set_resource_source(union aml_resource * aml,
445 * 445 *
446 * FUNCTION: acpi_rs_get_prt_method_data 446 * FUNCTION: acpi_rs_get_prt_method_data
447 * 447 *
448 * PARAMETERS: Node - Device node 448 * PARAMETERS: node - Device node
449 * ret_buffer - Pointer to a buffer structure for the 449 * ret_buffer - Pointer to a buffer structure for the
450 * results 450 * results
451 * 451 *
@@ -494,7 +494,7 @@ acpi_rs_get_prt_method_data(struct acpi_namespace_node * node,
494 * 494 *
495 * FUNCTION: acpi_rs_get_crs_method_data 495 * FUNCTION: acpi_rs_get_crs_method_data
496 * 496 *
497 * PARAMETERS: Node - Device node 497 * PARAMETERS: node - Device node
498 * ret_buffer - Pointer to a buffer structure for the 498 * ret_buffer - Pointer to a buffer structure for the
499 * results 499 * results
500 * 500 *
@@ -534,7 +534,7 @@ acpi_rs_get_crs_method_data(struct acpi_namespace_node *node,
534 */ 534 */
535 status = acpi_rs_create_resource_list(obj_desc, ret_buffer); 535 status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
536 536
537 /* On exit, we must delete the object returned by evaluate_object */ 537 /* On exit, we must delete the object returned by evaluateObject */
538 538
539 acpi_ut_remove_reference(obj_desc); 539 acpi_ut_remove_reference(obj_desc);
540 return_ACPI_STATUS(status); 540 return_ACPI_STATUS(status);
@@ -544,7 +544,7 @@ acpi_rs_get_crs_method_data(struct acpi_namespace_node *node,
544 * 544 *
545 * FUNCTION: acpi_rs_get_prs_method_data 545 * FUNCTION: acpi_rs_get_prs_method_data
546 * 546 *
547 * PARAMETERS: Node - Device node 547 * PARAMETERS: node - Device node
548 * ret_buffer - Pointer to a buffer structure for the 548 * ret_buffer - Pointer to a buffer structure for the
549 * results 549 * results
550 * 550 *
@@ -585,7 +585,7 @@ acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
585 */ 585 */
586 status = acpi_rs_create_resource_list(obj_desc, ret_buffer); 586 status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
587 587
588 /* On exit, we must delete the object returned by evaluate_object */ 588 /* On exit, we must delete the object returned by evaluateObject */
589 589
590 acpi_ut_remove_reference(obj_desc); 590 acpi_ut_remove_reference(obj_desc);
591 return_ACPI_STATUS(status); 591 return_ACPI_STATUS(status);
@@ -596,7 +596,7 @@ acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
596 * 596 *
597 * FUNCTION: acpi_rs_get_aei_method_data 597 * FUNCTION: acpi_rs_get_aei_method_data
598 * 598 *
599 * PARAMETERS: Node - Device node 599 * PARAMETERS: node - Device node
600 * ret_buffer - Pointer to a buffer structure for the 600 * ret_buffer - Pointer to a buffer structure for the
601 * results 601 * results
602 * 602 *
@@ -636,7 +636,7 @@ acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
636 */ 636 */
637 status = acpi_rs_create_resource_list(obj_desc, ret_buffer); 637 status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
638 638
639 /* On exit, we must delete the object returned by evaluate_object */ 639 /* On exit, we must delete the object returned by evaluateObject */
640 640
641 acpi_ut_remove_reference(obj_desc); 641 acpi_ut_remove_reference(obj_desc);
642 return_ACPI_STATUS(status); 642 return_ACPI_STATUS(status);
@@ -646,8 +646,8 @@ acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
646 * 646 *
647 * FUNCTION: acpi_rs_get_method_data 647 * FUNCTION: acpi_rs_get_method_data
648 * 648 *
649 * PARAMETERS: Handle - Handle to the containing object 649 * PARAMETERS: handle - Handle to the containing object
650 * Path - Path to method, relative to Handle 650 * path - Path to method, relative to Handle
651 * ret_buffer - Pointer to a buffer structure for the 651 * ret_buffer - Pointer to a buffer structure for the
652 * results 652 * results
653 * 653 *
@@ -697,7 +697,7 @@ acpi_rs_get_method_data(acpi_handle handle,
697 * 697 *
698 * FUNCTION: acpi_rs_set_srs_method_data 698 * FUNCTION: acpi_rs_set_srs_method_data
699 * 699 *
700 * PARAMETERS: Node - Device node 700 * PARAMETERS: node - Device node
701 * in_buffer - Pointer to a buffer structure of the 701 * in_buffer - Pointer to a buffer structure of the
702 * parameter 702 * parameter
703 * 703 *
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index f58c098c7aeb..5aad744b5b83 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -79,7 +79,7 @@ acpi_rs_validate_parameters(acpi_handle device_handle,
79 * FUNCTION: acpi_rs_validate_parameters 79 * FUNCTION: acpi_rs_validate_parameters
80 * 80 *
81 * PARAMETERS: device_handle - Handle to a device 81 * PARAMETERS: device_handle - Handle to a device
82 * Buffer - Pointer to a data buffer 82 * buffer - Pointer to a data buffer
83 * return_node - Pointer to where the device node is returned 83 * return_node - Pointer to where the device node is returned
84 * 84 *
85 * RETURN: Status 85 * RETURN: Status
@@ -351,8 +351,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_event_resources)
351 * 351 *
352 * FUNCTION: acpi_resource_to_address64 352 * FUNCTION: acpi_resource_to_address64
353 * 353 *
354 * PARAMETERS: Resource - Pointer to a resource 354 * PARAMETERS: resource - Pointer to a resource
 355 * Out - Pointer to the user's return buffer 355 * out - Pointer to the user's return buffer
356 * (a struct acpi_resource_address64) 356 * (a struct acpi_resource_address64)
357 * 357 *
358 * RETURN: Status 358 * RETURN: Status
@@ -415,9 +415,9 @@ ACPI_EXPORT_SYMBOL(acpi_resource_to_address64)
415 * FUNCTION: acpi_get_vendor_resource 415 * FUNCTION: acpi_get_vendor_resource
416 * 416 *
417 * PARAMETERS: device_handle - Handle for the parent device object 417 * PARAMETERS: device_handle - Handle for the parent device object
418 * Name - Method name for the parent resource 418 * name - Method name for the parent resource
419 * (METHOD_NAME__CRS or METHOD_NAME__PRS) 419 * (METHOD_NAME__CRS or METHOD_NAME__PRS)
420 * Uuid - Pointer to the UUID to be matched. 420 * uuid - Pointer to the UUID to be matched.
421 * includes both subtype and 16-byte UUID 421 * includes both subtype and 16-byte UUID
422 * ret_buffer - Where the vendor resource is returned 422 * ret_buffer - Where the vendor resource is returned
423 * 423 *
@@ -526,11 +526,11 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
526 * 526 *
527 * PARAMETERS: device_handle - Handle to the device object for the 527 * PARAMETERS: device_handle - Handle to the device object for the
528 * device we are querying 528 * device we are querying
529 * Name - Method name of the resources we want. 529 * name - Method name of the resources we want.
530 * (METHOD_NAME__CRS, METHOD_NAME__PRS, or 530 * (METHOD_NAME__CRS, METHOD_NAME__PRS, or
531 * METHOD_NAME__AEI) 531 * METHOD_NAME__AEI)
532 * user_function - Called for each resource 532 * user_function - Called for each resource
533 * Context - Passed to user_function 533 * context - Passed to user_function
534 * 534 *
535 * RETURN: Status 535 * RETURN: Status
536 * 536 *
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 4c9c760db4a4..390651860bf0 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -49,9 +49,10 @@
49ACPI_MODULE_NAME("tbfadt") 49ACPI_MODULE_NAME("tbfadt")
50 50
51/* Local prototypes */ 51/* Local prototypes */
52static ACPI_INLINE void 52static void
53acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, 53acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
54 u8 space_id, u8 byte_width, u64 address); 54 u8 space_id,
55 u8 byte_width, u64 address, char *register_name);
55 56
56static void acpi_tb_convert_fadt(void); 57static void acpi_tb_convert_fadt(void);
57 58
@@ -172,7 +173,7 @@ static struct acpi_fadt_pm_info fadt_pm_info_table[] = {
172 * 173 *
173 * PARAMETERS: generic_address - GAS struct to be initialized 174 * PARAMETERS: generic_address - GAS struct to be initialized
174 * byte_width - Width of this register 175 * byte_width - Width of this register
175 * Address - Address of the register 176 * address - Address of the register
176 * 177 *
177 * RETURN: None 178 * RETURN: None
178 * 179 *
@@ -182,10 +183,25 @@ static struct acpi_fadt_pm_info fadt_pm_info_table[] = {
182 * 183 *
183 ******************************************************************************/ 184 ******************************************************************************/
184 185
185static ACPI_INLINE void 186static void
186acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, 187acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
187 u8 space_id, u8 byte_width, u64 address) 188 u8 space_id,
189 u8 byte_width, u64 address, char *register_name)
188{ 190{
191 u8 bit_width;
192
193 /* Bit width field in the GAS is only one byte long, 255 max */
194
195 bit_width = (u8)(byte_width * 8);
196
197 if (byte_width > 31) { /* (31*8)=248 */
198 ACPI_ERROR((AE_INFO,
199 "%s - 32-bit FADT register is too long (%u bytes, %u bits) "
200 "to convert to GAS struct - 255 bits max, truncating",
201 register_name, byte_width, (byte_width * 8)));
202
203 bit_width = 255;
204 }
189 205
190 /* 206 /*
191 * The 64-bit Address field is non-aligned in the byte packed 207 * The 64-bit Address field is non-aligned in the byte packed
@@ -196,7 +212,7 @@ acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
196 /* All other fields are byte-wide */ 212 /* All other fields are byte-wide */
197 213
198 generic_address->space_id = space_id; 214 generic_address->space_id = space_id;
199 generic_address->bit_width = (u8)ACPI_MUL_8(byte_width); 215 generic_address->bit_width = bit_width;
200 generic_address->bit_offset = 0; 216 generic_address->bit_offset = 0;
201 generic_address->access_width = 0; /* Access width ANY */ 217 generic_address->access_width = 0; /* Access width ANY */
202} 218}
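
The clamp above exists because the GAS bit_width field is a single byte: 31 bytes (248 bits) is the widest register that converts exactly, so anything larger is truncated to 255 bits with a BIOS error. A minimal standalone sketch of that arithmetic (plain hosted C; the names are illustrative, not part of ACPICA):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative clamp mirroring acpi_tb_init_generic_address above:
	 * a GAS bit_width is a u8, so byte widths over 31 (31 * 8 = 248)
	 * cannot be represented and are truncated to the 255-bit maximum. */
	static uint8_t gas_bit_width(uint8_t byte_width)
	{
		if (byte_width > 31)
			return 255;	/* the real code also logs a BIOS error */
		return (uint8_t)(byte_width * 8);
	}

	int main(void)
	{
		printf("4 bytes  -> %u bits\n", (unsigned)gas_bit_width(4));	/* 32 */
		printf("31 bytes -> %u bits\n", (unsigned)gas_bit_width(31));	/* 248 */
		printf("40 bytes -> %u bits\n", (unsigned)gas_bit_width(40));	/* 255, clamped */
		return 0;
	}
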
@@ -267,8 +283,8 @@ void acpi_tb_parse_fadt(u32 table_index)
267 * 283 *
268 * FUNCTION: acpi_tb_create_local_fadt 284 * FUNCTION: acpi_tb_create_local_fadt
269 * 285 *
270 * PARAMETERS: Table - Pointer to BIOS FADT 286 * PARAMETERS: table - Pointer to BIOS FADT
271 * Length - Length of the table 287 * length - Length of the table
272 * 288 *
273 * RETURN: None 289 * RETURN: None
274 * 290 *
@@ -287,11 +303,11 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
287 * a warning. 303 * a warning.
288 */ 304 */
289 if (length > sizeof(struct acpi_table_fadt)) { 305 if (length > sizeof(struct acpi_table_fadt)) {
290 ACPI_WARNING((AE_INFO, 306 ACPI_BIOS_WARNING((AE_INFO,
291 "FADT (revision %u) is longer than ACPI 5.0 version, " 307 "FADT (revision %u) is longer than ACPI 5.0 version, "
292 "truncating length %u to %u", 308 "truncating length %u to %u",
293 table->revision, length, 309 table->revision, length,
294 (u32)sizeof(struct acpi_table_fadt))); 310 (u32)sizeof(struct acpi_table_fadt)));
295 } 311 }
296 312
297 /* Clear the entire local FADT */ 313 /* Clear the entire local FADT */
@@ -436,11 +452,13 @@ static void acpi_tb_convert_fadt(void)
436 * they must match. 452 * they must match.
437 */ 453 */
438 if (address64->address && address32 && 454 if (address64->address && address32 &&
439 (address64->address != (u64) address32)) { 455 (address64->address != (u64)address32)) {
440 ACPI_ERROR((AE_INFO, 456 ACPI_BIOS_ERROR((AE_INFO,
441 "32/64X address mismatch in %s: 0x%8.8X/0x%8.8X%8.8X, using 32", 457 "32/64X address mismatch in FADT/%s: "
442 fadt_info_table[i].name, address32, 458 "0x%8.8X/0x%8.8X%8.8X, using 32",
443 ACPI_FORMAT_UINT64(address64->address))); 459 fadt_info_table[i].name, address32,
460 ACPI_FORMAT_UINT64(address64->
461 address)));
444 } 462 }
445 463
446 /* Always use 32-bit address if it is valid (non-null) */ 464 /* Always use 32-bit address if it is valid (non-null) */
@@ -456,7 +474,8 @@ static void acpi_tb_convert_fadt(void)
456 &acpi_gbl_FADT, 474 &acpi_gbl_FADT,
457 fadt_info_table 475 fadt_info_table
458 [i].length), 476 [i].length),
459 (u64) address32); 477 (u64) address32,
478 fadt_info_table[i].name);
460 } 479 }
461 } 480 }
462} 481}
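
The mismatch branch above implements a simple precedence rule: when the legacy 32-bit FADT address and the 64-bit X_ field disagree, a BIOS error is logged and the 32-bit value wins whenever it is non-zero. A hedged sketch of that selection (standalone C; the struct is a stand-in, not the real FADT layout):

	#include <stdint.h>

	/* Stand-in for one FADT address pair (legacy 32-bit field plus the
	 * extended 64-bit X_ field); not the real ACPICA structures. */
	struct addr_pair {
		uint32_t addr32;
		uint64_t addr64;
	};

	/* Mirror of the rule in acpi_tb_convert_fadt: prefer the 32-bit
	 * address whenever it is valid (non-zero), even if the 64-bit field
	 * disagrees; otherwise fall back to the 64-bit field. */
	static uint64_t select_fadt_address(const struct addr_pair *p, int *mismatch)
	{
		*mismatch = (p->addr64 && p->addr32 &&
			     p->addr64 != (uint64_t)p->addr32);	/* would log a BIOS error */
		return p->addr32 ? (uint64_t)p->addr32 : p->addr64;
	}
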
@@ -465,7 +484,7 @@ static void acpi_tb_convert_fadt(void)
465 * 484 *
466 * FUNCTION: acpi_tb_validate_fadt 485 * FUNCTION: acpi_tb_validate_fadt
467 * 486 *
468 * PARAMETERS: Table - Pointer to the FADT to be validated 487 * PARAMETERS: table - Pointer to the FADT to be validated
469 * 488 *
470 * RETURN: None 489 * RETURN: None
471 * 490 *
@@ -494,25 +513,25 @@ static void acpi_tb_validate_fadt(void)
494 * DSDT/X_DSDT) would indicate the presence of two FACS or two DSDT tables. 513 * DSDT/X_DSDT) would indicate the presence of two FACS or two DSDT tables.
495 */ 514 */
496 if (acpi_gbl_FADT.facs && 515 if (acpi_gbl_FADT.facs &&
497 (acpi_gbl_FADT.Xfacs != (u64) acpi_gbl_FADT.facs)) { 516 (acpi_gbl_FADT.Xfacs != (u64)acpi_gbl_FADT.facs)) {
498 ACPI_WARNING((AE_INFO, 517 ACPI_BIOS_WARNING((AE_INFO,
499 "32/64X FACS address mismatch in FADT - " 518 "32/64X FACS address mismatch in FADT - "
500 "0x%8.8X/0x%8.8X%8.8X, using 32", 519 "0x%8.8X/0x%8.8X%8.8X, using 32",
501 acpi_gbl_FADT.facs, 520 acpi_gbl_FADT.facs,
502 ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xfacs))); 521 ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xfacs)));
503 522
504 acpi_gbl_FADT.Xfacs = (u64) acpi_gbl_FADT.facs; 523 acpi_gbl_FADT.Xfacs = (u64)acpi_gbl_FADT.facs;
505 } 524 }
506 525
507 if (acpi_gbl_FADT.dsdt && 526 if (acpi_gbl_FADT.dsdt &&
508 (acpi_gbl_FADT.Xdsdt != (u64) acpi_gbl_FADT.dsdt)) { 527 (acpi_gbl_FADT.Xdsdt != (u64)acpi_gbl_FADT.dsdt)) {
509 ACPI_WARNING((AE_INFO, 528 ACPI_BIOS_WARNING((AE_INFO,
510 "32/64X DSDT address mismatch in FADT - " 529 "32/64X DSDT address mismatch in FADT - "
511 "0x%8.8X/0x%8.8X%8.8X, using 32", 530 "0x%8.8X/0x%8.8X%8.8X, using 32",
512 acpi_gbl_FADT.dsdt, 531 acpi_gbl_FADT.dsdt,
513 ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xdsdt))); 532 ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xdsdt)));
514 533
515 acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt; 534 acpi_gbl_FADT.Xdsdt = (u64)acpi_gbl_FADT.dsdt;
516 } 535 }
517 536
518 /* If Hardware Reduced flag is set, we are all done */ 537 /* If Hardware Reduced flag is set, we are all done */
@@ -542,10 +561,10 @@ static void acpi_tb_validate_fadt(void)
542 */ 561 */
543 if (address64->address && 562 if (address64->address &&
544 (address64->bit_width != ACPI_MUL_8(length))) { 563 (address64->bit_width != ACPI_MUL_8(length))) {
545 ACPI_WARNING((AE_INFO, 564 ACPI_BIOS_WARNING((AE_INFO,
546 "32/64X length mismatch in %s: %u/%u", 565 "32/64X length mismatch in FADT/%s: %u/%u",
547 name, ACPI_MUL_8(length), 566 name, ACPI_MUL_8(length),
548 address64->bit_width)); 567 address64->bit_width));
549 } 568 }
550 569
551 if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) { 570 if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) {
@@ -554,29 +573,29 @@ static void acpi_tb_validate_fadt(void)
554 * Both the address and length must be non-zero. 573 * Both the address and length must be non-zero.
555 */ 574 */
556 if (!address64->address || !length) { 575 if (!address64->address || !length) {
557 ACPI_ERROR((AE_INFO, 576 ACPI_BIOS_ERROR((AE_INFO,
558 "Required field %s has zero address and/or length:" 577 "Required FADT field %s has zero address and/or length: "
559 " 0x%8.8X%8.8X/0x%X", 578 "0x%8.8X%8.8X/0x%X",
560 name, 579 name,
561 ACPI_FORMAT_UINT64(address64-> 580 ACPI_FORMAT_UINT64(address64->
562 address), 581 address),
563 length)); 582 length));
564 } 583 }
565 } else if (fadt_info_table[i].type & ACPI_FADT_SEPARATE_LENGTH) { 584 } else if (fadt_info_table[i].type & ACPI_FADT_SEPARATE_LENGTH) {
566 /* 585 /*
567 * Field is optional (PM2Control, GPE0, GPE1) AND has its own 586 * Field is optional (Pm2_control, GPE0, GPE1) AND has its own
568 * length field. If present, both the address and length must 587 * length field. If present, both the address and length must
569 * be valid. 588 * be valid.
570 */ 589 */
571 if ((address64->address && !length) || 590 if ((address64->address && !length) ||
572 (!address64->address && length)) { 591 (!address64->address && length)) {
573 ACPI_WARNING((AE_INFO, 592 ACPI_BIOS_WARNING((AE_INFO,
574 "Optional field %s has zero address or length: " 593 "Optional FADT field %s has zero address or length: "
575 "0x%8.8X%8.8X/0x%X", 594 "0x%8.8X%8.8X/0x%X",
576 name, 595 name,
577 ACPI_FORMAT_UINT64(address64-> 596 ACPI_FORMAT_UINT64
578 address), 597 (address64->address),
579 length)); 598 length));
580 } 599 }
581 } 600 }
582 } 601 }
@@ -621,12 +640,12 @@ static void acpi_tb_setup_fadt_registers(void)
621 (fadt_info_table[i].default_length > 0) && 640 (fadt_info_table[i].default_length > 0) &&
622 (fadt_info_table[i].default_length != 641 (fadt_info_table[i].default_length !=
623 target64->bit_width)) { 642 target64->bit_width)) {
624 ACPI_WARNING((AE_INFO, 643 ACPI_BIOS_WARNING((AE_INFO,
625 "Invalid length for %s: %u, using default %u", 644 "Invalid length for FADT/%s: %u, using default %u",
626 fadt_info_table[i].name, 645 fadt_info_table[i].name,
627 target64->bit_width, 646 target64->bit_width,
628 fadt_info_table[i]. 647 fadt_info_table[i].
629 default_length)); 648 default_length));
630 649
631 /* Incorrect size, set width to the default */ 650 /* Incorrect size, set width to the default */
632 651
@@ -670,7 +689,8 @@ static void acpi_tb_setup_fadt_registers(void)
670 source64->address + 689 source64->address +
671 (fadt_pm_info_table[i]. 690 (fadt_pm_info_table[i].
672 register_num * 691 register_num *
673 pm1_register_byte_width)); 692 pm1_register_byte_width),
693 "PmRegisters");
674 } 694 }
675 } 695 }
676} 696}
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 4903e36ea75a..57deae166577 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -52,7 +52,7 @@ ACPI_MODULE_NAME("tbfind")
52 * 52 *
53 * FUNCTION: acpi_tb_find_table 53 * FUNCTION: acpi_tb_find_table
54 * 54 *
55 * PARAMETERS: Signature - String with ACPI table signature 55 * PARAMETERS: signature - String with ACPI table signature
56 * oem_id - String with the table OEM ID 56 * oem_id - String with the table OEM ID
57 * oem_table_id - String with the OEM Table ID 57 * oem_table_id - String with the OEM Table ID
58 * table_index - Where the table index is returned 58 * table_index - Where the table index is returned
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index c03500b4cc7a..74f97d74db1c 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -138,13 +138,14 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
138 if ((table_desc->pointer->signature[0] != 0x00) && 138 if ((table_desc->pointer->signature[0] != 0x00) &&
139 (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT)) 139 (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT))
140 && (ACPI_STRNCMP(table_desc->pointer->signature, "OEM", 3))) { 140 && (ACPI_STRNCMP(table_desc->pointer->signature, "OEM", 3))) {
141 ACPI_ERROR((AE_INFO, 141 ACPI_BIOS_ERROR((AE_INFO,
142 "Table has invalid signature [%4.4s] (0x%8.8X), must be SSDT or OEMx", 142 "Table has invalid signature [%4.4s] (0x%8.8X), "
143 acpi_ut_valid_acpi_name(*(u32 *)table_desc-> 143 "must be SSDT or OEMx",
144 pointer-> 144 acpi_ut_valid_acpi_name(*(u32 *)table_desc->
145 signature) ? table_desc-> 145 pointer->
146 pointer->signature : "????", 146 signature) ?
147 *(u32 *)table_desc->pointer->signature)); 147 table_desc->pointer->signature : "????",
148 *(u32 *)table_desc->pointer->signature));
148 149
149 return_ACPI_STATUS(AE_BAD_SIGNATURE); 150 return_ACPI_STATUS(AE_BAD_SIGNATURE);
150 } 151 }
@@ -396,10 +397,10 @@ acpi_status acpi_tb_resize_root_table_list(void)
396 * 397 *
397 * FUNCTION: acpi_tb_store_table 398 * FUNCTION: acpi_tb_store_table
398 * 399 *
399 * PARAMETERS: Address - Table address 400 * PARAMETERS: address - Table address
400 * Table - Table header 401 * table - Table header
401 * Length - Table length 402 * length - Table length
402 * Flags - flags 403 * flags - flags
403 * 404 *
404 * RETURN: Status and table index. 405 * RETURN: Status and table index.
405 * 406 *
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 0a706cac37de..b6cea30da638 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -178,8 +178,8 @@ u8 acpi_tb_tables_loaded(void)
178 * 178 *
179 * FUNCTION: acpi_tb_fix_string 179 * FUNCTION: acpi_tb_fix_string
180 * 180 *
181 * PARAMETERS: String - String to be repaired 181 * PARAMETERS: string - String to be repaired
182 * Length - Maximum length 182 * length - Maximum length
183 * 183 *
184 * RETURN: None 184 * RETURN: None
185 * 185 *
@@ -205,7 +205,7 @@ static void acpi_tb_fix_string(char *string, acpi_size length)
205 * FUNCTION: acpi_tb_cleanup_table_header 205 * FUNCTION: acpi_tb_cleanup_table_header
206 * 206 *
207 * PARAMETERS: out_header - Where the cleaned header is returned 207 * PARAMETERS: out_header - Where the cleaned header is returned
208 * Header - Input ACPI table header 208 * header - Input ACPI table header
209 * 209 *
210 * RETURN: Returns the cleaned header in out_header 210 * RETURN: Returns the cleaned header in out_header
211 * 211 *
@@ -231,8 +231,8 @@ acpi_tb_cleanup_table_header(struct acpi_table_header *out_header,
231 * 231 *
232 * FUNCTION: acpi_tb_print_table_header 232 * FUNCTION: acpi_tb_print_table_header
233 * 233 *
234 * PARAMETERS: Address - Table physical address 234 * PARAMETERS: address - Table physical address
235 * Header - Table header 235 * header - Table header
236 * 236 *
237 * RETURN: None 237 * RETURN: None
238 * 238 *
@@ -296,8 +296,8 @@ acpi_tb_print_table_header(acpi_physical_address address,
296 * 296 *
297 * FUNCTION: acpi_tb_validate_checksum 297 * FUNCTION: acpi_tb_validate_checksum
298 * 298 *
299 * PARAMETERS: Table - ACPI table to verify 299 * PARAMETERS: table - ACPI table to verify
300 * Length - Length of entire table 300 * length - Length of entire table
301 * 301 *
302 * RETURN: Status 302 * RETURN: Status
303 * 303 *
@@ -317,10 +317,11 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
317 /* Checksum ok? (should be zero) */ 317 /* Checksum ok? (should be zero) */
318 318
319 if (checksum) { 319 if (checksum) {
320 ACPI_WARNING((AE_INFO, 320 ACPI_BIOS_WARNING((AE_INFO,
321 "Incorrect checksum in table [%4.4s] - 0x%2.2X, should be 0x%2.2X", 321 "Incorrect checksum in table [%4.4s] - 0x%2.2X, "
322 table->signature, table->checksum, 322 "should be 0x%2.2X",
323 (u8) (table->checksum - checksum))); 323 table->signature, table->checksum,
324 (u8)(table->checksum - checksum)));
324 325
325#if (ACPI_CHECKSUM_ABORT) 326#if (ACPI_CHECKSUM_ABORT)
326 327
@@ -335,8 +336,8 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
335 * 336 *
336 * FUNCTION: acpi_tb_checksum 337 * FUNCTION: acpi_tb_checksum
337 * 338 *
338 * PARAMETERS: Buffer - Pointer to memory region to be checked 339 * PARAMETERS: buffer - Pointer to memory region to be checked
339 * Length - Length of this memory region 340 * length - Length of this memory region
340 * 341 *
341 * RETURN: Checksum (u8) 342 * RETURN: Checksum (u8)
342 * 343 *
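
The checksum rule behind both functions above is the standard ACPI one: the byte-wise sum of the entire table, including the stored checksum byte itself, must be zero modulo 256. A minimal sketch (generic C, not the ACPICA implementation):

	#include <stddef.h>
	#include <stdint.h>

	/* Byte-wise ACPI table checksum: summing every byte of the table,
	 * including the stored checksum byte, must yield 0 (mod 256). */
	static uint8_t acpi_sum(const uint8_t *buf, size_t len)
	{
		uint8_t sum = 0;

		while (len--)
			sum = (uint8_t)(sum + *buf++);
		return sum;	/* zero means the table checksums correctly */
	}

This is also why the warning above can report what the checksum "should be": subtracting the residual sum from the stored byte gives the value that would make the total come out to zero.
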
@@ -377,8 +378,9 @@ void acpi_tb_check_dsdt_header(void)
377 378
378 if (acpi_gbl_original_dsdt_header.length != acpi_gbl_DSDT->length || 379 if (acpi_gbl_original_dsdt_header.length != acpi_gbl_DSDT->length ||
379 acpi_gbl_original_dsdt_header.checksum != acpi_gbl_DSDT->checksum) { 380 acpi_gbl_original_dsdt_header.checksum != acpi_gbl_DSDT->checksum) {
380 ACPI_ERROR((AE_INFO, 381 ACPI_BIOS_ERROR((AE_INFO,
381 "The DSDT has been corrupted or replaced - old, new headers below")); 382 "The DSDT has been corrupted or replaced - "
383 "old, new headers below"));
382 acpi_tb_print_table_header(0, &acpi_gbl_original_dsdt_header); 384 acpi_tb_print_table_header(0, &acpi_gbl_original_dsdt_header);
383 acpi_tb_print_table_header(0, acpi_gbl_DSDT); 385 acpi_tb_print_table_header(0, acpi_gbl_DSDT);
384 386
@@ -438,8 +440,8 @@ struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
438 * 440 *
439 * FUNCTION: acpi_tb_install_table 441 * FUNCTION: acpi_tb_install_table
440 * 442 *
441 * PARAMETERS: Address - Physical address of DSDT or FACS 443 * PARAMETERS: address - Physical address of DSDT or FACS
442 * Signature - Table signature, NULL if no need to 444 * signature - Table signature, NULL if no need to
443 * match 445 * match
444 * table_index - Index into root table array 446 * table_index - Index into root table array
445 * 447 *
@@ -480,9 +482,10 @@ acpi_tb_install_table(acpi_physical_address address,
480 /* If a particular signature is expected (DSDT/FACS), it must match */ 482 /* If a particular signature is expected (DSDT/FACS), it must match */
481 483
482 if (signature && !ACPI_COMPARE_NAME(table->signature, signature)) { 484 if (signature && !ACPI_COMPARE_NAME(table->signature, signature)) {
483 ACPI_ERROR((AE_INFO, 485 ACPI_BIOS_ERROR((AE_INFO,
484 "Invalid signature 0x%X for ACPI table, expected [%s]", 486 "Invalid signature 0x%X for ACPI table, expected [%s]",
485 *ACPI_CAST_PTR(u32, table->signature), signature)); 487 *ACPI_CAST_PTR(u32, table->signature),
488 signature));
486 goto unmap_and_exit; 489 goto unmap_and_exit;
487 } 490 }
488 491
@@ -589,10 +592,10 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
589 592
590 /* Will truncate 64-bit address to 32 bits, issue warning */ 593 /* Will truncate 64-bit address to 32 bits, issue warning */
591 594
592 ACPI_WARNING((AE_INFO, 595 ACPI_BIOS_WARNING((AE_INFO,
593 "64-bit Physical Address in XSDT is too large (0x%8.8X%8.8X)," 596 "64-bit Physical Address in XSDT is too large (0x%8.8X%8.8X),"
594 " truncating", 597 " truncating",
595 ACPI_FORMAT_UINT64(address64))); 598 ACPI_FORMAT_UINT64(address64)));
596 } 599 }
597#endif 600#endif
598 return ((acpi_physical_address) (address64)); 601 return ((acpi_physical_address) (address64));
@@ -603,7 +606,7 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
603 * 606 *
604 * FUNCTION: acpi_tb_parse_root_table 607 * FUNCTION: acpi_tb_parse_root_table
605 * 608 *
606 * PARAMETERS: Rsdp - Pointer to the RSDP 609 * PARAMETERS: rsdp - Pointer to the RSDP
607 * 610 *
608 * RETURN: Status 611 * RETURN: Status
609 * 612 *
@@ -694,8 +697,9 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
694 acpi_os_unmap_memory(table, sizeof(struct acpi_table_header)); 697 acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
695 698
696 if (length < sizeof(struct acpi_table_header)) { 699 if (length < sizeof(struct acpi_table_header)) {
697 ACPI_ERROR((AE_INFO, "Invalid length 0x%X in RSDT/XSDT", 700 ACPI_BIOS_ERROR((AE_INFO,
698 length)); 701 "Invalid table length 0x%X in RSDT/XSDT",
702 length));
699 return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH); 703 return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
700 } 704 }
701 705
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index abcc6412c244..29e51bc01383 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -1,7 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Module Name: tbxface - Public interfaces to the ACPI subsystem 3 * Module Name: tbxface - ACPI table oriented external interfaces
4 * ACPI table oriented interfaces
5 * 4 *
6 *****************************************************************************/ 5 *****************************************************************************/
7 6
@@ -51,11 +50,6 @@
51#define _COMPONENT ACPI_TABLES 50#define _COMPONENT ACPI_TABLES
52ACPI_MODULE_NAME("tbxface") 51ACPI_MODULE_NAME("tbxface")
53 52
54/* Local prototypes */
55static acpi_status acpi_tb_load_namespace(void);
56
57static int no_auto_ssdt;
58
59/******************************************************************************* 53/*******************************************************************************
60 * 54 *
61 * FUNCTION: acpi_allocate_root_table 55 * FUNCTION: acpi_allocate_root_table
@@ -65,11 +59,10 @@ static int no_auto_ssdt;
65 * 59 *
66 * RETURN: Status 60 * RETURN: Status
67 * 61 *
68 * DESCRIPTION: Allocate a root table array. Used by i_aSL compiler and 62 * DESCRIPTION: Allocate a root table array. Used by iASL compiler and
69 * acpi_initialize_tables. 63 * acpi_initialize_tables.
70 * 64 *
71 ******************************************************************************/ 65 ******************************************************************************/
72
73acpi_status acpi_allocate_root_table(u32 initial_table_count) 66acpi_status acpi_allocate_root_table(u32 initial_table_count)
74{ 67{
75 68
@@ -222,52 +215,10 @@ acpi_status acpi_reallocate_root_table(void)
222 215
223/******************************************************************************* 216/*******************************************************************************
224 * 217 *
225 * FUNCTION: acpi_load_table
226 *
227 * PARAMETERS: table_ptr - pointer to a buffer containing the entire
228 * table to be loaded
229 *
230 * RETURN: Status
231 *
232 * DESCRIPTION: This function is called to load a table from the caller's
233 * buffer. The buffer must contain an entire ACPI Table including
234 * a valid header. The header fields will be verified, and if it
235 * is determined that the table is invalid, the call will fail.
236 *
237 ******************************************************************************/
238acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
239{
240 acpi_status status;
241 u32 table_index;
242 struct acpi_table_desc table_desc;
243
244 if (!table_ptr)
245 return AE_BAD_PARAMETER;
246
247 ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
248 table_desc.pointer = table_ptr;
249 table_desc.length = table_ptr->length;
250 table_desc.flags = ACPI_TABLE_ORIGIN_UNKNOWN;
251
252 /*
253 * Install the new table into the local data structures
254 */
255 status = acpi_tb_add_table(&table_desc, &table_index);
256 if (ACPI_FAILURE(status)) {
257 return status;
258 }
259 status = acpi_ns_load_table(table_index, acpi_gbl_root_node);
260 return status;
261}
262
263ACPI_EXPORT_SYMBOL(acpi_load_table)
264
265/*******************************************************************************
266 *
267 * FUNCTION: acpi_get_table_header 218 * FUNCTION: acpi_get_table_header
268 * 219 *
269 * PARAMETERS: Signature - ACPI signature of needed table 220 * PARAMETERS: signature - ACPI signature of needed table
270 * Instance - Which instance (for SSDTs) 221 * instance - Which instance (for SSDTs)
271 * out_table_header - The pointer to the table header to fill 222 * out_table_header - The pointer to the table header to fill
272 * 223 *
273 * RETURN: Status and pointer to mapped table header 224 * RETURN: Status and pointer to mapped table header
@@ -382,8 +333,8 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
382 * 333 *
383 * FUNCTION: acpi_get_table_with_size 334 * FUNCTION: acpi_get_table_with_size
384 * 335 *
385 * PARAMETERS: Signature - ACPI signature of needed table 336 * PARAMETERS: signature - ACPI signature of needed table
386 * Instance - Which instance (for SSDTs) 337 * instance - Which instance (for SSDTs)
387 * out_table - Where the pointer to the table is returned 338 * out_table - Where the pointer to the table is returned
388 * 339 *
389 * RETURN: Status and pointer to table 340 * RETURN: Status and pointer to table
@@ -436,6 +387,7 @@ acpi_get_table_with_size(char *signature,
436 387
437 return (AE_NOT_FOUND); 388 return (AE_NOT_FOUND);
438} 389}
390ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
439 391
440acpi_status 392acpi_status
441acpi_get_table(char *signature, 393acpi_get_table(char *signature,
@@ -453,7 +405,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table)
453 * FUNCTION: acpi_get_table_by_index 405 * FUNCTION: acpi_get_table_by_index
454 * 406 *
455 * PARAMETERS: table_index - Table index 407 * PARAMETERS: table_index - Table index
456 * Table - Where the pointer to the table is returned 408 * table - Where the pointer to the table is returned
457 * 409 *
458 * RETURN: Status and pointer to the table 410 * RETURN: Status and pointer to the table
459 * 411 *
@@ -502,157 +454,13 @@ acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
502 454
503ACPI_EXPORT_SYMBOL(acpi_get_table_by_index) 455ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
504 456
505/*******************************************************************************
506 *
507 * FUNCTION: acpi_tb_load_namespace
508 *
509 * PARAMETERS: None
510 *
511 * RETURN: Status
512 *
513 * DESCRIPTION: Load the namespace from the DSDT and all SSDTs/PSDTs found in
514 * the RSDT/XSDT.
515 *
516 ******************************************************************************/
517static acpi_status acpi_tb_load_namespace(void)
518{
519 acpi_status status;
520 u32 i;
521 struct acpi_table_header *new_dsdt;
522
523 ACPI_FUNCTION_TRACE(tb_load_namespace);
524
525 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
526
527 /*
528 * Load the namespace. The DSDT is required, but any SSDT and
529 * PSDT tables are optional. Verify the DSDT.
530 */
531 if (!acpi_gbl_root_table_list.current_table_count ||
532 !ACPI_COMPARE_NAME(&
533 (acpi_gbl_root_table_list.
534 tables[ACPI_TABLE_INDEX_DSDT].signature),
535 ACPI_SIG_DSDT)
536 ||
537 ACPI_FAILURE(acpi_tb_verify_table
538 (&acpi_gbl_root_table_list.
539 tables[ACPI_TABLE_INDEX_DSDT]))) {
540 status = AE_NO_ACPI_TABLES;
541 goto unlock_and_exit;
542 }
543
544 /*
545 * Save the DSDT pointer for simple access. This is the mapped memory
546 * address. We must take care here because the address of the .Tables
547 * array can change dynamically as tables are loaded at run-time. Note:
548 * .Pointer field is not validated until after call to acpi_tb_verify_table.
549 */
550 acpi_gbl_DSDT =
551 acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer;
552
553 /*
554 * Optionally copy the entire DSDT to local memory (instead of simply
555 * mapping it.) There are some BIOSs that corrupt or replace the original
556 * DSDT, creating the need for this option. Default is FALSE, do not copy
557 * the DSDT.
558 */
559 if (acpi_gbl_copy_dsdt_locally) {
560 new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT);
561 if (new_dsdt) {
562 acpi_gbl_DSDT = new_dsdt;
563 }
564 }
565
566 /*
567 * Save the original DSDT header for detection of table corruption
568 * and/or replacement of the DSDT from outside the OS.
569 */
570 ACPI_MEMCPY(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT,
571 sizeof(struct acpi_table_header));
572
573 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
574
575 /* Load and parse tables */
576
577 status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node);
578 if (ACPI_FAILURE(status)) {
579 return_ACPI_STATUS(status);
580 }
581
582 /* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */
583
584 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
585 for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
586 if ((!ACPI_COMPARE_NAME
587 (&(acpi_gbl_root_table_list.tables[i].signature),
588 ACPI_SIG_SSDT)
589 &&
590 !ACPI_COMPARE_NAME(&
591 (acpi_gbl_root_table_list.tables[i].
592 signature), ACPI_SIG_PSDT))
593 ||
594 ACPI_FAILURE(acpi_tb_verify_table
595 (&acpi_gbl_root_table_list.tables[i]))) {
596 continue;
597 }
598
599 if (no_auto_ssdt) {
600 printk(KERN_WARNING "ACPI: SSDT ignored due to \"acpi_no_auto_ssdt\"\n");
601 continue;
602 }
603
604 /* Ignore errors while loading tables, get as many as possible */
605
606 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
607 (void)acpi_ns_load_table(i, acpi_gbl_root_node);
608 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
609 }
610
611 ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n"));
612
613 unlock_and_exit:
614 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
615 return_ACPI_STATUS(status);
616}
617
618/*******************************************************************************
619 *
620 * FUNCTION: acpi_load_tables
621 *
622 * PARAMETERS: None
623 *
624 * RETURN: Status
625 *
626 * DESCRIPTION: Load the ACPI tables from the RSDT/XSDT
627 *
628 ******************************************************************************/
629
630acpi_status acpi_load_tables(void)
631{
632 acpi_status status;
633
634 ACPI_FUNCTION_TRACE(acpi_load_tables);
635
636 /* Load the namespace from the tables */
637
638 status = acpi_tb_load_namespace();
639 if (ACPI_FAILURE(status)) {
640 ACPI_EXCEPTION((AE_INFO, status,
641 "While loading namespace from ACPI tables"));
642 }
643
644 return_ACPI_STATUS(status);
645}
646
647ACPI_EXPORT_SYMBOL(acpi_load_tables)
648
649 457
650/******************************************************************************* 458/*******************************************************************************
651 * 459 *
652 * FUNCTION: acpi_install_table_handler 460 * FUNCTION: acpi_install_table_handler
653 * 461 *
654 * PARAMETERS: Handler - Table event handler 462 * PARAMETERS: handler - Table event handler
655 * Context - Value passed to the handler on each event 463 * context - Value passed to the handler on each event
656 * 464 *
657 * RETURN: Status 465 * RETURN: Status
658 * 466 *
@@ -698,7 +506,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_table_handler)
698 * 506 *
699 * FUNCTION: acpi_remove_table_handler 507 * FUNCTION: acpi_remove_table_handler
700 * 508 *
701 * PARAMETERS: Handler - Table event handler that was installed 509 * PARAMETERS: handler - Table event handler that was installed
702 * previously. 510 * previously.
703 * 511 *
704 * RETURN: Status 512 * RETURN: Status
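
For context on the install/remove pair above: a table handler is a single global callback invoked on table events, and the tbxfload.c hunk below shows it fired with ACPI_TABLE_EVENT_LOAD and ACPI_TABLE_EVENT_UNLOAD. A hedged usage sketch (assumes the ACPICA headers; the handler body is illustrative):

	#include <acpi/acpi.h>

	/* Illustrative handler: called once per table load/unload event with
	 * the table header and the context pointer given at install time. */
	static acpi_status my_table_handler(u32 event, void *table, void *context)
	{
		struct acpi_table_header *hdr = table;

		ACPI_INFO((AE_INFO, "Table event %u for [%4.4s]",
			   event, hdr->signature));
		return AE_OK;
	}

	/* During init (sketch):
	 *	status = acpi_install_table_handler(my_table_handler, NULL);
	 * and on teardown:
	 *	status = acpi_remove_table_handler(my_table_handler);
	 */
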
@@ -734,15 +542,3 @@ acpi_status acpi_remove_table_handler(acpi_tbl_handler handler)
734} 542}
735 543
736ACPI_EXPORT_SYMBOL(acpi_remove_table_handler) 544ACPI_EXPORT_SYMBOL(acpi_remove_table_handler)
737
738
739static int __init acpi_no_auto_ssdt_setup(char *s) {
740
741 printk(KERN_NOTICE "ACPI: SSDT auto-load disabled\n");
742
743 no_auto_ssdt = 1;
744
745 return 1;
746}
747
748__setup("acpi_no_auto_ssdt", acpi_no_auto_ssdt_setup);
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
new file mode 100644
index 000000000000..f87cc63e69a1
--- /dev/null
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -0,0 +1,389 @@
1/******************************************************************************
2 *
3 * Module Name: tbxfload - Table load/unload external interfaces
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <linux/export.h>
45#include <acpi/acpi.h>
46#include "accommon.h"
47#include "acnamesp.h"
48#include "actables.h"
49
50#define _COMPONENT ACPI_TABLES
51ACPI_MODULE_NAME("tbxfload")
52
53/* Local prototypes */
54static acpi_status acpi_tb_load_namespace(void);
55
56static int no_auto_ssdt;
57
58/*******************************************************************************
59 *
60 * FUNCTION: acpi_load_tables
61 *
62 * PARAMETERS: None
63 *
64 * RETURN: Status
65 *
66 * DESCRIPTION: Load the ACPI tables from the RSDT/XSDT
67 *
68 ******************************************************************************/
69
70acpi_status acpi_load_tables(void)
71{
72 acpi_status status;
73
74 ACPI_FUNCTION_TRACE(acpi_load_tables);
75
76 /* Load the namespace from the tables */
77
78 status = acpi_tb_load_namespace();
79 if (ACPI_FAILURE(status)) {
80 ACPI_EXCEPTION((AE_INFO, status,
81 "While loading namespace from ACPI tables"));
82 }
83
84 return_ACPI_STATUS(status);
85}
86
87ACPI_EXPORT_SYMBOL(acpi_load_tables)
88
89/*******************************************************************************
90 *
91 * FUNCTION: acpi_tb_load_namespace
92 *
93 * PARAMETERS: None
94 *
95 * RETURN: Status
96 *
97 * DESCRIPTION: Load the namespace from the DSDT and all SSDTs/PSDTs found in
98 * the RSDT/XSDT.
99 *
100 ******************************************************************************/
101static acpi_status acpi_tb_load_namespace(void)
102{
103 acpi_status status;
104 u32 i;
105 struct acpi_table_header *new_dsdt;
106
107 ACPI_FUNCTION_TRACE(tb_load_namespace);
108
109 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
110
111 /*
112 * Load the namespace. The DSDT is required, but any SSDT and
113 * PSDT tables are optional. Verify the DSDT.
114 */
115 if (!acpi_gbl_root_table_list.current_table_count ||
116 !ACPI_COMPARE_NAME(&
117 (acpi_gbl_root_table_list.
118 tables[ACPI_TABLE_INDEX_DSDT].signature),
119 ACPI_SIG_DSDT)
120 ||
121 ACPI_FAILURE(acpi_tb_verify_table
122 (&acpi_gbl_root_table_list.
123 tables[ACPI_TABLE_INDEX_DSDT]))) {
124 status = AE_NO_ACPI_TABLES;
125 goto unlock_and_exit;
126 }
127
128 /*
129 * Save the DSDT pointer for simple access. This is the mapped memory
130 * address. We must take care here because the address of the .Tables
131 * array can change dynamically as tables are loaded at run-time. Note:
132 * .Pointer field is not validated until after call to acpi_tb_verify_table.
133 */
134 acpi_gbl_DSDT =
135 acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer;
136
137 /*
138 * Optionally copy the entire DSDT to local memory (instead of simply
139 * mapping it.) There are some BIOSs that corrupt or replace the original
140 * DSDT, creating the need for this option. Default is FALSE, do not copy
141 * the DSDT.
142 */
143 if (acpi_gbl_copy_dsdt_locally) {
144 new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT);
145 if (new_dsdt) {
146 acpi_gbl_DSDT = new_dsdt;
147 }
148 }
149
150 /*
151 * Save the original DSDT header for detection of table corruption
152 * and/or replacement of the DSDT from outside the OS.
153 */
154 ACPI_MEMCPY(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT,
155 sizeof(struct acpi_table_header));
156
157 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
158
159 /* Load and parse tables */
160
161 status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node);
162 if (ACPI_FAILURE(status)) {
163 return_ACPI_STATUS(status);
164 }
165
166 /* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */
167
168 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
169 for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
170 if ((!ACPI_COMPARE_NAME
171 (&(acpi_gbl_root_table_list.tables[i].signature),
172 ACPI_SIG_SSDT)
173 &&
174 !ACPI_COMPARE_NAME(&
175 (acpi_gbl_root_table_list.tables[i].
176 signature), ACPI_SIG_PSDT))
177 ||
178 ACPI_FAILURE(acpi_tb_verify_table
179 (&acpi_gbl_root_table_list.tables[i]))) {
180 continue;
181 }
182
183 if (no_auto_ssdt) {
184 printk(KERN_WARNING "ACPI: SSDT ignored due to \"acpi_no_auto_ssdt\"\n");
185 continue;
186 }
187
188 /* Ignore errors while loading tables, get as many as possible */
189
190 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
191 (void)acpi_ns_load_table(i, acpi_gbl_root_node);
192 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
193 }
194
195 ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n"));
196
197 unlock_and_exit:
198 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
199 return_ACPI_STATUS(status);
200}
201
202/*******************************************************************************
203 *
204 * FUNCTION: acpi_load_table
205 *
206 * PARAMETERS: table - Pointer to a buffer containing the ACPI
207 * table to be loaded.
208 *
209 * RETURN: Status
210 *
211 * DESCRIPTION: Dynamically load an ACPI table from the caller's buffer. Must
212 * be a valid ACPI table with a valid ACPI table header.
213 * Note1: Mainly intended to support hotplug addition of SSDTs.
214 * Note2: Does not copy the incoming table. User is responsible
215 * to ensure that the table is not deleted or unmapped.
216 *
217 ******************************************************************************/
218
219acpi_status acpi_load_table(struct acpi_table_header *table)
220{
221 acpi_status status;
222 struct acpi_table_desc table_desc;
223 u32 table_index;
224
225 ACPI_FUNCTION_TRACE(acpi_load_table);
226
227 /* Parameter validation */
228
229 if (!table) {
230 return_ACPI_STATUS(AE_BAD_PARAMETER);
231 }
232
233 /* Init local table descriptor */
234
235 ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
236 table_desc.address = ACPI_PTR_TO_PHYSADDR(table);
237 table_desc.pointer = table;
238 table_desc.length = table->length;
239 table_desc.flags = ACPI_TABLE_ORIGIN_UNKNOWN;
240
241 /* Must acquire the interpreter lock during this operation */
242
243 status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
244 if (ACPI_FAILURE(status)) {
245 return_ACPI_STATUS(status);
246 }
247
248 /* Install the table and load it into the namespace */
249
250 ACPI_INFO((AE_INFO, "Host-directed Dynamic ACPI Table Load:"));
251 status = acpi_tb_add_table(&table_desc, &table_index);
252 if (ACPI_FAILURE(status)) {
253 goto unlock_and_exit;
254 }
255
256 status = acpi_ns_load_table(table_index, acpi_gbl_root_node);
257
258 /* Invoke table handler if present */
259
260 if (acpi_gbl_table_handler) {
261 (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table,
262 acpi_gbl_table_handler_context);
263 }
264
265 unlock_and_exit:
266 (void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
267 return_ACPI_STATUS(status);
268}
269
270ACPI_EXPORT_SYMBOL(acpi_load_table)
271
272/*******************************************************************************
273 *
274 * FUNCTION: acpi_unload_parent_table
275 *
276 * PARAMETERS: object - Handle to any namespace object owned by
277 * the table to be unloaded
278 *
279 * RETURN: Status
280 *
281 * DESCRIPTION: Via any namespace object within an SSDT or OEMx table, unloads
282 * the table and deletes all namespace objects associated with
283 * that table. Unloading of the DSDT is not allowed.
284 * Note: Mainly intended to support hotplug removal of SSDTs.
285 *
286 ******************************************************************************/
287acpi_status acpi_unload_parent_table(acpi_handle object)
288{
289 struct acpi_namespace_node *node =
290 ACPI_CAST_PTR(struct acpi_namespace_node, object);
291 acpi_status status = AE_NOT_EXIST;
292 acpi_owner_id owner_id;
293 u32 i;
294
295 ACPI_FUNCTION_TRACE(acpi_unload_parent_table);
296
297 /* Parameter validation */
298
299 if (!object) {
300 return_ACPI_STATUS(AE_BAD_PARAMETER);
301 }
302
303 /*
304 * The node owner_id is currently the same as the parent table ID.
305 * However, this could change in the future.
306 */
307 owner_id = node->owner_id;
308 if (!owner_id) {
309
310 /* owner_id==0 means DSDT is the owner. DSDT cannot be unloaded */
311
312 return_ACPI_STATUS(AE_TYPE);
313 }
314
315 /* Must acquire the interpreter lock during this operation */
316
317 status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
318 if (ACPI_FAILURE(status)) {
319 return_ACPI_STATUS(status);
320 }
321
322 /* Find the table in the global table list */
323
324 for (i = 0; i < acpi_gbl_root_table_list.current_table_count; i++) {
325 if (owner_id != acpi_gbl_root_table_list.tables[i].owner_id) {
326 continue;
327 }
328
329 /*
330 * Allow unload of SSDT and OEMx tables only. Do not allow unload
331 * of the DSDT. No other types of tables should get here, since
332 * only these types can contain AML and thus are the only types
333 * that can create namespace objects.
334 */
335 if (ACPI_COMPARE_NAME
336 (acpi_gbl_root_table_list.tables[i].signature.ascii,
337 ACPI_SIG_DSDT)) {
338 status = AE_TYPE;
339 break;
340 }
341
342 /* Ensure the table is actually loaded */
343
344 if (!acpi_tb_is_table_loaded(i)) {
345 status = AE_NOT_EXIST;
346 break;
347 }
348
349 /* Invoke table handler if present */
350
351 if (acpi_gbl_table_handler) {
352 (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
353 acpi_gbl_root_table_list.
354 tables[i].pointer,
355 acpi_gbl_table_handler_context);
356 }
357
358 /*
359 * Delete all namespace objects owned by this table. Note that
360 * these objects can appear anywhere in the namespace by virtue
361 * of the AML "Scope" operator. Thus, we need to track ownership
362 * by an ID, not simply a position within the hierarchy.
363 */
364 status = acpi_tb_delete_namespace_by_owner(i);
365 if (ACPI_FAILURE(status)) {
366 break;
367 }
368
369 status = acpi_tb_release_owner_id(i);
370 acpi_tb_set_table_loaded_flag(i, FALSE);
371 break;
372 }
373
374 (void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
375 return_ACPI_STATUS(status);
376}
377
378ACPI_EXPORT_SYMBOL(acpi_unload_parent_table)
379
380static int __init acpi_no_auto_ssdt_setup(char *s) {
381
382 printk(KERN_NOTICE "ACPI: SSDT auto-load disabled\n");
383
384 no_auto_ssdt = 1;
385
386 return 1;
387}
388
389__setup("acpi_no_auto_ssdt", acpi_no_auto_ssdt_setup);
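
A hedged usage sketch of the two interfaces the new file exports for dynamic tables; it assumes the caller already holds a complete SSDT image in memory (the function and parameter names here are illustrative):

	#include <acpi/acpi.h>

	/* 'ssdt_image' must point at a complete, valid ACPI table (header
	 * plus body) that stays mapped for the lifetime of the load -- per
	 * Note2 above, acpi_load_table() does not copy the buffer. */
	static acpi_status load_my_ssdt(struct acpi_table_header *ssdt_image)
	{
		return acpi_load_table(ssdt_image);
	}

	/* Later, any namespace handle owned by that SSDT (e.g. a device node
	 * it created) can be used to unload the whole parent table: */
	static acpi_status unload_my_ssdt(acpi_handle node_in_ssdt)
	{
		return acpi_unload_parent_table(node_in_ssdt);
	}
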
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 4258f647ca3d..74e720800037 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -57,7 +57,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
57 * 57 *
58 * FUNCTION: acpi_tb_validate_rsdp 58 * FUNCTION: acpi_tb_validate_rsdp
59 * 59 *
60 * PARAMETERS: Rsdp - Pointer to unvalidated RSDP 60 * PARAMETERS: rsdp - Pointer to unvalidated RSDP
61 * 61 *
62 * RETURN: Status 62 * RETURN: Status
63 * 63 *
@@ -107,10 +107,10 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
107 * 107 *
108 * RETURN: Status, RSDP physical address 108 * RETURN: Status, RSDP physical address
109 * 109 *
110 * DESCRIPTION: Search lower 1_mbyte of memory for the root system descriptor 110 * DESCRIPTION: Search lower 1Mbyte of memory for the root system descriptor
111 * pointer structure. If it is found, set *RSDP to point to it. 111 * pointer structure. If it is found, set *RSDP to point to it.
112 * 112 *
113 * NOTE1: The RSDP must be either in the first 1_k of the Extended 113 * NOTE1: The RSDP must be either in the first 1K of the Extended
114 * BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.) 114 * BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.)
115 * Only a 32-bit physical address is necessary. 115 * Only a 32-bit physical address is necessary.
116 * 116 *
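
The search described above rests on two fixed facts from the ACPI specification: the RSDP carries the 8-byte signature "RSD PTR " (trailing space included) and must start on a 16-byte boundary within the EBDA's first 1K or the E0000-FFFFF BIOS window. A minimal scanning sketch over an already-mapped buffer (plain C; not the ACPICA scanner):

	#include <stdint.h>
	#include <string.h>

	#define RSDP_SIG	"RSD PTR "	/* 8 bytes, trailing space included */
	#define RSDP_ALIGN	16

	/* Scan a mapped region on 16-byte boundaries for the RSDP signature;
	 * returns the offset of the first match or -1. Validation (checksum
	 * of the first 20 bytes) is left to the caller, as in
	 * acpi_tb_validate_rsdp. */
	static long scan_for_rsdp(const uint8_t *base, size_t len)
	{
		size_t off;

		for (off = 0; off + 8 <= len; off += RSDP_ALIGN)
			if (!memcmp(base + off, RSDP_SIG, 8))
				return (long)off;
		return -1;
	}
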
@@ -152,7 +152,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
152 if (physical_address > 0x400) { 152 if (physical_address > 0x400) {
153 /* 153 /*
154 * 1b) Search EBDA paragraphs (EBDA is required to be a 154 * 1b) Search EBDA paragraphs (EBDA is required to be a
155 * minimum of 1_k length) 155 * minimum of 1K length)
156 */ 156 */
157 table_ptr = acpi_os_map_memory((acpi_physical_address) 157 table_ptr = acpi_os_map_memory((acpi_physical_address)
158 physical_address, 158 physical_address,
@@ -216,7 +216,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
216 216
217 /* A valid RSDP was not found */ 217 /* A valid RSDP was not found */
218 218
219 ACPI_ERROR((AE_INFO, "A valid RSDP was not found")); 219 ACPI_BIOS_ERROR((AE_INFO, "A valid RSDP was not found"));
220 return_ACPI_STATUS(AE_NOT_FOUND); 220 return_ACPI_STATUS(AE_NOT_FOUND);
221} 221}
222 222
@@ -225,7 +225,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
225 * FUNCTION: acpi_tb_scan_memory_for_rsdp 225 * FUNCTION: acpi_tb_scan_memory_for_rsdp
226 * 226 *
227 * PARAMETERS: start_address - Starting pointer for search 227 * PARAMETERS: start_address - Starting pointer for search
228 * Length - Maximum length to search 228 * length - Maximum length to search
229 * 229 *
230 * RETURN: Pointer to the RSDP if found, otherwise NULL. 230 * RETURN: Pointer to the RSDP if found, otherwise NULL.
231 * 231 *
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index 67932aebe6dd..64880306133d 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -53,8 +53,8 @@ ACPI_MODULE_NAME("utaddress")
53 * FUNCTION: acpi_ut_add_address_range 53 * FUNCTION: acpi_ut_add_address_range
54 * 54 *
55 * PARAMETERS: space_id - Address space ID 55 * PARAMETERS: space_id - Address space ID
56 * Address - op_region start address 56 * address - op_region start address
57 * Length - op_region length 57 * length - op_region length
58 * region_node - op_region namespace node 58 * region_node - op_region namespace node
59 * 59 *
60 * RETURN: Status 60 * RETURN: Status
@@ -186,9 +186,9 @@ acpi_ut_remove_address_range(acpi_adr_space_type space_id,
186 * FUNCTION: acpi_ut_check_address_range 186 * FUNCTION: acpi_ut_check_address_range
187 * 187 *
188 * PARAMETERS: space_id - Address space ID 188 * PARAMETERS: space_id - Address space ID
189 * Address - Start address 189 * address - Start address
190 * Length - Length of address range 190 * length - Length of address range
191 * Warn - TRUE if warning on overlap desired 191 * warn - TRUE if warning on overlap desired
192 * 192 *
193 * RETURN: Count of the number of conflicts detected. Zero is always 193 * RETURN: Count of the number of conflicts detected. Zero is always
194 * returned for Space IDs other than Memory or I/O. 194 * returned for Space IDs other than Memory or I/O.
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 9982d2ea66fb..ed29d474095e 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -189,7 +189,7 @@ acpi_status acpi_ut_delete_caches(void)
189 * 189 *
190 * FUNCTION: acpi_ut_validate_buffer 190 * FUNCTION: acpi_ut_validate_buffer
191 * 191 *
192 * PARAMETERS: Buffer - Buffer descriptor to be validated 192 * PARAMETERS: buffer - Buffer descriptor to be validated
193 * 193 *
194 * RETURN: Status 194 * RETURN: Status
195 * 195 *
@@ -227,7 +227,7 @@ acpi_status acpi_ut_validate_buffer(struct acpi_buffer * buffer)
227 * 227 *
228 * FUNCTION: acpi_ut_initialize_buffer 228 * FUNCTION: acpi_ut_initialize_buffer
229 * 229 *
230 * PARAMETERS: Buffer - Buffer to be validated 230 * PARAMETERS: buffer - Buffer to be validated
231 * required_length - Length needed 231 * required_length - Length needed
232 * 232 *
233 * RETURN: Status 233 * RETURN: Status
@@ -308,10 +308,10 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
308 * 308 *
309 * FUNCTION: acpi_ut_allocate 309 * FUNCTION: acpi_ut_allocate
310 * 310 *
311 * PARAMETERS: Size - Size of the allocation 311 * PARAMETERS: size - Size of the allocation
312 * Component - Component type of caller 312 * component - Component type of caller
313 * Module - Source file name of caller 313 * module - Source file name of caller
314 * Line - Line number of caller 314 * line - Line number of caller
315 * 315 *
316 * RETURN: Address of the allocated memory on success, NULL on failure. 316 * RETURN: Address of the allocated memory on success, NULL on failure.
317 * 317 *
@@ -352,10 +352,10 @@ void *acpi_ut_allocate(acpi_size size,
352 * 352 *
353 * FUNCTION: acpi_ut_allocate_zeroed 353 * FUNCTION: acpi_ut_allocate_zeroed
354 * 354 *
355 * PARAMETERS: Size - Size of the allocation 355 * PARAMETERS: size - Size of the allocation
356 * Component - Component type of caller 356 * component - Component type of caller
357 * Module - Source file name of caller 357 * module - Source file name of caller
358 * Line - Line number of caller 358 * line - Line number of caller
359 * 359 *
360 * RETURN: Address of the allocated memory on success, NULL on failure. 360 * RETURN: Address of the allocated memory on success, NULL on failure.
361 * 361 *
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 3317c0a406ee..294692ae76e9 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -317,7 +317,7 @@ acpi_ut_copy_ielement_to_eelement(u8 object_type,
317 * FUNCTION: acpi_ut_copy_ipackage_to_epackage 317 * FUNCTION: acpi_ut_copy_ipackage_to_epackage
318 * 318 *
319 * PARAMETERS: internal_object - Pointer to the object we are returning 319 * PARAMETERS: internal_object - Pointer to the object we are returning
320 * Buffer - Where the object is returned 320 * buffer - Where the object is returned
321 * space_used - Where the object length is returned 321 * space_used - Where the object length is returned
322 * 322 *
323 * RETURN: Status 323 * RETURN: Status
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index a0998a886318..e810894149ae 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -145,7 +145,7 @@ static const char *acpi_ut_trim_function_name(const char *function_name)
145 * function_name - Caller's procedure name 145 * function_name - Caller's procedure name
146 * module_name - Caller's module name 146 * module_name - Caller's module name
147 * component_id - Caller's component ID 147 * component_id - Caller's component ID
148 * Format - Printf format field 148 * format - Printf format field
149 * ... - Optional printf arguments 149 * ... - Optional printf arguments
150 * 150 *
151 * RETURN: None 151 * RETURN: None
@@ -217,7 +217,7 @@ ACPI_EXPORT_SYMBOL(acpi_debug_print)
217 * function_name - Caller's procedure name 217 * function_name - Caller's procedure name
218 * module_name - Caller's module name 218 * module_name - Caller's module name
219 * component_id - Caller's component ID 219 * component_id - Caller's component ID
220 * Format - Printf format field 220 * format - Printf format field
221 * ... - Optional printf arguments 221 * ... - Optional printf arguments
222 * 222 *
223 * RETURN: None 223 * RETURN: None
@@ -286,7 +286,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_trace)
286 * function_name - Caller's procedure name 286 * function_name - Caller's procedure name
287 * module_name - Caller's module name 287 * module_name - Caller's module name
288 * component_id - Caller's component ID 288 * component_id - Caller's component ID
289 * Pointer - Pointer to display 289 * pointer - Pointer to display
290 * 290 *
291 * RETURN: None 291 * RETURN: None
292 * 292 *
@@ -315,7 +315,7 @@ acpi_ut_trace_ptr(u32 line_number,
315 * function_name - Caller's procedure name 315 * function_name - Caller's procedure name
316 * module_name - Caller's module name 316 * module_name - Caller's module name
317 * component_id - Caller's component ID 317 * component_id - Caller's component ID
318 * String - Additional string to display 318 * string - Additional string to display
319 * 319 *
320 * RETURN: None 320 * RETURN: None
321 * 321 *
@@ -346,7 +346,7 @@ acpi_ut_trace_str(u32 line_number,
346 * function_name - Caller's procedure name 346 * function_name - Caller's procedure name
347 * module_name - Caller's module name 347 * module_name - Caller's module name
348 * component_id - Caller's component ID 348 * component_id - Caller's component ID
349 * Integer - Integer to display 349 * integer - Integer to display
350 * 350 *
351 * RETURN: None 351 * RETURN: None
352 * 352 *
@@ -408,7 +408,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_exit)
408 * function_name - Caller's procedure name 408 * function_name - Caller's procedure name
409 * module_name - Caller's module name 409 * module_name - Caller's module name
410 * component_id - Caller's component ID 410 * component_id - Caller's component ID
411 * Status - Exit status code 411 * status - Exit status code
412 * 412 *
413 * RETURN: None 413 * RETURN: None
414 * 414 *
@@ -449,7 +449,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_status_exit)
449 * function_name - Caller's procedure name 449 * function_name - Caller's procedure name
450 * module_name - Caller's module name 450 * module_name - Caller's module name
451 * component_id - Caller's component ID 451 * component_id - Caller's component ID
452 * Value - Value to be printed with exit msg 452 * value - Value to be printed with exit msg
453 * 453 *
454 * RETURN: None 454 * RETURN: None
455 * 455 *
@@ -481,7 +481,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_value_exit)
481 * function_name - Caller's procedure name 481 * function_name - Caller's procedure name
482 * module_name - Caller's module name 482 * module_name - Caller's module name
483 * component_id - Caller's component ID 483 * component_id - Caller's component ID
484 * Ptr - Pointer to display 484 * ptr - Pointer to display
485 * 485 *
486 * RETURN: None 486 * RETURN: None
487 * 487 *
@@ -508,10 +508,10 @@ acpi_ut_ptr_exit(u32 line_number,
508 * 508 *
509 * FUNCTION: acpi_ut_dump_buffer 509 * FUNCTION: acpi_ut_dump_buffer
510 * 510 *
511 * PARAMETERS: Buffer - Buffer to dump 511 * PARAMETERS: buffer - Buffer to dump
512 * Count - Amount to dump, in bytes 512 * count - Amount to dump, in bytes
513 * Display - BYTE, WORD, DWORD, or QWORD display 513 * display - BYTE, WORD, DWORD, or QWORD display
514 * component_iD - Caller's component ID 514 * component_ID - Caller's component ID
515 * 515 *
516 * RETURN: None 516 * RETURN: None
517 * 517 *
@@ -625,10 +625,10 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
625 * 625 *
626 * FUNCTION: acpi_ut_dump_buffer 626 * FUNCTION: acpi_ut_dump_buffer
627 * 627 *
628 * PARAMETERS: Buffer - Buffer to dump 628 * PARAMETERS: buffer - Buffer to dump
629 * Count - Amount to dump, in bytes 629 * count - Amount to dump, in bytes
630 * Display - BYTE, WORD, DWORD, or QWORD display 630 * display - BYTE, WORD, DWORD, or QWORD display
631 * component_iD - Caller's component ID 631 * component_ID - Caller's component ID
632 * 632 *
633 * RETURN: None 633 * RETURN: None
634 * 634 *
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 684849949bf3..60a158472d82 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -49,41 +49,6 @@
49#define _COMPONENT ACPI_UTILITIES 49#define _COMPONENT ACPI_UTILITIES
50ACPI_MODULE_NAME("utdecode") 50ACPI_MODULE_NAME("utdecode")
51 51
52/*******************************************************************************
53 *
54 * FUNCTION: acpi_format_exception
55 *
56 * PARAMETERS: Status - The acpi_status code to be formatted
57 *
58 * RETURN: A string containing the exception text. A valid pointer is
59 * always returned.
60 *
61 * DESCRIPTION: This function translates an ACPI exception into an ASCII string
62 * It is here instead of utxface.c so it is always present.
63 *
64 ******************************************************************************/
65const char *acpi_format_exception(acpi_status status)
66{
67 const char *exception = NULL;
68
69 ACPI_FUNCTION_ENTRY();
70
71 exception = acpi_ut_validate_exception(status);
72 if (!exception) {
73
74 /* Exception code was not recognized */
75
76 ACPI_ERROR((AE_INFO,
77 "Unknown exception code: 0x%8.8X", status));
78
79 exception = "UNKNOWN_STATUS_CODE";
80 }
81
82 return (ACPI_CAST_PTR(const char, exception));
83}
84
85ACPI_EXPORT_SYMBOL(acpi_format_exception)
86
87/* 52/*
88 * Properties of the ACPI Object Types, both internal and external. 53 * Properties of the ACPI Object Types, both internal and external.
89 * The table is indexed by values of acpi_object_type 54 * The table is indexed by values of acpi_object_type
@@ -126,8 +91,8 @@ const u8 acpi_gbl_ns_properties[ACPI_NUM_NS_TYPES] = {
126 * 91 *
127 * FUNCTION: acpi_ut_hex_to_ascii_char 92 * FUNCTION: acpi_ut_hex_to_ascii_char
128 * 93 *
129 * PARAMETERS: Integer - Contains the hex digit 94 * PARAMETERS: integer - Contains the hex digit
130 * Position - bit position of the digit within the 95 * position - bit position of the digit within the
131 * integer (multiple of 4) 96 * integer (multiple of 4)
132 * 97 *
133 * RETURN: The converted Ascii character 98 * RETURN: The converted Ascii character
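Given the documentation above, the conversion reduces to isolating one 4-bit digit and indexing a lookup string. A minimal sketch of that behavior (assumed implementation, not the ACPICA source):

    static const char hex_chars[] = "0123456789ABCDEF";

    /* Extract the hex digit at "position" (a multiple of 4), map to ASCII */
    static char hex_to_ascii_char_sketch(unsigned long long integer,
                                         unsigned int position)
    {
        return hex_chars[(integer >> position) & 0x0F];
    }

    /* Example: hex_to_ascii_char_sketch(0x12AB, 4) returns 'A' */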
@@ -164,16 +129,17 @@ char acpi_ut_hex_to_ascii_char(u64 integer, u32 position)
164/* Region type decoding */ 129/* Region type decoding */
165 130
166const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = { 131const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
167 "SystemMemory", 132 "SystemMemory", /* 0x00 */
168 "SystemIO", 133 "SystemIO", /* 0x01 */
169 "PCI_Config", 134 "PCI_Config", /* 0x02 */
170 "EmbeddedControl", 135 "EmbeddedControl", /* 0x03 */
171 "SMBus", 136 "SMBus", /* 0x04 */
172 "SystemCMOS", 137 "SystemCMOS", /* 0x05 */
173 "PCIBARTarget", 138 "PCIBARTarget", /* 0x06 */
174 "IPMI", 139 "IPMI", /* 0x07 */
175 "GeneralPurposeIo", 140 "GeneralPurposeIo", /* 0x08 */
176 "GenericSerialBus" 141 "GenericSerialBus", /* 0x09 */
142 "PCC" /* 0x0A */
177}; 143};
178 144
179char *acpi_ut_get_region_name(u8 space_id) 145char *acpi_ut_get_region_name(u8 space_id)
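acpi_ut_get_region_name indexes the table above by space_id, so the lookup must be bounds-checked against the table size. A sketch of that guard with illustrative names (the real function also special-cases user-defined region IDs):

    static const char *region_types_sketch[] = {
        "SystemMemory", "SystemIO", "PCI_Config", "EmbeddedControl",
        "SMBus", "SystemCMOS", "PCIBARTarget", "IPMI",
        "GeneralPurposeIo", "GenericSerialBus", "PCC",
    };

    static const char *get_region_name_sketch(unsigned int space_id)
    {
        if (space_id >=
            sizeof(region_types_sketch) / sizeof(region_types_sketch[0])) {
            return "InvalidSpaceId";
        }

        return region_types_sketch[space_id];
    }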
@@ -228,7 +194,7 @@ char *acpi_ut_get_event_name(u32 event_id)
228 * 194 *
229 * FUNCTION: acpi_ut_get_type_name 195 * FUNCTION: acpi_ut_get_type_name
230 * 196 *
231 * PARAMETERS: Type - An ACPI object type 197 * PARAMETERS: type - An ACPI object type
232 * 198 *
233 * RETURN: Decoded ACPI object type name 199 * RETURN: Decoded ACPI object type name
234 * 200 *
@@ -306,7 +272,7 @@ char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc)
306 * 272 *
307 * FUNCTION: acpi_ut_get_node_name 273 * FUNCTION: acpi_ut_get_node_name
308 * 274 *
309 * PARAMETERS: Object - A namespace node 275 * PARAMETERS: object - A namespace node
310 * 276 *
311 * RETURN: ASCII name of the node 277 * RETURN: ASCII name of the node
312 * 278 *
@@ -351,7 +317,7 @@ char *acpi_ut_get_node_name(void *object)
351 * 317 *
352 * FUNCTION: acpi_ut_get_descriptor_name 318 * FUNCTION: acpi_ut_get_descriptor_name
353 * 319 *
354 * PARAMETERS: Object - An ACPI object 320 * PARAMETERS: object - An ACPI object
355 * 321 *
356 * RETURN: Decoded name of the descriptor type 322 * RETURN: Decoded name of the descriptor type
357 * 323 *
@@ -401,7 +367,7 @@ char *acpi_ut_get_descriptor_name(void *object)
401 * 367 *
402 * FUNCTION: acpi_ut_get_reference_name 368 * FUNCTION: acpi_ut_get_reference_name
403 * 369 *
404 * PARAMETERS: Object - An ACPI reference object 370 * PARAMETERS: object - An ACPI reference object
405 * 371 *
406 * RETURN: Decoded name of the type of reference 372 * RETURN: Decoded name of the type of reference
407 * 373 *
@@ -532,7 +498,7 @@ const char *acpi_ut_get_notify_name(u32 notify_value)
532 * 498 *
533 * FUNCTION: acpi_ut_valid_object_type 499 * FUNCTION: acpi_ut_valid_object_type
534 * 500 *
535 * PARAMETERS: Type - Object type to be validated 501 * PARAMETERS: type - Object type to be validated
536 * 502 *
537 * RETURN: TRUE if valid object type, FALSE otherwise 503 * RETURN: TRUE if valid object type, FALSE otherwise
538 * 504 *
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 2a6c3e183697..798105443d0f 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -60,7 +60,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action);
60 * 60 *
61 * FUNCTION: acpi_ut_delete_internal_obj 61 * FUNCTION: acpi_ut_delete_internal_obj
62 * 62 *
63 * PARAMETERS: Object - Object to be deleted 63 * PARAMETERS: object - Object to be deleted
64 * 64 *
65 * RETURN: None 65 * RETURN: None
66 * 66 *
@@ -152,7 +152,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
152 case ACPI_TYPE_PROCESSOR: 152 case ACPI_TYPE_PROCESSOR:
153 case ACPI_TYPE_THERMAL: 153 case ACPI_TYPE_THERMAL:
154 154
155 /* Walk the notify handler list for this object */ 155 /* Walk the address handler list for this object */
156 156
157 handler_desc = object->common_notify.handler; 157 handler_desc = object->common_notify.handler;
158 while (handler_desc) { 158 while (handler_desc) {
@@ -358,8 +358,8 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list)
358 * 358 *
359 * FUNCTION: acpi_ut_update_ref_count 359 * FUNCTION: acpi_ut_update_ref_count
360 * 360 *
361 * PARAMETERS: Object - Object whose ref count is to be updated 361 * PARAMETERS: object - Object whose ref count is to be updated
362 * Action - What to do 362 * action - What to do
363 * 363 *
364 * RETURN: New ref count 364 * RETURN: New ref count
365 * 365 *
@@ -456,9 +456,9 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
456 * 456 *
457 * FUNCTION: acpi_ut_update_object_reference 457 * FUNCTION: acpi_ut_update_object_reference
458 * 458 *
459 * PARAMETERS: Object - Increment ref count for this object 459 * PARAMETERS: object - Increment ref count for this object
460 * and all sub-objects 460 * and all sub-objects
461 * Action - Either REF_INCREMENT or REF_DECREMENT or 461 * action - Either REF_INCREMENT or REF_DECREMENT or
462 * REF_FORCE_DELETE 462 * REF_FORCE_DELETE
463 * 463 *
464 * RETURN: Status 464 * RETURN: Status
@@ -480,6 +480,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
480 acpi_status status = AE_OK; 480 acpi_status status = AE_OK;
481 union acpi_generic_state *state_list = NULL; 481 union acpi_generic_state *state_list = NULL;
482 union acpi_operand_object *next_object = NULL; 482 union acpi_operand_object *next_object = NULL;
483 union acpi_operand_object *prev_object;
483 union acpi_generic_state *state; 484 union acpi_generic_state *state;
484 u32 i; 485 u32 i;
485 486
@@ -505,12 +506,21 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
505 case ACPI_TYPE_POWER: 506 case ACPI_TYPE_POWER:
506 case ACPI_TYPE_THERMAL: 507 case ACPI_TYPE_THERMAL:
507 508
508 /* Update the notify objects for these types (if present) */ 509 /*
509 510 * Update the notify objects for these types (if present)
510 acpi_ut_update_ref_count(object->common_notify. 511 * Two lists, system and device notify handlers.
511 system_notify, action); 512 */
512 acpi_ut_update_ref_count(object->common_notify. 513 for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) {
513 device_notify, action); 514 prev_object =
515 object->common_notify.notify_list[i];
516 while (prev_object) {
517 next_object =
518 prev_object->notify.next[i];
519 acpi_ut_update_ref_count(prev_object,
520 action);
521 prev_object = next_object;
522 }
523 }
514 break; 524 break;
515 525
516 case ACPI_TYPE_PACKAGE: 526 case ACPI_TYPE_PACKAGE:
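The replacement hunk above walks each per-type notify handler list, and it deliberately loads next[i] before calling acpi_ut_update_ref_count, since a REF_DECREMENT can free the node being visited. The same save-next-then-act pattern in isolation:

    struct node {
        struct node *next;
    };

    /* "action" may free the node it is given, so capture next first */
    static void walk_and_update(struct node *head,
                                void (*action)(struct node *))
    {
        struct node *curr = head;

        while (curr) {
            struct node *next = curr->next;

            action(curr);
            curr = next;
        }
    }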
@@ -630,7 +640,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
630 * 640 *
631 * FUNCTION: acpi_ut_add_reference 641 * FUNCTION: acpi_ut_add_reference
632 * 642 *
633 * PARAMETERS: Object - Object whose reference count is to be 643 * PARAMETERS: object - Object whose reference count is to be
634 * incremented 644 * incremented
635 * 645 *
636 * RETURN: None 646 * RETURN: None
@@ -664,7 +674,7 @@ void acpi_ut_add_reference(union acpi_operand_object *object)
664 * 674 *
665 * FUNCTION: acpi_ut_remove_reference 675 * FUNCTION: acpi_ut_remove_reference
666 * 676 *
667 * PARAMETERS: Object - Object whose ref count will be decremented 677 * PARAMETERS: object - Object whose ref count will be decremented
668 * 678 *
669 * RETURN: None 679 * RETURN: None
670 * 680 *
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 479f32b33415..a9c65fbea5f4 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -53,7 +53,7 @@ ACPI_MODULE_NAME("uteval")
53 * FUNCTION: acpi_ut_evaluate_object 53 * FUNCTION: acpi_ut_evaluate_object
54 * 54 *
55 * PARAMETERS: prefix_node - Starting node 55 * PARAMETERS: prefix_node - Starting node
56 * Path - Path to object from starting node 56 * path - Path to object from starting node
57 * expected_return_types - Bitmap of allowed return types 57 * expected_return_types - Bitmap of allowed return types
58 * return_desc - Where a return value is stored 58 * return_desc - Where a return value is stored
59 * 59 *
@@ -187,7 +187,7 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
187 * 187 *
188 * PARAMETERS: object_name - Object name to be evaluated 188 * PARAMETERS: object_name - Object name to be evaluated
189 * device_node - Node for the device 189 * device_node - Node for the device
190 * Value - Where the value is returned 190 * value - Where the value is returned
191 * 191 *
192 * RETURN: Status 192 * RETURN: Status
193 * 193 *
@@ -229,7 +229,7 @@ acpi_ut_evaluate_numeric_object(char *object_name,
229 * FUNCTION: acpi_ut_execute_STA 229 * FUNCTION: acpi_ut_execute_STA
230 * 230 *
231 * PARAMETERS: device_node - Node for the device 231 * PARAMETERS: device_node - Node for the device
232 * Flags - Where the status flags are returned 232 * flags - Where the status flags are returned
233 * 233 *
234 * RETURN: Status 234 * RETURN: Status
235 * 235 *
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
new file mode 100644
index 000000000000..23b98945f6b7
--- /dev/null
+++ b/drivers/acpi/acpica/utexcep.c
@@ -0,0 +1,153 @@
1/*******************************************************************************
2 *
3 * Module Name: utexcep - Exception code support
4 *
5 ******************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#define ACPI_DEFINE_EXCEPTION_TABLE
45#include <linux/export.h>
46#include <acpi/acpi.h>
47#include "accommon.h"
48
49#define _COMPONENT ACPI_UTILITIES
50ACPI_MODULE_NAME("utexcep")
51
52/*******************************************************************************
53 *
54 * FUNCTION: acpi_format_exception
55 *
56 * PARAMETERS: status - The acpi_status code to be formatted
57 *
58 * RETURN: A string containing the exception text. A valid pointer is
59 * always returned.
60 *
61 * DESCRIPTION: This function translates an ACPI exception into an ASCII
62 * string. Returns "unknown status" string for invalid codes.
63 *
64 ******************************************************************************/
65const char *acpi_format_exception(acpi_status status)
66{
67 const char *exception = NULL;
68
69 ACPI_FUNCTION_ENTRY();
70
71 exception = acpi_ut_validate_exception(status);
72 if (!exception) {
73
74 /* Exception code was not recognized */
75
76 ACPI_ERROR((AE_INFO,
77 "Unknown exception code: 0x%8.8X", status));
78
79 exception = "UNKNOWN_STATUS_CODE";
80 }
81
82 return (ACPI_CAST_PTR(const char, exception));
83}
84
85ACPI_EXPORT_SYMBOL(acpi_format_exception)
86
87/*******************************************************************************
88 *
89 * FUNCTION: acpi_ut_validate_exception
90 *
91 * PARAMETERS: status - The acpi_status code to be formatted
92 *
93 * RETURN: A string containing the exception text. NULL if exception is
94 * not valid.
95 *
96 * DESCRIPTION: This function validates and translates an ACPI exception into
97 * an ASCII string.
98 *
99 ******************************************************************************/
100const char *acpi_ut_validate_exception(acpi_status status)
101{
102 u32 sub_status;
103 const char *exception = NULL;
104
105 ACPI_FUNCTION_ENTRY();
106
107 /*
108 * Status is composed of two parts, a "type" and an actual code
109 */
110 sub_status = (status & ~AE_CODE_MASK);
111
112 switch (status & AE_CODE_MASK) {
113 case AE_CODE_ENVIRONMENTAL:
114
115 if (sub_status <= AE_CODE_ENV_MAX) {
116 exception = acpi_gbl_exception_names_env[sub_status];
117 }
118 break;
119
120 case AE_CODE_PROGRAMMER:
121
122 if (sub_status <= AE_CODE_PGM_MAX) {
123 exception = acpi_gbl_exception_names_pgm[sub_status];
124 }
125 break;
126
127 case AE_CODE_ACPI_TABLES:
128
129 if (sub_status <= AE_CODE_TBL_MAX) {
130 exception = acpi_gbl_exception_names_tbl[sub_status];
131 }
132 break;
133
134 case AE_CODE_AML:
135
136 if (sub_status <= AE_CODE_AML_MAX) {
137 exception = acpi_gbl_exception_names_aml[sub_status];
138 }
139 break;
140
141 case AE_CODE_CONTROL:
142
143 if (sub_status <= AE_CODE_CTRL_MAX) {
144 exception = acpi_gbl_exception_names_ctrl[sub_status];
145 }
146 break;
147
148 default:
149 break;
150 }
151
152 return (ACPI_CAST_PTR(const char, exception));
153}
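Typical caller-side use of the interface moved into this new file; because acpi_format_exception never returns NULL (it falls back to "UNKNOWN_STATUS_CODE"), the result can be passed straight to printf-style functions:

    acpi_status status = AE_NOT_FOUND;

    acpi_os_printf("Evaluation failed: %s\n", acpi_format_exception(status));
    /* prints: Evaluation failed: AE_NOT_FOUND */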
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 90f53b42eca9..ed1893155f8b 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -247,8 +247,9 @@ struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] =
247 * 247 *
248 * RETURN: Status 248 * RETURN: Status
249 * 249 *
250 * DESCRIPTION: Init library globals. All globals that require specific 250 * DESCRIPTION: Initialize ACPICA globals. All globals that require specific
251 * initialization should be initialized here! 251 * initialization should be initialized here. This allows for
252 * a warm restart.
252 * 253 *
253 ******************************************************************************/ 254 ******************************************************************************/
254 255
@@ -284,7 +285,7 @@ acpi_status acpi_ut_init_globals(void)
284 acpi_gbl_owner_id_mask[i] = 0; 285 acpi_gbl_owner_id_mask[i] = 0;
285 } 286 }
286 287
287 /* Last owner_iD is never valid */ 288 /* Last owner_ID is never valid */
288 289
289 acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000; 290 acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;
290 291
@@ -304,8 +305,8 @@ acpi_status acpi_ut_init_globals(void)
304 305
305 /* Global handlers */ 306 /* Global handlers */
306 307
307 acpi_gbl_system_notify.handler = NULL; 308 acpi_gbl_global_notify[0].handler = NULL;
308 acpi_gbl_device_notify.handler = NULL; 309 acpi_gbl_global_notify[1].handler = NULL;
309 acpi_gbl_exception_handler = NULL; 310 acpi_gbl_exception_handler = NULL;
310 acpi_gbl_init_handler = NULL; 311 acpi_gbl_init_handler = NULL;
311 acpi_gbl_table_handler = NULL; 312 acpi_gbl_table_handler = NULL;
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index c92eb1d93785..5d84e1954575 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Module Name: utids - support for device IDs - HID, UID, CID 3 * Module Name: utids - support for device Ids - HID, UID, CID
4 * 4 *
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index 155fd786d0f2..b1eb7f17e110 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -52,7 +52,7 @@ ACPI_MODULE_NAME("utlock")
52 * FUNCTION: acpi_ut_create_rw_lock 52 * FUNCTION: acpi_ut_create_rw_lock
53 * acpi_ut_delete_rw_lock 53 * acpi_ut_delete_rw_lock
54 * 54 *
55 * PARAMETERS: Lock - Pointer to a valid RW lock 55 * PARAMETERS: lock - Pointer to a valid RW lock
56 * 56 *
57 * RETURN: Status 57 * RETURN: Status
58 * 58 *
@@ -89,7 +89,7 @@ void acpi_ut_delete_rw_lock(struct acpi_rw_lock *lock)
89 * FUNCTION: acpi_ut_acquire_read_lock 89 * FUNCTION: acpi_ut_acquire_read_lock
90 * acpi_ut_release_read_lock 90 * acpi_ut_release_read_lock
91 * 91 *
92 * PARAMETERS: Lock - Pointer to a valid RW lock 92 * PARAMETERS: lock - Pointer to a valid RW lock
93 * 93 *
94 * RETURN: Status 94 * RETURN: Status
95 * 95 *
@@ -149,7 +149,7 @@ acpi_status acpi_ut_release_read_lock(struct acpi_rw_lock *lock)
149 * FUNCTION: acpi_ut_acquire_write_lock 149 * FUNCTION: acpi_ut_acquire_write_lock
150 * acpi_ut_release_write_lock 150 * acpi_ut_release_write_lock
151 * 151 *
152 * PARAMETERS: Lock - Pointer to a valid RW lock 152 * PARAMETERS: lock - Pointer to a valid RW lock
153 * 153 *
154 * RETURN: Status 154 * RETURN: Status
155 * 155 *
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 2491a552b0e6..d88a8aaab2a6 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -73,8 +73,8 @@ typedef union uint64_overlay {
73 * 73 *
74 * FUNCTION: acpi_ut_short_divide 74 * FUNCTION: acpi_ut_short_divide
75 * 75 *
76 * PARAMETERS: Dividend - 64-bit dividend 76 * PARAMETERS: dividend - 64-bit dividend
77 * Divisor - 32-bit divisor 77 * divisor - 32-bit divisor
78 * out_quotient - Pointer to where the quotient is returned 78 * out_quotient - Pointer to where the quotient is returned
79 * out_remainder - Pointer to where the remainder is returned 79 * out_remainder - Pointer to where the remainder is returned
80 * 80 *
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 86f19db74e05..33c6cf7ff467 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -50,79 +50,41 @@
50#define _COMPONENT ACPI_UTILITIES 50#define _COMPONENT ACPI_UTILITIES
51ACPI_MODULE_NAME("utmisc") 51ACPI_MODULE_NAME("utmisc")
52 52
53#if defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP
53/******************************************************************************* 54/*******************************************************************************
54 * 55 *
55 * FUNCTION: acpi_ut_validate_exception 56 * FUNCTION: ut_convert_backslashes
56 * 57 *
57 * PARAMETERS: Status - The acpi_status code to be formatted 58 * PARAMETERS: pathname - File pathname string to be converted
58 * 59 *
59 * RETURN: A string containing the exception text. NULL if exception is 60 * RETURN: Modifies the input Pathname
60 * not valid.
61 * 61 *
62 * DESCRIPTION: This function validates and translates an ACPI exception into 62 * DESCRIPTION: Convert all backslashes (0x5C) to forward slashes (0x2F) within
63 * an ASCII string. 63 * the entire input file pathname string.
64 * 64 *
65 ******************************************************************************/ 65 ******************************************************************************/
66const char *acpi_ut_validate_exception(acpi_status status) 66void ut_convert_backslashes(char *pathname)
67{ 67{
68 u32 sub_status;
69 const char *exception = NULL;
70 68
71 ACPI_FUNCTION_ENTRY(); 69 if (!pathname) {
72 70 return;
73 /* 71 }
74 * Status is composed of two parts, a "type" and an actual code
75 */
76 sub_status = (status & ~AE_CODE_MASK);
77
78 switch (status & AE_CODE_MASK) {
79 case AE_CODE_ENVIRONMENTAL:
80
81 if (sub_status <= AE_CODE_ENV_MAX) {
82 exception = acpi_gbl_exception_names_env[sub_status];
83 }
84 break;
85
86 case AE_CODE_PROGRAMMER:
87
88 if (sub_status <= AE_CODE_PGM_MAX) {
89 exception = acpi_gbl_exception_names_pgm[sub_status];
90 }
91 break;
92
93 case AE_CODE_ACPI_TABLES:
94
95 if (sub_status <= AE_CODE_TBL_MAX) {
96 exception = acpi_gbl_exception_names_tbl[sub_status];
97 }
98 break;
99
100 case AE_CODE_AML:
101
102 if (sub_status <= AE_CODE_AML_MAX) {
103 exception = acpi_gbl_exception_names_aml[sub_status];
104 }
105 break;
106
107 case AE_CODE_CONTROL:
108 72
109 if (sub_status <= AE_CODE_CTRL_MAX) { 73 while (*pathname) {
110 exception = acpi_gbl_exception_names_ctrl[sub_status]; 74 if (*pathname == '\\') {
75 *pathname = '/';
111 } 76 }
112 break;
113 77
114 default: 78 pathname++;
115 break;
116 } 79 }
117
118 return (ACPI_CAST_PTR(const char, exception));
119} 80}
81#endif
120 82
121/******************************************************************************* 83/*******************************************************************************
122 * 84 *
123 * FUNCTION: acpi_ut_is_pci_root_bridge 85 * FUNCTION: acpi_ut_is_pci_root_bridge
124 * 86 *
125 * PARAMETERS: Id - The HID/CID in string format 87 * PARAMETERS: id - The HID/CID in string format
126 * 88 *
127 * RETURN: TRUE if the Id is a match for a PCI/PCI-Express Root Bridge 89 * RETURN: TRUE if the Id is a match for a PCI/PCI-Express Root Bridge
128 * 90 *
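The new ut_convert_backslashes helper added above edits its argument in place, so it must be handed writable storage rather than a string literal:

    char pathname[] = "C:\\acpi\\tables\\dsdt.aml";

    ut_convert_backslashes(pathname);
    /* pathname is now "C:/acpi/tables/dsdt.aml" */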
@@ -150,7 +112,7 @@ u8 acpi_ut_is_pci_root_bridge(char *id)
150 * 112 *
151 * FUNCTION: acpi_ut_is_aml_table 113 * FUNCTION: acpi_ut_is_aml_table
152 * 114 *
153 * PARAMETERS: Table - An ACPI table 115 * PARAMETERS: table - An ACPI table
154 * 116 *
155 * RETURN: TRUE if table contains executable AML; FALSE otherwise 117 * RETURN: TRUE if table contains executable AML; FALSE otherwise
156 * 118 *
@@ -284,7 +246,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
284 * 246 *
285 * FUNCTION: acpi_ut_release_owner_id 247 * FUNCTION: acpi_ut_release_owner_id
286 * 248 *
287 * PARAMETERS: owner_id_ptr - Pointer to a previously allocated owner_iD 249 * PARAMETERS: owner_id_ptr - Pointer to a previously allocated owner_ID
288 * 250 *
289 * RETURN: None. No error is returned because we are either exiting a 251 * RETURN: None. No error is returned because we are either exiting a
290 * control method or unloading a table. Either way, we would 252 * control method or unloading a table. Either way, we would
@@ -307,7 +269,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
307 269
308 *owner_id_ptr = 0; 270 *owner_id_ptr = 0;
309 271
310 /* Zero is not a valid owner_iD */ 272 /* Zero is not a valid owner_ID */
311 273
312 if (owner_id == 0) { 274 if (owner_id == 0) {
313 ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id)); 275 ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id));
@@ -381,7 +343,7 @@ void acpi_ut_strupr(char *src_string)
381 * 343 *
382 * FUNCTION: acpi_ut_print_string 344 * FUNCTION: acpi_ut_print_string
383 * 345 *
384 * PARAMETERS: String - Null terminated ASCII string 346 * PARAMETERS: string - Null terminated ASCII string
385 * max_length - Maximum output length 347 * max_length - Maximum output length
386 * 348 *
387 * RETURN: None 349 * RETURN: None
@@ -467,7 +429,7 @@ void acpi_ut_print_string(char *string, u8 max_length)
467 * 429 *
468 * FUNCTION: acpi_ut_dword_byte_swap 430 * FUNCTION: acpi_ut_dword_byte_swap
469 * 431 *
470 * PARAMETERS: Value - Value to be converted 432 * PARAMETERS: value - Value to be converted
471 * 433 *
472 * RETURN: u32 integer with bytes swapped 434 * RETURN: u32 integer with bytes swapped
473 * 435 *
@@ -537,9 +499,9 @@ void acpi_ut_set_integer_width(u8 revision)
537 * 499 *
538 * FUNCTION: acpi_ut_display_init_pathname 500 * FUNCTION: acpi_ut_display_init_pathname
539 * 501 *
540 * PARAMETERS: Type - Object type of the node 502 * PARAMETERS: type - Object type of the node
541 * obj_handle - Handle whose pathname will be displayed 503 * obj_handle - Handle whose pathname will be displayed
542 * Path - Additional path string to be appended. 504 * path - Additional path string to be appended.
543 * (NULL if no extra path) 505 * (NULL if no extra path)
544 * 506 *
545 * RETURN: acpi_status 507 * RETURN: acpi_status
@@ -604,8 +566,8 @@ acpi_ut_display_init_pathname(u8 type,
604 * 566 *
605 * FUNCTION: acpi_ut_valid_acpi_char 567 * FUNCTION: acpi_ut_valid_acpi_char
606 * 568 *
607 * PARAMETERS: Char - The character to be examined 569 * PARAMETERS: char - The character to be examined
608 * Position - Byte position (0-3) 570 * position - Byte position (0-3)
609 * 571 *
610 * RETURN: TRUE if the character is valid, FALSE otherwise 572 * RETURN: TRUE if the character is valid, FALSE otherwise
611 * 573 *
@@ -640,7 +602,7 @@ u8 acpi_ut_valid_acpi_char(char character, u32 position)
640 * 602 *
641 * FUNCTION: acpi_ut_valid_acpi_name 603 * FUNCTION: acpi_ut_valid_acpi_name
642 * 604 *
643 * PARAMETERS: Name - The name to be examined 605 * PARAMETERS: name - The name to be examined
644 * 606 *
645 * RETURN: TRUE if the name is valid, FALSE otherwise 607 * RETURN: TRUE if the name is valid, FALSE otherwise
646 * 608 *
@@ -671,7 +633,7 @@ u8 acpi_ut_valid_acpi_name(u32 name)
671 * 633 *
672 * FUNCTION: acpi_ut_repair_name 634 * FUNCTION: acpi_ut_repair_name
673 * 635 *
674 * PARAMETERS: Name - The ACPI name to be repaired 636 * PARAMETERS: name - The ACPI name to be repaired
675 * 637 *
676 * RETURN: Repaired version of the name 638 * RETURN: Repaired version of the name
677 * 639 *
@@ -705,8 +667,8 @@ acpi_name acpi_ut_repair_name(char *name)
705 * 667 *
706 * FUNCTION: acpi_ut_strtoul64 668 * FUNCTION: acpi_ut_strtoul64
707 * 669 *
708 * PARAMETERS: String - Null terminated string 670 * PARAMETERS: string - Null terminated string
709 * Base - Radix of the string: 16 or ACPI_ANY_BASE; 671 * base - Radix of the string: 16 or ACPI_ANY_BASE;
710 * ACPI_ANY_BASE means 'in behalf of to_integer' 672 * ACPI_ANY_BASE means 'in behalf of to_integer'
711 * ret_integer - Where the converted integer is returned 673 * ret_integer - Where the converted integer is returned
712 * 674 *
@@ -755,7 +717,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer)
755 717
756 if (to_integer_op) { 718 if (to_integer_op) {
757 /* 719 /*
758 * Base equal to ACPI_ANY_BASE means 'to_integer operation case'. 720 * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
759 * We need to determine if it is decimal or hexadecimal. 721 * We need to determine if it is decimal or hexadecimal.
760 */ 722 */
761 if ((*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) { 723 if ((*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) {
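Given the ToInteger rule described above (a 0x prefix selects hex, otherwise decimal), the two modes behave as follows; expected values assume the documented semantics, and status checks are elided:

    u64 value;

    /* ACPI_ANY_BASE: radix inferred from the string (ToInteger case) */
    acpi_ut_strtoul64("0x10", ACPI_ANY_BASE, &value);  /* value == 16 */
    acpi_ut_strtoul64("10", ACPI_ANY_BASE, &value);    /* value == 10 */

    /* Explicit base 16: no prefix required */
    acpi_ut_strtoul64("10", 16, &value);               /* value == 16 */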
@@ -878,8 +840,8 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer)
878 * 840 *
879 * FUNCTION: acpi_ut_create_update_state_and_push 841 * FUNCTION: acpi_ut_create_update_state_and_push
880 * 842 *
881 * PARAMETERS: Object - Object to be added to the new state 843 * PARAMETERS: object - Object to be added to the new state
882 * Action - Increment/Decrement 844 * action - Increment/Decrement
883 * state_list - List the state will be added to 845 * state_list - List the state will be added to
884 * 846 *
885 * RETURN: Status 847 * RETURN: Status
@@ -919,7 +881,7 @@ acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
919 * PARAMETERS: source_object - The package to walk 881 * PARAMETERS: source_object - The package to walk
920 * target_object - Target object (if package is being copied) 882 * target_object - Target object (if package is being copied)
921 * walk_callback - Called once for each package element 883 * walk_callback - Called once for each package element
922 * Context - Passed to the callback function 884 * context - Passed to the callback function
923 * 885 *
924 * RETURN: Status 886 * RETURN: Status
925 * 887 *
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 43174df33121..296baa676bc5 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -147,7 +147,7 @@ void acpi_ut_mutex_terminate(void)
147 * 147 *
148 * FUNCTION: acpi_ut_create_mutex 148 * FUNCTION: acpi_ut_create_mutex
149 * 149 *
150 * PARAMETERS: mutex_iD - ID of the mutex to be created 150 * PARAMETERS: mutex_ID - ID of the mutex to be created
151 * 151 *
152 * RETURN: Status 152 * RETURN: Status
153 * 153 *
@@ -176,7 +176,7 @@ static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id)
176 * 176 *
177 * FUNCTION: acpi_ut_delete_mutex 177 * FUNCTION: acpi_ut_delete_mutex
178 * 178 *
179 * PARAMETERS: mutex_iD - ID of the mutex to be deleted 179 * PARAMETERS: mutex_ID - ID of the mutex to be deleted
180 * 180 *
181 * RETURN: Status 181 * RETURN: Status
182 * 182 *
@@ -199,7 +199,7 @@ static void acpi_ut_delete_mutex(acpi_mutex_handle mutex_id)
199 * 199 *
200 * FUNCTION: acpi_ut_acquire_mutex 200 * FUNCTION: acpi_ut_acquire_mutex
201 * 201 *
202 * PARAMETERS: mutex_iD - ID of the mutex to be acquired 202 * PARAMETERS: mutex_ID - ID of the mutex to be acquired
203 * 203 *
204 * RETURN: Status 204 * RETURN: Status
205 * 205 *
@@ -283,7 +283,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
283 * 283 *
284 * FUNCTION: acpi_ut_release_mutex 284 * FUNCTION: acpi_ut_release_mutex
285 * 285 *
286 * PARAMETERS: mutex_iD - ID of the mutex to be released 286 * PARAMETERS: mutex_ID - ID of the mutex to be released
287 * 287 *
288 * RETURN: Status 288 * RETURN: Status
289 * 289 *
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index b112744fc9ae..655f0799a391 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -69,7 +69,7 @@ acpi_ut_get_element_length(u8 object_type,
69 * PARAMETERS: module_name - Source file name of caller 69 * PARAMETERS: module_name - Source file name of caller
70 * line_number - Line number of caller 70 * line_number - Line number of caller
71 * component_id - Component type of caller 71 * component_id - Component type of caller
72 * Type - ACPI Type of the new object 72 * type - ACPI Type of the new object
73 * 73 *
74 * RETURN: A new internal object, null on failure 74 * RETURN: A new internal object, null on failure
75 * 75 *
@@ -150,7 +150,7 @@ union acpi_operand_object *acpi_ut_create_internal_object_dbg(const char
150 * 150 *
151 * FUNCTION: acpi_ut_create_package_object 151 * FUNCTION: acpi_ut_create_package_object
152 * 152 *
153 * PARAMETERS: Count - Number of package elements 153 * PARAMETERS: count - Number of package elements
154 * 154 *
155 * RETURN: Pointer to a new Package object, null on failure 155 * RETURN: Pointer to a new Package object, null on failure
156 * 156 *
@@ -323,11 +323,11 @@ union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size)
323 * 323 *
324 * FUNCTION: acpi_ut_valid_internal_object 324 * FUNCTION: acpi_ut_valid_internal_object
325 * 325 *
326 * PARAMETERS: Object - Object to be validated 326 * PARAMETERS: object - Object to be validated
327 * 327 *
328 * RETURN: TRUE if object is valid, FALSE otherwise 328 * RETURN: TRUE if object is valid, FALSE otherwise
329 * 329 *
330 * DESCRIPTION: Validate a pointer to be a union acpi_operand_object 330 * DESCRIPTION: Validate a pointer to be of type union acpi_operand_object
331 * 331 *
332 ******************************************************************************/ 332 ******************************************************************************/
333 333
@@ -348,7 +348,7 @@ u8 acpi_ut_valid_internal_object(void *object)
348 switch (ACPI_GET_DESCRIPTOR_TYPE(object)) { 348 switch (ACPI_GET_DESCRIPTOR_TYPE(object)) {
349 case ACPI_DESC_TYPE_OPERAND: 349 case ACPI_DESC_TYPE_OPERAND:
350 350
351 /* The object appears to be a valid union acpi_operand_object */ 351 /* The object appears to be a valid union acpi_operand_object */
352 352
353 return (TRUE); 353 return (TRUE);
354 354
@@ -407,7 +407,7 @@ void *acpi_ut_allocate_object_desc_dbg(const char *module_name,
407 * 407 *
408 * FUNCTION: acpi_ut_delete_object_desc 408 * FUNCTION: acpi_ut_delete_object_desc
409 * 409 *
410 * PARAMETERS: Object - An Acpi internal object to be deleted 410 * PARAMETERS: object - An Acpi internal object to be deleted
411 * 411 *
412 * RETURN: None. 412 * RETURN: None.
413 * 413 *
@@ -419,7 +419,7 @@ void acpi_ut_delete_object_desc(union acpi_operand_object *object)
419{ 419{
420 ACPI_FUNCTION_TRACE_PTR(ut_delete_object_desc, object); 420 ACPI_FUNCTION_TRACE_PTR(ut_delete_object_desc, object);
421 421
422 /* Object must be a union acpi_operand_object */ 422 /* Object must be a union acpi_operand_object */
423 423
424 if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND) { 424 if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND) {
425 ACPI_ERROR((AE_INFO, 425 ACPI_ERROR((AE_INFO,
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 2360cf70c18c..34ef0bd7e4b4 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -68,7 +68,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
68 {"Windows 2001.1", NULL, 0, ACPI_OSI_WINSRV_2003}, /* Windows Server 2003 */ 68 {"Windows 2001.1", NULL, 0, ACPI_OSI_WINSRV_2003}, /* Windows Server 2003 */
69 {"Windows 2001 SP2", NULL, 0, ACPI_OSI_WIN_XP_SP2}, /* Windows XP SP2 */ 69 {"Windows 2001 SP2", NULL, 0, ACPI_OSI_WIN_XP_SP2}, /* Windows XP SP2 */
70 {"Windows 2001.1 SP1", NULL, 0, ACPI_OSI_WINSRV_2003_SP1}, /* Windows Server 2003 SP1 - Added 03/2006 */ 70 {"Windows 2001.1 SP1", NULL, 0, ACPI_OSI_WINSRV_2003_SP1}, /* Windows Server 2003 SP1 - Added 03/2006 */
71 {"Windows 2006", NULL, 0, ACPI_OSI_WIN_VISTA}, /* Windows Vista - Added 03/2006 */ 71 {"Windows 2006", NULL, 0, ACPI_OSI_WIN_VISTA}, /* Windows vista - Added 03/2006 */
72 {"Windows 2006.1", NULL, 0, ACPI_OSI_WINSRV_2008}, /* Windows Server 2008 - Added 09/2009 */ 72 {"Windows 2006.1", NULL, 0, ACPI_OSI_WINSRV_2008}, /* Windows Server 2008 - Added 09/2009 */
73 {"Windows 2006 SP1", NULL, 0, ACPI_OSI_WIN_VISTA_SP1}, /* Windows Vista SP1 - Added 09/2009 */ 73 {"Windows 2006 SP1", NULL, 0, ACPI_OSI_WIN_VISTA_SP1}, /* Windows Vista SP1 - Added 09/2009 */
74 {"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2}, /* Windows Vista SP2 - Added 09/2010 */ 74 {"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2}, /* Windows Vista SP2 - Added 09/2010 */
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 9d441ea70305..e38bef4980bc 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -356,13 +356,13 @@ static const u8 acpi_gbl_resource_types[] = {
356 ACPI_SMALL_VARIABLE_LENGTH, /* 06 start_dependent_functions */ 356 ACPI_SMALL_VARIABLE_LENGTH, /* 06 start_dependent_functions */
357 ACPI_FIXED_LENGTH, /* 07 end_dependent_functions */ 357 ACPI_FIXED_LENGTH, /* 07 end_dependent_functions */
358 ACPI_FIXED_LENGTH, /* 08 IO */ 358 ACPI_FIXED_LENGTH, /* 08 IO */
359 ACPI_FIXED_LENGTH, /* 09 fixed_iO */ 359 ACPI_FIXED_LENGTH, /* 09 fixed_IO */
360 ACPI_FIXED_LENGTH, /* 0_a fixed_dMA */ 360 ACPI_FIXED_LENGTH, /* 0A fixed_DMA */
361 0, 361 0,
362 0, 362 0,
363 0, 363 0,
364 ACPI_VARIABLE_LENGTH, /* 0_e vendor_short */ 364 ACPI_VARIABLE_LENGTH, /* 0E vendor_short */
365 ACPI_FIXED_LENGTH, /* 0_f end_tag */ 365 ACPI_FIXED_LENGTH, /* 0F end_tag */
366 366
367 /* Large descriptors */ 367 /* Large descriptors */
368 368
@@ -375,16 +375,16 @@ static const u8 acpi_gbl_resource_types[] = {
375 ACPI_FIXED_LENGTH, /* 06 memory32_fixed */ 375 ACPI_FIXED_LENGTH, /* 06 memory32_fixed */
376 ACPI_VARIABLE_LENGTH, /* 07 Dword* address */ 376 ACPI_VARIABLE_LENGTH, /* 07 Dword* address */
377 ACPI_VARIABLE_LENGTH, /* 08 Word* address */ 377 ACPI_VARIABLE_LENGTH, /* 08 Word* address */
378 ACPI_VARIABLE_LENGTH, /* 09 extended_iRQ */ 378 ACPI_VARIABLE_LENGTH, /* 09 extended_IRQ */
379 ACPI_VARIABLE_LENGTH, /* 0_a Qword* address */ 379 ACPI_VARIABLE_LENGTH, /* 0A Qword* address */
380 ACPI_FIXED_LENGTH, /* 0_b Extended* address */ 380 ACPI_FIXED_LENGTH, /* 0B Extended* address */
381 ACPI_VARIABLE_LENGTH, /* 0_c Gpio* */ 381 ACPI_VARIABLE_LENGTH, /* 0C Gpio* */
382 0, 382 0,
383 ACPI_VARIABLE_LENGTH /* 0_e *serial_bus */ 383 ACPI_VARIABLE_LENGTH /* 0E *serial_bus */
384}; 384};
385 385
386/* 386/*
387 * For the i_aSL compiler/disassembler, we don't want any error messages 387 * For the iASL compiler/disassembler, we don't want any error messages
388 * because the disassembler uses the resource validation code to determine 388 * because the disassembler uses the resource validation code to determine
389 * if Buffer objects are actually Resource Templates. 389 * if Buffer objects are actually Resource Templates.
390 */ 390 */
@@ -398,11 +398,11 @@ static const u8 acpi_gbl_resource_types[] = {
398 * 398 *
399 * FUNCTION: acpi_ut_walk_aml_resources 399 * FUNCTION: acpi_ut_walk_aml_resources
400 * 400 *
401 * PARAMETERS: Aml - Pointer to the raw AML resource template 401 * PARAMETERS: aml - Pointer to the raw AML resource template
402 * aml_length - Length of the entire template 402 * aml_length - Length of the entire template
403 * user_function - Called once for each descriptor found. If 403 * user_function - Called once for each descriptor found. If
404 * NULL, a pointer to the end_tag is returned 404 * NULL, a pointer to the end_tag is returned
405 * Context - Passed to user_function 405 * context - Passed to user_function
406 * 406 *
407 * RETURN: Status 407 * RETURN: Status
408 * 408 *
@@ -513,7 +513,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
513 * 513 *
514 * FUNCTION: acpi_ut_validate_resource 514 * FUNCTION: acpi_ut_validate_resource
515 * 515 *
516 * PARAMETERS: Aml - Pointer to the raw AML resource descriptor 516 * PARAMETERS: aml - Pointer to the raw AML resource descriptor
517 * return_index - Where the resource index is returned. NULL 517 * return_index - Where the resource index is returned. NULL
518 * if the index is not required. 518 * if the index is not required.
519 * 519 *
@@ -664,7 +664,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
664 * 664 *
665 * FUNCTION: acpi_ut_get_resource_type 665 * FUNCTION: acpi_ut_get_resource_type
666 * 666 *
667 * PARAMETERS: Aml - Pointer to the raw AML resource descriptor 667 * PARAMETERS: aml - Pointer to the raw AML resource descriptor
668 * 668 *
669 * RETURN: The Resource Type with no extraneous bits (except the 669 * RETURN: The Resource Type with no extraneous bits (except the
670 * Large/Small descriptor bit -- this is left alone) 670 * Large/Small descriptor bit -- this is left alone)
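Per the RETURN description above, only the type bits survive: for a large descriptor the whole first byte is the type (bit 7 set), while a small descriptor keeps its name bits and drops the length bits. A sketch assuming the standard AML resource descriptor encoding:

    /* Assumed decoding: large = bit 7; small name field = bits [6:3] */
    static unsigned char get_resource_type_sketch(unsigned char first_byte)
    {
        if (first_byte & 0x80) {
            return first_byte;      /* large: whole byte is the type */
        }

        return first_byte & 0x78;   /* small: strip length bits [2:0] */
    }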
@@ -698,7 +698,7 @@ u8 acpi_ut_get_resource_type(void *aml)
698 * 698 *
699 * FUNCTION: acpi_ut_get_resource_length 699 * FUNCTION: acpi_ut_get_resource_length
700 * 700 *
701 * PARAMETERS: Aml - Pointer to the raw AML resource descriptor 701 * PARAMETERS: aml - Pointer to the raw AML resource descriptor
702 * 702 *
703 * RETURN: Byte Length 703 * RETURN: Byte Length
704 * 704 *
@@ -738,7 +738,7 @@ u16 acpi_ut_get_resource_length(void *aml)
738 * 738 *
739 * FUNCTION: acpi_ut_get_resource_header_length 739 * FUNCTION: acpi_ut_get_resource_header_length
740 * 740 *
741 * PARAMETERS: Aml - Pointer to the raw AML resource descriptor 741 * PARAMETERS: aml - Pointer to the raw AML resource descriptor
742 * 742 *
743 * RETURN: Length of the AML header (depends on large/small descriptor) 743 * RETURN: Length of the AML header (depends on large/small descriptor)
744 * 744 *
@@ -763,7 +763,7 @@ u8 acpi_ut_get_resource_header_length(void *aml)
763 * 763 *
764 * FUNCTION: acpi_ut_get_descriptor_length 764 * FUNCTION: acpi_ut_get_descriptor_length
765 * 765 *
766 * PARAMETERS: Aml - Pointer to the raw AML resource descriptor 766 * PARAMETERS: aml - Pointer to the raw AML resource descriptor
767 * 767 *
768 * RETURN: Byte length 768 * RETURN: Byte length
769 * 769 *
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index 4267477c2797..a1c988260073 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -51,8 +51,8 @@ ACPI_MODULE_NAME("utstate")
51 * 51 *
52 * FUNCTION: acpi_ut_create_pkg_state_and_push 52 * FUNCTION: acpi_ut_create_pkg_state_and_push
53 * 53 *
54 * PARAMETERS: Object - Object to be added to the new state 54 * PARAMETERS: object - Object to be added to the new state
55 * Action - Increment/Decrement 55 * action - Increment/Decrement
56 * state_list - List the state will be added to 56 * state_list - List the state will be added to
57 * 57 *
58 * RETURN: Status 58 * RETURN: Status
@@ -85,7 +85,7 @@ acpi_ut_create_pkg_state_and_push(void *internal_object,
85 * FUNCTION: acpi_ut_push_generic_state 85 * FUNCTION: acpi_ut_push_generic_state
86 * 86 *
87 * PARAMETERS: list_head - Head of the state stack 87 * PARAMETERS: list_head - Head of the state stack
88 * State - State object to push 88 * state - State object to push
89 * 89 *
90 * RETURN: None 90 * RETURN: None
91 * 91 *
@@ -214,8 +214,8 @@ struct acpi_thread_state *acpi_ut_create_thread_state(void)
214 * 214 *
215 * FUNCTION: acpi_ut_create_update_state 215 * FUNCTION: acpi_ut_create_update_state
216 * 216 *
217 * PARAMETERS: Object - Initial Object to be installed in the state 217 * PARAMETERS: object - Initial Object to be installed in the state
218 * Action - Update action to be performed 218 * action - Update action to be performed
219 * 219 *
220 * RETURN: New state object, null on failure 220 * RETURN: New state object, null on failure
221 * 221 *
@@ -252,8 +252,8 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
252 * 252 *
253 * FUNCTION: acpi_ut_create_pkg_state 253 * FUNCTION: acpi_ut_create_pkg_state
254 * 254 *
255 * PARAMETERS: Object - Initial Object to be installed in the state 255 * PARAMETERS: object - Initial Object to be installed in the state
256 * Action - Update action to be performed 256 * action - Update action to be performed
257 * 257 *
258 * RETURN: New state object, null on failure 258 * RETURN: New state object, null on failure
259 * 259 *
@@ -325,7 +325,7 @@ union acpi_generic_state *acpi_ut_create_control_state(void)
325 * 325 *
326 * FUNCTION: acpi_ut_delete_generic_state 326 * FUNCTION: acpi_ut_delete_generic_state
327 * 327 *
328 * PARAMETERS: State - The state object to be deleted 328 * PARAMETERS: state - The state object to be deleted
329 * 329 *
330 * RETURN: None 330 * RETURN: None
331 * 331 *
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index afa94f51ff0b..534179f1177b 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -131,7 +131,7 @@ acpi_status __init acpi_initialize_subsystem(void)
131 * 131 *
132 * FUNCTION: acpi_enable_subsystem 132 * FUNCTION: acpi_enable_subsystem
133 * 133 *
134 * PARAMETERS: Flags - Init/enable Options 134 * PARAMETERS: flags - Init/enable Options
135 * 135 *
136 * RETURN: Status 136 * RETURN: Status
137 * 137 *
@@ -234,7 +234,7 @@ ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
234 * 234 *
235 * FUNCTION: acpi_initialize_objects 235 * FUNCTION: acpi_initialize_objects
236 * 236 *
237 * PARAMETERS: Flags - Init/enable Options 237 * PARAMETERS: flags - Init/enable Options
238 * 238 *
239 * RETURN: Status 239 * RETURN: Status
240 * 240 *
@@ -409,7 +409,7 @@ ACPI_EXPORT_SYMBOL(acpi_subsystem_status)
409 * PARAMETERS: out_buffer - A buffer to receive the resources for the 409 * PARAMETERS: out_buffer - A buffer to receive the resources for the
410 * device 410 * device
411 * 411 *
412 * RETURN: Status - the status of the call 412 * RETURN: status - the status of the call
413 * 413 *
414 * DESCRIPTION: This function is called to get information about the current 414 * DESCRIPTION: This function is called to get information about the current
415 * state of the ACPI subsystem. It will return system information 415 * state of the ACPI subsystem. It will return system information
@@ -480,8 +480,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_system_info)
480 * 480 *
481 * FUNCTION: acpi_install_initialization_handler 481 * FUNCTION: acpi_install_initialization_handler
482 * 482 *
483 * PARAMETERS: Handler - Callback procedure 483 * PARAMETERS: handler - Callback procedure
484 * Function - Not (currently) used, see below 484 * function - Not (currently) used, see below
485 * 485 *
486 * RETURN: Status 486 * RETURN: Status
487 * 487 *
@@ -618,7 +618,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_interface)
618 * 618 *
619 * FUNCTION: acpi_install_interface_handler 619 * FUNCTION: acpi_install_interface_handler
620 * 620 *
621 * PARAMETERS: Handler - The _OSI interface handler to install 621 * PARAMETERS: handler - The _OSI interface handler to install
622 * NULL means "remove existing handler" 622 * NULL means "remove existing handler"
623 * 623 *
624 * RETURN: Status 624 * RETURN: Status
@@ -651,9 +651,9 @@ ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
651 * FUNCTION: acpi_check_address_range 651 * FUNCTION: acpi_check_address_range
652 * 652 *
653 * PARAMETERS: space_id - Address space ID 653 * PARAMETERS: space_id - Address space ID
654 * Address - Start address 654 * address - Start address
655 * Length - Length 655 * length - Length
656 * Warn - TRUE if warning on overlap desired 656 * warn - TRUE if warning on overlap desired
657 * 657 *
658 * RETURN: Count of the number of conflicts detected. 658 * RETURN: Count of the number of conflicts detected.
659 * 659 *
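A hedged example of the call documented above; the u32 conflict count and the ACPI_ADR_SPACE_SYSTEM_IO constant are assumptions based on the parameter list, and status handling is elided:

    u32 conflicts;

    /* Does any AML operation region overlap one byte of system I/O at 0x62? */
    conflicts = acpi_check_address_range(ACPI_ADR_SPACE_SYSTEM_IO,
                                         0x62, 1, TRUE);
    if (conflicts) {
        /* overlap detected; warnings were printed because warn == TRUE */
    }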
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 52b568af1819..6d63cc39b9ae 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -53,7 +53,7 @@ ACPI_MODULE_NAME("utxferror")
53 * This module is used for the in-kernel ACPICA as well as the ACPICA 53 * This module is used for the in-kernel ACPICA as well as the ACPICA
54 * tools/applications. 54 * tools/applications.
55 * 55 *
56 * For the i_aSL compiler case, the output is redirected to stderr so that 56 * For the iASL compiler case, the output is redirected to stderr so that
57 * any of the various ACPI errors and warnings do not appear in the output 57 * any of the various ACPI errors and warnings do not appear in the output
58 * files, for either the compiler or disassembler portions of the tool. 58 * files, for either the compiler or disassembler portions of the tool.
59 */ 59 */
@@ -70,7 +70,7 @@ extern FILE *acpi_gbl_output_file;
70 70
71#else 71#else
72/* 72/*
73 * non-i_aSL case - no redirection, nothing to do 73 * non-iASL case - no redirection, nothing to do
74 */ 74 */
75#define ACPI_MSG_REDIRECT_BEGIN 75#define ACPI_MSG_REDIRECT_BEGIN
76#define ACPI_MSG_REDIRECT_END 76#define ACPI_MSG_REDIRECT_END
@@ -82,6 +82,8 @@ extern FILE *acpi_gbl_output_file;
82#define ACPI_MSG_EXCEPTION "ACPI Exception: " 82#define ACPI_MSG_EXCEPTION "ACPI Exception: "
83#define ACPI_MSG_WARNING "ACPI Warning: " 83#define ACPI_MSG_WARNING "ACPI Warning: "
84#define ACPI_MSG_INFO "ACPI: " 84#define ACPI_MSG_INFO "ACPI: "
85#define ACPI_MSG_BIOS_ERROR "ACPI BIOS Bug: Error: "
86#define ACPI_MSG_BIOS_WARNING "ACPI BIOS Bug: Warning: "
85/* 87/*
86 * Common message suffix 88 * Common message suffix
87 */ 89 */
@@ -93,7 +95,7 @@ extern FILE *acpi_gbl_output_file;
93 * 95 *
94 * PARAMETERS: module_name - Caller's module name (for error output) 96 * PARAMETERS: module_name - Caller's module name (for error output)
95 * line_number - Caller's line number (for error output) 97 * line_number - Caller's line number (for error output)
96 * Format - Printf format string + additional args 98 * format - Printf format string + additional args
97 * 99 *
98 * RETURN: None 100 * RETURN: None
99 * 101 *
@@ -124,8 +126,8 @@ ACPI_EXPORT_SYMBOL(acpi_error)
124 * 126 *
125 * PARAMETERS: module_name - Caller's module name (for error output) 127 * PARAMETERS: module_name - Caller's module name (for error output)
126 * line_number - Caller's line number (for error output) 128 * line_number - Caller's line number (for error output)
127 * Status - Status to be formatted 129 * status - Status to be formatted
128 * Format - Printf format string + additional args 130 * format - Printf format string + additional args
129 * 131 *
130 * RETURN: None 132 * RETURN: None
131 * 133 *
@@ -159,7 +161,7 @@ ACPI_EXPORT_SYMBOL(acpi_exception)
159 * 161 *
160 * PARAMETERS: module_name - Caller's module name (for error output) 162 * PARAMETERS: module_name - Caller's module name (for error output)
161 * line_number - Caller's line number (for error output) 163 * line_number - Caller's line number (for error output)
162 * Format - Printf format string + additional args 164 * format - Printf format string + additional args
163 * 165 *
164 * RETURN: None 166 * RETURN: None
165 * 167 *
@@ -190,7 +192,7 @@ ACPI_EXPORT_SYMBOL(acpi_warning)
190 * 192 *
191 * PARAMETERS: module_name - Caller's module name (for error output) 193 * PARAMETERS: module_name - Caller's module name (for error output)
192 * line_number - Caller's line number (for error output) 194 * line_number - Caller's line number (for error output)
193 * Format - Printf format string + additional args 195 * format - Printf format string + additional args
194 * 196 *
195 * RETURN: None 197 * RETURN: None
196 * 198 *
@@ -218,6 +220,72 @@ acpi_info(const char *module_name, u32 line_number, const char *format, ...)
218 220
219ACPI_EXPORT_SYMBOL(acpi_info) 221ACPI_EXPORT_SYMBOL(acpi_info)
220 222
223/*******************************************************************************
224 *
225 * FUNCTION: acpi_bios_error
226 *
227 * PARAMETERS: module_name - Caller's module name (for error output)
228 * line_number - Caller's line number (for error output)
229 * format - Printf format string + additional args
230 *
231 * RETURN: None
232 *
233 * DESCRIPTION: Print "ACPI Firmware Error" message with module/line/version
234 * info
235 *
236 ******************************************************************************/
237void ACPI_INTERNAL_VAR_XFACE
238acpi_bios_error(const char *module_name,
239 u32 line_number, const char *format, ...)
240{
241 va_list arg_list;
242
243 ACPI_MSG_REDIRECT_BEGIN;
244 acpi_os_printf(ACPI_MSG_BIOS_ERROR);
245
246 va_start(arg_list, format);
247 acpi_os_vprintf(format, arg_list);
248 ACPI_MSG_SUFFIX;
249 va_end(arg_list);
250
251 ACPI_MSG_REDIRECT_END;
252}
253
254ACPI_EXPORT_SYMBOL(acpi_bios_error)
255
256/*******************************************************************************
257 *
258 * FUNCTION: acpi_bios_warning
259 *
260 * PARAMETERS: module_name - Caller's module name (for error output)
261 * line_number - Caller's line number (for error output)
262 * format - Printf format string + additional args
263 *
264 * RETURN: None
265 *
266 * DESCRIPTION: Print "ACPI Firmware Warning" message with module/line/version
267 * info
268 *
269 ******************************************************************************/
270void ACPI_INTERNAL_VAR_XFACE
271acpi_bios_warning(const char *module_name,
272 u32 line_number, const char *format, ...)
273{
274 va_list arg_list;
275
276 ACPI_MSG_REDIRECT_BEGIN;
277 acpi_os_printf(ACPI_MSG_BIOS_WARNING);
278
279 va_start(arg_list, format);
280 acpi_os_vprintf(format, arg_list);
281 ACPI_MSG_SUFFIX;
282 va_end(arg_list);
283
284 ACPI_MSG_REDIRECT_END;
285}
286
287ACPI_EXPORT_SYMBOL(acpi_bios_warning)
288
221/* 289/*
222 * The remainder of this module contains internal error functions that may 290 * The remainder of this module contains internal error functions that may
223 * be configured out. 291 * be configured out.
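Called directly, the new interfaces take the same module/line prefix arguments as acpi_error; the module name, line number, and message text below are illustrative:

    acpi_bios_warning("tbfadt", 350,
                      "Invalid length for PM1A control block: %u, using default %u",
                      8, 16);
    /* output begins with "ACPI BIOS Bug: Warning:" and ends with the
     * common module-line suffix */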
@@ -271,9 +339,9 @@ acpi_ut_predefined_warning(const char *module_name,
271 * 339 *
272 * PARAMETERS: module_name - Caller's module name (for error output) 340 * PARAMETERS: module_name - Caller's module name (for error output)
273 * line_number - Caller's line number (for error output) 341 * line_number - Caller's line number (for error output)
274 * Pathname - Full pathname to the node 342 * pathname - Full pathname to the node
275 * node_flags - From Namespace node for the method/object 343 * node_flags - From Namespace node for the method/object
276 * Format - Printf format string + additional args 344 * format - Printf format string + additional args
277 * 345 *
278 * RETURN: None 346 * RETURN: None
279 * 347 *
@@ -373,9 +441,9 @@ acpi_ut_namespace_error(const char *module_name,
373 * 441 *
374 * PARAMETERS: module_name - Caller's module name (for error output) 442 * PARAMETERS: module_name - Caller's module name (for error output)
375 * line_number - Caller's line number (for error output) 443 * line_number - Caller's line number (for error output)
376 * Message - Error message to use on failure 444 * message - Error message to use on failure
377 * prefix_node - Prefix relative to the path 445 * prefix_node - Prefix relative to the path
378 * Path - Path to the node (optional) 446 * path - Path to the node (optional)
379 * method_status - Execution status 447 * method_status - Execution status
380 * 448 *
381 * RETURN: None 449 * RETURN: None
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c
index 1427d191d15a..0a40a851b354 100644
--- a/drivers/acpi/acpica/utxfmutex.c
+++ b/drivers/acpi/acpica/utxfmutex.c
@@ -58,8 +58,8 @@ acpi_ut_get_mutex_object(acpi_handle handle,
58 * 58 *
59 * FUNCTION: acpi_ut_get_mutex_object 59 * FUNCTION: acpi_ut_get_mutex_object
60 * 60 *
61 * PARAMETERS: Handle - Mutex or prefix handle (optional) 61 * PARAMETERS: handle - Mutex or prefix handle (optional)
62 * Pathname - Mutex pathname (optional) 62 * pathname - Mutex pathname (optional)
63 * ret_obj - Where the mutex object is returned 63 * ret_obj - Where the mutex object is returned
64 * 64 *
65 * RETURN: Status 65 * RETURN: Status
@@ -118,9 +118,9 @@ acpi_ut_get_mutex_object(acpi_handle handle,
118 * 118 *
119 * FUNCTION: acpi_acquire_mutex 119 * FUNCTION: acpi_acquire_mutex
120 * 120 *
121 * PARAMETERS: Handle - Mutex or prefix handle (optional) 121 * PARAMETERS: handle - Mutex or prefix handle (optional)
122 * Pathname - Mutex pathname (optional) 122 * pathname - Mutex pathname (optional)
123 * Timeout - Max time to wait for the lock (millisec) 123 * timeout - Max time to wait for the lock (millisec)
124 * 124 *
125 * RETURN: Status 125 * RETURN: Status
126 * 126 *
@@ -155,8 +155,8 @@ acpi_acquire_mutex(acpi_handle handle, acpi_string pathname, u16 timeout)
155 * 155 *
156 * FUNCTION: acpi_release_mutex 156 * FUNCTION: acpi_release_mutex
157 * 157 *
158 * PARAMETERS: Handle - Mutex or prefix handle (optional) 158 * PARAMETERS: handle - Mutex or prefix handle (optional)
159 * Pathname - Mutex pathname (optional) 159 * pathname - Mutex pathname (optional)
160 * 160 *
161 * RETURN: Status 161 * RETURN: Status
162 * 162 *
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 5577762daee1..00a783661d0b 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -243,7 +243,7 @@ static int pre_map_gar_callback(struct apei_exec_context *ctx,
243 u8 ins = entry->instruction; 243 u8 ins = entry->instruction;
244 244
245 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER) 245 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
246 return acpi_os_map_generic_address(&entry->register_region); 246 return apei_map_generic_address(&entry->register_region);
247 247
248 return 0; 248 return 0;
249} 249}
@@ -276,7 +276,7 @@ static int post_unmap_gar_callback(struct apei_exec_context *ctx,
276 u8 ins = entry->instruction; 276 u8 ins = entry->instruction;
277 277
278 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER) 278 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
279 acpi_os_unmap_generic_address(&entry->register_region); 279 apei_unmap_generic_address(&entry->register_region);
280 280
281 return 0; 281 return 0;
282} 282}
@@ -586,6 +586,11 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
586 } 586 }
587 *access_bit_width = 1UL << (access_size_code + 2); 587 *access_bit_width = 1UL << (access_size_code + 2);
588 588
589 /* Fixup common BIOS bug */
590 if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&
591 *access_bit_width < 32)
592 *access_bit_width = 32;
593
589 if ((bit_width + bit_offset) > *access_bit_width) { 594 if ((bit_width + bit_offset) > *access_bit_width) {
590 pr_warning(FW_BUG APEI_PFX 595 pr_warning(FW_BUG APEI_PFX
591 "Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n", 596 "Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
@@ -606,6 +611,19 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
606 return 0; 611 return 0;
607} 612}
608 613
614int apei_map_generic_address(struct acpi_generic_address *reg)
615{
616 int rc;
617 u32 access_bit_width;
618 u64 address;
619
620 rc = apei_check_gar(reg, &address, &access_bit_width);
621 if (rc)
622 return rc;
623 return acpi_os_map_generic_address(reg);
624}
625EXPORT_SYMBOL_GPL(apei_map_generic_address);
626
609/* read GAR in interrupt (including NMI) or process context */ 627/* read GAR in interrupt (including NMI) or process context */
610int apei_read(u64 *val, struct acpi_generic_address *reg) 628int apei_read(u64 *val, struct acpi_generic_address *reg)
611{ 629{
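
Two related things happen in apei-base.c: apei_check_gar() learns to paper over a common BIOS bug, and the new apei_map_generic_address() wrapper runs that validation before delegating to acpi_os_map_generic_address() (the unmap side stays a thin inline in apei-internal.h below). The width math is access_bit_width = 1UL << (access_size_code + 2), i.e. code 1 gives 8 bits, 2 gives 16, 3 gives 32, 4 gives 64; the fixup widens the access to 32 bits when a register is declared 32 bits wide at offset 0 on a dword-aligned address but the BIOS advertised a narrower access size. A compilable restatement of that arithmetic (a standalone sketch, not the kernel function):

#include <stdint.h>
#include <stdio.h>

/* Illustrative re-statement of the width check in apei_check_gar();
 * simplified and standalone, not the kernel code itself. */
static uint32_t gar_access_bit_width(uint8_t access_size_code,
				     uint8_t bit_width, uint8_t bit_offset,
				     uint64_t paddr)
{
	uint32_t w = 1UL << (access_size_code + 2); /* 1->8, 2->16, 3->32, 4->64 */

	/* common BIOS bug: an aligned 32-bit register reported with a
	 * narrower access size; widen the access to 32 bits */
	if (bit_width == 32 && bit_offset == 0 && (paddr & 0x03) == 0 && w < 32)
		w = 32;
	return w;
}

int main(void)
{
	/* BIOS claims "byte access" (code 1) for an aligned 32-bit register */
	printf("%u\n", gar_access_bit_width(1, 32, 0, 0xfed40000)); /* 32 */
	/* an honest 16-bit register keeps its 16-bit access width */
	printf("%u\n", gar_access_bit_width(2, 16, 0, 0xfed40000)); /* 16 */
	return 0;
}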
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index cca240a33038..f220d642136e 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -7,6 +7,8 @@
7#define APEI_INTERNAL_H 7#define APEI_INTERNAL_H
8 8
9#include <linux/cper.h> 9#include <linux/cper.h>
10#include <linux/acpi.h>
11#include <linux/acpi_io.h>
10 12
11struct apei_exec_context; 13struct apei_exec_context;
12 14
@@ -68,6 +70,13 @@ static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 actio
68/* IP has been set in instruction function */ 70/* IP has been set in instruction function */
69#define APEI_EXEC_SET_IP 1 71#define APEI_EXEC_SET_IP 1
70 72
73int apei_map_generic_address(struct acpi_generic_address *reg);
74
75static inline void apei_unmap_generic_address(struct acpi_generic_address *reg)
76{
77 acpi_os_unmap_generic_address(reg);
78}
79
71int apei_read(u64 *val, struct acpi_generic_address *reg); 80int apei_read(u64 *val, struct acpi_generic_address *reg);
72int apei_write(u64 val, struct acpi_generic_address *reg); 81int apei_write(u64 val, struct acpi_generic_address *reg);
73 82
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 9b3cac0abecc..1599566ed1fe 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -301,7 +301,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
301 if (!ghes) 301 if (!ghes)
302 return ERR_PTR(-ENOMEM); 302 return ERR_PTR(-ENOMEM);
303 ghes->generic = generic; 303 ghes->generic = generic;
304 rc = acpi_os_map_generic_address(&generic->error_status_address); 304 rc = apei_map_generic_address(&generic->error_status_address);
305 if (rc) 305 if (rc)
306 goto err_free; 306 goto err_free;
307 error_block_length = generic->error_block_length; 307 error_block_length = generic->error_block_length;
@@ -321,7 +321,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
321 return ghes; 321 return ghes;
322 322
323err_unmap: 323err_unmap:
324 acpi_os_unmap_generic_address(&generic->error_status_address); 324 apei_unmap_generic_address(&generic->error_status_address);
325err_free: 325err_free:
326 kfree(ghes); 326 kfree(ghes);
327 return ERR_PTR(rc); 327 return ERR_PTR(rc);
@@ -330,7 +330,7 @@ err_free:
330static void ghes_fini(struct ghes *ghes) 330static void ghes_fini(struct ghes *ghes)
331{ 331{
332 kfree(ghes->estatus); 332 kfree(ghes->estatus);
333 acpi_os_unmap_generic_address(&ghes->generic->error_status_address); 333 apei_unmap_generic_address(&ghes->generic->error_status_address);
334} 334}
335 335
336enum { 336enum {
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 7dd3f9fb9f3f..45e3e1759fb8 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -250,6 +250,13 @@ static int acpi_battery_get_property(struct power_supply *psy,
250 else 250 else
251 val->intval = battery->capacity_now * 1000; 251 val->intval = battery->capacity_now * 1000;
252 break; 252 break;
253 case POWER_SUPPLY_PROP_CAPACITY:
254 if (battery->capacity_now && battery->full_charge_capacity)
255 val->intval = battery->capacity_now * 100/
256 battery->full_charge_capacity;
257 else
258 val->intval = 0;
259 break;
253 case POWER_SUPPLY_PROP_MODEL_NAME: 260 case POWER_SUPPLY_PROP_MODEL_NAME:
254 val->strval = battery->model_number; 261 val->strval = battery->model_number;
255 break; 262 break;
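
The new POWER_SUPPLY_PROP_CAPACITY case derives a percentage from values the driver already tracks: capacity_now * 100 / full_charge_capacity, multiplying before dividing so integer arithmetic keeps the precision, and reporting 0 when either value is unknown (zero) to avoid a division by zero. A compilable restatement (function name illustrative):

#include <stdio.h>

/* Userspace restatement of the capacity computation added above:
 * multiply first so integer math does not truncate to 0, and fall
 * back to 0 when either value is unknown (zero). */
static int battery_capacity_percent(unsigned int capacity_now,
				    unsigned int full_charge_capacity)
{
	if (capacity_now && full_charge_capacity)
		return capacity_now * 100 / full_charge_capacity;
	return 0;
}

int main(void)
{
	printf("%d%%\n", battery_capacity_percent(3210, 4400)); /* 72% */
	printf("%d%%\n", battery_capacity_percent(0, 4400));    /* 0%: unknown */
	return 0;
}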
@@ -276,6 +283,7 @@ static enum power_supply_property charge_battery_props[] = {
276 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, 283 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
277 POWER_SUPPLY_PROP_CHARGE_FULL, 284 POWER_SUPPLY_PROP_CHARGE_FULL,
278 POWER_SUPPLY_PROP_CHARGE_NOW, 285 POWER_SUPPLY_PROP_CHARGE_NOW,
286 POWER_SUPPLY_PROP_CAPACITY,
279 POWER_SUPPLY_PROP_MODEL_NAME, 287 POWER_SUPPLY_PROP_MODEL_NAME,
280 POWER_SUPPLY_PROP_MANUFACTURER, 288 POWER_SUPPLY_PROP_MANUFACTURER,
281 POWER_SUPPLY_PROP_SERIAL_NUMBER, 289 POWER_SUPPLY_PROP_SERIAL_NUMBER,
@@ -292,6 +300,7 @@ static enum power_supply_property energy_battery_props[] = {
292 POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, 300 POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
293 POWER_SUPPLY_PROP_ENERGY_FULL, 301 POWER_SUPPLY_PROP_ENERGY_FULL,
294 POWER_SUPPLY_PROP_ENERGY_NOW, 302 POWER_SUPPLY_PROP_ENERGY_NOW,
303 POWER_SUPPLY_PROP_CAPACITY,
295 POWER_SUPPLY_PROP_MODEL_NAME, 304 POWER_SUPPLY_PROP_MODEL_NAME,
296 POWER_SUPPLY_PROP_MANUFACTURER, 305 POWER_SUPPLY_PROP_MANUFACTURER,
297 POWER_SUPPLY_PROP_SERIAL_NUMBER, 306 POWER_SUPPLY_PROP_SERIAL_NUMBER,
@@ -1043,17 +1052,26 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
1043 return 0; 1052 return 0;
1044} 1053}
1045 1054
1055#ifdef CONFIG_PM_SLEEP
1046/* this is needed to learn about changes made in suspended state */ 1056/* this is needed to learn about changes made in suspended state */
1047static int acpi_battery_resume(struct acpi_device *device) 1057static int acpi_battery_resume(struct device *dev)
1048{ 1058{
1049 struct acpi_battery *battery; 1059 struct acpi_battery *battery;
1050 if (!device) 1060
1061 if (!dev)
1051 return -EINVAL; 1062 return -EINVAL;
1052 battery = acpi_driver_data(device); 1063
1064 battery = acpi_driver_data(to_acpi_device(dev));
1065 if (!battery)
1066 return -EINVAL;
1067
1053 battery->update_time = 0; 1068 battery->update_time = 0;
1054 acpi_battery_update(battery); 1069 acpi_battery_update(battery);
1055 return 0; 1070 return 0;
1056} 1071}
1072#endif
1073
1074static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
1057 1075
1058static struct acpi_driver acpi_battery_driver = { 1076static struct acpi_driver acpi_battery_driver = {
1059 .name = "battery", 1077 .name = "battery",
@@ -1062,10 +1080,10 @@ static struct acpi_driver acpi_battery_driver = {
1062 .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, 1080 .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
1063 .ops = { 1081 .ops = {
1064 .add = acpi_battery_add, 1082 .add = acpi_battery_add,
1065 .resume = acpi_battery_resume,
1066 .remove = acpi_battery_remove, 1083 .remove = acpi_battery_remove,
1067 .notify = acpi_battery_notify, 1084 .notify = acpi_battery_notify,
1068 }, 1085 },
1086 .drv.pm = &acpi_battery_pm,
1069}; 1087};
1070 1088
1071static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie) 1089static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
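
battery.c is the first of several conversions in this series (button.c, fan.c, power.c, processor_driver.c, sbs.c and thermal.c follow the same recipe): the legacy suspend/resume callbacks in struct acpi_device_ops, which took a struct acpi_device *, move to struct dev_pm_ops callbacks that take a struct device * and recover the ACPI device with to_acpi_device(). A kernel-style sketch of the recipe using a hypothetical "foo" driver (foo_add/foo_remove assumed to exist elsewhere; not buildable standalone):

#include <linux/pm.h>
#include <acpi/acpi_bus.h>

/* hypothetical driver illustrating the conversion pattern above */
#ifdef CONFIG_PM_SLEEP
static int foo_resume(struct device *dev)
{
	struct acpi_device *adev;

	if (!dev)
		return -EINVAL;
	adev = to_acpi_device(dev);	/* dev is embedded in the acpi_device */
	/* ... refresh state that suspend may have invalidated ... */
	return 0;
}
#endif

/* expands to a struct dev_pm_ops; the callbacks are referenced only
 * under CONFIG_PM_SLEEP, which is why their definitions are guarded */
static SIMPLE_DEV_PM_OPS(foo_pm, NULL, foo_resume);

static struct acpi_driver foo_driver = {
	.name = "foo",
	.ops = {
		.add    = foo_add,
		.remove = foo_remove,
		/* no more .suspend / .resume here */
	},
	.drv.pm = &foo_pm,
};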
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index adceafda9c17..9628652e080c 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -574,6 +574,10 @@ static void acpi_bus_osc_support(void)
574 capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT; 574 capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
575#endif 575#endif
576 576
577#ifdef ACPI_HOTPLUG_OST
578 capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_HOTPLUG_OST_SUPPORT;
579#endif
580
577 if (!ghes_disable) 581 if (!ghes_disable)
578 capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_APEI_SUPPORT; 582 capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_APEI_SUPPORT;
579 if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))) 583 if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index d27d072472f9..314a3b84bbc7 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -76,19 +76,23 @@ MODULE_DEVICE_TABLE(acpi, button_device_ids);
76 76
77static int acpi_button_add(struct acpi_device *device); 77static int acpi_button_add(struct acpi_device *device);
78static int acpi_button_remove(struct acpi_device *device, int type); 78static int acpi_button_remove(struct acpi_device *device, int type);
79static int acpi_button_resume(struct acpi_device *device);
80static void acpi_button_notify(struct acpi_device *device, u32 event); 79static void acpi_button_notify(struct acpi_device *device, u32 event);
81 80
81#ifdef CONFIG_PM_SLEEP
82static int acpi_button_resume(struct device *dev);
83#endif
84static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume);
85
82static struct acpi_driver acpi_button_driver = { 86static struct acpi_driver acpi_button_driver = {
83 .name = "button", 87 .name = "button",
84 .class = ACPI_BUTTON_CLASS, 88 .class = ACPI_BUTTON_CLASS,
85 .ids = button_device_ids, 89 .ids = button_device_ids,
86 .ops = { 90 .ops = {
87 .add = acpi_button_add, 91 .add = acpi_button_add,
88 .resume = acpi_button_resume,
89 .remove = acpi_button_remove, 92 .remove = acpi_button_remove,
90 .notify = acpi_button_notify, 93 .notify = acpi_button_notify,
91 }, 94 },
95 .drv.pm = &acpi_button_pm,
92}; 96};
93 97
94struct acpi_button { 98struct acpi_button {
@@ -308,14 +312,17 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
308 } 312 }
309} 313}
310 314
311static int acpi_button_resume(struct acpi_device *device) 315#ifdef CONFIG_PM_SLEEP
316static int acpi_button_resume(struct device *dev)
312{ 317{
318 struct acpi_device *device = to_acpi_device(dev);
313 struct acpi_button *button = acpi_driver_data(device); 319 struct acpi_button *button = acpi_driver_data(device);
314 320
315 if (button->type == ACPI_BUTTON_TYPE_LID) 321 if (button->type == ACPI_BUTTON_TYPE_LID)
316 return acpi_lid_send_state(device); 322 return acpi_lid_send_state(device);
317 return 0; 323 return 0;
318} 324}
325#endif
319 326
320static int acpi_button_add(struct acpi_device *device) 327static int acpi_button_add(struct acpi_device *device)
321{ 328{
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 45cd03b4630e..1f9f7d7d7bc5 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -158,9 +158,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
158 int result; 158 int result;
159 int present; 159 int present;
160 acpi_status status; 160 acpi_status status;
161 161 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
162
163 present = is_device_present(handle);
164 162
165 switch (type) { 163 switch (type) {
166 case ACPI_NOTIFY_BUS_CHECK: 164 case ACPI_NOTIFY_BUS_CHECK:
@@ -169,32 +167,47 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
169 printk(KERN_WARNING "Container driver received %s event\n", 167 printk(KERN_WARNING "Container driver received %s event\n",
170 (type == ACPI_NOTIFY_BUS_CHECK) ? 168 (type == ACPI_NOTIFY_BUS_CHECK) ?
171 "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"); 169 "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK");
170
171 present = is_device_present(handle);
172 status = acpi_bus_get_device(handle, &device); 172 status = acpi_bus_get_device(handle, &device);
173 if (present) { 173 if (!present) {
174 if (ACPI_FAILURE(status) || !device) {
175 result = container_device_add(&device, handle);
176 if (!result)
177 kobject_uevent(&device->dev.kobj,
178 KOBJ_ONLINE);
179 else
180 printk(KERN_WARNING
181 "Failed to add container\n");
182 }
183 } else {
184 if (ACPI_SUCCESS(status)) { 174 if (ACPI_SUCCESS(status)) {
185 /* device exists and this is a remove request */ 175 /* device exists and this is a remove request */
176 device->flags.eject_pending = 1;
186 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE); 177 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
178 return;
187 } 179 }
180 break;
181 }
182
183 if (!ACPI_FAILURE(status) || device)
184 break;
185
186 result = container_device_add(&device, handle);
187 if (result) {
188 printk(KERN_WARNING "Failed to add container\n");
189 break;
188 } 190 }
191
192 kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
193 ost_code = ACPI_OST_SC_SUCCESS;
189 break; 194 break;
195
190 case ACPI_NOTIFY_EJECT_REQUEST: 196 case ACPI_NOTIFY_EJECT_REQUEST:
191 if (!acpi_bus_get_device(handle, &device) && device) { 197 if (!acpi_bus_get_device(handle, &device) && device) {
198 device->flags.eject_pending = 1;
192 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE); 199 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
200 return;
193 } 201 }
194 break; 202 break;
203
195 default: 204 default:
196 break; 205 /* non-hotplug event; possibly handled by other handler */
206 return;
197 } 207 }
208
209 /* Inform firmware that the hotplug operation has completed */
210 (void) acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
198 return; 211 return;
199} 212}
200 213
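
container_notify_cb() now routes every hot-plug outcome through a single exit point that evaluates _OST, so firmware learns whether the operation completed: ost_code starts at non-specific failure and is upgraded to success only when a device was actually added, while paths with nothing to report yet (pending ejects, non-hotplug events) return early. processor_driver.c and scan.c below adopt the same shape. A condensed sketch of the handler skeleton (cases abridged):

/* Condensed sketch of the notify-handler pattern introduced here;
 * the real handlers carry more cases and error handling. */
static void sample_notify_cb(acpi_handle handle, u32 type, void *context)
{
	u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* pessimistic default */

	switch (type) {
	case ACPI_NOTIFY_BUS_CHECK:
	case ACPI_NOTIFY_DEVICE_CHECK:
		/* ... try to add the device ... */
		/* on success: ost_code = ACPI_OST_SC_SUCCESS; */
		break;
	case ACPI_NOTIFY_EJECT_REQUEST:
		/* userspace finishes the eject; nothing to report yet */
		return;
	default:
		/* non-hotplug event; possibly handled by another handler */
		return;
	}

	/* tell firmware the hotplug operation has completed */
	(void)acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
}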
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 0f0356ca1a9e..bc36a476f1ab 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -46,8 +46,6 @@ MODULE_LICENSE("GPL");
46 46
47static int acpi_fan_add(struct acpi_device *device); 47static int acpi_fan_add(struct acpi_device *device);
48static int acpi_fan_remove(struct acpi_device *device, int type); 48static int acpi_fan_remove(struct acpi_device *device, int type);
49static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state);
50static int acpi_fan_resume(struct acpi_device *device);
51 49
52static const struct acpi_device_id fan_device_ids[] = { 50static const struct acpi_device_id fan_device_ids[] = {
53 {"PNP0C0B", 0}, 51 {"PNP0C0B", 0},
@@ -55,6 +53,12 @@ static const struct acpi_device_id fan_device_ids[] = {
55}; 53};
56MODULE_DEVICE_TABLE(acpi, fan_device_ids); 54MODULE_DEVICE_TABLE(acpi, fan_device_ids);
57 55
56#ifdef CONFIG_PM_SLEEP
57static int acpi_fan_suspend(struct device *dev);
58static int acpi_fan_resume(struct device *dev);
59#endif
60static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume);
61
58static struct acpi_driver acpi_fan_driver = { 62static struct acpi_driver acpi_fan_driver = {
59 .name = "fan", 63 .name = "fan",
60 .class = ACPI_FAN_CLASS, 64 .class = ACPI_FAN_CLASS,
@@ -62,9 +66,8 @@ static struct acpi_driver acpi_fan_driver = {
62 .ops = { 66 .ops = {
63 .add = acpi_fan_add, 67 .add = acpi_fan_add,
64 .remove = acpi_fan_remove, 68 .remove = acpi_fan_remove,
65 .suspend = acpi_fan_suspend,
66 .resume = acpi_fan_resume,
67 }, 69 },
70 .drv.pm = &acpi_fan_pm,
68}; 71};
69 72
70/* thermal cooling device callbacks */ 73/* thermal cooling device callbacks */
@@ -183,29 +186,31 @@ static int acpi_fan_remove(struct acpi_device *device, int type)
183 return 0; 186 return 0;
184} 187}
185 188
186static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state) 189#ifdef CONFIG_PM_SLEEP
190static int acpi_fan_suspend(struct device *dev)
187{ 191{
188 if (!device) 192 if (!dev)
189 return -EINVAL; 193 return -EINVAL;
190 194
191 acpi_bus_set_power(device->handle, ACPI_STATE_D0); 195 acpi_bus_set_power(to_acpi_device(dev)->handle, ACPI_STATE_D0);
192 196
193 return AE_OK; 197 return AE_OK;
194} 198}
195 199
196static int acpi_fan_resume(struct acpi_device *device) 200static int acpi_fan_resume(struct device *dev)
197{ 201{
198 int result; 202 int result;
199 203
200 if (!device) 204 if (!dev)
201 return -EINVAL; 205 return -EINVAL;
202 206
203 result = acpi_bus_update_power(device->handle, NULL); 207 result = acpi_bus_update_power(to_acpi_device(dev)->handle, NULL);
204 if (result) 208 if (result)
205 printk(KERN_ERR PREFIX "Error updating fan power state\n"); 209 printk(KERN_ERR PREFIX "Error updating fan power state\n");
206 210
207 return result; 211 return result;
208} 212}
213#endif
209 214
210static int __init acpi_fan_init(void) 215static int __init acpi_fan_init(void)
211{ 216{
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 1564e0927c21..243ee85e4d2e 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -39,6 +39,7 @@ int register_acpi_bus_type(struct acpi_bus_type *type)
39 } 39 }
40 return -ENODEV; 40 return -ENODEV;
41} 41}
42EXPORT_SYMBOL_GPL(register_acpi_bus_type);
42 43
43int unregister_acpi_bus_type(struct acpi_bus_type *type) 44int unregister_acpi_bus_type(struct acpi_bus_type *type)
44{ 45{
@@ -54,6 +55,7 @@ int unregister_acpi_bus_type(struct acpi_bus_type *type)
54 } 55 }
55 return -ENODEV; 56 return -ENODEV;
56} 57}
58EXPORT_SYMBOL_GPL(unregister_acpi_bus_type);
57 59
58static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type) 60static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
59{ 61{
@@ -69,7 +71,6 @@ static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
69 up_read(&bus_type_sem); 71 up_read(&bus_type_sem);
70 return ret; 72 return ret;
71} 73}
72EXPORT_SYMBOL_GPL(register_acpi_bus_type);
73 74
74static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle) 75static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
75{ 76{
@@ -86,7 +87,6 @@ static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
86 up_read(&bus_type_sem); 87 up_read(&bus_type_sem);
87 return ret; 88 return ret;
88} 89}
89EXPORT_SYMBOL_GPL(unregister_acpi_bus_type);
90 90
91/* Get device's handler per its address under its parent */ 91/* Get device's handler per its address under its parent */
92struct acpi_find_child { 92struct acpi_find_child {
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index e56f3be7b07d..cb31298ca684 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -237,6 +237,8 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header,
237 return 0; 237 return 0;
238} 238}
239 239
240static int __initdata parsed_numa_memblks;
241
240static int __init 242static int __init
241acpi_parse_memory_affinity(struct acpi_subtable_header * header, 243acpi_parse_memory_affinity(struct acpi_subtable_header * header,
242 const unsigned long end) 244 const unsigned long end)
@@ -250,8 +252,8 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
250 acpi_table_print_srat_entry(header); 252 acpi_table_print_srat_entry(header);
251 253
252 /* let architecture-dependent part to do it */ 254 /* let architecture-dependent part to do it */
253 acpi_numa_memory_affinity_init(memory_affinity); 255 if (!acpi_numa_memory_affinity_init(memory_affinity))
254 256 parsed_numa_memblks++;
255 return 0; 257 return 0;
256} 258}
257 259
@@ -304,8 +306,10 @@ int __init acpi_numa_init(void)
304 306
305 acpi_numa_arch_fixup(); 307 acpi_numa_arch_fixup();
306 308
307 if (cnt <= 0) 309 if (cnt < 0)
308 return cnt ?: -ENOENT; 310 return cnt;
311 else if (!parsed_numa_memblks)
312 return -ENOENT;
309 return 0; 313 return 0;
310} 314}
311 315
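
The numa.c change separates two failure modes that the old "cnt <= 0, return cnt ?: -ENOENT" collapsed: a negative cnt means SRAT parsing itself failed and is propagated unchanged, while a SRAT whose memory-affinity entries were all rejected by acpi_numa_memory_affinity_init() now also yields -ENOENT, because parsed_numa_memblks only counts entries the architecture actually accepted. The decision, restated as a sketch with the cases called out:

	/* post-patch decision in acpi_numa_init(), one case per line:
	 *   cnt < 0                  parsing failed: propagate the error
	 *   parsed_numa_memblks == 0 tables parsed, but no usable memory
	 *                            affinity entries: fall back to non-NUMA
	 *   otherwise                success
	 */
	if (cnt < 0)
		return cnt;
	else if (!parsed_numa_memblks)
		return -ENOENT;
	return 0;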
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c3881b2eb8b2..9eaf708f5885 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -891,7 +891,7 @@ static void acpi_os_execute_deferred(struct work_struct *work)
891 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work); 891 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
892 892
893 if (dpc->wait) 893 if (dpc->wait)
894 acpi_os_wait_events_complete(NULL); 894 acpi_os_wait_events_complete();
895 895
896 dpc->function(dpc->context); 896 dpc->function(dpc->context);
897 kfree(dpc); 897 kfree(dpc);
@@ -987,7 +987,7 @@ acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
987 return __acpi_os_execute(0, function, context, 1); 987 return __acpi_os_execute(0, function, context, 1);
988} 988}
989 989
990void acpi_os_wait_events_complete(void *context) 990void acpi_os_wait_events_complete(void)
991{ 991{
992 flush_workqueue(kacpid_wq); 992 flush_workqueue(kacpid_wq);
993 flush_workqueue(kacpi_notify_wq); 993 flush_workqueue(kacpi_notify_wq);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 7aff6312ce7c..72a2c98bc429 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -505,6 +505,8 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
505 strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); 505 strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
506 device->driver_data = root; 506 device->driver_data = root;
507 507
508 root->mcfg_addr = acpi_pci_root_get_mcfg_addr(device->handle);
509
508 /* 510 /*
509 * All supported architectures that use ACPI have support for 511 * All supported architectures that use ACPI have support for
510 * PCI domains, so we indicate this in _OSC support capabilities. 512 * PCI domains, so we indicate this in _OSC support capabilities.
@@ -571,8 +573,15 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
571 OSC_CLOCK_PWR_CAPABILITY_SUPPORT; 573 OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
572 if (pci_msi_enabled()) 574 if (pci_msi_enabled())
573 flags |= OSC_MSI_SUPPORT; 575 flags |= OSC_MSI_SUPPORT;
574 if (flags != base_flags) 576 if (flags != base_flags) {
575 acpi_pci_osc_support(root, flags); 577 status = acpi_pci_osc_support(root, flags);
578 if (ACPI_FAILURE(status)) {
579 dev_info(root->bus->bridge, "ACPI _OSC support "
580 "notification failed, disabling PCIe ASPM\n");
581 pcie_no_aspm();
582 flags = base_flags;
583 }
584 }
576 585
577 if (!pcie_ports_disabled 586 if (!pcie_ports_disabled
578 && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { 587 && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
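
pci_root.c stops silently ignoring a failed _OSC support notification: if firmware never learns which capabilities the OS supports, taking over ASPM control would rest on stale assumptions, so the kernel now logs, backs off, and resets flags so no _OSC control request is made with unacknowledged bits. The hunk above, restated as a sketch with the rationale spelled out in comments:

	if (flags != base_flags) {
		status = acpi_pci_osc_support(root, flags);
		if (ACPI_FAILURE(status)) {
			/* firmware never saw our capabilities; taking over
			 * ASPM would be unsafe, so leave link power
			 * management as the BIOS configured it */
			pcie_no_aspm();
			/* and don't request _OSC control later with bits
			 * the firmware never acknowledged */
			flags = base_flags;
		}
	}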
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index dd6d6a3c6780..fc1803414629 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -60,7 +60,6 @@ ACPI_MODULE_NAME("power");
60 60
61static int acpi_power_add(struct acpi_device *device); 61static int acpi_power_add(struct acpi_device *device);
62static int acpi_power_remove(struct acpi_device *device, int type); 62static int acpi_power_remove(struct acpi_device *device, int type);
63static int acpi_power_resume(struct acpi_device *device);
64 63
65static const struct acpi_device_id power_device_ids[] = { 64static const struct acpi_device_id power_device_ids[] = {
66 {ACPI_POWER_HID, 0}, 65 {ACPI_POWER_HID, 0},
@@ -68,6 +67,11 @@ static const struct acpi_device_id power_device_ids[] = {
68}; 67};
69MODULE_DEVICE_TABLE(acpi, power_device_ids); 68MODULE_DEVICE_TABLE(acpi, power_device_ids);
70 69
70#ifdef CONFIG_PM_SLEEP
71static int acpi_power_resume(struct device *dev);
72#endif
73static SIMPLE_DEV_PM_OPS(acpi_power_pm, NULL, acpi_power_resume);
74
71static struct acpi_driver acpi_power_driver = { 75static struct acpi_driver acpi_power_driver = {
72 .name = "power", 76 .name = "power",
73 .class = ACPI_POWER_CLASS, 77 .class = ACPI_POWER_CLASS,
@@ -75,8 +79,8 @@ static struct acpi_driver acpi_power_driver = {
75 .ops = { 79 .ops = {
76 .add = acpi_power_add, 80 .add = acpi_power_add,
77 .remove = acpi_power_remove, 81 .remove = acpi_power_remove,
78 .resume = acpi_power_resume,
79 }, 82 },
83 .drv.pm = &acpi_power_pm,
80}; 84};
81 85
82/* 86/*
@@ -390,6 +394,7 @@ void acpi_power_resource_unregister_device(struct device *dev, acpi_handle handl
390 __acpi_power_resource_unregister_device(dev, 394 __acpi_power_resource_unregister_device(dev,
391 list->handles[i]); 395 list->handles[i]);
392} 396}
397EXPORT_SYMBOL_GPL(acpi_power_resource_unregister_device);
393 398
394static int __acpi_power_resource_register_device( 399static int __acpi_power_resource_register_device(
395 struct acpi_power_managed_device *powered_device, acpi_handle handle) 400 struct acpi_power_managed_device *powered_device, acpi_handle handle)
@@ -460,6 +465,7 @@ no_power_resource:
460 printk(KERN_WARNING PREFIX "Invalid Power Resource to register!"); 465 printk(KERN_WARNING PREFIX "Invalid Power Resource to register!");
461 return -ENODEV; 466 return -ENODEV;
462} 467}
468EXPORT_SYMBOL_GPL(acpi_power_resource_register_device);
463 469
464/** 470/**
465 * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in 471 * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in
@@ -771,14 +777,17 @@ static int acpi_power_remove(struct acpi_device *device, int type)
771 return 0; 777 return 0;
772} 778}
773 779
774static int acpi_power_resume(struct acpi_device *device) 780#ifdef CONFIG_PM_SLEEP
781static int acpi_power_resume(struct device *dev)
775{ 782{
776 int result = 0, state; 783 int result = 0, state;
784 struct acpi_device *device;
777 struct acpi_power_resource *resource; 785 struct acpi_power_resource *resource;
778 786
779 if (!device) 787 if (!dev)
780 return -EINVAL; 788 return -EINVAL;
781 789
790 device = to_acpi_device(dev);
782 resource = acpi_driver_data(device); 791 resource = acpi_driver_data(device);
783 if (!resource) 792 if (!resource)
784 return -EINVAL; 793 return -EINVAL;
@@ -797,6 +806,7 @@ static int acpi_power_resume(struct acpi_device *device)
797 806
798 return result; 807 return result;
799} 808}
809#endif
800 810
801int __init acpi_power_init(void) 811int __init acpi_power_init(void)
802{ 812{
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index c850de4c9a14..eff722278ff5 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -189,10 +189,12 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
189 * Processor (CPU3, 0x03, 0x00000410, 0x06) {} 189 * Processor (CPU3, 0x03, 0x00000410, 0x06) {}
190 * } 190 * }
191 * 191 *
192 * Ignores apic_id and always return 0 for CPU0's handle. 192 * Ignores apic_id and always returns 0 for the processor
193 * handle with acpi id 0 if nr_cpu_ids is 1.
194 * This should be the case if SMP tables are not found.
193 * Return -1 for other CPU's handle. 195 * Return -1 for other CPU's handle.
194 */ 196 */
195 if (acpi_id == 0) 197 if (nr_cpu_ids <= 1 && acpi_id == 0)
196 return acpi_id; 198 return acpi_id;
197 else 199 else
198 return apic_id; 200 return apic_id;
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 0734086537b8..bfc31cb0dd3e 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -93,6 +93,9 @@ static const struct acpi_device_id processor_device_ids[] = {
93}; 93};
94MODULE_DEVICE_TABLE(acpi, processor_device_ids); 94MODULE_DEVICE_TABLE(acpi, processor_device_ids);
95 95
96static SIMPLE_DEV_PM_OPS(acpi_processor_pm,
97 acpi_processor_suspend, acpi_processor_resume);
98
96static struct acpi_driver acpi_processor_driver = { 99static struct acpi_driver acpi_processor_driver = {
97 .name = "processor", 100 .name = "processor",
98 .class = ACPI_PROCESSOR_CLASS, 101 .class = ACPI_PROCESSOR_CLASS,
@@ -100,10 +103,9 @@ static struct acpi_driver acpi_processor_driver = {
100 .ops = { 103 .ops = {
101 .add = acpi_processor_add, 104 .add = acpi_processor_add,
102 .remove = acpi_processor_remove, 105 .remove = acpi_processor_remove,
103 .suspend = acpi_processor_suspend,
104 .resume = acpi_processor_resume,
105 .notify = acpi_processor_notify, 106 .notify = acpi_processor_notify,
106 }, 107 },
108 .drv.pm = &acpi_processor_pm,
107}; 109};
108 110
109#define INSTALL_NOTIFY_HANDLER 1 111#define INSTALL_NOTIFY_HANDLER 1
@@ -427,22 +429,15 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
427 * Initialize missing things 429 * Initialize missing things
428 */ 430 */
429 if (pr->flags.need_hotplug_init) { 431 if (pr->flags.need_hotplug_init) {
430 struct cpuidle_driver *idle_driver =
431 cpuidle_get_driver();
432
433 printk(KERN_INFO "Will online and init hotplugged " 432 printk(KERN_INFO "Will online and init hotplugged "
434 "CPU: %d\n", pr->id); 433 "CPU: %d\n", pr->id);
435 WARN(acpi_processor_start(pr), "Failed to start CPU:" 434 WARN(acpi_processor_start(pr), "Failed to start CPU:"
436 " %d\n", pr->id); 435 " %d\n", pr->id);
437 pr->flags.need_hotplug_init = 0; 436 pr->flags.need_hotplug_init = 0;
438 if (idle_driver && !strcmp(idle_driver->name,
439 "intel_idle")) {
440 intel_idle_cpu_init(pr->id);
441 }
442 /* Normal CPU soft online event */ 437 /* Normal CPU soft online event */
443 } else { 438 } else {
444 acpi_processor_ppc_has_changed(pr, 0); 439 acpi_processor_ppc_has_changed(pr, 0);
445 acpi_processor_cst_has_changed(pr); 440 acpi_processor_hotplug(pr);
446 acpi_processor_reevaluate_tstate(pr, action); 441 acpi_processor_reevaluate_tstate(pr, action);
447 acpi_processor_tstate_has_changed(pr); 442 acpi_processor_tstate_has_changed(pr);
448 } 443 }
@@ -701,9 +696,9 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
701{ 696{
702 struct acpi_processor *pr; 697 struct acpi_processor *pr;
703 struct acpi_device *device = NULL; 698 struct acpi_device *device = NULL;
699 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
704 int result; 700 int result;
705 701
706
707 switch (event) { 702 switch (event) {
708 case ACPI_NOTIFY_BUS_CHECK: 703 case ACPI_NOTIFY_BUS_CHECK:
709 case ACPI_NOTIFY_DEVICE_CHECK: 704 case ACPI_NOTIFY_DEVICE_CHECK:
@@ -715,14 +710,18 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
715 if (!is_processor_present(handle)) 710 if (!is_processor_present(handle))
716 break; 711 break;
717 712
718 if (acpi_bus_get_device(handle, &device)) { 713 if (!acpi_bus_get_device(handle, &device))
719 result = acpi_processor_device_add(handle, &device); 714 break;
720 if (result) 715
721 printk(KERN_ERR PREFIX 716 result = acpi_processor_device_add(handle, &device);
722 "Unable to add the device\n"); 717 if (result) {
718 printk(KERN_ERR PREFIX "Unable to add the device\n");
723 break; 719 break;
724 } 720 }
721
722 ost_code = ACPI_OST_SC_SUCCESS;
725 break; 723 break;
724
726 case ACPI_NOTIFY_EJECT_REQUEST: 725 case ACPI_NOTIFY_EJECT_REQUEST:
727 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 726 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
728 "received ACPI_NOTIFY_EJECT_REQUEST\n")); 727 "received ACPI_NOTIFY_EJECT_REQUEST\n"));
@@ -736,15 +735,23 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
736 if (!pr) { 735 if (!pr) {
737 printk(KERN_ERR PREFIX 736 printk(KERN_ERR PREFIX
738 "Driver data is NULL, dropping EJECT\n"); 737 "Driver data is NULL, dropping EJECT\n");
739 return; 738 break;
740 } 739 }
740
741 /* REVISIT: update when eject is supported */
742 ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
741 break; 743 break;
744
742 default: 745 default:
743 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 746 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
744 "Unsupported event [0x%x]\n", event)); 747 "Unsupported event [0x%x]\n", event));
745 break; 748
749 /* non-hotplug event; possibly handled by other handler */
750 return;
746 } 751 }
747 752
753 /* Inform firmware that the hotplug operation has completed */
754 (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
748 return; 755 return;
749} 756}
750 757
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f3decb30223f..ad3730b4038b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -221,9 +221,6 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
221 221
222#endif 222#endif
223 223
224/*
225 * Suspend / resume control
226 */
227static u32 saved_bm_rld; 224static u32 saved_bm_rld;
228 225
229static void acpi_idle_bm_rld_save(void) 226static void acpi_idle_bm_rld_save(void)
@@ -240,13 +237,13 @@ static void acpi_idle_bm_rld_restore(void)
240 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld); 237 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
241} 238}
242 239
243int acpi_processor_suspend(struct acpi_device * device, pm_message_t state) 240int acpi_processor_suspend(struct device *dev)
244{ 241{
245 acpi_idle_bm_rld_save(); 242 acpi_idle_bm_rld_save();
246 return 0; 243 return 0;
247} 244}
248 245
249int acpi_processor_resume(struct acpi_device * device) 246int acpi_processor_resume(struct device *dev)
250{ 247{
251 acpi_idle_bm_rld_restore(); 248 acpi_idle_bm_rld_restore();
252 return 0; 249 return 0;
@@ -304,16 +301,16 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
304 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5; 301 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
305 302
306 /* determine latencies from FADT */ 303 /* determine latencies from FADT */
307 pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency; 304 pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
308 pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency; 305 pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;
309 306
310 /* 307 /*
311 * FADT specified C2 latency must be less than or equal to 308 * FADT specified C2 latency must be less than or equal to
312 * 100 microseconds. 309 * 100 microseconds.
313 */ 310 */
314 if (acpi_gbl_FADT.C2latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { 311 if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
315 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 312 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
316 "C2 latency too large [%d]\n", acpi_gbl_FADT.C2latency)); 313 "C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
317 /* invalidate C2 */ 314 /* invalidate C2 */
318 pr->power.states[ACPI_STATE_C2].address = 0; 315 pr->power.states[ACPI_STATE_C2].address = 0;
319 } 316 }
@@ -322,9 +319,9 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
322 * FADT supplied C3 latency must be less than or equal to 319 * FADT supplied C3 latency must be less than or equal to
323 * 1000 microseconds. 320 * 1000 microseconds.
324 */ 321 */
325 if (acpi_gbl_FADT.C3latency > ACPI_PROCESSOR_MAX_C3_LATENCY) { 322 if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
326 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 323 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
327 "C3 latency too large [%d]\n", acpi_gbl_FADT.C3latency)); 324 "C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
328 /* invalidate C3 */ 325 /* invalidate C3 */
329 pr->power.states[ACPI_STATE_C3].address = 0; 326 pr->power.states[ACPI_STATE_C3].address = 0;
330 } 327 }
@@ -586,7 +583,6 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
586 */ 583 */
587 cx->valid = 1; 584 cx->valid = 1;
588 585
589 cx->latency_ticks = cx->latency;
590 /* 586 /*
591 * On older chipsets, BM_RLD needs to be set 587 * On older chipsets, BM_RLD needs to be set
592 * in order for Bus Master activity to wake the 588 * in order for Bus Master activity to wake the
@@ -619,7 +615,6 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
619 if (!cx->address) 615 if (!cx->address)
620 break; 616 break;
621 cx->valid = 1; 617 cx->valid = 1;
622 cx->latency_ticks = cx->latency; /* Normalize latency */
623 break; 618 break;
624 619
625 case ACPI_STATE_C3: 620 case ACPI_STATE_C3:
@@ -754,6 +749,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
754 749
755 local_irq_disable(); 750 local_irq_disable();
756 751
752
757 lapic_timer_state_broadcast(pr, cx, 1); 753 lapic_timer_state_broadcast(pr, cx, 1);
758 kt1 = ktime_get_real(); 754 kt1 = ktime_get_real();
759 acpi_idle_do_entry(cx); 755 acpi_idle_do_entry(cx);
@@ -764,7 +760,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
764 dev->last_residency = (int)idle_time; 760 dev->last_residency = (int)idle_time;
765 761
766 local_irq_enable(); 762 local_irq_enable();
767 cx->usage++;
768 lapic_timer_state_broadcast(pr, cx, 0); 763 lapic_timer_state_broadcast(pr, cx, 0);
769 764
770 return index; 765 return index;
@@ -823,6 +818,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
823 818
824 local_irq_disable(); 819 local_irq_disable();
825 820
821
826 if (cx->entry_method != ACPI_CSTATE_FFH) { 822 if (cx->entry_method != ACPI_CSTATE_FFH) {
827 current_thread_info()->status &= ~TS_POLLING; 823 current_thread_info()->status &= ~TS_POLLING;
828 /* 824 /*
@@ -866,10 +862,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
866 if (cx->entry_method != ACPI_CSTATE_FFH) 862 if (cx->entry_method != ACPI_CSTATE_FFH)
867 current_thread_info()->status |= TS_POLLING; 863 current_thread_info()->status |= TS_POLLING;
868 864
869 cx->usage++;
870
871 lapic_timer_state_broadcast(pr, cx, 0); 865 lapic_timer_state_broadcast(pr, cx, 0);
872 cx->time += idle_time;
873 return index; 866 return index;
874} 867}
875 868
@@ -909,12 +902,13 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
909 local_irq_disable(); 902 local_irq_disable();
910 acpi_safe_halt(); 903 acpi_safe_halt();
911 local_irq_enable(); 904 local_irq_enable();
912 return -EINVAL; 905 return -EBUSY;
913 } 906 }
914 } 907 }
915 908
916 local_irq_disable(); 909 local_irq_disable();
917 910
911
918 if (cx->entry_method != ACPI_CSTATE_FFH) { 912 if (cx->entry_method != ACPI_CSTATE_FFH) {
919 current_thread_info()->status &= ~TS_POLLING; 913 current_thread_info()->status &= ~TS_POLLING;
920 /* 914 /*
@@ -986,10 +980,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
986 if (cx->entry_method != ACPI_CSTATE_FFH) 980 if (cx->entry_method != ACPI_CSTATE_FFH)
987 current_thread_info()->status |= TS_POLLING; 981 current_thread_info()->status |= TS_POLLING;
988 982
989 cx->usage++;
990
991 lapic_timer_state_broadcast(pr, cx, 0); 983 lapic_timer_state_broadcast(pr, cx, 0);
992 cx->time += idle_time;
993 return index; 984 return index;
994} 985}
995 986
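
The processor_idle.c hunks delete the driver-local statistics (cx->usage, cx->time, cx->latency_ticks): with the cpuidle rework, the core keeps per-state usage and residency counters itself, so an enter callback only measures its own residency, stores it in dev->last_residency, and returns the state index, or a negative value such as the new -EBUSY when bus-master activity forced a fallback to halt. A skeletal sketch of the resulting callback (C-state entry and LAPIC broadcast elided):

/* skeletal enter callback after this series; statistics now live in
 * the cpuidle core, not in the ACPI driver */
static int sample_idle_enter(struct cpuidle_device *dev,
			     struct cpuidle_driver *drv, int index)
{
	ktime_t kt1, kt2;
	s64 idle_time;

	local_irq_disable();
	kt1 = ktime_get_real();
	/* ... acpi_idle_do_entry(cx) or equivalent ... */
	kt2 = ktime_get_real();
	idle_time = ktime_to_us(ktime_sub(kt2, kt1));

	/* the core turns last_residency into usage/time statistics */
	dev->last_residency = (int)idle_time;

	local_irq_enable();
	return index;	/* -EBUSY instead, if we fell back to halt */
}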
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 6e36d0c0057c..ff0740e0a9c2 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -988,15 +988,19 @@ static void acpi_sbs_rmdirs(void)
988#endif 988#endif
989} 989}
990 990
991static int acpi_sbs_resume(struct acpi_device *device) 991#ifdef CONFIG_PM_SLEEP
992static int acpi_sbs_resume(struct device *dev)
992{ 993{
993 struct acpi_sbs *sbs; 994 struct acpi_sbs *sbs;
994 if (!device) 995 if (!dev)
995 return -EINVAL; 996 return -EINVAL;
996 sbs = device->driver_data; 997 sbs = to_acpi_device(dev)->driver_data;
997 acpi_sbs_callback(sbs); 998 acpi_sbs_callback(sbs);
998 return 0; 999 return 0;
999} 1000}
1001#endif
1002
1003static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume);
1000 1004
1001static struct acpi_driver acpi_sbs_driver = { 1005static struct acpi_driver acpi_sbs_driver = {
1002 .name = "sbs", 1006 .name = "sbs",
@@ -1005,8 +1009,8 @@ static struct acpi_driver acpi_sbs_driver = {
1005 .ops = { 1009 .ops = {
1006 .add = acpi_sbs_add, 1010 .add = acpi_sbs_add,
1007 .remove = acpi_sbs_remove, 1011 .remove = acpi_sbs_remove,
1008 .resume = acpi_sbs_resume,
1009 }, 1012 },
1013 .drv.pm = &acpi_sbs_pm,
1010}; 1014};
1011 1015
1012static int __init acpi_sbs_init(void) 1016static int __init acpi_sbs_init(void)
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index c8a1f3b68110..d1ecca2b641a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -83,19 +83,29 @@ acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, cha
83} 83}
84static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL); 84static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
85 85
86static void acpi_bus_hot_remove_device(void *context) 86/**
87 * acpi_bus_hot_remove_device: hot-remove a device and its children
88 * @context: struct acpi_eject_event pointer (freed in this func)
89 *
90 * Hot-remove a device and its children. This function frees up the
91 * memory space passed by arg context, so that the caller may call
92 * this function asynchronously through acpi_os_hotplug_execute().
93 */
94void acpi_bus_hot_remove_device(void *context)
87{ 95{
96 struct acpi_eject_event *ej_event = (struct acpi_eject_event *) context;
88 struct acpi_device *device; 97 struct acpi_device *device;
89 acpi_handle handle = context; 98 acpi_handle handle = ej_event->handle;
90 struct acpi_object_list arg_list; 99 struct acpi_object_list arg_list;
91 union acpi_object arg; 100 union acpi_object arg;
92 acpi_status status = AE_OK; 101 acpi_status status = AE_OK;
102 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
93 103
94 if (acpi_bus_get_device(handle, &device)) 104 if (acpi_bus_get_device(handle, &device))
95 return; 105 goto err_out;
96 106
97 if (!device) 107 if (!device)
98 return; 108 goto err_out;
99 109
100 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 110 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
101 "Hot-removing device %s...\n", dev_name(&device->dev))); 111 "Hot-removing device %s...\n", dev_name(&device->dev)));
@@ -103,7 +113,7 @@ static void acpi_bus_hot_remove_device(void *context)
103 if (acpi_bus_trim(device, 1)) { 113 if (acpi_bus_trim(device, 1)) {
104 printk(KERN_ERR PREFIX 114 printk(KERN_ERR PREFIX
105 "Removing device failed\n"); 115 "Removing device failed\n");
106 return; 116 goto err_out;
107 } 117 }
108 118
109 /* power off device */ 119 /* power off device */
@@ -129,10 +139,21 @@ static void acpi_bus_hot_remove_device(void *context)
129 * TBD: _EJD support. 139 * TBD: _EJD support.
130 */ 140 */
131 status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL); 141 status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL);
132 if (ACPI_FAILURE(status)) 142 if (ACPI_FAILURE(status)) {
133 printk(KERN_WARNING PREFIX 143 if (status != AE_NOT_FOUND)
134 "Eject device failed\n"); 144 printk(KERN_WARNING PREFIX
145 "Eject device failed\n");
146 goto err_out;
147 }
148
149 kfree(context);
150 return;
135 151
152err_out:
153 /* Inform firmware the hot-remove operation has completed w/ error */
154 (void) acpi_evaluate_hotplug_ost(handle,
155 ej_event->event, ost_code, NULL);
156 kfree(context);
136 return; 157 return;
137} 158}
138 159
@@ -144,6 +165,7 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
144 acpi_status status; 165 acpi_status status;
145 acpi_object_type type = 0; 166 acpi_object_type type = 0;
146 struct acpi_device *acpi_device = to_acpi_device(d); 167 struct acpi_device *acpi_device = to_acpi_device(d);
168 struct acpi_eject_event *ej_event;
147 169
148 if ((!count) || (buf[0] != '1')) { 170 if ((!count) || (buf[0] != '1')) {
149 return -EINVAL; 171 return -EINVAL;
@@ -160,7 +182,25 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
160 goto err; 182 goto err;
161 } 183 }
162 184
163 acpi_os_hotplug_execute(acpi_bus_hot_remove_device, acpi_device->handle); 185 ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
186 if (!ej_event) {
187 ret = -ENOMEM;
188 goto err;
189 }
190
191 ej_event->handle = acpi_device->handle;
192 if (acpi_device->flags.eject_pending) {
193 /* event originated from ACPI eject notification */
194 ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
195 acpi_device->flags.eject_pending = 0;
196 } else {
197 /* event originated from user */
198 ej_event->event = ACPI_OST_EC_OSPM_EJECT;
199 (void) acpi_evaluate_hotplug_ost(ej_event->handle,
200 ej_event->event, ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
201 }
202
203 acpi_os_hotplug_execute(acpi_bus_hot_remove_device, (void *)ej_event);
164err: 204err:
165 return ret; 205 return ret;
166} 206}
@@ -290,26 +330,6 @@ static void acpi_device_release(struct device *dev)
290 kfree(acpi_dev); 330 kfree(acpi_dev);
291} 331}
292 332
293static int acpi_device_suspend(struct device *dev, pm_message_t state)
294{
295 struct acpi_device *acpi_dev = to_acpi_device(dev);
296 struct acpi_driver *acpi_drv = acpi_dev->driver;
297
298 if (acpi_drv && acpi_drv->ops.suspend)
299 return acpi_drv->ops.suspend(acpi_dev, state);
300 return 0;
301}
302
303static int acpi_device_resume(struct device *dev)
304{
305 struct acpi_device *acpi_dev = to_acpi_device(dev);
306 struct acpi_driver *acpi_drv = acpi_dev->driver;
307
308 if (acpi_drv && acpi_drv->ops.resume)
309 return acpi_drv->ops.resume(acpi_dev);
310 return 0;
311}
312
313static int acpi_bus_match(struct device *dev, struct device_driver *drv) 333static int acpi_bus_match(struct device *dev, struct device_driver *drv)
314{ 334{
315 struct acpi_device *acpi_dev = to_acpi_device(dev); 335 struct acpi_device *acpi_dev = to_acpi_device(dev);
@@ -441,8 +461,6 @@ static int acpi_device_remove(struct device * dev)
441 461
442struct bus_type acpi_bus_type = { 462struct bus_type acpi_bus_type = {
443 .name = "acpi", 463 .name = "acpi",
444 .suspend = acpi_device_suspend,
445 .resume = acpi_device_resume,
446 .match = acpi_bus_match, 464 .match = acpi_bus_match,
447 .probe = acpi_device_probe, 465 .probe = acpi_device_probe,
448 .remove = acpi_device_remove, 466 .remove = acpi_device_remove,
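
acpi_bus_hot_remove_device() now takes a heap-allocated struct acpi_eject_event instead of a bare handle: the work runs asynchronously via acpi_os_hotplug_execute(), and the worker needs the originating event code to report completion through _OST. Ownership of the allocation passes to the worker, which frees it on every path, so the submitter must not touch it after queueing. A sketch of the hand-off (queue_hot_remove is a hypothetical helper, not kernel code):

/* hypothetical helper showing the hand-off used by acpi_eject_store() */
static int queue_hot_remove(struct acpi_device *device)
{
	struct acpi_eject_event *ej_event;
	acpi_status status;

	ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
	if (!ej_event)
		return -ENOMEM;

	ej_event->handle = device->handle;
	ej_event->event  = ACPI_OST_EC_OSPM_EJECT;	/* user-initiated eject */

	status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
					 ej_event);
	if (ACPI_FAILURE(status)) {
		kfree(ej_event);	/* never queued; still ours to free */
		return -EAGAIN;
	}
	return 0;	/* the worker now owns and will free ej_event */
}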
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 88561029cca8..fdcdbb652915 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -28,36 +28,7 @@
28#include "internal.h" 28#include "internal.h"
29#include "sleep.h" 29#include "sleep.h"
30 30
31u8 wake_sleep_flags = ACPI_NO_OPTIONAL_METHODS;
32static unsigned int gts, bfs;
33static int set_param_wake_flag(const char *val, struct kernel_param *kp)
34{
35 int ret = param_set_int(val, kp);
36
37 if (ret)
38 return ret;
39
40 if (kp->arg == (const char *)&gts) {
41 if (gts)
42 wake_sleep_flags |= ACPI_EXECUTE_GTS;
43 else
44 wake_sleep_flags &= ~ACPI_EXECUTE_GTS;
45 }
46 if (kp->arg == (const char *)&bfs) {
47 if (bfs)
48 wake_sleep_flags |= ACPI_EXECUTE_BFS;
49 else
50 wake_sleep_flags &= ~ACPI_EXECUTE_BFS;
51 }
52 return ret;
53}
54module_param_call(gts, set_param_wake_flag, param_get_int, &gts, 0644);
55module_param_call(bfs, set_param_wake_flag, param_get_int, &bfs, 0644);
56MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
57MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume.");
58
59static u8 sleep_states[ACPI_S_STATE_COUNT]; 31static u8 sleep_states[ACPI_S_STATE_COUNT];
60static bool pwr_btn_event_pending;
61 32
62static void acpi_sleep_tts_switch(u32 acpi_state) 33static void acpi_sleep_tts_switch(u32 acpi_state)
63{ 34{
@@ -110,6 +81,7 @@ static int acpi_sleep_prepare(u32 acpi_state)
110 81
111#ifdef CONFIG_ACPI_SLEEP 82#ifdef CONFIG_ACPI_SLEEP
112static u32 acpi_target_sleep_state = ACPI_STATE_S0; 83static u32 acpi_target_sleep_state = ACPI_STATE_S0;
84static bool pwr_btn_event_pending;
113 85
114/* 86/*
115 * The ACPI specification wants us to save NVS memory regions during hibernation 87 * The ACPI specification wants us to save NVS memory regions during hibernation
@@ -143,7 +115,7 @@ void __init acpi_old_suspend_ordering(void)
143static int acpi_pm_freeze(void) 115static int acpi_pm_freeze(void)
144{ 116{
145 acpi_disable_all_gpes(); 117 acpi_disable_all_gpes();
146 acpi_os_wait_events_complete(NULL); 118 acpi_os_wait_events_complete();
147 acpi_ec_block_transactions(); 119 acpi_ec_block_transactions();
148 return 0; 120 return 0;
149} 121}
@@ -305,7 +277,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
305 switch (acpi_state) { 277 switch (acpi_state) {
306 case ACPI_STATE_S1: 278 case ACPI_STATE_S1:
307 barrier(); 279 barrier();
308 status = acpi_enter_sleep_state(acpi_state, wake_sleep_flags); 280 status = acpi_enter_sleep_state(acpi_state);
309 break; 281 break;
310 282
311 case ACPI_STATE_S3: 283 case ACPI_STATE_S3:
@@ -319,8 +291,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
319 /* This violates the spec but is required for bug compatibility. */ 291 /* This violates the spec but is required for bug compatibility. */
320 acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); 292 acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
321 293
322 /* Reprogram control registers and execute _BFS */ 294 /* Reprogram control registers */
323 acpi_leave_sleep_state_prep(acpi_state, wake_sleep_flags); 295 acpi_leave_sleep_state_prep(acpi_state);
324 296
325 /* ACPI 3.0 specs (P62) says that it's the responsibility 297 /* ACPI 3.0 specs (P62) says that it's the responsibility
326 * of the OSPM to clear the status bit [ implying that the 298 * of the OSPM to clear the status bit [ implying that the
@@ -603,9 +575,9 @@ static int acpi_hibernation_enter(void)
603 ACPI_FLUSH_CPU_CACHE(); 575 ACPI_FLUSH_CPU_CACHE();
604 576
605 /* This shouldn't return. If it returns, we have a problem */ 577 /* This shouldn't return. If it returns, we have a problem */
606 status = acpi_enter_sleep_state(ACPI_STATE_S4, wake_sleep_flags); 578 status = acpi_enter_sleep_state(ACPI_STATE_S4);
607 /* Reprogram control registers and execute _BFS */ 579 /* Reprogram control registers */
608 acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags); 580 acpi_leave_sleep_state_prep(ACPI_STATE_S4);
609 581
610 return ACPI_SUCCESS(status) ? 0 : -EFAULT; 582 return ACPI_SUCCESS(status) ? 0 : -EFAULT;
611} 583}
@@ -617,8 +589,8 @@ static void acpi_hibernation_leave(void)
617 * enable it here. 589 * enable it here.
618 */ 590 */
619 acpi_enable(); 591 acpi_enable();
620 /* Reprogram control registers and execute _BFS */ 592 /* Reprogram control registers */
621 acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags); 593 acpi_leave_sleep_state_prep(ACPI_STATE_S4);
622 /* Check the hardware signature */ 594 /* Check the hardware signature */
623 if (facs && s4_hardware_signature != facs->hardware_signature) { 595 if (facs && s4_hardware_signature != facs->hardware_signature) {
624 printk(KERN_EMERG "ACPI: Hardware changed while hibernated, " 596 printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
@@ -716,8 +688,9 @@ int acpi_suspend(u32 acpi_state)
716 * @dev: device to examine; its driver model wakeup flags control 688 * @dev: device to examine; its driver model wakeup flags control
717 * whether it should be able to wake up the system 689 * whether it should be able to wake up the system
718 * @d_min_p: used to store the upper limit of allowed states range 690 * @d_min_p: used to store the upper limit of allowed states range
719 * Return value: preferred power state of the device on success, -ENODEV on 691 * @d_max_in: specify the lowest allowed state
720 * failure (ie. if there's no 'struct acpi_device' for @dev) 692 * Return value: preferred power state of the device on success, -ENODEV
693 * (ie. if there's no 'struct acpi_device' for @dev) or -EINVAL on failure
721 * 694 *
722 * Find the lowest power (highest number) ACPI device power state that 695 * Find the lowest power (highest number) ACPI device power state that
723 * device @dev can be in while the system is in the sleep state represented 696 * device @dev can be in while the system is in the sleep state represented
@@ -732,13 +705,15 @@ int acpi_suspend(u32 acpi_state)
732 * via @wake. 705 * via @wake.
733 */ 706 */
734 707
735int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p) 708int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
736{ 709{
737 acpi_handle handle = DEVICE_ACPI_HANDLE(dev); 710 acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
738 struct acpi_device *adev; 711 struct acpi_device *adev;
739 char acpi_method[] = "_SxD"; 712 char acpi_method[] = "_SxD";
740 unsigned long long d_min, d_max; 713 unsigned long long d_min, d_max;
741 714
715 if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3)
716 return -EINVAL;
742 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { 717 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
743 printk(KERN_DEBUG "ACPI handle has no context!\n"); 718 printk(KERN_DEBUG "ACPI handle has no context!\n");
744 return -ENODEV; 719 return -ENODEV;
@@ -746,8 +721,10 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
746 721
747 acpi_method[2] = '0' + acpi_target_sleep_state; 722 acpi_method[2] = '0' + acpi_target_sleep_state;
748 /* 723 /*
749 * If the sleep state is S0, we will return D3, but if the device has 724 * If the sleep state is S0, the lowest limit from ACPI is D3,
750 * _S0W, we will use the value from _S0W 725 * but if the device has _S0W, we will use the value from _S0W
726 * as the lowest limit from ACPI. Finally, we will constrain
727 * the lowest limit with the specified one.
751 */ 728 */
752 d_min = ACPI_STATE_D0; 729 d_min = ACPI_STATE_D0;
753 d_max = ACPI_STATE_D3; 730 d_max = ACPI_STATE_D3;
@@ -791,10 +768,20 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
791 } 768 }
792 } 769 }
793 770
771 if (d_max_in < d_min)
772 return -EINVAL;
794 if (d_min_p) 773 if (d_min_p)
795 *d_min_p = d_min; 774 *d_min_p = d_min;
775 /* constrain d_max with specified lowest limit (max number) */
776 if (d_max > d_max_in) {
777 for (d_max = d_max_in; d_max > d_min; d_max--) {
778 if (adev->power.states[d_max].flags.valid)
779 break;
780 }
781 }
796 return d_max; 782 return d_max;
797} 783}
784EXPORT_SYMBOL(acpi_pm_device_sleep_state);
798#endif /* CONFIG_PM */ 785#endif /* CONFIG_PM */
799 786
800#ifdef CONFIG_PM_SLEEP 787#ifdef CONFIG_PM_SLEEP
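
acpi_pm_device_sleep_state() grows a third argument, d_max_in, letting the caller cap how deep the device may be put (a larger D number is a deeper, lower-power state). The function rejects caps outside D0..D3, derives the firmware limits from _SxD/_SxW as before, then walks down from the cap to the nearest state the device actually supports. A hypothetical caller of the new signature:

/* hypothetical caller: deepest state for the target sleep state,
 * but never deeper than D2, whatever _SxD/_SxW would permit */
static int choose_target_state(struct device *dev)
{
	int d_min, d_max;

	d_max = acpi_pm_device_sleep_state(dev, &d_min, ACPI_STATE_D2);
	if (d_max < 0)
		return d_max;	/* -ENODEV (no ACPI node) or -EINVAL (bad cap) */

	/* any state in [d_min, d_max] is legal; deeper saves more power */
	return d_max;
}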
@@ -831,6 +818,7 @@ int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
831 818
832 return 0; 819 return 0;
833} 820}
821EXPORT_SYMBOL(acpi_pm_device_run_wake);
834 822
835/** 823/**
836 * acpi_pm_device_sleep_wake - enable or disable the system wake-up 824 * acpi_pm_device_sleep_wake - enable or disable the system wake-up
@@ -876,33 +864,7 @@ static void acpi_power_off(void)
876 /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ 864 /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
877 printk(KERN_DEBUG "%s called\n", __func__); 865 printk(KERN_DEBUG "%s called\n", __func__);
878 local_irq_disable(); 866 local_irq_disable();
879 acpi_enter_sleep_state(ACPI_STATE_S5, wake_sleep_flags); 867 acpi_enter_sleep_state(ACPI_STATE_S5);
880}
881
882/*
883 * ACPI 2.0 created the optional _GTS and _BFS,
884 * but industry adoption has been neither rapid nor broad.
885 *
886 * Linux gets into trouble when it executes poorly validated
887 * paths through the BIOS, so disable _GTS and _BFS by default,
888 * but do speak up and offer the option to enable them.
889 */
890static void __init acpi_gts_bfs_check(void)
891{
892 acpi_handle dummy;
893
894 if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__GTS, &dummy)))
895 {
896 printk(KERN_NOTICE PREFIX "BIOS offers _GTS\n");
897 printk(KERN_NOTICE PREFIX "If \"acpi.gts=1\" improves suspend, "
898 "please notify linux-acpi@vger.kernel.org\n");
899 }
900 if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__BFS, &dummy)))
901 {
902 printk(KERN_NOTICE PREFIX "BIOS offers _BFS\n");
903 printk(KERN_NOTICE PREFIX "If \"acpi.bfs=1\" improves resume, "
904 "please notify linux-acpi@vger.kernel.org\n");
905 }
906} 868}
907 869
908int __init acpi_sleep_init(void) 870int __init acpi_sleep_init(void)
@@ -963,6 +925,5 @@ int __init acpi_sleep_init(void)
963 * object can also be evaluated when the system enters S5. 925 * object can also be evaluated when the system enters S5.
964 */ 926 */
965 register_reboot_notifier(&tts_notifier); 927 register_reboot_notifier(&tts_notifier);
966 acpi_gts_bfs_check();
967 return 0; 928 return 0;
968} 929}
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 9f66181c814e..7c3f98ba4afe 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
173{ 173{
174 int result = 0; 174 int result = 0;
175 175
176 if (!strncmp(val, "enable", strlen("enable") - 1)) { 176 if (!strncmp(val, "enable", sizeof("enable") - 1)) {
177 result = acpi_debug_trace(trace_method_name, trace_debug_level, 177 result = acpi_debug_trace(trace_method_name, trace_debug_level,
178 trace_debug_layer, 0); 178 trace_debug_layer, 0);
179 if (result) 179 if (result)
@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
181 goto exit; 181 goto exit;
182 } 182 }
183 183
184 if (!strncmp(val, "disable", strlen("disable") - 1)) { 184 if (!strncmp(val, "disable", sizeof("disable") - 1)) {
185 int name = 0; 185 int name = 0;
186 result = acpi_debug_trace((char *)&name, trace_debug_level, 186 result = acpi_debug_trace((char *)&name, trace_debug_level,
187 trace_debug_layer, 0); 187 trace_debug_layer, 0);
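The sysfs.c change is subtle: strlen("enable") - 1 is 5, so the old test matched any string whose first five bytes were "enabl"; sizeof("enable") - 1 is 6, the full word length, computed at compile time. A standalone illustration (userspace C, not kernel code):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *val = "enablx";	/* bogus input */

	/* old: compares only 5 bytes, wrongly accepts "enablx" -> 1 */
	printf("%d\n", !strncmp(val, "enable", strlen("enable") - 1));
	/* new: compares all 6 bytes, rejects it -> 0 */
	printf("%d\n", !strncmp(val, "enable", sizeof("enable") - 1));
	return 0;
}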
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 7dbebea1ec31..edda74a43406 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -98,7 +98,6 @@ MODULE_PARM_DESC(psv, "Disable or override all passive trip points.");
98 98
99static int acpi_thermal_add(struct acpi_device *device); 99static int acpi_thermal_add(struct acpi_device *device);
100static int acpi_thermal_remove(struct acpi_device *device, int type); 100static int acpi_thermal_remove(struct acpi_device *device, int type);
101static int acpi_thermal_resume(struct acpi_device *device);
102static void acpi_thermal_notify(struct acpi_device *device, u32 event); 101static void acpi_thermal_notify(struct acpi_device *device, u32 event);
103 102
104static const struct acpi_device_id thermal_device_ids[] = { 103static const struct acpi_device_id thermal_device_ids[] = {
@@ -107,6 +106,11 @@ static const struct acpi_device_id thermal_device_ids[] = {
107}; 106};
108MODULE_DEVICE_TABLE(acpi, thermal_device_ids); 107MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
109 108
109#ifdef CONFIG_PM_SLEEP
110static int acpi_thermal_resume(struct device *dev);
111#endif
112static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume);
113
110static struct acpi_driver acpi_thermal_driver = { 114static struct acpi_driver acpi_thermal_driver = {
111 .name = "thermal", 115 .name = "thermal",
112 .class = ACPI_THERMAL_CLASS, 116 .class = ACPI_THERMAL_CLASS,
@@ -114,9 +118,9 @@ static struct acpi_driver acpi_thermal_driver = {
114 .ops = { 118 .ops = {
115 .add = acpi_thermal_add, 119 .add = acpi_thermal_add,
116 .remove = acpi_thermal_remove, 120 .remove = acpi_thermal_remove,
117 .resume = acpi_thermal_resume,
118 .notify = acpi_thermal_notify, 121 .notify = acpi_thermal_notify,
119 }, 122 },
123 .drv.pm = &acpi_thermal_pm,
120}; 124};
121 125
122struct acpi_thermal_state { 126struct acpi_thermal_state {
@@ -550,8 +554,6 @@ static int thermal_get_temp(struct thermal_zone_device *thermal,
550 return 0; 554 return 0;
551} 555}
552 556
553static const char enabled[] = "kernel";
554static const char disabled[] = "user";
555static int thermal_get_mode(struct thermal_zone_device *thermal, 557static int thermal_get_mode(struct thermal_zone_device *thermal,
556 enum thermal_device_mode *mode) 558 enum thermal_device_mode *mode)
557{ 559{
@@ -588,8 +590,8 @@ static int thermal_set_mode(struct thermal_zone_device *thermal,
588 if (enable != tz->tz_enabled) { 590 if (enable != tz->tz_enabled) {
589 tz->tz_enabled = enable; 591 tz->tz_enabled = enable;
590 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 592 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
591 "%s ACPI thermal control\n", 593 "%s kernel ACPI thermal control\n",
592 tz->tz_enabled ? enabled : disabled)); 594 tz->tz_enabled ? "Enable" : "Disable"));
593 acpi_thermal_check(tz); 595 acpi_thermal_check(tz);
594 } 596 }
595 return 0; 597 return 0;
@@ -845,7 +847,7 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
845 847
846 if (tz->trips.passive.flags.valid) 848 if (tz->trips.passive.flags.valid)
847 tz->thermal_zone = 849 tz->thermal_zone =
848 thermal_zone_device_register("acpitz", trips, tz, 850 thermal_zone_device_register("acpitz", trips, 0, tz,
849 &acpi_thermal_zone_ops, 851 &acpi_thermal_zone_ops,
850 tz->trips.passive.tc1, 852 tz->trips.passive.tc1,
851 tz->trips.passive.tc2, 853 tz->trips.passive.tc2,
@@ -853,7 +855,7 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
853 tz->polling_frequency*100); 855 tz->polling_frequency*100);
854 else 856 else
855 tz->thermal_zone = 857 tz->thermal_zone =
856 thermal_zone_device_register("acpitz", trips, tz, 858 thermal_zone_device_register("acpitz", trips, 0, tz,
857 &acpi_thermal_zone_ops, 859 &acpi_thermal_zone_ops,
858 0, 0, 0, 860 0, 0, 0,
859 tz->polling_frequency*100); 861 tz->polling_frequency*100);
@@ -1041,16 +1043,18 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
1041 return 0; 1043 return 0;
1042} 1044}
1043 1045
1044static int acpi_thermal_resume(struct acpi_device *device) 1046#ifdef CONFIG_PM_SLEEP
1047static int acpi_thermal_resume(struct device *dev)
1045{ 1048{
1046 struct acpi_thermal *tz = NULL; 1049 struct acpi_thermal *tz;
1047 int i, j, power_state, result; 1050 int i, j, power_state, result;
1048 1051
1049 1052 if (!dev)
1050 if (!device || !acpi_driver_data(device))
1051 return -EINVAL; 1053 return -EINVAL;
1052 1054
1053 tz = acpi_driver_data(device); 1055 tz = acpi_driver_data(to_acpi_device(dev));
1056 if (!tz)
1057 return -EINVAL;
1054 1058
1055 for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) { 1059 for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
1056 if (!(&tz->trips.active[i])) 1060 if (!(&tz->trips.active[i]))
@@ -1074,6 +1078,7 @@ static int acpi_thermal_resume(struct acpi_device *device)
1074 1078
1075 return AE_OK; 1079 return AE_OK;
1076} 1080}
1081#endif
1077 1082
1078static int thermal_act(const struct dmi_system_id *d) { 1083static int thermal_act(const struct dmi_system_id *d) {
1079 1084
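The thermal.c conversion is the standard migration from bus-specific resume callbacks to dev_pm_ops. The shape of the pattern, with hypothetical names: SIMPLE_DEV_PM_OPS comes from <linux/pm.h> and produces an empty ops table when CONFIG_PM_SLEEP is off, so the driver needs no #ifdef around the .pm assignment itself.

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_resume(struct device *dev)
{
	/* struct device based, so it works for any bus type */
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(foo_pm, NULL, foo_resume);
/* ... then wire up .drv.pm = &foo_pm, as in the hunk above */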
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index adbbc1c80a26..3e87c9c538aa 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -412,3 +412,45 @@ out:
412 return status; 412 return status;
413} 413}
414EXPORT_SYMBOL(acpi_get_physical_device_location); 414EXPORT_SYMBOL(acpi_get_physical_device_location);
415
416/**
417 * acpi_evaluate_hotplug_ost: Evaluate _OST for hotplug operations
418 * @handle: ACPI device handle
419 * @source_event: source event code
420 * @status_code: status code
421 * @status_buf: optional detailed information (NULL if none)
422 *
423 * Evaluate _OST for hotplug operations. All ACPI hotplug handlers
424 * must call this function when evaluating _OST for hotplug operations.
425 * When the platform does not support _OST, this function has no effect.
426 */
427acpi_status
428acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event,
429 u32 status_code, struct acpi_buffer *status_buf)
430{
431#ifdef ACPI_HOTPLUG_OST
432 union acpi_object params[3] = {
433 {.type = ACPI_TYPE_INTEGER,},
434 {.type = ACPI_TYPE_INTEGER,},
435 {.type = ACPI_TYPE_BUFFER,}
436 };
437 struct acpi_object_list arg_list = {3, params};
438 acpi_status status;
439
440 params[0].integer.value = source_event;
441 params[1].integer.value = status_code;
442 if (status_buf != NULL) {
443 params[2].buffer.pointer = status_buf->pointer;
444 params[2].buffer.length = status_buf->length;
445 } else {
446 params[2].buffer.pointer = NULL;
447 params[2].buffer.length = 0;
448 }
449
450 status = acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
451 return status;
452#else
453 return AE_OK;
454#endif
455}
456EXPORT_SYMBOL(acpi_evaluate_hotplug_ost);
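A sketch of how a hotplug handler might report completion through the new helper. ACPI_NOTIFY_EJECT_REQUEST is the standard notify value; the status codes used here are the generic ones from the ACPI spec's _OST status table (0x0 = success, 0x80 = non-specific failure), and the function name around them is illustrative:

static void example_report_eject(acpi_handle handle, bool ok)
{
	acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
				  ok ? 0x0    /* success */
				     : 0x80,  /* non-specific failure */
				  NULL);
}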
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index a576575617d7..1e0a9e17c31d 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -558,6 +558,8 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
558 union acpi_object arg0 = { ACPI_TYPE_INTEGER }; 558 union acpi_object arg0 = { ACPI_TYPE_INTEGER };
559 struct acpi_object_list args = { 1, &arg0 }; 559 struct acpi_object_list args = { 1, &arg0 };
560 560
561 if (!video->cap._DOS)
562 return 0;
561 563
562 if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1) 564 if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
563 return -EINVAL; 565 return -EINVAL;
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 45d8097ef4cf..b728880ef10e 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -132,6 +132,33 @@ find_video(acpi_handle handle, u32 lvl, void *context, void **rv)
132 return AE_OK; 132 return AE_OK;
133} 133}
134 134
135/* Force use of the vendor driver when the ACPI device is known to be
136 * buggy */
137static int video_detect_force_vendor(const struct dmi_system_id *d)
138{
139 acpi_video_support |= ACPI_VIDEO_BACKLIGHT_DMI_VENDOR;
140 return 0;
141}
142
143static struct dmi_system_id video_detect_dmi_table[] = {
144 /* On Samsung X360, the BIOS will set a flag (VDRV) if the generic
145 * ACPI backlight device is used. This flag definitively breaks
146 * the backlight interface (even the vendor interface) until the
147 * next reboot, which is why we must prevent video.ko from being
148 * used here and cannot rely on a later call to acpi_video_unregister().
149 */
150 {
151 .callback = video_detect_force_vendor,
152 .ident = "X360",
153 .matches = {
154 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
155 DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
156 DMI_MATCH(DMI_BOARD_NAME, "X360"),
157 },
158 },
159 { },
160};
161
135/* 162/*
136 * Returns the video capabilities of a specific ACPI graphics device 163 * Returns the video capabilities of a specific ACPI graphics device
137 * 164 *
@@ -164,6 +191,8 @@ long acpi_video_get_capabilities(acpi_handle graphics_handle)
164 * ACPI_VIDEO_BACKLIGHT_DMI_VENDOR; 191 * ACPI_VIDEO_BACKLIGHT_DMI_VENDOR;
165 *} 192 *}
166 */ 193 */
194
195 dmi_check_system(video_detect_dmi_table);
167 } else { 196 } else {
168 status = acpi_bus_get_device(graphics_handle, &tmp_dev); 197 status = acpi_bus_get_device(graphics_handle, &tmp_dev);
169 if (ACPI_FAILURE(status)) { 198 if (ACPI_FAILURE(status)) {
@@ -182,8 +211,7 @@ long acpi_video_get_capabilities(acpi_handle graphics_handle)
182} 211}
183EXPORT_SYMBOL(acpi_video_get_capabilities); 212EXPORT_SYMBOL(acpi_video_get_capabilities);
184 213
185/* Returns true if video.ko can do backlight switching */ 214static void acpi_video_caps_check(void)
186int acpi_video_backlight_support(void)
187{ 215{
188 /* 216 /*
189 * We must check whether the ACPI graphics device is physically plugged 217 * We must check whether the ACPI graphics device is physically plugged
@@ -191,6 +219,34 @@ int acpi_video_backlight_support(void)
191 */ 219 */
192 if (!acpi_video_caps_checked) 220 if (!acpi_video_caps_checked)
193 acpi_video_get_capabilities(NULL); 221 acpi_video_get_capabilities(NULL);
222}
223
224/* Promote the vendor interface instead of the generic video module.
225 * This function allows DMI blacklists to be implemented by external
226 * platform drivers instead of putting a big blacklist in video_detect.c.
227 * After calling this function you will probably want to call
228 * acpi_video_unregister() to make sure the video module is not loaded.
229 */
230void acpi_video_dmi_promote_vendor(void)
231{
232 acpi_video_caps_check();
233 acpi_video_support |= ACPI_VIDEO_BACKLIGHT_DMI_VENDOR;
234}
235EXPORT_SYMBOL(acpi_video_dmi_promote_vendor);
236
237/* To be called by a driver that previously promoted the vendor
238 * interface and no longer needs it */
239void acpi_video_dmi_demote_vendor(void)
240{
241 acpi_video_caps_check();
242 acpi_video_support &= ~ACPI_VIDEO_BACKLIGHT_DMI_VENDOR;
243}
244EXPORT_SYMBOL(acpi_video_dmi_demote_vendor);
245
246/* Returns true if video.ko can do backlight switching */
247int acpi_video_backlight_support(void)
248{
249 acpi_video_caps_check();
194 250
195 /* First check for boot param -> highest prio */ 251 /* First check for boot param -> highest prio */
196 if (acpi_video_support & ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR) 252 if (acpi_video_support & ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR)
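A sketch of the intended calling sequence from a vendor platform driver (samsung-laptop is the sort of user this interface was written for; the probe function here is hypothetical):

static int example_vendor_probe(void)
{
	/* claim the backlight for the vendor interface ... */
	acpi_video_dmi_promote_vendor();
	/* ... and push video.ko out if it is already bound */
	acpi_video_unregister();
	return 0;
}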
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index b7e728517284..e8eb91bd0d28 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -16,9 +16,9 @@
16#include <linux/pm.h> 16#include <linux/pm.h>
17#include <linux/pm_runtime.h> 17#include <linux/pm_runtime.h>
18#include <linux/amba/bus.h> 18#include <linux/amba/bus.h>
19#include <linux/sizes.h>
19 20
20#include <asm/irq.h> 21#include <asm/irq.h>
21#include <asm/sizes.h>
22 22
23#define to_amba_driver(d) container_of(d, struct amba_driver, drv) 23#define to_amba_driver(d) container_of(d, struct amba_driver, drv)
24 24
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index aa0b1f160528..0b6f0b28a487 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -264,11 +264,6 @@ static int __devinit tegra_ahb_probe(struct platform_device *pdev)
264 return 0; 264 return 0;
265} 265}
266 266
267static int __devexit tegra_ahb_remove(struct platform_device *pdev)
268{
269 return 0;
270}
271
272static const struct of_device_id tegra_ahb_of_match[] __devinitconst = { 267static const struct of_device_id tegra_ahb_of_match[] __devinitconst = {
273 { .compatible = "nvidia,tegra30-ahb", }, 268 { .compatible = "nvidia,tegra30-ahb", },
274 { .compatible = "nvidia,tegra20-ahb", }, 269 { .compatible = "nvidia,tegra20-ahb", },
@@ -277,7 +272,6 @@ static const struct of_device_id tegra_ahb_of_match[] __devinitconst = {
277 272
278static struct platform_driver tegra_ahb_driver = { 273static struct platform_driver tegra_ahb_driver = {
279 .probe = tegra_ahb_probe, 274 .probe = tegra_ahb_probe,
280 .remove = __devexit_p(tegra_ahb_remove),
281 .driver = { 275 .driver = {
282 .name = DRV_NAME, 276 .name = DRV_NAME,
283 .owner = THIS_MODULE, 277 .owner = THIS_MODULE,
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 2be8ef1d3093..27cecd313e75 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -115,7 +115,7 @@ config SATA_SIL24
115 If unsure, say N. 115 If unsure, say N.
116 116
117config ATA_SFF 117config ATA_SFF
118 bool "ATA SFF support" 118 bool "ATA SFF support (for legacy IDE and PATA)"
119 default y 119 default y
120 help 120 help
121 This option adds support for ATA controllers with SFF 121 This option adds support for ATA controllers with SFF
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 3bc8c79bf2c7..4e94ba29cb8d 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -503,21 +503,10 @@ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id
503 &acard_ahci_sht); 503 &acard_ahci_sht);
504} 504}
505 505
506static int __init acard_ahci_init(void) 506module_pci_driver(acard_ahci_pci_driver);
507{
508 return pci_register_driver(&acard_ahci_pci_driver);
509}
510
511static void __exit acard_ahci_exit(void)
512{
513 pci_unregister_driver(&acard_ahci_pci_driver);
514}
515 507
516MODULE_AUTHOR("Jeff Garzik"); 508MODULE_AUTHOR("Jeff Garzik");
517MODULE_DESCRIPTION("ACard AHCI SATA low-level driver"); 509MODULE_DESCRIPTION("ACard AHCI SATA low-level driver");
518MODULE_LICENSE("GPL"); 510MODULE_LICENSE("GPL");
519MODULE_DEVICE_TABLE(pci, acard_ahci_pci_tbl); 511MODULE_DEVICE_TABLE(pci, acard_ahci_pci_tbl);
520MODULE_VERSION(DRV_VERSION); 512MODULE_VERSION(DRV_VERSION);
521
522module_init(acard_ahci_init);
523module_exit(acard_ahci_exit);
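module_pci_driver() (from <linux/pci.h>) generates exactly the init/exit pair being deleted above. A minimal skeleton showing the equivalence, with placeholder fields:

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver example_pci_driver = {
	/* .name, .id_table, .probe, .remove elided */
};

/* Expands to an __init function calling pci_register_driver(), an
 * __exit function calling pci_unregister_driver(), and the
 * module_init()/module_exit() lines for them. */
module_pci_driver(example_pci_driver);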
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ebaf67e4b2bc..50d5dea0ff59 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -105,31 +105,27 @@ static struct ata_port_operations ahci_p5wdh_ops = {
105 105
106static const struct ata_port_info ahci_port_info[] = { 106static const struct ata_port_info ahci_port_info[] = {
107 /* by features */ 107 /* by features */
108 [board_ahci] = 108 [board_ahci] = {
109 {
110 .flags = AHCI_FLAG_COMMON, 109 .flags = AHCI_FLAG_COMMON,
111 .pio_mask = ATA_PIO4, 110 .pio_mask = ATA_PIO4,
112 .udma_mask = ATA_UDMA6, 111 .udma_mask = ATA_UDMA6,
113 .port_ops = &ahci_ops, 112 .port_ops = &ahci_ops,
114 }, 113 },
115 [board_ahci_ign_iferr] = 114 [board_ahci_ign_iferr] = {
116 {
117 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR), 115 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
118 .flags = AHCI_FLAG_COMMON, 116 .flags = AHCI_FLAG_COMMON,
119 .pio_mask = ATA_PIO4, 117 .pio_mask = ATA_PIO4,
120 .udma_mask = ATA_UDMA6, 118 .udma_mask = ATA_UDMA6,
121 .port_ops = &ahci_ops, 119 .port_ops = &ahci_ops,
122 }, 120 },
123 [board_ahci_nosntf] = 121 [board_ahci_nosntf] = {
124 {
125 AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF), 122 AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
126 .flags = AHCI_FLAG_COMMON, 123 .flags = AHCI_FLAG_COMMON,
127 .pio_mask = ATA_PIO4, 124 .pio_mask = ATA_PIO4,
128 .udma_mask = ATA_UDMA6, 125 .udma_mask = ATA_UDMA6,
129 .port_ops = &ahci_ops, 126 .port_ops = &ahci_ops,
130 }, 127 },
131 [board_ahci_yes_fbs] = 128 [board_ahci_yes_fbs] = {
132 {
133 AHCI_HFLAGS (AHCI_HFLAG_YES_FBS), 129 AHCI_HFLAGS (AHCI_HFLAG_YES_FBS),
134 .flags = AHCI_FLAG_COMMON, 130 .flags = AHCI_FLAG_COMMON,
135 .pio_mask = ATA_PIO4, 131 .pio_mask = ATA_PIO4,
@@ -137,8 +133,7 @@ static const struct ata_port_info ahci_port_info[] = {
137 .port_ops = &ahci_ops, 133 .port_ops = &ahci_ops,
138 }, 134 },
139 /* by chipsets */ 135 /* by chipsets */
140 [board_ahci_mcp65] = 136 [board_ahci_mcp65] = {
141 {
142 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP | 137 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
143 AHCI_HFLAG_YES_NCQ), 138 AHCI_HFLAG_YES_NCQ),
144 .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM, 139 .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
@@ -146,24 +141,21 @@ static const struct ata_port_info ahci_port_info[] = {
146 .udma_mask = ATA_UDMA6, 141 .udma_mask = ATA_UDMA6,
147 .port_ops = &ahci_ops, 142 .port_ops = &ahci_ops,
148 }, 143 },
149 [board_ahci_mcp77] = 144 [board_ahci_mcp77] = {
150 {
151 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP), 145 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP),
152 .flags = AHCI_FLAG_COMMON, 146 .flags = AHCI_FLAG_COMMON,
153 .pio_mask = ATA_PIO4, 147 .pio_mask = ATA_PIO4,
154 .udma_mask = ATA_UDMA6, 148 .udma_mask = ATA_UDMA6,
155 .port_ops = &ahci_ops, 149 .port_ops = &ahci_ops,
156 }, 150 },
157 [board_ahci_mcp89] = 151 [board_ahci_mcp89] = {
158 {
159 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA), 152 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA),
160 .flags = AHCI_FLAG_COMMON, 153 .flags = AHCI_FLAG_COMMON,
161 .pio_mask = ATA_PIO4, 154 .pio_mask = ATA_PIO4,
162 .udma_mask = ATA_UDMA6, 155 .udma_mask = ATA_UDMA6,
163 .port_ops = &ahci_ops, 156 .port_ops = &ahci_ops,
164 }, 157 },
165 [board_ahci_mv] = 158 [board_ahci_mv] = {
166 {
167 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI | 159 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
168 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP), 160 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
169 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA, 161 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
@@ -171,8 +163,7 @@ static const struct ata_port_info ahci_port_info[] = {
171 .udma_mask = ATA_UDMA6, 163 .udma_mask = ATA_UDMA6,
172 .port_ops = &ahci_ops, 164 .port_ops = &ahci_ops,
173 }, 165 },
174 [board_ahci_sb600] = 166 [board_ahci_sb600] = {
175 {
176 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | 167 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
177 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 | 168 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
178 AHCI_HFLAG_32BIT_ONLY), 169 AHCI_HFLAG_32BIT_ONLY),
@@ -181,16 +172,14 @@ static const struct ata_port_info ahci_port_info[] = {
181 .udma_mask = ATA_UDMA6, 172 .udma_mask = ATA_UDMA6,
182 .port_ops = &ahci_pmp_retry_srst_ops, 173 .port_ops = &ahci_pmp_retry_srst_ops,
183 }, 174 },
184 [board_ahci_sb700] = /* for SB700 and SB800 */ 175 [board_ahci_sb700] = { /* for SB700 and SB800 */
185 {
186 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL), 176 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
187 .flags = AHCI_FLAG_COMMON, 177 .flags = AHCI_FLAG_COMMON,
188 .pio_mask = ATA_PIO4, 178 .pio_mask = ATA_PIO4,
189 .udma_mask = ATA_UDMA6, 179 .udma_mask = ATA_UDMA6,
190 .port_ops = &ahci_pmp_retry_srst_ops, 180 .port_ops = &ahci_pmp_retry_srst_ops,
191 }, 181 },
192 [board_ahci_vt8251] = 182 [board_ahci_vt8251] = {
193 {
194 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP), 183 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
195 .flags = AHCI_FLAG_COMMON, 184 .flags = AHCI_FLAG_COMMON,
196 .pio_mask = ATA_PIO4, 185 .pio_mask = ATA_PIO4,
@@ -267,6 +256,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
267 { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */ 256 { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */
268 { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */ 257 { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
269 { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */ 258 { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */
259 { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */
260 { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */
261 { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */
262 { PCI_VDEVICE(INTEL, 0x9c05), board_ahci }, /* Lynx Point-LP RAID */
263 { PCI_VDEVICE(INTEL, 0x9c06), board_ahci }, /* Lynx Point-LP RAID */
264 { PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */
265 { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */
266 { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */
270 267
271 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 268 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
272 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 269 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -777,6 +774,22 @@ static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
777 }, 774 },
778 }, 775 },
779 /* 776 /*
777 * All BIOS versions for the MSI K9AGM2 (MS-7327) support
778 * 64bit DMA.
779 *
780 * This board also had the typo mentioned above in the
781 * Manufacturer DMI field (fixed in BIOS version 1.5), so
782 * match on DMI_BOARD_VENDOR of "MICRO-STAR INTER" again.
783 */
784 {
785 .ident = "MSI K9AGM2",
786 .matches = {
787 DMI_MATCH(DMI_BOARD_VENDOR,
788 "MICRO-STAR INTER"),
789 DMI_MATCH(DMI_BOARD_NAME, "MS-7327"),
790 },
791 },
792 /*
780 * All BIOS versions for the Asus M3A support 64bit DMA. 793 * All BIOS versions for the Asus M3A support 64bit DMA.
781 * (all release versions from 0301 to 1206 were tested) 794 * (all release versions from 0301 to 1206 were tested)
782 */ 795 */
@@ -1233,22 +1246,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1233 &ahci_sht); 1246 &ahci_sht);
1234} 1247}
1235 1248
1236static int __init ahci_init(void) 1249module_pci_driver(ahci_pci_driver);
1237{
1238 return pci_register_driver(&ahci_pci_driver);
1239}
1240
1241static void __exit ahci_exit(void)
1242{
1243 pci_unregister_driver(&ahci_pci_driver);
1244}
1245
1246 1250
1247MODULE_AUTHOR("Jeff Garzik"); 1251MODULE_AUTHOR("Jeff Garzik");
1248MODULE_DESCRIPTION("AHCI SATA low-level driver"); 1252MODULE_DESCRIPTION("AHCI SATA low-level driver");
1249MODULE_LICENSE("GPL"); 1253MODULE_LICENSE("GPL");
1250MODULE_DEVICE_TABLE(pci, ahci_pci_tbl); 1254MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
1251MODULE_VERSION(DRV_VERSION); 1255MODULE_VERSION(DRV_VERSION);
1252
1253module_init(ahci_init);
1254module_exit(ahci_exit);
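The ahci_port_info rewrite is purely stylistic: both forms are C99 designated array initializers, and the patch only folds the opening brace onto the index line. A toy example of the construct; unmentioned elements and fields are zero-filled:

static const int example[] = {
	[2] = 5,	/* elements 0 and 1 are implicitly zero */
	[5] = 7,
};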
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index c2594ddf25b0..57eb1c212a4c 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -320,6 +320,7 @@ extern struct device_attribute *ahci_sdev_attrs[];
320extern struct ata_port_operations ahci_ops; 320extern struct ata_port_operations ahci_ops;
321extern struct ata_port_operations ahci_pmp_retry_srst_ops; 321extern struct ata_port_operations ahci_pmp_retry_srst_ops;
322 322
323unsigned int ahci_dev_classify(struct ata_port *ap);
323void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, 324void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
324 u32 opts); 325 u32 opts);
325void ahci_save_initial_config(struct device *dev, 326void ahci_save_initial_config(struct device *dev,
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 9e419e1c2006..09728e09cb31 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -15,6 +15,7 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/gfp.h> 16#include <linux/gfp.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/pm.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/interrupt.h> 20#include <linux/interrupt.h>
20#include <linux/device.h> 21#include <linux/device.h>
@@ -271,13 +272,10 @@ static int ahci_resume(struct device *dev)
271 272
272 return 0; 273 return 0;
273} 274}
274
275static struct dev_pm_ops ahci_pm_ops = {
276 .suspend = &ahci_suspend,
277 .resume = &ahci_resume,
278};
279#endif 275#endif
280 276
277SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume);
278
281static const struct of_device_id ahci_of_match[] = { 279static const struct of_device_id ahci_of_match[] = {
282 { .compatible = "calxeda,hb-ahci", }, 280 { .compatible = "calxeda,hb-ahci", },
283 { .compatible = "snps,spear-ahci", }, 281 { .compatible = "snps,spear-ahci", },
@@ -291,9 +289,7 @@ static struct platform_driver ahci_driver = {
291 .name = "ahci", 289 .name = "ahci",
292 .owner = THIS_MODULE, 290 .owner = THIS_MODULE,
293 .of_match_table = ahci_of_match, 291 .of_match_table = ahci_of_match,
294#ifdef CONFIG_PM
295 .pm = &ahci_pm_ops, 292 .pm = &ahci_pm_ops,
296#endif
297 }, 293 },
298 .id_table = ahci_devtype, 294 .id_table = ahci_devtype,
299}; 295};
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index aae115600b74..f8f38a08abc5 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -255,17 +255,7 @@ static struct pci_driver ata_generic_pci_driver = {
255#endif 255#endif
256}; 256};
257 257
258static int __init ata_generic_init(void) 258module_pci_driver(ata_generic_pci_driver);
259{
260 return pci_register_driver(&ata_generic_pci_driver);
261}
262
263
264static void __exit ata_generic_exit(void)
265{
266 pci_unregister_driver(&ata_generic_pci_driver);
267}
268
269 259
270MODULE_AUTHOR("Alan Cox"); 260MODULE_AUTHOR("Alan Cox");
271MODULE_DESCRIPTION("low-level driver for generic ATA"); 261MODULE_DESCRIPTION("low-level driver for generic ATA");
@@ -273,7 +263,4 @@ MODULE_LICENSE("GPL");
273MODULE_DEVICE_TABLE(pci, ata_generic); 263MODULE_DEVICE_TABLE(pci, ata_generic);
274MODULE_VERSION(DRV_VERSION); 264MODULE_VERSION(DRV_VERSION);
275 265
276module_init(ata_generic_init);
277module_exit(ata_generic_exit);
278
279module_param(all_generic_ide, int, 0); 266module_param(all_generic_ide, int, 0);
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 3c809bfbccf5..ef773e12af79 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -329,6 +329,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
329 { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 329 { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
330 /* SATA Controller IDE (Lynx Point) */ 330 /* SATA Controller IDE (Lynx Point) */
331 { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 331 { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
332 /* SATA Controller IDE (Lynx Point-LP) */
333 { 0x8086, 0x9c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
334 /* SATA Controller IDE (Lynx Point-LP) */
335 { 0x8086, 0x9c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
336 /* SATA Controller IDE (Lynx Point-LP) */
337 { 0x8086, 0x9c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
338 /* SATA Controller IDE (Lynx Point-LP) */
339 { 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
332 /* SATA Controller IDE (DH89xxCC) */ 340 /* SATA Controller IDE (DH89xxCC) */
333 { 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 341 { 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
334 { } /* terminate list */ 342 { } /* terminate list */
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index f9eaa82311a9..555c07afa05b 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1139,7 +1139,7 @@ static void ahci_dev_config(struct ata_device *dev)
1139 } 1139 }
1140} 1140}
1141 1141
1142static unsigned int ahci_dev_classify(struct ata_port *ap) 1142unsigned int ahci_dev_classify(struct ata_port *ap)
1143{ 1143{
1144 void __iomem *port_mmio = ahci_port_base(ap); 1144 void __iomem *port_mmio = ahci_port_base(ap);
1145 struct ata_taskfile tf; 1145 struct ata_taskfile tf;
@@ -1153,6 +1153,7 @@ static unsigned int ahci_dev_classify(struct ata_port *ap)
1153 1153
1154 return ata_dev_classify(&tf); 1154 return ata_dev_classify(&tf);
1155} 1155}
1156EXPORT_SYMBOL_GPL(ahci_dev_classify);
1156 1157
1157void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, 1158void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1158 u32 opts) 1159 u32 opts)
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index bb7c5f1085cc..fd9ecf74e631 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -16,6 +16,7 @@
16#include <linux/libata.h> 16#include <linux/libata.h>
17#include <linux/pci.h> 17#include <linux/pci.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/pm_runtime.h>
19#include <scsi/scsi_device.h> 20#include <scsi/scsi_device.h>
20#include "libata.h" 21#include "libata.h"
21 22
@@ -48,62 +49,43 @@ static void ata_acpi_clear_gtf(struct ata_device *dev)
48} 49}
49 50
50/** 51/**
51 * ata_acpi_associate_sata_port - associate SATA port with ACPI objects 52 * ata_ap_acpi_handle - provide the acpi_handle for an ata_port
52 * @ap: target SATA port 53 * @ap: the acpi_handle returned will correspond to this port
53 * 54 *
54 * Look up ACPI objects associated with @ap and initialize acpi_handle 55 * Returns the acpi_handle for the ACPI namespace object corresponding to
55 * fields of @ap, the port and devices accordingly. 56 * the ata_port passed into the function, or NULL if no such object exists
56 *
57 * LOCKING:
58 * EH context.
59 *
60 * RETURNS:
61 * 0 on success, -errno on failure.
62 */ 57 */
63void ata_acpi_associate_sata_port(struct ata_port *ap) 58acpi_handle ata_ap_acpi_handle(struct ata_port *ap)
64{ 59{
65 WARN_ON(!(ap->flags & ATA_FLAG_ACPI_SATA)); 60 if (ap->flags & ATA_FLAG_ACPI_SATA)
66 61 return NULL;
67 if (!sata_pmp_attached(ap)) {
68 u64 adr = SATA_ADR(ap->port_no, NO_PORT_MULT);
69
70 ap->link.device->acpi_handle =
71 acpi_get_child(ap->host->acpi_handle, adr);
72 } else {
73 struct ata_link *link;
74
75 ap->link.device->acpi_handle = NULL;
76
77 ata_for_each_link(link, ap, EDGE) {
78 u64 adr = SATA_ADR(ap->port_no, link->pmp);
79 62
80 link->device->acpi_handle = 63 return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev), ap->port_no);
81 acpi_get_child(ap->host->acpi_handle, adr);
82 }
83 }
84} 64}
65EXPORT_SYMBOL(ata_ap_acpi_handle);
85 66
86static void ata_acpi_associate_ide_port(struct ata_port *ap) 67/**
68 * ata_dev_acpi_handle - provide the acpi_handle for an ata_device
69 * @dev: the acpi_device returned will correspond to this port
70 *
71 * Returns the acpi_handle for the ACPI namespace object corresponding to
72 * the ata_device passed into the function, or NULL if no such object exists
73 */
74acpi_handle ata_dev_acpi_handle(struct ata_device *dev)
87{ 75{
88 int max_devices, i; 76 acpi_integer adr;
89 77 struct ata_port *ap = dev->link->ap;
90 ap->acpi_handle = acpi_get_child(ap->host->acpi_handle, ap->port_no);
91 if (!ap->acpi_handle)
92 return;
93
94 max_devices = 1;
95 if (ap->flags & ATA_FLAG_SLAVE_POSS)
96 max_devices++;
97
98 for (i = 0; i < max_devices; i++) {
99 struct ata_device *dev = &ap->link.device[i];
100
101 dev->acpi_handle = acpi_get_child(ap->acpi_handle, i);
102 }
103 78
104 if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0) 79 if (ap->flags & ATA_FLAG_ACPI_SATA) {
105 ap->pflags |= ATA_PFLAG_INIT_GTM_VALID; 80 if (!sata_pmp_attached(ap))
81 adr = SATA_ADR(ap->port_no, NO_PORT_MULT);
82 else
83 adr = SATA_ADR(ap->port_no, dev->link->pmp);
84 return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev), adr);
85 } else
86 return acpi_get_child(ata_ap_acpi_handle(ap), dev->devno);
106} 87}
88EXPORT_SYMBOL(ata_dev_acpi_handle);
107 89
108/* @ap and @dev are the same as ata_acpi_handle_hotplug() */ 90/* @ap and @dev are the same as ata_acpi_handle_hotplug() */
109static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev) 91static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev)
@@ -229,56 +211,6 @@ static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
229}; 211};
230 212
231/** 213/**
232 * ata_acpi_associate - associate ATA host with ACPI objects
233 * @host: target ATA host
234 *
235 * Look up ACPI objects associated with @host and initialize
236 * acpi_handle fields of @host, its ports and devices accordingly.
237 *
238 * LOCKING:
239 * EH context.
240 *
241 * RETURNS:
242 * 0 on success, -errno on failure.
243 */
244void ata_acpi_associate(struct ata_host *host)
245{
246 int i, j;
247
248 if (!is_pci_dev(host->dev) || libata_noacpi)
249 return;
250
251 host->acpi_handle = DEVICE_ACPI_HANDLE(host->dev);
252 if (!host->acpi_handle)
253 return;
254
255 for (i = 0; i < host->n_ports; i++) {
256 struct ata_port *ap = host->ports[i];
257
258 if (host->ports[0]->flags & ATA_FLAG_ACPI_SATA)
259 ata_acpi_associate_sata_port(ap);
260 else
261 ata_acpi_associate_ide_port(ap);
262
263 if (ap->acpi_handle) {
264 /* we might be on a docking station */
265 register_hotplug_dock_device(ap->acpi_handle,
266 &ata_acpi_ap_dock_ops, ap);
267 }
268
269 for (j = 0; j < ata_link_max_devices(&ap->link); j++) {
270 struct ata_device *dev = &ap->link.device[j];
271
272 if (dev->acpi_handle) {
273 /* we might be on a docking station */
274 register_hotplug_dock_device(dev->acpi_handle,
275 &ata_acpi_dev_dock_ops, dev);
276 }
277 }
278 }
279}
280
281/**
282 * ata_acpi_dissociate - dissociate ATA host from ACPI objects 214 * ata_acpi_dissociate - dissociate ATA host from ACPI objects
283 * @host: target ATA host 215 * @host: target ATA host
284 * 216 *
@@ -299,7 +231,7 @@ void ata_acpi_dissociate(struct ata_host *host)
299 struct ata_port *ap = host->ports[i]; 231 struct ata_port *ap = host->ports[i];
300 const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap); 232 const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
301 233
302 if (ap->acpi_handle && gtm) 234 if (ata_ap_acpi_handle(ap) && gtm)
303 ata_acpi_stm(ap, gtm); 235 ata_acpi_stm(ap, gtm);
304 } 236 }
305} 237}
@@ -324,7 +256,8 @@ int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *gtm)
324 acpi_status status; 256 acpi_status status;
325 int rc = 0; 257 int rc = 0;
326 258
327 status = acpi_evaluate_object(ap->acpi_handle, "_GTM", NULL, &output); 259 status = acpi_evaluate_object(ata_ap_acpi_handle(ap), "_GTM", NULL,
260 &output);
328 261
329 rc = -ENOENT; 262 rc = -ENOENT;
330 if (status == AE_NOT_FOUND) 263 if (status == AE_NOT_FOUND)
@@ -394,7 +327,8 @@ int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm)
394 input.count = 3; 327 input.count = 3;
395 input.pointer = in_params; 328 input.pointer = in_params;
396 329
397 status = acpi_evaluate_object(ap->acpi_handle, "_STM", &input, NULL); 330 status = acpi_evaluate_object(ata_ap_acpi_handle(ap), "_STM", &input,
331 NULL);
398 332
399 if (status == AE_NOT_FOUND) 333 if (status == AE_NOT_FOUND)
400 return -ENOENT; 334 return -ENOENT;
@@ -451,7 +385,8 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
451 __func__, ap->port_no); 385 __func__, ap->port_no);
452 386
453 /* _GTF has no input parameters */ 387 /* _GTF has no input parameters */
454 status = acpi_evaluate_object(dev->acpi_handle, "_GTF", NULL, &output); 388 status = acpi_evaluate_object(ata_dev_acpi_handle(dev), "_GTF", NULL,
389 &output);
455 out_obj = dev->gtf_cache = output.pointer; 390 out_obj = dev->gtf_cache = output.pointer;
456 391
457 if (ACPI_FAILURE(status)) { 392 if (ACPI_FAILURE(status)) {
@@ -817,7 +752,8 @@ static int ata_acpi_push_id(struct ata_device *dev)
817 752
818 /* It's OK for _SDD to be missing too. */ 753 /* It's OK for _SDD to be missing too. */
819 swap_buf_le16(dev->id, ATA_ID_WORDS); 754 swap_buf_le16(dev->id, ATA_ID_WORDS);
820 status = acpi_evaluate_object(dev->acpi_handle, "_SDD", &input, NULL); 755 status = acpi_evaluate_object(ata_dev_acpi_handle(dev), "_SDD", &input,
756 NULL);
821 swap_buf_le16(dev->id, ATA_ID_WORDS); 757 swap_buf_le16(dev->id, ATA_ID_WORDS);
822 758
823 if (status == AE_NOT_FOUND) 759 if (status == AE_NOT_FOUND)
@@ -867,7 +803,7 @@ void ata_acpi_on_resume(struct ata_port *ap)
867 const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap); 803 const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
868 struct ata_device *dev; 804 struct ata_device *dev;
869 805
870 if (ap->acpi_handle && gtm) { 806 if (ata_ap_acpi_handle(ap) && gtm) {
871 /* _GTM valid */ 807 /* _GTM valid */
872 808
873 /* restore timing parameters */ 809 /* restore timing parameters */
@@ -907,23 +843,39 @@ void ata_acpi_on_resume(struct ata_port *ap)
907void ata_acpi_set_state(struct ata_port *ap, pm_message_t state) 843void ata_acpi_set_state(struct ata_port *ap, pm_message_t state)
908{ 844{
909 struct ata_device *dev; 845 struct ata_device *dev;
910 846 acpi_handle handle;
911 if (!ap->acpi_handle || (ap->flags & ATA_FLAG_ACPI_SATA)) 847 int acpi_state;
912 return;
913 848
914 /* channel first and then drives for power on and vice versa 849 /* channel first and then drives for power on and vice versa
915 for power off */ 850 for power off */
916 if (state.event == PM_EVENT_ON) 851 handle = ata_ap_acpi_handle(ap);
917 acpi_bus_set_power(ap->acpi_handle, ACPI_STATE_D0); 852 if (handle && state.event == PM_EVENT_ON)
853 acpi_bus_set_power(handle, ACPI_STATE_D0);
918 854
919 ata_for_each_dev(dev, &ap->link, ENABLED) { 855 ata_for_each_dev(dev, &ap->link, ENABLED) {
920 if (dev->acpi_handle) 856 handle = ata_dev_acpi_handle(dev);
921 acpi_bus_set_power(dev->acpi_handle, 857 if (!handle)
922 state.event == PM_EVENT_ON ? 858 continue;
923 ACPI_STATE_D0 : ACPI_STATE_D3); 859
860 if (state.event != PM_EVENT_ON) {
861 acpi_state = acpi_pm_device_sleep_state(
862 &dev->sdev->sdev_gendev, NULL, ACPI_STATE_D3);
863 if (acpi_state > 0)
864 acpi_bus_set_power(handle, acpi_state);
865 /* TBD: need to check if it's runtime pm request */
866 acpi_pm_device_run_wake(
867 &dev->sdev->sdev_gendev, true);
868 } else {
869 /* Ditto */
870 acpi_pm_device_run_wake(
871 &dev->sdev->sdev_gendev, false);
872 acpi_bus_set_power(handle, ACPI_STATE_D0);
873 }
924 } 874 }
925 if (state.event != PM_EVENT_ON) 875
926 acpi_bus_set_power(ap->acpi_handle, ACPI_STATE_D3); 876 handle = ata_ap_acpi_handle(ap);
877 if (handle && state.event != PM_EVENT_ON)
878 acpi_bus_set_power(handle, ACPI_STATE_D3);
927} 879}
928 880
929/** 881/**
@@ -948,7 +900,7 @@ int ata_acpi_on_devcfg(struct ata_device *dev)
948 int nr_executed = 0; 900 int nr_executed = 0;
949 int rc; 901 int rc;
950 902
951 if (!dev->acpi_handle) 903 if (!ata_dev_acpi_handle(dev))
952 return 0; 904 return 0;
953 905
954 /* do we need to do _GTF? */ 906 /* do we need to do _GTF? */
@@ -994,7 +946,6 @@ int ata_acpi_on_devcfg(struct ata_device *dev)
994 } 946 }
995 947
996 ata_dev_warn(dev, "ACPI: failed the second time, disabled\n"); 948 ata_dev_warn(dev, "ACPI: failed the second time, disabled\n");
997 dev->acpi_handle = NULL;
998 949
999 /* We can safely continue if no _GTF command has been executed 950 /* We can safely continue if no _GTF command has been executed
1000 * and port is not frozen. 951 * and port is not frozen.
@@ -1018,3 +969,221 @@ void ata_acpi_on_disable(struct ata_device *dev)
1018{ 969{
1019 ata_acpi_clear_gtf(dev); 970 ata_acpi_clear_gtf(dev);
1020} 971}
972
973static void ata_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
974{
975 struct ata_device *ata_dev = context;
976
977 if (event == ACPI_NOTIFY_DEVICE_WAKE && ata_dev &&
978 pm_runtime_suspended(&ata_dev->sdev->sdev_gendev))
979 scsi_autopm_get_device(ata_dev->sdev);
980}
981
982static void ata_acpi_add_pm_notifier(struct ata_device *dev)
983{
984 struct acpi_device *acpi_dev;
985 acpi_handle handle;
986 acpi_status status;
987
988 handle = ata_dev_acpi_handle(dev);
989 if (!handle)
990 return;
991
992 status = acpi_bus_get_device(handle, &acpi_dev);
993 if (ACPI_FAILURE(status))
994 return;
995
996 if (dev->sdev->can_power_off) {
997 acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
998 ata_acpi_wake_dev, dev);
999 device_set_run_wake(&dev->sdev->sdev_gendev, true);
1000 }
1001}
1002
1003static void ata_acpi_remove_pm_notifier(struct ata_device *dev)
1004{
1005 struct acpi_device *acpi_dev;
1006 acpi_handle handle;
1007 acpi_status status;
1008
1009 handle = ata_dev_acpi_handle(dev);
1010 if (!handle)
1011 return;
1012
1013 status = acpi_bus_get_device(handle, &acpi_dev);
1014 if (ACPI_FAILURE(status))
1015 return;
1016
1017 if (dev->sdev->can_power_off) {
1018 device_set_run_wake(&dev->sdev->sdev_gendev, false);
1019 acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
1020 ata_acpi_wake_dev);
1021 }
1022}
1023
1024static void ata_acpi_register_power_resource(struct ata_device *dev)
1025{
1026 struct scsi_device *sdev = dev->sdev;
1027 acpi_handle handle;
1028 struct device *device;
1029
1030 handle = ata_dev_acpi_handle(dev);
1031 if (!handle)
1032 return;
1033
1034 device = &sdev->sdev_gendev;
1035
1036 acpi_power_resource_register_device(device, handle);
1037}
1038
1039static void ata_acpi_unregister_power_resource(struct ata_device *dev)
1040{
1041 struct scsi_device *sdev = dev->sdev;
1042 acpi_handle handle;
1043 struct device *device;
1044
1045 handle = ata_dev_acpi_handle(dev);
1046 if (!handle)
1047 return;
1048
1049 device = &sdev->sdev_gendev;
1050
1051 acpi_power_resource_unregister_device(device, handle);
1052}
1053
1054void ata_acpi_bind(struct ata_device *dev)
1055{
1056 ata_acpi_add_pm_notifier(dev);
1057 ata_acpi_register_power_resource(dev);
1058}
1059
1060void ata_acpi_unbind(struct ata_device *dev)
1061{
1062 ata_acpi_remove_pm_notifier(dev);
1063 ata_acpi_unregister_power_resource(dev);
1064}
1065
1066static int compat_pci_ata(struct ata_port *ap)
1067{
1068 struct device *dev = ap->tdev.parent;
1069 struct pci_dev *pdev;
1070
1071 if (!is_pci_dev(dev))
1072 return 0;
1073
1074 pdev = to_pci_dev(dev);
1075
1076 if ((pdev->class >> 8) != PCI_CLASS_STORAGE_SATA &&
1077 (pdev->class >> 8) != PCI_CLASS_STORAGE_IDE)
1078 return 0;
1079
1080 return 1;
1081}
1082
1083static int ata_acpi_bind_host(struct ata_port *ap, acpi_handle *handle)
1084{
1085 if (ap->flags & ATA_FLAG_ACPI_SATA)
1086 return -ENODEV;
1087
1088 *handle = acpi_get_child(DEVICE_ACPI_HANDLE(ap->tdev.parent),
1089 ap->port_no);
1090
1091 if (!*handle)
1092 return -ENODEV;
1093
1094 if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
1095 ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
1096
1097 return 0;
1098}
1099
1100static int ata_acpi_bind_device(struct ata_port *ap, struct scsi_device *sdev,
1101 acpi_handle *handle)
1102{
1103 struct ata_device *ata_dev;
1104 acpi_status status;
1105 struct acpi_device *acpi_dev;
1106 struct acpi_device_power_state *states;
1107
1108 if (ap->flags & ATA_FLAG_ACPI_SATA)
1109 ata_dev = &ap->link.device[sdev->channel];
1110 else
1111 ata_dev = &ap->link.device[sdev->id];
1112
1113 *handle = ata_dev_acpi_handle(ata_dev);
1114
1115 if (!*handle)
1116 return -ENODEV;
1117
1118 status = acpi_bus_get_device(*handle, &acpi_dev);
1119 if (ACPI_FAILURE(status))
1120 return 0;
1121
1122 /*
1123 * If firmware has _PS3 or _PR3 for this device,
1124 * and this ATA ODD device supports device attention,
1125 * it means this device can be powered off.
1126 */
1127 states = acpi_dev->power.states;
1128 if ((states[ACPI_STATE_D3_HOT].flags.valid ||
1129 states[ACPI_STATE_D3_COLD].flags.explicit_set) &&
1130 ata_dev->flags & ATA_DFLAG_DA)
1131 sdev->can_power_off = 1;
1132
1133 return 0;
1134}
1135
1136static int is_ata_port(const struct device *dev)
1137{
1138 return dev->type == &ata_port_type;
1139}
1140
1141static struct ata_port *dev_to_ata_port(struct device *dev)
1142{
1143 while (!is_ata_port(dev)) {
1144 if (!dev->parent)
1145 return NULL;
1146 dev = dev->parent;
1147 }
1148 return to_ata_port(dev);
1149}
1150
1151static int ata_acpi_find_device(struct device *dev, acpi_handle *handle)
1152{
1153 struct ata_port *ap = dev_to_ata_port(dev);
1154
1155 if (!ap)
1156 return -ENODEV;
1157
1158 if (!compat_pci_ata(ap))
1159 return -ENODEV;
1160
1161 if (scsi_is_host_device(dev))
1162 return ata_acpi_bind_host(ap, handle);
1163 else if (scsi_is_sdev_device(dev)) {
1164 struct scsi_device *sdev = to_scsi_device(dev);
1165
1166 return ata_acpi_bind_device(ap, sdev, handle);
1167 } else
1168 return -ENODEV;
1169}
1170
1171static int ata_acpi_find_dummy(struct device *dev, acpi_handle *handle)
1172{
1173 return -ENODEV;
1174}
1175
1176static struct acpi_bus_type ata_acpi_bus = {
1177 .find_bridge = ata_acpi_find_dummy,
1178 .find_device = ata_acpi_find_device,
1179};
1180
1181int ata_acpi_register(void)
1182{
1183 return scsi_register_acpi_bus_type(&ata_acpi_bus);
1184}
1185
1186void ata_acpi_unregister(void)
1187{
1188 scsi_unregister_acpi_bus_type(&ata_acpi_bus);
1189}
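The net effect in libata-acpi.c: handles are no longer cached in ata_port/ata_device at host registration but looked up on demand. A hedged sketch of the consumer side, mirroring the _GTF hunk above (function name is illustrative; the caller supplies the output buffer):

static acpi_status example_do_gtf(struct ata_device *dev,
				  struct acpi_buffer *output)
{
	acpi_handle handle = ata_dev_acpi_handle(dev);

	if (!handle)
		return AE_NOT_FOUND;
	return acpi_evaluate_object(handle, "_GTF", NULL, output);
}

For SATA, the handle is found by _ADR, which per the ACPI spec packs the port number into the high 16 bits and the port-multiplier port into the low 16 (0xffff when none is attached); that is what SATA_ADR() encodes above.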
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index cece3a4d11ea..8e1039c8e159 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -80,6 +80,8 @@ const struct ata_port_operations ata_base_port_ops = {
80 .prereset = ata_std_prereset, 80 .prereset = ata_std_prereset,
81 .postreset = ata_std_postreset, 81 .postreset = ata_std_postreset,
82 .error_handler = ata_std_error_handler, 82 .error_handler = ata_std_error_handler,
83 .sched_eh = ata_std_sched_eh,
84 .end_eh = ata_std_end_eh,
83}; 85};
84 86
85const struct ata_port_operations sata_port_ops = { 87const struct ata_port_operations sata_port_ops = {
@@ -2374,6 +2376,9 @@ int ata_dev_configure(struct ata_device *dev)
2374 dma_dir_string = ", DMADIR"; 2376 dma_dir_string = ", DMADIR";
2375 } 2377 }
2376 2378
2379 if (ata_id_has_da(dev->id))
2380 dev->flags |= ATA_DFLAG_DA;
2381
2377 /* print device info to dmesg */ 2382 /* print device info to dmesg */
2378 if (ata_msg_drv(ap) && print_info) 2383 if (ata_msg_drv(ap) && print_info)
2379 ata_dev_info(dev, 2384 ata_dev_info(dev,
@@ -4057,7 +4062,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4057 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 4062 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4058 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4063 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4059 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4064 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4060 { "2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, 4065 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
4061 /* Odd clown on sil3726/4726 PMPs */ 4066 /* Odd clown on sil3726/4726 PMPs */
4062 { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, 4067 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
4063 4068
@@ -4123,9 +4128,11 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4123 4128
4124 /* Devices that do not need bridging limits applied */ 4129 /* Devices that do not need bridging limits applied */
4125 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4130 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
4131 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },
4126 4132
4127 /* Devices which aren't very happy with higher link speeds */ 4133 /* Devices which aren't very happy with higher link speeds */
4128 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, 4134 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
4135 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
4129 4136
4130 /* 4137 /*
4131 * Devices which choke on SETXFER. Applies only if both the 4138 * Devices which choke on SETXFER. Applies only if both the
@@ -5288,8 +5295,6 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5288 return rc; 5295 return rc;
5289} 5296}
5290 5297
5291#define to_ata_port(d) container_of(d, struct ata_port, tdev)
5292
5293static int ata_port_suspend_common(struct device *dev, pm_message_t mesg) 5298static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
5294{ 5299{
5295 struct ata_port *ap = to_ata_port(dev); 5300 struct ata_port *ap = to_ata_port(dev);
@@ -6051,9 +6056,6 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6051 if (rc) 6056 if (rc)
6052 goto err_tadd; 6057 goto err_tadd;
6053 6058
6054 /* associate with ACPI nodes */
6055 ata_acpi_associate(host);
6056
6057 /* set cable, sata_spd_limit and report */ 6059 /* set cable, sata_spd_limit and report */
6058 for (i = 0; i < host->n_ports; i++) { 6060 for (i = 0; i < host->n_ports; i++) {
6059 struct ata_port *ap = host->ports[i]; 6061 struct ata_port *ap = host->ports[i];
@@ -6513,6 +6515,8 @@ static int __init ata_init(void)
6513 6515
6514 ata_parse_force_param(); 6516 ata_parse_force_param();
6515 6517
6518 ata_acpi_register();
6519
6516 rc = ata_sff_init(); 6520 rc = ata_sff_init();
6517 if (rc) { 6521 if (rc) {
6518 kfree(ata_force_tbl); 6522 kfree(ata_force_tbl);
@@ -6539,6 +6543,7 @@ static void __exit ata_exit(void)
6539 ata_release_transport(ata_scsi_transport_template); 6543 ata_release_transport(ata_scsi_transport_template);
6540 libata_transport_exit(); 6544 libata_transport_exit();
6541 ata_sff_exit(); 6545 ata_sff_exit();
6546 ata_acpi_unregister();
6542 kfree(ata_force_tbl); 6547 kfree(ata_force_tbl);
6543} 6548}
6544 6549
@@ -6642,6 +6647,8 @@ struct ata_port_operations ata_dummy_port_ops = {
6642 .qc_prep = ata_noop_qc_prep, 6647 .qc_prep = ata_noop_qc_prep,
6643 .qc_issue = ata_dummy_qc_issue, 6648 .qc_issue = ata_dummy_qc_issue,
6644 .error_handler = ata_dummy_error_handler, 6649 .error_handler = ata_dummy_error_handler,
6650 .sched_eh = ata_std_sched_eh,
6651 .end_eh = ata_std_end_eh,
6645}; 6652};
6646 6653
6647const struct ata_port_info ata_dummy_port_info = { 6654const struct ata_port_info ata_dummy_port_info = {
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 6d53cf9b3b6e..7d4535e989bf 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -419,7 +419,7 @@ int ata_ering_map(struct ata_ering *ering,
419 return rc; 419 return rc;
420} 420}
421 421
422int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg) 422static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
423{ 423{
424 ent->eflags |= ATA_EFLAG_OLD_ER; 424 ent->eflags |= ATA_EFLAG_OLD_ER;
425 return 0; 425 return 0;
@@ -793,12 +793,12 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
793 ata_for_each_link(link, ap, HOST_FIRST) 793 ata_for_each_link(link, ap, HOST_FIRST)
794 memset(&link->eh_info, 0, sizeof(link->eh_info)); 794 memset(&link->eh_info, 0, sizeof(link->eh_info));
795 795
796 /* Clear host_eh_scheduled while holding ap->lock such 796 /* end eh (clear host_eh_scheduled) while holding
797 * that if exception occurs after this point but 797 * ap->lock such that if exception occurs after this
798 * before EH completion, SCSI midlayer will 798 * point but before EH completion, SCSI midlayer will
799 * re-initiate EH. 799 * re-initiate EH.
800 */ 800 */
801 host->host_eh_scheduled = 0; 801 ap->ops->end_eh(ap);
802 802
803 spin_unlock_irqrestore(ap->lock, flags); 803 spin_unlock_irqrestore(ap->lock, flags);
804 ata_eh_release(ap); 804 ata_eh_release(ap);
@@ -986,16 +986,13 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
986} 986}
987 987
988/** 988/**
989 * ata_port_schedule_eh - schedule error handling without a qc 989 * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
990 * @ap: ATA port to schedule EH for 990 * @ap: ATA port to schedule EH for
991 *
992 * Schedule error handling for @ap. EH will kick in as soon as
993 * all commands are drained.
994 * 991 *
995 * LOCKING: 992 * LOCKING: inherited from ata_port_schedule_eh
996 * spin_lock_irqsave(host lock) 993 * spin_lock_irqsave(host lock)
997 */ 994 */
998void ata_port_schedule_eh(struct ata_port *ap) 995void ata_std_sched_eh(struct ata_port *ap)
999{ 996{
1000 WARN_ON(!ap->ops->error_handler); 997 WARN_ON(!ap->ops->error_handler);
1001 998
@@ -1007,6 +1004,44 @@ void ata_port_schedule_eh(struct ata_port *ap)
1007 1004
1008 DPRINTK("port EH scheduled\n"); 1005 DPRINTK("port EH scheduled\n");
1009} 1006}
1007EXPORT_SYMBOL_GPL(ata_std_sched_eh);
1008
1009/**
1010 * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
1011 * @ap: ATA port to end EH for
1012 *
1013 * In the libata object model there is a 1:1 mapping of ata_port to
1014 * shost, so host fields can be directly manipulated under ap->lock, in
1015 * the libsas case we need to hold a lock at the ha->level to coordinate
1016 * these events.
1017 *
1018 * LOCKING:
1019 * spin_lock_irqsave(host lock)
1020 */
1021void ata_std_end_eh(struct ata_port *ap)
1022{
1023 struct Scsi_Host *host = ap->scsi_host;
1024
1025 host->host_eh_scheduled = 0;
1026}
1027EXPORT_SYMBOL(ata_std_end_eh);
1028
1029
1030/**
1031 * ata_port_schedule_eh - schedule error handling without a qc
1032 * @ap: ATA port to schedule EH for
1033 *
1034 * Schedule error handling for @ap. EH will kick in as soon as
1035 * all commands are drained.
1036 *
1037 * LOCKING:
1038 * spin_lock_irqsave(host lock)
1039 */
1040void ata_port_schedule_eh(struct ata_port *ap)
1041{
1042 /* see: ata_std_sched_eh, unless you know better */
1043 ap->ops->sched_eh(ap);
1044}
1010 1045
1011static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) 1046static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
1012{ 1047{
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 21b80c555c60..61c59ee45ce9 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -529,8 +529,6 @@ int sata_pmp_attach(struct ata_device *dev)
529 ata_for_each_link(tlink, ap, EDGE) 529 ata_for_each_link(tlink, ap, EDGE)
530 sata_link_init_spd(tlink); 530 sata_link_init_spd(tlink);
531 531
532 ata_acpi_associate_sata_port(ap);
533
534 return 0; 532 return 0;
535 533
536 fail: 534 fail:
@@ -570,8 +568,6 @@ static void sata_pmp_detach(struct ata_device *dev)
570 ap->nr_pmp_links = 0; 568 ap->nr_pmp_links = 0;
571 link->pmp = 0; 569 link->pmp = 0;
572 spin_unlock_irqrestore(ap->lock, flags); 570 spin_unlock_irqrestore(ap->lock, flags);
573
574 ata_acpi_associate_sata_port(ap);
575} 571}
576 572
577/** 573/**
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 22226350cd0c..8ec81ca8f659 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3445,6 +3445,7 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
3445 if (!IS_ERR(sdev)) { 3445 if (!IS_ERR(sdev)) {
3446 dev->sdev = sdev; 3446 dev->sdev = sdev;
3447 scsi_device_put(sdev); 3447 scsi_device_put(sdev);
3448 ata_acpi_bind(dev);
3448 } else { 3449 } else {
3449 dev->sdev = NULL; 3450 dev->sdev = NULL;
3450 } 3451 }
@@ -3541,6 +3542,8 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
3541 mutex_lock(&ap->scsi_host->scan_mutex); 3542 mutex_lock(&ap->scsi_host->scan_mutex);
3542 spin_lock_irqsave(ap->lock, flags); 3543 spin_lock_irqsave(ap->lock, flags);
3543 3544
3545 ata_acpi_unbind(dev);
3546
3544 /* clearing dev->sdev is protected by host lock */ 3547 /* clearing dev->sdev is protected by host lock */
3545 sdev = dev->sdev; 3548 sdev = dev->sdev;
3546 dev->sdev = NULL; 3549 dev->sdev = NULL;
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index c34190485377..c04d393d20c1 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -232,7 +232,7 @@ static void ata_tport_release(struct device *dev)
232 * Returns: 232 * Returns:
233 * %1 if the device represents an ATA Port, %0 else 233 * %1 if the device represents an ATA Port, %0 else
234 */ 234 */
235int ata_is_port(const struct device *dev) 235static int ata_is_port(const struct device *dev)
236{ 236{
237 return dev->release == ata_tport_release; 237 return dev->release == ata_tport_release;
238} 238}
@@ -355,7 +355,7 @@ static void ata_tlink_release(struct device *dev)
355 * Returns: 355 * Returns:
357 * %1 if the device represents an ATA link, %0 else 357 * %1 if the device represents an ATA link, %0 else
357 */ 357 */
358int ata_is_link(const struct device *dev) 358static int ata_is_link(const struct device *dev)
359{ 359{
360 return dev->release == ata_tlink_release; 360 return dev->release == ata_tlink_release;
361} 361}
@@ -572,7 +572,7 @@ static void ata_tdev_release(struct device *dev)
572 * Returns: 572 * Returns:
573 * %1 if the device represents an ATA device, %0 else 573 * %1 if the device represents an ATA device, %0 else
574 */ 574 */
575int ata_is_ata_dev(const struct device *dev) 575static int ata_is_ata_dev(const struct device *dev)
576{ 576{
577 return dev->release == ata_tdev_release; 577 return dev->release == ata_tdev_release;
578} 578}
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 9d0fd0b71852..50e4dff0604e 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -107,21 +107,22 @@ extern const char *sata_spd_string(unsigned int spd);
107extern int ata_port_probe(struct ata_port *ap); 107extern int ata_port_probe(struct ata_port *ap);
108extern void __ata_port_probe(struct ata_port *ap); 108extern void __ata_port_probe(struct ata_port *ap);
109 109
110#define to_ata_port(d) container_of(d, struct ata_port, tdev)
111
110/* libata-acpi.c */ 112/* libata-acpi.c */
111#ifdef CONFIG_ATA_ACPI 113#ifdef CONFIG_ATA_ACPI
112extern unsigned int ata_acpi_gtf_filter; 114extern unsigned int ata_acpi_gtf_filter;
113
114extern void ata_acpi_associate_sata_port(struct ata_port *ap);
115extern void ata_acpi_associate(struct ata_host *host);
116extern void ata_acpi_dissociate(struct ata_host *host); 115extern void ata_acpi_dissociate(struct ata_host *host);
117extern int ata_acpi_on_suspend(struct ata_port *ap); 116extern int ata_acpi_on_suspend(struct ata_port *ap);
118extern void ata_acpi_on_resume(struct ata_port *ap); 117extern void ata_acpi_on_resume(struct ata_port *ap);
119extern int ata_acpi_on_devcfg(struct ata_device *dev); 118extern int ata_acpi_on_devcfg(struct ata_device *dev);
120extern void ata_acpi_on_disable(struct ata_device *dev); 119extern void ata_acpi_on_disable(struct ata_device *dev);
121extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state); 120extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
121extern int ata_acpi_register(void);
122extern void ata_acpi_unregister(void);
123extern void ata_acpi_bind(struct ata_device *dev);
124extern void ata_acpi_unbind(struct ata_device *dev);
122#else 125#else
123static inline void ata_acpi_associate_sata_port(struct ata_port *ap) { }
124static inline void ata_acpi_associate(struct ata_host *host) { }
125static inline void ata_acpi_dissociate(struct ata_host *host) { } 126static inline void ata_acpi_dissociate(struct ata_host *host) { }
126static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; } 127static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
127static inline void ata_acpi_on_resume(struct ata_port *ap) { } 128static inline void ata_acpi_on_resume(struct ata_port *ap) { }
@@ -129,6 +130,10 @@ static inline int ata_acpi_on_devcfg(struct ata_device *dev) { return 0; }
129static inline void ata_acpi_on_disable(struct ata_device *dev) { } 130static inline void ata_acpi_on_disable(struct ata_device *dev) { }
130static inline void ata_acpi_set_state(struct ata_port *ap, 131static inline void ata_acpi_set_state(struct ata_port *ap,
131 pm_message_t state) { } 132 pm_message_t state) { }
133static inline int ata_acpi_register(void) { return 0; }
134static inline void ata_acpi_unregister(void) { }
135static inline void ata_acpi_bind(struct ata_device *dev) { }
136static inline void ata_acpi_unbind(struct ata_device *dev) { }
132#endif 137#endif
133 138
134/* libata-scsi.c */ 139/* libata-scsi.c */
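
The header keeps the usual libata pattern for optional subsystems: real prototypes under CONFIG_ATA_ACPI, empty static inlines otherwise, so call sites never need #ifdef guards. A minimal sketch of how a caller benefits (the function names follow the hunk above; the surrounding lines are illustrative):

	/* in libata-scsi.c, no preprocessor guard required */
	dev->sdev = sdev;
	scsi_device_put(sdev);
	ata_acpi_bind(dev);	/* compiles to an empty inline and is
				 * optimized away when ACPI is off */

The compiler sees a complete definition in both configurations, so the ACPI-off build still type-checks every call site.
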
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index 54145edf50e8..09723b76beac 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -39,7 +39,7 @@ static int pacpi_pre_reset(struct ata_link *link, unsigned long deadline)
39{ 39{
40 struct ata_port *ap = link->ap; 40 struct ata_port *ap = link->ap;
41 struct pata_acpi *acpi = ap->private_data; 41 struct pata_acpi *acpi = ap->private_data;
42 if (ap->acpi_handle == NULL || ata_acpi_gtm(ap, &acpi->gtm) < 0) 42 if (ata_ap_acpi_handle(ap) == NULL || ata_acpi_gtm(ap, &acpi->gtm) < 0)
43 return -ENODEV; 43 return -ENODEV;
44 44
45 return ata_sff_prereset(link, deadline); 45 return ata_sff_prereset(link, deadline);
@@ -195,7 +195,7 @@ static int pacpi_port_start(struct ata_port *ap)
195 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 195 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
196 struct pata_acpi *acpi; 196 struct pata_acpi *acpi;
197 197
198 if (ap->acpi_handle == NULL) 198 if (ata_ap_acpi_handle(ap) == NULL)
199 return -ENODEV; 199 return -ENODEV;
200 200
201 acpi = ap->private_data = devm_kzalloc(&pdev->dev, sizeof(struct pata_acpi), GFP_KERNEL); 201 acpi = ap->private_data = devm_kzalloc(&pdev->dev, sizeof(struct pata_acpi), GFP_KERNEL);
@@ -273,22 +273,10 @@ static struct pci_driver pacpi_pci_driver = {
273#endif 273#endif
274}; 274};
275 275
276static int __init pacpi_init(void) 276module_pci_driver(pacpi_pci_driver);
277{
278 return pci_register_driver(&pacpi_pci_driver);
279}
280
281static void __exit pacpi_exit(void)
282{
283 pci_unregister_driver(&pacpi_pci_driver);
284}
285
286module_init(pacpi_init);
287module_exit(pacpi_exit);
288 277
289MODULE_AUTHOR("Alan Cox"); 278MODULE_AUTHOR("Alan Cox");
290MODULE_DESCRIPTION("SCSI low-level driver for ATA in ACPI mode"); 279MODULE_DESCRIPTION("SCSI low-level driver for ATA in ACPI mode");
291MODULE_LICENSE("GPL"); 280MODULE_LICENSE("GPL");
292MODULE_DEVICE_TABLE(pci, pacpi_pci_tbl); 281MODULE_DEVICE_TABLE(pci, pacpi_pci_tbl);
293MODULE_VERSION(DRV_VERSION); 282MODULE_VERSION(DRV_VERSION);
294
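
module_pci_driver() is the generic replacement for this init/exit boilerplate, and the same substitution repeats through the rest of this series. It is defined in <linux/pci.h> on top of module_driver(); the expansion is roughly:

	#define module_pci_driver(__pci_driver) \
		module_driver(__pci_driver, pci_register_driver, \
			      pci_unregister_driver)

	/* i.e. for this driver it generates approximately: */
	static int __init pacpi_pci_driver_init(void)
	{
		return pci_register_driver(&pacpi_pci_driver);
	}
	module_init(pacpi_pci_driver_init);

	static void __exit pacpi_pci_driver_exit(void)
	{
		pci_unregister_driver(&pacpi_pci_driver);
	}
	module_exit(pacpi_pci_driver_exit);

Behavior is identical to the removed hand-written functions; only the repetition goes away.
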
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index dc6b5dae0463..82a08922afcd 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -632,21 +632,10 @@ static struct pci_driver amd_pci_driver = {
632#endif 632#endif
633}; 633};
634 634
635static int __init amd_init(void) 635module_pci_driver(amd_pci_driver);
636{
637 return pci_register_driver(&amd_pci_driver);
638}
639
640static void __exit amd_exit(void)
641{
642 pci_unregister_driver(&amd_pci_driver);
643}
644 636
645MODULE_AUTHOR("Alan Cox"); 637MODULE_AUTHOR("Alan Cox");
646MODULE_DESCRIPTION("low-level driver for AMD and Nvidia PATA IDE"); 638MODULE_DESCRIPTION("low-level driver for AMD and Nvidia PATA IDE");
647MODULE_LICENSE("GPL"); 639MODULE_LICENSE("GPL");
648MODULE_DEVICE_TABLE(pci, amd); 640MODULE_DEVICE_TABLE(pci, amd);
649MODULE_VERSION(DRV_VERSION); 641MODULE_VERSION(DRV_VERSION);
650
651module_init(amd_init);
652module_exit(amd_exit);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 3239517f4d90..bfaa5cb1629a 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -4,7 +4,7 @@
4 * Arasan Compact Flash host controller source file 4 * Arasan Compact Flash host controller source file
5 * 5 *
6 * Copyright (C) 2011 ST Microelectronics 6 * Copyright (C) 2011 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
@@ -184,10 +184,8 @@
184struct arasan_cf_dev { 184struct arasan_cf_dev {
185 /* pointer to ata_host structure */ 185 /* pointer to ata_host structure */
186 struct ata_host *host; 186 struct ata_host *host;
187 /* clk structure, only if HAVE_CLK is defined */ 187 /* clk structure */
188#ifdef CONFIG_HAVE_CLK
189 struct clk *clk; 188 struct clk *clk;
190#endif
191 189
192 /* physical base address of controller */ 190 /* physical base address of controller */
193 dma_addr_t pbase; 191 dma_addr_t pbase;
@@ -312,13 +310,11 @@ static int cf_init(struct arasan_cf_dev *acdev)
312 unsigned long flags; 310 unsigned long flags;
313 int ret = 0; 311 int ret = 0;
314 312
315#ifdef CONFIG_HAVE_CLK
316 ret = clk_enable(acdev->clk); 313 ret = clk_enable(acdev->clk);
317 if (ret) { 314 if (ret) {
318 dev_dbg(acdev->host->dev, "clock enable failed"); 315 dev_dbg(acdev->host->dev, "clock enable failed");
319 return ret; 316 return ret;
320 } 317 }
321#endif
322 318
323 spin_lock_irqsave(&acdev->host->lock, flags); 319 spin_lock_irqsave(&acdev->host->lock, flags);
324 /* configure CF interface clock */ 320 /* configure CF interface clock */
@@ -344,9 +340,7 @@ static void cf_exit(struct arasan_cf_dev *acdev)
344 writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB, 340 writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
345 acdev->vbase + OP_MODE); 341 acdev->vbase + OP_MODE);
346 spin_unlock_irqrestore(&acdev->host->lock, flags); 342 spin_unlock_irqrestore(&acdev->host->lock, flags);
347#ifdef CONFIG_HAVE_CLK
348 clk_disable(acdev->clk); 343 clk_disable(acdev->clk);
349#endif
350} 344}
351 345
352static void dma_callback(void *dev) 346static void dma_callback(void *dev)
@@ -828,13 +822,11 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
828 return -ENOMEM; 822 return -ENOMEM;
829 } 823 }
830 824
831#ifdef CONFIG_HAVE_CLK
832 acdev->clk = clk_get(&pdev->dev, NULL); 825 acdev->clk = clk_get(&pdev->dev, NULL);
833 if (IS_ERR(acdev->clk)) { 826 if (IS_ERR(acdev->clk)) {
834 dev_warn(&pdev->dev, "Clock not found\n"); 827 dev_warn(&pdev->dev, "Clock not found\n");
835 return PTR_ERR(acdev->clk); 828 return PTR_ERR(acdev->clk);
836 } 829 }
837#endif
838 830
839 /* allocate host */ 831 /* allocate host */
840 host = ata_host_alloc(&pdev->dev, 1); 832 host = ata_host_alloc(&pdev->dev, 1);
@@ -899,9 +891,7 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
899 &arasan_cf_sht); 891 &arasan_cf_sht);
900 892
901free_clk: 893free_clk:
902#ifdef CONFIG_HAVE_CLK
903 clk_put(acdev->clk); 894 clk_put(acdev->clk);
904#endif
905 return ret; 895 return ret;
906} 896}
907 897
@@ -912,9 +902,7 @@ static int __devexit arasan_cf_remove(struct platform_device *pdev)
912 902
913 ata_host_detach(host); 903 ata_host_detach(host);
914 cf_exit(acdev); 904 cf_exit(acdev);
915#ifdef CONFIG_HAVE_CLK
916 clk_put(acdev->clk); 905 clk_put(acdev->clk);
917#endif
918 906
919 return 0; 907 return 0;
920} 908}
@@ -959,7 +947,7 @@ static struct platform_driver arasan_cf_driver = {
959 947
960module_platform_driver(arasan_cf_driver); 948module_platform_driver(arasan_cf_driver);
961 949
962MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 950MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
963MODULE_DESCRIPTION("Arasan ATA Compact Flash driver"); 951MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
964MODULE_LICENSE("GPL"); 952MODULE_LICENSE("GPL");
965MODULE_ALIAS("platform:" DRIVER_NAME); 953MODULE_ALIAS("platform:" DRIVER_NAME);
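
The CONFIG_HAVE_CLK guards can go because the clk API now provides no-op stubs when the architecture has no clock framework. With !CONFIG_HAVE_CLK, <linux/clk.h> supplies inlines along these lines (simplified sketch):

	static inline struct clk *clk_get(struct device *dev, const char *id)
	{
		return NULL;	/* not an ERR_PTR, so IS_ERR() is false
				 * and probe continues normally */
	}
	static inline int clk_enable(struct clk *clk) { return 0; }
	static inline void clk_disable(struct clk *clk) { }
	static inline void clk_put(struct clk *clk) { }

So the now-unconditional clk_get()/clk_enable() calls degrade to harmless no-ops on clockless configurations, and the driver text stays free of preprocessor branches.
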
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 4b8b22efc00b..74b215c09b21 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -451,18 +451,7 @@ static struct pci_driver artop_pci_driver = {
451#endif 451#endif
452}; 452};
453 453
454static int __init artop_init(void) 454module_pci_driver(artop_pci_driver);
455{
456 return pci_register_driver(&artop_pci_driver);
457}
458
459static void __exit artop_exit(void)
460{
461 pci_unregister_driver(&artop_pci_driver);
462}
463
464module_init(artop_init);
465module_exit(artop_exit);
466 455
467MODULE_AUTHOR("Alan Cox, Bartlomiej Zolnierkiewicz"); 456MODULE_AUTHOR("Alan Cox, Bartlomiej Zolnierkiewicz");
468MODULE_DESCRIPTION("SCSI low-level driver for ARTOP PATA"); 457MODULE_DESCRIPTION("SCSI low-level driver for ARTOP PATA");
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index be1aa1486d39..24e51056ac26 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -20,6 +20,7 @@
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <scsi/scsi_host.h> 21#include <scsi/scsi_host.h>
22#include <linux/libata.h> 22#include <linux/libata.h>
23#include <linux/dmi.h>
23 24
24#define DRV_NAME "pata_atiixp" 25#define DRV_NAME "pata_atiixp"
25#define DRV_VERSION "0.4.6" 26#define DRV_VERSION "0.4.6"
@@ -33,11 +34,26 @@ enum {
33 ATIIXP_IDE_UDMA_MODE = 0x56 34 ATIIXP_IDE_UDMA_MODE = 0x56
34}; 35};
35 36
37static const struct dmi_system_id attixp_cable_override_dmi_table[] = {
38 {
39 /* Board has onboard PATA<->SATA converters */
40 .ident = "MSI E350DM-E33",
41 .matches = {
42 DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
43 DMI_MATCH(DMI_BOARD_NAME, "E350DM-E33(MS-7720)"),
44 },
45 },
46 { }
47};
48
36static int atiixp_cable_detect(struct ata_port *ap) 49static int atiixp_cable_detect(struct ata_port *ap)
37{ 50{
38 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 51 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
39 u8 udma; 52 u8 udma;
40 53
54 if (dmi_check_system(attixp_cable_override_dmi_table))
55 return ATA_CBL_PATA40_SHORT;
56
41 /* Hack from drivers/ide/pci. Really we want to know how to do the 57 /* Hack from drivers/ide/pci. Really we want to know how to do the
42 raw detection not play follow the bios mode guess */ 58 raw detection not play follow the bios mode guess */
43 pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma); 59 pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma);
@@ -289,22 +305,10 @@ static struct pci_driver atiixp_pci_driver = {
289#endif 305#endif
290}; 306};
291 307
292static int __init atiixp_init(void) 308module_pci_driver(atiixp_pci_driver);
293{
294 return pci_register_driver(&atiixp_pci_driver);
295}
296
297
298static void __exit atiixp_exit(void)
299{
300 pci_unregister_driver(&atiixp_pci_driver);
301}
302 309
303MODULE_AUTHOR("Alan Cox"); 310MODULE_AUTHOR("Alan Cox");
304MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400"); 311MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400");
305MODULE_LICENSE("GPL"); 312MODULE_LICENSE("GPL");
306MODULE_DEVICE_TABLE(pci, atiixp); 313MODULE_DEVICE_TABLE(pci, atiixp);
307MODULE_VERSION(DRV_VERSION); 314MODULE_VERSION(DRV_VERSION);
308
309module_init(atiixp_init);
310module_exit(atiixp_exit);
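
dmi_check_system() walks the table and returns the number of entries whose .matches fields all match the firmware's DMI strings, so any hit makes cable detection report ATA_CBL_PATA40_SHORT: the board's onboard PATA<->SATA converters defeat the usual 80-wire heuristics, and "40-wire short" marks the link as capable of higher UDMA modes anyway. Extending the table is just another element before the empty terminator; the strings below are invented for illustration:

	{
		/* hypothetical additional board with onboard converters */
		.ident = "ExampleVendor EX-1234",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ExampleVendor"),
			DMI_MATCH(DMI_BOARD_NAME, "EX-1234"),
		},
	},
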
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index 3cfabb262af2..041f50d53240 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -565,21 +565,10 @@ static struct pci_driver atp867x_driver = {
565#endif 565#endif
566}; 566};
567 567
568static int __init atp867x_init(void) 568module_pci_driver(atp867x_driver);
569{
570 return pci_register_driver(&atp867x_driver);
571}
572
573static void __exit atp867x_exit(void)
574{
575 pci_unregister_driver(&atp867x_driver);
576}
577 569
578MODULE_AUTHOR("John(Jung-Ik) Lee, Google Inc."); 570MODULE_AUTHOR("John(Jung-Ik) Lee, Google Inc.");
579MODULE_DESCRIPTION("low level driver for Artop/Acard 867x ATA controller"); 571MODULE_DESCRIPTION("low level driver for Artop/Acard 867x ATA controller");
580MODULE_LICENSE("GPL"); 572MODULE_LICENSE("GPL");
581MODULE_DEVICE_TABLE(pci, atp867x_pci_tbl); 573MODULE_DEVICE_TABLE(pci, atp867x_pci_tbl);
582MODULE_VERSION(DRV_VERSION); 574MODULE_VERSION(DRV_VERSION);
583
584module_init(atp867x_init);
585module_exit(atp867x_exit);
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
index 549d28dbf90d..504b98b58e19 100644
--- a/drivers/ata/pata_cmd640.c
+++ b/drivers/ata/pata_cmd640.c
@@ -263,21 +263,10 @@ static struct pci_driver cmd640_pci_driver = {
263#endif 263#endif
264}; 264};
265 265
266static int __init cmd640_init(void) 266module_pci_driver(cmd640_pci_driver);
267{
268 return pci_register_driver(&cmd640_pci_driver);
269}
270
271static void __exit cmd640_exit(void)
272{
273 pci_unregister_driver(&cmd640_pci_driver);
274}
275 267
276MODULE_AUTHOR("Alan Cox"); 268MODULE_AUTHOR("Alan Cox");
277MODULE_DESCRIPTION("low-level driver for CMD640 PATA controllers"); 269MODULE_DESCRIPTION("low-level driver for CMD640 PATA controllers");
278MODULE_LICENSE("GPL"); 270MODULE_LICENSE("GPL");
279MODULE_DEVICE_TABLE(pci, cmd640); 271MODULE_DEVICE_TABLE(pci, cmd640);
280MODULE_VERSION(DRV_VERSION); 272MODULE_VERSION(DRV_VERSION);
281
282module_init(cmd640_init);
283module_exit(cmd640_exit);
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 1c17cd1e8b2d..7ba01415b676 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -423,7 +423,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
423 .port_ops = &cmd648_port_ops 423 .port_ops = &cmd648_port_ops
424 } 424 }
425 }; 425 };
426 const struct ata_port_info *ppi[] = { 426 const struct ata_port_info *ppi[] = {
427 &cmd_info[id->driver_data], 427 &cmd_info[id->driver_data],
428 &cmd_info[id->driver_data], 428 &cmd_info[id->driver_data],
429 NULL 429 NULL
@@ -478,7 +478,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
478 if (port_ok && cntrl_ch0_ok && !(reg & CNTRL_CH0)) { 478 if (port_ok && cntrl_ch0_ok && !(reg & CNTRL_CH0)) {
479 dev_printk(KERN_NOTICE, &pdev->dev, "Primary port is disabled\n"); 479 dev_printk(KERN_NOTICE, &pdev->dev, "Primary port is disabled\n");
480 ppi[0] = &ata_dummy_port_info; 480 ppi[0] = &ata_dummy_port_info;
481 481
482 } 482 }
483 if (port_ok && !(reg & CNTRL_CH1)) { 483 if (port_ok && !(reg & CNTRL_CH1)) {
484 dev_printk(KERN_NOTICE, &pdev->dev, "Secondary port is disabled\n"); 484 dev_printk(KERN_NOTICE, &pdev->dev, "Secondary port is disabled\n");
@@ -525,21 +525,10 @@ static struct pci_driver cmd64x_pci_driver = {
525#endif 525#endif
526}; 526};
527 527
528static int __init cmd64x_init(void) 528module_pci_driver(cmd64x_pci_driver);
529{
530 return pci_register_driver(&cmd64x_pci_driver);
531}
532
533static void __exit cmd64x_exit(void)
534{
535 pci_unregister_driver(&cmd64x_pci_driver);
536}
537 529
538MODULE_AUTHOR("Alan Cox"); 530MODULE_AUTHOR("Alan Cox");
539MODULE_DESCRIPTION("low-level driver for CMD64x series PATA controllers"); 531MODULE_DESCRIPTION("low-level driver for CMD64x series PATA controllers");
540MODULE_LICENSE("GPL"); 532MODULE_LICENSE("GPL");
541MODULE_DEVICE_TABLE(pci, cmd64x); 533MODULE_DEVICE_TABLE(pci, cmd64x);
542MODULE_VERSION(DRV_VERSION); 534MODULE_VERSION(DRV_VERSION);
543
544module_init(cmd64x_init);
545module_exit(cmd64x_exit);
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 9ddcddc66a20..de74d804f031 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -302,22 +302,10 @@ static struct pci_driver cs5520_pci_driver = {
302#endif 302#endif
303}; 303};
304 304
305static int __init cs5520_init(void) 305module_pci_driver(cs5520_pci_driver);
306{
307 return pci_register_driver(&cs5520_pci_driver);
308}
309
310static void __exit cs5520_exit(void)
311{
312 pci_unregister_driver(&cs5520_pci_driver);
313}
314 306
315MODULE_AUTHOR("Alan Cox"); 307MODULE_AUTHOR("Alan Cox");
316MODULE_DESCRIPTION("low-level driver for Cyrix CS5510/5520"); 308MODULE_DESCRIPTION("low-level driver for Cyrix CS5510/5520");
317MODULE_LICENSE("GPL"); 309MODULE_LICENSE("GPL");
318MODULE_DEVICE_TABLE(pci, pata_cs5520); 310MODULE_DEVICE_TABLE(pci, pata_cs5520);
319MODULE_VERSION(DRV_VERSION); 311MODULE_VERSION(DRV_VERSION);
320
321module_init(cs5520_init);
322module_exit(cs5520_exit);
323
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index f792330f0d8e..48389ae0b330 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -363,21 +363,10 @@ static struct pci_driver cs5530_pci_driver = {
363#endif 363#endif
364}; 364};
365 365
366static int __init cs5530_init(void) 366module_pci_driver(cs5530_pci_driver);
367{
368 return pci_register_driver(&cs5530_pci_driver);
369}
370
371static void __exit cs5530_exit(void)
372{
373 pci_unregister_driver(&cs5530_pci_driver);
374}
375 367
376MODULE_AUTHOR("Alan Cox"); 368MODULE_AUTHOR("Alan Cox");
377MODULE_DESCRIPTION("low-level driver for the Cyrix/NS/AMD 5530"); 369MODULE_DESCRIPTION("low-level driver for the Cyrix/NS/AMD 5530");
378MODULE_LICENSE("GPL"); 370MODULE_LICENSE("GPL");
379MODULE_DEVICE_TABLE(pci, cs5530); 371MODULE_DEVICE_TABLE(pci, cs5530);
380MODULE_VERSION(DRV_VERSION); 372MODULE_VERSION(DRV_VERSION);
381
382module_init(cs5530_init);
383module_exit(cs5530_exit);
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index a0b4640125ae..997e16a3a63f 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -207,21 +207,10 @@ static struct pci_driver cs5535_pci_driver = {
207#endif 207#endif
208}; 208};
209 209
210static int __init cs5535_init(void) 210module_pci_driver(cs5535_pci_driver);
211{
212 return pci_register_driver(&cs5535_pci_driver);
213}
214
215static void __exit cs5535_exit(void)
216{
217 pci_unregister_driver(&cs5535_pci_driver);
218}
219 211
220MODULE_AUTHOR("Alan Cox, Jens Altmann, Wolfgan Zuleger, Alexander Kiausch"); 212MODULE_AUTHOR("Alan Cox, Jens Altmann, Wolfgan Zuleger, Alexander Kiausch");
221MODULE_DESCRIPTION("low-level driver for the NS/AMD 5535"); 213MODULE_DESCRIPTION("low-level driver for the NS/AMD 5535");
222MODULE_LICENSE("GPL"); 214MODULE_LICENSE("GPL");
223MODULE_DEVICE_TABLE(pci, cs5535); 215MODULE_DEVICE_TABLE(pci, cs5535);
224MODULE_VERSION(DRV_VERSION); 216MODULE_VERSION(DRV_VERSION);
225
226module_init(cs5535_init);
227module_exit(cs5535_exit);
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 7a402c75ab90..dec1b6c4b351 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -274,21 +274,10 @@ static struct pci_driver cs5536_pci_driver = {
274#endif 274#endif
275}; 275};
276 276
277static int __init cs5536_init(void) 277module_pci_driver(cs5536_pci_driver);
278{
279 return pci_register_driver(&cs5536_pci_driver);
280}
281
282static void __exit cs5536_exit(void)
283{
284 pci_unregister_driver(&cs5536_pci_driver);
285}
286 278
287MODULE_AUTHOR("Martin K. Petersen"); 279MODULE_AUTHOR("Martin K. Petersen");
288MODULE_DESCRIPTION("low-level driver for the CS5536 IDE controller"); 280MODULE_DESCRIPTION("low-level driver for the CS5536 IDE controller");
289MODULE_LICENSE("GPL"); 281MODULE_LICENSE("GPL");
290MODULE_DEVICE_TABLE(pci, cs5536); 282MODULE_DEVICE_TABLE(pci, cs5536);
291MODULE_VERSION(DRV_VERSION); 283MODULE_VERSION(DRV_VERSION);
292
293module_init(cs5536_init);
294module_exit(cs5536_exit);
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index 6d915b063d93..810bc9964dde 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -158,23 +158,10 @@ static struct pci_driver cy82c693_pci_driver = {
158#endif 158#endif
159}; 159};
160 160
161static int __init cy82c693_init(void) 161module_pci_driver(cy82c693_pci_driver);
162{
163 return pci_register_driver(&cy82c693_pci_driver);
164}
165
166
167static void __exit cy82c693_exit(void)
168{
169 pci_unregister_driver(&cy82c693_pci_driver);
170}
171
172 162
173MODULE_AUTHOR("Alan Cox"); 163MODULE_AUTHOR("Alan Cox");
174MODULE_DESCRIPTION("low-level driver for the CY82C693 PATA controller"); 164MODULE_DESCRIPTION("low-level driver for the CY82C693 PATA controller");
175MODULE_LICENSE("GPL"); 165MODULE_LICENSE("GPL");
176MODULE_DEVICE_TABLE(pci, cy82c693); 166MODULE_DEVICE_TABLE(pci, cy82c693);
177MODULE_VERSION(DRV_VERSION); 167MODULE_VERSION(DRV_VERSION);
178
179module_init(cy82c693_init);
180module_exit(cy82c693_exit);
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index f0243ed206f7..3c12fd7acd41 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -295,22 +295,10 @@ static struct pci_driver efar_pci_driver = {
295#endif 295#endif
296}; 296};
297 297
298static int __init efar_init(void) 298module_pci_driver(efar_pci_driver);
299{
300 return pci_register_driver(&efar_pci_driver);
301}
302
303static void __exit efar_exit(void)
304{
305 pci_unregister_driver(&efar_pci_driver);
306}
307
308module_init(efar_init);
309module_exit(efar_exit);
310 299
311MODULE_AUTHOR("Alan Cox"); 300MODULE_AUTHOR("Alan Cox");
312MODULE_DESCRIPTION("SCSI low-level driver for EFAR PIIX clones"); 301MODULE_DESCRIPTION("SCSI low-level driver for EFAR PIIX clones");
313MODULE_LICENSE("GPL"); 302MODULE_LICENSE("GPL");
314MODULE_DEVICE_TABLE(pci, efar_pci_tbl); 303MODULE_DEVICE_TABLE(pci, efar_pci_tbl);
315MODULE_VERSION(DRV_VERSION); 304MODULE_VERSION(DRV_VERSION);
316
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 42cffd38910d..4be884a9f5ed 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -418,21 +418,10 @@ static struct pci_driver hpt36x_pci_driver = {
418#endif 418#endif
419}; 419};
420 420
421static int __init hpt36x_init(void) 421module_pci_driver(hpt36x_pci_driver);
422{
423 return pci_register_driver(&hpt36x_pci_driver);
424}
425
426static void __exit hpt36x_exit(void)
427{
428 pci_unregister_driver(&hpt36x_pci_driver);
429}
430 422
431MODULE_AUTHOR("Alan Cox"); 423MODULE_AUTHOR("Alan Cox");
432MODULE_DESCRIPTION("low-level driver for the Highpoint HPT366/368"); 424MODULE_DESCRIPTION("low-level driver for the Highpoint HPT366/368");
433MODULE_LICENSE("GPL"); 425MODULE_LICENSE("GPL");
434MODULE_DEVICE_TABLE(pci, hpt36x); 426MODULE_DEVICE_TABLE(pci, hpt36x);
435MODULE_VERSION(DRV_VERSION); 427MODULE_VERSION(DRV_VERSION);
436
437module_init(hpt36x_init);
438module_exit(hpt36x_exit);
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 9620636aa405..a9d74eff5fc4 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -1058,21 +1058,10 @@ static struct pci_driver hpt37x_pci_driver = {
1058 .remove = ata_pci_remove_one 1058 .remove = ata_pci_remove_one
1059}; 1059};
1060 1060
1061static int __init hpt37x_init(void) 1061module_pci_driver(hpt37x_pci_driver);
1062{
1063 return pci_register_driver(&hpt37x_pci_driver);
1064}
1065
1066static void __exit hpt37x_exit(void)
1067{
1068 pci_unregister_driver(&hpt37x_pci_driver);
1069}
1070 1062
1071MODULE_AUTHOR("Alan Cox"); 1063MODULE_AUTHOR("Alan Cox");
1072MODULE_DESCRIPTION("low-level driver for the Highpoint HPT37x/30x"); 1064MODULE_DESCRIPTION("low-level driver for the Highpoint HPT37x/30x");
1073MODULE_LICENSE("GPL"); 1065MODULE_LICENSE("GPL");
1074MODULE_DEVICE_TABLE(pci, hpt37x); 1066MODULE_DEVICE_TABLE(pci, hpt37x);
1075MODULE_VERSION(DRV_VERSION); 1067MODULE_VERSION(DRV_VERSION);
1076
1077module_init(hpt37x_init);
1078module_exit(hpt37x_exit);
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 765f136d8cd3..4be0398c153d 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -621,21 +621,10 @@ static struct pci_driver hpt3x2n_pci_driver = {
621 .remove = ata_pci_remove_one 621 .remove = ata_pci_remove_one
622}; 622};
623 623
624static int __init hpt3x2n_init(void) 624module_pci_driver(hpt3x2n_pci_driver);
625{
626 return pci_register_driver(&hpt3x2n_pci_driver);
627}
628
629static void __exit hpt3x2n_exit(void)
630{
631 pci_unregister_driver(&hpt3x2n_pci_driver);
632}
633 625
634MODULE_AUTHOR("Alan Cox"); 626MODULE_AUTHOR("Alan Cox");
635MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3xxN"); 627MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3xxN");
636MODULE_LICENSE("GPL"); 628MODULE_LICENSE("GPL");
637MODULE_DEVICE_TABLE(pci, hpt3x2n); 629MODULE_DEVICE_TABLE(pci, hpt3x2n);
638MODULE_VERSION(DRV_VERSION); 630MODULE_VERSION(DRV_VERSION);
639
640module_init(hpt3x2n_init);
641module_exit(hpt3x2n_exit);
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index b3042dab08bb..76c9314bb824 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -284,23 +284,10 @@ static struct pci_driver hpt3x3_pci_driver = {
284#endif 284#endif
285}; 285};
286 286
287static int __init hpt3x3_init(void) 287module_pci_driver(hpt3x3_pci_driver);
288{
289 return pci_register_driver(&hpt3x3_pci_driver);
290}
291
292
293static void __exit hpt3x3_exit(void)
294{
295 pci_unregister_driver(&hpt3x3_pci_driver);
296}
297
298 288
299MODULE_AUTHOR("Alan Cox"); 289MODULE_AUTHOR("Alan Cox");
300MODULE_DESCRIPTION("low-level driver for the Highpoint HPT343/363"); 290MODULE_DESCRIPTION("low-level driver for the Highpoint HPT343/363");
301MODULE_LICENSE("GPL"); 291MODULE_LICENSE("GPL");
302MODULE_DEVICE_TABLE(pci, hpt3x3); 292MODULE_DEVICE_TABLE(pci, hpt3x3);
303MODULE_VERSION(DRV_VERSION); 293MODULE_VERSION(DRV_VERSION);
304
305module_init(hpt3x3_init);
306module_exit(hpt3x3_exit);
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index c5af97f5107b..87bb05b3cafc 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -118,7 +118,7 @@ static int __devinit pata_imx_probe(struct platform_device *pdev)
118 return PTR_ERR(priv->clk); 118 return PTR_ERR(priv->clk);
119 } 119 }
120 120
121 clk_enable(priv->clk); 121 clk_prepare_enable(priv->clk);
122 122
123 host = ata_host_alloc(&pdev->dev, 1); 123 host = ata_host_alloc(&pdev->dev, 1);
124 if (!host) 124 if (!host)
@@ -162,7 +162,7 @@ static int __devinit pata_imx_probe(struct platform_device *pdev)
162 &pata_imx_sht); 162 &pata_imx_sht);
163 163
164free_priv: 164free_priv:
165 clk_disable(priv->clk); 165 clk_disable_unprepare(priv->clk);
166 clk_put(priv->clk); 166 clk_put(priv->clk);
167 return -ENOMEM; 167 return -ENOMEM;
168} 168}
@@ -176,7 +176,7 @@ static int __devexit pata_imx_remove(struct platform_device *pdev)
176 176
177 __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN); 177 __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
178 178
179 clk_disable(priv->clk); 179 clk_disable_unprepare(priv->clk);
180 clk_put(priv->clk); 180 clk_put(priv->clk);
181 181
182 return 0; 182 return 0;
@@ -194,7 +194,7 @@ static int pata_imx_suspend(struct device *dev)
194 __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN); 194 __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
195 priv->ata_ctl = 195 priv->ata_ctl =
196 __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL); 196 __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
197 clk_disable(priv->clk); 197 clk_disable_unprepare(priv->clk);
198 } 198 }
199 199
200 return ret; 200 return ret;
@@ -205,7 +205,7 @@ static int pata_imx_resume(struct device *dev)
205 struct ata_host *host = dev_get_drvdata(dev); 205 struct ata_host *host = dev_get_drvdata(dev);
206 struct pata_imx_priv *priv = host->private_data; 206 struct pata_imx_priv *priv = host->private_data;
207 207
208 clk_enable(priv->clk); 208 clk_prepare_enable(priv->clk);
209 209
210 __raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL); 210 __raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL);
211 211
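
The clk_enable() -> clk_prepare_enable() conversion follows the common clock framework's split of clock handling into a sleepable prepare phase and an atomic enable phase. The helpers are plain combinations, roughly:

	static inline int clk_prepare_enable(struct clk *clk)
	{
		int ret;

		ret = clk_prepare(clk);		/* may sleep, e.g. PLL lock */
		if (ret)
			return ret;
		ret = clk_enable(clk);		/* atomic clock-gate toggle */
		if (ret)
			clk_unprepare(clk);
		return ret;
	}

	static inline void clk_disable_unprepare(struct clk *clk)
	{
		clk_disable(clk);
		clk_unprepare(clk);
	}

Since every call site here runs in process context (probe, remove, suspend, resume), the combined helpers are the natural fit.
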
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index cf9164d79f11..2a8dd9527ecc 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -290,18 +290,7 @@ static struct pci_driver it8213_pci_driver = {
290#endif 290#endif
291}; 291};
292 292
293static int __init it8213_init(void) 293module_pci_driver(it8213_pci_driver);
294{
295 return pci_register_driver(&it8213_pci_driver);
296}
297
298static void __exit it8213_exit(void)
299{
300 pci_unregister_driver(&it8213_pci_driver);
301}
302
303module_init(it8213_init);
304module_exit(it8213_exit);
305 294
306MODULE_AUTHOR("Alan Cox"); 295MODULE_AUTHOR("Alan Cox");
307MODULE_DESCRIPTION("SCSI low-level driver for the ITE 8213"); 296MODULE_DESCRIPTION("SCSI low-level driver for the ITE 8213");
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 62c5d00abd2e..9cc05d808ad5 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -972,15 +972,7 @@ static struct pci_driver it821x_pci_driver = {
972#endif 972#endif
973}; 973};
974 974
975static int __init it821x_init(void) 975module_pci_driver(it821x_pci_driver);
976{
977 return pci_register_driver(&it821x_pci_driver);
978}
979
980static void __exit it821x_exit(void)
981{
982 pci_unregister_driver(&it821x_pci_driver);
983}
984 976
985MODULE_AUTHOR("Alan Cox"); 977MODULE_AUTHOR("Alan Cox");
986MODULE_DESCRIPTION("low-level driver for the IT8211/IT8212 IDE RAID controller"); 978MODULE_DESCRIPTION("low-level driver for the IT8211/IT8212 IDE RAID controller");
@@ -988,9 +980,5 @@ MODULE_LICENSE("GPL");
988MODULE_DEVICE_TABLE(pci, it821x); 980MODULE_DEVICE_TABLE(pci, it821x);
989MODULE_VERSION(DRV_VERSION); 981MODULE_VERSION(DRV_VERSION);
990 982
991
992module_param_named(noraid, it8212_noraid, int, S_IRUGO); 983module_param_named(noraid, it8212_noraid, int, S_IRUGO);
993MODULE_PARM_DESC(noraid, "Force card into bypass mode"); 984MODULE_PARM_DESC(noraid, "Force card into bypass mode");
994
995module_init(it821x_init);
996module_exit(it821x_exit);
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index cb3babbb7035..76e739b031b6 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -164,18 +164,7 @@ static struct pci_driver jmicron_pci_driver = {
164#endif 164#endif
165}; 165};
166 166
167static int __init jmicron_init(void) 167module_pci_driver(jmicron_pci_driver);
168{
169 return pci_register_driver(&jmicron_pci_driver);
170}
171
172static void __exit jmicron_exit(void)
173{
174 pci_unregister_driver(&jmicron_pci_driver);
175}
176
177module_init(jmicron_init);
178module_exit(jmicron_exit);
179 168
180MODULE_AUTHOR("Alan Cox"); 169MODULE_AUTHOR("Alan Cox");
181MODULE_DESCRIPTION("SCSI low-level driver for Jmicron PATA ports"); 170MODULE_DESCRIPTION("SCSI low-level driver for Jmicron PATA ports");
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 5d7f58a7e34d..a4f5e781c8c2 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -178,22 +178,10 @@ static struct pci_driver marvell_pci_driver = {
178#endif 178#endif
179}; 179};
180 180
181static int __init marvell_init(void) 181module_pci_driver(marvell_pci_driver);
182{
183 return pci_register_driver(&marvell_pci_driver);
184}
185
186static void __exit marvell_exit(void)
187{
188 pci_unregister_driver(&marvell_pci_driver);
189}
190
191module_init(marvell_init);
192module_exit(marvell_exit);
193 182
194MODULE_AUTHOR("Alan Cox"); 183MODULE_AUTHOR("Alan Cox");
195MODULE_DESCRIPTION("SCSI low-level driver for Marvell ATA in legacy mode"); 184MODULE_DESCRIPTION("SCSI low-level driver for Marvell ATA in legacy mode");
196MODULE_LICENSE("GPL"); 185MODULE_LICENSE("GPL");
197MODULE_DEVICE_TABLE(pci, marvell_pci_tbl); 186MODULE_DEVICE_TABLE(pci, marvell_pci_tbl);
198MODULE_VERSION(DRV_VERSION); 187MODULE_VERSION(DRV_VERSION);
199
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index 9dc16df84191..1f5f28bb0bb8 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -230,21 +230,10 @@ static struct pci_driver mpiix_pci_driver = {
230#endif 230#endif
231}; 231};
232 232
233static int __init mpiix_init(void) 233module_pci_driver(mpiix_pci_driver);
234{
235 return pci_register_driver(&mpiix_pci_driver);
236}
237
238static void __exit mpiix_exit(void)
239{
240 pci_unregister_driver(&mpiix_pci_driver);
241}
242 234
243MODULE_AUTHOR("Alan Cox"); 235MODULE_AUTHOR("Alan Cox");
244MODULE_DESCRIPTION("low-level driver for Intel MPIIX"); 236MODULE_DESCRIPTION("low-level driver for Intel MPIIX");
245MODULE_LICENSE("GPL"); 237MODULE_LICENSE("GPL");
246MODULE_DEVICE_TABLE(pci, mpiix); 238MODULE_DEVICE_TABLE(pci, mpiix);
247MODULE_VERSION(DRV_VERSION); 239MODULE_VERSION(DRV_VERSION);
248
249module_init(mpiix_init);
250module_exit(mpiix_exit);
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index 9979a43bc596..ad1a0febd620 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -99,22 +99,10 @@ static struct pci_driver netcell_pci_driver = {
99#endif 99#endif
100}; 100};
101 101
102static int __init netcell_init(void) 102module_pci_driver(netcell_pci_driver);
103{
104 return pci_register_driver(&netcell_pci_driver);
105}
106
107static void __exit netcell_exit(void)
108{
109 pci_unregister_driver(&netcell_pci_driver);
110}
111
112module_init(netcell_init);
113module_exit(netcell_exit);
114 103
115MODULE_AUTHOR("Alan Cox"); 104MODULE_AUTHOR("Alan Cox");
116MODULE_DESCRIPTION("SCSI low-level driver for Netcell PATA RAID"); 105MODULE_DESCRIPTION("SCSI low-level driver for Netcell PATA RAID");
117MODULE_LICENSE("GPL"); 106MODULE_LICENSE("GPL");
118MODULE_DEVICE_TABLE(pci, netcell_pci_tbl); 107MODULE_DEVICE_TABLE(pci, netcell_pci_tbl);
119MODULE_VERSION(DRV_VERSION); 108MODULE_VERSION(DRV_VERSION);
120
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index e277a142138c..12010ed596c4 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -190,21 +190,10 @@ static struct pci_driver ninja32_pci_driver = {
190#endif 190#endif
191}; 191};
192 192
193static int __init ninja32_init(void) 193module_pci_driver(ninja32_pci_driver);
194{
195 return pci_register_driver(&ninja32_pci_driver);
196}
197
198static void __exit ninja32_exit(void)
199{
200 pci_unregister_driver(&ninja32_pci_driver);
201}
202 194
203MODULE_AUTHOR("Alan Cox"); 195MODULE_AUTHOR("Alan Cox");
204MODULE_DESCRIPTION("low-level driver for Ninja32 ATA"); 196MODULE_DESCRIPTION("low-level driver for Ninja32 ATA");
205MODULE_LICENSE("GPL"); 197MODULE_LICENSE("GPL");
206MODULE_DEVICE_TABLE(pci, ninja32); 198MODULE_DEVICE_TABLE(pci, ninja32);
207MODULE_VERSION(DRV_VERSION); 199MODULE_VERSION(DRV_VERSION);
208
209module_init(ninja32_init);
210module_exit(ninja32_exit);
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index 31d5986537a3..0c424dae56e7 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -168,21 +168,10 @@ static struct pci_driver ns87410_pci_driver = {
168#endif 168#endif
169}; 169};
170 170
171static int __init ns87410_init(void) 171module_pci_driver(ns87410_pci_driver);
172{
173 return pci_register_driver(&ns87410_pci_driver);
174}
175
176static void __exit ns87410_exit(void)
177{
178 pci_unregister_driver(&ns87410_pci_driver);
179}
180 172
181MODULE_AUTHOR("Alan Cox"); 173MODULE_AUTHOR("Alan Cox");
182MODULE_DESCRIPTION("low-level driver for Nat Semi 87410"); 174MODULE_DESCRIPTION("low-level driver for Nat Semi 87410");
183MODULE_LICENSE("GPL"); 175MODULE_LICENSE("GPL");
184MODULE_DEVICE_TABLE(pci, ns87410); 176MODULE_DEVICE_TABLE(pci, ns87410);
185MODULE_VERSION(DRV_VERSION); 177MODULE_VERSION(DRV_VERSION);
186
187module_init(ns87410_init);
188module_exit(ns87410_exit);
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index f1d517bc5b49..6f6fa1060505 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -414,18 +414,7 @@ static struct pci_driver ns87415_pci_driver = {
414#endif 414#endif
415}; 415};
416 416
417static int __init ns87415_init(void) 417module_pci_driver(ns87415_pci_driver);
418{
419 return pci_register_driver(&ns87415_pci_driver);
420}
421
422static void __exit ns87415_exit(void)
423{
424 pci_unregister_driver(&ns87415_pci_driver);
425}
426
427module_init(ns87415_init);
428module_exit(ns87415_exit);
429 418
430MODULE_AUTHOR("Alan Cox"); 419MODULE_AUTHOR("Alan Cox");
431MODULE_DESCRIPTION("ATA low-level driver for NS87415 controllers"); 420MODULE_DESCRIPTION("ATA low-level driver for NS87415 controllers");
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index 98cdf50e4065..d77b2e1054ef 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -265,22 +265,10 @@ static struct pci_driver oldpiix_pci_driver = {
265#endif 265#endif
266}; 266};
267 267
268static int __init oldpiix_init(void) 268module_pci_driver(oldpiix_pci_driver);
269{
270 return pci_register_driver(&oldpiix_pci_driver);
271}
272
273static void __exit oldpiix_exit(void)
274{
275 pci_unregister_driver(&oldpiix_pci_driver);
276}
277
278module_init(oldpiix_init);
279module_exit(oldpiix_exit);
280 269
281MODULE_AUTHOR("Alan Cox"); 270MODULE_AUTHOR("Alan Cox");
282MODULE_DESCRIPTION("SCSI low-level driver for early PIIX series controllers"); 271MODULE_DESCRIPTION("SCSI low-level driver for early PIIX series controllers");
283MODULE_LICENSE("GPL"); 272MODULE_LICENSE("GPL");
284MODULE_DEVICE_TABLE(pci, oldpiix_pci_tbl); 273MODULE_DEVICE_TABLE(pci, oldpiix_pci_tbl);
285MODULE_VERSION(DRV_VERSION); 274MODULE_VERSION(DRV_VERSION);
286
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index accc033faf77..4ea70cd22aee 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -191,22 +191,10 @@ static struct pci_driver opti_pci_driver = {
191#endif 191#endif
192}; 192};
193 193
194static int __init opti_init(void) 194module_pci_driver(opti_pci_driver);
195{
196 return pci_register_driver(&opti_pci_driver);
197}
198
199static void __exit opti_exit(void)
200{
201 pci_unregister_driver(&opti_pci_driver);
202}
203
204 195
205MODULE_AUTHOR("Alan Cox"); 196MODULE_AUTHOR("Alan Cox");
206MODULE_DESCRIPTION("low-level driver for Opti 621/621X"); 197MODULE_DESCRIPTION("low-level driver for Opti 621/621X");
207MODULE_LICENSE("GPL"); 198MODULE_LICENSE("GPL");
208MODULE_DEVICE_TABLE(pci, opti); 199MODULE_DEVICE_TABLE(pci, opti);
209MODULE_VERSION(DRV_VERSION); 200MODULE_VERSION(DRV_VERSION);
210
211module_init(opti_init);
212module_exit(opti_exit);
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index 77cb91408632..78ede3fd1875 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -447,21 +447,10 @@ static struct pci_driver optidma_pci_driver = {
447#endif 447#endif
448}; 448};
449 449
450static int __init optidma_init(void) 450module_pci_driver(optidma_pci_driver);
451{
452 return pci_register_driver(&optidma_pci_driver);
453}
454
455static void __exit optidma_exit(void)
456{
457 pci_unregister_driver(&optidma_pci_driver);
458}
459 451
460MODULE_AUTHOR("Alan Cox"); 452MODULE_AUTHOR("Alan Cox");
461MODULE_DESCRIPTION("low-level driver for Opti Firestar/Firestar Plus"); 453MODULE_DESCRIPTION("low-level driver for Opti Firestar/Firestar Plus");
462MODULE_LICENSE("GPL"); 454MODULE_LICENSE("GPL");
463MODULE_DEVICE_TABLE(pci, optidma); 455MODULE_DEVICE_TABLE(pci, optidma);
464MODULE_VERSION(DRV_VERSION); 456MODULE_VERSION(DRV_VERSION);
465
466module_init(optidma_init);
467module_exit(optidma_exit);
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index a808ba03bd7f..958238dda8fc 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -170,7 +170,8 @@ static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data)
170{ 170{
171 int *is_kme = priv_data; 171 int *is_kme = priv_data;
172 172
173 if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) { 173 if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH)
174 != IO_DATA_PATH_WIDTH_8) {
174 pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; 175 pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
175 pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; 176 pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
176 } 177 }
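
The old expression tested a single bit of a multi-bit field. With the pcmcia constants (IO_DATA_PATH_WIDTH is the 0x18 mask; _8 = 0x08, _16 = 0x10, _AUTO = 0x18), IO_DATA_PATH_WIDTH_AUTO includes the _8 bit, so the bit test did not actually mean "width is exactly 8-bit". The masked equality comparison states that intent precisely; worked through for an auto-width window:

	/* flags width field = IO_DATA_PATH_WIDTH_AUTO = 0x18 */
	flags & IO_DATA_PATH_WIDTH_8		/* 0x18 & 0x08 = 0x08, nonzero:
						 * old test reads this as 8-bit */
	(flags & IO_DATA_PATH_WIDTH) != IO_DATA_PATH_WIDTH_8
						/* 0x18 != 0x08, true:
						 * new test correctly treats it
						 * as not exactly 8-bit */

Masking the whole field before comparing is the standard idiom whenever a flags word encodes an enumerated value rather than independent bits.
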
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 7d63f24179c7..c9399c8688c5 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -784,21 +784,4 @@ static int pdc2027x_reinit_one(struct pci_dev *pdev)
784} 784}
785#endif 785#endif
786 786
787/** 787module_pci_driver(pdc2027x_pci_driver);
788 * pdc2027x_init - Called after this module is loaded into the kernel.
789 */
790static int __init pdc2027x_init(void)
791{
792 return pci_register_driver(&pdc2027x_pci_driver);
793}
794
795/**
796 * pdc2027x_exit - Called before this module unloaded from the kernel
797 */
798static void __exit pdc2027x_exit(void)
799{
800 pci_unregister_driver(&pdc2027x_pci_driver);
801}
802
803module_init(pdc2027x_init);
804module_exit(pdc2027x_exit);
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index c2ed5868dda6..c34fc50070a6 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -384,21 +384,10 @@ static struct pci_driver pdc202xx_pci_driver = {
384#endif 384#endif
385}; 385};
386 386
387static int __init pdc202xx_init(void) 387module_pci_driver(pdc202xx_pci_driver);
388{
389 return pci_register_driver(&pdc202xx_pci_driver);
390}
391
392static void __exit pdc202xx_exit(void)
393{
394 pci_unregister_driver(&pdc202xx_pci_driver);
395}
396 388
397MODULE_AUTHOR("Alan Cox"); 389MODULE_AUTHOR("Alan Cox");
398MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267"); 390MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267");
399MODULE_LICENSE("GPL"); 391MODULE_LICENSE("GPL");
400MODULE_DEVICE_TABLE(pci, pdc202xx); 392MODULE_DEVICE_TABLE(pci, pdc202xx);
401MODULE_VERSION(DRV_VERSION); 393MODULE_VERSION(DRV_VERSION);
402
403module_init(pdc202xx_init);
404module_exit(pdc202xx_exit);
diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c
index cb01bf9496fe..2beb6b5045f8 100644
--- a/drivers/ata/pata_piccolo.c
+++ b/drivers/ata/pata_piccolo.c
@@ -117,24 +117,10 @@ static struct pci_driver ata_tosh_pci_driver = {
117#endif 117#endif
118}; 118};
119 119
120static int __init ata_tosh_init(void) 120module_pci_driver(ata_tosh_pci_driver);
121{
122 return pci_register_driver(&ata_tosh_pci_driver);
123}
124
125
126static void __exit ata_tosh_exit(void)
127{
128 pci_unregister_driver(&ata_tosh_pci_driver);
129}
130
131 121
132MODULE_AUTHOR("Alan Cox"); 122MODULE_AUTHOR("Alan Cox");
133MODULE_DESCRIPTION("Low level driver for Toshiba Piccolo ATA"); 123MODULE_DESCRIPTION("Low level driver for Toshiba Piccolo ATA");
134MODULE_LICENSE("GPL"); 124MODULE_LICENSE("GPL");
135MODULE_DEVICE_TABLE(pci, ata_tosh); 125MODULE_DEVICE_TABLE(pci, ata_tosh);
136MODULE_VERSION(DRV_VERSION); 126MODULE_VERSION(DRV_VERSION);
137
138module_init(ata_tosh_init);
139module_exit(ata_tosh_exit);
140
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index b2d3a2bb4e60..f582ba180a7d 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -244,22 +244,10 @@ static struct pci_driver radisys_pci_driver = {
244#endif 244#endif
245}; 245};
246 246
247static int __init radisys_init(void) 247module_pci_driver(radisys_pci_driver);
248{
249 return pci_register_driver(&radisys_pci_driver);
250}
251
252static void __exit radisys_exit(void)
253{
254 pci_unregister_driver(&radisys_pci_driver);
255}
256
257module_init(radisys_init);
258module_exit(radisys_exit);
259 248
260MODULE_AUTHOR("Alan Cox"); 249MODULE_AUTHOR("Alan Cox");
261MODULE_DESCRIPTION("SCSI low-level driver for Radisys R82600 controllers"); 250MODULE_DESCRIPTION("SCSI low-level driver for Radisys R82600 controllers");
262MODULE_LICENSE("GPL"); 251MODULE_LICENSE("GPL");
263MODULE_DEVICE_TABLE(pci, radisys_pci_tbl); 252MODULE_DEVICE_TABLE(pci, radisys_pci_tbl);
264MODULE_VERSION(DRV_VERSION); 253MODULE_VERSION(DRV_VERSION);
265
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index e6a2dd7809c1..32a3499e83e7 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -394,18 +394,7 @@ static struct pci_driver rdc_pci_driver = {
394}; 394};
395 395
396 396
397static int __init rdc_init(void) 397module_pci_driver(rdc_pci_driver);
398{
399 return pci_register_driver(&rdc_pci_driver);
400}
401
402static void __exit rdc_exit(void)
403{
404 pci_unregister_driver(&rdc_pci_driver);
405}
406
407module_init(rdc_init);
408module_exit(rdc_exit);
409 398
410MODULE_AUTHOR("Alan Cox (based on ata_piix)"); 399MODULE_AUTHOR("Alan Cox (based on ata_piix)");
411MODULE_DESCRIPTION("SCSI low-level driver for RDC PATA controllers"); 400MODULE_DESCRIPTION("SCSI low-level driver for RDC PATA controllers");
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index aca321e1e6a2..60f4de2dd47d 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -140,22 +140,10 @@ static struct pci_driver rz1000_pci_driver = {
140#endif 140#endif
141}; 141};
142 142
143static int __init rz1000_init(void) 143module_pci_driver(rz1000_pci_driver);
144{
145 return pci_register_driver(&rz1000_pci_driver);
146}
147
148static void __exit rz1000_exit(void)
149{
150 pci_unregister_driver(&rz1000_pci_driver);
151}
152 144
153MODULE_AUTHOR("Alan Cox"); 145MODULE_AUTHOR("Alan Cox");
154MODULE_DESCRIPTION("low-level driver for RZ1000 PCI ATA"); 146MODULE_DESCRIPTION("low-level driver for RZ1000 PCI ATA");
155MODULE_LICENSE("GPL"); 147MODULE_LICENSE("GPL");
156MODULE_DEVICE_TABLE(pci, pata_rz1000); 148MODULE_DEVICE_TABLE(pci, pata_rz1000);
157MODULE_VERSION(DRV_VERSION); 149MODULE_VERSION(DRV_VERSION);
158
159module_init(rz1000_init);
160module_exit(rz1000_exit);
161
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index c0e603a84f7f..ce2f828c17b3 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -261,21 +261,10 @@ static struct pci_driver sc1200_pci_driver = {
261#endif 261#endif
262}; 262};
263 263
264static int __init sc1200_init(void) 264module_pci_driver(sc1200_pci_driver);
265{
266 return pci_register_driver(&sc1200_pci_driver);
267}
268
269static void __exit sc1200_exit(void)
270{
271 pci_unregister_driver(&sc1200_pci_driver);
272}
273 265
274MODULE_AUTHOR("Alan Cox, Mark Lord"); 266MODULE_AUTHOR("Alan Cox, Mark Lord");
275MODULE_DESCRIPTION("low-level driver for the NS/AMD SC1200"); 267MODULE_DESCRIPTION("low-level driver for the NS/AMD SC1200");
276MODULE_LICENSE("GPL"); 268MODULE_LICENSE("GPL");
277MODULE_DEVICE_TABLE(pci, sc1200); 269MODULE_DEVICE_TABLE(pci, sc1200);
278MODULE_VERSION(DRV_VERSION); 270MODULE_VERSION(DRV_VERSION);
279
280module_init(sc1200_init);
281module_exit(sc1200_exit);
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index e265f835c95d..f35f15f4d83e 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -1103,26 +1103,7 @@ static struct pci_driver scc_pci_driver = {
1103#endif 1103#endif
1104}; 1104};
1105 1105
1106static int __init scc_init (void) 1106module_pci_driver(scc_pci_driver);
1107{
1108 int rc;
1109
1110 DPRINTK("pci_register_driver\n");
1111 rc = pci_register_driver(&scc_pci_driver);
1112 if (rc)
1113 return rc;
1114
1115 DPRINTK("done\n");
1116 return 0;
1117}
1118
1119static void __exit scc_exit (void)
1120{
1121 pci_unregister_driver(&scc_pci_driver);
1122}
1123
1124module_init(scc_init);
1125module_exit(scc_exit);
1126 1107
1127MODULE_AUTHOR("Toshiba corp"); 1108MODULE_AUTHOR("Toshiba corp");
1128MODULE_DESCRIPTION("SCSI low-level driver for Toshiba SCC PATA controller"); 1109MODULE_DESCRIPTION("SCSI low-level driver for Toshiba SCC PATA controller");
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
index 7c78b9993627..db0d18cf1c2a 100644
--- a/drivers/ata/pata_sch.c
+++ b/drivers/ata/pata_sch.c
@@ -179,15 +179,4 @@ static int __devinit sch_init_one(struct pci_dev *pdev,
179 return ata_pci_bmdma_init_one(pdev, ppi, &sch_sht, NULL, 0); 179 return ata_pci_bmdma_init_one(pdev, ppi, &sch_sht, NULL, 0);
180} 180}
181 181
182static int __init sch_init(void) 182module_pci_driver(sch_pci_driver);
183{
184 return pci_register_driver(&sch_pci_driver);
185}
186
187static void __exit sch_exit(void)
188{
189 pci_unregister_driver(&sch_pci_driver);
190}
191
192module_init(sch_init);
193module_exit(sch_exit);
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 71eaf385e970..f3febbce6c46 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -475,21 +475,10 @@ static struct pci_driver serverworks_pci_driver = {
475#endif 475#endif
476}; 476};
477 477
478static int __init serverworks_init(void) 478module_pci_driver(serverworks_pci_driver);
479{
480 return pci_register_driver(&serverworks_pci_driver);
481}
482
483static void __exit serverworks_exit(void)
484{
485 pci_unregister_driver(&serverworks_pci_driver);
486}
487 479
488MODULE_AUTHOR("Alan Cox"); 480MODULE_AUTHOR("Alan Cox");
489MODULE_DESCRIPTION("low-level driver for Serverworks OSB4/CSB5/CSB6"); 481MODULE_DESCRIPTION("low-level driver for Serverworks OSB4/CSB5/CSB6");
490MODULE_LICENSE("GPL"); 482MODULE_LICENSE("GPL");
491MODULE_DEVICE_TABLE(pci, serverworks); 483MODULE_DEVICE_TABLE(pci, serverworks);
492MODULE_VERSION(DRV_VERSION); 484MODULE_VERSION(DRV_VERSION);
493
494module_init(serverworks_init);
495module_exit(serverworks_exit);
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index b92eacf8dd3c..5cfdf94823d0 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -437,21 +437,10 @@ static struct pci_driver sil680_pci_driver = {
437#endif 437#endif
438}; 438};
439 439
440static int __init sil680_init(void) 440module_pci_driver(sil680_pci_driver);
441{
442 return pci_register_driver(&sil680_pci_driver);
443}
444
445static void __exit sil680_exit(void)
446{
447 pci_unregister_driver(&sil680_pci_driver);
448}
449 441
450MODULE_AUTHOR("Alan Cox"); 442MODULE_AUTHOR("Alan Cox");
451MODULE_DESCRIPTION("low-level driver for SI680 PATA"); 443MODULE_DESCRIPTION("low-level driver for SI680 PATA");
452MODULE_LICENSE("GPL"); 444MODULE_LICENSE("GPL");
453MODULE_DEVICE_TABLE(pci, sil680); 445MODULE_DEVICE_TABLE(pci, sil680);
454MODULE_VERSION(DRV_VERSION); 446MODULE_VERSION(DRV_VERSION);
455
456module_init(sil680_init);
457module_exit(sil680_exit);
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index b0edc7de7b2d..2d5ac1361262 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -906,22 +906,10 @@ static struct pci_driver sis_pci_driver = {
906#endif 906#endif
907}; 907};
908 908
909static int __init sis_init(void) 909module_pci_driver(sis_pci_driver);
910{
911 return pci_register_driver(&sis_pci_driver);
912}
913
914static void __exit sis_exit(void)
915{
916 pci_unregister_driver(&sis_pci_driver);
917}
918
919module_init(sis_init);
920module_exit(sis_exit);
921 910
922MODULE_AUTHOR("Alan Cox"); 911MODULE_AUTHOR("Alan Cox");
923MODULE_DESCRIPTION("SCSI low-level driver for SiS ATA"); 912MODULE_DESCRIPTION("SCSI low-level driver for SiS ATA");
924MODULE_LICENSE("GPL"); 913MODULE_LICENSE("GPL");
925MODULE_DEVICE_TABLE(pci, sis_pci_tbl); 914MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
926MODULE_VERSION(DRV_VERSION); 915MODULE_VERSION(DRV_VERSION);
927
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 24cf200dd1c9..738e000107d6 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -372,21 +372,10 @@ static struct pci_driver sl82c105_pci_driver = {
372#endif 372#endif
373}; 373};
374 374
375static int __init sl82c105_init(void) 375module_pci_driver(sl82c105_pci_driver);
376{
377 return pci_register_driver(&sl82c105_pci_driver);
378}
379
380static void __exit sl82c105_exit(void)
381{
382 pci_unregister_driver(&sl82c105_pci_driver);
383}
384 376
385MODULE_AUTHOR("Alan Cox"); 377MODULE_AUTHOR("Alan Cox");
386MODULE_DESCRIPTION("low-level driver for Sl82c105"); 378MODULE_DESCRIPTION("low-level driver for Sl82c105");
387MODULE_LICENSE("GPL"); 379MODULE_LICENSE("GPL");
388MODULE_DEVICE_TABLE(pci, sl82c105); 380MODULE_DEVICE_TABLE(pci, sl82c105);
389MODULE_VERSION(DRV_VERSION); 381MODULE_VERSION(DRV_VERSION);
390
391module_init(sl82c105_init);
392module_exit(sl82c105_exit);
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 28da1c6becf1..c8e589d91231 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -240,21 +240,10 @@ static struct pci_driver triflex_pci_driver = {
240#endif 240#endif
241}; 241};
242 242
243static int __init triflex_init(void) 243module_pci_driver(triflex_pci_driver);
244{
245 return pci_register_driver(&triflex_pci_driver);
246}
247
248static void __exit triflex_exit(void)
249{
250 pci_unregister_driver(&triflex_pci_driver);
251}
252 244
253MODULE_AUTHOR("Alan Cox"); 245MODULE_AUTHOR("Alan Cox");
254MODULE_DESCRIPTION("low-level driver for Compaq Triflex"); 246MODULE_DESCRIPTION("low-level driver for Compaq Triflex");
255MODULE_LICENSE("GPL"); 247MODULE_LICENSE("GPL");
256MODULE_DEVICE_TABLE(pci, triflex); 248MODULE_DEVICE_TABLE(pci, triflex);
257MODULE_VERSION(DRV_VERSION); 249MODULE_VERSION(DRV_VERSION);
258
259module_init(triflex_init);
260module_exit(triflex_exit);
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 255f336cd7ea..8d2a9fdf6b8d 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -711,21 +711,10 @@ static struct pci_driver via_pci_driver = {
711#endif 711#endif
712}; 712};
713 713
714static int __init via_init(void) 714module_pci_driver(via_pci_driver);
715{
716 return pci_register_driver(&via_pci_driver);
717}
718
719static void __exit via_exit(void)
720{
721 pci_unregister_driver(&via_pci_driver);
722}
723 715
724MODULE_AUTHOR("Alan Cox"); 716MODULE_AUTHOR("Alan Cox");
725MODULE_DESCRIPTION("low-level driver for VIA PATA"); 717MODULE_DESCRIPTION("low-level driver for VIA PATA");
726MODULE_LICENSE("GPL"); 718MODULE_LICENSE("GPL");
727MODULE_DEVICE_TABLE(pci, via); 719MODULE_DEVICE_TABLE(pci, via);
728MODULE_VERSION(DRV_VERSION); 720MODULE_VERSION(DRV_VERSION);
729
730module_init(via_init);
731module_exit(via_exit);
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 04911d52f59d..505333340ad5 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -660,21 +660,10 @@ static int adma_ata_init_one(struct pci_dev *pdev,
660 &adma_ata_sht); 660 &adma_ata_sht);
661} 661}
662 662
663static int __init adma_ata_init(void) 663module_pci_driver(adma_ata_pci_driver);
664{
665 return pci_register_driver(&adma_ata_pci_driver);
666}
667
668static void __exit adma_ata_exit(void)
669{
670 pci_unregister_driver(&adma_ata_pci_driver);
671}
672 664
673MODULE_AUTHOR("Mark Lord"); 665MODULE_AUTHOR("Mark Lord");
674MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver"); 666MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver");
675MODULE_LICENSE("GPL"); 667MODULE_LICENSE("GPL");
676MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl); 668MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl);
677MODULE_VERSION(DRV_VERSION); 669MODULE_VERSION(DRV_VERSION);
678
679module_init(adma_ata_init);
680module_exit(adma_ata_exit);
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
old mode 100644
new mode 100755
index 69f7cde49c6b..937aeb34b310
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -158,6 +158,7 @@ enum {
158/* Assign HW handshaking interface (x) to destination / source peripheral */ 158/* Assign HW handshaking interface (x) to destination / source peripheral */
159#define DMA_CFG_HW_HS_DEST(int_num) (((int_num) & 0xF) << 11) 159#define DMA_CFG_HW_HS_DEST(int_num) (((int_num) & 0xF) << 11)
160#define DMA_CFG_HW_HS_SRC(int_num) (((int_num) & 0xF) << 7) 160#define DMA_CFG_HW_HS_SRC(int_num) (((int_num) & 0xF) << 7)
161#define DMA_CFG_HW_CH_PRIOR(int_num) (((int_num) & 0xF) << 5)
161#define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master)) 162#define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master))
162 163
163/* 164/*
@@ -318,6 +319,7 @@ struct sata_dwc_host_priv {
318 u32 dma_interrupt_count; 319 u32 dma_interrupt_count;
319 struct ahb_dma_regs *sata_dma_regs; 320 struct ahb_dma_regs *sata_dma_regs;
320 struct device *dwc_dev; 321 struct device *dwc_dev;
322 int dma_channel;
321}; 323};
322struct sata_dwc_host_priv host_pvt; 324struct sata_dwc_host_priv host_pvt;
323/* 325/*
@@ -437,15 +439,12 @@ static void clear_chan_interrupts(int c)
437 */ 439 */
438static int dma_request_channel(void) 440static int dma_request_channel(void)
439{ 441{
440 int i; 442 /* Check if the channel is not currently in use */
441 443 if (!(in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) &
442 for (i = 0; i < DMA_NUM_CHANS; i++) { 444 DMA_CHANNEL(host_pvt.dma_channel)))
443 if (!(in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) &\ 445 return host_pvt.dma_channel;
444 DMA_CHANNEL(i))) 446 dev_err(host_pvt.dwc_dev, "%s Channel %d is currently in use\n",
445 return i; 447 __func__, host_pvt.dma_channel);
446 }
447 dev_err(host_pvt.dwc_dev, "%s NO channel chan_en: 0x%08x\n", __func__,
448 in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)));
449 return -1; 448 return -1;
450} 449}
451 450
@@ -481,7 +480,8 @@ static irqreturn_t dma_dwc_interrupt(int irq, void *hsdev_instance)
481 dev_dbg(ap->dev, "eot=0x%08x err=0x%08x pending=%d active port=%d\n", 480 dev_dbg(ap->dev, "eot=0x%08x err=0x%08x pending=%d active port=%d\n",
482 tfr_reg, err_reg, hsdevp->dma_pending[tag], port); 481 tfr_reg, err_reg, hsdevp->dma_pending[tag], port);
483 482
484 for (chan = 0; chan < DMA_NUM_CHANS; chan++) { 483 chan = host_pvt.dma_channel;
484 if (chan >= 0) {
485 /* Check for end-of-transfer interrupt. */ 485 /* Check for end-of-transfer interrupt. */
486 if (tfr_reg & DMA_CHANNEL(chan)) { 486 if (tfr_reg & DMA_CHANNEL(chan)) {
487 /* 487 /*
@@ -534,9 +534,9 @@ static irqreturn_t dma_dwc_interrupt(int irq, void *hsdev_instance)
534static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq) 534static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq)
535{ 535{
536 int retval = 0; 536 int retval = 0;
537 int chan; 537 int chan = host_pvt.dma_channel;
538 538
539 for (chan = 0; chan < DMA_NUM_CHANS; chan++) { 539 if (chan >= 0) {
540 /* Unmask error interrupt */ 540 /* Unmask error interrupt */
541 out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.error.low, 541 out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.error.low,
542 DMA_ENABLE_CHAN(chan)); 542 DMA_ENABLE_CHAN(chan));
@@ -575,7 +575,10 @@ static int map_sg_to_lli(struct scatterlist *sg, int num_elems,
575 int fis_len = 0; 575 int fis_len = 0;
576 dma_addr_t next_llp; 576 dma_addr_t next_llp;
577 int bl; 577 int bl;
578 int sms_val, dms_val;
578 579
580 sms_val = 0;
581 dms_val = 1 + host_pvt.dma_channel;
579 dev_dbg(host_pvt.dwc_dev, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x" 582 dev_dbg(host_pvt.dwc_dev, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x"
580 " dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli, 583 " dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli,
581 (u32)dmadr_addr); 584 (u32)dmadr_addr);
@@ -635,8 +638,8 @@ static int map_sg_to_lli(struct scatterlist *sg, int num_elems,
635 638
636 lli[idx].ctl.low = cpu_to_le32( 639 lli[idx].ctl.low = cpu_to_le32(
637 DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) | 640 DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
638 DMA_CTL_SMS(0) | 641 DMA_CTL_SMS(sms_val) |
639 DMA_CTL_DMS(1) | 642 DMA_CTL_DMS(dms_val) |
640 DMA_CTL_SRC_MSIZE(bl) | 643 DMA_CTL_SRC_MSIZE(bl) |
641 DMA_CTL_DST_MSIZE(bl) | 644 DMA_CTL_DST_MSIZE(bl) |
642 DMA_CTL_SINC_NOCHANGE | 645 DMA_CTL_SINC_NOCHANGE |
@@ -651,8 +654,8 @@ static int map_sg_to_lli(struct scatterlist *sg, int num_elems,
651 654
652 lli[idx].ctl.low = cpu_to_le32( 655 lli[idx].ctl.low = cpu_to_le32(
653 DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) | 656 DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
654 DMA_CTL_SMS(1) | 657 DMA_CTL_SMS(dms_val) |
655 DMA_CTL_DMS(0) | 658 DMA_CTL_DMS(sms_val) |
656 DMA_CTL_SRC_MSIZE(bl) | 659 DMA_CTL_SRC_MSIZE(bl) |
657 DMA_CTL_DST_MSIZE(bl) | 660 DMA_CTL_DST_MSIZE(bl) |
658 DMA_CTL_DINC_NOCHANGE | 661 DMA_CTL_DINC_NOCHANGE |
@@ -744,8 +747,10 @@ static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
744 747
745 /* Program the CFG register. */ 748 /* Program the CFG register. */
746 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.high), 749 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.high),
750 DMA_CFG_HW_HS_SRC(dma_ch) | DMA_CFG_HW_HS_DEST(dma_ch) |
747 DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ); 751 DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ);
748 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.low), 0); 752 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.low),
753 DMA_CFG_HW_CH_PRIOR(dma_ch));
749 754
750 /* Program the address of the linked list */ 755 /* Program the address of the linked list */
751 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].llp.low), 756 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].llp.low),
@@ -1581,10 +1586,31 @@ static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
1581 1586
1582static void sata_dwc_error_handler(struct ata_port *ap) 1587static void sata_dwc_error_handler(struct ata_port *ap)
1583{ 1588{
1584 ap->link.flags |= ATA_LFLAG_NO_HRST;
1585 ata_sff_error_handler(ap); 1589 ata_sff_error_handler(ap);
1586} 1590}
1587 1591
1592int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
1593 unsigned long deadline)
1594{
1595 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
1596 int ret;
1597
1598 ret = sata_sff_hardreset(link, class, deadline);
1599
1600 sata_dwc_enable_interrupts(hsdev);
1601
1602 /* Reconfigure the DMA control register */
1603 out_le32(&hsdev->sata_dwc_regs->dmacr,
1604 SATA_DWC_DMACR_TXRXCH_CLEAR);
1605
1606 /* Reconfigure the DMA Burst Transaction Size register */
1607 out_le32(&hsdev->sata_dwc_regs->dbtsr,
1608 SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
1609 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));
1610
1611 return ret;
1612}
1613
1588/* 1614/*
1589 * scsi mid-layer and libata interface structures 1615 * scsi mid-layer and libata interface structures
1590 */ 1616 */
@@ -1604,6 +1630,7 @@ static struct ata_port_operations sata_dwc_ops = {
1604 .inherits = &ata_sff_port_ops, 1630 .inherits = &ata_sff_port_ops,
1605 1631
1606 .error_handler = sata_dwc_error_handler, 1632 .error_handler = sata_dwc_error_handler,
1633 .hardreset = sata_dwc_hardreset,
1607 1634
1608 .qc_prep = sata_dwc_qc_prep, 1635 .qc_prep = sata_dwc_qc_prep,
1609 .qc_issue = sata_dwc_qc_issue, 1636 .qc_issue = sata_dwc_qc_issue,
@@ -1638,6 +1665,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1638 struct ata_host *host; 1665 struct ata_host *host;
1639 struct ata_port_info pi = sata_dwc_port_info[0]; 1666 struct ata_port_info pi = sata_dwc_port_info[0];
1640 const struct ata_port_info *ppi[] = { &pi, NULL }; 1667 const struct ata_port_info *ppi[] = { &pi, NULL };
1668 struct device_node *np = ofdev->dev.of_node;
1669 u32 dma_chan;
1641 1670
1642 /* Allocate DWC SATA device */ 1671 /* Allocate DWC SATA device */
1643 hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL); 1672 hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL);
@@ -1647,6 +1676,13 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1647 goto error; 1676 goto error;
1648 } 1677 }
1649 1678
1679 if (of_property_read_u32(np, "dma-channel", &dma_chan)) {
1680 dev_warn(&ofdev->dev, "no dma-channel property set."
1681 " Use channel 0\n");
1682 dma_chan = 0;
1683 }
1684 host_pvt.dma_channel = dma_chan;
1685
1650 /* Ioremap SATA registers */ 1686 /* Ioremap SATA registers */
1651 base = of_iomap(ofdev->dev.of_node, 0); 1687 base = of_iomap(ofdev->dev.of_node, 0);
1652 if (!base) { 1688 if (!base) {
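
The sata_dwc_460ex changes pin the driver to a single AHB DMA channel, selected by an optional "dma-channel" device-tree property with a fallback to channel 0, wire that channel into the CFG/CTL programming (DMA_CFG_HW_CH_PRIOR(), per-channel SMS/DMS master selects), and add a hardreset callback that restores DMACR and DBTSR after a SATA hard reset re-initializes the controller. With a fixed channel, dma_request_channel() reduces to a busy test; a minimal restatement, assuming DMA_CHANNEL(ch) is this driver's usual (1 << ch) mask into the DesignWare channel-enable register:

        /* Illustrative only: a channel whose enable bit is still set
         * has a transfer in flight, hence the dev_err() and -1 above. */
        static bool dwc_dma_channel_busy(u32 chan_en, int ch)
        {
                return chan_en & (1U << ch);    /* DMA_CHANNEL(ch) */
        }
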
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 5c7d70c03bf0..dc35f4d42b8b 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -894,21 +894,10 @@ static struct pci_driver inic_pci_driver = {
894 .remove = ata_pci_remove_one, 894 .remove = ata_pci_remove_one,
895}; 895};
896 896
897static int __init inic_init(void) 897module_pci_driver(inic_pci_driver);
898{
899 return pci_register_driver(&inic_pci_driver);
900}
901
902static void __exit inic_exit(void)
903{
904 pci_unregister_driver(&inic_pci_driver);
905}
906 898
907MODULE_AUTHOR("Tejun Heo"); 899MODULE_AUTHOR("Tejun Heo");
908MODULE_DESCRIPTION("low-level driver for Initio 162x SATA"); 900MODULE_DESCRIPTION("low-level driver for Initio 162x SATA");
909MODULE_LICENSE("GPL v2"); 901MODULE_LICENSE("GPL v2");
910MODULE_DEVICE_TABLE(pci, inic_pci_tbl); 902MODULE_DEVICE_TABLE(pci, inic_pci_tbl);
911MODULE_VERSION(DRV_VERSION); 903MODULE_VERSION(DRV_VERSION);
912
913module_init(inic_init);
914module_exit(inic_exit);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 24712adf69df..311be18d3f03 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -65,6 +65,8 @@
65#include <linux/mbus.h> 65#include <linux/mbus.h>
66#include <linux/bitops.h> 66#include <linux/bitops.h>
67#include <linux/gfp.h> 67#include <linux/gfp.h>
68#include <linux/of.h>
69#include <linux/of_irq.h>
68#include <scsi/scsi_host.h> 70#include <scsi/scsi_host.h>
69#include <scsi/scsi_cmnd.h> 71#include <scsi/scsi_cmnd.h>
70#include <scsi/scsi_device.h> 72#include <scsi/scsi_device.h>
@@ -4026,7 +4028,7 @@ static int mv_platform_probe(struct platform_device *pdev)
4026 struct ata_host *host; 4028 struct ata_host *host;
4027 struct mv_host_priv *hpriv; 4029 struct mv_host_priv *hpriv;
4028 struct resource *res; 4030 struct resource *res;
4029 int n_ports = 0; 4031 int n_ports = 0, irq = 0;
4030 int rc; 4032 int rc;
4031#if defined(CONFIG_HAVE_CLK) 4033#if defined(CONFIG_HAVE_CLK)
4032 int port; 4034 int port;
@@ -4050,8 +4052,14 @@ static int mv_platform_probe(struct platform_device *pdev)
4050 return -EINVAL; 4052 return -EINVAL;
4051 4053
4052 /* allocate host */ 4054 /* allocate host */
4053 mv_platform_data = pdev->dev.platform_data; 4055 if (pdev->dev.of_node) {
4054 n_ports = mv_platform_data->n_ports; 4056 of_property_read_u32(pdev->dev.of_node, "nr-ports", &n_ports);
4057 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
4058 } else {
4059 mv_platform_data = pdev->dev.platform_data;
4060 n_ports = mv_platform_data->n_ports;
4061 irq = platform_get_irq(pdev, 0);
4062 }
4055 4063
4056 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); 4064 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4057 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); 4065 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
@@ -4109,8 +4117,7 @@ static int mv_platform_probe(struct platform_device *pdev)
4109 dev_info(&pdev->dev, "slots %u ports %d\n", 4117 dev_info(&pdev->dev, "slots %u ports %d\n",
4110 (unsigned)MV_MAX_Q_DEPTH, host->n_ports); 4118 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);
4111 4119
4112 rc = ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt, 4120 rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
4113 IRQF_SHARED, &mv6_sht);
4114 if (!rc) 4121 if (!rc)
4115 return 0; 4122 return 0;
4116 4123
@@ -4205,15 +4212,24 @@ static int mv_platform_resume(struct platform_device *pdev)
4205#define mv_platform_resume NULL 4212#define mv_platform_resume NULL
4206#endif 4213#endif
4207 4214
4215#ifdef CONFIG_OF
4216static struct of_device_id mv_sata_dt_ids[] __devinitdata = {
4217 { .compatible = "marvell,orion-sata", },
4218 {},
4219};
4220MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
4221#endif
4222
4208static struct platform_driver mv_platform_driver = { 4223static struct platform_driver mv_platform_driver = {
4209 .probe = mv_platform_probe, 4224 .probe = mv_platform_probe,
4210 .remove = __devexit_p(mv_platform_remove), 4225 .remove = __devexit_p(mv_platform_remove),
4211 .suspend = mv_platform_suspend, 4226 .suspend = mv_platform_suspend,
4212 .resume = mv_platform_resume, 4227 .resume = mv_platform_resume,
4213 .driver = { 4228 .driver = {
4214 .name = DRV_NAME, 4229 .name = DRV_NAME,
4215 .owner = THIS_MODULE, 4230 .owner = THIS_MODULE,
4216 }, 4231 .of_match_table = of_match_ptr(mv_sata_dt_ids),
4232 },
4217}; 4233};
4218 4234
4219 4235
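
sata_mv gains device-tree probing: a "marvell,orion-sata" match table plus a dual-path probe that reads "nr-ports" and maps the interrupt from the DT node when one is present, falling back to platform data otherwise. As merged, the DT path does not validate either lookup; a more defensive variant would look like this sketch (a hypothetical hardening, not part of the patch: of_property_read_u32() leaves n_ports untouched on failure, and irq_of_parse_and_map() returns 0 when no mapping exists):

        if (pdev->dev.of_node) {
                if (of_property_read_u32(pdev->dev.of_node, "nr-ports",
                                         &n_ports) || !n_ports)
                        return -EINVAL;
                irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
                if (!irq)
                        return -EINVAL;
        }
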
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 55d6179dde58..85ee4993ca74 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -2510,22 +2510,11 @@ static void nv_adma_host_stop(struct ata_host *host)
2510 nv_ck804_host_stop(host); 2510 nv_ck804_host_stop(host);
2511} 2511}
2512 2512
2513static int __init nv_init(void) 2513module_pci_driver(nv_pci_driver);
2514{
2515 return pci_register_driver(&nv_pci_driver);
2516}
2517
2518static void __exit nv_exit(void)
2519{
2520 pci_unregister_driver(&nv_pci_driver);
2521}
2522 2514
2523module_init(nv_init);
2524module_exit(nv_exit);
2525module_param_named(adma, adma_enabled, bool, 0444); 2515module_param_named(adma, adma_enabled, bool, 0444);
2526MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)"); 2516MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2527module_param_named(swncq, swncq_enabled, bool, 0444); 2517module_param_named(swncq, swncq_enabled, bool, 0444);
2528MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)"); 2518MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2529module_param_named(msi, msi_enabled, bool, 0444); 2519module_param_named(msi, msi_enabled, bool, 0444);
2530MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)"); 2520MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
2531
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 000fcc99e01d..489c81768321 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -1249,21 +1249,10 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
1249 &pdc_ata_sht); 1249 &pdc_ata_sht);
1250} 1250}
1251 1251
1252static int __init pdc_ata_init(void) 1252module_pci_driver(pdc_ata_pci_driver);
1253{
1254 return pci_register_driver(&pdc_ata_pci_driver);
1255}
1256
1257static void __exit pdc_ata_exit(void)
1258{
1259 pci_unregister_driver(&pdc_ata_pci_driver);
1260}
1261 1253
1262MODULE_AUTHOR("Jeff Garzik"); 1254MODULE_AUTHOR("Jeff Garzik");
1263MODULE_DESCRIPTION("Promise ATA TX2/TX4/TX4000 low-level driver"); 1255MODULE_DESCRIPTION("Promise ATA TX2/TX4/TX4000 low-level driver");
1264MODULE_LICENSE("GPL"); 1256MODULE_LICENSE("GPL");
1265MODULE_DEVICE_TABLE(pci, pdc_ata_pci_tbl); 1257MODULE_DEVICE_TABLE(pci, pdc_ata_pci_tbl);
1266MODULE_VERSION(DRV_VERSION); 1258MODULE_VERSION(DRV_VERSION);
1267
1268module_init(pdc_ata_init);
1269module_exit(pdc_ata_exit);
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 9d1a47bb21b3..3b0dd57984e1 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -635,21 +635,10 @@ static int qs_ata_init_one(struct pci_dev *pdev,
635 &qs_ata_sht); 635 &qs_ata_sht);
636} 636}
637 637
638static int __init qs_ata_init(void) 638module_pci_driver(qs_ata_pci_driver);
639{
640 return pci_register_driver(&qs_ata_pci_driver);
641}
642
643static void __exit qs_ata_exit(void)
644{
645 pci_unregister_driver(&qs_ata_pci_driver);
646}
647 639
648MODULE_AUTHOR("Mark Lord"); 640MODULE_AUTHOR("Mark Lord");
649MODULE_DESCRIPTION("Pacific Digital Corporation QStor SATA low-level driver"); 641MODULE_DESCRIPTION("Pacific Digital Corporation QStor SATA low-level driver");
650MODULE_LICENSE("GPL"); 642MODULE_LICENSE("GPL");
651MODULE_DEVICE_TABLE(pci, qs_ata_pci_tbl); 643MODULE_DEVICE_TABLE(pci, qs_ata_pci_tbl);
652MODULE_VERSION(DRV_VERSION); 644MODULE_VERSION(DRV_VERSION);
653
654module_init(qs_ata_init);
655module_exit(qs_ata_exit);
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 9dfb40b8c2c9..a7b31672c4b7 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -819,16 +819,4 @@ static int sil_pci_device_resume(struct pci_dev *pdev)
819} 819}
820#endif 820#endif
821 821
822static int __init sil_init(void) 822module_pci_driver(sil_pci_driver);
823{
824 return pci_register_driver(&sil_pci_driver);
825}
826
827static void __exit sil_exit(void)
828{
829 pci_unregister_driver(&sil_pci_driver);
830}
831
832
833module_init(sil_init);
834module_exit(sil_exit);
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index e7e610aa9a7a..a5f2a563a26a 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -1382,20 +1382,9 @@ static int sil24_port_resume(struct ata_port *ap)
1382} 1382}
1383#endif 1383#endif
1384 1384
1385static int __init sil24_init(void) 1385module_pci_driver(sil24_pci_driver);
1386{
1387 return pci_register_driver(&sil24_pci_driver);
1388}
1389
1390static void __exit sil24_exit(void)
1391{
1392 pci_unregister_driver(&sil24_pci_driver);
1393}
1394 1386
1395MODULE_AUTHOR("Tejun Heo"); 1387MODULE_AUTHOR("Tejun Heo");
1396MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver"); 1388MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
1397MODULE_LICENSE("GPL"); 1389MODULE_LICENSE("GPL");
1398MODULE_DEVICE_TABLE(pci, sil24_pci_tbl); 1390MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);
1399
1400module_init(sil24_init);
1401module_exit(sil24_exit);
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 95ec435f0eb4..fe3ca0989b14 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -308,15 +308,4 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
308 IRQF_SHARED, &sis_sht); 308 IRQF_SHARED, &sis_sht);
309} 309}
310 310
311static int __init sis_init(void) 311module_pci_driver(sis_pci_driver);
312{
313 return pci_register_driver(&sis_pci_driver);
314}
315
316static void __exit sis_exit(void)
317{
318 pci_unregister_driver(&sis_pci_driver);
319}
320
321module_init(sis_init);
322module_exit(sis_exit);
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index c646118943ff..44a4256533e1 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -525,21 +525,10 @@ static struct pci_driver k2_sata_pci_driver = {
525 .remove = ata_pci_remove_one, 525 .remove = ata_pci_remove_one,
526}; 526};
527 527
528static int __init k2_sata_init(void) 528module_pci_driver(k2_sata_pci_driver);
529{
530 return pci_register_driver(&k2_sata_pci_driver);
531}
532
533static void __exit k2_sata_exit(void)
534{
535 pci_unregister_driver(&k2_sata_pci_driver);
536}
537 529
538MODULE_AUTHOR("Benjamin Herrenschmidt"); 530MODULE_AUTHOR("Benjamin Herrenschmidt");
539MODULE_DESCRIPTION("low-level driver for K2 SATA controller"); 531MODULE_DESCRIPTION("low-level driver for K2 SATA controller");
540MODULE_LICENSE("GPL"); 532MODULE_LICENSE("GPL");
541MODULE_DEVICE_TABLE(pci, k2_sata_pci_tbl); 533MODULE_DEVICE_TABLE(pci, k2_sata_pci_tbl);
542MODULE_VERSION(DRV_VERSION); 534MODULE_VERSION(DRV_VERSION);
543
544module_init(k2_sata_init);
545module_exit(k2_sata_exit);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index cdaebbe3d184..122605593166 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -1498,24 +1498,10 @@ static int pdc_sata_init_one(struct pci_dev *pdev,
1498 IRQF_SHARED, &pdc_sata_sht); 1498 IRQF_SHARED, &pdc_sata_sht);
1499} 1499}
1500 1500
1501 1501module_pci_driver(pdc_sata_pci_driver);
1502static int __init pdc_sata_init(void)
1503{
1504 return pci_register_driver(&pdc_sata_pci_driver);
1505}
1506
1507
1508static void __exit pdc_sata_exit(void)
1509{
1510 pci_unregister_driver(&pdc_sata_pci_driver);
1511}
1512
1513 1502
1514MODULE_AUTHOR("Jeff Garzik"); 1503MODULE_AUTHOR("Jeff Garzik");
1515MODULE_DESCRIPTION("Promise SATA low-level driver"); 1504MODULE_DESCRIPTION("Promise SATA low-level driver");
1516MODULE_LICENSE("GPL"); 1505MODULE_LICENSE("GPL");
1517MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl); 1506MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
1518MODULE_VERSION(DRV_VERSION); 1507MODULE_VERSION(DRV_VERSION);
1519
1520module_init(pdc_sata_init);
1521module_exit(pdc_sata_exit);
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index b54ebfcdda32..6d6489118873 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -243,16 +243,4 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
243 IRQF_SHARED, &uli_sht); 243 IRQF_SHARED, &uli_sht);
244} 244}
245 245
246static int __init uli_init(void) 246module_pci_driver(uli_pci_driver);
247{
248 return pci_register_driver(&uli_pci_driver);
249}
250
251static void __exit uli_exit(void)
252{
253 pci_unregister_driver(&uli_pci_driver);
254}
255
256
257module_init(uli_init);
258module_exit(uli_exit);
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index f93e43b0ccd8..5913ea9d57b2 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -655,15 +655,4 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
655 IRQF_SHARED, &svia_sht); 655 IRQF_SHARED, &svia_sht);
656} 656}
657 657
658static int __init svia_init(void) 658module_pci_driver(svia_pci_driver);
659{
660 return pci_register_driver(&svia_pci_driver);
661}
662
663static void __exit svia_exit(void)
664{
665 pci_unregister_driver(&svia_pci_driver);
666}
667
668module_init(svia_init);
669module_exit(svia_exit);
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 6135a5288695..e8cf88ba145d 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -436,21 +436,10 @@ static struct pci_driver vsc_sata_pci_driver = {
436 .remove = ata_pci_remove_one, 436 .remove = ata_pci_remove_one,
437}; 437};
438 438
439static int __init vsc_sata_init(void) 439module_pci_driver(vsc_sata_pci_driver);
440{
441 return pci_register_driver(&vsc_sata_pci_driver);
442}
443
444static void __exit vsc_sata_exit(void)
445{
446 pci_unregister_driver(&vsc_sata_pci_driver);
447}
448 440
449MODULE_AUTHOR("Jeremy Higdon"); 441MODULE_AUTHOR("Jeremy Higdon");
450MODULE_DESCRIPTION("low-level driver for Vitesse VSC7174 SATA controller"); 442MODULE_DESCRIPTION("low-level driver for Vitesse VSC7174 SATA controller");
451MODULE_LICENSE("GPL"); 443MODULE_LICENSE("GPL");
452MODULE_DEVICE_TABLE(pci, vsc_sata_pci_tbl); 444MODULE_DEVICE_TABLE(pci, vsc_sata_pci_tbl);
453MODULE_VERSION(DRV_VERSION); 445MODULE_VERSION(DRV_VERSION);
454
455module_init(vsc_sata_init);
456module_exit(vsc_sata_exit);
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index d4386019af5d..96cce6d53195 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2362,7 +2362,7 @@ static int __devinit ia_init(struct atm_dev *dev)
2362 { 2362 {
2363 printk(DEV_LABEL " (itf %d): can't set up page mapping\n", 2363 printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
2364 dev->number); 2364 dev->number);
2365 return error; 2365 return -ENOMEM;
2366 } 2366 }
2367 IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n", 2367 IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
2368 dev->number, iadev->pci->revision, base, iadev->irq);) 2368 dev->number, iadev->pci->revision, base, iadev->irq);)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 9b21469482ae..08b4c5209384 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -196,6 +196,7 @@ config CMA
196 bool "Contiguous Memory Allocator (EXPERIMENTAL)" 196 bool "Contiguous Memory Allocator (EXPERIMENTAL)"
197 depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL 197 depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL
198 select MIGRATION 198 select MIGRATION
199 select MEMORY_ISOLATION
199 help 200 help
200 This enables the Contiguous Memory Allocator which allows drivers 201 This enables the Contiguous Memory Allocator which allows drivers
201 to allocate big physically-contiguous blocks of memory for use with 202 to allocate big physically-contiguous blocks of memory for use with
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 2bcef657a60c..181ed2660b33 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -743,7 +743,6 @@ int bus_add_driver(struct device_driver *drv)
743 } 743 }
744 } 744 }
745 745
746 kobject_uevent(&priv->kobj, KOBJ_ADD);
747 return 0; 746 return 0;
748 747
749out_unregister: 748out_unregister:
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 346be8b78b24..5e6e00bc1652 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -85,14 +85,13 @@ const char *dev_driver_string(const struct device *dev)
85} 85}
86EXPORT_SYMBOL(dev_driver_string); 86EXPORT_SYMBOL(dev_driver_string);
87 87
88#define to_dev(obj) container_of(obj, struct device, kobj)
89#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr) 88#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
90 89
91static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr, 90static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
92 char *buf) 91 char *buf)
93{ 92{
94 struct device_attribute *dev_attr = to_dev_attr(attr); 93 struct device_attribute *dev_attr = to_dev_attr(attr);
95 struct device *dev = to_dev(kobj); 94 struct device *dev = kobj_to_dev(kobj);
96 ssize_t ret = -EIO; 95 ssize_t ret = -EIO;
97 96
98 if (dev_attr->show) 97 if (dev_attr->show)
@@ -108,7 +107,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
108 const char *buf, size_t count) 107 const char *buf, size_t count)
109{ 108{
110 struct device_attribute *dev_attr = to_dev_attr(attr); 109 struct device_attribute *dev_attr = to_dev_attr(attr);
111 struct device *dev = to_dev(kobj); 110 struct device *dev = kobj_to_dev(kobj);
112 ssize_t ret = -EIO; 111 ssize_t ret = -EIO;
113 112
114 if (dev_attr->store) 113 if (dev_attr->store)
@@ -182,7 +181,7 @@ EXPORT_SYMBOL_GPL(device_show_int);
182 */ 181 */
183static void device_release(struct kobject *kobj) 182static void device_release(struct kobject *kobj)
184{ 183{
185 struct device *dev = to_dev(kobj); 184 struct device *dev = kobj_to_dev(kobj);
186 struct device_private *p = dev->p; 185 struct device_private *p = dev->p;
187 186
188 if (dev->release) 187 if (dev->release)
@@ -200,7 +199,7 @@ static void device_release(struct kobject *kobj)
200 199
201static const void *device_namespace(struct kobject *kobj) 200static const void *device_namespace(struct kobject *kobj)
202{ 201{
203 struct device *dev = to_dev(kobj); 202 struct device *dev = kobj_to_dev(kobj);
204 const void *ns = NULL; 203 const void *ns = NULL;
205 204
206 if (dev->class && dev->class->ns_type) 205 if (dev->class && dev->class->ns_type)
@@ -221,7 +220,7 @@ static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
221 struct kobj_type *ktype = get_ktype(kobj); 220 struct kobj_type *ktype = get_ktype(kobj);
222 221
223 if (ktype == &device_ktype) { 222 if (ktype == &device_ktype) {
224 struct device *dev = to_dev(kobj); 223 struct device *dev = kobj_to_dev(kobj);
225 if (dev->bus) 224 if (dev->bus)
226 return 1; 225 return 1;
227 if (dev->class) 226 if (dev->class)
@@ -232,7 +231,7 @@ static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
232 231
233static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj) 232static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
234{ 233{
235 struct device *dev = to_dev(kobj); 234 struct device *dev = kobj_to_dev(kobj);
236 235
237 if (dev->bus) 236 if (dev->bus)
238 return dev->bus->name; 237 return dev->bus->name;
@@ -244,7 +243,7 @@ static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
244static int dev_uevent(struct kset *kset, struct kobject *kobj, 243static int dev_uevent(struct kset *kset, struct kobject *kobj,
245 struct kobj_uevent_env *env) 244 struct kobj_uevent_env *env)
246{ 245{
247 struct device *dev = to_dev(kobj); 246 struct device *dev = kobj_to_dev(kobj);
248 int retval = 0; 247 int retval = 0;
249 248
250 /* add device node properties if present */ 249 /* add device node properties if present */
@@ -1132,7 +1131,7 @@ int device_register(struct device *dev)
1132 */ 1131 */
1133struct device *get_device(struct device *dev) 1132struct device *get_device(struct device *dev)
1134{ 1133{
1135 return dev ? to_dev(kobject_get(&dev->kobj)) : NULL; 1134 return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
1136} 1135}
1137 1136
1138/** 1137/**
@@ -1754,25 +1753,25 @@ int device_move(struct device *dev, struct device *new_parent,
1754 set_dev_node(dev, dev_to_node(new_parent)); 1753 set_dev_node(dev, dev_to_node(new_parent));
1755 } 1754 }
1756 1755
1757 if (!dev->class) 1756 if (dev->class) {
1758 goto out_put; 1757 error = device_move_class_links(dev, old_parent, new_parent);
1759 error = device_move_class_links(dev, old_parent, new_parent); 1758 if (error) {
1760 if (error) { 1759 /* We ignore errors on cleanup since we're hosed anyway... */
1761 /* We ignore errors on cleanup since we're hosed anyway... */ 1760 device_move_class_links(dev, new_parent, old_parent);
1762 device_move_class_links(dev, new_parent, old_parent); 1761 if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
1763 if (!kobject_move(&dev->kobj, &old_parent->kobj)) { 1762 if (new_parent)
1764 if (new_parent) 1763 klist_remove(&dev->p->knode_parent);
1765 klist_remove(&dev->p->knode_parent); 1764 dev->parent = old_parent;
1766 dev->parent = old_parent; 1765 if (old_parent) {
1767 if (old_parent) { 1766 klist_add_tail(&dev->p->knode_parent,
1768 klist_add_tail(&dev->p->knode_parent, 1767 &old_parent->p->klist_children);
1769 &old_parent->p->klist_children); 1768 set_dev_node(dev, dev_to_node(old_parent));
1770 set_dev_node(dev, dev_to_node(old_parent)); 1769 }
1771 } 1770 }
1771 cleanup_glue_dir(dev, new_parent_kobj);
1772 put_device(new_parent);
1773 goto out;
1772 } 1774 }
1773 cleanup_glue_dir(dev, new_parent_kobj);
1774 put_device(new_parent);
1775 goto out;
1776 } 1775 }
1777 switch (dpm_order) { 1776 switch (dpm_order) {
1778 case DPM_ORDER_NONE: 1777 case DPM_ORDER_NONE:
@@ -1787,7 +1786,7 @@ int device_move(struct device *dev, struct device *new_parent,
1787 device_pm_move_last(dev); 1786 device_pm_move_last(dev);
1788 break; 1787 break;
1789 } 1788 }
1790out_put: 1789
1791 put_device(old_parent); 1790 put_device(old_parent);
1792out: 1791out:
1793 device_pm_unlock(); 1792 device_pm_unlock();
@@ -1812,6 +1811,13 @@ void device_shutdown(void)
1812 while (!list_empty(&devices_kset->list)) { 1811 while (!list_empty(&devices_kset->list)) {
1813 dev = list_entry(devices_kset->list.prev, struct device, 1812 dev = list_entry(devices_kset->list.prev, struct device,
1814 kobj.entry); 1813 kobj.entry);
1814
1815 /*
1816 * hold reference count of device's parent to
1817 * prevent it from being freed because parent's
1818 * lock is to be held
1819 */
1820 get_device(dev->parent);
1815 get_device(dev); 1821 get_device(dev);
1816 /* 1822 /*
1817 * Make sure the device is off the kset list, in the 1823 * Make sure the device is off the kset list, in the
@@ -1820,6 +1826,11 @@ void device_shutdown(void)
1820 list_del_init(&dev->kobj.entry); 1826 list_del_init(&dev->kobj.entry);
1821 spin_unlock(&devices_kset->list_lock); 1827 spin_unlock(&devices_kset->list_lock);
1822 1828
1829 /* hold lock to avoid race with probe/release */
1830 if (dev->parent)
1831 device_lock(dev->parent);
1832 device_lock(dev);
1833
1823 /* Don't allow any more runtime suspends */ 1834 /* Don't allow any more runtime suspends */
1824 pm_runtime_get_noresume(dev); 1835 pm_runtime_get_noresume(dev);
1825 pm_runtime_barrier(dev); 1836 pm_runtime_barrier(dev);
@@ -1831,7 +1842,13 @@ void device_shutdown(void)
1831 dev_dbg(dev, "shutdown\n"); 1842 dev_dbg(dev, "shutdown\n");
1832 dev->driver->shutdown(dev); 1843 dev->driver->shutdown(dev);
1833 } 1844 }
1845
1846 device_unlock(dev);
1847 if (dev->parent)
1848 device_unlock(dev->parent);
1849
1834 put_device(dev); 1850 put_device(dev);
1851 put_device(dev->parent);
1835 1852
1836 spin_lock(&devices_kset->list_lock); 1853 spin_lock(&devices_kset->list_lock);
1837 } 1854 }
@@ -1848,6 +1865,7 @@ int __dev_printk(const char *level, const struct device *dev,
1848 struct va_format *vaf) 1865 struct va_format *vaf)
1849{ 1866{
1850 char dict[128]; 1867 char dict[128];
1868 const char *level_extra = "";
1851 size_t dictlen = 0; 1869 size_t dictlen = 0;
1852 const char *subsys; 1870 const char *subsys;
1853 1871
@@ -1894,10 +1912,14 @@ int __dev_printk(const char *level, const struct device *dev,
1894 "DEVICE=+%s:%s", subsys, dev_name(dev)); 1912 "DEVICE=+%s:%s", subsys, dev_name(dev));
1895 } 1913 }
1896skip: 1914skip:
1915 if (level[2])
1916 level_extra = &level[2]; /* skip past KERN_SOH "L" */
1917
1897 return printk_emit(0, level[1] - '0', 1918 return printk_emit(0, level[1] - '0',
1898 dictlen ? dict : NULL, dictlen, 1919 dictlen ? dict : NULL, dictlen,
1899 "%s %s: %pV", 1920 "%s %s: %s%pV",
1900 dev_driver_string(dev), dev_name(dev), vaf); 1921 dev_driver_string(dev), dev_name(dev),
1922 level_extra, vaf);
1901} 1923}
1902EXPORT_SYMBOL(__dev_printk); 1924EXPORT_SYMBOL(__dev_printk);
1903 1925
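
Three independent cleanups land in drivers/base/core.c: the private to_dev() macro gives way to the shared kobj_to_dev() helper, device_shutdown() now pins and locks each device's parent around the ->shutdown() callback to avoid racing with concurrent probe/release, and __dev_printk() forwards any text concatenated after the KERN_SOH level character (the "level_extra" suffix) instead of silently dropping it. For reference, kobj_to_dev() in <linux/device.h> is exactly the typed container_of() that to_dev() used to spell out locally:

        static inline struct device *kobj_to_dev(struct kobject *kobj)
        {
                return container_of(kobj, struct device, kobj);
        }
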
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 1b1cbb571d38..e3bbed8a617c 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -85,8 +85,20 @@ static void deferred_probe_work_func(struct work_struct *work)
85 * manipulate the deferred list 85 * manipulate the deferred list
86 */ 86 */
87 mutex_unlock(&deferred_probe_mutex); 87 mutex_unlock(&deferred_probe_mutex);
88
89 /*
90 * Force the device to the end of the dpm_list since
91 * the PM code assumes that the order we add things to
92 * the list is a good order for suspend but deferred
93 * probe makes that very unsafe.
94 */
95 device_pm_lock();
96 device_pm_move_last(dev);
97 device_pm_unlock();
98
88 dev_dbg(dev, "Retrying from deferred list\n"); 99 dev_dbg(dev, "Retrying from deferred list\n");
89 bus_probe_device(dev); 100 bus_probe_device(dev);
101
90 mutex_lock(&deferred_probe_mutex); 102 mutex_lock(&deferred_probe_mutex);
91 103
92 put_device(dev); 104 put_device(dev);
@@ -100,7 +112,7 @@ static void driver_deferred_probe_add(struct device *dev)
100 mutex_lock(&deferred_probe_mutex); 112 mutex_lock(&deferred_probe_mutex);
101 if (list_empty(&dev->p->deferred_probe)) { 113 if (list_empty(&dev->p->deferred_probe)) {
102 dev_dbg(dev, "Added to deferred list\n"); 114 dev_dbg(dev, "Added to deferred list\n");
103 list_add(&dev->p->deferred_probe, &deferred_probe_pending_list); 115 list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
104 } 116 }
105 mutex_unlock(&deferred_probe_mutex); 117 mutex_unlock(&deferred_probe_mutex);
106} 118}
@@ -283,6 +295,7 @@ probe_failed:
283 devres_release_all(dev); 295 devres_release_all(dev);
284 driver_sysfs_remove(dev); 296 driver_sysfs_remove(dev);
285 dev->driver = NULL; 297 dev->driver = NULL;
298 dev_set_drvdata(dev, NULL);
286 299
287 if (ret == -EPROBE_DEFER) { 300 if (ret == -EPROBE_DEFER) {
288 /* Driver requested deferred probing */ 301 /* Driver requested deferred probing */
@@ -356,10 +369,9 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
356 pr_debug("bus: '%s': %s: matched device %s with driver %s\n", 369 pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
357 drv->bus->name, __func__, dev_name(dev), drv->name); 370 drv->bus->name, __func__, dev_name(dev), drv->name);
358 371
359 pm_runtime_get_noresume(dev);
360 pm_runtime_barrier(dev); 372 pm_runtime_barrier(dev);
361 ret = really_probe(dev, drv); 373 ret = really_probe(dev, drv);
362 pm_runtime_put_sync(dev); 374 pm_runtime_idle(dev);
363 375
364 return ret; 376 return ret;
365} 377}
@@ -406,9 +418,8 @@ int device_attach(struct device *dev)
406 ret = 0; 418 ret = 0;
407 } 419 }
408 } else { 420 } else {
409 pm_runtime_get_noresume(dev);
410 ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach); 421 ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach);
411 pm_runtime_put_sync(dev); 422 pm_runtime_idle(dev);
412 } 423 }
413out_unlock: 424out_unlock:
414 device_unlock(dev); 425 device_unlock(dev);
@@ -487,6 +498,7 @@ static void __device_release_driver(struct device *dev)
487 drv->remove(dev); 498 drv->remove(dev);
488 devres_release_all(dev); 499 devres_release_all(dev);
489 dev->driver = NULL; 500 dev->driver = NULL;
501 dev_set_drvdata(dev, NULL);
490 klist_remove(&dev->p->knode_driver); 502 klist_remove(&dev->p->knode_driver);
491 if (dev->bus) 503 if (dev->bus)
492 blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 504 blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
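
The dd.c hunks harden deferred probing: re-probed devices keep FIFO order (list_add_tail()), each retried device is moved to the end of dpm_list so suspend ordering still matches probe completion ordering, drvdata is cleared on probe failure and on driver release, and the runtime-PM get/put pair around probing is relaxed to a barrier plus pm_runtime_idle(). The driver-side contract is unchanged; a minimal consumer sketch (the "example" names are illustrative, and devm_clk_get() is assumed from <linux/clk.h>):

        static int example_probe(struct platform_device *pdev)
        {
                struct clk *clk = devm_clk_get(&pdev->dev, NULL);

                /* Propagating -EPROBE_DEFER re-queues this device on
                 * the pending list drained by deferred_probe_work_func()
                 * above once some other driver probes successfully. */
                if (IS_ERR(clk))
                        return PTR_ERR(clk);

                return 0;
        }
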
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 765c3a28077a..deb4a456cf83 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -156,9 +156,7 @@ static int dev_mkdir(const char *name, umode_t mode)
156 if (!err) 156 if (!err)
157 /* mark as kernel-created inode */ 157 /* mark as kernel-created inode */
158 dentry->d_inode->i_private = &thread; 158 dentry->d_inode->i_private = &thread;
159 dput(dentry); 159 done_path_create(&path, dentry);
160 mutex_unlock(&path.dentry->d_inode->i_mutex);
161 path_put(&path);
162 return err; 160 return err;
163} 161}
164 162
@@ -218,42 +216,30 @@ static int handle_create(const char *nodename, umode_t mode, struct device *dev)
218 /* mark as kernel-created inode */ 216 /* mark as kernel-created inode */
219 dentry->d_inode->i_private = &thread; 217 dentry->d_inode->i_private = &thread;
220 } 218 }
221 dput(dentry); 219 done_path_create(&path, dentry);
222
223 mutex_unlock(&path.dentry->d_inode->i_mutex);
224 path_put(&path);
225 return err; 220 return err;
226} 221}
227 222
228static int dev_rmdir(const char *name) 223static int dev_rmdir(const char *name)
229{ 224{
230 struct nameidata nd; 225 struct path parent;
231 struct dentry *dentry; 226 struct dentry *dentry;
232 int err; 227 int err;
233 228
234 err = kern_path_parent(name, &nd); 229 dentry = kern_path_locked(name, &parent);
235 if (err) 230 if (IS_ERR(dentry))
236 return err; 231 return PTR_ERR(dentry);
237 232 if (dentry->d_inode) {
238 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); 233 if (dentry->d_inode->i_private == &thread)
239 dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len); 234 err = vfs_rmdir(parent.dentry->d_inode, dentry);
240 if (!IS_ERR(dentry)) { 235 else
241 if (dentry->d_inode) { 236 err = -EPERM;
242 if (dentry->d_inode->i_private == &thread)
243 err = vfs_rmdir(nd.path.dentry->d_inode,
244 dentry);
245 else
246 err = -EPERM;
247 } else {
248 err = -ENOENT;
249 }
250 dput(dentry);
251 } else { 237 } else {
252 err = PTR_ERR(dentry); 238 err = -ENOENT;
253 } 239 }
254 240 dput(dentry);
255 mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 241 mutex_unlock(&parent.dentry->d_inode->i_mutex);
256 path_put(&nd.path); 242 path_put(&parent);
257 return err; 243 return err;
258} 244}
259 245
@@ -305,50 +291,43 @@ static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *sta
305 291
306static int handle_remove(const char *nodename, struct device *dev) 292static int handle_remove(const char *nodename, struct device *dev)
307{ 293{
308 struct nameidata nd; 294 struct path parent;
309 struct dentry *dentry; 295 struct dentry *dentry;
310 struct kstat stat;
311 int deleted = 1; 296 int deleted = 1;
312 int err; 297 int err;
313 298
314 err = kern_path_parent(nodename, &nd); 299 dentry = kern_path_locked(nodename, &parent);
315 if (err) 300 if (IS_ERR(dentry))
316 return err; 301 return PTR_ERR(dentry);
317 302
318 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); 303 if (dentry->d_inode) {
319 dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len); 304 struct kstat stat;
320 if (!IS_ERR(dentry)) { 305 err = vfs_getattr(parent.mnt, dentry, &stat);
321 if (dentry->d_inode) { 306 if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
322 err = vfs_getattr(nd.path.mnt, dentry, &stat); 307 struct iattr newattrs;
323 if (!err && dev_mynode(dev, dentry->d_inode, &stat)) { 308 /*
324 struct iattr newattrs; 309 * before unlinking this node, reset permissions
325 /* 310 * of possible references like hardlinks
326 * before unlinking this node, reset permissions 311 */
327 * of possible references like hardlinks 312 newattrs.ia_uid = 0;
328 */ 313 newattrs.ia_gid = 0;
329 newattrs.ia_uid = 0; 314 newattrs.ia_mode = stat.mode & ~0777;
330 newattrs.ia_gid = 0; 315 newattrs.ia_valid =
331 newattrs.ia_mode = stat.mode & ~0777; 316 ATTR_UID|ATTR_GID|ATTR_MODE;
332 newattrs.ia_valid = 317 mutex_lock(&dentry->d_inode->i_mutex);
333 ATTR_UID|ATTR_GID|ATTR_MODE; 318 notify_change(dentry, &newattrs);
334 mutex_lock(&dentry->d_inode->i_mutex); 319 mutex_unlock(&dentry->d_inode->i_mutex);
335 notify_change(dentry, &newattrs); 320 err = vfs_unlink(parent.dentry->d_inode, dentry);
336 mutex_unlock(&dentry->d_inode->i_mutex); 321 if (!err || err == -ENOENT)
337 err = vfs_unlink(nd.path.dentry->d_inode, 322 deleted = 1;
338 dentry);
339 if (!err || err == -ENOENT)
340 deleted = 1;
341 }
342 } else {
343 err = -ENOENT;
344 } 323 }
345 dput(dentry);
346 } else { 324 } else {
347 err = PTR_ERR(dentry); 325 err = -ENOENT;
348 } 326 }
349 mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 327 dput(dentry);
328 mutex_unlock(&parent.dentry->d_inode->i_mutex);
350 329
351 path_put(&nd.path); 330 path_put(&parent);
352 if (deleted && strchr(nodename, '/')) 331 if (deleted && strchr(nodename, '/'))
353 delete_path(nodename); 332 delete_path(nodename);
354 return err; 333 return err;
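
The devtmpfs rewrite replaces the open-coded kern_path_parent() + mutex_lock_nested() + lookup_one_len() sequence with the VFS helpers introduced alongside this series: kern_path_locked(), which returns the final dentry with the parent directory's i_mutex already held, and done_path_create() for unwinding the create side. The remove-side calling pattern, sketched for a generic unlink (vfs_unlink() still takes the two-argument form in this tree; "example_remove" is illustrative):

        static int example_remove(const char *name)
        {
                struct path parent;
                struct dentry *dentry;
                int err = -ENOENT;

                dentry = kern_path_locked(name, &parent);
                if (IS_ERR(dentry))
                        return PTR_ERR(dentry);

                if (dentry->d_inode)
                        err = vfs_unlink(parent.dentry->d_inode, dentry);

                dput(dentry);
                mutex_unlock(&parent.dentry->d_inode->i_mutex);
                path_put(&parent);
                return err;
        }
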
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 24e88fe29ec1..c30f3e1d0efc 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -493,6 +493,7 @@ EXPORT_SYMBOL_GPL(dma_buf_vmap);
493/** 493/**
494 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap. 494 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
495 * @dmabuf: [in] buffer to vunmap 495 * @dmabuf: [in] buffer to vunmap
496 * @vaddr: [in] vmap to vunmap
496 */ 497 */
497void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) 498void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
498{ 499{
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 1b85949e3d2f..560a7173f810 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -186,6 +186,7 @@ EXPORT_SYMBOL(dma_release_from_coherent);
186 * @vma: vm_area for the userspace memory 186 * @vma: vm_area for the userspace memory
187 * @vaddr: cpu address returned by dma_alloc_from_coherent 187 * @vaddr: cpu address returned by dma_alloc_from_coherent
188 * @size: size of the memory buffer allocated by dma_alloc_from_coherent 188 * @size: size of the memory buffer allocated by dma_alloc_from_coherent
189 * @ret: result from remap_pfn_range()
189 * 190 *
190 * This checks whether the memory was allocated from the per-device 191 * This checks whether the memory was allocated from the per-device
191 * coherent memory pool and if so, maps that memory to the provided vma. 192 * coherent memory pool and if so, maps that memory to the provided vma.
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 6f3676f1559f..3fbedc75e7c5 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -10,6 +10,7 @@
10#include <linux/dma-mapping.h> 10#include <linux/dma-mapping.h>
11#include <linux/export.h> 11#include <linux/export.h>
12#include <linux/gfp.h> 12#include <linux/gfp.h>
13#include <asm-generic/dma-coherent.h>
13 14
14/* 15/*
15 * Managed DMA API 16 * Managed DMA API
@@ -217,4 +218,52 @@ void dmam_release_declared_memory(struct device *dev)
217} 218}
218EXPORT_SYMBOL(dmam_release_declared_memory); 219EXPORT_SYMBOL(dmam_release_declared_memory);
219 220
221/*
222 * Create scatter-list for the already allocated DMA buffer.
223 */
224int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
225 void *cpu_addr, dma_addr_t handle, size_t size)
226{
227 struct page *page = virt_to_page(cpu_addr);
228 int ret;
229
230 ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
231 if (unlikely(ret))
232 return ret;
233
234 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
235 return 0;
236}
237EXPORT_SYMBOL(dma_common_get_sgtable);
238
220#endif 239#endif
240
241/*
242 * Create userspace mapping for the DMA-coherent memory.
243 */
244int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
245 void *cpu_addr, dma_addr_t dma_addr, size_t size)
246{
247 int ret = -ENXIO;
248#ifdef CONFIG_MMU
249 unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
250 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
251 unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
252 unsigned long off = vma->vm_pgoff;
253
254 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
255
256 if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
257 return ret;
258
259 if (off < count && user_count <= (count - off)) {
260 ret = remap_pfn_range(vma, vma->vm_start,
261 pfn + off,
262 user_count << PAGE_SHIFT,
263 vma->vm_page_prot);
264 }
265#endif /* CONFIG_MMU */
266
267 return ret;
268}
269EXPORT_SYMBOL(dma_common_mmap);
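
dma-mapping.c grows two generic helpers: dma_common_get_sgtable(), which wraps an already-allocated coherent buffer in a single-entry scatterlist, and dma_common_mmap(), which hands the buffer to userspace through remap_pfn_range() after a bounds check. That check is easy to misread, so here it is restated as a sketch (count and user_count are the buffer and VMA sizes in pages, off is the page offset userspace asked for; the helper name is illustrative):

        static bool dma_mmap_range_ok(size_t size, struct vm_area_struct *vma)
        {
                unsigned long user_count = (vma->vm_end - vma->vm_start)
                                                >> PAGE_SHIFT;
                unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
                unsigned long off = vma->vm_pgoff;

                /* equivalent to off + user_count <= count, written so
                 * the subtraction cannot wrap: the requested window
                 * must lie entirely inside the coherent buffer */
                return off < count && user_count <= (count - off);
        }
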
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 207c27ddf828..974e301a1ef0 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -185,8 +185,12 @@ int driver_register(struct device_driver *drv)
185 if (ret) 185 if (ret)
186 return ret; 186 return ret;
187 ret = driver_add_groups(drv, drv->groups); 187 ret = driver_add_groups(drv, drv->groups);
188 if (ret) 188 if (ret) {
189 bus_remove_driver(drv); 189 bus_remove_driver(drv);
190 return ret;
191 }
192 kobject_uevent(&drv->p->kobj, KOBJ_ADD);
193
190 return ret; 194 return ret;
191} 195}
192EXPORT_SYMBOL_GPL(driver_register); 196EXPORT_SYMBOL_GPL(driver_register);
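
This driver.c hunk pairs with the bus.c hunk earlier in this series: the KOBJ_ADD uevent for a new driver moves out of bus_add_driver() and into driver_register(), after driver_add_groups() has succeeded, so udev never receives the uevent before the driver's sysfs attribute groups exist. The resulting ordering, restated as a sketch:

        ret = bus_add_driver(drv);              /* no uevent in here any more */
        if (ret)
                return ret;
        ret = driver_add_groups(drv, drv->groups);
        if (ret) {
                bus_remove_driver(drv);
                return ret;
        }
        kobject_uevent(&drv->p->kobj, KOBJ_ADD);        /* groups are in place */
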
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 5401814c874d..803cfc1597a9 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -22,8 +22,6 @@
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/sched.h> 23#include <linux/sched.h>
24 24
25#define to_dev(obj) container_of(obj, struct device, kobj)
26
27MODULE_AUTHOR("Manuel Estrada Sainz"); 25MODULE_AUTHOR("Manuel Estrada Sainz");
28MODULE_DESCRIPTION("Multi purpose firmware loading support"); 26MODULE_DESCRIPTION("Multi purpose firmware loading support");
29MODULE_LICENSE("GPL"); 27MODULE_LICENSE("GPL");
@@ -290,7 +288,7 @@ static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
290 struct bin_attribute *bin_attr, 288 struct bin_attribute *bin_attr,
291 char *buffer, loff_t offset, size_t count) 289 char *buffer, loff_t offset, size_t count)
292{ 290{
293 struct device *dev = to_dev(kobj); 291 struct device *dev = kobj_to_dev(kobj);
294 struct firmware_priv *fw_priv = to_firmware_priv(dev); 292 struct firmware_priv *fw_priv = to_firmware_priv(dev);
295 struct firmware *fw; 293 struct firmware *fw;
296 ssize_t ret_count; 294 ssize_t ret_count;
@@ -384,7 +382,7 @@ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
384 struct bin_attribute *bin_attr, 382 struct bin_attribute *bin_attr,
385 char *buffer, loff_t offset, size_t count) 383 char *buffer, loff_t offset, size_t count)
386{ 384{
387 struct device *dev = to_dev(kobj); 385 struct device *dev = kobj_to_dev(kobj);
388 struct firmware_priv *fw_priv = to_firmware_priv(dev); 386 struct firmware_priv *fw_priv = to_firmware_priv(dev);
389 struct firmware *fw; 387 struct firmware *fw;
390 ssize_t retval; 388 ssize_t retval;
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 869d7ff2227f..eb78e9640c4a 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -169,8 +169,7 @@ void pm_clk_init(struct device *dev)
169 */ 169 */
170int pm_clk_create(struct device *dev) 170int pm_clk_create(struct device *dev)
171{ 171{
172 int ret = dev_pm_get_subsys_data(dev); 172 return dev_pm_get_subsys_data(dev);
173 return ret < 0 ? ret : 0;
174} 173}
175 174
176/** 175/**
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index a14085cc613f..39c32529b833 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -24,7 +24,6 @@
24int dev_pm_get_subsys_data(struct device *dev) 24int dev_pm_get_subsys_data(struct device *dev)
25{ 25{
26 struct pm_subsys_data *psd; 26 struct pm_subsys_data *psd;
27 int ret = 0;
28 27
29 psd = kzalloc(sizeof(*psd), GFP_KERNEL); 28 psd = kzalloc(sizeof(*psd), GFP_KERNEL);
30 if (!psd) 29 if (!psd)
@@ -40,7 +39,6 @@ int dev_pm_get_subsys_data(struct device *dev)
40 dev->power.subsys_data = psd; 39 dev->power.subsys_data = psd;
41 pm_clk_init(dev); 40 pm_clk_init(dev);
42 psd = NULL; 41 psd = NULL;
43 ret = 1;
44 } 42 }
45 43
46 spin_unlock_irq(&dev->power.lock); 44 spin_unlock_irq(&dev->power.lock);
@@ -48,7 +46,7 @@ int dev_pm_get_subsys_data(struct device *dev)
48 /* kfree() verifies that its argument is nonzero. */ 46 /* kfree() verifies that its argument is nonzero. */
49 kfree(psd); 47 kfree(psd);
50 48
51 return ret; 49 return 0;
52} 50}
53EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); 51EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
54 52
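
Together with the clock_ops.c hunk just above, this normalizes dev_pm_get_subsys_data() to the conventional 0-or-negative-errno return: the old code answered 1 when it allocated a fresh pm_subsys_data and 0 when one already existed, a distinction no caller used. Call sites can now follow the standard idiom (sketch; "example_bind" is illustrative):

        static int example_bind(struct device *dev)
        {
                int ret = dev_pm_get_subsys_data(dev);

                if (ret)        /* only -ENOMEM is possible now */
                        return ret;
                /* ... */
                return 0;
        }
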
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 83aa694a8efe..ba3487c9835b 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -75,19 +75,6 @@ static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
75 start_latency_ns, "start"); 75 start_latency_ns, "start");
76} 76}
77 77
78static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
79{
80 return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
81 save_state_latency_ns, "state save");
82}
83
84static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
85{
86 return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
87 restore_state_latency_ns,
88 "state restore");
89}
90
91static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) 78static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
92{ 79{
93 bool ret = false; 80 bool ret = false;
@@ -139,6 +126,19 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
139 genpd->status = GPD_STATE_ACTIVE; 126 genpd->status = GPD_STATE_ACTIVE;
140} 127}
141 128
129static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
130{
131 s64 usecs64;
132
133 if (!genpd->cpu_data)
134 return;
135
136 usecs64 = genpd->power_on_latency_ns;
137 do_div(usecs64, NSEC_PER_USEC);
138 usecs64 += genpd->cpu_data->saved_exit_latency;
139 genpd->cpu_data->idle_state->exit_latency = usecs64;
140}
141
142/** 142/**
143 * __pm_genpd_poweron - Restore power to a given PM domain and its masters. 143 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
144 * @genpd: PM domain to power up. 144 * @genpd: PM domain to power up.
@@ -146,7 +146,7 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
146 * Restore power to @genpd and all of its masters so that it is possible to 146 * Restore power to @genpd and all of its masters so that it is possible to
147 * resume a device belonging to it. 147 * resume a device belonging to it.
148 */ 148 */
149int __pm_genpd_poweron(struct generic_pm_domain *genpd) 149static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
150 __releases(&genpd->lock) __acquires(&genpd->lock) 150 __releases(&genpd->lock) __acquires(&genpd->lock)
151{ 151{
152 struct gpd_link *link; 152 struct gpd_link *link;
@@ -176,6 +176,13 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
176 return 0; 176 return 0;
177 } 177 }
178 178
179 if (genpd->cpu_data) {
180 cpuidle_pause_and_lock();
181 genpd->cpu_data->idle_state->disabled = true;
182 cpuidle_resume_and_unlock();
183 goto out;
184 }
185
179 /* 186 /*
180 * The list is guaranteed not to change while the loop below is being 187 * The list is guaranteed not to change while the loop below is being
181 * executed, unless one of the masters' .power_on() callbacks fiddles 188 * executed, unless one of the masters' .power_on() callbacks fiddles
@@ -215,6 +222,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
215 if (elapsed_ns > genpd->power_on_latency_ns) { 222 if (elapsed_ns > genpd->power_on_latency_ns) {
216 genpd->power_on_latency_ns = elapsed_ns; 223 genpd->power_on_latency_ns = elapsed_ns;
217 genpd->max_off_time_changed = true; 224 genpd->max_off_time_changed = true;
225 genpd_recalc_cpu_exit_latency(genpd);
218 if (genpd->name) 226 if (genpd->name)
219 pr_warning("%s: Power-on latency exceeded, " 227 pr_warning("%s: Power-on latency exceeded, "
220 "new value %lld ns\n", genpd->name, 228 "new value %lld ns\n", genpd->name,
@@ -222,6 +230,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
222 } 230 }
223 } 231 }
224 232
233 out:
225 genpd_set_active(genpd); 234 genpd_set_active(genpd);
226 235
227 return 0; 236 return 0;
@@ -251,6 +260,19 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
251 260
252#ifdef CONFIG_PM_RUNTIME 261#ifdef CONFIG_PM_RUNTIME
253 262
263static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
264{
265 return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
266 save_state_latency_ns, "state save");
267}
268
269static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
270{
271 return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
272 restore_state_latency_ns,
273 "state restore");
274}
275
254static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, 276static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
255 unsigned long val, void *ptr) 277 unsigned long val, void *ptr)
256{ 278{
@@ -275,7 +297,7 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
275 297
276 pdd = dev->power.subsys_data ? 298 pdd = dev->power.subsys_data ?
277 dev->power.subsys_data->domain_data : NULL; 299 dev->power.subsys_data->domain_data : NULL;
278 if (pdd) { 300 if (pdd && pdd->dev) {
279 to_gpd_data(pdd)->td.constraint_changed = true; 301 to_gpd_data(pdd)->td.constraint_changed = true;
280 genpd = dev_to_genpd(dev); 302 genpd = dev_to_genpd(dev);
281 } else { 303 } else {
@@ -339,19 +361,16 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
339{ 361{
340 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); 362 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
341 struct device *dev = pdd->dev; 363 struct device *dev = pdd->dev;
364 bool need_restore = gpd_data->need_restore;
342 365
343 if (!gpd_data->need_restore) 366 gpd_data->need_restore = false;
344 return;
345
346 mutex_unlock(&genpd->lock); 367 mutex_unlock(&genpd->lock);
347 368
348 genpd_start_dev(genpd, dev); 369 genpd_start_dev(genpd, dev);
349 genpd_restore_dev(genpd, dev); 370 if (need_restore)
350 genpd_stop_dev(genpd, dev); 371 genpd_restore_dev(genpd, dev);
351 372
352 mutex_lock(&genpd->lock); 373 mutex_lock(&genpd->lock);
353
354 gpd_data->need_restore = false;
355} 374}
356 375
357/** 376/**
@@ -458,6 +477,21 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
458 } 477 }
459 } 478 }
460 479
480 if (genpd->cpu_data) {
481 /*
482 * If cpu_data is set, cpuidle should turn the domain off when
483 * the CPU in it is idle. In that case we don't decrement the
484 * subdomain counts of the master domains, so that power is not
485 * removed from the current domain prematurely as a result of
486 * cutting off the masters' power.
487 */
488 genpd->status = GPD_STATE_POWER_OFF;
489 cpuidle_pause_and_lock();
490 genpd->cpu_data->idle_state->disabled = false;
491 cpuidle_resume_and_unlock();
492 goto out;
493 }
494
461 if (genpd->power_off) { 495 if (genpd->power_off) {
462 ktime_t time_start; 496 ktime_t time_start;
463 s64 elapsed_ns; 497 s64 elapsed_ns;
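
The genpd hunks in domain.c thread an optional cpuidle coupling through power-on/off: when genpd->cpu_data is set, the domain is never powered down through its masters; instead genpd toggles the 'disabled' flag of the associated cpuidle state, so cpuidle may cut power only while the domain is logically off, and genpd_recalc_cpu_exit_latency() folds the measured power-on latency into that state's exit latency. The toggling pattern, as a sketch over the fields this series introduces (the helper name is illustrative):

        static void genpd_cpuidle_allow_off(struct generic_pm_domain *genpd,
                                            bool allow)
        {
                /* assumes genpd->cpu_data and its idle_state were set
                 * up elsewhere in this series */
                cpuidle_pause_and_lock();
                genpd->cpu_data->idle_state->disabled = !allow;
                cpuidle_resume_and_unlock();
        }
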
@@ -595,7 +629,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
595 629
596 /* If power.irq_safe, the PM domain is never powered off. */ 630 /* If power.irq_safe, the PM domain is never powered off. */
597 if (dev->power.irq_safe) 631 if (dev->power.irq_safe)
598 goto out; 632 return genpd_start_dev(genpd, dev);
599 633
600 mutex_lock(&genpd->lock); 634 mutex_lock(&genpd->lock);
601 ret = __pm_genpd_poweron(genpd); 635 ret = __pm_genpd_poweron(genpd);
@@ -628,9 +662,6 @@ static int pm_genpd_runtime_resume(struct device *dev)
628 wake_up_all(&genpd->status_wait_queue); 662 wake_up_all(&genpd->status_wait_queue);
629 mutex_unlock(&genpd->lock); 663 mutex_unlock(&genpd->lock);
630 664
631 out:
632 genpd_start_dev(genpd, dev);
633
634 return 0; 665 return 0;
635} 666}
636 667
@@ -1235,6 +1266,27 @@ static void pm_genpd_complete(struct device *dev)
1235 1266
1236#endif /* CONFIG_PM_SLEEP */ 1267#endif /* CONFIG_PM_SLEEP */
1237 1268
1269static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
1270{
1271 struct generic_pm_domain_data *gpd_data;
1272
1273 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1274 if (!gpd_data)
1275 return NULL;
1276
1277 mutex_init(&gpd_data->lock);
1278 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1279 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1280 return gpd_data;
1281}
1282
1283static void __pm_genpd_free_dev_data(struct device *dev,
1284 struct generic_pm_domain_data *gpd_data)
1285{
1286 dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1287 kfree(gpd_data);
1288}
1289
1238/** 1290/**
1239 * __pm_genpd_add_device - Add a device to an I/O PM domain. 1291 * __pm_genpd_add_device - Add a device to an I/O PM domain.
1240 * @genpd: PM domain to add the device to. 1292 * @genpd: PM domain to add the device to.
@@ -1244,7 +1296,7 @@ static void pm_genpd_complete(struct device *dev)
1244int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, 1296int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1245 struct gpd_timing_data *td) 1297 struct gpd_timing_data *td)
1246{ 1298{
1247 struct generic_pm_domain_data *gpd_data; 1299 struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
1248 struct pm_domain_data *pdd; 1300 struct pm_domain_data *pdd;
1249 int ret = 0; 1301 int ret = 0;
1250 1302
@@ -1253,14 +1305,10 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1253 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) 1305 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1254 return -EINVAL; 1306 return -EINVAL;
1255 1307
1256 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); 1308 gpd_data_new = __pm_genpd_alloc_dev_data(dev);
1257 if (!gpd_data) 1309 if (!gpd_data_new)
1258 return -ENOMEM; 1310 return -ENOMEM;
1259 1311
1260 mutex_init(&gpd_data->lock);
1261 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1262 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1263
1264 genpd_acquire_lock(genpd); 1312 genpd_acquire_lock(genpd);
1265 1313
1266 if (genpd->prepared_count > 0) { 1314 if (genpd->prepared_count > 0) {
@@ -1274,35 +1322,42 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1274 goto out; 1322 goto out;
1275 } 1323 }
1276 1324
1325 ret = dev_pm_get_subsys_data(dev);
1326 if (ret)
1327 goto out;
1328
1277 genpd->device_count++; 1329 genpd->device_count++;
1278 genpd->max_off_time_changed = true; 1330 genpd->max_off_time_changed = true;
1279 1331
1280 dev_pm_get_subsys_data(dev);
1281
1282 mutex_lock(&gpd_data->lock);
1283 spin_lock_irq(&dev->power.lock); 1332 spin_lock_irq(&dev->power.lock);
1333
1284 dev->pm_domain = &genpd->domain; 1334 dev->pm_domain = &genpd->domain;
1285 dev->power.subsys_data->domain_data = &gpd_data->base; 1335 if (dev->power.subsys_data->domain_data) {
1286 gpd_data->base.dev = dev; 1336 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1287 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); 1337 } else {
1288 gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF; 1338 gpd_data = gpd_data_new;
1339 dev->power.subsys_data->domain_data = &gpd_data->base;
1340 }
1341 gpd_data->refcount++;
1289 if (td) 1342 if (td)
1290 gpd_data->td = *td; 1343 gpd_data->td = *td;
1291 1344
1345 spin_unlock_irq(&dev->power.lock);
1346
1347 mutex_lock(&gpd_data->lock);
1348 gpd_data->base.dev = dev;
1349 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1350 gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
1292 gpd_data->td.constraint_changed = true; 1351 gpd_data->td.constraint_changed = true;
1293 gpd_data->td.effective_constraint_ns = -1; 1352 gpd_data->td.effective_constraint_ns = -1;
1294 spin_unlock_irq(&dev->power.lock);
1295 mutex_unlock(&gpd_data->lock); 1353 mutex_unlock(&gpd_data->lock);
1296 1354
1297 genpd_release_lock(genpd);
1298
1299 return 0;
1300
1301 out: 1355 out:
1302 genpd_release_lock(genpd); 1356 genpd_release_lock(genpd);
1303 1357
1304 dev_pm_qos_remove_notifier(dev, &gpd_data->nb); 1358 if (gpd_data != gpd_data_new)
1305 kfree(gpd_data); 1359 __pm_genpd_free_dev_data(dev, gpd_data_new);
1360
1306 return ret; 1361 return ret;
1307} 1362}
1308 1363
@@ -1348,6 +1403,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1348{ 1403{
1349 struct generic_pm_domain_data *gpd_data; 1404 struct generic_pm_domain_data *gpd_data;
1350 struct pm_domain_data *pdd; 1405 struct pm_domain_data *pdd;
1406 bool remove = false;
1351 int ret = 0; 1407 int ret = 0;
1352 1408
1353 dev_dbg(dev, "%s()\n", __func__); 1409 dev_dbg(dev, "%s()\n", __func__);
@@ -1368,22 +1424,28 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1368 genpd->max_off_time_changed = true; 1424 genpd->max_off_time_changed = true;
1369 1425
1370 spin_lock_irq(&dev->power.lock); 1426 spin_lock_irq(&dev->power.lock);
1427
1371 dev->pm_domain = NULL; 1428 dev->pm_domain = NULL;
1372 pdd = dev->power.subsys_data->domain_data; 1429 pdd = dev->power.subsys_data->domain_data;
1373 list_del_init(&pdd->list_node); 1430 list_del_init(&pdd->list_node);
1374 dev->power.subsys_data->domain_data = NULL; 1431 gpd_data = to_gpd_data(pdd);
1432 if (--gpd_data->refcount == 0) {
1433 dev->power.subsys_data->domain_data = NULL;
1434 remove = true;
1435 }
1436
1375 spin_unlock_irq(&dev->power.lock); 1437 spin_unlock_irq(&dev->power.lock);
1376 1438
1377 gpd_data = to_gpd_data(pdd);
1378 mutex_lock(&gpd_data->lock); 1439 mutex_lock(&gpd_data->lock);
1379 pdd->dev = NULL; 1440 pdd->dev = NULL;
1380 mutex_unlock(&gpd_data->lock); 1441 mutex_unlock(&gpd_data->lock);
1381 1442
1382 genpd_release_lock(genpd); 1443 genpd_release_lock(genpd);
1383 1444
1384 dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1385 kfree(gpd_data);
1386 dev_pm_put_subsys_data(dev); 1445 dev_pm_put_subsys_data(dev);
1446 if (remove)
1447 __pm_genpd_free_dev_data(dev, gpd_data);
1448
1387 return 0; 1449 return 0;
1388 1450
1389 out: 1451 out:
@@ -1541,33 +1603,52 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1541 * @dev: Device to add the callbacks to. 1603 * @dev: Device to add the callbacks to.
1542 * @ops: Set of callbacks to add. 1604 * @ops: Set of callbacks to add.
1543 * @td: Timing data to add to the device along with the callbacks (optional). 1605 * @td: Timing data to add to the device along with the callbacks (optional).
1606 *
1607 * Every call to this routine should be balanced with a call to
1608 * __pm_genpd_remove_callbacks() and they must not be nested.
1544 */ 1609 */
1545int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops, 1610int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
1546 struct gpd_timing_data *td) 1611 struct gpd_timing_data *td)
1547{ 1612{
1548 struct pm_domain_data *pdd; 1613 struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
1549 int ret = 0; 1614 int ret = 0;
1550 1615
1551 if (!(dev && dev->power.subsys_data && ops)) 1616 if (!(dev && ops))
1552 return -EINVAL; 1617 return -EINVAL;
1553 1618
1619 gpd_data_new = __pm_genpd_alloc_dev_data(dev);
1620 if (!gpd_data_new)
1621 return -ENOMEM;
1622
1554 pm_runtime_disable(dev); 1623 pm_runtime_disable(dev);
1555 device_pm_lock(); 1624 device_pm_lock();
1556 1625
1557 pdd = dev->power.subsys_data->domain_data; 1626 ret = dev_pm_get_subsys_data(dev);
1558 if (pdd) { 1627 if (ret)
1559 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); 1628 goto out;
1560 1629
1561 gpd_data->ops = *ops; 1630 spin_lock_irq(&dev->power.lock);
1562 if (td) 1631
1563 gpd_data->td = *td; 1632 if (dev->power.subsys_data->domain_data) {
1633 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1564 } else { 1634 } else {
1565 ret = -EINVAL; 1635 gpd_data = gpd_data_new;
1636 dev->power.subsys_data->domain_data = &gpd_data->base;
1566 } 1637 }
1638 gpd_data->refcount++;
1639 gpd_data->ops = *ops;
1640 if (td)
1641 gpd_data->td = *td;
1642
1643 spin_unlock_irq(&dev->power.lock);
1567 1644
1645 out:
1568 device_pm_unlock(); 1646 device_pm_unlock();
1569 pm_runtime_enable(dev); 1647 pm_runtime_enable(dev);
1570 1648
1649 if (gpd_data != gpd_data_new)
1650 __pm_genpd_free_dev_data(dev, gpd_data_new);
1651
1571 return ret; 1652 return ret;
1572} 1653}
1573EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks); 1654EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
@@ -1576,10 +1657,13 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
1576 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device. 1657 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
1577 * @dev: Device to remove the callbacks from. 1658 * @dev: Device to remove the callbacks from.
1578 * @clear_td: If set, clear the device's timing data too. 1659 * @clear_td: If set, clear the device's timing data too.
1660 *
1661 * This routine can only be called after pm_genpd_add_callbacks().
1579 */ 1662 */
1580int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) 1663int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
1581{ 1664{
1582 struct pm_domain_data *pdd; 1665 struct generic_pm_domain_data *gpd_data = NULL;
1666 bool remove = false;
1583 int ret = 0; 1667 int ret = 0;
1584 1668
1585 if (!(dev && dev->power.subsys_data)) 1669 if (!(dev && dev->power.subsys_data))
@@ -1588,24 +1672,118 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
1588 pm_runtime_disable(dev); 1672 pm_runtime_disable(dev);
1589 device_pm_lock(); 1673 device_pm_lock();
1590 1674
1591 pdd = dev->power.subsys_data->domain_data; 1675 spin_lock_irq(&dev->power.lock);
1592 if (pdd) {
1593 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1594 1676
1595 gpd_data->ops = (struct gpd_dev_ops){ 0 }; 1677 if (dev->power.subsys_data->domain_data) {
1678 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1679 gpd_data->ops = (struct gpd_dev_ops){ NULL };
1596 if (clear_td) 1680 if (clear_td)
1597 gpd_data->td = (struct gpd_timing_data){ 0 }; 1681 gpd_data->td = (struct gpd_timing_data){ 0 };
1682
1683 if (--gpd_data->refcount == 0) {
1684 dev->power.subsys_data->domain_data = NULL;
1685 remove = true;
1686 }
1598 } else { 1687 } else {
1599 ret = -EINVAL; 1688 ret = -EINVAL;
1600 } 1689 }
1601 1690
1691 spin_unlock_irq(&dev->power.lock);
1692
1602 device_pm_unlock(); 1693 device_pm_unlock();
1603 pm_runtime_enable(dev); 1694 pm_runtime_enable(dev);
1604 1695
1605 return ret; 1696 if (ret)
1697 return ret;
1698
1699 dev_pm_put_subsys_data(dev);
1700 if (remove)
1701 __pm_genpd_free_dev_data(dev, gpd_data);
1702
1703 return 0;
1606} 1704}
1607EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); 1705EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
1608 1706
1707int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1708{
1709 struct cpuidle_driver *cpuidle_drv;
1710 struct gpd_cpu_data *cpu_data;
1711 struct cpuidle_state *idle_state;
1712 int ret = 0;
1713
1714 if (IS_ERR_OR_NULL(genpd) || state < 0)
1715 return -EINVAL;
1716
1717 genpd_acquire_lock(genpd);
1718
1719 if (genpd->cpu_data) {
1720 ret = -EEXIST;
1721 goto out;
1722 }
1723 cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
1724 if (!cpu_data) {
1725 ret = -ENOMEM;
1726 goto out;
1727 }
1728 cpuidle_drv = cpuidle_driver_ref();
1729 if (!cpuidle_drv) {
1730 ret = -ENODEV;
1731 goto out;
1732 }
1733 if (cpuidle_drv->state_count <= state) {
1734 ret = -EINVAL;
1735 goto err;
1736 }
1737 idle_state = &cpuidle_drv->states[state];
1738 if (!idle_state->disabled) {
1739 ret = -EAGAIN;
1740 goto err;
1741 }
1742 cpu_data->idle_state = idle_state;
1743 cpu_data->saved_exit_latency = idle_state->exit_latency;
1744 genpd->cpu_data = cpu_data;
1745 genpd_recalc_cpu_exit_latency(genpd);
1746
1747 out:
1748 genpd_release_lock(genpd);
1749 return ret;
1750
1751 err:
1752 cpuidle_driver_unref();
1753 goto out;
1754}
1755
1756int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1757{
1758 struct gpd_cpu_data *cpu_data;
1759 struct cpuidle_state *idle_state;
1760 int ret = 0;
1761
1762 if (IS_ERR_OR_NULL(genpd))
1763 return -EINVAL;
1764
1765 genpd_acquire_lock(genpd);
1766
1767 cpu_data = genpd->cpu_data;
1768 if (!cpu_data) {
1769 ret = -ENODEV;
1770 goto out;
1771 }
1772 idle_state = cpu_data->idle_state;
1773 if (!idle_state->disabled) {
1774 ret = -EAGAIN;
1775 goto out;
1776 }
1777 idle_state->exit_latency = cpu_data->saved_exit_latency;
1778 cpuidle_driver_unref();
1779 genpd->cpu_data = NULL;
1780 kfree(cpu_data);
1781
1782 out:
1783 genpd_release_lock(genpd);
1784 return ret;
1785}
1786
1609/* Default device callbacks for generic PM domains. */ 1787/* Default device callbacks for generic PM domains. */
1610 1788
1611/** 1789/**
@@ -1615,16 +1793,24 @@ EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
1615static int pm_genpd_default_save_state(struct device *dev) 1793static int pm_genpd_default_save_state(struct device *dev)
1616{ 1794{
1617 int (*cb)(struct device *__dev); 1795 int (*cb)(struct device *__dev);
1618 struct device_driver *drv = dev->driver;
1619 1796
1620 cb = dev_gpd_data(dev)->ops.save_state; 1797 cb = dev_gpd_data(dev)->ops.save_state;
1621 if (cb) 1798 if (cb)
1622 return cb(dev); 1799 return cb(dev);
1623 1800
1624 if (drv && drv->pm && drv->pm->runtime_suspend) 1801 if (dev->type && dev->type->pm)
1625 return drv->pm->runtime_suspend(dev); 1802 cb = dev->type->pm->runtime_suspend;
1803 else if (dev->class && dev->class->pm)
1804 cb = dev->class->pm->runtime_suspend;
1805 else if (dev->bus && dev->bus->pm)
1806 cb = dev->bus->pm->runtime_suspend;
1807 else
1808 cb = NULL;
1626 1809
1627 return 0; 1810 if (!cb && dev->driver && dev->driver->pm)
1811 cb = dev->driver->pm->runtime_suspend;
1812
1813 return cb ? cb(dev) : 0;
1628} 1814}
1629 1815
1630/** 1816/**
@@ -1634,16 +1820,24 @@ static int pm_genpd_default_save_state(struct device *dev)
1634static int pm_genpd_default_restore_state(struct device *dev) 1820static int pm_genpd_default_restore_state(struct device *dev)
1635{ 1821{
1636 int (*cb)(struct device *__dev); 1822 int (*cb)(struct device *__dev);
1637 struct device_driver *drv = dev->driver;
1638 1823
1639 cb = dev_gpd_data(dev)->ops.restore_state; 1824 cb = dev_gpd_data(dev)->ops.restore_state;
1640 if (cb) 1825 if (cb)
1641 return cb(dev); 1826 return cb(dev);
1642 1827
1643 if (drv && drv->pm && drv->pm->runtime_resume) 1828 if (dev->type && dev->type->pm)
1644 return drv->pm->runtime_resume(dev); 1829 cb = dev->type->pm->runtime_resume;
1830 else if (dev->class && dev->class->pm)
1831 cb = dev->class->pm->runtime_resume;
1832 else if (dev->bus && dev->bus->pm)
1833 cb = dev->bus->pm->runtime_resume;
1834 else
1835 cb = NULL;
1645 1836
1646 return 0; 1837 if (!cb && dev->driver && dev->driver->pm)
1838 cb = dev->driver->pm->runtime_resume;
1839
1840 return cb ? cb(dev) : 0;
1647} 1841}
1648 1842
1649#ifdef CONFIG_PM_SLEEP 1843#ifdef CONFIG_PM_SLEEP
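The genpd_attach_cpuidle() / genpd_detach_cpuidle() pair added above couples a PM domain to a single cpuidle state: attach requires the target state to start out disabled and records its exit latency, detach restores that latency and drops the cpuidle driver reference. A minimal usage sketch; the domain pointer and the state index 1 are assumptions for illustration, not part of this patch:

/* Sketch only: "pd" and state index 1 are hypothetical. */
static int example_couple_domain(struct generic_pm_domain *pd)
{
	int ret;

	ret = genpd_attach_cpuidle(pd, 1);
	if (ret)
		return ret;	/* -EEXIST, -ENOMEM, -ENODEV, -EINVAL or -EAGAIN */

	/* ... the domain is now powered off by cpuidle entering state 1 ... */

	return genpd_detach_cpuidle(pd);
}

Detach fails with -EAGAIN unless the coupled state is disabled again at that point, mirroring the requirement checked during attach.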
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e0fb5b0435a3..0113adc310dc 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -28,7 +28,7 @@
28#include <linux/sched.h> 28#include <linux/sched.h>
29#include <linux/async.h> 29#include <linux/async.h>
30#include <linux/suspend.h> 30#include <linux/suspend.h>
31 31#include <linux/cpuidle.h>
32#include "../base.h" 32#include "../base.h"
33#include "power.h" 33#include "power.h"
34 34
@@ -45,10 +45,10 @@ typedef int (*pm_callback_t)(struct device *);
45 */ 45 */
46 46
47LIST_HEAD(dpm_list); 47LIST_HEAD(dpm_list);
48LIST_HEAD(dpm_prepared_list); 48static LIST_HEAD(dpm_prepared_list);
49LIST_HEAD(dpm_suspended_list); 49static LIST_HEAD(dpm_suspended_list);
50LIST_HEAD(dpm_late_early_list); 50static LIST_HEAD(dpm_late_early_list);
51LIST_HEAD(dpm_noirq_list); 51static LIST_HEAD(dpm_noirq_list);
52 52
53struct suspend_stats suspend_stats; 53struct suspend_stats suspend_stats;
54static DEFINE_MUTEX(dpm_list_mtx); 54static DEFINE_MUTEX(dpm_list_mtx);
@@ -166,7 +166,7 @@ static ktime_t initcall_debug_start(struct device *dev)
166{ 166{
167 ktime_t calltime = ktime_set(0, 0); 167 ktime_t calltime = ktime_set(0, 0);
168 168
169 if (initcall_debug) { 169 if (pm_print_times_enabled) {
170 pr_info("calling %s+ @ %i, parent: %s\n", 170 pr_info("calling %s+ @ %i, parent: %s\n",
171 dev_name(dev), task_pid_nr(current), 171 dev_name(dev), task_pid_nr(current),
172 dev->parent ? dev_name(dev->parent) : "none"); 172 dev->parent ? dev_name(dev->parent) : "none");
@@ -181,7 +181,7 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
181{ 181{
182 ktime_t delta, rettime; 182 ktime_t delta, rettime;
183 183
184 if (initcall_debug) { 184 if (pm_print_times_enabled) {
185 rettime = ktime_get(); 185 rettime = ktime_get();
186 delta = ktime_sub(rettime, calltime); 186 delta = ktime_sub(rettime, calltime);
187 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), 187 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
@@ -467,6 +467,7 @@ static void dpm_resume_noirq(pm_message_t state)
467 mutex_unlock(&dpm_list_mtx); 467 mutex_unlock(&dpm_list_mtx);
468 dpm_show_time(starttime, state, "noirq"); 468 dpm_show_time(starttime, state, "noirq");
469 resume_device_irqs(); 469 resume_device_irqs();
470 cpuidle_resume();
470} 471}
471 472
472/** 473/**
@@ -867,6 +868,7 @@ static int dpm_suspend_noirq(pm_message_t state)
867 ktime_t starttime = ktime_get(); 868 ktime_t starttime = ktime_get();
868 int error = 0; 869 int error = 0;
869 870
871 cpuidle_pause();
870 suspend_device_irqs(); 872 suspend_device_irqs();
871 mutex_lock(&dpm_list_mtx); 873 mutex_lock(&dpm_list_mtx);
872 while (!list_empty(&dpm_late_early_list)) { 874 while (!list_empty(&dpm_late_early_list)) {
@@ -989,8 +991,16 @@ static int dpm_suspend_late(pm_message_t state)
989int dpm_suspend_end(pm_message_t state) 991int dpm_suspend_end(pm_message_t state)
990{ 992{
991 int error = dpm_suspend_late(state); 993 int error = dpm_suspend_late(state);
994 if (error)
995 return error;
996
997 error = dpm_suspend_noirq(state);
998 if (error) {
999 dpm_resume_early(state);
1000 return error;
1001 }
992 1002
993 return error ? : dpm_suspend_noirq(state); 1003 return 0;
994} 1004}
995EXPORT_SYMBOL_GPL(dpm_suspend_end); 1005EXPORT_SYMBOL_GPL(dpm_suspend_end);
996 1006
@@ -1031,7 +1041,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1031 dpm_wait_for_children(dev, async); 1041 dpm_wait_for_children(dev, async);
1032 1042
1033 if (async_error) 1043 if (async_error)
1034 return 0; 1044 goto Complete;
1035 1045
1036 pm_runtime_get_noresume(dev); 1046 pm_runtime_get_noresume(dev);
1037 if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) 1047 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
@@ -1040,7 +1050,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1040 if (pm_wakeup_pending()) { 1050 if (pm_wakeup_pending()) {
1041 pm_runtime_put_sync(dev); 1051 pm_runtime_put_sync(dev);
1042 async_error = -EBUSY; 1052 async_error = -EBUSY;
1043 return 0; 1053 goto Complete;
1044 } 1054 }
1045 1055
1046 device_lock(dev); 1056 device_lock(dev);
@@ -1097,6 +1107,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1097 } 1107 }
1098 1108
1099 device_unlock(dev); 1109 device_unlock(dev);
1110
1111 Complete:
1100 complete_all(&dev->power.completion); 1112 complete_all(&dev->power.completion);
1101 1113
1102 if (error) { 1114 if (error) {
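The __device_suspend() hunks replace two early "return 0" statements with "goto Complete" so that complete_all() runs on every exit path. Without that, a device aborting early (async_error set, or a wakeup pending) would leave dev->power.completion unsignaled and anything sleeping in dpm_wait_for_children() on it would block forever. A reduced sketch of the invariant; the helper name is made up:

/* Every path out must signal the completion other devices wait on. */
static int example_device_suspend(struct device *dev, bool abort)
{
	int error = 0;

	if (abort)
		goto Complete;	/* was "return 0;", which skipped the wakeup */

	error = example_do_suspend(dev);	/* hypothetical helper */

 Complete:
	complete_all(&dev->power.completion);
	return error;
}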
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index fd849a2c4fa8..74a67e0019a2 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -462,7 +462,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
462static void __dev_pm_qos_drop_user_request(struct device *dev) 462static void __dev_pm_qos_drop_user_request(struct device *dev)
463{ 463{
464 dev_pm_qos_remove_request(dev->power.pq_req); 464 dev_pm_qos_remove_request(dev->power.pq_req);
465 dev->power.pq_req = 0; 465 dev->power.pq_req = NULL;
466} 466}
467 467
468/** 468/**
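The qos.c change is a type fix rather than a behavioral one: power.pq_req is a pointer, and clearing it with the integer literal 0 compiles but hides the intent and trips sparse's NULL checking. Illustration:

struct dev_pm_qos_request *req;

req = NULL;	/* preferred: plainly a pointer being cleared */
req = 0;	/* legal C, but sparse warns */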
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 59894873a3b3..7d9c1cb1c39a 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -147,6 +147,8 @@ static int rpm_check_suspend_allowed(struct device *dev)
147 || (dev->power.request_pending 147 || (dev->power.request_pending
148 && dev->power.request == RPM_REQ_RESUME)) 148 && dev->power.request == RPM_REQ_RESUME))
149 retval = -EAGAIN; 149 retval = -EAGAIN;
150 else if (__dev_pm_qos_read_value(dev) < 0)
151 retval = -EPERM;
150 else if (dev->power.runtime_status == RPM_SUSPENDED) 152 else if (dev->power.runtime_status == RPM_SUSPENDED)
151 retval = 1; 153 retval = 1;
152 154
@@ -388,7 +390,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
388 goto repeat; 390 goto repeat;
389 } 391 }
390 392
391 dev->power.deferred_resume = false;
392 if (dev->power.no_callbacks) 393 if (dev->power.no_callbacks)
393 goto no_callback; /* Assume success. */ 394 goto no_callback; /* Assume success. */
394 395
@@ -403,12 +404,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
403 goto out; 404 goto out;
404 } 405 }
405 406
406 if (__dev_pm_qos_read_value(dev) < 0) {
407 /* Negative PM QoS constraint means "never suspend". */
408 retval = -EPERM;
409 goto out;
410 }
411
412 __update_runtime_status(dev, RPM_SUSPENDING); 407 __update_runtime_status(dev, RPM_SUSPENDING);
413 408
414 if (dev->pm_domain) 409 if (dev->pm_domain)
@@ -440,6 +435,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
440 wake_up_all(&dev->power.wait_queue); 435 wake_up_all(&dev->power.wait_queue);
441 436
442 if (dev->power.deferred_resume) { 437 if (dev->power.deferred_resume) {
438 dev->power.deferred_resume = false;
443 rpm_resume(dev, 0); 439 rpm_resume(dev, 0);
444 retval = -EAGAIN; 440 retval = -EAGAIN;
445 goto out; 441 goto out;
@@ -584,6 +580,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
584 || dev->parent->power.runtime_status == RPM_ACTIVE) { 580 || dev->parent->power.runtime_status == RPM_ACTIVE) {
585 atomic_inc(&dev->parent->power.child_count); 581 atomic_inc(&dev->parent->power.child_count);
586 spin_unlock(&dev->parent->power.lock); 582 spin_unlock(&dev->parent->power.lock);
583 retval = 1;
587 goto no_callback; /* Assume success. */ 584 goto no_callback; /* Assume success. */
588 } 585 }
589 spin_unlock(&dev->parent->power.lock); 586 spin_unlock(&dev->parent->power.lock);
@@ -664,7 +661,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
664 } 661 }
665 wake_up_all(&dev->power.wait_queue); 662 wake_up_all(&dev->power.wait_queue);
666 663
667 if (!retval) 664 if (retval >= 0)
668 rpm_idle(dev, RPM_ASYNC); 665 rpm_idle(dev, RPM_ASYNC);
669 666
670 out: 667 out:
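Two themes run through the runtime.c hunks. First, the __dev_pm_qos_read_value() test moves from rpm_suspend() into rpm_check_suspend_allowed(), so a negative "never suspend" QoS constraint is rejected before the device ever enters RPM_SUSPENDING. Second, deferred_resume is now cleared only where it is consumed, so a resume request arriving mid-suspend is not lost; relatedly, rpm_resume() now reports 1 when the resume is satisfied without running callbacks (the parent-already-active fast path), and the follow-up idle notification is issued for any retval >= 0. An abridged sketch of the resulting check order; this is a reduction of the patched function, not the complete list of conditions:

/* Abridged: cheap disqualifiers first, the QoS veto before the
 * "already suspended" short-circuit that returns 1. */
static int example_check_suspend_allowed(struct device *dev)
{
	if (dev->power.runtime_error)
		return -EINVAL;
	if (atomic_read(&dev->power.usage_count) > 0)
		return -EAGAIN;
	if (__dev_pm_qos_read_value(dev) < 0)
		return -EPERM;	/* negative constraint: never suspend */
	if (dev->power.runtime_status == RPM_SUSPENDED)
		return 1;
	return 0;
}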
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 48be2ad4dd2c..b91dc6f1e914 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -474,6 +474,8 @@ static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
474 474
475#endif 475#endif
476 476
477#ifdef CONFIG_PM_SLEEP
478
477static ssize_t async_show(struct device *dev, struct device_attribute *attr, 479static ssize_t async_show(struct device *dev, struct device_attribute *attr,
478 char *buf) 480 char *buf)
479{ 481{
@@ -500,6 +502,8 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr,
500} 502}
501 503
502static DEVICE_ATTR(async, 0644, async_show, async_store); 504static DEVICE_ATTR(async, 0644, async_show, async_store);
505
506#endif
503#endif /* CONFIG_PM_ADVANCED_DEBUG */ 507#endif /* CONFIG_PM_ADVANCED_DEBUG */
504 508
505static struct attribute *power_attrs[] = { 509static struct attribute *power_attrs[] = {
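The sysfs.c hunks fence the async attribute inside CONFIG_PM_SLEEP, since asynchronous suspend/resume is meaningless when system sleep is compiled out. One nit the hunk leaves open: with nested guards it is conventional to label the distant #endif, as the existing CONFIG_PM_ADVANCED_DEBUG one already is, e.g.:

#ifdef CONFIG_PM_ADVANCED_DEBUG
/* ... */
#ifdef CONFIG_PM_SLEEP
/* async attribute */
#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM_ADVANCED_DEBUG */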
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index b986b8660b0c..80f9ab9c3aa4 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -95,6 +95,9 @@ struct regmap {
95 95
96 /* if set, converts bulk rw to single rw */ 96 /* if set, converts bulk rw to single rw */
97 bool use_single_rw; 97 bool use_single_rw;
98
99 struct rb_root range_tree;
100 void *selector_work_buf; /* Scratch buffer used for selector */
98}; 101};
99 102
100struct regcache_ops { 103struct regcache_ops {
@@ -115,6 +118,20 @@ bool regmap_precious(struct regmap *map, unsigned int reg);
115int _regmap_write(struct regmap *map, unsigned int reg, 118int _regmap_write(struct regmap *map, unsigned int reg,
116 unsigned int val); 119 unsigned int val);
117 120
121struct regmap_range_node {
122 struct rb_node node;
123
124 unsigned int range_min;
125 unsigned int range_max;
126
127 unsigned int selector_reg;
128 unsigned int selector_mask;
129 int selector_shift;
130
131 unsigned int window_start;
132 unsigned int window_len;
133};
134
118#ifdef CONFIG_DEBUG_FS 135#ifdef CONFIG_DEBUG_FS
119extern void regmap_debugfs_initcall(void); 136extern void regmap_debugfs_initcall(void);
120extern void regmap_debugfs_init(struct regmap *map, const char *name); 137extern void regmap_debugfs_init(struct regmap *map, const char *name);
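struct regmap_range_node above is the parsed form of the regmap_range_cfg entries a driver supplies: a virtual register range reached through a smaller data window, with a selector register picking the page. A hypothetical configuration for illustration; the chip layout and all addresses are invented, and the driver's max_register must cover both range_max and selector_reg (the sanity checks in regmap.c enforce this):

/* Hypothetical chip: virtual registers 0x100-0x2ff are reached
 * through a 0x80-register window at 0x20-0x9f; bits [1:0] of
 * register 0x1f select one of the four pages. */
static const struct regmap_range_cfg example_range = {
	.range_min	= 0x100,
	.range_max	= 0x2ff,
	.selector_reg	= 0x1f,
	.selector_mask	= 0x3,
	.selector_shift	= 0,
	.window_start	= 0x20,
	.window_len	= 0x80,
};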
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 4fac4b9be88f..a89734621e51 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -24,14 +24,18 @@ struct regmap_irq_chip_data {
24 struct mutex lock; 24 struct mutex lock;
25 25
26 struct regmap *map; 26 struct regmap *map;
27 struct regmap_irq_chip *chip; 27 const struct regmap_irq_chip *chip;
28 28
29 int irq_base; 29 int irq_base;
30 struct irq_domain *domain; 30 struct irq_domain *domain;
31 31
32 int irq;
33 int wake_count;
34
32 unsigned int *status_buf; 35 unsigned int *status_buf;
33 unsigned int *mask_buf; 36 unsigned int *mask_buf;
34 unsigned int *mask_buf_def; 37 unsigned int *mask_buf_def;
38 unsigned int *wake_buf;
35 39
36 unsigned int irq_reg_stride; 40 unsigned int irq_reg_stride;
37}; 41};
@@ -71,6 +75,16 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
71 d->chip->mask_base + (i * map->reg_stride)); 75 d->chip->mask_base + (i * map->reg_stride));
72 } 76 }
73 77
 78 /* If we've changed our wakeup count, propagate it to the parent */
79 if (d->wake_count < 0)
80 for (i = d->wake_count; i < 0; i++)
81 irq_set_irq_wake(d->irq, 0);
82 else if (d->wake_count > 0)
83 for (i = 0; i < d->wake_count; i++)
84 irq_set_irq_wake(d->irq, 1);
85
86 d->wake_count = 0;
87
74 mutex_unlock(&d->lock); 88 mutex_unlock(&d->lock);
75} 89}
76 90
@@ -92,18 +106,41 @@ static void regmap_irq_disable(struct irq_data *data)
92 d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask; 106 d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
93} 107}
94 108
109static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
110{
111 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
112 struct regmap *map = d->map;
113 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
114
115 if (!d->chip->wake_base)
116 return -EINVAL;
117
118 if (on) {
119 d->wake_buf[irq_data->reg_offset / map->reg_stride]
120 &= ~irq_data->mask;
121 d->wake_count++;
122 } else {
123 d->wake_buf[irq_data->reg_offset / map->reg_stride]
124 |= irq_data->mask;
125 d->wake_count--;
126 }
127
128 return 0;
129}
130
95static struct irq_chip regmap_irq_chip = { 131static struct irq_chip regmap_irq_chip = {
96 .name = "regmap", 132 .name = "regmap",
97 .irq_bus_lock = regmap_irq_lock, 133 .irq_bus_lock = regmap_irq_lock,
98 .irq_bus_sync_unlock = regmap_irq_sync_unlock, 134 .irq_bus_sync_unlock = regmap_irq_sync_unlock,
99 .irq_disable = regmap_irq_disable, 135 .irq_disable = regmap_irq_disable,
100 .irq_enable = regmap_irq_enable, 136 .irq_enable = regmap_irq_enable,
137 .irq_set_wake = regmap_irq_set_wake,
101}; 138};
102 139
103static irqreturn_t regmap_irq_thread(int irq, void *d) 140static irqreturn_t regmap_irq_thread(int irq, void *d)
104{ 141{
105 struct regmap_irq_chip_data *data = d; 142 struct regmap_irq_chip_data *data = d;
106 struct regmap_irq_chip *chip = data->chip; 143 const struct regmap_irq_chip *chip = data->chip;
107 struct regmap *map = data->map; 144 struct regmap *map = data->map;
108 int ret, i; 145 int ret, i;
109 bool handled = false; 146 bool handled = false;
@@ -195,7 +232,7 @@ static struct irq_domain_ops regmap_domain_ops = {
195 * register values used by the IRQ controller over suspend and resume. 232 * register values used by the IRQ controller over suspend and resume.
196 */ 233 */
197int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, 234int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
198 int irq_base, struct regmap_irq_chip *chip, 235 int irq_base, const struct regmap_irq_chip *chip,
199 struct regmap_irq_chip_data **data) 236 struct regmap_irq_chip_data **data)
200{ 237{
201 struct regmap_irq_chip_data *d; 238 struct regmap_irq_chip_data *d;
@@ -240,6 +277,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
240 if (!d->mask_buf_def) 277 if (!d->mask_buf_def)
241 goto err_alloc; 278 goto err_alloc;
242 279
280 if (chip->wake_base) {
281 d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
282 GFP_KERNEL);
283 if (!d->wake_buf)
284 goto err_alloc;
285 }
286
287 d->irq = irq;
243 d->map = map; 288 d->map = map;
244 d->chip = chip; 289 d->chip = chip;
245 d->irq_base = irq_base; 290 d->irq_base = irq_base;
@@ -294,6 +339,7 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
294err_domain: 339err_domain:
295 /* Should really dispose of the domain but... */ 340 /* Should really dispose of the domain but... */
296err_alloc: 341err_alloc:
342 kfree(d->wake_buf);
297 kfree(d->mask_buf_def); 343 kfree(d->mask_buf_def);
298 kfree(d->mask_buf); 344 kfree(d->mask_buf);
299 kfree(d->status_buf); 345 kfree(d->status_buf);
@@ -315,6 +361,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
315 361
316 free_irq(irq, d); 362 free_irq(irq, d);
317 /* We should unmap the domain but... */ 363 /* We should unmap the domain but... */
364 kfree(d->wake_buf);
318 kfree(d->mask_buf_def); 365 kfree(d->mask_buf_def);
319 kfree(d->mask_buf); 366 kfree(d->mask_buf);
320 kfree(d->status_buf); 367 kfree(d->status_buf);
@@ -346,6 +393,10 @@ EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
346 */ 393 */
347int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq) 394int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
348{ 395{
396 /* Handle holes in the IRQ list */
397 if (!data->chip->irqs[irq].mask)
398 return -EINVAL;
399
349 return irq_create_mapping(data->domain, irq); 400 return irq_create_mapping(data->domain, irq);
350} 401}
351EXPORT_SYMBOL_GPL(regmap_irq_get_virq); 402EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
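Using the new wake support only requires that a chip description set wake_base to its wake-enable register bank; regmap_irq_set_wake() then maintains wake_buf, and the net wake_count is propagated to the parent interrupt with irq_set_irq_wake() when the bus lock is released. A hypothetical chip description; every name and address below is invented for illustration:

static const struct regmap_irq example_irqs[] = {
	{ .reg_offset = 0, .mask = BIT(0) },	/* e.g. RTC alarm */
	{ .reg_offset = 0, .mask = BIT(1) },	/* e.g. power button */
};

static const struct regmap_irq_chip example_irq_chip = {
	.name		= "example",
	.status_base	= 0x10,
	.mask_base	= 0x20,
	.ack_base	= 0x30,
	.wake_base	= 0x40,	/* makes regmap_irq_set_wake() usable */
	.num_regs	= 1,
	.irqs		= example_irqs,
	.num_irqs	= ARRAY_SIZE(example_irqs),
};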
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index febd6de6c8ac..f05fc74dd84a 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -37,7 +37,7 @@ static int regmap_mmio_gather_write(void *context,
37 37
38 BUG_ON(reg_size != 4); 38 BUG_ON(reg_size != 4);
39 39
40 offset = be32_to_cpup(reg); 40 offset = *(u32 *)reg;
41 41
42 while (val_size) { 42 while (val_size) {
43 switch (ctx->val_bytes) { 43 switch (ctx->val_bytes) {
@@ -45,14 +45,14 @@ static int regmap_mmio_gather_write(void *context,
45 writeb(*(u8 *)val, ctx->regs + offset); 45 writeb(*(u8 *)val, ctx->regs + offset);
46 break; 46 break;
47 case 2: 47 case 2:
48 writew(be16_to_cpup(val), ctx->regs + offset); 48 writew(*(u16 *)val, ctx->regs + offset);
49 break; 49 break;
50 case 4: 50 case 4:
51 writel(be32_to_cpup(val), ctx->regs + offset); 51 writel(*(u32 *)val, ctx->regs + offset);
52 break; 52 break;
53#ifdef CONFIG_64BIT 53#ifdef CONFIG_64BIT
54 case 8: 54 case 8:
55 writeq(be64_to_cpup(val), ctx->regs + offset); 55 writeq(*(u64 *)val, ctx->regs + offset);
56 break; 56 break;
57#endif 57#endif
58 default: 58 default:
@@ -83,7 +83,7 @@ static int regmap_mmio_read(void *context,
83 83
84 BUG_ON(reg_size != 4); 84 BUG_ON(reg_size != 4);
85 85
86 offset = be32_to_cpup(reg); 86 offset = *(u32 *)reg;
87 87
88 while (val_size) { 88 while (val_size) {
89 switch (ctx->val_bytes) { 89 switch (ctx->val_bytes) {
@@ -91,14 +91,14 @@ static int regmap_mmio_read(void *context,
91 *(u8 *)val = readb(ctx->regs + offset); 91 *(u8 *)val = readb(ctx->regs + offset);
92 break; 92 break;
93 case 2: 93 case 2:
94 *(u16 *)val = cpu_to_be16(readw(ctx->regs + offset)); 94 *(u16 *)val = readw(ctx->regs + offset);
95 break; 95 break;
96 case 4: 96 case 4:
97 *(u32 *)val = cpu_to_be32(readl(ctx->regs + offset)); 97 *(u32 *)val = readl(ctx->regs + offset);
98 break; 98 break;
99#ifdef CONFIG_64BIT 99#ifdef CONFIG_64BIT
100 case 8: 100 case 8:
101 *(u64 *)val = cpu_to_be32(readq(ctx->regs + offset)); 101 *(u64 *)val = readq(ctx->regs + offset);
102 break; 102 break;
103#endif 103#endif
104 default: 104 default:
@@ -124,9 +124,11 @@ static struct regmap_bus regmap_mmio = {
124 .gather_write = regmap_mmio_gather_write, 124 .gather_write = regmap_mmio_gather_write,
125 .read = regmap_mmio_read, 125 .read = regmap_mmio_read,
126 .free_context = regmap_mmio_free_context, 126 .free_context = regmap_mmio_free_context,
127 .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
128 .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
127}; 129};
128 130
129struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs, 131static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
130 const struct regmap_config *config) 132 const struct regmap_config *config)
131{ 133{
132 struct regmap_mmio_context *ctx; 134 struct regmap_mmio_context *ctx;
@@ -162,7 +164,15 @@ struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
162 if (config->reg_stride < min_stride) 164 if (config->reg_stride < min_stride)
163 return ERR_PTR(-EINVAL); 165 return ERR_PTR(-EINVAL);
164 166
165 ctx = kzalloc(GFP_KERNEL, sizeof(*ctx)); 167 switch (config->reg_format_endian) {
168 case REGMAP_ENDIAN_DEFAULT:
169 case REGMAP_ENDIAN_NATIVE:
170 break;
171 default:
172 return ERR_PTR(-EINVAL);
173 }
174
175 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
166 if (!ctx) 176 if (!ctx)
167 return ERR_PTR(-ENOMEM); 177 return ERR_PTR(-ENOMEM);
168 178
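The regmap-mmio conversion to native endianness removes a double swap: readl() and friends already translate between bus and CPU byte order, so additionally wrapping their values in cpu_to_be32()/be32_to_cpup() reordered the bytes a second time on little-endian machines (the 64-bit read path even applied the 32-bit swap to a readq() result). Illustration:

/* On a little-endian CPU: */
u32 ok  = readl(regs + offset);			/* native order, correct */
u32 bad = cpu_to_be32(readl(regs + offset));	/* bytes swapped twice */

Declaring REGMAP_ENDIAN_NATIVE as the bus default lets the core select the matching *_native format/parse helpers added in regmap.c, and the explicit endianness check rejects configurations the MMIO bus cannot honor.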
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 0bcda488f11c..c241ae2f2f10 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -15,12 +15,25 @@
15#include <linux/export.h> 15#include <linux/export.h>
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/rbtree.h>
18 19
19#define CREATE_TRACE_POINTS 20#define CREATE_TRACE_POINTS
20#include <trace/events/regmap.h> 21#include <trace/events/regmap.h>
21 22
22#include "internal.h" 23#include "internal.h"
23 24
25/*
26 * Sometimes for failures during very early init the trace
27 * infrastructure isn't available early enough to be used. For this
28 * sort of problem defining LOG_DEVICE will add printks for basic
29 * register I/O on a specific device.
30 */
31#undef LOG_DEVICE
32
33static int _regmap_update_bits(struct regmap *map, unsigned int reg,
34 unsigned int mask, unsigned int val,
35 bool *change);
36
24bool regmap_writeable(struct regmap *map, unsigned int reg) 37bool regmap_writeable(struct regmap *map, unsigned int reg)
25{ 38{
26 if (map->max_register && reg > map->max_register) 39 if (map->max_register && reg > map->max_register)
@@ -119,13 +132,19 @@ static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
119 b[0] = val << shift; 132 b[0] = val << shift;
120} 133}
121 134
122static void regmap_format_16(void *buf, unsigned int val, unsigned int shift) 135static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
123{ 136{
124 __be16 *b = buf; 137 __be16 *b = buf;
125 138
126 b[0] = cpu_to_be16(val << shift); 139 b[0] = cpu_to_be16(val << shift);
127} 140}
128 141
142static void regmap_format_16_native(void *buf, unsigned int val,
143 unsigned int shift)
144{
145 *(u16 *)buf = val << shift;
146}
147
129static void regmap_format_24(void *buf, unsigned int val, unsigned int shift) 148static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
130{ 149{
131 u8 *b = buf; 150 u8 *b = buf;
@@ -137,13 +156,19 @@ static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
137 b[2] = val; 156 b[2] = val;
138} 157}
139 158
140static void regmap_format_32(void *buf, unsigned int val, unsigned int shift) 159static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
141{ 160{
142 __be32 *b = buf; 161 __be32 *b = buf;
143 162
144 b[0] = cpu_to_be32(val << shift); 163 b[0] = cpu_to_be32(val << shift);
145} 164}
146 165
166static void regmap_format_32_native(void *buf, unsigned int val,
167 unsigned int shift)
168{
169 *(u32 *)buf = val << shift;
170}
171
147static unsigned int regmap_parse_8(void *buf) 172static unsigned int regmap_parse_8(void *buf)
148{ 173{
149 u8 *b = buf; 174 u8 *b = buf;
@@ -151,7 +176,7 @@ static unsigned int regmap_parse_8(void *buf)
151 return b[0]; 176 return b[0];
152} 177}
153 178
154static unsigned int regmap_parse_16(void *buf) 179static unsigned int regmap_parse_16_be(void *buf)
155{ 180{
156 __be16 *b = buf; 181 __be16 *b = buf;
157 182
@@ -160,6 +185,11 @@ static unsigned int regmap_parse_16(void *buf)
160 return b[0]; 185 return b[0];
161} 186}
162 187
188static unsigned int regmap_parse_16_native(void *buf)
189{
190 return *(u16 *)buf;
191}
192
163static unsigned int regmap_parse_24(void *buf) 193static unsigned int regmap_parse_24(void *buf)
164{ 194{
165 u8 *b = buf; 195 u8 *b = buf;
@@ -170,7 +200,7 @@ static unsigned int regmap_parse_24(void *buf)
170 return ret; 200 return ret;
171} 201}
172 202
173static unsigned int regmap_parse_32(void *buf) 203static unsigned int regmap_parse_32_be(void *buf)
174{ 204{
175 __be32 *b = buf; 205 __be32 *b = buf;
176 206
@@ -179,6 +209,11 @@ static unsigned int regmap_parse_32(void *buf)
179 return b[0]; 209 return b[0];
180} 210}
181 211
212static unsigned int regmap_parse_32_native(void *buf)
213{
214 return *(u32 *)buf;
215}
216
182static void regmap_lock_mutex(struct regmap *map) 217static void regmap_lock_mutex(struct regmap *map)
183{ 218{
184 mutex_lock(&map->mutex); 219 mutex_lock(&map->mutex);
@@ -208,6 +243,67 @@ static void dev_get_regmap_release(struct device *dev, void *res)
208 */ 243 */
209} 244}
210 245
246static bool _regmap_range_add(struct regmap *map,
247 struct regmap_range_node *data)
248{
249 struct rb_root *root = &map->range_tree;
250 struct rb_node **new = &(root->rb_node), *parent = NULL;
251
252 while (*new) {
253 struct regmap_range_node *this =
254 container_of(*new, struct regmap_range_node, node);
255
256 parent = *new;
257 if (data->range_max < this->range_min)
258 new = &((*new)->rb_left);
259 else if (data->range_min > this->range_max)
260 new = &((*new)->rb_right);
261 else
262 return false;
263 }
264
265 rb_link_node(&data->node, parent, new);
266 rb_insert_color(&data->node, root);
267
268 return true;
269}
270
271static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
272 unsigned int reg)
273{
274 struct rb_node *node = map->range_tree.rb_node;
275
276 while (node) {
277 struct regmap_range_node *this =
278 container_of(node, struct regmap_range_node, node);
279
280 if (reg < this->range_min)
281 node = node->rb_left;
282 else if (reg > this->range_max)
283 node = node->rb_right;
284 else
285 return this;
286 }
287
288 return NULL;
289}
290
291static void regmap_range_exit(struct regmap *map)
292{
293 struct rb_node *next;
294 struct regmap_range_node *range_node;
295
296 next = rb_first(&map->range_tree);
297 while (next) {
298 range_node = rb_entry(next, struct regmap_range_node, node);
299 next = rb_next(&range_node->node);
300 rb_erase(&range_node->node, &map->range_tree);
301 kfree(range_node);
302 }
303
304 kfree(map->selector_work_buf);
305}
306
211/** 307/**
212 * regmap_init(): Initialise register map 308 * regmap_init(): Initialise register map
213 * 309 *
@@ -227,6 +323,8 @@ struct regmap *regmap_init(struct device *dev,
227{ 323{
228 struct regmap *map, **m; 324 struct regmap *map, **m;
229 int ret = -EINVAL; 325 int ret = -EINVAL;
326 enum regmap_endian reg_endian, val_endian;
327 int i, j;
230 328
231 if (!bus || !config) 329 if (!bus || !config)
232 goto err; 330 goto err;
@@ -246,11 +344,11 @@ struct regmap *regmap_init(struct device *dev,
246 map->lock = regmap_lock_mutex; 344 map->lock = regmap_lock_mutex;
247 map->unlock = regmap_unlock_mutex; 345 map->unlock = regmap_unlock_mutex;
248 } 346 }
249 map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
250 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); 347 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
251 map->format.pad_bytes = config->pad_bits / 8; 348 map->format.pad_bytes = config->pad_bits / 8;
252 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); 349 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
253 map->format.buf_size += map->format.pad_bytes; 350 map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
351 config->val_bits + config->pad_bits, 8);
254 map->reg_shift = config->pad_bits % 8; 352 map->reg_shift = config->pad_bits % 8;
255 if (config->reg_stride) 353 if (config->reg_stride)
256 map->reg_stride = config->reg_stride; 354 map->reg_stride = config->reg_stride;
@@ -275,6 +373,18 @@ struct regmap *regmap_init(struct device *dev,
275 map->read_flag_mask = bus->read_flag_mask; 373 map->read_flag_mask = bus->read_flag_mask;
276 } 374 }
277 375
376 reg_endian = config->reg_format_endian;
377 if (reg_endian == REGMAP_ENDIAN_DEFAULT)
378 reg_endian = bus->reg_format_endian_default;
379 if (reg_endian == REGMAP_ENDIAN_DEFAULT)
380 reg_endian = REGMAP_ENDIAN_BIG;
381
382 val_endian = config->val_format_endian;
383 if (val_endian == REGMAP_ENDIAN_DEFAULT)
384 val_endian = bus->val_format_endian_default;
385 if (val_endian == REGMAP_ENDIAN_DEFAULT)
386 val_endian = REGMAP_ENDIAN_BIG;
387
278 switch (config->reg_bits + map->reg_shift) { 388 switch (config->reg_bits + map->reg_shift) {
279 case 2: 389 case 2:
280 switch (config->val_bits) { 390 switch (config->val_bits) {
@@ -321,11 +431,29 @@ struct regmap *regmap_init(struct device *dev,
321 break; 431 break;
322 432
323 case 16: 433 case 16:
324 map->format.format_reg = regmap_format_16; 434 switch (reg_endian) {
435 case REGMAP_ENDIAN_BIG:
436 map->format.format_reg = regmap_format_16_be;
437 break;
438 case REGMAP_ENDIAN_NATIVE:
439 map->format.format_reg = regmap_format_16_native;
440 break;
441 default:
442 goto err_map;
443 }
325 break; 444 break;
326 445
327 case 32: 446 case 32:
328 map->format.format_reg = regmap_format_32; 447 switch (reg_endian) {
448 case REGMAP_ENDIAN_BIG:
449 map->format.format_reg = regmap_format_32_be;
450 break;
451 case REGMAP_ENDIAN_NATIVE:
452 map->format.format_reg = regmap_format_32_native;
453 break;
454 default:
455 goto err_map;
456 }
329 break; 457 break;
330 458
331 default: 459 default:
@@ -338,21 +466,47 @@ struct regmap *regmap_init(struct device *dev,
338 map->format.parse_val = regmap_parse_8; 466 map->format.parse_val = regmap_parse_8;
339 break; 467 break;
340 case 16: 468 case 16:
341 map->format.format_val = regmap_format_16; 469 switch (val_endian) {
342 map->format.parse_val = regmap_parse_16; 470 case REGMAP_ENDIAN_BIG:
471 map->format.format_val = regmap_format_16_be;
472 map->format.parse_val = regmap_parse_16_be;
473 break;
474 case REGMAP_ENDIAN_NATIVE:
475 map->format.format_val = regmap_format_16_native;
476 map->format.parse_val = regmap_parse_16_native;
477 break;
478 default:
479 goto err_map;
480 }
343 break; 481 break;
344 case 24: 482 case 24:
483 if (val_endian != REGMAP_ENDIAN_BIG)
484 goto err_map;
345 map->format.format_val = regmap_format_24; 485 map->format.format_val = regmap_format_24;
346 map->format.parse_val = regmap_parse_24; 486 map->format.parse_val = regmap_parse_24;
347 break; 487 break;
348 case 32: 488 case 32:
349 map->format.format_val = regmap_format_32; 489 switch (val_endian) {
350 map->format.parse_val = regmap_parse_32; 490 case REGMAP_ENDIAN_BIG:
491 map->format.format_val = regmap_format_32_be;
492 map->format.parse_val = regmap_parse_32_be;
493 break;
494 case REGMAP_ENDIAN_NATIVE:
495 map->format.format_val = regmap_format_32_native;
496 map->format.parse_val = regmap_parse_32_native;
497 break;
498 default:
499 goto err_map;
500 }
351 break; 501 break;
352 } 502 }
353 503
354 if (map->format.format_write) 504 if (map->format.format_write) {
505 if ((reg_endian != REGMAP_ENDIAN_BIG) ||
506 (val_endian != REGMAP_ENDIAN_BIG))
507 goto err_map;
355 map->use_single_rw = true; 508 map->use_single_rw = true;
509 }
356 510
357 if (!map->format.format_write && 511 if (!map->format.format_write &&
358 !(map->format.format_reg && map->format.format_val)) 512 !(map->format.format_reg && map->format.format_val))
@@ -364,26 +518,88 @@ struct regmap *regmap_init(struct device *dev,
364 goto err_map; 518 goto err_map;
365 } 519 }
366 520
367 regmap_debugfs_init(map, config->name); 521 map->range_tree = RB_ROOT;
522 for (i = 0; i < config->n_ranges; i++) {
523 const struct regmap_range_cfg *range_cfg = &config->ranges[i];
524 struct regmap_range_node *new;
525
526 /* Sanity check */
527 if (range_cfg->range_max < range_cfg->range_min ||
528 range_cfg->range_max > map->max_register ||
529 range_cfg->selector_reg > map->max_register ||
530 range_cfg->window_len == 0)
531 goto err_range;
532
 533 /* Make sure that this register range has no selector
 534 or data window within its boundary */
535 for (j = 0; j < config->n_ranges; j++) {
536 unsigned sel_reg = config->ranges[j].selector_reg;
537 unsigned win_min = config->ranges[j].window_start;
538 unsigned win_max = win_min +
539 config->ranges[j].window_len - 1;
540
541 if (range_cfg->range_min <= sel_reg &&
542 sel_reg <= range_cfg->range_max) {
543 goto err_range;
544 }
545
546 if (!(win_max < range_cfg->range_min ||
547 win_min > range_cfg->range_max)) {
548 goto err_range;
549 }
550 }
551
552 new = kzalloc(sizeof(*new), GFP_KERNEL);
553 if (new == NULL) {
554 ret = -ENOMEM;
555 goto err_range;
556 }
557
558 new->range_min = range_cfg->range_min;
559 new->range_max = range_cfg->range_max;
560 new->selector_reg = range_cfg->selector_reg;
561 new->selector_mask = range_cfg->selector_mask;
562 new->selector_shift = range_cfg->selector_shift;
563 new->window_start = range_cfg->window_start;
564 new->window_len = range_cfg->window_len;
565
566 if (_regmap_range_add(map, new) == false) {
567 kfree(new);
568 goto err_range;
569 }
570
571 if (map->selector_work_buf == NULL) {
572 map->selector_work_buf =
573 kzalloc(map->format.buf_size, GFP_KERNEL);
574 if (map->selector_work_buf == NULL) {
575 ret = -ENOMEM;
576 goto err_range;
577 }
578 }
579 }
368 580
369 ret = regcache_init(map, config); 581 ret = regcache_init(map, config);
370 if (ret < 0) 582 if (ret < 0)
371 goto err_free_workbuf; 583 goto err_range;
584
585 regmap_debugfs_init(map, config->name);
372 586
373 /* Add a devres resource for dev_get_regmap() */ 587 /* Add a devres resource for dev_get_regmap() */
374 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); 588 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
375 if (!m) { 589 if (!m) {
376 ret = -ENOMEM; 590 ret = -ENOMEM;
377 goto err_cache; 591 goto err_debugfs;
378 } 592 }
379 *m = map; 593 *m = map;
380 devres_add(dev, m); 594 devres_add(dev, m);
381 595
382 return map; 596 return map;
383 597
384err_cache: 598err_debugfs:
599 regmap_debugfs_exit(map);
385 regcache_exit(map); 600 regcache_exit(map);
386err_free_workbuf: 601err_range:
602 regmap_range_exit(map);
387 kfree(map->work_buf); 603 kfree(map->work_buf);
388err_map: 604err_map:
389 kfree(map); 605 kfree(map);
@@ -471,6 +687,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
471 687
472 return ret; 688 return ret;
473} 689}
690EXPORT_SYMBOL_GPL(regmap_reinit_cache);
474 691
475/** 692/**
476 * regmap_exit(): Free a previously allocated register map 693 * regmap_exit(): Free a previously allocated register map
@@ -479,6 +696,7 @@ void regmap_exit(struct regmap *map)
479{ 696{
480 regcache_exit(map); 697 regcache_exit(map);
481 regmap_debugfs_exit(map); 698 regmap_debugfs_exit(map);
699 regmap_range_exit(map);
482 if (map->bus->free_context) 700 if (map->bus->free_context)
483 map->bus->free_context(map->bus_context); 701 map->bus->free_context(map->bus_context);
484 kfree(map->work_buf); 702 kfree(map->work_buf);
@@ -524,6 +742,57 @@ struct regmap *dev_get_regmap(struct device *dev, const char *name)
524} 742}
525EXPORT_SYMBOL_GPL(dev_get_regmap); 743EXPORT_SYMBOL_GPL(dev_get_regmap);
526 744
745static int _regmap_select_page(struct regmap *map, unsigned int *reg,
746 unsigned int val_num)
747{
748 struct regmap_range_node *range;
749 void *orig_work_buf;
750 unsigned int win_offset;
751 unsigned int win_page;
752 bool page_chg;
753 int ret;
754
755 range = _regmap_range_lookup(map, *reg);
756 if (range) {
757 win_offset = (*reg - range->range_min) % range->window_len;
758 win_page = (*reg - range->range_min) / range->window_len;
759
760 if (val_num > 1) {
761 /* Bulk write shouldn't cross range boundary */
762 if (*reg + val_num - 1 > range->range_max)
763 return -EINVAL;
764
765 /* ... or single page boundary */
766 if (val_num > range->window_len - win_offset)
767 return -EINVAL;
768 }
769
 770 /* It is possible to have the selector register inside the
 771 data window. In that case it is present on every page and
 772 needs no page switching when accessed alone. */
773 if (val_num > 1 ||
774 range->window_start + win_offset != range->selector_reg) {
775 /* Use separate work_buf during page switching */
776 orig_work_buf = map->work_buf;
777 map->work_buf = map->selector_work_buf;
778
779 ret = _regmap_update_bits(map, range->selector_reg,
780 range->selector_mask,
781 win_page << range->selector_shift,
782 &page_chg);
783
784 map->work_buf = orig_work_buf;
785
786 if (ret < 0)
787 return ret;
788 }
789
790 *reg = range->window_start + win_offset;
791 }
792
793 return 0;
794}
795
527static int _regmap_raw_write(struct regmap *map, unsigned int reg, 796static int _regmap_raw_write(struct regmap *map, unsigned int reg,
528 const void *val, size_t val_len) 797 const void *val, size_t val_len)
529{ 798{
@@ -561,6 +830,10 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
561 } 830 }
562 } 831 }
563 832
833 ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes);
834 if (ret < 0)
835 return ret;
836
564 map->format.format_reg(map->work_buf, reg, map->reg_shift); 837 map->format.format_reg(map->work_buf, reg, map->reg_shift);
565 838
566 u8[0] |= map->write_flag_mask; 839 u8[0] |= map->write_flag_mask;
@@ -621,9 +894,18 @@ int _regmap_write(struct regmap *map, unsigned int reg,
621 } 894 }
622 } 895 }
623 896
897#ifdef LOG_DEVICE
898 if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
899 dev_info(map->dev, "%x <= %x\n", reg, val);
900#endif
901
624 trace_regmap_reg_write(map->dev, reg, val); 902 trace_regmap_reg_write(map->dev, reg, val);
625 903
626 if (map->format.format_write) { 904 if (map->format.format_write) {
905 ret = _regmap_select_page(map, &reg, 1);
906 if (ret < 0)
907 return ret;
908
627 map->format.format_write(map, reg, val); 909 map->format.format_write(map, reg, val);
628 910
629 trace_regmap_hw_write_start(map->dev, reg, 1); 911 trace_regmap_hw_write_start(map->dev, reg, 1);
@@ -781,6 +1063,10 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
781 u8 *u8 = map->work_buf; 1063 u8 *u8 = map->work_buf;
782 int ret; 1064 int ret;
783 1065
1066 ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes);
1067 if (ret < 0)
1068 return ret;
1069
784 map->format.format_reg(map->work_buf, reg, map->reg_shift); 1070 map->format.format_reg(map->work_buf, reg, map->reg_shift);
785 1071
786 /* 1072 /*
@@ -824,6 +1110,12 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
824 ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes); 1110 ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
825 if (ret == 0) { 1111 if (ret == 0) {
826 *val = map->format.parse_val(map->work_buf); 1112 *val = map->format.parse_val(map->work_buf);
1113
1114#ifdef LOG_DEVICE
1115 if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
1116 dev_info(map->dev, "%x => %x\n", reg, *val);
1117#endif
1118
827 trace_regmap_reg_read(map->dev, reg, *val); 1119 trace_regmap_reg_read(map->dev, reg, *val);
828 } 1120 }
829 1121
@@ -980,11 +1272,9 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
980 int ret; 1272 int ret;
981 unsigned int tmp, orig; 1273 unsigned int tmp, orig;
982 1274
983 map->lock(map);
984
985 ret = _regmap_read(map, reg, &orig); 1275 ret = _regmap_read(map, reg, &orig);
986 if (ret != 0) 1276 if (ret != 0)
987 goto out; 1277 return ret;
988 1278
989 tmp = orig & ~mask; 1279 tmp = orig & ~mask;
990 tmp |= val & mask; 1280 tmp |= val & mask;
@@ -996,9 +1286,6 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
996 *change = false; 1286 *change = false;
997 } 1287 }
998 1288
999out:
1000 map->unlock(map);
1001
1002 return ret; 1289 return ret;
1003} 1290}
1004 1291
@@ -1016,7 +1303,13 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
1016 unsigned int mask, unsigned int val) 1303 unsigned int mask, unsigned int val)
1017{ 1304{
1018 bool change; 1305 bool change;
1019 return _regmap_update_bits(map, reg, mask, val, &change); 1306 int ret;
1307
1308 map->lock(map);
1309 ret = _regmap_update_bits(map, reg, mask, val, &change);
1310 map->unlock(map);
1311
1312 return ret;
1020} 1313}
1021EXPORT_SYMBOL_GPL(regmap_update_bits); 1314EXPORT_SYMBOL_GPL(regmap_update_bits);
1022 1315
@@ -1036,7 +1329,12 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
1036 unsigned int mask, unsigned int val, 1329 unsigned int mask, unsigned int val,
1037 bool *change) 1330 bool *change)
1038{ 1331{
1039 return _regmap_update_bits(map, reg, mask, val, change); 1332 int ret;
1333
1334 map->lock(map);
1335 ret = _regmap_update_bits(map, reg, mask, val, change);
1336 map->unlock(map);
1337 return ret;
1040} 1338}
1041EXPORT_SYMBOL_GPL(regmap_update_bits_check); 1339EXPORT_SYMBOL_GPL(regmap_update_bits_check);
1042 1340
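Inside _regmap_select_page() the virtual register splits into a page and an offset with plain integer arithmetic. A worked example using the hypothetical range from the internal.h note (range_min 0x100, window at 0x20, window_len 0x80):

unsigned int reg = 0x233;
unsigned int win_offset = (reg - 0x100) % 0x80;	/* 0x33 */
unsigned int win_page   = (reg - 0x100) / 0x80;	/* 2 */
/* page 2 goes through the selector register, and the bus access is
 * redirected to window_start + win_offset = 0x20 + 0x33 = 0x53. */

The locking hunks are what make this possible: map->lock moves out of _regmap_update_bits() and into the exported regmap_update_bits() / regmap_update_bits_check() wrappers, so the page-selector write can reuse the helper from paths that already hold the lock.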
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index fb7c80fb721e..06b3207adebd 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -46,6 +46,25 @@ config BCMA_DRIVER_MIPS
46 46
47 If unsure, say N 47 If unsure, say N
48 48
49config BCMA_SFLASH
50 bool
51 depends on BCMA_DRIVER_MIPS && BROKEN
52 default y
53
54config BCMA_NFLASH
55 bool
56 depends on BCMA_DRIVER_MIPS && BROKEN
57 default y
58
59config BCMA_DRIVER_GMAC_CMN
60 bool "BCMA Broadcom GBIT MAC COMMON core driver"
61 depends on BCMA
62 help
63 Driver for the Broadcom GBIT MAC COMMON core attached to Broadcom
64 specific Advanced Microcontroller Bus.
65
66 If unsure, say N
67
49config BCMA_DEBUG 68config BCMA_DEBUG
50 bool "BCMA debugging" 69 bool "BCMA debugging"
51 depends on BCMA 70 depends on BCMA
diff --git a/drivers/bcma/Makefile b/drivers/bcma/Makefile
index 82de24e5340c..8ad42d41b2f2 100644
--- a/drivers/bcma/Makefile
+++ b/drivers/bcma/Makefile
@@ -1,8 +1,11 @@
1bcma-y += main.o scan.o core.o sprom.o 1bcma-y += main.o scan.o core.o sprom.o
2bcma-y += driver_chipcommon.o driver_chipcommon_pmu.o 2bcma-y += driver_chipcommon.o driver_chipcommon_pmu.o
3bcma-$(CONFIG_BCMA_SFLASH) += driver_chipcommon_sflash.o
4bcma-$(CONFIG_BCMA_NFLASH) += driver_chipcommon_nflash.o
3bcma-y += driver_pci.o 5bcma-y += driver_pci.o
4bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE) += driver_pci_host.o 6bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE) += driver_pci_host.o
5bcma-$(CONFIG_BCMA_DRIVER_MIPS) += driver_mips.o 7bcma-$(CONFIG_BCMA_DRIVER_MIPS) += driver_mips.o
8bcma-$(CONFIG_BCMA_DRIVER_GMAC_CMN) += driver_gmac_cmn.o
6bcma-$(CONFIG_BCMA_HOST_PCI) += host_pci.o 9bcma-$(CONFIG_BCMA_HOST_PCI) += host_pci.o
7bcma-$(CONFIG_BCMA_HOST_SOC) += host_soc.o 10bcma-$(CONFIG_BCMA_HOST_SOC) += host_soc.o
8obj-$(CONFIG_BCMA) += bcma.o 11obj-$(CONFIG_BCMA) += bcma.o
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index b81755bb4798..3cf9cc923cd2 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -10,6 +10,15 @@
10 10
11#define BCMA_CORE_SIZE 0x1000 11#define BCMA_CORE_SIZE 0x1000
12 12
13#define bcma_err(bus, fmt, ...) \
14 pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
15#define bcma_warn(bus, fmt, ...) \
16 pr_warn("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
17#define bcma_info(bus, fmt, ...) \
18 pr_info("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
19#define bcma_debug(bus, fmt, ...) \
20 pr_debug("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
21
13struct bcma_bus; 22struct bcma_bus;
14 23
15/* main.c */ 24/* main.c */
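
The bcma_err/bcma_warn/bcma_info/bcma_debug macros added above prefix every message with the bus number, so log lines from different buses can be told apart. A small stand-alone sketch of the pattern (bus_info and struct bus are hypothetical stand-ins; ##__VA_ARGS__ is the GNU extension the kernel already relies on):

#include <stdio.h>

struct bus { int num; };

/* Same shape as the bcma_* macros: splice a bus prefix into the
 * format string at the call site. */
#define bus_info(bus, fmt, ...) \
	printf("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)

int main(void)
{
	struct bus b = { .num = 0 };

	/* prints: bus0: Found chip with id 0x4331 */
	bus_info(&b, "Found chip with id 0x%04X\n", 0x4331);
	return 0;
}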
@@ -42,6 +51,28 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
42u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc); 51u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc);
43u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc); 52u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc);
44 53
54#ifdef CONFIG_BCMA_SFLASH
55/* driver_chipcommon_sflash.c */
56int bcma_sflash_init(struct bcma_drv_cc *cc);
57#else
58static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
59{
60 bcma_err(cc->core->bus, "Serial flash not supported\n");
61 return 0;
62}
63#endif /* CONFIG_BCMA_SFLASH */
64
65#ifdef CONFIG_BCMA_NFLASH
66/* driver_chipcommon_nflash.c */
67int bcma_nflash_init(struct bcma_drv_cc *cc);
68#else
69static inline int bcma_nflash_init(struct bcma_drv_cc *cc)
70{
71 bcma_err(cc->core->bus, "NAND flash not supported\n");
72 return 0;
73}
74#endif /* CONFIG_BCMA_NFLASH */
75
45#ifdef CONFIG_BCMA_HOST_PCI 76#ifdef CONFIG_BCMA_HOST_PCI
46/* host_pci.c */ 77/* host_pci.c */
47extern int __init bcma_host_pci_init(void); 78extern int __init bcma_host_pci_init(void);
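
The #ifdef blocks above follow a common kernel fallback idiom: when the Kconfig option is disabled, the header supplies a static inline stub with the same signature, so callers need no conditional compilation of their own. A hedged header-style sketch of the shape (CONFIG_FOO, foo_init and struct ctx are hypothetical):

struct ctx { int id; };

#ifdef CONFIG_FOO
/* Real implementation is compiled in from foo.c. */
int foo_init(struct ctx *ctx);
#else
/* Option off: the stub compiles away and callers stay unconditional. */
static inline int foo_init(struct ctx *ctx)
{
	(void)ctx;
	return 0;
}
#endif

This is what lets bcma_core_mips_flash_detect() further down call bcma_sflash_init() and bcma_nflash_init() unconditionally.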
diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c
index bc6e89212ad3..63c8b470536f 100644
--- a/drivers/bcma/core.c
+++ b/drivers/bcma/core.c
@@ -75,7 +75,7 @@ void bcma_core_set_clockmode(struct bcma_device *core,
75 udelay(10); 75 udelay(10);
76 } 76 }
77 if (i) 77 if (i)
78 pr_err("HT force timeout\n"); 78 bcma_err(core->bus, "HT force timeout\n");
79 break; 79 break;
80 case BCMA_CLKMODE_DYNAMIC: 80 case BCMA_CLKMODE_DYNAMIC:
81 bcma_set32(core, BCMA_CLKCTLST, ~BCMA_CLKCTLST_FORCEHT); 81 bcma_set32(core, BCMA_CLKCTLST, ~BCMA_CLKCTLST_FORCEHT);
@@ -102,9 +102,9 @@ void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on)
102 udelay(10); 102 udelay(10);
103 } 103 }
104 if (i) 104 if (i)
105 pr_err("PLL enable timeout\n"); 105 bcma_err(core->bus, "PLL enable timeout\n");
106 } else { 106 } else {
107 pr_warn("Disabling PLL not supported yet!\n"); 107 bcma_warn(core->bus, "Disabling PLL not supported yet!\n");
108 } 108 }
109} 109}
110EXPORT_SYMBOL_GPL(bcma_core_pll_ctl); 110EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);
@@ -120,8 +120,8 @@ u32 bcma_core_dma_translation(struct bcma_device *core)
120 else 120 else
121 return BCMA_DMA_TRANSLATION_DMA32_CMT; 121 return BCMA_DMA_TRANSLATION_DMA32_CMT;
122 default: 122 default:
123 pr_err("DMA translation unknown for host %d\n", 123 bcma_err(core->bus, "DMA translation unknown for host %d\n",
124 core->bus->hosttype); 124 core->bus->hosttype);
125 } 125 }
126 return BCMA_DMA_TRANSLATION_NONE; 126 return BCMA_DMA_TRANSLATION_NONE;
127} 127}
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index e9f1b3fd252c..a4c3ebcc4c86 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -44,7 +44,7 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
44 if (cc->capabilities & BCMA_CC_CAP_PMU) 44 if (cc->capabilities & BCMA_CC_CAP_PMU)
45 bcma_pmu_init(cc); 45 bcma_pmu_init(cc);
46 if (cc->capabilities & BCMA_CC_CAP_PCTL) 46 if (cc->capabilities & BCMA_CC_CAP_PCTL)
47 pr_err("Power control not implemented!\n"); 47 bcma_err(cc->core->bus, "Power control not implemented!\n");
48 48
49 if (cc->core->id.rev >= 16) { 49 if (cc->core->id.rev >= 16) {
50 if (cc->core->bus->sprom.leddc_on_time && 50 if (cc->core->bus->sprom.leddc_on_time &&
@@ -137,8 +137,7 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
137 | BCMA_CC_CORECTL_UARTCLKEN); 137 | BCMA_CC_CORECTL_UARTCLKEN);
138 } 138 }
139 } else { 139 } else {
140 pr_err("serial not supported on this device ccrev: 0x%x\n", 140 bcma_err(cc->core->bus, "serial not supported on this device ccrev: 0x%x\n", ccrev);
141 ccrev);
142 return; 141 return;
143 } 142 }
144 143
diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c
new file mode 100644
index 000000000000..574d62435bc2
--- /dev/null
+++ b/drivers/bcma/driver_chipcommon_nflash.c
@@ -0,0 +1,19 @@
1/*
2 * Broadcom specific AMBA
3 * ChipCommon NAND flash interface
4 *
5 * Licensed under the GNU/GPL. See COPYING for details.
6 */
7
8#include <linux/bcma/bcma.h>
9#include <linux/bcma/bcma_driver_chipcommon.h>
10#include <linux/delay.h>
11
12#include "bcma_private.h"
13
14/* Initialize NAND flash access */
15int bcma_nflash_init(struct bcma_drv_cc *cc)
16{
17 bcma_err(cc->core->bus, "NAND flash support is broken\n");
18 return 0;
19}
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index a058842f14fd..c9a4f46c5143 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -3,7 +3,8 @@
3 * ChipCommon Power Management Unit driver 3 * ChipCommon Power Management Unit driver
4 * 4 *
5 * Copyright 2009, Michael Buesch <m@bues.ch> 5 * Copyright 2009, Michael Buesch <m@bues.ch>
6 * Copyright 2007, Broadcom Corporation 6 * Copyright 2007, 2011, Broadcom Corporation
7 * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
7 * 8 *
8 * Licensed under the GNU/GPL. See COPYING for details. 9 * Licensed under the GNU/GPL. See COPYING for details.
9 */ 10 */
@@ -54,39 +55,19 @@ void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
54} 55}
55EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset); 56EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset);
56 57
57static void bcma_pmu_pll_init(struct bcma_drv_cc *cc)
58{
59 struct bcma_bus *bus = cc->core->bus;
60
61 switch (bus->chipinfo.id) {
62 case 0x4313:
63 case 0x4331:
64 case 43224:
65 case 43225:
66 break;
67 default:
68 pr_err("PLL init unknown for device 0x%04X\n",
69 bus->chipinfo.id);
70 }
71}
72
73static void bcma_pmu_resources_init(struct bcma_drv_cc *cc) 58static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
74{ 59{
75 struct bcma_bus *bus = cc->core->bus; 60 struct bcma_bus *bus = cc->core->bus;
76 u32 min_msk = 0, max_msk = 0; 61 u32 min_msk = 0, max_msk = 0;
77 62
78 switch (bus->chipinfo.id) { 63 switch (bus->chipinfo.id) {
79 case 0x4313: 64 case BCMA_CHIP_ID_BCM4313:
80 min_msk = 0x200D; 65 min_msk = 0x200D;
81 max_msk = 0xFFFF; 66 max_msk = 0xFFFF;
82 break; 67 break;
83 case 0x4331:
84 case 43224:
85 case 43225:
86 break;
87 default: 68 default:
88 pr_err("PMU resource config unknown for device 0x%04X\n", 69 bcma_debug(bus, "PMU resource config unknown or not needed for device 0x%04X\n",
89 bus->chipinfo.id); 70 bus->chipinfo.id);
90 } 71 }
91 72
92 /* Set the resource masks. */ 73 /* Set the resource masks. */
@@ -94,22 +75,9 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
94 bcma_cc_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk); 75 bcma_cc_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk);
95 if (max_msk) 76 if (max_msk)
96 bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk); 77 bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
97}
98
99void bcma_pmu_swreg_init(struct bcma_drv_cc *cc)
100{
101 struct bcma_bus *bus = cc->core->bus;
102 78
103 switch (bus->chipinfo.id) { 79 /* Add some delay; allow resources to come up and settle. */
104 case 0x4313: 80 mdelay(2);
105 case 0x4331:
106 case 43224:
107 case 43225:
108 break;
109 default:
110 pr_err("PMU switch/regulators init unknown for device "
111 "0x%04X\n", bus->chipinfo.id);
112 }
113} 81}
114 82
115/* Disable to allow reading SPROM. Don't know the advantages of enabling it. */ 83
@@ -123,8 +91,11 @@ void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable)
123 val |= BCMA_CHIPCTL_4331_EXTPA_EN; 91 val |= BCMA_CHIPCTL_4331_EXTPA_EN;
124 if (bus->chipinfo.pkg == 9 || bus->chipinfo.pkg == 11) 92 if (bus->chipinfo.pkg == 9 || bus->chipinfo.pkg == 11)
125 val |= BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5; 93 val |= BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
94 else if (bus->chipinfo.rev > 0)
95 val |= BCMA_CHIPCTL_4331_EXTPA_EN2;
126 } else { 96 } else {
127 val &= ~BCMA_CHIPCTL_4331_EXTPA_EN; 97 val &= ~BCMA_CHIPCTL_4331_EXTPA_EN;
98 val &= ~BCMA_CHIPCTL_4331_EXTPA_EN2;
128 val &= ~BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5; 99 val &= ~BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
129 } 100 }
130 bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val); 101 bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val);
@@ -135,26 +106,38 @@ void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
135 struct bcma_bus *bus = cc->core->bus; 106 struct bcma_bus *bus = cc->core->bus;
136 107
137 switch (bus->chipinfo.id) { 108 switch (bus->chipinfo.id) {
138 case 0x4313: 109 case BCMA_CHIP_ID_BCM4313:
139 bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7); 110 /* enable 12 mA drive strength for 4313 and set chipControl
111 register bit 1 */
112 bcma_chipco_chipctl_maskset(cc, 0,
113 ~BCMA_CCTRL_4313_12MA_LED_DRIVE,
114 BCMA_CCTRL_4313_12MA_LED_DRIVE);
140 break; 115 break;
141 case 0x4331: 116 case BCMA_CHIP_ID_BCM4331:
142 /* BCM4331 workaround is SPROM-related, we put it in sprom.c */ 117 case BCMA_CHIP_ID_BCM43431:
118 /* Ext PA lines must be enabled for tx on BCM4331 */
119 bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true);
143 break; 120 break;
144 case 43224: 121 case BCMA_CHIP_ID_BCM43224:
122 case BCMA_CHIP_ID_BCM43421:
123 /* enable 12 mA drive strenth for 43224 and set chipControl
124 register bit 15 */
145 if (bus->chipinfo.rev == 0) { 125 if (bus->chipinfo.rev == 0) {
146 pr_err("Workarounds for 43224 rev 0 not fully " 126 bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL,
147 "implemented\n"); 127 ~BCMA_CCTRL_43224_GPIO_TOGGLE,
148 bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x00F000F0); 128 BCMA_CCTRL_43224_GPIO_TOGGLE);
129 bcma_chipco_chipctl_maskset(cc, 0,
130 ~BCMA_CCTRL_43224A0_12MA_LED_DRIVE,
131 BCMA_CCTRL_43224A0_12MA_LED_DRIVE);
149 } else { 132 } else {
150 bcma_chipco_chipctl_maskset(cc, 0, ~0, 0xF0); 133 bcma_chipco_chipctl_maskset(cc, 0,
134 ~BCMA_CCTRL_43224B0_12MA_LED_DRIVE,
135 BCMA_CCTRL_43224B0_12MA_LED_DRIVE);
151 } 136 }
152 break; 137 break;
153 case 43225:
154 break;
155 default: 138 default:
156 pr_err("Workarounds unknown for device 0x%04X\n", 139 bcma_debug(bus, "Workarounds unknown or not needed for device 0x%04X\n",
157 bus->chipinfo.id); 140 bus->chipinfo.id);
158 } 141 }
159} 142}
160 143
@@ -165,8 +148,8 @@ void bcma_pmu_init(struct bcma_drv_cc *cc)
165 pmucap = bcma_cc_read32(cc, BCMA_CC_PMU_CAP); 148 pmucap = bcma_cc_read32(cc, BCMA_CC_PMU_CAP);
166 cc->pmu.rev = (pmucap & BCMA_CC_PMU_CAP_REVISION); 149 cc->pmu.rev = (pmucap & BCMA_CC_PMU_CAP_REVISION);
167 150
168 pr_debug("Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev, 151 bcma_debug(cc->core->bus, "Found rev %u PMU (capabilities 0x%08X)\n",
169 pmucap); 152 cc->pmu.rev, pmucap);
170 153
171 if (cc->pmu.rev == 1) 154 if (cc->pmu.rev == 1)
172 bcma_cc_mask32(cc, BCMA_CC_PMU_CTL, 155 bcma_cc_mask32(cc, BCMA_CC_PMU_CTL,
@@ -175,12 +158,7 @@ void bcma_pmu_init(struct bcma_drv_cc *cc)
175 bcma_cc_set32(cc, BCMA_CC_PMU_CTL, 158 bcma_cc_set32(cc, BCMA_CC_PMU_CTL,
176 BCMA_CC_PMU_CTL_NOILPONW); 159 BCMA_CC_PMU_CTL_NOILPONW);
177 160
178 if (cc->core->id.id == 0x4329 && cc->core->id.rev == 2)
179 pr_err("Fix for 4329b0 bad LPOM state not implemented!\n");
180
181 bcma_pmu_pll_init(cc);
182 bcma_pmu_resources_init(cc); 161 bcma_pmu_resources_init(cc);
183 bcma_pmu_swreg_init(cc);
184 bcma_pmu_workarounds(cc); 162 bcma_pmu_workarounds(cc);
185} 163}
186 164
@@ -189,23 +167,22 @@ u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc)
189 struct bcma_bus *bus = cc->core->bus; 167 struct bcma_bus *bus = cc->core->bus;
190 168
191 switch (bus->chipinfo.id) { 169 switch (bus->chipinfo.id) {
192 case 0x4716: 170 case BCMA_CHIP_ID_BCM4716:
193 case 0x4748: 171 case BCMA_CHIP_ID_BCM4748:
194 case 47162: 172 case BCMA_CHIP_ID_BCM47162:
195 case 0x4313: 173 case BCMA_CHIP_ID_BCM4313:
196 case 0x5357: 174 case BCMA_CHIP_ID_BCM5357:
197 case 0x4749: 175 case BCMA_CHIP_ID_BCM4749:
198 case 53572: 176 case BCMA_CHIP_ID_BCM53572:
199 /* always 20MHz */ 177 /* always 20MHz */
200 return 20000 * 1000; 178 return 20000 * 1000;
201 case 0x5356: 179 case BCMA_CHIP_ID_BCM5356:
202 case 0x5300: 180 case BCMA_CHIP_ID_BCM4706:
203 /* always 25MHz */ 181 /* always 25MHz */
204 return 25000 * 1000; 182 return 25000 * 1000;
205 default: 183 default:
206 pr_warn("No ALP clock specified for %04X device, " 184 bcma_warn(bus, "No ALP clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
207 "pmu rev. %d, using default %d Hz\n", 185 bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK);
208 bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK);
209 } 186 }
210 return BCMA_CC_PMU_ALP_CLOCK; 187 return BCMA_CC_PMU_ALP_CLOCK;
211} 188}
@@ -222,7 +199,8 @@ static u32 bcma_pmu_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
222 199
223 BUG_ON(!m || m > 4); 200 BUG_ON(!m || m > 4);
224 201
225 if (bus->chipinfo.id == 0x5357 || bus->chipinfo.id == 0x4749) { 202 if (bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
203 bus->chipinfo.id == BCMA_CHIP_ID_BCM4749) {
226 /* Detect failure in clock setting */ 204 /* Detect failure in clock setting */
227 tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT); 205 tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
228 if (tmp & 0x40000) 206 if (tmp & 0x40000)
@@ -248,33 +226,62 @@ static u32 bcma_pmu_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
248 return (fc / div) * 1000000; 226 return (fc / div) * 1000000;
249} 227}
250 228
229static u32 bcma_pmu_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
230{
231 u32 tmp, ndiv, p1div, p2div;
232 u32 clock;
233
234 BUG_ON(!m || m > 4);
235
236 /* Get N, P1 and P2 dividers to determine CPU clock */
237 tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PMU6_4706_PROCPLL_OFF);
238 ndiv = (tmp & BCMA_CC_PMU6_4706_PROC_NDIV_INT_MASK)
239 >> BCMA_CC_PMU6_4706_PROC_NDIV_INT_SHIFT;
240 p1div = (tmp & BCMA_CC_PMU6_4706_PROC_P1DIV_MASK)
241 >> BCMA_CC_PMU6_4706_PROC_P1DIV_SHIFT;
242 p2div = (tmp & BCMA_CC_PMU6_4706_PROC_P2DIV_MASK)
243 >> BCMA_CC_PMU6_4706_PROC_P2DIV_SHIFT;
244
245 tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
246 if (tmp & BCMA_CC_CHIPST_4706_PKG_OPTION)
247 /* Low cost bonding: Fixed reference clock 25MHz and m = 4 */
248 clock = (25000000 / 4) * ndiv * p2div / p1div;
249 else
250 /* Fixed reference clock 25MHz and m = 2 */
251 clock = (25000000 / 2) * ndiv * p2div / p1div;
252
253 if (m == BCMA_CC_PMU5_MAINPLL_SSB)
254 clock = clock / 4;
255
256 return clock;
257}
258
251/* query bus clock frequency for PMU-enabled chipcommon */ 259/* query bus clock frequency for PMU-enabled chipcommon */
252u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc) 260u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
253{ 261{
254 struct bcma_bus *bus = cc->core->bus; 262 struct bcma_bus *bus = cc->core->bus;
255 263
256 switch (bus->chipinfo.id) { 264 switch (bus->chipinfo.id) {
257 case 0x4716: 265 case BCMA_CHIP_ID_BCM4716:
258 case 0x4748: 266 case BCMA_CHIP_ID_BCM4748:
259 case 47162: 267 case BCMA_CHIP_ID_BCM47162:
260 return bcma_pmu_clock(cc, BCMA_CC_PMU4716_MAINPLL_PLL0, 268 return bcma_pmu_clock(cc, BCMA_CC_PMU4716_MAINPLL_PLL0,
261 BCMA_CC_PMU5_MAINPLL_SSB); 269 BCMA_CC_PMU5_MAINPLL_SSB);
262 case 0x5356: 270 case BCMA_CHIP_ID_BCM5356:
263 return bcma_pmu_clock(cc, BCMA_CC_PMU5356_MAINPLL_PLL0, 271 return bcma_pmu_clock(cc, BCMA_CC_PMU5356_MAINPLL_PLL0,
264 BCMA_CC_PMU5_MAINPLL_SSB); 272 BCMA_CC_PMU5_MAINPLL_SSB);
265 case 0x5357: 273 case BCMA_CHIP_ID_BCM5357:
266 case 0x4749: 274 case BCMA_CHIP_ID_BCM4749:
267 return bcma_pmu_clock(cc, BCMA_CC_PMU5357_MAINPLL_PLL0, 275 return bcma_pmu_clock(cc, BCMA_CC_PMU5357_MAINPLL_PLL0,
268 BCMA_CC_PMU5_MAINPLL_SSB); 276 BCMA_CC_PMU5_MAINPLL_SSB);
269 case 0x5300: 277 case BCMA_CHIP_ID_BCM4706:
270 return bcma_pmu_clock(cc, BCMA_CC_PMU4706_MAINPLL_PLL0, 278 return bcma_pmu_clock_bcm4706(cc, BCMA_CC_PMU4706_MAINPLL_PLL0,
271 BCMA_CC_PMU5_MAINPLL_SSB); 279 BCMA_CC_PMU5_MAINPLL_SSB);
272 case 53572: 280 case BCMA_CHIP_ID_BCM53572:
273 return 75000000; 281 return 75000000;
274 default: 282 default:
275 pr_warn("No backplane clock specified for %04X device, " 283 bcma_warn(bus, "No backplane clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
276 "pmu rev. %d, using default %d Hz\n", 284 bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
277 bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
278 } 285 }
279 return BCMA_CC_PMU_HT_CLOCK; 286 return BCMA_CC_PMU_HT_CLOCK;
280} 287}
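
The new bcma_pmu_clock_bcm4706() above derives the clock from a fixed 25 MHz reference and the N/P1/P2 dividers read out of the PLL control register, with m depending on the package bonding. A worked example of that arithmetic (ndiv, p1div and p2div here are made up for illustration, not read from hardware):

#include <stdio.h>

int main(void)
{
	/* Standard bonding: reference is 25 MHz / m with m = 2. */
	unsigned int ndiv = 48, p1div = 1, p2div = 1;
	unsigned int clock = (25000000 / 2) * ndiv * p2div / p1div;

	printf("%u Hz\n", clock);	/* 600000000 Hz, i.e. 600 MHz */
	return 0;
}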
@@ -284,17 +291,21 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc)
284{ 291{
285 struct bcma_bus *bus = cc->core->bus; 292 struct bcma_bus *bus = cc->core->bus;
286 293
287 if (bus->chipinfo.id == 53572) 294 if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53572)
288 return 300000000; 295 return 300000000;
289 296
290 if (cc->pmu.rev >= 5) { 297 if (cc->pmu.rev >= 5) {
291 u32 pll; 298 u32 pll;
292 switch (bus->chipinfo.id) { 299 switch (bus->chipinfo.id) {
293 case 0x5356: 300 case BCMA_CHIP_ID_BCM4706:
301 return bcma_pmu_clock_bcm4706(cc,
302 BCMA_CC_PMU4706_MAINPLL_PLL0,
303 BCMA_CC_PMU5_MAINPLL_CPU);
304 case BCMA_CHIP_ID_BCM5356:
294 pll = BCMA_CC_PMU5356_MAINPLL_PLL0; 305 pll = BCMA_CC_PMU5356_MAINPLL_PLL0;
295 break; 306 break;
296 case 0x5357: 307 case BCMA_CHIP_ID_BCM5357:
297 case 0x4749: 308 case BCMA_CHIP_ID_BCM4749:
298 pll = BCMA_CC_PMU5357_MAINPLL_PLL0; 309 pll = BCMA_CC_PMU5357_MAINPLL_PLL0;
299 break; 310 break;
300 default: 311 default:
@@ -302,10 +313,188 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc)
302 break; 313 break;
303 } 314 }
304 315
305 /* TODO: if (bus->chipinfo.id == 0x5300)
306 return si_4706_pmu_clock(sih, osh, cc, PMU4706_MAINPLL_PLL0, PMU5_MAINPLL_CPU); */
307 return bcma_pmu_clock(cc, pll, BCMA_CC_PMU5_MAINPLL_CPU); 316 return bcma_pmu_clock(cc, pll, BCMA_CC_PMU5_MAINPLL_CPU);
308 } 317 }
309 318
310 return bcma_pmu_get_clockcontrol(cc); 319 return bcma_pmu_get_clockcontrol(cc);
311} 320}
321
322static void bcma_pmu_spuravoid_pll_write(struct bcma_drv_cc *cc, u32 offset,
323 u32 value)
324{
325 bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
326 bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
327}
328
329void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
330{
331 u32 tmp = 0;
332 u8 phypll_offset = 0;
333 u8 bcm5357_bcm43236_p1div[] = {0x1, 0x5, 0x5};
334 u8 bcm5357_bcm43236_ndiv[] = {0x30, 0xf6, 0xfc};
335 struct bcma_bus *bus = cc->core->bus;
336
337 switch (bus->chipinfo.id) {
338 case BCMA_CHIP_ID_BCM5357:
339 case BCMA_CHIP_ID_BCM4749:
340 case BCMA_CHIP_ID_BCM53572:
341 /* 5357[ab]0, 43236[ab]0, and 6362b0 */
342
343 /* BCM5357 needs to touch PLL1_PLLCTL[02],
344 so offset PLL0_PLLCTL[02] by 6 */
345 phypll_offset = (bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
346 bus->chipinfo.id == BCMA_CHIP_ID_BCM4749 ||
347 bus->chipinfo.id == BCMA_CHIP_ID_BCM53572) ? 6 : 0;
348
349 /* RMW only the P1 divider */
350 bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
351 BCMA_CC_PMU_PLL_CTL0 + phypll_offset);
352 tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
353 tmp &= (~(BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK));
354 tmp |= (bcm5357_bcm43236_p1div[spuravoid] << BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT);
355 bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
356
357 /* RMW only the int feedback divider */
358 bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
359 BCMA_CC_PMU_PLL_CTL2 + phypll_offset);
360 tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
361 tmp &= ~(BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK);
362 tmp |= (bcm5357_bcm43236_ndiv[spuravoid]) << BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT;
363 bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
364
365 tmp = 1 << 10;
366 break;
367
368 case BCMA_CHIP_ID_BCM4331:
369 case BCMA_CHIP_ID_BCM43431:
370 if (spuravoid == 2) {
371 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
372 0x11500014);
373 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
374 0x0FC00a08);
375 } else if (spuravoid == 1) {
376 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
377 0x11500014);
378 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
379 0x0F600a08);
380 } else {
381 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
382 0x11100014);
383 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
384 0x03000a08);
385 }
386 tmp = 1 << 10;
387 break;
388
389 case BCMA_CHIP_ID_BCM43224:
390 case BCMA_CHIP_ID_BCM43225:
391 case BCMA_CHIP_ID_BCM43421:
392 if (spuravoid == 1) {
393 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
394 0x11500010);
395 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
396 0x000C0C06);
397 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
398 0x0F600a08);
399 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
400 0x00000000);
401 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
402 0x2001E920);
403 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
404 0x88888815);
405 } else {
406 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
407 0x11100010);
408 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
409 0x000c0c06);
410 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
411 0x03000a08);
412 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
413 0x00000000);
414 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
415 0x200005c0);
416 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
417 0x88888815);
418 }
419 tmp = 1 << 10;
420 break;
421
422 case BCMA_CHIP_ID_BCM4716:
423 case BCMA_CHIP_ID_BCM4748:
424 case BCMA_CHIP_ID_BCM47162:
425 if (spuravoid == 1) {
426 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
427 0x11500060);
428 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
429 0x080C0C06);
430 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
431 0x0F600000);
432 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
433 0x00000000);
434 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
435 0x2001E924);
436 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
437 0x88888815);
438 } else {
439 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
440 0x11100060);
441 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
442 0x080c0c06);
443 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
444 0x03000000);
445 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
446 0x00000000);
447 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
448 0x200005c0);
449 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
450 0x88888815);
451 }
452
453 tmp = 3 << 9;
454 break;
455
456 case BCMA_CHIP_ID_BCM43227:
457 case BCMA_CHIP_ID_BCM43228:
458 case BCMA_CHIP_ID_BCM43428:
459 /* LCNXN */
460 /* PLL Settings for spur avoidance on/off mode,
461 no on2 support for 43228A0 */
462 if (spuravoid == 1) {
463 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
464 0x01100014);
465 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
466 0x040C0C06);
467 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
468 0x03140A08);
469 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
470 0x00333333);
471 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
472 0x202C2820);
473 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
474 0x88888815);
475 } else {
476 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
477 0x11100014);
478 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
479 0x040c0c06);
480 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
481 0x03000a08);
482 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
483 0x00000000);
484 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
485 0x200005c0);
486 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
487 0x88888815);
488 }
489 tmp = 1 << 10;
490 break;
491 default:
492 bcma_err(bus, "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
493 bus->chipinfo.id);
494 break;
495 }
496
497 tmp |= bcma_cc_read32(cc, BCMA_CC_PMU_CTL);
498 bcma_cc_write32(cc, BCMA_CC_PMU_CTL, tmp);
499}
500EXPORT_SYMBOL_GPL(bcma_pmu_spuravoid_pllupdate);
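
bcma_pmu_spuravoid_pllupdate() above reaches the PLL control registers indirectly: it writes the register index to BCMA_CC_PLLCTL_ADDR, then reads or writes BCMA_CC_PLLCTL_DATA, and for the BCM5357 family it read-modify-writes only the P1 divider and NDIV fields so the rest of each register is preserved. A toy sketch of that masked read-modify-write (the array stands in for MMIO; the mask and shift values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define P1DIV_MASK  0x0000000f
#define P1DIV_SHIFT 0

static uint32_t pllctl[8];	/* fake PLL control register file */

static void pll_set_field(unsigned int reg, uint32_t mask,
			  unsigned int shift, uint32_t val)
{
	uint32_t tmp = pllctl[reg];	/* read */

	tmp &= ~mask;			/* clear only this field */
	tmp |= (val << shift) & mask;	/* modify */
	pllctl[reg] = tmp;		/* write back */
}

int main(void)
{
	pllctl[0] = 0xabcdef03;
	pll_set_field(0, P1DIV_MASK, P1DIV_SHIFT, 0x5);
	/* prints 0xabcdef05: all bits outside the field are intact */
	printf("0x%08x\n", (unsigned)pllctl[0]);
	return 0;
}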
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
new file mode 100644
index 000000000000..6e157a58a1d7
--- /dev/null
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -0,0 +1,19 @@
1/*
2 * Broadcom specific AMBA
3 * ChipCommon serial flash interface
4 *
5 * Licensed under the GNU/GPL. See COPYING for details.
6 */
7
8#include <linux/bcma/bcma.h>
9#include <linux/bcma/bcma_driver_chipcommon.h>
10#include <linux/delay.h>
11
12#include "bcma_private.h"
13
14/* Initialize serial flash access */
15int bcma_sflash_init(struct bcma_drv_cc *cc)
16{
17 bcma_err(cc->core->bus, "Serial flash support is broken\n");
18 return 0;
19}
diff --git a/drivers/bcma/driver_gmac_cmn.c b/drivers/bcma/driver_gmac_cmn.c
new file mode 100644
index 000000000000..834225f65e8f
--- /dev/null
+++ b/drivers/bcma/driver_gmac_cmn.c
@@ -0,0 +1,14 @@
1/*
2 * Broadcom specific AMBA
3 * GBIT MAC COMMON Core
4 *
5 * Licensed under the GNU/GPL. See COPYING for details.
6 */
7
8#include "bcma_private.h"
9#include <linux/bcma/bcma.h>
10
11void __devinit bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc)
12{
13 mutex_init(&gc->phy_mutex);
14}
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index c3e9dff4224e..cc65b45b4368 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -22,15 +22,15 @@
22/* The 47162a0 hangs when reading MIPS DMP registers */ 22
23static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev) 23static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev)
24{ 24{
25 return dev->bus->chipinfo.id == 47162 && dev->bus->chipinfo.rev == 0 && 25 return dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM47162 &&
26 dev->id.id == BCMA_CORE_MIPS_74K; 26 dev->bus->chipinfo.rev == 0 && dev->id.id == BCMA_CORE_MIPS_74K;
27} 27}
28 28
29/* The 5357b0 hangs when reading USB20H DMP registers */ 29/* The 5357b0 hangs when reading USB20H DMP registers */
30static inline bool bcma_core_mips_bcm5357b0_quirk(struct bcma_device *dev) 30static inline bool bcma_core_mips_bcm5357b0_quirk(struct bcma_device *dev)
31{ 31{
32 return (dev->bus->chipinfo.id == 0x5357 || 32 return (dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
33 dev->bus->chipinfo.id == 0x4749) && 33 dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM4749) &&
34 dev->bus->chipinfo.pkg == 11 && 34 dev->bus->chipinfo.pkg == 11 &&
35 dev->id.id == BCMA_CORE_USB20_HOST; 35 dev->id.id == BCMA_CORE_USB20_HOST;
36} 36}
@@ -131,7 +131,7 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
131 /* backplane irq line is in use, find out who uses 131 /* backplane irq line is in use, find out who uses
132 * it and set user to irq 0 132 * it and set user to irq 0
133 */ 133 */
134 list_for_each_entry_reverse(core, &bus->cores, list) { 134 list_for_each_entry(core, &bus->cores, list) {
135 if ((1 << bcma_core_mips_irqflag(core)) == 135 if ((1 << bcma_core_mips_irqflag(core)) ==
136 oldirqflag) { 136 oldirqflag) {
137 bcma_core_mips_set_irq(core, 0); 137 bcma_core_mips_set_irq(core, 0);
@@ -143,8 +143,8 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
143 1 << irqflag); 143 1 << irqflag);
144 } 144 }
145 145
146 pr_info("set_irq: core 0x%04x, irq %d => %d\n", 146 bcma_info(bus, "set_irq: core 0x%04x, irq %d => %d\n",
147 dev->id.id, oldirq + 2, irq + 2); 147 dev->id.id, oldirq + 2, irq + 2);
148} 148}
149 149
150static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq) 150static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
@@ -161,7 +161,7 @@ static void bcma_core_mips_dump_irq(struct bcma_bus *bus)
161{ 161{
162 struct bcma_device *core; 162 struct bcma_device *core;
163 163
164 list_for_each_entry_reverse(core, &bus->cores, list) { 164 list_for_each_entry(core, &bus->cores, list) {
165 bcma_core_mips_print_irq(core, bcma_core_mips_irq(core)); 165 bcma_core_mips_print_irq(core, bcma_core_mips_irq(core));
166 } 166 }
167} 167}
@@ -173,7 +173,7 @@ u32 bcma_cpu_clock(struct bcma_drv_mips *mcore)
173 if (bus->drv_cc.capabilities & BCMA_CC_CAP_PMU) 173 if (bus->drv_cc.capabilities & BCMA_CC_CAP_PMU)
174 return bcma_pmu_get_clockcpu(&bus->drv_cc); 174 return bcma_pmu_get_clockcpu(&bus->drv_cc);
175 175
176 pr_err("No PMU available, need this to get the cpu clock\n"); 176 bcma_err(bus, "No PMU available, need this to get the cpu clock\n");
177 return 0; 177 return 0;
178} 178}
179EXPORT_SYMBOL(bcma_cpu_clock); 179EXPORT_SYMBOL(bcma_cpu_clock);
@@ -185,10 +185,11 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
185 switch (bus->drv_cc.capabilities & BCMA_CC_CAP_FLASHT) { 185 switch (bus->drv_cc.capabilities & BCMA_CC_CAP_FLASHT) {
186 case BCMA_CC_FLASHT_STSER: 186 case BCMA_CC_FLASHT_STSER:
187 case BCMA_CC_FLASHT_ATSER: 187 case BCMA_CC_FLASHT_ATSER:
188 pr_err("Serial flash not supported.\n"); 188 bcma_debug(bus, "Found serial flash\n");
189 bcma_sflash_init(&bus->drv_cc);
189 break; 190 break;
190 case BCMA_CC_FLASHT_PARA: 191 case BCMA_CC_FLASHT_PARA:
191 pr_info("found parallel flash.\n"); 192 bcma_debug(bus, "Found parallel flash\n");
192 bus->drv_cc.pflash.window = 0x1c000000; 193 bus->drv_cc.pflash.window = 0x1c000000;
193 bus->drv_cc.pflash.window_size = 0x02000000; 194 bus->drv_cc.pflash.window_size = 0x02000000;
194 195
@@ -199,7 +200,15 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
199 bus->drv_cc.pflash.buswidth = 2; 200 bus->drv_cc.pflash.buswidth = 2;
200 break; 201 break;
201 default: 202 default:
202 pr_err("flash not supported.\n"); 203 bcma_err(bus, "Flash type not supported\n");
204 }
205
206 if (bus->drv_cc.core->id.rev == 38 ||
207 bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
208 if (bus->drv_cc.capabilities & BCMA_CC_CAP_NFLASH) {
209 bcma_debug(bus, "Found NAND flash\n");
210 bcma_nflash_init(&bus->drv_cc);
211 }
203 } 212 }
204} 213}
205 214
@@ -209,13 +218,13 @@ void bcma_core_mips_init(struct bcma_drv_mips *mcore)
209 struct bcma_device *core; 218 struct bcma_device *core;
210 bus = mcore->core->bus; 219 bus = mcore->core->bus;
211 220
212 pr_info("Initializing MIPS core...\n"); 221 bcma_info(bus, "Initializing MIPS core...\n");
213 222
214 if (!mcore->setup_done) 223 if (!mcore->setup_done)
215 mcore->assigned_irqs = 1; 224 mcore->assigned_irqs = 1;
216 225
217 /* Assign IRQs to all cores on the bus */ 226 /* Assign IRQs to all cores on the bus */
218 list_for_each_entry_reverse(core, &bus->cores, list) { 227 list_for_each_entry(core, &bus->cores, list) {
219 int mips_irq; 228 int mips_irq;
220 if (core->irq) 229 if (core->irq)
221 continue; 230 continue;
@@ -244,7 +253,7 @@ void bcma_core_mips_init(struct bcma_drv_mips *mcore)
244 break; 253 break;
245 } 254 }
246 } 255 }
247 pr_info("IRQ reconfiguration done\n"); 256 bcma_info(bus, "IRQ reconfiguration done\n");
248 bcma_core_mips_dump_irq(bus); 257 bcma_core_mips_dump_irq(bus);
249 258
250 if (mcore->setup_done) 259 if (mcore->setup_done)
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index 9a96f14c8f47..c32ebd537abe 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -232,17 +232,19 @@ void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc)
232int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, 232int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
233 bool enable) 233 bool enable)
234{ 234{
235 struct pci_dev *pdev = pc->core->bus->host_pci; 235 struct pci_dev *pdev;
236 u32 coremask, tmp; 236 u32 coremask, tmp;
237 int err = 0; 237 int err = 0;
238 238
239 if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) { 239 if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
240 /* This bcma device is not on a PCI host-bus. So the IRQs are 240 /* This bcma device is not on a PCI host-bus. So the IRQs are
241 * not routed through the PCI core. 241 * not routed through the PCI core.
242 * So we must not enable routing through the PCI core. */ 242 * So we must not enable routing through the PCI core. */
243 goto out; 243 goto out;
244 } 244 }
245 245
246 pdev = pc->core->bus->host_pci;
247
246 err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp); 248 err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
247 if (err) 249 if (err)
248 goto out; 250 goto out;
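
The hunk above also fixes an ordering bug: the old code initialized pdev from pc->core->bus->host_pci at its declaration, dereferencing pc before the new !pc guard could run. Minimal shape of the fix (type and field names here are illustrative):

struct bus  { int host; };
struct core { struct bus *bus; };
struct ctrl { struct core *core; };

static int irq_ctl(struct ctrl *pc)
{
	int host;			/* no initializer that touches pc */

	if (!pc)			/* validate first... */
		return 0;

	host = pc->core->bus->host;	/* ...dereference second */
	return host;
}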
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index b9a86edfec39..cbae2c231336 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -36,7 +36,7 @@ bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
36 return false; 36 return false;
37 37
38 if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) { 38 if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
39 pr_info("This PCI core is disabled and not working\n"); 39 bcma_info(bus, "This PCI core is disabled and not working\n");
40 return false; 40 return false;
41 } 41 }
42 42
@@ -215,7 +215,8 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
215 } else { 215 } else {
216 writel(val, mmio); 216 writel(val, mmio);
217 217
218 if (chipid == 0x4716 || chipid == 0x4748) 218 if (chipid == BCMA_CHIP_ID_BCM4716 ||
219 chipid == BCMA_CHIP_ID_BCM4748)
219 readl(mmio); 220 readl(mmio);
220 } 221 }
221 222
@@ -340,6 +341,7 @@ static u8 __devinit bcma_find_pci_capability(struct bcma_drv_pci *pc,
340 */ 341 */
341static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc) 342static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
342{ 343{
344 struct bcma_bus *bus = pc->core->bus;
343 u8 cap_ptr, root_ctrl, root_cap, dev; 345 u8 cap_ptr, root_ctrl, root_cap, dev;
344 u16 val16; 346 u16 val16;
345 int i; 347 int i;
@@ -378,7 +380,8 @@ static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
378 udelay(10); 380 udelay(10);
379 } 381 }
380 if (val16 == 0x1) 382 if (val16 == 0x1)
381 pr_err("PCI: Broken device in slot %d\n", dev); 383 bcma_err(bus, "PCI: Broken device in slot %d\n",
384 dev);
382 } 385 }
383 } 386 }
384} 387}
@@ -391,11 +394,11 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
391 u32 pci_membase_1G; 394 u32 pci_membase_1G;
392 unsigned long io_map_base; 395 unsigned long io_map_base;
393 396
394 pr_info("PCIEcore in host mode found\n"); 397 bcma_info(bus, "PCIEcore in host mode found\n");
395 398
396 pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL); 399 pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
397 if (!pc_host) { 400 if (!pc_host) {
398 pr_err("can not allocate memory"); 401 bcma_err(bus, "can not allocate memory");
399 return; 402 return;
400 } 403 }
401 404
@@ -434,13 +437,14 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
434 * as mips can't generate 64-bit address on the 437 * as mips can't generate 64-bit address on the
435 * backplane. 438 * backplane.
436 */ 439 */
437 if (bus->chipinfo.id == 0x4716 || bus->chipinfo.id == 0x4748) { 440 if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4716 ||
441 bus->chipinfo.id == BCMA_CHIP_ID_BCM4748) {
438 pc_host->mem_resource.start = BCMA_SOC_PCI_MEM; 442 pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
439 pc_host->mem_resource.end = BCMA_SOC_PCI_MEM + 443 pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
440 BCMA_SOC_PCI_MEM_SZ - 1; 444 BCMA_SOC_PCI_MEM_SZ - 1;
441 pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0, 445 pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
442 BCMA_CORE_PCI_SBTOPCI_MEM | BCMA_SOC_PCI_MEM); 446 BCMA_CORE_PCI_SBTOPCI_MEM | BCMA_SOC_PCI_MEM);
443 } else if (bus->chipinfo.id == 0x5300) { 447 } else if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
444 tmp = BCMA_CORE_PCI_SBTOPCI_MEM; 448 tmp = BCMA_CORE_PCI_SBTOPCI_MEM;
445 tmp |= BCMA_CORE_PCI_SBTOPCI_PREF; 449 tmp |= BCMA_CORE_PCI_SBTOPCI_PREF;
446 tmp |= BCMA_CORE_PCI_SBTOPCI_BURST; 450 tmp |= BCMA_CORE_PCI_SBTOPCI_BURST;
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 6c05cf470f96..a6e5672c67e7 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -18,7 +18,7 @@ static void bcma_host_pci_switch_core(struct bcma_device *core)
18 pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2, 18 pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2,
19 core->wrap); 19 core->wrap);
20 core->bus->mapped_core = core; 20 core->bus->mapped_core = core;
21 pr_debug("Switched to core: 0x%X\n", core->id.id); 21 bcma_debug(core->bus, "Switched to core: 0x%X\n", core->id.id);
22} 22}
23 23
24/* Provides access to the requested core. Returns base offset that has to be 24/* Provides access to the requested core. Returns base offset that has to be
@@ -188,7 +188,7 @@ static int __devinit bcma_host_pci_probe(struct pci_dev *dev,
188 188
189 /* SSB needed additional powering up, do we have any AMBA PCI cards? */ 189 /* SSB needed additional powering up, do we have any AMBA PCI cards? */
190 if (!pci_is_pcie(dev)) 190 if (!pci_is_pcie(dev))
191 pr_err("PCI card detected, report problems.\n"); 191 bcma_err(bus, "PCI card detected, report problems.\n");
192 192
193 /* Map MMIO */ 193 /* Map MMIO */
194 err = -ENOMEM; 194 err = -ENOMEM;
@@ -268,9 +268,11 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
268 268
269static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = { 269static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
270 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, 270 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
271 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },
271 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) }, 272 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
272 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, 273 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
273 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, 274 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
275 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
274 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, 276 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
275 { 0, }, 277 { 0, },
276}; 278};
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 7e138ec21357..758af9ccdef0 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -61,6 +61,13 @@ static struct bus_type bcma_bus_type = {
61 .dev_attrs = bcma_device_attrs, 61 .dev_attrs = bcma_device_attrs,
62}; 62};
63 63
64static u16 bcma_cc_core_id(struct bcma_bus *bus)
65{
66 if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
67 return BCMA_CORE_4706_CHIPCOMMON;
68 return BCMA_CORE_CHIPCOMMON;
69}
70
64struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid) 71struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid)
65{ 72{
66 struct bcma_device *core; 73 struct bcma_device *core;
@@ -91,10 +98,12 @@ static int bcma_register_cores(struct bcma_bus *bus)
91 list_for_each_entry(core, &bus->cores, list) { 98 list_for_each_entry(core, &bus->cores, list) {
92 /* We support those cores ourselves */ 99
93 switch (core->id.id) { 100 switch (core->id.id) {
101 case BCMA_CORE_4706_CHIPCOMMON:
94 case BCMA_CORE_CHIPCOMMON: 102 case BCMA_CORE_CHIPCOMMON:
95 case BCMA_CORE_PCI: 103 case BCMA_CORE_PCI:
96 case BCMA_CORE_PCIE: 104 case BCMA_CORE_PCIE:
97 case BCMA_CORE_MIPS_74K: 105 case BCMA_CORE_MIPS_74K:
106 case BCMA_CORE_4706_MAC_GBIT_COMMON:
98 continue; 107 continue;
99 } 108 }
100 109
@@ -118,8 +127,9 @@ static int bcma_register_cores(struct bcma_bus *bus)
118 127
119 err = device_register(&core->dev); 128 err = device_register(&core->dev);
120 if (err) { 129 if (err) {
121 pr_err("Could not register dev for core 0x%03X\n", 130 bcma_err(bus,
122 core->id.id); 131 "Could not register dev for core 0x%03X\n",
132 core->id.id);
123 continue; 133 continue;
124 } 134 }
125 core->dev_registered = true; 135 core->dev_registered = true;
@@ -151,12 +161,12 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
151 /* Scan for devices (cores) */ 161 /* Scan for devices (cores) */
152 err = bcma_bus_scan(bus); 162 err = bcma_bus_scan(bus);
153 if (err) { 163 if (err) {
154 pr_err("Failed to scan: %d\n", err); 164 bcma_err(bus, "Failed to scan: %d\n", err);
155 return -1; 165 return -1;
156 } 166 }
157 167
158 /* Init CC core */ 168 /* Init CC core */
159 core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON); 169 core = bcma_find_core(bus, bcma_cc_core_id(bus));
160 if (core) { 170 if (core) {
161 bus->drv_cc.core = core; 171 bus->drv_cc.core = core;
162 bcma_core_chipcommon_init(&bus->drv_cc); 172 bcma_core_chipcommon_init(&bus->drv_cc);
@@ -176,17 +186,24 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
176 bcma_core_pci_init(&bus->drv_pci); 186 bcma_core_pci_init(&bus->drv_pci);
177 } 187 }
178 188
189 /* Init GBIT MAC COMMON core */
190 core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
191 if (core) {
192 bus->drv_gmac_cmn.core = core;
193 bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
194 }
195
179 /* Try to get SPROM */ 196 /* Try to get SPROM */
180 err = bcma_sprom_get(bus); 197 err = bcma_sprom_get(bus);
181 if (err == -ENOENT) { 198 if (err == -ENOENT) {
182 pr_err("No SPROM available\n"); 199 bcma_err(bus, "No SPROM available\n");
183 } else if (err) 200 } else if (err)
184 pr_err("Failed to get SPROM: %d\n", err); 201 bcma_err(bus, "Failed to get SPROM: %d\n", err);
185 202
186 /* Register found cores */ 203 /* Register found cores */
187 bcma_register_cores(bus); 204 bcma_register_cores(bus);
188 205
189 pr_info("Bus registered\n"); 206 bcma_info(bus, "Bus registered\n");
190 207
191 return 0; 208 return 0;
192} 209}
@@ -207,14 +224,14 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
207 bcma_init_bus(bus); 224 bcma_init_bus(bus);
208 225
209 match.manuf = BCMA_MANUF_BCM; 226 match.manuf = BCMA_MANUF_BCM;
210 match.id = BCMA_CORE_CHIPCOMMON; 227 match.id = bcma_cc_core_id(bus);
211 match.class = BCMA_CL_SIM; 228 match.class = BCMA_CL_SIM;
212 match.rev = BCMA_ANY_REV; 229 match.rev = BCMA_ANY_REV;
213 230
214 /* Scan for chip common core */ 231 /* Scan for chip common core */
215 err = bcma_bus_scan_early(bus, &match, core_cc); 232 err = bcma_bus_scan_early(bus, &match, core_cc);
216 if (err) { 233 if (err) {
217 pr_err("Failed to scan for common core: %d\n", err); 234 bcma_err(bus, "Failed to scan for common core: %d\n", err);
218 return -1; 235 return -1;
219 } 236 }
220 237
@@ -226,12 +243,12 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
226 /* Scan for mips core */ 243 /* Scan for mips core */
227 err = bcma_bus_scan_early(bus, &match, core_mips); 244 err = bcma_bus_scan_early(bus, &match, core_mips);
228 if (err) { 245 if (err) {
229 pr_err("Failed to scan for mips core: %d\n", err); 246 bcma_err(bus, "Failed to scan for mips core: %d\n", err);
230 return -1; 247 return -1;
231 } 248 }
232 249
233 /* Init CC core */ 250 /* Init CC core */
234 core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON); 251 core = bcma_find_core(bus, bcma_cc_core_id(bus));
235 if (core) { 252 if (core) {
236 bus->drv_cc.core = core; 253 bus->drv_cc.core = core;
237 bcma_core_chipcommon_init(&bus->drv_cc); 254 bcma_core_chipcommon_init(&bus->drv_cc);
@@ -244,7 +261,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
244 bcma_core_mips_init(&bus->drv_mips); 261 bcma_core_mips_init(&bus->drv_mips);
245 } 262 }
246 263
247 pr_info("Early bus registered\n"); 264 bcma_info(bus, "Early bus registered\n");
248 265
249 return 0; 266 return 0;
250} 267}
@@ -270,8 +287,7 @@ int bcma_bus_resume(struct bcma_bus *bus)
270 struct bcma_device *core; 287 struct bcma_device *core;
271 288
272 /* Init CC core */ 289 /* Init CC core */
273 core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON); 290 if (bus->drv_cc.core) {
274 if (core) {
275 bus->drv_cc.setup_done = false; 291 bus->drv_cc.setup_done = false;
276 bcma_core_chipcommon_init(&bus->drv_cc); 292 bcma_core_chipcommon_init(&bus->drv_cc);
277 } 293 }
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 5ed0718fc660..8d0b57164018 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -21,6 +21,7 @@ struct bcma_device_id_name {
21}; 21};
22 22
23static const struct bcma_device_id_name bcma_arm_device_names[] = { 23static const struct bcma_device_id_name bcma_arm_device_names[] = {
24 { BCMA_CORE_4706_MAC_GBIT_COMMON, "BCM4706 GBit MAC Common" },
24 { BCMA_CORE_ARM_1176, "ARM 1176" }, 25 { BCMA_CORE_ARM_1176, "ARM 1176" },
25 { BCMA_CORE_ARM_7TDMI, "ARM 7TDMI" }, 26 { BCMA_CORE_ARM_7TDMI, "ARM 7TDMI" },
26 { BCMA_CORE_ARM_CM3, "ARM CM3" }, 27 { BCMA_CORE_ARM_CM3, "ARM CM3" },
@@ -28,6 +29,11 @@ static const struct bcma_device_id_name bcma_arm_device_names[] = {
28 29
29static const struct bcma_device_id_name bcma_bcm_device_names[] = { 30static const struct bcma_device_id_name bcma_bcm_device_names[] = {
30 { BCMA_CORE_OOB_ROUTER, "OOB Router" }, 31 { BCMA_CORE_OOB_ROUTER, "OOB Router" },
32 { BCMA_CORE_4706_CHIPCOMMON, "BCM4706 ChipCommon" },
33 { BCMA_CORE_4706_SOC_RAM, "BCM4706 SOC RAM" },
34 { BCMA_CORE_4706_MAC_GBIT, "BCM4706 GBit MAC" },
35 { BCMA_CORE_AMEMC, "AMEMC (DDR)" },
36 { BCMA_CORE_ALTA, "ALTA (I2S)" },
31 { BCMA_CORE_INVALID, "Invalid" }, 37 { BCMA_CORE_INVALID, "Invalid" },
32 { BCMA_CORE_CHIPCOMMON, "ChipCommon" }, 38 { BCMA_CORE_CHIPCOMMON, "ChipCommon" },
33 { BCMA_CORE_ILINE20, "ILine 20" }, 39 { BCMA_CORE_ILINE20, "ILine 20" },
@@ -289,11 +295,15 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
289 295
290 /* check if component is a core at all */ 296 /* check if component is a core at all */
291 if (wrappers[0] + wrappers[1] == 0) { 297 if (wrappers[0] + wrappers[1] == 0) {
292 /* we could save addrl of the router 298 /* Some specific cores don't need wrappers */
293 if (cid == BCMA_CORE_OOB_ROUTER) 299 switch (core->id.id) {
294 */ 300 case BCMA_CORE_4706_MAC_GBIT_COMMON:
295 bcma_erom_skip_component(bus, eromptr); 301 /* Not used yet: case BCMA_CORE_OOB_ROUTER: */
296 return -ENXIO; 302 break;
303 default:
304 bcma_erom_skip_component(bus, eromptr);
305 return -ENXIO;
306 }
297 } 307 }
298 308
299 if (bcma_erom_is_bridge(bus, eromptr)) { 309 if (bcma_erom_is_bridge(bus, eromptr)) {
@@ -334,7 +344,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
334 if (tmp <= 0) { 344 if (tmp <= 0) {
335 return -EILSEQ; 345 return -EILSEQ;
336 } else { 346 } else {
337 pr_info("Bridge found\n"); 347 bcma_info(bus, "Bridge found\n");
338 return -ENXIO; 348 return -ENXIO;
339 } 349 }
340 } 350 }
@@ -421,8 +431,8 @@ void bcma_init_bus(struct bcma_bus *bus)
421 chipinfo->id = (tmp & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT; 431 chipinfo->id = (tmp & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT;
422 chipinfo->rev = (tmp & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT; 432 chipinfo->rev = (tmp & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT;
423 chipinfo->pkg = (tmp & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT; 433 chipinfo->pkg = (tmp & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT;
424 pr_info("Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n", 434 bcma_info(bus, "Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n",
425 chipinfo->id, chipinfo->rev, chipinfo->pkg); 435 chipinfo->id, chipinfo->rev, chipinfo->pkg);
426 436
427 bus->init_done = true; 437 bus->init_done = true;
428} 438}
@@ -452,8 +462,10 @@ int bcma_bus_scan(struct bcma_bus *bus)
452 while (eromptr < eromend) { 462 while (eromptr < eromend) {
453 struct bcma_device *other_core; 463 struct bcma_device *other_core;
454 struct bcma_device *core = kzalloc(sizeof(*core), GFP_KERNEL); 464 struct bcma_device *core = kzalloc(sizeof(*core), GFP_KERNEL);
455 if (!core) 465 if (!core) {
456 return -ENOMEM; 466 err = -ENOMEM;
467 goto out;
468 }
457 INIT_LIST_HEAD(&core->list); 469 INIT_LIST_HEAD(&core->list);
458 core->bus = bus; 470 core->bus = bus;
459 471
@@ -468,7 +480,7 @@ int bcma_bus_scan(struct bcma_bus *bus)
468 } else if (err == -ESPIPE) { 480 } else if (err == -ESPIPE) {
469 break; 481 break;
470 } 482 }
471 return err; 483 goto out;
472 } 484 }
473 485
474 core->core_index = core_num++; 486 core->core_index = core_num++;
@@ -476,19 +488,20 @@ int bcma_bus_scan(struct bcma_bus *bus)
476 other_core = bcma_find_core_reverse(bus, core->id.id); 488 other_core = bcma_find_core_reverse(bus, core->id.id);
477 core->core_unit = (other_core == NULL) ? 0 : other_core->core_unit + 1; 489 core->core_unit = (other_core == NULL) ? 0 : other_core->core_unit + 1;
478 490
479 pr_info("Core %d found: %s " 491 bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
480 "(manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n", 492 core->core_index, bcma_device_name(&core->id),
481 core->core_index, bcma_device_name(&core->id), 493 core->id.manuf, core->id.id, core->id.rev,
482 core->id.manuf, core->id.id, core->id.rev, 494 core->id.class);
483 core->id.class);
484 495
485 list_add(&core->list, &bus->cores); 496 list_add_tail(&core->list, &bus->cores);
486 } 497 }
487 498
499 err = 0;
500out:
488 if (bus->hosttype == BCMA_HOSTTYPE_SOC) 501 if (bus->hosttype == BCMA_HOSTTYPE_SOC)
489 iounmap(eromptr); 502 iounmap(eromptr);
490 503
491 return 0; 504 return err;
492} 505}
493 506
494int __init bcma_bus_scan_early(struct bcma_bus *bus, 507int __init bcma_bus_scan_early(struct bcma_bus *bus,
@@ -528,21 +541,21 @@ int __init bcma_bus_scan_early(struct bcma_bus *bus,
528 else if (err == -ESPIPE) 541 else if (err == -ESPIPE)
529 break; 542 break;
530 else if (err < 0) 543 else if (err < 0)
531 return err; 544 goto out;
532 545
533 core->core_index = core_num++; 546 core->core_index = core_num++;
534 bus->nr_cores++; 547 bus->nr_cores++;
535 pr_info("Core %d found: %s " 548 bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
536 "(manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n", 549 core->core_index, bcma_device_name(&core->id),
537 core->core_index, bcma_device_name(&core->id), 550 core->id.manuf, core->id.id, core->id.rev,
538 core->id.manuf, core->id.id, core->id.rev, 551 core->id.class);
539 core->id.class);
540 552
541 list_add(&core->list, &bus->cores); 553 list_add_tail(&core->list, &bus->cores);
542 err = 0; 554 err = 0;
543 break; 555 break;
544 } 556 }
545 557
558out:
546 if (bus->hosttype == BCMA_HOSTTYPE_SOC) 559 if (bus->hosttype == BCMA_HOSTTYPE_SOC)
547 iounmap(eromptr); 560 iounmap(eromptr);
548 561
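
The scan.c hunks switch list_add() to list_add_tail() when registering discovered cores, and the matching driver_mips.c hunks above drop the list_for_each_entry_reverse() walks: cores now sit in the bus list in discovery order and are iterated the same way. A toy contrast of the two insertion orders (plain arrays stand in for the kernel's struct list_head):

#include <stdio.h>

int main(void)
{
	int discovered[3] = { 1, 2, 3 };	/* core indices as found */
	int head[3], tail[3];

	for (int i = 0; i < 3; i++) {
		head[2 - i] = discovered[i];	/* list_add(): newest first */
		tail[i] = discovered[i];	/* list_add_tail(): in order */
	}
	printf("list_add:      %d %d %d\n", head[0], head[1], head[2]);
	printf("list_add_tail: %d %d %d\n", tail[0], tail[1], tail[2]);
	return 0;
}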
diff --git a/drivers/bcma/scan.h b/drivers/bcma/scan.h
index 113e6a66884c..30eb475e4d19 100644
--- a/drivers/bcma/scan.h
+++ b/drivers/bcma/scan.h
@@ -27,7 +27,7 @@
27#define SCAN_CIB_NMW 0x0007C000 27#define SCAN_CIB_NMW 0x0007C000
28#define SCAN_CIB_NMW_SHIFT 14 28#define SCAN_CIB_NMW_SHIFT 14
29#define SCAN_CIB_NSW 0x00F80000 29#define SCAN_CIB_NSW 0x00F80000
30#define SCAN_CIB_NSW_SHIFT 17 30#define SCAN_CIB_NSW_SHIFT 19
31#define SCAN_CIB_REV 0xFF000000 31#define SCAN_CIB_REV 0xFF000000
32#define SCAN_CIB_REV_SHIFT 24 32#define SCAN_CIB_REV_SHIFT 24
33 33
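
The scan.h fix above is easy to sanity-check: SCAN_CIB_NSW masks bits 19-23 (the lowest set bit of 0x00F80000 is bit 19), so the old shift of 17 misread the slave-wrapper count. A quick verification:

#include <stdio.h>

int main(void)
{
	unsigned int cib = 0x00280000;	/* example CIB word with NSW = 5 */

	printf("%u\n", (cib & 0x00F80000) >> 19);	/* prints 5 */
	printf("%u\n", (cib & 0x00F80000) >> 17);	/* prints 20: wrong */
	return 0;
}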
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index c7f93359acb0..9ea4627dc0c2 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -60,11 +60,11 @@ static int bcma_fill_sprom_with_fallback(struct bcma_bus *bus,
60 if (err) 60 if (err)
61 goto fail; 61 goto fail;
62 62
63 pr_debug("Using SPROM revision %d provided by" 63 bcma_debug(bus, "Using SPROM revision %d provided by platform.\n",
64 " platform.\n", bus->sprom.revision); 64 bus->sprom.revision);
65 return 0; 65 return 0;
66fail: 66fail:
67 pr_warn("Using fallback SPROM failed (err %d)\n", err); 67 bcma_warn(bus, "Using fallback SPROM failed (err %d)\n", err);
68 return err; 68 return err;
69} 69}
70 70
@@ -468,11 +468,11 @@ static bool bcma_sprom_ext_available(struct bcma_bus *bus)
468 /* older chipcommon revisions use chip status register */ 468 /* older chipcommon revisions use chip status register */
469 chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT); 469 chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
470 switch (bus->chipinfo.id) { 470 switch (bus->chipinfo.id) {
471 case 0x4313: 471 case BCMA_CHIP_ID_BCM4313:
472 present_mask = BCMA_CC_CHIPST_4313_SPROM_PRESENT; 472 present_mask = BCMA_CC_CHIPST_4313_SPROM_PRESENT;
473 break; 473 break;
474 474
475 case 0x4331: 475 case BCMA_CHIP_ID_BCM4331:
476 present_mask = BCMA_CC_CHIPST_4331_SPROM_PRESENT; 476 present_mask = BCMA_CC_CHIPST_4331_SPROM_PRESENT;
477 break; 477 break;
478 478
@@ -494,20 +494,22 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
494 494
495 chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT); 495 chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
496 switch (bus->chipinfo.id) { 496 switch (bus->chipinfo.id) {
497 case 0x4313: 497 case BCMA_CHIP_ID_BCM4313:
498 present = chip_status & BCMA_CC_CHIPST_4313_OTP_PRESENT; 498 present = chip_status & BCMA_CC_CHIPST_4313_OTP_PRESENT;
499 break; 499 break;
500 500
501 case 0x4331: 501 case BCMA_CHIP_ID_BCM4331:
502 present = chip_status & BCMA_CC_CHIPST_4331_OTP_PRESENT; 502 present = chip_status & BCMA_CC_CHIPST_4331_OTP_PRESENT;
503 break; 503 break;
504 504
505 case 43224: 505 case BCMA_CHIP_ID_BCM43224:
506 case 43225: 506 case BCMA_CHIP_ID_BCM43225:
507 /* for these chips OTP is always available */ 507 /* for these chips OTP is always available */
508 present = true; 508 present = true;
509 break; 509 break;
510 510 case BCMA_CHIP_ID_BCM43228:
511 present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT;
512 break;
511 default: 513 default:
512 present = false; 514 present = false;
513 break; 515 break;
@@ -579,13 +581,15 @@ int bcma_sprom_get(struct bcma_bus *bus)
579 if (!sprom) 581 if (!sprom)
580 return -ENOMEM; 582 return -ENOMEM;
581 583
582 if (bus->chipinfo.id == 0x4331) 584 if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
585 bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
583 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false); 586 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
584 587
585 pr_debug("SPROM offset 0x%x\n", offset); 588 bcma_debug(bus, "SPROM offset 0x%x\n", offset);
586 bcma_sprom_read(bus, offset, sprom); 589 bcma_sprom_read(bus, offset, sprom);
587 590
588 if (bus->chipinfo.id == 0x4331) 591 if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
592 bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
589 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true); 593 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
590 594
591 err = bcma_sprom_valid(sprom); 595 err = bcma_sprom_valid(sprom);
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index acda773b3720..38aa6dda6b81 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -763,16 +763,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
763 { 763 {
764 case CMD_TARGET_STATUS: 764 case CMD_TARGET_STATUS:
765 /* Pass it up to the upper layers... */ 765 /* Pass it up to the upper layers... */
766 if( ei->ScsiStatus) 766 if (!ei->ScsiStatus) {
767 {
768#if 0
769 printk(KERN_WARNING "cciss: cmd %p "
770 "has SCSI Status = %x\n",
771 c, ei->ScsiStatus);
772#endif
773 cmd->result |= (ei->ScsiStatus << 1);
774 }
775 else { /* scsi status is zero??? How??? */
776 767
777 /* Ordinarily, this case should never happen, but there is a bug 768 /* Ordinarily, this case should never happen, but there is a bug
778 in some released firmware revisions that allows it to happen 769 in some released firmware revisions that allows it to happen
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index e54e31b02b88..3fbef018ce55 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -411,7 +411,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
411 + mdev->ldev->md.al_offset + mdev->al_tr_pos; 411 + mdev->ldev->md.al_offset + mdev->al_tr_pos;
412 412
413 if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) 413 if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
414 drbd_chk_io_error(mdev, 1, true); 414 drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
415 415
416 if (++mdev->al_tr_pos > 416 if (++mdev->al_tr_pos >
417 div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT)) 417 div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
@@ -876,7 +876,11 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
876 unsigned int enr, count = 0; 876 unsigned int enr, count = 0;
877 struct lc_element *e; 877 struct lc_element *e;
878 878
879 if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { 879 /* this should be an empty REQ_FLUSH */
880 if (size == 0)
881 return 0;
882
883 if (size < 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
880 dev_err(DEV, "sector: %llus, size: %d\n", 884 dev_err(DEV, "sector: %llus, size: %d\n",
881 (unsigned long long)sector, size); 885 (unsigned long long)sector, size);
882 return 0; 886 return 0;
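
[Editor's note] The __drbd_set_out_of_sync() hunk above splits the old single check so that a zero-sized request, i.e. an empty REQ_FLUSH, is accepted as a no-op instead of being rejected as malformed. A standalone sketch of the reworked validation; the DRBD_MAX_BIO_SIZE value is assumed and the return codes are compressed for the sketch.

#include <stdio.h>

#define DRBD_MAX_BIO_SIZE (1 << 17)   /* assumed value for the sketch */

/* Returns: 0 = empty flush, nothing to do; -1 = malformed; 1 = proceed. */
static int check_size(int size)
{
	if (size == 0)                    /* empty REQ_FLUSH */
		return 0;
	if (size < 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE)
		return -1;                /* negative, unaligned, or too big */
	return 1;
}

int main(void)
{
	printf("%d %d %d\n", check_size(0), check_size(513), check_size(4096));
	return 0;                         /* prints: 0 -1 1 */
}
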
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index b5c5ff53cb57..d84566496746 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -889,6 +889,7 @@ struct bm_aio_ctx {
889 unsigned int done; 889 unsigned int done;
890 unsigned flags; 890 unsigned flags;
891#define BM_AIO_COPY_PAGES 1 891#define BM_AIO_COPY_PAGES 1
892#define BM_WRITE_ALL_PAGES 2
892 int error; 893 int error;
893 struct kref kref; 894 struct kref kref;
894}; 895};
@@ -1059,7 +1060,8 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
1059 if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx) 1060 if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
1060 break; 1061 break;
1061 if (rw & WRITE) { 1062 if (rw & WRITE) {
1062 if (bm_test_page_unchanged(b->bm_pages[i])) { 1063 if (!(flags & BM_WRITE_ALL_PAGES) &&
1064 bm_test_page_unchanged(b->bm_pages[i])) {
1063 dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i); 1065 dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
1064 continue; 1066 continue;
1065 } 1067 }
@@ -1096,7 +1098,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
1096 1098
1097 if (ctx->error) { 1099 if (ctx->error) {
1098 dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n"); 1100 dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
1099 drbd_chk_io_error(mdev, 1, true); 1101 drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
1100 err = -EIO; /* ctx->error ? */ 1102 err = -EIO; /* ctx->error ? */
1101 } 1103 }
1102 1104
@@ -1141,6 +1143,17 @@ int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
1141} 1143}
1142 1144
1143/** 1145/**
1146 * drbd_bm_write_all() - Write the whole bitmap to its on disk location.
1147 * @mdev: DRBD device.
1148 *
1149 * Will write all pages.
1150 */
1151int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local)
1152{
1153 return bm_rw(mdev, WRITE, BM_WRITE_ALL_PAGES, 0);
1154}
1155
1156/**
1144 * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed. 1157 * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
1145 * @mdev: DRBD device. 1158 * @mdev: DRBD device.
1146 * @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages 1159 * @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages
@@ -1212,7 +1225,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
1212 wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done); 1225 wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
1213 1226
1214 if (ctx->error) 1227 if (ctx->error)
1215 drbd_chk_io_error(mdev, 1, true); 1228 drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
1216 /* that should force detach, so the in memory bitmap will be 1229 /* that should force detach, so the in memory bitmap will be
1217 * gone in a moment as well. */ 1230 * gone in a moment as well. */
1218 1231
@@ -1475,10 +1488,17 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
1475 first_word = 0; 1488 first_word = 0;
1476 spin_lock_irq(&b->bm_lock); 1489 spin_lock_irq(&b->bm_lock);
1477 } 1490 }
1478
1479 /* last page (respectively only page, for first page == last page) */ 1491 /* last page (respectively only page, for first page == last page) */
1480 last_word = MLPP(el >> LN2_BPL); 1492 last_word = MLPP(el >> LN2_BPL);
1481 bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word); 1493
1494 /* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
1495 * ==> e = 32767, el = 32768, last_page = 2,
1496 * and now last_word = 0.
1497 * We do not want to touch last_page in this case,
1498 * as we did not allocate it, it is not present in bitmap->bm_pages.
1499 */
1500 if (last_word)
1501 bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
1482 1502
1483 /* possibly trailing bits. 1503 /* possibly trailing bits.
1484 * example: (e & 63) == 63, el will be e+1. 1504 * example: (e & 63) == 63, el will be e+1.
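
[Editor's note] The if (last_word) guard added to _drbd_bm_set_bits() covers bitmaps whose bit count is an exact multiple of the per-page word capacity: the computed word offset wraps to zero and would otherwise touch a page that was never allocated. The arithmetic from the in-diff comment, reproduced with macro definitions assumed to match their apparent kernel meaning (64-bit longs, 4 KiB pages):

#include <stdio.h>

#define LN2_BPL 6                      /* log2(64) bits per long, assumed */
#define LWPP (4096 / sizeof(long))     /* longs per 4 KiB page */
#define MLPP(x) ((x) & (LWPP - 1))     /* word offset within a page, assumed */

int main(void)
{
	unsigned long el = 32768;                   /* one past the last bit */
	unsigned long last_word = MLPP(el >> LN2_BPL);

	/* 32768 bits / 64 = 512 words = exactly one full page, so the
	 * offset wraps to 0; writing to the next page would touch memory
	 * that bm_pages[] never allocated, hence the guard. */
	printf("last_word = %lu\n", last_word);     /* prints 0 */
	return 0;
}
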
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 02f013a073a7..b953cc7c9c00 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -813,7 +813,6 @@ enum {
813 SIGNAL_ASENDER, /* whether asender wants to be interrupted */ 813 SIGNAL_ASENDER, /* whether asender wants to be interrupted */
814 SEND_PING, /* whether asender should send a ping asap */ 814 SEND_PING, /* whether asender should send a ping asap */
815 815
816 UNPLUG_QUEUED, /* only relevant with kernel 2.4 */
817 UNPLUG_REMOTE, /* sending a "UnplugRemote" could help */ 816 UNPLUG_REMOTE, /* sending a "UnplugRemote" could help */
818 MD_DIRTY, /* current uuids and flags not yet on disk */ 817 MD_DIRTY, /* current uuids and flags not yet on disk */
819 DISCARD_CONCURRENT, /* Set on one node, cleared on the peer! */ 818 DISCARD_CONCURRENT, /* Set on one node, cleared on the peer! */
@@ -824,7 +823,6 @@ enum {
824 CRASHED_PRIMARY, /* This node was a crashed primary. 823 CRASHED_PRIMARY, /* This node was a crashed primary.
825 * Gets cleared when the state.conn 824 * Gets cleared when the state.conn
826 * goes into C_CONNECTED state. */ 825 * goes into C_CONNECTED state. */
827 NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */
828 CONSIDER_RESYNC, 826 CONSIDER_RESYNC,
829 827
830 MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */ 828 MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */
@@ -834,6 +832,7 @@ enum {
834 BITMAP_IO_QUEUED, /* Started bitmap IO */ 832 BITMAP_IO_QUEUED, /* Started bitmap IO */
835 GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */ 833 GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */
836 WAS_IO_ERROR, /* Local disk failed returned IO error */ 834 WAS_IO_ERROR, /* Local disk failed returned IO error */
835 FORCE_DETACH, /* Force-detach from local disk, aborting any pending local IO */
837 RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */ 836 RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */
838 NET_CONGESTED, /* The data socket is congested */ 837 NET_CONGESTED, /* The data socket is congested */
839 838
@@ -851,6 +850,13 @@ enum {
851 AL_SUSPENDED, /* Activity logging is currently suspended. */ 850 AL_SUSPENDED, /* Activity logging is currently suspended. */
852 AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */ 851 AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */
853 STATE_SENT, /* Do not change state/UUIDs while this is set */ 852 STATE_SENT, /* Do not change state/UUIDs while this is set */
853
854 CALLBACK_PENDING, /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
855 * pending, from drbd worker context.
856 * If set, bdi_write_congested() returns true,
857 * so shrink_page_list() would not recurse into,
858 * and potentially deadlock on, this drbd worker.
859 */
854}; 860};
855 861
856struct drbd_bitmap; /* opaque for drbd_conf */ 862struct drbd_bitmap; /* opaque for drbd_conf */
@@ -1130,8 +1136,8 @@ struct drbd_conf {
1130 int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */ 1136 int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
1131 int rs_planed; /* resync sectors already planned */ 1137 int rs_planed; /* resync sectors already planned */
1132 atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */ 1138 atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
1133 int peer_max_bio_size; 1139 unsigned int peer_max_bio_size;
1134 int local_max_bio_size; 1140 unsigned int local_max_bio_size;
1135}; 1141};
1136 1142
1137static inline struct drbd_conf *minor_to_mdev(unsigned int minor) 1143static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1435,9 +1441,9 @@ struct bm_extent {
1435 * hash table. */ 1441 * hash table. */
1436#define HT_SHIFT 8 1442#define HT_SHIFT 8
1437#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT)) 1443#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
1438#define DRBD_MAX_BIO_SIZE_SAFE (1 << 12) /* Works always = 4k */ 1444#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */
1439 1445
1440#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */ 1446#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* The old header only allows packets up to 32Kib data */
1441 1447
1442/* Number of elements in the app_reads_hash */ 1448/* Number of elements in the app_reads_hash */
1443#define APP_R_HSIZE 15 1449#define APP_R_HSIZE 15
@@ -1463,6 +1469,7 @@ extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
1463extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local); 1469extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
1464extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local); 1470extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
1465extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local); 1471extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
1472extern int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local);
1466extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local); 1473extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);
1467extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, 1474extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
1468 unsigned long al_enr); 1475 unsigned long al_enr);
@@ -1840,12 +1847,20 @@ static inline int drbd_request_state(struct drbd_conf *mdev,
1840 return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED); 1847 return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
1841} 1848}
1842 1849
1850enum drbd_force_detach_flags {
1851 DRBD_IO_ERROR,
1852 DRBD_META_IO_ERROR,
1853 DRBD_FORCE_DETACH,
1854};
1855
1843#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__) 1856#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
1844static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, const char *where) 1857static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
1858 enum drbd_force_detach_flags forcedetach,
1859 const char *where)
1845{ 1860{
1846 switch (mdev->ldev->dc.on_io_error) { 1861 switch (mdev->ldev->dc.on_io_error) {
1847 case EP_PASS_ON: 1862 case EP_PASS_ON:
1848 if (!forcedetach) { 1863 if (forcedetach == DRBD_IO_ERROR) {
1849 if (__ratelimit(&drbd_ratelimit_state)) 1864 if (__ratelimit(&drbd_ratelimit_state))
1850 dev_err(DEV, "Local IO failed in %s.\n", where); 1865 dev_err(DEV, "Local IO failed in %s.\n", where);
1851 if (mdev->state.disk > D_INCONSISTENT) 1866 if (mdev->state.disk > D_INCONSISTENT)
@@ -1856,6 +1871,8 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
1856 case EP_DETACH: 1871 case EP_DETACH:
1857 case EP_CALL_HELPER: 1872 case EP_CALL_HELPER:
1858 set_bit(WAS_IO_ERROR, &mdev->flags); 1873 set_bit(WAS_IO_ERROR, &mdev->flags);
1874 if (forcedetach == DRBD_FORCE_DETACH)
1875 set_bit(FORCE_DETACH, &mdev->flags);
1859 if (mdev->state.disk > D_FAILED) { 1876 if (mdev->state.disk > D_FAILED) {
1860 _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL); 1877 _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
1861 dev_err(DEV, 1878 dev_err(DEV,
@@ -1875,7 +1892,7 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
1875 */ 1892 */
1876#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__) 1893#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
1877static inline void drbd_chk_io_error_(struct drbd_conf *mdev, 1894static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
1878 int error, int forcedetach, const char *where) 1895 int error, enum drbd_force_detach_flags forcedetach, const char *where)
1879{ 1896{
1880 if (error) { 1897 if (error) {
1881 unsigned long flags; 1898 unsigned long flags;
@@ -2405,15 +2422,17 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
2405 int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt); 2422 int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);
2406 2423
2407 D_ASSERT(ap_bio >= 0); 2424 D_ASSERT(ap_bio >= 0);
2425
2426 if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
2427 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
2428 drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
2429 }
2430
2408 /* this currently does wake_up for every dec_ap_bio! 2431 /* this currently does wake_up for every dec_ap_bio!
2409 * maybe rather introduce some type of hysteresis? 2432 * maybe rather introduce some type of hysteresis?
2410 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */ 2433 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
2411 if (ap_bio < mxb) 2434 if (ap_bio < mxb)
2412 wake_up(&mdev->misc_wait); 2435 wake_up(&mdev->misc_wait);
2413 if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
2414 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
2415 drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
2416 }
2417} 2436}
2418 2437
2419static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val) 2438static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
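
[Editor's note] The drbd_int.h changes replace the old boolean forcedetach parameter with a three-valued enum, so __drbd_chk_io_error_() can tell an ordinary data-device error from a meta-data error from an explicit force-detach. A reduced, compilable sketch of the resulting dispatch, with the EP_* policy handling trimmed to the two branches the hunk touches; everything else is a placeholder.

#include <stdbool.h>
#include <stdio.h>

enum drbd_force_detach_flags {
	DRBD_IO_ERROR,
	DRBD_META_IO_ERROR,
	DRBD_FORCE_DETACH,
};

enum io_error_policy { EP_PASS_ON, EP_DETACH };

struct dev_state { bool was_io_error, force_detach; };

static void chk_io_error(struct dev_state *s, enum io_error_policy pol,
			 enum drbd_force_detach_flags why)
{
	switch (pol) {
	case EP_PASS_ON:
		if (why == DRBD_IO_ERROR) {
			/* plain data IO error: pass it on, stay attached */
			printf("local IO failed, passing on\n");
			break;
		}
		/* meta-data error or force-detach: fall through to detach */
	case EP_DETACH:
		s->was_io_error = true;
		if (why == DRBD_FORCE_DETACH)
			s->force_detach = true;  /* abort pending local IO */
		printf("detaching\n");
		break;
	}
}

int main(void)
{
	struct dev_state s = { 0 };
	chk_io_error(&s, EP_PASS_ON, DRBD_META_IO_ERROR);
	return 0;
}
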
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 920ede2829d6..f93a0320e952 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -79,6 +79,7 @@ static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
79static void md_sync_timer_fn(unsigned long data); 79static void md_sync_timer_fn(unsigned long data);
80static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused); 80static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
81static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused); 81static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
82static void _tl_clear(struct drbd_conf *mdev);
82 83
83MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, " 84MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
84 "Lars Ellenberg <lars@linbit.com>"); 85 "Lars Ellenberg <lars@linbit.com>");
@@ -432,19 +433,10 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
432 433
433 /* Actions operating on the disk state, also want to work on 434 /* Actions operating on the disk state, also want to work on
434 requests that got barrier acked. */ 435 requests that got barrier acked. */
435 switch (what) {
436 case fail_frozen_disk_io:
437 case restart_frozen_disk_io:
438 list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
439 req = list_entry(le, struct drbd_request, tl_requests);
440 _req_mod(req, what);
441 }
442 436
443 case connection_lost_while_pending: 437 list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
444 case resend: 438 req = list_entry(le, struct drbd_request, tl_requests);
445 break; 439 _req_mod(req, what);
446 default:
447 dev_err(DEV, "what = %d in _tl_restart()\n", what);
448 } 440 }
449} 441}
450 442
@@ -459,11 +451,16 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
459 */ 451 */
460void tl_clear(struct drbd_conf *mdev) 452void tl_clear(struct drbd_conf *mdev)
461{ 453{
454 spin_lock_irq(&mdev->req_lock);
455 _tl_clear(mdev);
456 spin_unlock_irq(&mdev->req_lock);
457}
458
459static void _tl_clear(struct drbd_conf *mdev)
460{
462 struct list_head *le, *tle; 461 struct list_head *le, *tle;
463 struct drbd_request *r; 462 struct drbd_request *r;
464 463
465 spin_lock_irq(&mdev->req_lock);
466
467 _tl_restart(mdev, connection_lost_while_pending); 464 _tl_restart(mdev, connection_lost_while_pending);
468 465
469 /* we expect this list to be empty. */ 466 /* we expect this list to be empty. */
@@ -482,7 +479,6 @@ void tl_clear(struct drbd_conf *mdev)
482 479
483 memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *)); 480 memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
484 481
485 spin_unlock_irq(&mdev->req_lock);
486} 482}
487 483
488void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what) 484void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
@@ -1476,12 +1472,12 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1476 if (ns.susp_fen) { 1472 if (ns.susp_fen) {
1477 /* case1: The outdate peer handler is successful: */ 1473 /* case1: The outdate peer handler is successful: */
1478 if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) { 1474 if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
1479 tl_clear(mdev);
1480 if (test_bit(NEW_CUR_UUID, &mdev->flags)) { 1475 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1481 drbd_uuid_new_current(mdev); 1476 drbd_uuid_new_current(mdev);
1482 clear_bit(NEW_CUR_UUID, &mdev->flags); 1477 clear_bit(NEW_CUR_UUID, &mdev->flags);
1483 } 1478 }
1484 spin_lock_irq(&mdev->req_lock); 1479 spin_lock_irq(&mdev->req_lock);
1480 _tl_clear(mdev);
1485 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL); 1481 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
1486 spin_unlock_irq(&mdev->req_lock); 1482 spin_unlock_irq(&mdev->req_lock);
1487 } 1483 }
@@ -1514,6 +1510,13 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1514 1510
1515 /* Do not change the order of the if above and the two below... */ 1511 /* Do not change the order of the if above and the two below... */
1516 if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */ 1512 if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
1513 /* we probably will start a resync soon.
1514 * make sure those things are properly reset. */
1515 mdev->rs_total = 0;
1516 mdev->rs_failed = 0;
1517 atomic_set(&mdev->rs_pending_cnt, 0);
1518 drbd_rs_cancel_all(mdev);
1519
1517 drbd_send_uuids(mdev); 1520 drbd_send_uuids(mdev);
1518 drbd_send_state(mdev, ns); 1521 drbd_send_state(mdev, ns);
1519 } 1522 }
@@ -1630,9 +1633,24 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1630 eh = mdev->ldev->dc.on_io_error; 1633 eh = mdev->ldev->dc.on_io_error;
1631 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags); 1634 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
1632 1635
1633 /* Immediately allow completion of all application IO, that waits 1636 if (was_io_error && eh == EP_CALL_HELPER)
1634 for completion from the local disk. */ 1637 drbd_khelper(mdev, "local-io-error");
1635 tl_abort_disk_io(mdev); 1638
1639 /* Immediately allow completion of all application IO,
1640 * that waits for completion from the local disk,
1641 * if this was a force-detach due to disk_timeout
1642 * or administrator request (drbdsetup detach --force).
1643 * Do NOT abort otherwise.
1644 * Aborting local requests may cause serious problems,
1645 * if requests are completed to upper layers already,
1646 * and then later the already submitted local bio completes.
1647 * This can cause DMA into former bio pages that meanwhile
1648 * have been re-used for other things.
1649 * So aborting local requests may cause crashes,
1650 * or even worse, silent data corruption.
1651 */
1652 if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
1653 tl_abort_disk_io(mdev);
1636 1654
1637 /* current state still has to be D_FAILED, 1655 /* current state still has to be D_FAILED,
1638 * there is only one way out: to D_DISKLESS, 1656 * there is only one way out: to D_DISKLESS,
@@ -1653,9 +1671,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1653 drbd_md_sync(mdev); 1671 drbd_md_sync(mdev);
1654 } 1672 }
1655 put_ldev(mdev); 1673 put_ldev(mdev);
1656
1657 if (was_io_error && eh == EP_CALL_HELPER)
1658 drbd_khelper(mdev, "local-io-error");
1659 } 1674 }
1660 1675
1661 /* second half of local IO error, failure to attach, 1676 /* second half of local IO error, failure to attach,
@@ -1669,10 +1684,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1669 "ASSERT FAILED: disk is %s while going diskless\n", 1684 "ASSERT FAILED: disk is %s while going diskless\n",
1670 drbd_disk_str(mdev->state.disk)); 1685 drbd_disk_str(mdev->state.disk));
1671 1686
1672 mdev->rs_total = 0;
1673 mdev->rs_failed = 0;
1674 atomic_set(&mdev->rs_pending_cnt, 0);
1675
1676 if (ns.conn >= C_CONNECTED) 1687 if (ns.conn >= C_CONNECTED)
1677 drbd_send_state(mdev, ns); 1688 drbd_send_state(mdev, ns);
1678 1689
@@ -2194,7 +2205,8 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
2194{ 2205{
2195 struct p_sizes p; 2206 struct p_sizes p;
2196 sector_t d_size, u_size; 2207 sector_t d_size, u_size;
2197 int q_order_type, max_bio_size; 2208 int q_order_type;
2209 unsigned int max_bio_size;
2198 int ok; 2210 int ok;
2199 2211
2200 if (get_ldev_if_state(mdev, D_NEGOTIATING)) { 2212 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
@@ -2203,7 +2215,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
2203 u_size = mdev->ldev->dc.disk_size; 2215 u_size = mdev->ldev->dc.disk_size;
2204 q_order_type = drbd_queue_order_type(mdev); 2216 q_order_type = drbd_queue_order_type(mdev);
2205 max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9; 2217 max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
2206 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE); 2218 max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
2207 put_ldev(mdev); 2219 put_ldev(mdev);
2208 } else { 2220 } else {
2209 d_size = 0; 2221 d_size = 0;
@@ -2214,7 +2226,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
2214 2226
2215 /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */ 2227 /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
2216 if (mdev->agreed_pro_version <= 94) 2228 if (mdev->agreed_pro_version <= 94)
2217 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET); 2229 max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
2218 2230
2219 p.d_size = cpu_to_be64(d_size); 2231 p.d_size = cpu_to_be64(d_size);
2220 p.u_size = cpu_to_be64(u_size); 2232 p.u_size = cpu_to_be64(u_size);
@@ -3521,9 +3533,9 @@ static void drbd_cleanup(void)
3521} 3533}
3522 3534
3523/** 3535/**
3524 * drbd_congested() - Callback for pdflush 3536 * drbd_congested() - Callback for the flusher thread
3525 * @congested_data: User data 3537 * @congested_data: User data
3526 * @bdi_bits: Bits pdflush is currently interested in 3538 * @bdi_bits: Bits the BDI flusher thread is currently interested in
3527 * 3539 *
3528 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested. 3540 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3529 */ 3541 */
@@ -3541,6 +3553,22 @@ static int drbd_congested(void *congested_data, int bdi_bits)
3541 goto out; 3553 goto out;
3542 } 3554 }
3543 3555
3556 if (test_bit(CALLBACK_PENDING, &mdev->flags)) {
3557 r |= (1 << BDI_async_congested);
3558 /* Without good local data, we would need to read from remote,
3559 * and that would need the worker thread as well, which is
3560 * currently blocked waiting for that usermode helper to
3561 * finish.
3562 */
3563 if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
3564 r |= (1 << BDI_sync_congested);
3565 else
3566 put_ldev(mdev);
3567 r &= bdi_bits;
3568 reason = 'c';
3569 goto out;
3570 }
3571
3544 if (get_ldev(mdev)) { 3572 if (get_ldev(mdev)) {
3545 q = bdev_get_queue(mdev->ldev->backing_bdev); 3573 q = bdev_get_queue(mdev->ldev->backing_bdev);
3546 r = bdi_congested(&q->backing_dev_info, bdi_bits); 3574 r = bdi_congested(&q->backing_dev_info, bdi_bits);
@@ -3604,6 +3632,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
3604 q->backing_dev_info.congested_data = mdev; 3632 q->backing_dev_info.congested_data = mdev;
3605 3633
3606 blk_queue_make_request(q, drbd_make_request); 3634 blk_queue_make_request(q, drbd_make_request);
3635 blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
3607 /* Setting the max_hw_sectors to an odd value of 8kibyte here 3636 /* Setting the max_hw_sectors to an odd value of 8kibyte here
3608 This triggers a max_bio_size message upon first attach or connect */ 3637 This triggers a max_bio_size message upon first attach or connect */
3609 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8); 3638 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
@@ -3870,7 +3899,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
3870 if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) { 3899 if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
3871 /* this was a try anyways ... */ 3900 /* this was a try anyways ... */
3872 dev_err(DEV, "meta data update failed!\n"); 3901 dev_err(DEV, "meta data update failed!\n");
3873 drbd_chk_io_error(mdev, 1, true); 3902 drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
3874 } 3903 }
3875 3904
3876 /* Update mdev->ldev->md.la_size_sect, 3905 /* Update mdev->ldev->md.la_size_sect,
@@ -3950,9 +3979,9 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3950 3979
3951 spin_lock_irq(&mdev->req_lock); 3980 spin_lock_irq(&mdev->req_lock);
3952 if (mdev->state.conn < C_CONNECTED) { 3981 if (mdev->state.conn < C_CONNECTED) {
3953 int peer; 3982 unsigned int peer;
3954 peer = be32_to_cpu(buffer->la_peer_max_bio_size); 3983 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3955 peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE); 3984 peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
3956 mdev->peer_max_bio_size = peer; 3985 mdev->peer_max_bio_size = peer;
3957 } 3986 }
3958 spin_unlock_irq(&mdev->req_lock); 3987 spin_unlock_irq(&mdev->req_lock);
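
[Editor's note] The CALLBACK_PENDING branch added to drbd_congested() breaks a potential reclaim deadlock: while the worker is blocked in a usermode helper, writeback must not recurse into this device, so the callback claims congestion up front. A sketch of just that decision; the BDI bit values are stand-ins and have_local_data stands in for get_ldev_if_state(mdev, D_UP_TO_DATE).

#include <stdbool.h>
#include <stdio.h>

#define BDI_ASYNC_CONGESTED (1 << 0)   /* stand-in bit values */
#define BDI_SYNC_CONGESTED  (1 << 1)

static int congested_bits(bool callback_pending, bool have_local_data,
			  int bdi_bits)
{
	int r = 0;

	if (callback_pending) {
		r |= BDI_ASYNC_CONGESTED;
		/* Without good local data a read would go to the peer,
		 * which needs the (currently blocked) worker thread. */
		if (!have_local_data)
			r |= BDI_SYNC_CONGESTED;
	}
	return r & bdi_bits;
}

int main(void)
{
	printf("%d\n", congested_bits(true, false, ~0));   /* prints 3 */
	return 0;
}
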
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 6d4de6a72e80..edb490aad8b4 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -147,6 +147,9 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
147 char *argv[] = {usermode_helper, cmd, mb, NULL }; 147 char *argv[] = {usermode_helper, cmd, mb, NULL };
148 int ret; 148 int ret;
149 149
150 if (current == mdev->worker.task)
151 set_bit(CALLBACK_PENDING, &mdev->flags);
152
150 snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev)); 153 snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
151 154
152 if (get_net_conf(mdev)) { 155 if (get_net_conf(mdev)) {
@@ -189,6 +192,9 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
189 usermode_helper, cmd, mb, 192 usermode_helper, cmd, mb,
190 (ret >> 8) & 0xff, ret); 193 (ret >> 8) & 0xff, ret);
191 194
195 if (current == mdev->worker.task)
196 clear_bit(CALLBACK_PENDING, &mdev->flags);
197
192 if (ret < 0) /* Ignore any ERRNOs we got. */ 198 if (ret < 0) /* Ignore any ERRNOs we got. */
193 ret = 0; 199 ret = 0;
194 200
@@ -668,8 +674,8 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
668 la_size_changed && md_moved ? "size changed and md moved" : 674 la_size_changed && md_moved ? "size changed and md moved" :
669 la_size_changed ? "size changed" : "md moved"); 675 la_size_changed ? "size changed" : "md moved");
670 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */ 676 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
671 err = drbd_bitmap_io(mdev, &drbd_bm_write, 677 err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
672 "size changed", BM_LOCKED_MASK); 678 "size changed", BM_LOCKED_MASK);
673 if (err) { 679 if (err) {
674 rv = dev_size_error; 680 rv = dev_size_error;
675 goto out; 681 goto out;
@@ -795,8 +801,8 @@ static int drbd_check_al_size(struct drbd_conf *mdev)
795static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) 801static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
796{ 802{
797 struct request_queue * const q = mdev->rq_queue; 803 struct request_queue * const q = mdev->rq_queue;
798 int max_hw_sectors = max_bio_size >> 9; 804 unsigned int max_hw_sectors = max_bio_size >> 9;
799 int max_segments = 0; 805 unsigned int max_segments = 0;
800 806
801 if (get_ldev_if_state(mdev, D_ATTACHING)) { 807 if (get_ldev_if_state(mdev, D_ATTACHING)) {
802 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; 808 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
@@ -829,7 +835,7 @@ static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_
829 835
830void drbd_reconsider_max_bio_size(struct drbd_conf *mdev) 836void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
831{ 837{
832 int now, new, local, peer; 838 unsigned int now, new, local, peer;
833 839
834 now = queue_max_hw_sectors(mdev->rq_queue) << 9; 840 now = queue_max_hw_sectors(mdev->rq_queue) << 9;
835 local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */ 841 local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
@@ -840,13 +846,14 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
840 mdev->local_max_bio_size = local; 846 mdev->local_max_bio_size = local;
841 put_ldev(mdev); 847 put_ldev(mdev);
842 } 848 }
849 local = min(local, DRBD_MAX_BIO_SIZE);
843 850
844 /* We may ignore peer limits if the peer is modern enough. 851 /* We may ignore peer limits if the peer is modern enough.
845 Because new from 8.3.8 onwards the peer can use multiple 852 Because new from 8.3.8 onwards the peer can use multiple
846 BIOs for a single peer_request */ 853 BIOs for a single peer_request */
847 if (mdev->state.conn >= C_CONNECTED) { 854 if (mdev->state.conn >= C_CONNECTED) {
848 if (mdev->agreed_pro_version < 94) { 855 if (mdev->agreed_pro_version < 94) {
849 peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET); 856 peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
850 /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */ 857 /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
851 } else if (mdev->agreed_pro_version == 94) 858 } else if (mdev->agreed_pro_version == 94)
852 peer = DRBD_MAX_SIZE_H80_PACKET; 859 peer = DRBD_MAX_SIZE_H80_PACKET;
@@ -854,10 +861,10 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
854 peer = DRBD_MAX_BIO_SIZE; 861 peer = DRBD_MAX_BIO_SIZE;
855 } 862 }
856 863
857 new = min_t(int, local, peer); 864 new = min(local, peer);
858 865
859 if (mdev->state.role == R_PRIMARY && new < now) 866 if (mdev->state.role == R_PRIMARY && new < now)
860 dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now); 867 dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
861 868
862 if (new != now) 869 if (new != now)
863 dev_info(DEV, "max BIO size = %u\n", new); 870 dev_info(DEV, "max BIO size = %u\n", new);
@@ -950,6 +957,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
950 * to realize a "hot spare" feature (not that I'd recommend that) */ 957 * to realize a "hot spare" feature (not that I'd recommend that) */
951 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); 958 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
952 959
960 /* make sure there is no leftover from previous force-detach attempts */
961 clear_bit(FORCE_DETACH, &mdev->flags);
962
963 /* and no leftover from previously aborted resync or verify, either */
964 mdev->rs_total = 0;
965 mdev->rs_failed = 0;
966 atomic_set(&mdev->rs_pending_cnt, 0);
967
953 /* allocation not in the IO path, cqueue thread context */ 968 /* allocation not in the IO path, cqueue thread context */
954 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL); 969 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
955 if (!nbc) { 970 if (!nbc) {
@@ -1345,6 +1360,7 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1345 } 1360 }
1346 1361
1347 if (dt.detach_force) { 1362 if (dt.detach_force) {
1363 set_bit(FORCE_DETACH, &mdev->flags);
1348 drbd_force_state(mdev, NS(disk, D_FAILED)); 1364 drbd_force_state(mdev, NS(disk, D_FAILED));
1349 reply->ret_code = SS_SUCCESS; 1365 reply->ret_code = SS_SUCCESS;
1350 goto out; 1366 goto out;
@@ -1962,9 +1978,11 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
1962 int retcode; 1978 int retcode;
1963 1979
1964 /* If there is still bitmap IO pending, probably because of a previous 1980 /* If there is still bitmap IO pending, probably because of a previous
1965 * resync just being finished, wait for it before requesting a new resync. */ 1981 * resync just being finished, wait for it before requesting a new resync.
 1982 * Also wait for its after_state_ch(). */
1966 drbd_suspend_io(mdev); 1983 drbd_suspend_io(mdev);
1967 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); 1984 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
1985 drbd_flush_workqueue(mdev);
1968 1986
1969 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED); 1987 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
1970 1988
@@ -2003,9 +2021,11 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
2003 int retcode; 2021 int retcode;
2004 2022
2005 /* If there is still bitmap IO pending, probably because of a previous 2023 /* If there is still bitmap IO pending, probably because of a previous
2006 * resync just being finished, wait for it before requesting a new resync. */ 2024 * resync just being finished, wait for it before requesting a new resync.
 2025 * Also wait for its after_state_ch(). */
2007 drbd_suspend_io(mdev); 2026 drbd_suspend_io(mdev);
2008 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); 2027 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2028 drbd_flush_workqueue(mdev);
2009 2029
2010 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED); 2030 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
2011 2031
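
[Editor's note] Across the drbd_nl.c hunks the FORCE_DETACH bit gets a complete lifecycle: set by "drbdsetup detach --force" (and by the disk-timeout path in drbd_req.c further down), consumed with test_and_clear_bit() in after_state_ch(), and cleared defensively on the next attach. A userspace sketch of that lifecycle using a plain flag word in place of the kernel's atomic bitops:

#include <stdio.h>

#define FORCE_DETACH (1ul << 0)

static unsigned long flags;

static int test_and_clear(unsigned long bit)
{
	int was_set = !!(flags & bit);

	flags &= ~bit;
	return was_set;
}

int main(void)
{
	flags &= ~FORCE_DETACH;        /* attach: clear any stale leftover */
	flags |= FORCE_DETACH;         /* drbdsetup detach --force */
	if (test_and_clear(FORCE_DETACH))
		printf("aborting pending local IO\n");  /* tl_abort_disk_io() */
	if (!test_and_clear(FORCE_DETACH))
		printf("ordinary detach: do NOT abort\n");
	return 0;
}
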
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 869bada2ed06..5496104f90b9 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -245,6 +245,9 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
245 mdev->state.role == R_SECONDARY) { 245 mdev->state.role == R_SECONDARY) {
246 seq_printf(seq, "%2d: cs:Unconfigured\n", i); 246 seq_printf(seq, "%2d: cs:Unconfigured\n", i);
247 } else { 247 } else {
248 /* reset mdev->congestion_reason */
249 bdi_rw_congested(&mdev->rq_queue->backing_dev_info);
250
248 seq_printf(seq, 251 seq_printf(seq,
249 "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n" 252 "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
250 " ns:%u nr:%u dw:%u dr:%u al:%u bm:%u " 253 " ns:%u nr:%u dw:%u dr:%u al:%u bm:%u "
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index ea4836e0ae98..c74ca2df7431 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -277,6 +277,9 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
277 atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use; 277 atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
278 int i; 278 int i;
279 279
280 if (page == NULL)
281 return;
282
280 if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count) 283 if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
281 i = page_chain_free(page); 284 i = page_chain_free(page);
282 else { 285 else {
@@ -316,7 +319,7 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
316 gfp_t gfp_mask) __must_hold(local) 319 gfp_t gfp_mask) __must_hold(local)
317{ 320{
318 struct drbd_epoch_entry *e; 321 struct drbd_epoch_entry *e;
319 struct page *page; 322 struct page *page = NULL;
320 unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT; 323 unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
321 324
322 if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE)) 325 if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
@@ -329,9 +332,11 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
329 return NULL; 332 return NULL;
330 } 333 }
331 334
332 page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT)); 335 if (data_size) {
333 if (!page) 336 page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
334 goto fail; 337 if (!page)
338 goto fail;
339 }
335 340
336 INIT_HLIST_NODE(&e->collision); 341 INIT_HLIST_NODE(&e->collision);
337 e->epoch = NULL; 342 e->epoch = NULL;
@@ -1270,7 +1275,6 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
1270 1275
1271 data_size -= dgs; 1276 data_size -= dgs;
1272 1277
1273 ERR_IF(data_size == 0) return NULL;
1274 ERR_IF(data_size & 0x1ff) return NULL; 1278 ERR_IF(data_size & 0x1ff) return NULL;
1275 ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL; 1279 ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;
1276 1280
@@ -1291,6 +1295,9 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
1291 if (!e) 1295 if (!e)
1292 return NULL; 1296 return NULL;
1293 1297
1298 if (!data_size)
1299 return e;
1300
1294 ds = data_size; 1301 ds = data_size;
1295 page = e->pages; 1302 page = e->pages;
1296 page_chain_for_each(page) { 1303 page_chain_for_each(page) {
@@ -1715,6 +1722,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
1715 1722
1716 dp_flags = be32_to_cpu(p->dp_flags); 1723 dp_flags = be32_to_cpu(p->dp_flags);
1717 rw |= wire_flags_to_bio(mdev, dp_flags); 1724 rw |= wire_flags_to_bio(mdev, dp_flags);
1725 if (e->pages == NULL) {
1726 D_ASSERT(e->size == 0);
1727 D_ASSERT(dp_flags & DP_FLUSH);
1728 }
1718 1729
1719 if (dp_flags & DP_MAY_SET_IN_SYNC) 1730 if (dp_flags & DP_MAY_SET_IN_SYNC)
1720 e->flags |= EE_MAY_SET_IN_SYNC; 1731 e->flags |= EE_MAY_SET_IN_SYNC;
@@ -3801,11 +3812,18 @@ void drbd_free_tl_hash(struct drbd_conf *mdev)
3801 mdev->ee_hash = NULL; 3812 mdev->ee_hash = NULL;
3802 mdev->ee_hash_s = 0; 3813 mdev->ee_hash_s = 0;
3803 3814
3804 /* paranoia code */ 3815 /* We may not have had the chance to wait for all locally pending
3805 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) 3816 * application requests. The hlist_add_fake() prevents access after
3806 if (h->first) 3817 * free on master bio completion. */
3807 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n", 3818 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) {
3808 (int)(h - mdev->tl_hash), h->first); 3819 struct drbd_request *req;
3820 struct hlist_node *pos, *n;
3821 hlist_for_each_entry_safe(req, pos, n, h, collision) {
3822 hlist_del_init(&req->collision);
3823 hlist_add_fake(&req->collision);
3824 }
3825 }
3826
3809 kfree(mdev->tl_hash); 3827 kfree(mdev->tl_hash);
3810 mdev->tl_hash = NULL; 3828 mdev->tl_hash = NULL;
3811 mdev->tl_hash_s = 0; 3829 mdev->tl_hash_s = 0;
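
[Editor's note] The receiver hunks make the allocation path tolerate a zero-length payload: drbd_alloc_ee() only builds a page chain when data_size is non-zero, drbd_pp_free() accepts NULL, and read_in_block() returns the bare entry early. A userspace sketch of the resulting NULL-tolerant pairing, with malloc/free standing in for the page-pool helpers:

#include <stdio.h>
#include <stdlib.h>

struct ee { void *pages; unsigned size; };

static struct ee *alloc_ee(unsigned data_size)
{
	struct ee *e = calloc(1, sizeof(*e));

	if (!e)
		return NULL;
	if (data_size) {                  /* empty flush: no page chain */
		e->pages = malloc(data_size);
		if (!e->pages) {
			free(e);
			return NULL;
		}
	}
	e->size = data_size;
	return e;
}

static void free_page_chain(void *pages)
{
	if (pages == NULL)        /* now a no-op, as in drbd_pp_free() */
		return;
	free(pages);
}

int main(void)
{
	struct ee *e = alloc_ee(0);       /* empty REQ_FLUSH payload */

	printf("pages=%p size=%u\n", e->pages, e->size);
	free_page_chain(e->pages);
	free(e);
	return 0;
}
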
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 9c5c84946b05..01b2ac641c7b 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -455,7 +455,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
455 req->rq_state |= RQ_LOCAL_COMPLETED; 455 req->rq_state |= RQ_LOCAL_COMPLETED;
456 req->rq_state &= ~RQ_LOCAL_PENDING; 456 req->rq_state &= ~RQ_LOCAL_PENDING;
457 457
458 __drbd_chk_io_error(mdev, false); 458 __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
459 _req_may_be_done_not_susp(req, m); 459 _req_may_be_done_not_susp(req, m);
460 break; 460 break;
461 461
@@ -472,12 +472,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
472 req->rq_state |= RQ_LOCAL_COMPLETED; 472 req->rq_state |= RQ_LOCAL_COMPLETED;
473 req->rq_state &= ~RQ_LOCAL_PENDING; 473 req->rq_state &= ~RQ_LOCAL_PENDING;
474 474
475 D_ASSERT(!(req->rq_state & RQ_NET_MASK)); 475 if (req->rq_state & RQ_LOCAL_ABORTED) {
476 _req_may_be_done(req, m);
477 break;
478 }
476 479
477 __drbd_chk_io_error(mdev, false); 480 __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
478 481
479 goto_queue_for_net_read: 482 goto_queue_for_net_read:
480 483
484 D_ASSERT(!(req->rq_state & RQ_NET_MASK));
485
481 /* no point in retrying if there is no good remote data, 486 /* no point in retrying if there is no good remote data,
482 * or we have no connection. */ 487 * or we have no connection. */
483 if (mdev->state.pdsk != D_UP_TO_DATE) { 488 if (mdev->state.pdsk != D_UP_TO_DATE) {
@@ -690,6 +695,12 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
690 break; 695 break;
691 696
692 case resend: 697 case resend:
698 /* Simply complete (local only) READs. */
699 if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
700 _req_may_be_done(req, m);
701 break;
702 }
703
693 /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK 704 /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
694 before the connection loss (B&C only); only P_BARRIER_ACK was missing. 705 before the connection loss (B&C only); only P_BARRIER_ACK was missing.
 695 Throwing them out of the TL here by pretending we got a BARRIER_ACK 706 Throwing them out of the TL here by pretending we got a BARRIER_ACK
@@ -765,6 +776,40 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s
765 return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr); 776 return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
766} 777}
767 778
779static void maybe_pull_ahead(struct drbd_conf *mdev)
780{
781 int congested = 0;
782
783 /* If I don't even have good local storage, we can not reasonably try
784 * to pull ahead of the peer. We also need the local reference to make
785 * sure mdev->act_log is there.
786 * Note: caller has to make sure that net_conf is there.
787 */
788 if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
789 return;
790
791 if (mdev->net_conf->cong_fill &&
792 atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
793 dev_info(DEV, "Congestion-fill threshold reached\n");
794 congested = 1;
795 }
796
797 if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
798 dev_info(DEV, "Congestion-extents threshold reached\n");
799 congested = 1;
800 }
801
802 if (congested) {
803 queue_barrier(mdev); /* last barrier, after mirrored writes */
804
805 if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
806 _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
807 else /*mdev->net_conf->on_congestion == OC_DISCONNECT */
808 _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
809 }
810 put_ldev(mdev);
811}
812
768static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time) 813static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
769{ 814{
770 const int rw = bio_rw(bio); 815 const int rw = bio_rw(bio);
@@ -795,7 +840,15 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
795 req->private_bio = NULL; 840 req->private_bio = NULL;
796 } 841 }
797 if (rw == WRITE) { 842 if (rw == WRITE) {
798 remote = 1; 843 /* Need to replicate writes. Unless it is an empty flush,
844 * which is better mapped to a DRBD P_BARRIER packet,
845 * also for drbd wire protocol compatibility reasons. */
846 if (unlikely(size == 0)) {
847 /* The only size==0 bios we expect are empty flushes. */
848 D_ASSERT(bio->bi_rw & REQ_FLUSH);
849 remote = 0;
850 } else
851 remote = 1;
799 } else { 852 } else {
800 /* READ || READA */ 853 /* READ || READA */
801 if (local) { 854 if (local) {
@@ -831,8 +884,11 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
831 * extent. This waits for any resync activity in the corresponding 884 * extent. This waits for any resync activity in the corresponding
832 * resync extent to finish, and, if necessary, pulls in the target 885 * resync extent to finish, and, if necessary, pulls in the target
833 * extent into the activity log, which involves further disk io because 886 * extent into the activity log, which involves further disk io because
834 * of transactional on-disk meta data updates. */ 887 * of transactional on-disk meta data updates.
835 if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) { 888 * Empty flushes don't need to go into the activity log, they can only
889 * flush data for pending writes which are already in there. */
890 if (rw == WRITE && local && size
891 && !test_bit(AL_SUSPENDED, &mdev->flags)) {
836 req->rq_state |= RQ_IN_ACT_LOG; 892 req->rq_state |= RQ_IN_ACT_LOG;
837 drbd_al_begin_io(mdev, sector); 893 drbd_al_begin_io(mdev, sector);
838 } 894 }
@@ -955,7 +1011,10 @@ allocate_barrier:
955 if (rw == WRITE && _req_conflicts(req)) 1011 if (rw == WRITE && _req_conflicts(req))
956 goto fail_conflicting; 1012 goto fail_conflicting;
957 1013
958 list_add_tail(&req->tl_requests, &mdev->newest_tle->requests); 1014 /* no point in adding empty flushes to the transfer log,
1015 * they are mapped to drbd barriers already. */
1016 if (likely(size!=0))
1017 list_add_tail(&req->tl_requests, &mdev->newest_tle->requests);
959 1018
960 /* NOTE remote first: to get the concurrent write detection right, 1019 /* NOTE remote first: to get the concurrent write detection right,
961 * we must register the request before start of local IO. */ 1020 * we must register the request before start of local IO. */
@@ -972,29 +1031,16 @@ allocate_barrier:
972 _req_mod(req, queue_for_send_oos); 1031 _req_mod(req, queue_for_send_oos);
973 1032
974 if (remote && 1033 if (remote &&
975 mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) { 1034 mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
976 int congested = 0; 1035 maybe_pull_ahead(mdev);
977 1036
978 if (mdev->net_conf->cong_fill && 1037 /* If this was a flush, queue a drbd barrier/start a new epoch.
979 atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) { 1038 * Unless the current epoch was empty anyways, or we are not currently
980 dev_info(DEV, "Congestion-fill threshold reached\n"); 1039 * replicating, in which case there is no point. */
981 congested = 1; 1040 if (unlikely(bio->bi_rw & REQ_FLUSH)
982 } 1041 && mdev->newest_tle->n_writes
983 1042 && drbd_should_do_remote(mdev->state))
984 if (mdev->act_log->used >= mdev->net_conf->cong_extents) { 1043 queue_barrier(mdev);
985 dev_info(DEV, "Congestion-extents threshold reached\n");
986 congested = 1;
987 }
988
989 if (congested) {
990 queue_barrier(mdev); /* last barrier, after mirrored writes */
991
992 if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
993 _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
994 else /*mdev->net_conf->on_congestion == OC_DISCONNECT */
995 _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
996 }
997 }
998 1044
999 spin_unlock_irq(&mdev->req_lock); 1045 spin_unlock_irq(&mdev->req_lock);
1000 kfree(b); /* if someone else has beaten us to it... */ 1046 kfree(b); /* if someone else has beaten us to it... */
@@ -1093,13 +1139,12 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
1093 /* 1139 /*
1094 * what we "blindly" assume: 1140 * what we "blindly" assume:
1095 */ 1141 */
1096 D_ASSERT(bio->bi_size > 0);
1097 D_ASSERT((bio->bi_size & 0x1ff) == 0); 1142 D_ASSERT((bio->bi_size & 0x1ff) == 0);
1098 1143
1099 /* to make some things easier, force alignment of requests within the 1144 /* to make some things easier, force alignment of requests within the
1100 * granularity of our hash tables */ 1145 * granularity of our hash tables */
1101 s_enr = bio->bi_sector >> HT_SHIFT; 1146 s_enr = bio->bi_sector >> HT_SHIFT;
1102 e_enr = (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT; 1147 e_enr = bio->bi_size ? (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT : s_enr;
1103 1148
1104 if (likely(s_enr == e_enr)) { 1149 if (likely(s_enr == e_enr)) {
1105 do { 1150 do {
@@ -1257,7 +1302,7 @@ void request_timer_fn(unsigned long data)
1257 time_after(now, req->start_time + dt) && 1302 time_after(now, req->start_time + dt) &&
1258 !time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) { 1303 !time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
1259 dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n"); 1304 dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
1260 __drbd_chk_io_error(mdev, 1); 1305 __drbd_chk_io_error(mdev, DRBD_FORCE_DETACH);
1261 } 1306 }
1262 nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et; 1307 nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
1263 spin_unlock_irq(&mdev->req_lock); 1308 spin_unlock_irq(&mdev->req_lock);
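
[Editor's note] The drbd_req.c changes pull the inline congestion policy out of drbd_make_request_common() into the new maybe_pull_ahead() shown above. Its decision logic, reduced to a compilable userspace sketch; the configuration fields mirror the hunk, the thresholds and outcome strings are stand-ins.

#include <stdbool.h>
#include <stdio.h>

struct net_conf { int cong_fill, cong_extents, on_congestion; };
enum { OC_PULL_AHEAD, OC_DISCONNECT };

static const char *congestion_action(const struct net_conf *nc,
				     int ap_in_flight, int al_used)
{
	bool congested = false;

	if (nc->cong_fill && ap_in_flight >= nc->cong_fill)
		congested = true;              /* too much data in flight */
	if (al_used >= nc->cong_extents)
		congested = true;              /* activity log nearly full */

	if (!congested)
		return "stay connected";
	/* real code first queues a barrier after the mirrored writes */
	return nc->on_congestion == OC_PULL_AHEAD ? "go Ahead" : "disconnect";
}

int main(void)
{
	struct net_conf nc = { .cong_fill = 128, .cong_extents = 64,
			       .on_congestion = OC_PULL_AHEAD };

	puts(congestion_action(&nc, 200, 10));   /* prints "go Ahead" */
	return 0;
}
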
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 620c70ff2231..6bce2cc179d4 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -111,7 +111,7 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
111 if (list_empty(&mdev->read_ee)) 111 if (list_empty(&mdev->read_ee))
112 wake_up(&mdev->ee_wait); 112 wake_up(&mdev->ee_wait);
113 if (test_bit(__EE_WAS_ERROR, &e->flags)) 113 if (test_bit(__EE_WAS_ERROR, &e->flags))
114 __drbd_chk_io_error(mdev, false); 114 __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
115 spin_unlock_irqrestore(&mdev->req_lock, flags); 115 spin_unlock_irqrestore(&mdev->req_lock, flags);
116 116
117 drbd_queue_work(&mdev->data.work, &e->w); 117 drbd_queue_work(&mdev->data.work, &e->w);
@@ -154,7 +154,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
154 : list_empty(&mdev->active_ee); 154 : list_empty(&mdev->active_ee);
155 155
156 if (test_bit(__EE_WAS_ERROR, &e->flags)) 156 if (test_bit(__EE_WAS_ERROR, &e->flags))
157 __drbd_chk_io_error(mdev, false); 157 __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
158 spin_unlock_irqrestore(&mdev->req_lock, flags); 158 spin_unlock_irqrestore(&mdev->req_lock, flags);
159 159
160 if (is_syncer_req) 160 if (is_syncer_req)
@@ -1501,14 +1501,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
1501 return; 1501 return;
1502 } 1502 }
1503 1503
1504 if (mdev->state.conn < C_AHEAD) {
1505 /* In case a previous resync run was aborted by an IO error/detach on the peer. */
1506 drbd_rs_cancel_all(mdev);
1507 /* This should be done when we abort the resync. We definitely do not
1508 want to have this for connections going back and forth between
1509 Ahead/Behind and SyncSource/SyncTarget */
1510 }
1511
1512 if (side == C_SYNC_TARGET) { 1504 if (side == C_SYNC_TARGET) {
1513 /* Since application IO was locked out during C_WF_BITMAP_T and 1505 /* Since application IO was locked out during C_WF_BITMAP_T and
1514 C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET 1506 C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index cce7df367b79..a7d6347aaa79 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -191,6 +191,7 @@ static int print_unex = 1;
191#include <linux/mutex.h> 191#include <linux/mutex.h>
192#include <linux/io.h> 192#include <linux/io.h>
193#include <linux/uaccess.h> 193#include <linux/uaccess.h>
194#include <linux/async.h>
194 195
195/* 196/*
196 * PS/2 floppies have much slower step rates than regular floppies. 197 * PS/2 floppies have much slower step rates than regular floppies.
@@ -671,6 +672,7 @@ static void __reschedule_timeout(int drive, const char *message)
671 672
672 if (drive == current_reqD) 673 if (drive == current_reqD)
673 drive = current_drive; 674 drive = current_drive;
675 __cancel_delayed_work(&fd_timeout);
674 676
675 if (drive < 0 || drive >= N_DRIVE) { 677 if (drive < 0 || drive >= N_DRIVE) {
676 delay = 20UL * HZ; 678 delay = 20UL * HZ;
@@ -2515,8 +2517,7 @@ static int make_raw_rw_request(void)
2515 set_fdc((long)current_req->rq_disk->private_data); 2517 set_fdc((long)current_req->rq_disk->private_data);
2516 2518
2517 raw_cmd = &default_raw_cmd; 2519 raw_cmd = &default_raw_cmd;
2518 raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_DISK | 2520 raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK;
2519 FD_RAW_NEED_SEEK;
2520 raw_cmd->cmd_count = NR_RW; 2521 raw_cmd->cmd_count = NR_RW;
2521 if (rq_data_dir(current_req) == READ) { 2522 if (rq_data_dir(current_req) == READ) {
2522 raw_cmd->flags |= FD_RAW_READ; 2523 raw_cmd->flags |= FD_RAW_READ;
@@ -4122,7 +4123,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
4122 return get_disk(disks[drive]); 4123 return get_disk(disks[drive]);
4123} 4124}
4124 4125
4125static int __init floppy_init(void) 4126static int __init do_floppy_init(void)
4126{ 4127{
4127 int i, unit, drive; 4128 int i, unit, drive;
4128 int err, dr; 4129 int err, dr;
@@ -4337,6 +4338,24 @@ out_put_disk:
4337 return err; 4338 return err;
4338} 4339}
4339 4340
4341#ifndef MODULE
4342static __init void floppy_async_init(void *data, async_cookie_t cookie)
4343{
4344 do_floppy_init();
4345}
4346#endif
4347
4348static int __init floppy_init(void)
4349{
4350#ifdef MODULE
4351 return do_floppy_init();
4352#else
4353 /* Don't hold up the bootup by the floppy initialization */
4354 async_schedule(floppy_async_init, NULL);
4355 return 0;
4356#endif
4357}
4358
4340static const struct io_region { 4359static const struct io_region {
4341 int offset; 4360 int offset;
4342 int size; 4361 int size;
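
[Editor's note] The floppy hunk splits floppy_init() so the slow probe no longer stalls boot when the driver is built in, while modprobe still gets a synchronous return code. A minimal kernel-module-style sketch of the same pattern; names are placeholders mirroring the hunk, not the real driver.

#include <linux/async.h>
#include <linux/init.h>
#include <linux/module.h>

static int __init do_slow_init(void)
{
	/* the actual, potentially slow, hardware probing would live here */
	return 0;
}

#ifndef MODULE
static void __init slow_async_init(void *data, async_cookie_t cookie)
{
	do_slow_init();
}
#endif

static int __init slow_init(void)
{
#ifdef MODULE
	return do_slow_init();     /* modprobe can afford to wait */
#else
	/* built in: don't hold up the rest of the boot */
	async_schedule(slow_async_init, NULL);
	return 0;
#endif
}
module_init(slow_init);
MODULE_LICENSE("GPL");
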
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index bbca966f8f66..3bba65510d23 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1597,14 +1597,12 @@ static int loop_add(struct loop_device **l, int i)
1597 struct gendisk *disk; 1597 struct gendisk *disk;
1598 int err; 1598 int err;
1599 1599
1600 err = -ENOMEM;
1600 lo = kzalloc(sizeof(*lo), GFP_KERNEL); 1601 lo = kzalloc(sizeof(*lo), GFP_KERNEL);
1601 if (!lo) { 1602 if (!lo)
1602 err = -ENOMEM;
1603 goto out; 1603 goto out;
1604 }
1605 1604
1606 err = idr_pre_get(&loop_index_idr, GFP_KERNEL); 1605 if (!idr_pre_get(&loop_index_idr, GFP_KERNEL))
1607 if (err < 0)
1608 goto out_free_dev; 1606 goto out_free_dev;
1609 1607
1610 if (i >= 0) { 1608 if (i >= 0) {
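
[Editor's note] The loop_add() cleanup converges on a common kernel error-path idiom: set the default errno once, then every early failure can simply jump to the right label. A userspace sketch of the idiom with placeholder names; reserve_index() stands in for idr_pre_get().

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct loopdev { int index; };

static int reserve_index(void) { return 1; }  /* stands in for idr_pre_get() */

static int dev_add(struct loopdev **out)
{
	struct loopdev *lo;
	int err = -ENOMEM;            /* default for every early failure */

	lo = calloc(1, sizeof(*lo));
	if (!lo)
		goto out;

	if (!reserve_index())
		goto out_free_dev;    /* err is already -ENOMEM */

	*out = lo;
	return 0;

out_free_dev:
	free(lo);
out:
	return err;
}

int main(void)
{
	struct loopdev *lo;

	printf("%d\n", dev_add(&lo));
	return 0;
}
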
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 76fa3deaee84..1788f491e0fb 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -780,9 +780,9 @@ static const struct block_device_operations mg_disk_ops = {
780 .getgeo = mg_getgeo 780 .getgeo = mg_getgeo
781}; 781};
782 782
783static int mg_suspend(struct platform_device *plat_dev, pm_message_t state) 783static int mg_suspend(struct device *dev)
784{ 784{
785 struct mg_drv_data *prv_data = plat_dev->dev.platform_data; 785 struct mg_drv_data *prv_data = dev->platform_data;
786 struct mg_host *host = prv_data->host; 786 struct mg_host *host = prv_data->host;
787 787
788 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) 788 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
@@ -804,9 +804,9 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
804 return 0; 804 return 0;
805} 805}
806 806
807static int mg_resume(struct platform_device *plat_dev) 807static int mg_resume(struct device *dev)
808{ 808{
809 struct mg_drv_data *prv_data = plat_dev->dev.platform_data; 809 struct mg_drv_data *prv_data = dev->platform_data;
810 struct mg_host *host = prv_data->host; 810 struct mg_host *host = prv_data->host;
811 811
812 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) 812 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
@@ -825,6 +825,8 @@ static int mg_resume(struct platform_device *plat_dev)
825 return 0; 825 return 0;
826} 826}
827 827
828static SIMPLE_DEV_PM_OPS(mg_pm, mg_suspend, mg_resume);
829
828static int mg_probe(struct platform_device *plat_dev) 830static int mg_probe(struct platform_device *plat_dev)
829{ 831{
830 struct mg_host *host; 832 struct mg_host *host;
@@ -1074,11 +1076,10 @@ static int mg_remove(struct platform_device *plat_dev)
1074static struct platform_driver mg_disk_driver = { 1076static struct platform_driver mg_disk_driver = {
1075 .probe = mg_probe, 1077 .probe = mg_probe,
1076 .remove = mg_remove, 1078 .remove = mg_remove,
1077 .suspend = mg_suspend,
1078 .resume = mg_resume,
1079 .driver = { 1079 .driver = {
1080 .name = MG_DEV_NAME, 1080 .name = MG_DEV_NAME,
1081 .owner = THIS_MODULE, 1081 .owner = THIS_MODULE,
1082 .pm = &mg_pm,
1082 } 1083 }
1083}; 1084};
1084 1085
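The mg_disk conversion replaces the legacy platform .suspend/.resume hooks, which took a struct platform_device *, with dev_pm_ops callbacks that take a struct device *. A sketch of the resulting shape, assuming a hypothetical platform driver:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int my_probe(struct platform_device *pdev)
{
	return 0;	/* hardware setup elided */
}

static int my_remove(struct platform_device *pdev)
{
	return 0;
}

/* dev_pm_ops callbacks take a struct device *, not a platform_device * */
static int my_suspend(struct device *dev)
{
	void *prv = dev->platform_data;	/* driver-private data, if any */

	(void)prv;	/* quiesce the hardware here */
	return 0;
}

static int my_resume(struct device *dev)
{
	return 0;	/* re-program the hardware here */
}

static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

static struct platform_driver my_driver = {
	.probe	= my_probe,
	.remove	= my_remove,
	.driver	= {
		.name	= "mydev",
		.owner	= THIS_MODULE,
		.pm	= &my_pm_ops,	/* replaces .suspend/.resume */
	},
};
module_platform_driver(my_driver);

SIMPLE_DEV_PM_OPS() also compiles away cleanly when CONFIG_PM_SLEEP is unset, which the open-coded legacy hooks did not.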
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 264bc77dcb91..a8fddeb3d638 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -37,6 +37,7 @@
37#include <linux/kthread.h> 37#include <linux/kthread.h>
38#include <../drivers/ata/ahci.h> 38#include <../drivers/ata/ahci.h>
39#include <linux/export.h> 39#include <linux/export.h>
40#include <linux/debugfs.h>
40#include "mtip32xx.h" 41#include "mtip32xx.h"
41 42
42#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32) 43#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -85,6 +86,7 @@ static int instance;
85 * allocated in mtip_init(). 86 * allocated in mtip_init().
86 */ 87 */
87static int mtip_major; 88static int mtip_major;
89static struct dentry *dfs_parent;
88 90
89static DEFINE_SPINLOCK(rssd_index_lock); 91static DEFINE_SPINLOCK(rssd_index_lock);
90static DEFINE_IDA(rssd_index_ida); 92static DEFINE_IDA(rssd_index_ida);
@@ -2546,7 +2548,7 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
2546} 2548}
2547 2549
2548/* 2550/*
2549 * Sysfs register/status dump. 2551 * Sysfs status dump.
2550 * 2552 *
2551 * @dev Pointer to the device structure, passed by the kernel. 2553 * @dev Pointer to the device structure, passed by the kernel.
2552 * @attr Pointer to the device_attribute structure passed by the kernel. 2554 * @attr Pointer to the device_attribute structure passed by the kernel.
@@ -2555,45 +2557,68 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
2555 * return value 2557 * return value
2556 * The size, in bytes, of the data copied into buf. 2558 * The size, in bytes, of the data copied into buf.
2557 */ 2559 */
2558static ssize_t mtip_hw_show_registers(struct device *dev, 2560static ssize_t mtip_hw_show_status(struct device *dev,
2559 struct device_attribute *attr, 2561 struct device_attribute *attr,
2560 char *buf) 2562 char *buf)
2561{ 2563{
2562 u32 group_allocated;
2563 struct driver_data *dd = dev_to_disk(dev)->private_data; 2564 struct driver_data *dd = dev_to_disk(dev)->private_data;
2564 int size = 0; 2565 int size = 0;
2566
2567 if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
2568 size += sprintf(buf, "%s", "thermal_shutdown\n");
2569 else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
2570 size += sprintf(buf, "%s", "write_protect\n");
2571 else
2572 size += sprintf(buf, "%s", "online\n");
2573
2574 return size;
2575}
2576
2577static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
2578
2579static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
2580 size_t len, loff_t *offset)
2581{
2582 struct driver_data *dd = (struct driver_data *)f->private_data;
2583 char buf[MTIP_DFS_MAX_BUF_SIZE];
2584 u32 group_allocated;
2585 int size = *offset;
2565 int n; 2586 int n;
2566 2587
2567 size += sprintf(&buf[size], "Hardware\n--------\n"); 2588 if (!len || size)
2568 size += sprintf(&buf[size], "S ACTive : [ 0x"); 2589 return 0;
2590
2591 if (size < 0)
2592 return -EINVAL;
2593
2594 size += sprintf(&buf[size], "H/ S ACTive : [ 0x");
2569 2595
2570 for (n = dd->slot_groups-1; n >= 0; n--) 2596 for (n = dd->slot_groups-1; n >= 0; n--)
2571 size += sprintf(&buf[size], "%08X ", 2597 size += sprintf(&buf[size], "%08X ",
2572 readl(dd->port->s_active[n])); 2598 readl(dd->port->s_active[n]));
2573 2599
2574 size += sprintf(&buf[size], "]\n"); 2600 size += sprintf(&buf[size], "]\n");
2575 size += sprintf(&buf[size], "Command Issue : [ 0x"); 2601 size += sprintf(&buf[size], "H/ Command Issue : [ 0x");
2576 2602
2577 for (n = dd->slot_groups-1; n >= 0; n--) 2603 for (n = dd->slot_groups-1; n >= 0; n--)
2578 size += sprintf(&buf[size], "%08X ", 2604 size += sprintf(&buf[size], "%08X ",
2579 readl(dd->port->cmd_issue[n])); 2605 readl(dd->port->cmd_issue[n]));
2580 2606
2581 size += sprintf(&buf[size], "]\n"); 2607 size += sprintf(&buf[size], "]\n");
2582 size += sprintf(&buf[size], "Completed : [ 0x"); 2608 size += sprintf(&buf[size], "H/ Completed : [ 0x");
2583 2609
2584 for (n = dd->slot_groups-1; n >= 0; n--) 2610 for (n = dd->slot_groups-1; n >= 0; n--)
2585 size += sprintf(&buf[size], "%08X ", 2611 size += sprintf(&buf[size], "%08X ",
2586 readl(dd->port->completed[n])); 2612 readl(dd->port->completed[n]));
2587 2613
2588 size += sprintf(&buf[size], "]\n"); 2614 size += sprintf(&buf[size], "]\n");
2589 size += sprintf(&buf[size], "PORT IRQ STAT : [ 0x%08X ]\n", 2615 size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
2590 readl(dd->port->mmio + PORT_IRQ_STAT)); 2616 readl(dd->port->mmio + PORT_IRQ_STAT));
2591 size += sprintf(&buf[size], "HOST IRQ STAT : [ 0x%08X ]\n", 2617 size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
2592 readl(dd->mmio + HOST_IRQ_STAT)); 2618 readl(dd->mmio + HOST_IRQ_STAT));
2593 size += sprintf(&buf[size], "\n"); 2619 size += sprintf(&buf[size], "\n");
2594 2620
2595 size += sprintf(&buf[size], "Local\n-----\n"); 2621 size += sprintf(&buf[size], "L/ Allocated : [ 0x");
2596 size += sprintf(&buf[size], "Allocated : [ 0x");
2597 2622
2598 for (n = dd->slot_groups-1; n >= 0; n--) { 2623 for (n = dd->slot_groups-1; n >= 0; n--) {
2599 if (sizeof(long) > sizeof(u32)) 2624 if (sizeof(long) > sizeof(u32))
@@ -2605,7 +2630,7 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
2605 } 2630 }
2606 size += sprintf(&buf[size], "]\n"); 2631 size += sprintf(&buf[size], "]\n");
2607 2632
2608 size += sprintf(&buf[size], "Commands in Q: [ 0x"); 2633 size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");
2609 2634
2610 for (n = dd->slot_groups-1; n >= 0; n--) { 2635 for (n = dd->slot_groups-1; n >= 0; n--) {
2611 if (sizeof(long) > sizeof(u32)) 2636 if (sizeof(long) > sizeof(u32))
@@ -2617,44 +2642,53 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
2617 } 2642 }
2618 size += sprintf(&buf[size], "]\n"); 2643 size += sprintf(&buf[size], "]\n");
2619 2644
2620 return size; 2645 *offset = size <= len ? size : len;
2646 size = copy_to_user(ubuf, buf, *offset);
2647 if (size)
2648 return -EFAULT;
2649
2650 return *offset;
2621} 2651}
2622 2652
2623static ssize_t mtip_hw_show_status(struct device *dev, 2653static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
2624 struct device_attribute *attr, 2654 size_t len, loff_t *offset)
2625 char *buf)
2626{ 2655{
2627 struct driver_data *dd = dev_to_disk(dev)->private_data; 2656 struct driver_data *dd = (struct driver_data *)f->private_data;
2628 int size = 0; 2657 char buf[MTIP_DFS_MAX_BUF_SIZE];
2658 int size = *offset;
2629 2659
2630 if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag)) 2660 if (!len || size)
2631 size += sprintf(buf, "%s", "thermal_shutdown\n"); 2661 return 0;
2632 else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
2633 size += sprintf(buf, "%s", "write_protect\n");
2634 else
2635 size += sprintf(buf, "%s", "online\n");
2636
2637 return size;
2638}
2639 2662
2640static ssize_t mtip_hw_show_flags(struct device *dev, 2663 if (size < 0)
2641 struct device_attribute *attr, 2664 return -EINVAL;
2642 char *buf)
2643{
2644 struct driver_data *dd = dev_to_disk(dev)->private_data;
2645 int size = 0;
2646 2665
2647 size += sprintf(&buf[size], "Flag in port struct : [ %08lX ]\n", 2666 size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
2648 dd->port->flags); 2667 dd->port->flags);
2649 size += sprintf(&buf[size], "Flag in dd struct : [ %08lX ]\n", 2668 size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n",
2650 dd->dd_flag); 2669 dd->dd_flag);
2651 2670
2652 return size; 2671 *offset = size <= len ? size : len;
2672 size = copy_to_user(ubuf, buf, *offset);
2673 if (size)
2674 return -EFAULT;
2675
2676 return *offset;
2653} 2677}
2654 2678
2655static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL); 2679static const struct file_operations mtip_regs_fops = {
2656static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); 2680 .owner = THIS_MODULE,
2657static DEVICE_ATTR(flags, S_IRUGO, mtip_hw_show_flags, NULL); 2681 .open = simple_open,
2682 .read = mtip_hw_read_registers,
2683 .llseek = no_llseek,
2684};
2685
2686static const struct file_operations mtip_flags_fops = {
2687 .owner = THIS_MODULE,
2688 .open = simple_open,
2689 .read = mtip_hw_read_flags,
2690 .llseek = no_llseek,
2691};
2658 2692
2659/* 2693/*
2660 * Create the sysfs related attributes. 2694 * Create the sysfs related attributes.
@@ -2671,15 +2705,9 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
2671 if (!kobj || !dd) 2705 if (!kobj || !dd)
2672 return -EINVAL; 2706 return -EINVAL;
2673 2707
2674 if (sysfs_create_file(kobj, &dev_attr_registers.attr))
2675 dev_warn(&dd->pdev->dev,
2676 "Error creating 'registers' sysfs entry\n");
2677 if (sysfs_create_file(kobj, &dev_attr_status.attr)) 2708 if (sysfs_create_file(kobj, &dev_attr_status.attr))
2678 dev_warn(&dd->pdev->dev, 2709 dev_warn(&dd->pdev->dev,
2679 "Error creating 'status' sysfs entry\n"); 2710 "Error creating 'status' sysfs entry\n");
2680 if (sysfs_create_file(kobj, &dev_attr_flags.attr))
2681 dev_warn(&dd->pdev->dev,
2682 "Error creating 'flags' sysfs entry\n");
2683 return 0; 2711 return 0;
2684} 2712}
2685 2713
@@ -2698,13 +2726,39 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
2698 if (!kobj || !dd) 2726 if (!kobj || !dd)
2699 return -EINVAL; 2727 return -EINVAL;
2700 2728
2701 sysfs_remove_file(kobj, &dev_attr_registers.attr);
2702 sysfs_remove_file(kobj, &dev_attr_status.attr); 2729 sysfs_remove_file(kobj, &dev_attr_status.attr);
2703 sysfs_remove_file(kobj, &dev_attr_flags.attr);
2704 2730
2705 return 0; 2731 return 0;
2706} 2732}
2707 2733
2734static int mtip_hw_debugfs_init(struct driver_data *dd)
2735{
2736 if (!dfs_parent)
2737 return -1;
2738
2739 dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
2740 if (IS_ERR_OR_NULL(dd->dfs_node)) {
2741 dev_warn(&dd->pdev->dev,
2742 "Error creating node %s under debugfs\n",
2743 dd->disk->disk_name);
2744 dd->dfs_node = NULL;
2745 return -1;
2746 }
2747
2748 debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd,
2749 &mtip_flags_fops);
2750 debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd,
2751 &mtip_regs_fops);
2752
2753 return 0;
2754}
2755
2756static void mtip_hw_debugfs_exit(struct driver_data *dd)
2757{
2758 debugfs_remove_recursive(dd->dfs_node);
2759}
2760
2761
2708/* 2762/*
2709 * Perform any init/resume time hardware setup 2763 * Perform any init/resume time hardware setup
2710 * 2764 *
@@ -3730,6 +3784,7 @@ skip_create_disk:
3730 mtip_hw_sysfs_init(dd, kobj); 3784 mtip_hw_sysfs_init(dd, kobj);
3731 kobject_put(kobj); 3785 kobject_put(kobj);
3732 } 3786 }
3787 mtip_hw_debugfs_init(dd);
3733 3788
3734 if (dd->mtip_svc_handler) { 3789 if (dd->mtip_svc_handler) {
3735 set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); 3790 set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -3755,6 +3810,8 @@ start_service_thread:
3755 return rv; 3810 return rv;
3756 3811
3757kthread_run_error: 3812kthread_run_error:
3813 mtip_hw_debugfs_exit(dd);
3814
3758 /* Delete our gendisk. This also removes the device from /dev */ 3815 /* Delete our gendisk. This also removes the device from /dev */
3759 del_gendisk(dd->disk); 3816 del_gendisk(dd->disk);
3760 3817
@@ -3805,6 +3862,7 @@ static int mtip_block_remove(struct driver_data *dd)
3805 kobject_put(kobj); 3862 kobject_put(kobj);
3806 } 3863 }
3807 } 3864 }
3865 mtip_hw_debugfs_exit(dd);
3808 3866
3809 /* 3867 /*
3810 * Delete our gendisk structure. This also removes the device 3868 * Delete our gendisk structure. This also removes the device
@@ -4152,10 +4210,20 @@ static int __init mtip_init(void)
4152 } 4210 }
4153 mtip_major = error; 4211 mtip_major = error;
4154 4212
4213 if (!dfs_parent) {
4214 dfs_parent = debugfs_create_dir("rssd", NULL);
4215 if (IS_ERR_OR_NULL(dfs_parent)) {
4216 printk(KERN_WARNING "Error creating debugfs parent\n");
4217 dfs_parent = NULL;
4218 }
4219 }
4220
4155 /* Register our PCI operations. */ 4221 /* Register our PCI operations. */
4156 error = pci_register_driver(&mtip_pci_driver); 4222 error = pci_register_driver(&mtip_pci_driver);
4157 if (error) 4223 if (error) {
4224 debugfs_remove(dfs_parent);
4158 unregister_blkdev(mtip_major, MTIP_DRV_NAME); 4225 unregister_blkdev(mtip_major, MTIP_DRV_NAME);
4226 }
4159 4227
4160 return error; 4228 return error;
4161} 4229}
@@ -4172,6 +4240,8 @@ static int __init mtip_init(void)
4172 */ 4240 */
4173static void __exit mtip_exit(void) 4241static void __exit mtip_exit(void)
4174{ 4242{
4243 debugfs_remove_recursive(dfs_parent);
4244
4175 /* Release the allocated major block device number. */ 4245 /* Release the allocated major block device number. */
4176 unregister_blkdev(mtip_major, MTIP_DRV_NAME); 4246 unregister_blkdev(mtip_major, MTIP_DRV_NAME);
4177 4247
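The mtip32xx change above moves the register and flag dumps out of sysfs, which is meant for one-value-per-file attributes, into per-disk debugfs files under a shared "rssd" directory. A minimal sketch of the same debugfs read-handler pattern; the names and the 64-byte buffer are illustrative, not from the driver:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>

static struct dentry *my_dfs_dir;
static unsigned long my_flags;		/* state to expose, illustrative */

static ssize_t my_flags_read(struct file *f, char __user *ubuf,
			     size_t len, loff_t *offset)
{
	char buf[64];
	size_t size;

	if (*offset)	/* everything is produced by the first read */
		return 0;

	size = scnprintf(buf, sizeof(buf), "flags : [ %08lX ]\n", my_flags);
	*offset = min(size, len);
	/* copy_to_user() returns the number of bytes NOT copied */
	if (copy_to_user(ubuf, buf, *offset))
		return -EFAULT;

	return *offset;
}

static const struct file_operations my_flags_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,	/* stashes inode->i_private in private_data */
	.read	= my_flags_read,
	.llseek	= no_llseek,
};

static int my_debugfs_init(void)
{
	my_dfs_dir = debugfs_create_dir("mydrv", NULL);
	if (IS_ERR_OR_NULL(my_dfs_dir))
		return -ENODEV;

	debugfs_create_file("flags", S_IRUGO, my_dfs_dir, NULL,
			    &my_flags_fops);
	return 0;
}

static void my_debugfs_exit(void)
{
	debugfs_remove_recursive(my_dfs_dir);
}

As in the patch, a failure to create the files is tolerated (debugfs is best-effort diagnostics), and debugfs_remove_recursive() tears the whole directory down in one call.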
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index b2c88da26b2a..f51fc23d17bb 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -26,7 +26,6 @@
26#include <linux/ata.h> 26#include <linux/ata.h>
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/genhd.h> 28#include <linux/genhd.h>
29#include <linux/version.h>
30 29
31/* Offset of Subsystem Device ID in pci configuration space */ 30/* Offset of Subsystem Device ID in pci configuration space */
32#define PCI_SUBSYSTEM_DEVICEID 0x2E 31#define PCI_SUBSYSTEM_DEVICEID 0x2E
@@ -111,6 +110,8 @@
111 #define dbg_printk(format, arg...) 110 #define dbg_printk(format, arg...)
112#endif 111#endif
113 112
113#define MTIP_DFS_MAX_BUF_SIZE 1024
114
114#define __force_bit2int (unsigned int __force) 115#define __force_bit2int (unsigned int __force)
115 116
116enum { 117enum {
@@ -447,6 +448,8 @@ struct driver_data {
447 unsigned long dd_flag; /* NOTE: use atomic bit operations on this */ 448 unsigned long dd_flag; /* NOTE: use atomic bit operations on this */
448 449
449 struct task_struct *mtip_svc_handler; /* task_struct of svc thd */ 450 struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
451
452 struct dentry *dfs_node;
450}; 453};
451 454
452#endif 455#endif
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 061427a75d37..d07c9f7fded6 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -154,6 +154,7 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
154 struct msghdr msg; 154 struct msghdr msg;
155 struct kvec iov; 155 struct kvec iov;
156 sigset_t blocked, oldset; 156 sigset_t blocked, oldset;
157 unsigned long pflags = current->flags;
157 158
158 if (unlikely(!sock)) { 159 if (unlikely(!sock)) {
159 dev_err(disk_to_dev(nbd->disk), 160 dev_err(disk_to_dev(nbd->disk),
@@ -167,8 +168,9 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
167 siginitsetinv(&blocked, sigmask(SIGKILL)); 168 siginitsetinv(&blocked, sigmask(SIGKILL));
168 sigprocmask(SIG_SETMASK, &blocked, &oldset); 169 sigprocmask(SIG_SETMASK, &blocked, &oldset);
169 170
171 current->flags |= PF_MEMALLOC;
170 do { 172 do {
171 sock->sk->sk_allocation = GFP_NOIO; 173 sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
172 iov.iov_base = buf; 174 iov.iov_base = buf;
173 iov.iov_len = size; 175 iov.iov_len = size;
174 msg.msg_name = NULL; 176 msg.msg_name = NULL;
@@ -214,6 +216,7 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
214 } while (size > 0); 216 } while (size > 0);
215 217
216 sigprocmask(SIG_SETMASK, &oldset, NULL); 218 sigprocmask(SIG_SETMASK, &oldset, NULL);
219 tsk_restore_flags(current, pflags, PF_MEMALLOC);
217 220
218 return result; 221 return result;
219} 222}
@@ -405,6 +408,7 @@ static int nbd_do_it(struct nbd_device *nbd)
405 408
406 BUG_ON(nbd->magic != NBD_MAGIC); 409 BUG_ON(nbd->magic != NBD_MAGIC);
407 410
411 sk_set_memalloc(nbd->sock->sk);
408 nbd->pid = task_pid_nr(current); 412 nbd->pid = task_pid_nr(current);
409 ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr); 413 ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
410 if (ret) { 414 if (ret) {
@@ -481,7 +485,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
481 nbd_end_request(req); 485 nbd_end_request(req);
482 } else { 486 } else {
483 spin_lock(&nbd->queue_lock); 487 spin_lock(&nbd->queue_lock);
484 list_add(&req->queuelist, &nbd->queue_head); 488 list_add_tail(&req->queuelist, &nbd->queue_head);
485 spin_unlock(&nbd->queue_lock); 489 spin_unlock(&nbd->queue_lock);
486 } 490 }
487 491
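The nbd hunks let the socket transmit path dip into the memalloc reserves, so pages being written back over the network cannot deadlock waiting for memory that only their own completion would free. A sketch of the three pieces working together; my_net_send() and its socket are hypothetical:

#include <linux/gfp.h>
#include <linux/sched.h>
#include <net/sock.h>

/* sk is the socket that backs the block device */
static void my_sock_prepare(struct sock *sk)
{
	/* let skb allocations for this socket use the reserves */
	sk_set_memalloc(sk);
}

static int my_net_send(struct sock *sk, void *buf, int size)
{
	unsigned long pflags = current->flags;
	int result = 0;

	current->flags |= PF_MEMALLOC;
	sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;

	/* ... the kernel_sendmsg() loop would go here ... */

	/* put PF_MEMALLOC back exactly as it was; other flags untouched */
	tsk_restore_flags(current, pflags, PF_MEMALLOC);
	return result;
}

tsk_restore_flags() restores only the named bit from the saved copy, which is why the original flags are sampled before PF_MEMALLOC is set.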
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 65665c9c42c6..9917943a3572 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -55,8 +55,6 @@
55 55
56#define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */ 56#define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
57 57
58#define RBD_MAX_MD_NAME_LEN (RBD_MAX_OBJ_NAME_LEN + sizeof(RBD_SUFFIX))
59#define RBD_MAX_POOL_NAME_LEN 64
60#define RBD_MAX_SNAP_NAME_LEN 32 58#define RBD_MAX_SNAP_NAME_LEN 32
61#define RBD_MAX_OPT_LEN 1024 59#define RBD_MAX_OPT_LEN 1024
62 60
@@ -78,13 +76,12 @@
78 */ 76 */
79struct rbd_image_header { 77struct rbd_image_header {
80 u64 image_size; 78 u64 image_size;
81 char block_name[32]; 79 char *object_prefix;
82 __u8 obj_order; 80 __u8 obj_order;
83 __u8 crypt_type; 81 __u8 crypt_type;
84 __u8 comp_type; 82 __u8 comp_type;
85 struct ceph_snap_context *snapc; 83 struct ceph_snap_context *snapc;
86 size_t snap_names_len; 84 size_t snap_names_len;
87 u64 snap_seq;
88 u32 total_snaps; 85 u32 total_snaps;
89 86
90 char *snap_names; 87 char *snap_names;
@@ -150,7 +147,7 @@ struct rbd_snap {
150 * a single device 147 * a single device
151 */ 148 */
152struct rbd_device { 149struct rbd_device {
153 int id; /* blkdev unique id */ 150 int dev_id; /* blkdev unique id */
154 151
155 int major; /* blkdev assigned major */ 152 int major; /* blkdev assigned major */
156 struct gendisk *disk; /* blkdev's gendisk and rq */ 153 struct gendisk *disk; /* blkdev's gendisk and rq */
@@ -163,20 +160,24 @@ struct rbd_device {
163 spinlock_t lock; /* queue lock */ 160 spinlock_t lock; /* queue lock */
164 161
165 struct rbd_image_header header; 162 struct rbd_image_header header;
166 char obj[RBD_MAX_OBJ_NAME_LEN]; /* rbd image name */ 163 char *image_name;
167 int obj_len; 164 size_t image_name_len;
168 char obj_md_name[RBD_MAX_MD_NAME_LEN]; /* hdr nm. */ 165 char *header_name;
169 char pool_name[RBD_MAX_POOL_NAME_LEN]; 166 char *pool_name;
170 int poolid; 167 int pool_id;
171 168
172 struct ceph_osd_event *watch_event; 169 struct ceph_osd_event *watch_event;
173 struct ceph_osd_request *watch_request; 170 struct ceph_osd_request *watch_request;
174 171
175 /* protects updating the header */ 172 /* protects updating the header */
176 struct rw_semaphore header_rwsem; 173 struct rw_semaphore header_rwsem;
177 char snap_name[RBD_MAX_SNAP_NAME_LEN]; 174 /* name of the snapshot this device reads from */
175 char *snap_name;
176 /* id of the snapshot this device reads from */
178 u64 snap_id; /* current snapshot id */ 177 u64 snap_id; /* current snapshot id */
179 int read_only; 178 /* whether the snap_id this device reads from still exists */
179 bool snap_exists;
180 int read_only;
180 181
181 struct list_head node; 182 struct list_head node;
182 183
@@ -201,8 +202,7 @@ static ssize_t rbd_snap_add(struct device *dev,
201 struct device_attribute *attr, 202 struct device_attribute *attr,
202 const char *buf, 203 const char *buf,
203 size_t count); 204 size_t count);
204static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev, 205static void __rbd_remove_snap_dev(struct rbd_snap *snap);
205 struct rbd_snap *snap);
206 206
207static ssize_t rbd_add(struct bus_type *bus, const char *buf, 207static ssize_t rbd_add(struct bus_type *bus, const char *buf,
208 size_t count); 208 size_t count);
@@ -240,7 +240,7 @@ static void rbd_put_dev(struct rbd_device *rbd_dev)
240 put_device(&rbd_dev->dev); 240 put_device(&rbd_dev->dev);
241} 241}
242 242
243static int __rbd_refresh_header(struct rbd_device *rbd_dev); 243static int rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver);
244 244
245static int rbd_open(struct block_device *bdev, fmode_t mode) 245static int rbd_open(struct block_device *bdev, fmode_t mode)
246{ 246{
@@ -273,9 +273,9 @@ static const struct block_device_operations rbd_bd_ops = {
273 273
274/* 274/*
275 * Initialize an rbd client instance. 275 * Initialize an rbd client instance.
276 * We own *opt. 276 * We own *ceph_opts.
277 */ 277 */
278static struct rbd_client *rbd_client_create(struct ceph_options *opt, 278static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts,
279 struct rbd_options *rbd_opts) 279 struct rbd_options *rbd_opts)
280{ 280{
281 struct rbd_client *rbdc; 281 struct rbd_client *rbdc;
@@ -291,10 +291,10 @@ static struct rbd_client *rbd_client_create(struct ceph_options *opt,
291 291
292 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 292 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
293 293
294 rbdc->client = ceph_create_client(opt, rbdc, 0, 0); 294 rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
295 if (IS_ERR(rbdc->client)) 295 if (IS_ERR(rbdc->client))
296 goto out_mutex; 296 goto out_mutex;
297 opt = NULL; /* Now rbdc->client is responsible for opt */ 297 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
298 298
299 ret = ceph_open_session(rbdc->client); 299 ret = ceph_open_session(rbdc->client);
300 if (ret < 0) 300 if (ret < 0)
@@ -317,23 +317,23 @@ out_mutex:
317 mutex_unlock(&ctl_mutex); 317 mutex_unlock(&ctl_mutex);
318 kfree(rbdc); 318 kfree(rbdc);
319out_opt: 319out_opt:
320 if (opt) 320 if (ceph_opts)
321 ceph_destroy_options(opt); 321 ceph_destroy_options(ceph_opts);
322 return ERR_PTR(ret); 322 return ERR_PTR(ret);
323} 323}
324 324
325/* 325/*
326 * Find a ceph client with specific addr and configuration. 326 * Find a ceph client with specific addr and configuration.
327 */ 327 */
328static struct rbd_client *__rbd_client_find(struct ceph_options *opt) 328static struct rbd_client *__rbd_client_find(struct ceph_options *ceph_opts)
329{ 329{
330 struct rbd_client *client_node; 330 struct rbd_client *client_node;
331 331
332 if (opt->flags & CEPH_OPT_NOSHARE) 332 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
333 return NULL; 333 return NULL;
334 334
335 list_for_each_entry(client_node, &rbd_client_list, node) 335 list_for_each_entry(client_node, &rbd_client_list, node)
336 if (ceph_compare_options(opt, client_node->client) == 0) 336 if (!ceph_compare_options(ceph_opts, client_node->client))
337 return client_node; 337 return client_node;
338 return NULL; 338 return NULL;
339} 339}
@@ -349,7 +349,7 @@ enum {
349 /* string args above */ 349 /* string args above */
350}; 350};
351 351
352static match_table_t rbdopt_tokens = { 352static match_table_t rbd_opts_tokens = {
353 {Opt_notify_timeout, "notify_timeout=%d"}, 353 {Opt_notify_timeout, "notify_timeout=%d"},
354 /* int args above */ 354 /* int args above */
355 /* string args above */ 355 /* string args above */
@@ -358,11 +358,11 @@ static match_table_t rbdopt_tokens = {
358 358
359static int parse_rbd_opts_token(char *c, void *private) 359static int parse_rbd_opts_token(char *c, void *private)
360{ 360{
361 struct rbd_options *rbdopt = private; 361 struct rbd_options *rbd_opts = private;
362 substring_t argstr[MAX_OPT_ARGS]; 362 substring_t argstr[MAX_OPT_ARGS];
363 int token, intval, ret; 363 int token, intval, ret;
364 364
365 token = match_token(c, rbdopt_tokens, argstr); 365 token = match_token(c, rbd_opts_tokens, argstr);
366 if (token < 0) 366 if (token < 0)
367 return -EINVAL; 367 return -EINVAL;
368 368
@@ -383,7 +383,7 @@ static int parse_rbd_opts_token(char *c, void *private)
383 383
384 switch (token) { 384 switch (token) {
385 case Opt_notify_timeout: 385 case Opt_notify_timeout:
386 rbdopt->notify_timeout = intval; 386 rbd_opts->notify_timeout = intval;
387 break; 387 break;
388 default: 388 default:
389 BUG_ON(token); 389 BUG_ON(token);
@@ -400,7 +400,7 @@ static struct rbd_client *rbd_get_client(const char *mon_addr,
400 char *options) 400 char *options)
401{ 401{
402 struct rbd_client *rbdc; 402 struct rbd_client *rbdc;
403 struct ceph_options *opt; 403 struct ceph_options *ceph_opts;
404 struct rbd_options *rbd_opts; 404 struct rbd_options *rbd_opts;
405 405
406 rbd_opts = kzalloc(sizeof(*rbd_opts), GFP_KERNEL); 406 rbd_opts = kzalloc(sizeof(*rbd_opts), GFP_KERNEL);
@@ -409,29 +409,29 @@ static struct rbd_client *rbd_get_client(const char *mon_addr,
409 409
410 rbd_opts->notify_timeout = RBD_NOTIFY_TIMEOUT_DEFAULT; 410 rbd_opts->notify_timeout = RBD_NOTIFY_TIMEOUT_DEFAULT;
411 411
412 opt = ceph_parse_options(options, mon_addr, 412 ceph_opts = ceph_parse_options(options, mon_addr,
413 mon_addr + mon_addr_len, 413 mon_addr + mon_addr_len,
414 parse_rbd_opts_token, rbd_opts); 414 parse_rbd_opts_token, rbd_opts);
415 if (IS_ERR(opt)) { 415 if (IS_ERR(ceph_opts)) {
416 kfree(rbd_opts); 416 kfree(rbd_opts);
417 return ERR_CAST(opt); 417 return ERR_CAST(ceph_opts);
418 } 418 }
419 419
420 spin_lock(&rbd_client_list_lock); 420 spin_lock(&rbd_client_list_lock);
421 rbdc = __rbd_client_find(opt); 421 rbdc = __rbd_client_find(ceph_opts);
422 if (rbdc) { 422 if (rbdc) {
423 /* using an existing client */ 423 /* using an existing client */
424 kref_get(&rbdc->kref); 424 kref_get(&rbdc->kref);
425 spin_unlock(&rbd_client_list_lock); 425 spin_unlock(&rbd_client_list_lock);
426 426
427 ceph_destroy_options(opt); 427 ceph_destroy_options(ceph_opts);
428 kfree(rbd_opts); 428 kfree(rbd_opts);
429 429
430 return rbdc; 430 return rbdc;
431 } 431 }
432 spin_unlock(&rbd_client_list_lock); 432 spin_unlock(&rbd_client_list_lock);
433 433
434 rbdc = rbd_client_create(opt, rbd_opts); 434 rbdc = rbd_client_create(ceph_opts, rbd_opts);
435 435
436 if (IS_ERR(rbdc)) 436 if (IS_ERR(rbdc))
437 kfree(rbd_opts); 437 kfree(rbd_opts);
@@ -480,46 +480,60 @@ static void rbd_coll_release(struct kref *kref)
480 kfree(coll); 480 kfree(coll);
481} 481}
482 482
483static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
484{
485 return !memcmp(&ondisk->text,
486 RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT));
487}
488
483/* 489/*
484 * Create a new header structure, translate header format from the on-disk 490 * Create a new header structure, translate header format from the on-disk
485 * header. 491 * header.
486 */ 492 */
487static int rbd_header_from_disk(struct rbd_image_header *header, 493static int rbd_header_from_disk(struct rbd_image_header *header,
488 struct rbd_image_header_ondisk *ondisk, 494 struct rbd_image_header_ondisk *ondisk,
489 u32 allocated_snaps, 495 u32 allocated_snaps)
490 gfp_t gfp_flags)
491{ 496{
492 u32 i, snap_count; 497 u32 snap_count;
493 498
494 if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) 499 if (!rbd_dev_ondisk_valid(ondisk))
495 return -ENXIO; 500 return -ENXIO;
496 501
497 snap_count = le32_to_cpu(ondisk->snap_count); 502 snap_count = le32_to_cpu(ondisk->snap_count);
498 if (snap_count > (UINT_MAX - sizeof(struct ceph_snap_context)) 503 if (snap_count > (SIZE_MAX - sizeof(struct ceph_snap_context))
499 / sizeof (*ondisk)) 504 / sizeof (u64))
500 return -EINVAL; 505 return -EINVAL;
501 header->snapc = kmalloc(sizeof(struct ceph_snap_context) + 506 header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
502 snap_count * sizeof (*ondisk), 507 snap_count * sizeof(u64),
503 gfp_flags); 508 GFP_KERNEL);
504 if (!header->snapc) 509 if (!header->snapc)
505 return -ENOMEM; 510 return -ENOMEM;
506 511
507 header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
508 if (snap_count) { 512 if (snap_count) {
513 header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
509 header->snap_names = kmalloc(header->snap_names_len, 514 header->snap_names = kmalloc(header->snap_names_len,
510 gfp_flags); 515 GFP_KERNEL);
511 if (!header->snap_names) 516 if (!header->snap_names)
512 goto err_snapc; 517 goto err_snapc;
513 header->snap_sizes = kmalloc(snap_count * sizeof(u64), 518 header->snap_sizes = kmalloc(snap_count * sizeof(u64),
514 gfp_flags); 519 GFP_KERNEL);
515 if (!header->snap_sizes) 520 if (!header->snap_sizes)
516 goto err_names; 521 goto err_names;
517 } else { 522 } else {
523 WARN_ON(ondisk->snap_names_len);
524 header->snap_names_len = 0;
518 header->snap_names = NULL; 525 header->snap_names = NULL;
519 header->snap_sizes = NULL; 526 header->snap_sizes = NULL;
520 } 527 }
521 memcpy(header->block_name, ondisk->block_name, 528
529 header->object_prefix = kmalloc(sizeof (ondisk->block_name) + 1,
530 GFP_KERNEL);
531 if (!header->object_prefix)
532 goto err_sizes;
533
534 memcpy(header->object_prefix, ondisk->block_name,
522 sizeof(ondisk->block_name)); 535 sizeof(ondisk->block_name));
536 header->object_prefix[sizeof (ondisk->block_name)] = '\0';
523 537
524 header->image_size = le64_to_cpu(ondisk->image_size); 538 header->image_size = le64_to_cpu(ondisk->image_size);
525 header->obj_order = ondisk->options.order; 539 header->obj_order = ondisk->options.order;
@@ -527,11 +541,13 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
527 header->comp_type = ondisk->options.comp_type; 541 header->comp_type = ondisk->options.comp_type;
528 542
529 atomic_set(&header->snapc->nref, 1); 543 atomic_set(&header->snapc->nref, 1);
530 header->snap_seq = le64_to_cpu(ondisk->snap_seq); 544 header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
531 header->snapc->num_snaps = snap_count; 545 header->snapc->num_snaps = snap_count;
532 header->total_snaps = snap_count; 546 header->total_snaps = snap_count;
533 547
534 if (snap_count && allocated_snaps == snap_count) { 548 if (snap_count && allocated_snaps == snap_count) {
549 int i;
550
535 for (i = 0; i < snap_count; i++) { 551 for (i = 0; i < snap_count; i++) {
536 header->snapc->snaps[i] = 552 header->snapc->snaps[i] =
537 le64_to_cpu(ondisk->snaps[i].id); 553 le64_to_cpu(ondisk->snaps[i].id);
@@ -540,16 +556,22 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
540 } 556 }
541 557
542 /* copy snapshot names */ 558 /* copy snapshot names */
543 memcpy(header->snap_names, &ondisk->snaps[i], 559 memcpy(header->snap_names, &ondisk->snaps[snap_count],
544 header->snap_names_len); 560 header->snap_names_len);
545 } 561 }
546 562
547 return 0; 563 return 0;
548 564
565err_sizes:
566 kfree(header->snap_sizes);
567 header->snap_sizes = NULL;
549err_names: 568err_names:
550 kfree(header->snap_names); 569 kfree(header->snap_names);
570 header->snap_names = NULL;
551err_snapc: 571err_snapc:
552 kfree(header->snapc); 572 kfree(header->snapc);
573 header->snapc = NULL;
574
553 return -ENOMEM; 575 return -ENOMEM;
554} 576}
555 577
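Note the tightened bounds test in the new rbd_header_from_disk(): snap_count is now checked against SIZE_MAX using the element size that is actually allocated (a u64 snapshot id), so the kmalloc() size computation cannot wrap. The same guard in isolation, with hypothetical structure names standing in for ceph_snap_context:

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct my_snap_context {
	atomic_t nref;
	u64	seq;
	u32	num_snaps;
	u64	snaps[];		/* flexible array of snapshot ids */
};

static struct my_snap_context *my_snapc_alloc(u32 snap_count)
{
	/* reject counts whose size computation would overflow size_t */
	if (snap_count > (SIZE_MAX - sizeof(struct my_snap_context))
						/ sizeof(u64))
		return NULL;

	return kzalloc(sizeof(struct my_snap_context) +
		       snap_count * sizeof(u64), GFP_KERNEL);
}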
@@ -575,52 +597,50 @@ static int snap_by_name(struct rbd_image_header *header, const char *snap_name,
575 return -ENOENT; 597 return -ENOENT;
576} 598}
577 599
578static int rbd_header_set_snap(struct rbd_device *dev, u64 *size) 600static int rbd_header_set_snap(struct rbd_device *rbd_dev, u64 *size)
579{ 601{
580 struct rbd_image_header *header = &dev->header; 602 int ret;
581 struct ceph_snap_context *snapc = header->snapc;
582 int ret = -ENOENT;
583
584 BUILD_BUG_ON(sizeof (dev->snap_name) < sizeof (RBD_SNAP_HEAD_NAME));
585 603
586 down_write(&dev->header_rwsem); 604 down_write(&rbd_dev->header_rwsem);
587 605
588 if (!memcmp(dev->snap_name, RBD_SNAP_HEAD_NAME, 606 if (!memcmp(rbd_dev->snap_name, RBD_SNAP_HEAD_NAME,
589 sizeof (RBD_SNAP_HEAD_NAME))) { 607 sizeof (RBD_SNAP_HEAD_NAME))) {
590 if (header->total_snaps) 608 rbd_dev->snap_id = CEPH_NOSNAP;
591 snapc->seq = header->snap_seq; 609 rbd_dev->snap_exists = false;
592 else 610 rbd_dev->read_only = 0;
593 snapc->seq = 0;
594 dev->snap_id = CEPH_NOSNAP;
595 dev->read_only = 0;
596 if (size) 611 if (size)
597 *size = header->image_size; 612 *size = rbd_dev->header.image_size;
598 } else { 613 } else {
599 ret = snap_by_name(header, dev->snap_name, &snapc->seq, size); 614 u64 snap_id = 0;
615
616 ret = snap_by_name(&rbd_dev->header, rbd_dev->snap_name,
617 &snap_id, size);
600 if (ret < 0) 618 if (ret < 0)
601 goto done; 619 goto done;
602 dev->snap_id = snapc->seq; 620 rbd_dev->snap_id = snap_id;
603 dev->read_only = 1; 621 rbd_dev->snap_exists = true;
622 rbd_dev->read_only = 1;
604 } 623 }
605 624
606 ret = 0; 625 ret = 0;
607done: 626done:
608 up_write(&dev->header_rwsem); 627 up_write(&rbd_dev->header_rwsem);
609 return ret; 628 return ret;
610} 629}
611 630
612static void rbd_header_free(struct rbd_image_header *header) 631static void rbd_header_free(struct rbd_image_header *header)
613{ 632{
614 kfree(header->snapc); 633 kfree(header->object_prefix);
615 kfree(header->snap_names);
616 kfree(header->snap_sizes); 634 kfree(header->snap_sizes);
635 kfree(header->snap_names);
636 ceph_put_snap_context(header->snapc);
617} 637}
618 638
619/* 639/*
620 * get the actual striped segment name, offset and length 640 * get the actual striped segment name, offset and length
621 */ 641 */
622static u64 rbd_get_segment(struct rbd_image_header *header, 642static u64 rbd_get_segment(struct rbd_image_header *header,
623 const char *block_name, 643 const char *object_prefix,
624 u64 ofs, u64 len, 644 u64 ofs, u64 len,
625 char *seg_name, u64 *segofs) 645 char *seg_name, u64 *segofs)
626{ 646{
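rbd_header_set_snap() takes header_rwsem for writing because it mutates snap_id, snap_exists and read_only; the request path later in this patch takes the same semaphore for reading before sampling that state. The locking discipline in miniature, with illustrative variables:

#include <linux/rwsem.h>
#include <linux/types.h>

static DECLARE_RWSEM(my_header_rwsem);
static u64 my_snap_id;
static bool my_snap_exists;

/* writer: update snapshot state with the semaphore held exclusively */
static void my_set_snap(u64 id, bool exists)
{
	down_write(&my_header_rwsem);
	my_snap_id = id;
	my_snap_exists = exists;
	up_write(&my_header_rwsem);
}

/* reader: take a consistent sample; many readers may run concurrently */
static bool my_snap_ok(u64 *id)
{
	bool exists;

	down_read(&my_header_rwsem);
	*id = my_snap_id;
	exists = my_snap_exists;
	up_read(&my_header_rwsem);

	return exists;
}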
@@ -628,7 +648,7 @@ static u64 rbd_get_segment(struct rbd_image_header *header,
628 648
629 if (seg_name) 649 if (seg_name)
630 snprintf(seg_name, RBD_MAX_SEG_NAME_LEN, 650 snprintf(seg_name, RBD_MAX_SEG_NAME_LEN,
631 "%s.%012llx", block_name, seg); 651 "%s.%012llx", object_prefix, seg);
632 652
633 ofs = ofs & ((1 << header->obj_order) - 1); 653 ofs = ofs & ((1 << header->obj_order) - 1);
634 len = min_t(u64, len, (1 << header->obj_order) - ofs); 654 len = min_t(u64, len, (1 << header->obj_order) - ofs);
@@ -726,9 +746,8 @@ static struct bio *bio_chain_clone(struct bio **old, struct bio **next,
726 * split_bio will BUG_ON if this is not the case 746 * split_bio will BUG_ON if this is not the case
727 */ 747 */
728 dout("bio_chain_clone split! total=%d remaining=%d" 748 dout("bio_chain_clone split! total=%d remaining=%d"
729 "bi_size=%d\n", 749 "bi_size=%u\n",
730 (int)total, (int)len-total, 750 total, len - total, old_chain->bi_size);
731 (int)old_chain->bi_size);
732 751
733 /* split the bio. We'll release it either in the next 752 /* split the bio. We'll release it either in the next
734 call, or it will have to be released outside */ 753 call, or it will have to be released outside */
@@ -777,22 +796,24 @@ err_out:
777/* 796/*
778 * helpers for osd request op vectors. 797 * helpers for osd request op vectors.
779 */ 798 */
780static int rbd_create_rw_ops(struct ceph_osd_req_op **ops, 799static struct ceph_osd_req_op *rbd_create_rw_ops(int num_ops,
781 int num_ops, 800 int opcode, u32 payload_len)
782 int opcode, 801{
783 u32 payload_len) 802 struct ceph_osd_req_op *ops;
784{ 803
785 *ops = kzalloc(sizeof(struct ceph_osd_req_op) * (num_ops + 1), 804 ops = kzalloc(sizeof (*ops) * (num_ops + 1), GFP_NOIO);
786 GFP_NOIO); 805 if (!ops)
787 if (!*ops) 806 return NULL;
788 return -ENOMEM; 807
789 (*ops)[0].op = opcode; 808 ops[0].op = opcode;
809
790 /* 810 /*
791 * op extent offset and length will be set later on 811 * op extent offset and length will be set later on
792 * in calc_raw_layout() 812 * in calc_raw_layout()
793 */ 813 */
794 (*ops)[0].payload_len = payload_len; 814 ops[0].payload_len = payload_len;
795 return 0; 815
816 return ops;
796} 817}
797 818
798static void rbd_destroy_ops(struct ceph_osd_req_op *ops) 819static void rbd_destroy_ops(struct ceph_osd_req_op *ops)
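After this hunk rbd_create_rw_ops() hands back the ops array itself (NULL on failure) instead of filling a pointer out-parameter and returning an errno; every caller now maps NULL to -ENOMEM. The refactoring pattern in reduced form; my_op and the opcode value are placeholders:

#include <linux/errno.h>
#include <linux/slab.h>

struct my_op {
	int	op;
	u32	payload_len;
};

/* Returns a zero-terminated array of num_ops + 1 entries, or NULL. */
static struct my_op *my_create_ops(int num_ops, int opcode, u32 payload_len)
{
	struct my_op *ops;

	ops = kzalloc(sizeof(*ops) * (num_ops + 1), GFP_NOIO);
	if (!ops)
		return NULL;

	ops[0].op = opcode;
	ops[0].payload_len = payload_len;	/* extent set later by caller */

	return ops;
}

static int my_caller(void)
{
	struct my_op *ops = my_create_ops(1, 1 /* hypothetical opcode */, 0);

	if (!ops)
		return -ENOMEM;	/* the only way the allocator can fail */

	/* ... issue the request, then ... */
	kfree(ops);
	return 0;
}

Since allocation is the sole failure mode, returning the pointer removes a redundant error channel and a double level of indirection at every call site.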
@@ -808,8 +829,8 @@ static void rbd_coll_end_req_index(struct request *rq,
808 struct request_queue *q; 829 struct request_queue *q;
809 int min, max, i; 830 int min, max, i;
810 831
811 dout("rbd_coll_end_req_index %p index %d ret %d len %lld\n", 832 dout("rbd_coll_end_req_index %p index %d ret %d len %llu\n",
812 coll, index, ret, len); 833 coll, index, ret, (unsigned long long) len);
813 834
814 if (!rq) 835 if (!rq)
815 return; 836 return;
@@ -848,16 +869,15 @@ static void rbd_coll_end_req(struct rbd_request *req,
848 * Send ceph osd request 869 * Send ceph osd request
849 */ 870 */
850static int rbd_do_request(struct request *rq, 871static int rbd_do_request(struct request *rq,
851 struct rbd_device *dev, 872 struct rbd_device *rbd_dev,
852 struct ceph_snap_context *snapc, 873 struct ceph_snap_context *snapc,
853 u64 snapid, 874 u64 snapid,
854 const char *obj, u64 ofs, u64 len, 875 const char *object_name, u64 ofs, u64 len,
855 struct bio *bio, 876 struct bio *bio,
856 struct page **pages, 877 struct page **pages,
857 int num_pages, 878 int num_pages,
858 int flags, 879 int flags,
859 struct ceph_osd_req_op *ops, 880 struct ceph_osd_req_op *ops,
860 int num_reply,
861 struct rbd_req_coll *coll, 881 struct rbd_req_coll *coll,
862 int coll_index, 882 int coll_index,
863 void (*rbd_cb)(struct ceph_osd_request *req, 883 void (*rbd_cb)(struct ceph_osd_request *req,
@@ -887,15 +907,13 @@ static int rbd_do_request(struct request *rq,
887 req_data->coll_index = coll_index; 907 req_data->coll_index = coll_index;
888 } 908 }
889 909
890 dout("rbd_do_request obj=%s ofs=%lld len=%lld\n", obj, len, ofs); 910 dout("rbd_do_request object_name=%s ofs=%llu len=%llu\n", object_name,
891 911 (unsigned long long) ofs, (unsigned long long) len);
892 down_read(&dev->header_rwsem);
893 912
894 osdc = &dev->rbd_client->client->osdc; 913 osdc = &rbd_dev->rbd_client->client->osdc;
895 req = ceph_osdc_alloc_request(osdc, flags, snapc, ops, 914 req = ceph_osdc_alloc_request(osdc, flags, snapc, ops,
896 false, GFP_NOIO, pages, bio); 915 false, GFP_NOIO, pages, bio);
897 if (!req) { 916 if (!req) {
898 up_read(&dev->header_rwsem);
899 ret = -ENOMEM; 917 ret = -ENOMEM;
900 goto done_pages; 918 goto done_pages;
901 } 919 }
@@ -912,7 +930,7 @@ static int rbd_do_request(struct request *rq,
912 reqhead = req->r_request->front.iov_base; 930 reqhead = req->r_request->front.iov_base;
913 reqhead->snapid = cpu_to_le64(CEPH_NOSNAP); 931 reqhead->snapid = cpu_to_le64(CEPH_NOSNAP);
914 932
915 strncpy(req->r_oid, obj, sizeof(req->r_oid)); 933 strncpy(req->r_oid, object_name, sizeof(req->r_oid));
916 req->r_oid_len = strlen(req->r_oid); 934 req->r_oid_len = strlen(req->r_oid);
917 935
918 layout = &req->r_file_layout; 936 layout = &req->r_file_layout;
@@ -920,7 +938,7 @@ static int rbd_do_request(struct request *rq,
920 layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER); 938 layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
921 layout->fl_stripe_count = cpu_to_le32(1); 939 layout->fl_stripe_count = cpu_to_le32(1);
922 layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER); 940 layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
923 layout->fl_pg_pool = cpu_to_le32(dev->poolid); 941 layout->fl_pg_pool = cpu_to_le32(rbd_dev->pool_id);
924 ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno, 942 ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
925 req, ops); 943 req, ops);
926 944
@@ -929,7 +947,6 @@ static int rbd_do_request(struct request *rq,
929 snapc, 947 snapc,
930 &mtime, 948 &mtime,
931 req->r_oid, req->r_oid_len); 949 req->r_oid, req->r_oid_len);
932 up_read(&dev->header_rwsem);
933 950
934 if (linger_req) { 951 if (linger_req) {
935 ceph_osdc_set_request_linger(osdc, req); 952 ceph_osdc_set_request_linger(osdc, req);
@@ -944,8 +961,9 @@ static int rbd_do_request(struct request *rq,
944 ret = ceph_osdc_wait_request(osdc, req); 961 ret = ceph_osdc_wait_request(osdc, req);
945 if (ver) 962 if (ver)
946 *ver = le64_to_cpu(req->r_reassert_version.version); 963 *ver = le64_to_cpu(req->r_reassert_version.version);
947 dout("reassert_ver=%lld\n", 964 dout("reassert_ver=%llu\n",
948 le64_to_cpu(req->r_reassert_version.version)); 965 (unsigned long long)
966 le64_to_cpu(req->r_reassert_version.version));
949 ceph_osdc_put_request(req); 967 ceph_osdc_put_request(req);
950 } 968 }
951 return ret; 969 return ret;
@@ -977,9 +995,10 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
977 op = (void *)(replyhead + 1); 995 op = (void *)(replyhead + 1);
978 rc = le32_to_cpu(replyhead->result); 996 rc = le32_to_cpu(replyhead->result);
979 bytes = le64_to_cpu(op->extent.length); 997 bytes = le64_to_cpu(op->extent.length);
980 read_op = (le32_to_cpu(op->op) == CEPH_OSD_OP_READ); 998 read_op = (le16_to_cpu(op->op) == CEPH_OSD_OP_READ);
981 999
982 dout("rbd_req_cb bytes=%lld readop=%d rc=%d\n", bytes, read_op, rc); 1000 dout("rbd_req_cb bytes=%llu readop=%d rc=%d\n",
1001 (unsigned long long) bytes, read_op, (int) rc);
983 1002
984 if (rc == -ENOENT && read_op) { 1003 if (rc == -ENOENT && read_op) {
985 zero_bio_chain(req_data->bio, 0); 1004 zero_bio_chain(req_data->bio, 0);
@@ -1006,14 +1025,12 @@ static void rbd_simple_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg
1006/* 1025/*
1007 * Do a synchronous ceph osd operation 1026 * Do a synchronous ceph osd operation
1008 */ 1027 */
1009static int rbd_req_sync_op(struct rbd_device *dev, 1028static int rbd_req_sync_op(struct rbd_device *rbd_dev,
1010 struct ceph_snap_context *snapc, 1029 struct ceph_snap_context *snapc,
1011 u64 snapid, 1030 u64 snapid,
1012 int opcode,
1013 int flags, 1031 int flags,
1014 struct ceph_osd_req_op *orig_ops, 1032 struct ceph_osd_req_op *ops,
1015 int num_reply, 1033 const char *object_name,
1016 const char *obj,
1017 u64 ofs, u64 len, 1034 u64 ofs, u64 len,
1018 char *buf, 1035 char *buf,
1019 struct ceph_osd_request **linger_req, 1036 struct ceph_osd_request **linger_req,
@@ -1022,45 +1039,28 @@ static int rbd_req_sync_op(struct rbd_device *dev,
1022 int ret; 1039 int ret;
1023 struct page **pages; 1040 struct page **pages;
1024 int num_pages; 1041 int num_pages;
1025 struct ceph_osd_req_op *ops = orig_ops; 1042
1026 u32 payload_len; 1043 BUG_ON(ops == NULL);
1027 1044
1028 num_pages = calc_pages_for(ofs , len); 1045 num_pages = calc_pages_for(ofs , len);
1029 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 1046 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1030 if (IS_ERR(pages)) 1047 if (IS_ERR(pages))
1031 return PTR_ERR(pages); 1048 return PTR_ERR(pages);
1032 1049
1033 if (!orig_ops) { 1050 ret = rbd_do_request(NULL, rbd_dev, snapc, snapid,
1034 payload_len = (flags & CEPH_OSD_FLAG_WRITE ? len : 0); 1051 object_name, ofs, len, NULL,
1035 ret = rbd_create_rw_ops(&ops, 1, opcode, payload_len);
1036 if (ret < 0)
1037 goto done;
1038
1039 if ((flags & CEPH_OSD_FLAG_WRITE) && buf) {
1040 ret = ceph_copy_to_page_vector(pages, buf, ofs, len);
1041 if (ret < 0)
1042 goto done_ops;
1043 }
1044 }
1045
1046 ret = rbd_do_request(NULL, dev, snapc, snapid,
1047 obj, ofs, len, NULL,
1048 pages, num_pages, 1052 pages, num_pages,
1049 flags, 1053 flags,
1050 ops, 1054 ops,
1051 2,
1052 NULL, 0, 1055 NULL, 0,
1053 NULL, 1056 NULL,
1054 linger_req, ver); 1057 linger_req, ver);
1055 if (ret < 0) 1058 if (ret < 0)
1056 goto done_ops; 1059 goto done;
1057 1060
1058 if ((flags & CEPH_OSD_FLAG_READ) && buf) 1061 if ((flags & CEPH_OSD_FLAG_READ) && buf)
1059 ret = ceph_copy_from_page_vector(pages, buf, ofs, ret); 1062 ret = ceph_copy_from_page_vector(pages, buf, ofs, ret);
1060 1063
1061done_ops:
1062 if (!orig_ops)
1063 rbd_destroy_ops(ops);
1064done: 1064done:
1065 ceph_release_page_vector(pages, num_pages); 1065 ceph_release_page_vector(pages, num_pages);
1066 return ret; 1066 return ret;
@@ -1070,10 +1070,10 @@ done:
1070 * Do an asynchronous ceph osd operation 1070 * Do an asynchronous ceph osd operation
1071 */ 1071 */
1072static int rbd_do_op(struct request *rq, 1072static int rbd_do_op(struct request *rq,
1073 struct rbd_device *rbd_dev , 1073 struct rbd_device *rbd_dev,
1074 struct ceph_snap_context *snapc, 1074 struct ceph_snap_context *snapc,
1075 u64 snapid, 1075 u64 snapid,
1076 int opcode, int flags, int num_reply, 1076 int opcode, int flags,
1077 u64 ofs, u64 len, 1077 u64 ofs, u64 len,
1078 struct bio *bio, 1078 struct bio *bio,
1079 struct rbd_req_coll *coll, 1079 struct rbd_req_coll *coll,
@@ -1091,14 +1091,15 @@ static int rbd_do_op(struct request *rq,
1091 return -ENOMEM; 1091 return -ENOMEM;
1092 1092
1093 seg_len = rbd_get_segment(&rbd_dev->header, 1093 seg_len = rbd_get_segment(&rbd_dev->header,
1094 rbd_dev->header.block_name, 1094 rbd_dev->header.object_prefix,
1095 ofs, len, 1095 ofs, len,
1096 seg_name, &seg_ofs); 1096 seg_name, &seg_ofs);
1097 1097
1098 payload_len = (flags & CEPH_OSD_FLAG_WRITE ? seg_len : 0); 1098 payload_len = (flags & CEPH_OSD_FLAG_WRITE ? seg_len : 0);
1099 1099
1100 ret = rbd_create_rw_ops(&ops, 1, opcode, payload_len); 1100 ret = -ENOMEM;
1101 if (ret < 0) 1101 ops = rbd_create_rw_ops(1, opcode, payload_len);
1102 if (!ops)
1102 goto done; 1103 goto done;
1103 1104
1104 /* we've taken care of segment sizes earlier when we 1105 /* we've taken care of segment sizes earlier when we
@@ -1112,7 +1113,6 @@ static int rbd_do_op(struct request *rq,
1112 NULL, 0, 1113 NULL, 0,
1113 flags, 1114 flags,
1114 ops, 1115 ops,
1115 num_reply,
1116 coll, coll_index, 1116 coll, coll_index,
1117 rbd_req_cb, 0, NULL); 1117 rbd_req_cb, 0, NULL);
1118 1118
@@ -1136,7 +1136,6 @@ static int rbd_req_write(struct request *rq,
1136 return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP, 1136 return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP,
1137 CEPH_OSD_OP_WRITE, 1137 CEPH_OSD_OP_WRITE,
1138 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, 1138 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1139 2,
1140 ofs, len, bio, coll, coll_index); 1139 ofs, len, bio, coll, coll_index);
1141} 1140}
1142 1141
@@ -1155,55 +1154,58 @@ static int rbd_req_read(struct request *rq,
1155 snapid, 1154 snapid,
1156 CEPH_OSD_OP_READ, 1155 CEPH_OSD_OP_READ,
1157 CEPH_OSD_FLAG_READ, 1156 CEPH_OSD_FLAG_READ,
1158 2,
1159 ofs, len, bio, coll, coll_index); 1157 ofs, len, bio, coll, coll_index);
1160} 1158}
1161 1159
1162/* 1160/*
1163 * Request sync osd read 1161 * Request sync osd read
1164 */ 1162 */
1165static int rbd_req_sync_read(struct rbd_device *dev, 1163static int rbd_req_sync_read(struct rbd_device *rbd_dev,
1166 struct ceph_snap_context *snapc,
1167 u64 snapid, 1164 u64 snapid,
1168 const char *obj, 1165 const char *object_name,
1169 u64 ofs, u64 len, 1166 u64 ofs, u64 len,
1170 char *buf, 1167 char *buf,
1171 u64 *ver) 1168 u64 *ver)
1172{ 1169{
1173 return rbd_req_sync_op(dev, NULL, 1170 struct ceph_osd_req_op *ops;
1171 int ret;
1172
1173 ops = rbd_create_rw_ops(1, CEPH_OSD_OP_READ, 0);
1174 if (!ops)
1175 return -ENOMEM;
1176
1177 ret = rbd_req_sync_op(rbd_dev, NULL,
1174 snapid, 1178 snapid,
1175 CEPH_OSD_OP_READ,
1176 CEPH_OSD_FLAG_READ, 1179 CEPH_OSD_FLAG_READ,
1177 NULL, 1180 ops, object_name, ofs, len, buf, NULL, ver);
1178 1, obj, ofs, len, buf, NULL, ver); 1181 rbd_destroy_ops(ops);
1182
1183 return ret;
1179} 1184}
1180 1185
1181/* 1186/*
1182 * Request sync osd notify ack 1187 * Request sync osd notify ack
1183 */ 1188 */
1184static int rbd_req_sync_notify_ack(struct rbd_device *dev, 1189static int rbd_req_sync_notify_ack(struct rbd_device *rbd_dev,
1185 u64 ver, 1190 u64 ver,
1186 u64 notify_id, 1191 u64 notify_id)
1187 const char *obj)
1188{ 1192{
1189 struct ceph_osd_req_op *ops; 1193 struct ceph_osd_req_op *ops;
1190 struct page **pages = NULL;
1191 int ret; 1194 int ret;
1192 1195
1193 ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0); 1196 ops = rbd_create_rw_ops(1, CEPH_OSD_OP_NOTIFY_ACK, 0);
1194 if (ret < 0) 1197 if (!ops)
1195 return ret; 1198 return -ENOMEM;
1196 1199
1197 ops[0].watch.ver = cpu_to_le64(dev->header.obj_version); 1200 ops[0].watch.ver = cpu_to_le64(ver);
1198 ops[0].watch.cookie = notify_id; 1201 ops[0].watch.cookie = notify_id;
1199 ops[0].watch.flag = 0; 1202 ops[0].watch.flag = 0;
1200 1203
1201 ret = rbd_do_request(NULL, dev, NULL, CEPH_NOSNAP, 1204 ret = rbd_do_request(NULL, rbd_dev, NULL, CEPH_NOSNAP,
1202 obj, 0, 0, NULL, 1205 rbd_dev->header_name, 0, 0, NULL,
1203 pages, 0, 1206 NULL, 0,
1204 CEPH_OSD_FLAG_READ, 1207 CEPH_OSD_FLAG_READ,
1205 ops, 1208 ops,
1206 1,
1207 NULL, 0, 1209 NULL, 0,
1208 rbd_simple_req_cb, 0, NULL); 1210 rbd_simple_req_cb, 0, NULL);
1209 1211
@@ -1213,54 +1215,53 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev,
1213 1215
1214static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) 1216static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
1215{ 1217{
1216 struct rbd_device *dev = (struct rbd_device *)data; 1218 struct rbd_device *rbd_dev = (struct rbd_device *)data;
1219 u64 hver;
1217 int rc; 1220 int rc;
1218 1221
1219 if (!dev) 1222 if (!rbd_dev)
1220 return; 1223 return;
1221 1224
1222 dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name, 1225 dout("rbd_watch_cb %s notify_id=%llu opcode=%u\n",
1223 notify_id, (int)opcode); 1226 rbd_dev->header_name, (unsigned long long) notify_id,
1224 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 1227 (unsigned int) opcode);
1225 rc = __rbd_refresh_header(dev); 1228 rc = rbd_refresh_header(rbd_dev, &hver);
1226 mutex_unlock(&ctl_mutex);
1227 if (rc) 1229 if (rc)
1228 pr_warning(RBD_DRV_NAME "%d got notification but failed to " 1230 pr_warning(RBD_DRV_NAME "%d got notification but failed to "
1229 " update snaps: %d\n", dev->major, rc); 1231 " update snaps: %d\n", rbd_dev->major, rc);
1230 1232
1231 rbd_req_sync_notify_ack(dev, ver, notify_id, dev->obj_md_name); 1233 rbd_req_sync_notify_ack(rbd_dev, hver, notify_id);
1232} 1234}
1233 1235
1234/* 1236/*
1235 * Request sync osd watch 1237 * Request sync osd watch
1236 */ 1238 */
1237static int rbd_req_sync_watch(struct rbd_device *dev, 1239static int rbd_req_sync_watch(struct rbd_device *rbd_dev)
1238 const char *obj,
1239 u64 ver)
1240{ 1240{
1241 struct ceph_osd_req_op *ops; 1241 struct ceph_osd_req_op *ops;
1242 struct ceph_osd_client *osdc = &dev->rbd_client->client->osdc; 1242 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1243 int ret;
1243 1244
1244 int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_WATCH, 0); 1245 ops = rbd_create_rw_ops(1, CEPH_OSD_OP_WATCH, 0);
1245 if (ret < 0) 1246 if (!ops)
1246 return ret; 1247 return -ENOMEM;
1247 1248
1248 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, 0, 1249 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, 0,
1249 (void *)dev, &dev->watch_event); 1250 (void *)rbd_dev, &rbd_dev->watch_event);
1250 if (ret < 0) 1251 if (ret < 0)
1251 goto fail; 1252 goto fail;
1252 1253
1253 ops[0].watch.ver = cpu_to_le64(ver); 1254 ops[0].watch.ver = cpu_to_le64(rbd_dev->header.obj_version);
1254 ops[0].watch.cookie = cpu_to_le64(dev->watch_event->cookie); 1255 ops[0].watch.cookie = cpu_to_le64(rbd_dev->watch_event->cookie);
1255 ops[0].watch.flag = 1; 1256 ops[0].watch.flag = 1;
1256 1257
1257 ret = rbd_req_sync_op(dev, NULL, 1258 ret = rbd_req_sync_op(rbd_dev, NULL,
1258 CEPH_NOSNAP, 1259 CEPH_NOSNAP,
1259 0,
1260 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, 1260 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1261 ops, 1261 ops,
1262 1, obj, 0, 0, NULL, 1262 rbd_dev->header_name,
1263 &dev->watch_request, NULL); 1263 0, 0, NULL,
1264 &rbd_dev->watch_request, NULL);
1264 1265
1265 if (ret < 0) 1266 if (ret < 0)
1266 goto fail_event; 1267 goto fail_event;
@@ -1269,8 +1270,8 @@ static int rbd_req_sync_watch(struct rbd_device *dev,
1269 return 0; 1270 return 0;
1270 1271
1271fail_event: 1272fail_event:
1272 ceph_osdc_cancel_event(dev->watch_event); 1273 ceph_osdc_cancel_event(rbd_dev->watch_event);
1273 dev->watch_event = NULL; 1274 rbd_dev->watch_event = NULL;
1274fail: 1275fail:
1275 rbd_destroy_ops(ops); 1276 rbd_destroy_ops(ops);
1276 return ret; 1277 return ret;
@@ -1279,64 +1280,65 @@ fail:
1279/* 1280/*
1280 * Request sync osd unwatch 1281 * Request sync osd unwatch
1281 */ 1282 */
1282static int rbd_req_sync_unwatch(struct rbd_device *dev, 1283static int rbd_req_sync_unwatch(struct rbd_device *rbd_dev)
1283 const char *obj)
1284{ 1284{
1285 struct ceph_osd_req_op *ops; 1285 struct ceph_osd_req_op *ops;
1286 int ret;
1286 1287
1287 int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_WATCH, 0); 1288 ops = rbd_create_rw_ops(1, CEPH_OSD_OP_WATCH, 0);
1288 if (ret < 0) 1289 if (!ops)
1289 return ret; 1290 return -ENOMEM;
1290 1291
1291 ops[0].watch.ver = 0; 1292 ops[0].watch.ver = 0;
1292 ops[0].watch.cookie = cpu_to_le64(dev->watch_event->cookie); 1293 ops[0].watch.cookie = cpu_to_le64(rbd_dev->watch_event->cookie);
1293 ops[0].watch.flag = 0; 1294 ops[0].watch.flag = 0;
1294 1295
1295 ret = rbd_req_sync_op(dev, NULL, 1296 ret = rbd_req_sync_op(rbd_dev, NULL,
1296 CEPH_NOSNAP, 1297 CEPH_NOSNAP,
1297 0,
1298 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, 1298 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1299 ops, 1299 ops,
1300 1, obj, 0, 0, NULL, NULL, NULL); 1300 rbd_dev->header_name,
1301 0, 0, NULL, NULL, NULL);
1302
1301 1303
1302 rbd_destroy_ops(ops); 1304 rbd_destroy_ops(ops);
1303 ceph_osdc_cancel_event(dev->watch_event); 1305 ceph_osdc_cancel_event(rbd_dev->watch_event);
1304 dev->watch_event = NULL; 1306 rbd_dev->watch_event = NULL;
1305 return ret; 1307 return ret;
1306} 1308}
1307 1309
1308struct rbd_notify_info { 1310struct rbd_notify_info {
1309 struct rbd_device *dev; 1311 struct rbd_device *rbd_dev;
1310}; 1312};
1311 1313
1312static void rbd_notify_cb(u64 ver, u64 notify_id, u8 opcode, void *data) 1314static void rbd_notify_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
1313{ 1315{
1314 struct rbd_device *dev = (struct rbd_device *)data; 1316 struct rbd_device *rbd_dev = (struct rbd_device *)data;
1315 if (!dev) 1317 if (!rbd_dev)
1316 return; 1318 return;
1317 1319
1318 dout("rbd_notify_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name, 1320 dout("rbd_notify_cb %s notify_id=%llu opcode=%u\n",
1319 notify_id, (int)opcode); 1321 rbd_dev->header_name, (unsigned long long) notify_id,
1322 (unsigned int) opcode);
1320} 1323}
1321 1324
1322/* 1325/*
1323 * Request sync osd notify 1326 * Request sync osd notify
1324 */ 1327 */
1325static int rbd_req_sync_notify(struct rbd_device *dev, 1328static int rbd_req_sync_notify(struct rbd_device *rbd_dev)
1326 const char *obj)
1327{ 1329{
1328 struct ceph_osd_req_op *ops; 1330 struct ceph_osd_req_op *ops;
1329 struct ceph_osd_client *osdc = &dev->rbd_client->client->osdc; 1331 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1330 struct ceph_osd_event *event; 1332 struct ceph_osd_event *event;
1331 struct rbd_notify_info info; 1333 struct rbd_notify_info info;
1332 int payload_len = sizeof(u32) + sizeof(u32); 1334 int payload_len = sizeof(u32) + sizeof(u32);
1333 int ret; 1335 int ret;
1334 1336
1335 ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY, payload_len); 1337 ops = rbd_create_rw_ops(1, CEPH_OSD_OP_NOTIFY, payload_len);
1336 if (ret < 0) 1338 if (!ops)
1337 return ret; 1339 return -ENOMEM;
1338 1340
1339 info.dev = dev; 1341 info.rbd_dev = rbd_dev;
1340 1342
1341 ret = ceph_osdc_create_event(osdc, rbd_notify_cb, 1, 1343 ret = ceph_osdc_create_event(osdc, rbd_notify_cb, 1,
1342 (void *)&info, &event); 1344 (void *)&info, &event);
@@ -1349,12 +1351,12 @@ static int rbd_req_sync_notify(struct rbd_device *dev,
1349 ops[0].watch.prot_ver = RADOS_NOTIFY_VER; 1351 ops[0].watch.prot_ver = RADOS_NOTIFY_VER;
1350 ops[0].watch.timeout = 12; 1352 ops[0].watch.timeout = 12;
1351 1353
1352 ret = rbd_req_sync_op(dev, NULL, 1354 ret = rbd_req_sync_op(rbd_dev, NULL,
1353 CEPH_NOSNAP, 1355 CEPH_NOSNAP,
1354 0,
1355 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, 1356 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1356 ops, 1357 ops,
1357 1, obj, 0, 0, NULL, NULL, NULL); 1358 rbd_dev->header_name,
1359 0, 0, NULL, NULL, NULL);
1358 if (ret < 0) 1360 if (ret < 0)
1359 goto fail_event; 1361 goto fail_event;
1360 1362
@@ -1373,36 +1375,37 @@ fail:
1373/* 1375/*
1374 * Request sync osd class method call 1377 * Request sync osd class method call
1375 */ 1377 */
1376static int rbd_req_sync_exec(struct rbd_device *dev, 1378static int rbd_req_sync_exec(struct rbd_device *rbd_dev,
1377 const char *obj, 1379 const char *object_name,
1378 const char *cls, 1380 const char *class_name,
1379 const char *method, 1381 const char *method_name,
1380 const char *data, 1382 const char *data,
1381 int len, 1383 int len,
1382 u64 *ver) 1384 u64 *ver)
1383{ 1385{
1384 struct ceph_osd_req_op *ops; 1386 struct ceph_osd_req_op *ops;
1385 int cls_len = strlen(cls); 1387 int class_name_len = strlen(class_name);
1386 int method_len = strlen(method); 1388 int method_name_len = strlen(method_name);
1387 int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_CALL, 1389 int ret;
1388 cls_len + method_len + len);
1389 if (ret < 0)
1390 return ret;
1391 1390
1392 ops[0].cls.class_name = cls; 1391 ops = rbd_create_rw_ops(1, CEPH_OSD_OP_CALL,
1393 ops[0].cls.class_len = (__u8)cls_len; 1392 class_name_len + method_name_len + len);
1394 ops[0].cls.method_name = method; 1393 if (!ops)
1395 ops[0].cls.method_len = (__u8)method_len; 1394 return -ENOMEM;
1395
1396 ops[0].cls.class_name = class_name;
1397 ops[0].cls.class_len = (__u8) class_name_len;
1398 ops[0].cls.method_name = method_name;
1399 ops[0].cls.method_len = (__u8) method_name_len;
1396 ops[0].cls.argc = 0; 1400 ops[0].cls.argc = 0;
1397 ops[0].cls.indata = data; 1401 ops[0].cls.indata = data;
1398 ops[0].cls.indata_len = len; 1402 ops[0].cls.indata_len = len;
1399 1403
1400 ret = rbd_req_sync_op(dev, NULL, 1404 ret = rbd_req_sync_op(rbd_dev, NULL,
1401 CEPH_NOSNAP, 1405 CEPH_NOSNAP,
1402 0,
1403 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, 1406 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1404 ops, 1407 ops,
1405 1, obj, 0, 0, NULL, NULL, ver); 1408 object_name, 0, 0, NULL, NULL, ver);
1406 1409
1407 rbd_destroy_ops(ops); 1410 rbd_destroy_ops(ops);
1408 1411
@@ -1437,10 +1440,12 @@ static void rbd_rq_fn(struct request_queue *q)
1437 struct bio *bio; 1440 struct bio *bio;
1438 struct bio *rq_bio, *next_bio = NULL; 1441 struct bio *rq_bio, *next_bio = NULL;
1439 bool do_write; 1442 bool do_write;
1440 int size, op_size = 0; 1443 unsigned int size;
1444 u64 op_size = 0;
1441 u64 ofs; 1445 u64 ofs;
1442 int num_segs, cur_seg = 0; 1446 int num_segs, cur_seg = 0;
1443 struct rbd_req_coll *coll; 1447 struct rbd_req_coll *coll;
1448 struct ceph_snap_context *snapc;
1444 1449
1445 /* peek at request from block layer */ 1450 /* peek at request from block layer */
1446 if (!rq) 1451 if (!rq)
@@ -1467,23 +1472,38 @@ static void rbd_rq_fn(struct request_queue *q)
1467 1472
1468 spin_unlock_irq(q->queue_lock); 1473 spin_unlock_irq(q->queue_lock);
1469 1474
1475 down_read(&rbd_dev->header_rwsem);
1476
1477 if (rbd_dev->snap_id != CEPH_NOSNAP && !rbd_dev->snap_exists) {
1478 up_read(&rbd_dev->header_rwsem);
1479 dout("request for non-existent snapshot");
1480 spin_lock_irq(q->queue_lock);
1481 __blk_end_request_all(rq, -ENXIO);
1482 continue;
1483 }
1484
1485 snapc = ceph_get_snap_context(rbd_dev->header.snapc);
1486
1487 up_read(&rbd_dev->header_rwsem);
1488
1470 dout("%s 0x%x bytes at 0x%llx\n", 1489 dout("%s 0x%x bytes at 0x%llx\n",
1471 do_write ? "write" : "read", 1490 do_write ? "write" : "read",
1472 size, blk_rq_pos(rq) * SECTOR_SIZE); 1491 size, (unsigned long long) blk_rq_pos(rq) * SECTOR_SIZE);
1473 1492
1474 num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size); 1493 num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size);
1475 coll = rbd_alloc_coll(num_segs); 1494 coll = rbd_alloc_coll(num_segs);
1476 if (!coll) { 1495 if (!coll) {
1477 spin_lock_irq(q->queue_lock); 1496 spin_lock_irq(q->queue_lock);
1478 __blk_end_request_all(rq, -ENOMEM); 1497 __blk_end_request_all(rq, -ENOMEM);
1498 ceph_put_snap_context(snapc);
1479 continue; 1499 continue;
1480 } 1500 }
1481 1501
1482 do { 1502 do {
1483 /* a bio clone to be passed down to OSD req */ 1503 /* a bio clone to be passed down to OSD req */
1484 dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt); 1504 dout("rq->bio->bi_vcnt=%hu\n", rq->bio->bi_vcnt);
1485 op_size = rbd_get_segment(&rbd_dev->header, 1505 op_size = rbd_get_segment(&rbd_dev->header,
1486 rbd_dev->header.block_name, 1506 rbd_dev->header.object_prefix,
1487 ofs, size, 1507 ofs, size,
1488 NULL, NULL); 1508 NULL, NULL);
1489 kref_get(&coll->kref); 1509 kref_get(&coll->kref);
@@ -1499,7 +1519,7 @@ static void rbd_rq_fn(struct request_queue *q)
1499 /* init OSD command: write or read */ 1519 /* init OSD command: write or read */
1500 if (do_write) 1520 if (do_write)
1501 rbd_req_write(rq, rbd_dev, 1521 rbd_req_write(rq, rbd_dev,
1502 rbd_dev->header.snapc, 1522 snapc,
1503 ofs, 1523 ofs,
1504 op_size, bio, 1524 op_size, bio,
1505 coll, cur_seg); 1525 coll, cur_seg);
@@ -1522,6 +1542,8 @@ next_seg:
1522 if (bp) 1542 if (bp)
1523 bio_pair_release(bp); 1543 bio_pair_release(bp);
1524 spin_lock_irq(q->queue_lock); 1544 spin_lock_irq(q->queue_lock);
1545
1546 ceph_put_snap_context(snapc);
1525 } 1547 }
1526} 1548}
1527 1549
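The rbd_rq_fn() hunk above pins the snapshot context with ceph_get_snap_context() while header_rwsem is held, and releases it with ceph_put_snap_context() on every exit path, because a concurrent header refresh may replace rbd_dev->header.snapc at any time (the refresh path below notes that "osd requests may still refer to snapc"). A minimal userspace sketch of that reference-counting rule, with stand-in types rather than the real ceph helpers:

#include <stdio.h>
#include <stdlib.h>

struct snap_context {
	int ref;			/* kref stand-in */
	unsigned long long seq;
};

static struct snap_context *snapc_get(struct snap_context *sc)
{
	sc->ref++;
	return sc;
}

static void snapc_put(struct snap_context *sc)
{
	if (sc && --sc->ref == 0) {
		printf("freeing snapc seq=%llu\n", sc->seq);
		free(sc);
	}
}

static struct snap_context *current_snapc;	/* header.snapc stand-in */

static void refresh_header(unsigned long long new_seq)
{
	struct snap_context *fresh = malloc(sizeof(*fresh));

	fresh->ref = 1;
	fresh->seq = new_seq;
	snapc_put(current_snapc);	/* osd requests may still refer to it */
	current_snapc = fresh;
}

int main(void)
{
	struct snap_context *snapc;

	current_snapc = malloc(sizeof(*current_snapc));
	current_snapc->ref = 1;
	current_snapc->seq = 1;

	snapc = snapc_get(current_snapc);	/* request path takes its own ref */
	refresh_header(2);			/* concurrent-style header swap */
	printf("request still sees seq=%llu\n", snapc->seq);
	snapc_put(snapc);			/* last ref: seq 1 freed here */
	snapc_put(current_snapc);
	return 0;
}

The printed order shows the old context surviving the header swap until the request path drops its own reference.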
@@ -1592,18 +1614,19 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
1592 return -ENOMEM; 1614 return -ENOMEM;
1593 1615
1594 rc = rbd_req_sync_read(rbd_dev, 1616 rc = rbd_req_sync_read(rbd_dev,
1595 NULL, CEPH_NOSNAP, 1617 CEPH_NOSNAP,
1596 rbd_dev->obj_md_name, 1618 rbd_dev->header_name,
1597 0, len, 1619 0, len,
1598 (char *)dh, &ver); 1620 (char *)dh, &ver);
1599 if (rc < 0) 1621 if (rc < 0)
1600 goto out_dh; 1622 goto out_dh;
1601 1623
1602 rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL); 1624 rc = rbd_header_from_disk(header, dh, snap_count);
1603 if (rc < 0) { 1625 if (rc < 0) {
1604 if (rc == -ENXIO) 1626 if (rc == -ENXIO)
1605 pr_warning("unrecognized header format" 1627 pr_warning("unrecognized header format"
1606 " for image %s", rbd_dev->obj); 1628 " for image %s\n",
1629 rbd_dev->image_name);
1607 goto out_dh; 1630 goto out_dh;
1608 } 1631 }
1609 1632
@@ -1628,7 +1651,7 @@ out_dh:
1628/* 1651/*
1629 * create a snapshot 1652 * create a snapshot
1630 */ 1653 */
1631static int rbd_header_add_snap(struct rbd_device *dev, 1654static int rbd_header_add_snap(struct rbd_device *rbd_dev,
1632 const char *snap_name, 1655 const char *snap_name,
1633 gfp_t gfp_flags) 1656 gfp_t gfp_flags)
1634{ 1657{
@@ -1636,16 +1659,15 @@ static int rbd_header_add_snap(struct rbd_device *dev,
1636 u64 new_snapid; 1659 u64 new_snapid;
1637 int ret; 1660 int ret;
1638 void *data, *p, *e; 1661 void *data, *p, *e;
1639 u64 ver;
1640 struct ceph_mon_client *monc; 1662 struct ceph_mon_client *monc;
1641 1663
1642 /* we should create a snapshot only if we're pointing at the head */ 1664 /* we should create a snapshot only if we're pointing at the head */
1643 if (dev->snap_id != CEPH_NOSNAP) 1665 if (rbd_dev->snap_id != CEPH_NOSNAP)
1644 return -EINVAL; 1666 return -EINVAL;
1645 1667
1646 monc = &dev->rbd_client->client->monc; 1668 monc = &rbd_dev->rbd_client->client->monc;
1647 ret = ceph_monc_create_snapid(monc, dev->poolid, &new_snapid); 1669 ret = ceph_monc_create_snapid(monc, rbd_dev->pool_id, &new_snapid);
1648 dout("created snapid=%lld\n", new_snapid); 1670 dout("created snapid=%llu\n", (unsigned long long) new_snapid);
1649 if (ret < 0) 1671 if (ret < 0)
1650 return ret; 1672 return ret;
1651 1673
@@ -1659,19 +1681,13 @@ static int rbd_header_add_snap(struct rbd_device *dev,
1659 ceph_encode_string_safe(&p, e, snap_name, name_len, bad); 1681 ceph_encode_string_safe(&p, e, snap_name, name_len, bad);
1660 ceph_encode_64_safe(&p, e, new_snapid, bad); 1682 ceph_encode_64_safe(&p, e, new_snapid, bad);
1661 1683
1662 ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add", 1684 ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
1663 data, p - data, &ver); 1685 "rbd", "snap_add",
1686 data, p - data, NULL);
1664 1687
1665 kfree(data); 1688 kfree(data);
1666 1689
1667 if (ret < 0) 1690 return ret < 0 ? ret : 0;
1668 return ret;
1669
1670 down_write(&dev->header_rwsem);
1671 dev->header.snapc->seq = new_snapid;
1672 up_write(&dev->header_rwsem);
1673
1674 return 0;
1675bad: 1691bad:
1676 return -ERANGE; 1692 return -ERANGE;
1677} 1693}
@@ -1679,52 +1695,52 @@ bad:
1679static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev) 1695static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
1680{ 1696{
1681 struct rbd_snap *snap; 1697 struct rbd_snap *snap;
1698 struct rbd_snap *next;
1682 1699
1683 while (!list_empty(&rbd_dev->snaps)) { 1700 list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
1684 snap = list_first_entry(&rbd_dev->snaps, struct rbd_snap, node); 1701 __rbd_remove_snap_dev(snap);
1685 __rbd_remove_snap_dev(rbd_dev, snap);
1686 }
1687} 1702}
1688 1703
1689/* 1704/*
1690 * only read the first part of the ondisk header, without the snaps info 1705 * only read the first part of the ondisk header, without the snaps info
1691 */ 1706 */
1692static int __rbd_refresh_header(struct rbd_device *rbd_dev) 1707static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
1693{ 1708{
1694 int ret; 1709 int ret;
1695 struct rbd_image_header h; 1710 struct rbd_image_header h;
1696 u64 snap_seq;
1697 int follow_seq = 0;
1698 1711
1699 ret = rbd_read_header(rbd_dev, &h); 1712 ret = rbd_read_header(rbd_dev, &h);
1700 if (ret < 0) 1713 if (ret < 0)
1701 return ret; 1714 return ret;
1702 1715
1703 /* resized? */
1704 set_capacity(rbd_dev->disk, h.image_size / SECTOR_SIZE);
1705
1706 down_write(&rbd_dev->header_rwsem); 1716 down_write(&rbd_dev->header_rwsem);
1707 1717
1708 snap_seq = rbd_dev->header.snapc->seq; 1718 /* resized? */
1709 if (rbd_dev->header.total_snaps && 1719 if (rbd_dev->snap_id == CEPH_NOSNAP) {
1710 rbd_dev->header.snapc->snaps[0] == snap_seq) 1720 sector_t size = (sector_t) h.image_size / SECTOR_SIZE;
1711 /* pointing at the head, will need to follow that
1712 if head moves */
1713 follow_seq = 1;
1714 1721
1715 kfree(rbd_dev->header.snapc); 1722 dout("setting size to %llu sectors", (unsigned long long) size);
1716 kfree(rbd_dev->header.snap_names); 1723 set_capacity(rbd_dev->disk, size);
1724 }
1725
1726 /* rbd_dev->header.object_prefix shouldn't change */
1717 kfree(rbd_dev->header.snap_sizes); 1727 kfree(rbd_dev->header.snap_sizes);
1728 kfree(rbd_dev->header.snap_names);
1729 /* osd requests may still refer to snapc */
1730 ceph_put_snap_context(rbd_dev->header.snapc);
1718 1731
1732 if (hver)
1733 *hver = h.obj_version;
1734 rbd_dev->header.obj_version = h.obj_version;
1735 rbd_dev->header.image_size = h.image_size;
1719 rbd_dev->header.total_snaps = h.total_snaps; 1736 rbd_dev->header.total_snaps = h.total_snaps;
1720 rbd_dev->header.snapc = h.snapc; 1737 rbd_dev->header.snapc = h.snapc;
1721 rbd_dev->header.snap_names = h.snap_names; 1738 rbd_dev->header.snap_names = h.snap_names;
1722 rbd_dev->header.snap_names_len = h.snap_names_len; 1739 rbd_dev->header.snap_names_len = h.snap_names_len;
1723 rbd_dev->header.snap_sizes = h.snap_sizes; 1740 rbd_dev->header.snap_sizes = h.snap_sizes;
1724 if (follow_seq) 1741 /* Free the extra copy of the object prefix */
1725 rbd_dev->header.snapc->seq = rbd_dev->header.snapc->snaps[0]; 1742 WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix));
1726 else 1743 kfree(h.object_prefix);
1727 rbd_dev->header.snapc->seq = snap_seq;
1728 1744
1729 ret = __rbd_init_snaps_header(rbd_dev); 1745 ret = __rbd_init_snaps_header(rbd_dev);
1730 1746
@@ -1733,6 +1749,17 @@ static int __rbd_refresh_header(struct rbd_device *rbd_dev)
1733 return ret; 1749 return ret;
1734} 1750}
1735 1751
1752static int rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
1753{
1754 int ret;
1755
1756 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1757 ret = __rbd_refresh_header(rbd_dev, hver);
1758 mutex_unlock(&ctl_mutex);
1759
1760 return ret;
1761}
1762
1736static int rbd_init_disk(struct rbd_device *rbd_dev) 1763static int rbd_init_disk(struct rbd_device *rbd_dev)
1737{ 1764{
1738 struct gendisk *disk; 1765 struct gendisk *disk;
@@ -1762,7 +1789,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
1762 goto out; 1789 goto out;
1763 1790
1764 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d", 1791 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
1765 rbd_dev->id); 1792 rbd_dev->dev_id);
1766 disk->major = rbd_dev->major; 1793 disk->major = rbd_dev->major;
1767 disk->first_minor = 0; 1794 disk->first_minor = 0;
1768 disk->fops = &rbd_bd_ops; 1795 disk->fops = &rbd_bd_ops;
@@ -1819,8 +1846,13 @@ static ssize_t rbd_size_show(struct device *dev,
1819 struct device_attribute *attr, char *buf) 1846 struct device_attribute *attr, char *buf)
1820{ 1847{
1821 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 1848 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
1849 sector_t size;
1850
1851 down_read(&rbd_dev->header_rwsem);
1852 size = get_capacity(rbd_dev->disk);
1853 up_read(&rbd_dev->header_rwsem);
1822 1854
1823 return sprintf(buf, "%llu\n", (unsigned long long)rbd_dev->header.image_size); 1855 return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE);
1824} 1856}
1825 1857
1826static ssize_t rbd_major_show(struct device *dev, 1858static ssize_t rbd_major_show(struct device *dev,
@@ -1848,12 +1880,20 @@ static ssize_t rbd_pool_show(struct device *dev,
1848 return sprintf(buf, "%s\n", rbd_dev->pool_name); 1880 return sprintf(buf, "%s\n", rbd_dev->pool_name);
1849} 1881}
1850 1882
1883static ssize_t rbd_pool_id_show(struct device *dev,
1884 struct device_attribute *attr, char *buf)
1885{
1886 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
1887
1888 return sprintf(buf, "%d\n", rbd_dev->pool_id);
1889}
1890
1851static ssize_t rbd_name_show(struct device *dev, 1891static ssize_t rbd_name_show(struct device *dev,
1852 struct device_attribute *attr, char *buf) 1892 struct device_attribute *attr, char *buf)
1853{ 1893{
1854 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 1894 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
1855 1895
1856 return sprintf(buf, "%s\n", rbd_dev->obj); 1896 return sprintf(buf, "%s\n", rbd_dev->image_name);
1857} 1897}
1858 1898
1859static ssize_t rbd_snap_show(struct device *dev, 1899static ssize_t rbd_snap_show(struct device *dev,
@@ -1871,23 +1911,18 @@ static ssize_t rbd_image_refresh(struct device *dev,
1871 size_t size) 1911 size_t size)
1872{ 1912{
1873 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 1913 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
1874 int rc; 1914 int ret;
1875 int ret = size;
1876
1877 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1878 1915
1879 rc = __rbd_refresh_header(rbd_dev); 1916 ret = rbd_refresh_header(rbd_dev, NULL);
1880 if (rc < 0)
1881 ret = rc;
1882 1917
1883 mutex_unlock(&ctl_mutex); 1918 return ret < 0 ? ret : size;
1884 return ret;
1885} 1919}
1886 1920
1887static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL); 1921static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
1888static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL); 1922static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
1889static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL); 1923static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
1890static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL); 1924static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
1925static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
1891static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL); 1926static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
1892static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh); 1927static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
1893static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL); 1928static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
@@ -1898,6 +1933,7 @@ static struct attribute *rbd_attrs[] = {
1898 &dev_attr_major.attr, 1933 &dev_attr_major.attr,
1899 &dev_attr_client_id.attr, 1934 &dev_attr_client_id.attr,
1900 &dev_attr_pool.attr, 1935 &dev_attr_pool.attr,
1936 &dev_attr_pool_id.attr,
1901 &dev_attr_name.attr, 1937 &dev_attr_name.attr,
1902 &dev_attr_current_snap.attr, 1938 &dev_attr_current_snap.attr,
1903 &dev_attr_refresh.attr, 1939 &dev_attr_refresh.attr,
@@ -1977,15 +2013,13 @@ static struct device_type rbd_snap_device_type = {
1977 .release = rbd_snap_dev_release, 2013 .release = rbd_snap_dev_release,
1978}; 2014};
1979 2015
1980static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev, 2016static void __rbd_remove_snap_dev(struct rbd_snap *snap)
1981 struct rbd_snap *snap)
1982{ 2017{
1983 list_del(&snap->node); 2018 list_del(&snap->node);
1984 device_unregister(&snap->dev); 2019 device_unregister(&snap->dev);
1985} 2020}
1986 2021
1987static int rbd_register_snap_dev(struct rbd_device *rbd_dev, 2022static int rbd_register_snap_dev(struct rbd_snap *snap,
1988 struct rbd_snap *snap,
1989 struct device *parent) 2023 struct device *parent)
1990{ 2024{
1991 struct device *dev = &snap->dev; 2025 struct device *dev = &snap->dev;
@@ -2000,29 +2034,36 @@ static int rbd_register_snap_dev(struct rbd_device *rbd_dev,
2000 return ret; 2034 return ret;
2001} 2035}
2002 2036
2003static int __rbd_add_snap_dev(struct rbd_device *rbd_dev, 2037static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev,
2004 int i, const char *name, 2038 int i, const char *name)
2005 struct rbd_snap **snapp)
2006{ 2039{
2040 struct rbd_snap *snap;
2007 int ret; 2041 int ret;
2008 struct rbd_snap *snap = kzalloc(sizeof(*snap), GFP_KERNEL); 2042
2043 snap = kzalloc(sizeof (*snap), GFP_KERNEL);
2009 if (!snap) 2044 if (!snap)
2010 return -ENOMEM; 2045 return ERR_PTR(-ENOMEM);
2046
2047 ret = -ENOMEM;
2011 snap->name = kstrdup(name, GFP_KERNEL); 2048 snap->name = kstrdup(name, GFP_KERNEL);
2049 if (!snap->name)
2050 goto err;
2051
2012 snap->size = rbd_dev->header.snap_sizes[i]; 2052 snap->size = rbd_dev->header.snap_sizes[i];
2013 snap->id = rbd_dev->header.snapc->snaps[i]; 2053 snap->id = rbd_dev->header.snapc->snaps[i];
2014 if (device_is_registered(&rbd_dev->dev)) { 2054 if (device_is_registered(&rbd_dev->dev)) {
2015 ret = rbd_register_snap_dev(rbd_dev, snap, 2055 ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
2016 &rbd_dev->dev);
2017 if (ret < 0) 2056 if (ret < 0)
2018 goto err; 2057 goto err;
2019 } 2058 }
2020 *snapp = snap; 2059
2021 return 0; 2060 return snap;
2061
2022err: 2062err:
2023 kfree(snap->name); 2063 kfree(snap->name);
2024 kfree(snap); 2064 kfree(snap);
2025 return ret; 2065
2066 return ERR_PTR(ret);
2026} 2067}
2027 2068
2028/* 2069/*
@@ -2055,7 +2096,6 @@ static int __rbd_init_snaps_header(struct rbd_device *rbd_dev)
2055 const char *name, *first_name; 2096 const char *name, *first_name;
2056 int i = rbd_dev->header.total_snaps; 2097 int i = rbd_dev->header.total_snaps;
2057 struct rbd_snap *snap, *old_snap = NULL; 2098 struct rbd_snap *snap, *old_snap = NULL;
2058 int ret;
2059 struct list_head *p, *n; 2099 struct list_head *p, *n;
2060 2100
2061 first_name = rbd_dev->header.snap_names; 2101 first_name = rbd_dev->header.snap_names;
@@ -2070,8 +2110,15 @@ static int __rbd_init_snaps_header(struct rbd_device *rbd_dev)
2070 cur_id = rbd_dev->header.snapc->snaps[i - 1]; 2110 cur_id = rbd_dev->header.snapc->snaps[i - 1];
2071 2111
2072 if (!i || old_snap->id < cur_id) { 2112 if (!i || old_snap->id < cur_id) {
2073 /* old_snap->id was skipped, thus was removed */ 2113 /*
2074 __rbd_remove_snap_dev(rbd_dev, old_snap); 2114 * old_snap->id was skipped, thus was
2115 * removed. If this rbd_dev is mapped to
2116 * the removed snapshot, record that it no
2117 * longer exists, to prevent further I/O.
2118 */
2119 if (rbd_dev->snap_id == old_snap->id)
2120 rbd_dev->snap_exists = false;
2121 __rbd_remove_snap_dev(old_snap);
2075 continue; 2122 continue;
2076 } 2123 }
2077 if (old_snap->id == cur_id) { 2124 if (old_snap->id == cur_id) {
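The new comment describes a merge-style walk: the device's snapshot list and the freshly read snap context are both ordered by id, so an id present locally but skipped by the new context must have been deleted on the server, and a device mapped to that snapshot is flagged with snap_exists = false. A toy model of the walk, with plain descending-ordered arrays standing in for the kernel lists:

#include <stdbool.h>
#include <stdio.h>

/* old[] is the driver's current snapshot list, new_ids[] the snap context
 * just read from the header; both descend by id, as a ceph snap context
 * does. An id in old[] that new_ids[] skips was deleted on the server. */
static void diff_snaps(const unsigned long long *old, int n_old,
		       const unsigned long long *new_ids, int n_new,
		       unsigned long long mapped_id, bool *mapped_exists)
{
	int i = 0, j = 0;

	while (i < n_old) {
		if (j >= n_new || old[i] > new_ids[j]) {
			/* old[i] was skipped, thus was removed */
			printf("snap %llu removed\n", old[i]);
			if (old[i] == mapped_id)
				*mapped_exists = false;
			i++;
		} else if (old[i] == new_ids[j]) {
			i++;		/* still present */
			j++;
		} else {
			j++;		/* a new snapshot, handled elsewhere */
		}
	}
}

int main(void)
{
	unsigned long long old[] = { 7, 5, 3 };
	unsigned long long new_ids[] = { 9, 7, 3 };
	bool exists = true;

	diff_snaps(old, 3, new_ids, 3, 5, &exists);	/* mapped to snap 5 */
	printf("mapped snapshot still exists: %s\n", exists ? "yes" : "no");
	return 0;
}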
@@ -2091,9 +2138,9 @@ static int __rbd_init_snaps_header(struct rbd_device *rbd_dev)
2091 if (cur_id >= old_snap->id) 2138 if (cur_id >= old_snap->id)
2092 break; 2139 break;
2093 /* a new snapshot */ 2140 /* a new snapshot */
2094 ret = __rbd_add_snap_dev(rbd_dev, i - 1, name, &snap); 2141 snap = __rbd_add_snap_dev(rbd_dev, i - 1, name);
2095 if (ret < 0) 2142 if (IS_ERR(snap))
2096 return ret; 2143 return PTR_ERR(snap);
2097 2144
2098 /* note that we add it backward so using n and not p */ 2145 /* note that we add it backward so using n and not p */
2099 list_add(&snap->node, n); 2146 list_add(&snap->node, n);
@@ -2107,9 +2154,9 @@ static int __rbd_init_snaps_header(struct rbd_device *rbd_dev)
2107 WARN_ON(1); 2154 WARN_ON(1);
2108 return -EINVAL; 2155 return -EINVAL;
2109 } 2156 }
2110 ret = __rbd_add_snap_dev(rbd_dev, i - 1, name, &snap); 2157 snap = __rbd_add_snap_dev(rbd_dev, i - 1, name);
2111 if (ret < 0) 2158 if (IS_ERR(snap))
2112 return ret; 2159 return PTR_ERR(snap);
2113 list_add(&snap->node, &rbd_dev->snaps); 2160 list_add(&snap->node, &rbd_dev->snaps);
2114 } 2161 }
2115 2162
@@ -2129,14 +2176,13 @@ static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
2129 dev->type = &rbd_device_type; 2176 dev->type = &rbd_device_type;
2130 dev->parent = &rbd_root_dev; 2177 dev->parent = &rbd_root_dev;
2131 dev->release = rbd_dev_release; 2178 dev->release = rbd_dev_release;
2132 dev_set_name(dev, "%d", rbd_dev->id); 2179 dev_set_name(dev, "%d", rbd_dev->dev_id);
2133 ret = device_register(dev); 2180 ret = device_register(dev);
2134 if (ret < 0) 2181 if (ret < 0)
2135 goto out; 2182 goto out;
2136 2183
2137 list_for_each_entry(snap, &rbd_dev->snaps, node) { 2184 list_for_each_entry(snap, &rbd_dev->snaps, node) {
2138 ret = rbd_register_snap_dev(rbd_dev, snap, 2185 ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
2139 &rbd_dev->dev);
2140 if (ret < 0) 2186 if (ret < 0)
2141 break; 2187 break;
2142 } 2188 }
@@ -2155,12 +2201,9 @@ static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
2155 int ret, rc; 2201 int ret, rc;
2156 2202
2157 do { 2203 do {
2158 ret = rbd_req_sync_watch(rbd_dev, rbd_dev->obj_md_name, 2204 ret = rbd_req_sync_watch(rbd_dev);
2159 rbd_dev->header.obj_version);
2160 if (ret == -ERANGE) { 2205 if (ret == -ERANGE) {
2161 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 2206 rc = rbd_refresh_header(rbd_dev, NULL);
2162 rc = __rbd_refresh_header(rbd_dev);
2163 mutex_unlock(&ctl_mutex);
2164 if (rc < 0) 2207 if (rc < 0)
2165 return rc; 2208 return rc;
2166 } 2209 }
@@ -2177,7 +2220,7 @@ static atomic64_t rbd_id_max = ATOMIC64_INIT(0);
2177 */ 2220 */
2178static void rbd_id_get(struct rbd_device *rbd_dev) 2221static void rbd_id_get(struct rbd_device *rbd_dev)
2179{ 2222{
2180 rbd_dev->id = atomic64_inc_return(&rbd_id_max); 2223 rbd_dev->dev_id = atomic64_inc_return(&rbd_id_max);
2181 2224
2182 spin_lock(&rbd_dev_list_lock); 2225 spin_lock(&rbd_dev_list_lock);
2183 list_add_tail(&rbd_dev->node, &rbd_dev_list); 2226 list_add_tail(&rbd_dev->node, &rbd_dev_list);
@@ -2191,7 +2234,7 @@ static void rbd_id_get(struct rbd_device *rbd_dev)
2191static void rbd_id_put(struct rbd_device *rbd_dev) 2234static void rbd_id_put(struct rbd_device *rbd_dev)
2192{ 2235{
2193 struct list_head *tmp; 2236 struct list_head *tmp;
2194 int rbd_id = rbd_dev->id; 2237 int rbd_id = rbd_dev->dev_id;
2195 int max_id; 2238 int max_id;
2196 2239
2197 BUG_ON(rbd_id < 1); 2240 BUG_ON(rbd_id < 1);
@@ -2282,19 +2325,58 @@ static inline size_t copy_token(const char **buf,
2282} 2325}
2283 2326
2284/* 2327/*
2285 * This fills in the pool_name, obj, obj_len, snap_name, obj_len, 2328 * Finds the next token in *buf, dynamically allocates a buffer big
2329 * enough to hold a copy of it, and copies the token into the new
2330 * buffer. The copy is guaranteed to be terminated with '\0'. Note
2331 * that a duplicate buffer is created even for a zero-length token.
2332 *
2333 * Returns a pointer to the newly-allocated duplicate, or a null
2334 * pointer if memory for the duplicate was not available. If
2335 * the lenp argument is a non-null pointer, the length of the token
2336 * (not including the '\0') is returned in *lenp.
2337 *
2338 * If successful, the *buf pointer will be updated to point beyond
2339 * the end of the found token.
2340 *
2341 * Note: uses GFP_KERNEL for allocation.
2342 */
2343static inline char *dup_token(const char **buf, size_t *lenp)
2344{
2345 char *dup;
2346 size_t len;
2347
2348 len = next_token(buf);
2349 dup = kmalloc(len + 1, GFP_KERNEL);
2350 if (!dup)
2351 return NULL;
2352
2353 memcpy(dup, *buf, len);
2354 *(dup + len) = '\0';
2355 *buf += len;
2356
2357 if (lenp)
2358 *lenp = len;
2359
2360 return dup;
2361}
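The comment block above pins down the dup_token() contract used by rbd_add_parse_args() below. As a standalone sketch of that contract, here is a userspace version; next_token() is a stand-in written for this example (the kernel helper is defined earlier in the file), assumed to skip leading whitespace and return the length of the token that follows:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the kernel's next_token(). */
static size_t next_token(const char **buf)
{
	*buf += strspn(*buf, " \t");
	return strcspn(*buf, " \t");
}

static char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = malloc(len + 1);		/* kmalloc(..., GFP_KERNEL) in-kernel */
	if (!dup)
		return NULL;

	memcpy(dup, *buf, len);
	dup[len] = '\0';		/* NUL-terminated even when len == 0 */
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}

int main(void)
{
	const char *buf = "1.2.3.4:6789 name=admin rbd foo";
	size_t len;
	char *tok;

	while ((tok = dup_token(&buf, &len)) && len) {
		printf("token: '%s' (len %zu)\n", tok, len);
		free(tok);
	}
	free(tok);			/* the final zero-length duplicate */
	return 0;
}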
2362
2363/*
2364 * This fills in the pool_name, image_name, image_name_len, snap_name,
2286 * rbd_dev, rbd_md_name, and name fields of the given rbd_dev, based 2365 * rbd_dev, rbd_md_name, and name fields of the given rbd_dev, based
2287 * on the list of monitor addresses and other options provided via 2366 * on the list of monitor addresses and other options provided via
2288 * /sys/bus/rbd/add. 2367 * /sys/bus/rbd/add.
2368 *
2369 * Note: rbd_dev is assumed to have been initially zero-filled.
2289 */ 2370 */
2290static int rbd_add_parse_args(struct rbd_device *rbd_dev, 2371static int rbd_add_parse_args(struct rbd_device *rbd_dev,
2291 const char *buf, 2372 const char *buf,
2292 const char **mon_addrs, 2373 const char **mon_addrs,
2293 size_t *mon_addrs_size, 2374 size_t *mon_addrs_size,
2294 char *options, 2375 char *options,
2295 size_t options_size) 2376 size_t options_size)
2296{ 2377{
2297 size_t len; 2378 size_t len;
2379 int ret;
2298 2380
2299 /* The first four tokens are required */ 2381 /* The first four tokens are required */
2300 2382
@@ -2310,56 +2392,74 @@ static int rbd_add_parse_args(struct rbd_device *rbd_dev,
2310 if (!len || len >= options_size) 2392 if (!len || len >= options_size)
2311 return -EINVAL; 2393 return -EINVAL;
2312 2394
2313 len = copy_token(&buf, rbd_dev->pool_name, sizeof (rbd_dev->pool_name)); 2395 ret = -ENOMEM;
2314 if (!len || len >= sizeof (rbd_dev->pool_name)) 2396 rbd_dev->pool_name = dup_token(&buf, NULL);
2315 return -EINVAL; 2397 if (!rbd_dev->pool_name)
2316 2398 goto out_err;
2317 len = copy_token(&buf, rbd_dev->obj, sizeof (rbd_dev->obj));
2318 if (!len || len >= sizeof (rbd_dev->obj))
2319 return -EINVAL;
2320 2399
2321 /* We have the object length in hand, save it. */ 2400 rbd_dev->image_name = dup_token(&buf, &rbd_dev->image_name_len);
2401 if (!rbd_dev->image_name)
2402 goto out_err;
2322 2403
2323 rbd_dev->obj_len = len; 2404 /* Create the name of the header object */
2324 2405
2325 BUILD_BUG_ON(RBD_MAX_MD_NAME_LEN 2406 rbd_dev->header_name = kmalloc(rbd_dev->image_name_len
2326 < RBD_MAX_OBJ_NAME_LEN + sizeof (RBD_SUFFIX)); 2407 + sizeof (RBD_SUFFIX),
2327 sprintf(rbd_dev->obj_md_name, "%s%s", rbd_dev->obj, RBD_SUFFIX); 2408 GFP_KERNEL);
2409 if (!rbd_dev->header_name)
2410 goto out_err;
2411 sprintf(rbd_dev->header_name, "%s%s", rbd_dev->image_name, RBD_SUFFIX);
2328 2412
2329 /* 2413 /*
2330 * The snapshot name is optional, but it's an error if it's 2414 * The snapshot name is optional. If none is supplied,
2331 * too long. If no snapshot is supplied, fill in the default. 2415 * we use the default value.
2332 */ 2416 */
2333 len = copy_token(&buf, rbd_dev->snap_name, sizeof (rbd_dev->snap_name)); 2417 rbd_dev->snap_name = dup_token(&buf, &len);
2334 if (!len) 2418 if (!rbd_dev->snap_name)
2419 goto out_err;
2420 if (!len) {
2421 /* Replace the empty name with the default */
2422 kfree(rbd_dev->snap_name);
2423 rbd_dev->snap_name
2424 = kmalloc(sizeof (RBD_SNAP_HEAD_NAME), GFP_KERNEL);
2425 if (!rbd_dev->snap_name)
2426 goto out_err;
2427
2335 memcpy(rbd_dev->snap_name, RBD_SNAP_HEAD_NAME, 2428 memcpy(rbd_dev->snap_name, RBD_SNAP_HEAD_NAME,
2336 sizeof (RBD_SNAP_HEAD_NAME)); 2429 sizeof (RBD_SNAP_HEAD_NAME));
2337 else if (len >= sizeof (rbd_dev->snap_name)) 2430 }
2338 return -EINVAL;
2339 2431
2340 return 0; 2432 return 0;
2433
2434out_err:
2435 kfree(rbd_dev->header_name);
2436 kfree(rbd_dev->image_name);
2437 kfree(rbd_dev->pool_name);
2438 rbd_dev->pool_name = NULL;
2439
2440 return ret;
2341} 2441}
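rbd_add_parse_args() also settles on one unwind idiom: allocate the name strings in a fixed order, free everything allocated so far on failure, and leave pool_name NULL as the sentinel that the caller's error path (err_put_id in rbd_add() below) tests before freeing the names. A compressed sketch of that shape; the strdup() fill-ins are placeholders, not the real parsing:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct dev_names {
	char *pool_name;	/* doubles as the "parse succeeded" sentinel */
	char *image_name;
	char *header_name;
	char *snap_name;
};

/* Assumes *d was zero-filled, as the kernel comment above notes. */
static int parse_names(struct dev_names *d)
{
	d->pool_name = strdup("rbd");
	if (!d->pool_name)
		goto out_err;

	d->image_name = strdup("foo");
	if (!d->image_name)
		goto out_err;

	d->header_name = strdup("foo.rbd");	/* image_name + RBD_SUFFIX */
	if (!d->header_name)
		goto out_err;

	d->snap_name = strdup("-");		/* RBD_SNAP_HEAD_NAME default */
	if (!d->snap_name)
		goto out_err;

	return 0;

out_err:
	free(d->header_name);			/* free(NULL) is a no-op */
	free(d->image_name);
	free(d->pool_name);
	d->pool_name = NULL;			/* nothing to release later */
	return -ENOMEM;
}

int main(void)
{
	struct dev_names d = { 0 };

	return parse_names(&d) ? 1 : 0;
}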
2342 2442
2343static ssize_t rbd_add(struct bus_type *bus, 2443static ssize_t rbd_add(struct bus_type *bus,
2344 const char *buf, 2444 const char *buf,
2345 size_t count) 2445 size_t count)
2346{ 2446{
2347 struct rbd_device *rbd_dev; 2447 char *options;
2448 struct rbd_device *rbd_dev = NULL;
2348 const char *mon_addrs = NULL; 2449 const char *mon_addrs = NULL;
2349 size_t mon_addrs_size = 0; 2450 size_t mon_addrs_size = 0;
2350 char *options = NULL;
2351 struct ceph_osd_client *osdc; 2451 struct ceph_osd_client *osdc;
2352 int rc = -ENOMEM; 2452 int rc = -ENOMEM;
2353 2453
2354 if (!try_module_get(THIS_MODULE)) 2454 if (!try_module_get(THIS_MODULE))
2355 return -ENODEV; 2455 return -ENODEV;
2356 2456
2357 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
2358 if (!rbd_dev)
2359 goto err_nomem;
2360 options = kmalloc(count, GFP_KERNEL); 2457 options = kmalloc(count, GFP_KERNEL);
2361 if (!options) 2458 if (!options)
2362 goto err_nomem; 2459 goto err_nomem;
2460 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
2461 if (!rbd_dev)
2462 goto err_nomem;
2363 2463
2364 /* static rbd_device initialization */ 2464 /* static rbd_device initialization */
2365 spin_lock_init(&rbd_dev->lock); 2465 spin_lock_init(&rbd_dev->lock);
@@ -2367,15 +2467,13 @@ static ssize_t rbd_add(struct bus_type *bus,
2367 INIT_LIST_HEAD(&rbd_dev->snaps); 2467 INIT_LIST_HEAD(&rbd_dev->snaps);
2368 init_rwsem(&rbd_dev->header_rwsem); 2468 init_rwsem(&rbd_dev->header_rwsem);
2369 2469
2370 init_rwsem(&rbd_dev->header_rwsem);
2371
2372 /* generate unique id: find highest unique id, add one */ 2470 /* generate unique id: find highest unique id, add one */
2373 rbd_id_get(rbd_dev); 2471 rbd_id_get(rbd_dev);
2374 2472
2375 /* Fill in the device name, now that we have its id. */ 2473 /* Fill in the device name, now that we have its id. */
2376 BUILD_BUG_ON(DEV_NAME_LEN 2474 BUILD_BUG_ON(DEV_NAME_LEN
2377 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH); 2475 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
2378 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->id); 2476 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
2379 2477
2380 /* parse add command */ 2478 /* parse add command */
2381 rc = rbd_add_parse_args(rbd_dev, buf, &mon_addrs, &mon_addrs_size, 2479 rc = rbd_add_parse_args(rbd_dev, buf, &mon_addrs, &mon_addrs_size,
@@ -2395,7 +2493,7 @@ static ssize_t rbd_add(struct bus_type *bus,
2395 rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name); 2493 rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name);
2396 if (rc < 0) 2494 if (rc < 0)
2397 goto err_out_client; 2495 goto err_out_client;
2398 rbd_dev->poolid = rc; 2496 rbd_dev->pool_id = rc;
2399 2497
2400 /* register our block device */ 2498 /* register our block device */
2401 rc = register_blkdev(0, rbd_dev->name); 2499 rc = register_blkdev(0, rbd_dev->name);
@@ -2435,10 +2533,16 @@ err_out_blkdev:
2435err_out_client: 2533err_out_client:
2436 rbd_put_client(rbd_dev); 2534 rbd_put_client(rbd_dev);
2437err_put_id: 2535err_put_id:
2536 if (rbd_dev->pool_name) {
2537 kfree(rbd_dev->snap_name);
2538 kfree(rbd_dev->header_name);
2539 kfree(rbd_dev->image_name);
2540 kfree(rbd_dev->pool_name);
2541 }
2438 rbd_id_put(rbd_dev); 2542 rbd_id_put(rbd_dev);
2439err_nomem: 2543err_nomem:
2440 kfree(options);
2441 kfree(rbd_dev); 2544 kfree(rbd_dev);
2545 kfree(options);
2442 2546
2443 dout("Error adding device %s\n", buf); 2547 dout("Error adding device %s\n", buf);
2444 module_put(THIS_MODULE); 2548 module_put(THIS_MODULE);
@@ -2446,7 +2550,7 @@ err_nomem:
2446 return (ssize_t) rc; 2550 return (ssize_t) rc;
2447} 2551}
2448 2552
2449static struct rbd_device *__rbd_get_dev(unsigned long id) 2553static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
2450{ 2554{
2451 struct list_head *tmp; 2555 struct list_head *tmp;
2452 struct rbd_device *rbd_dev; 2556 struct rbd_device *rbd_dev;
@@ -2454,7 +2558,7 @@ static struct rbd_device *__rbd_get_dev(unsigned long id)
2454 spin_lock(&rbd_dev_list_lock); 2558 spin_lock(&rbd_dev_list_lock);
2455 list_for_each(tmp, &rbd_dev_list) { 2559 list_for_each(tmp, &rbd_dev_list) {
2456 rbd_dev = list_entry(tmp, struct rbd_device, node); 2560 rbd_dev = list_entry(tmp, struct rbd_device, node);
2457 if (rbd_dev->id == id) { 2561 if (rbd_dev->dev_id == dev_id) {
2458 spin_unlock(&rbd_dev_list_lock); 2562 spin_unlock(&rbd_dev_list_lock);
2459 return rbd_dev; 2563 return rbd_dev;
2460 } 2564 }
@@ -2474,7 +2578,7 @@ static void rbd_dev_release(struct device *dev)
2474 rbd_dev->watch_request); 2578 rbd_dev->watch_request);
2475 } 2579 }
2476 if (rbd_dev->watch_event) 2580 if (rbd_dev->watch_event)
2477 rbd_req_sync_unwatch(rbd_dev, rbd_dev->obj_md_name); 2581 rbd_req_sync_unwatch(rbd_dev);
2478 2582
2479 rbd_put_client(rbd_dev); 2583 rbd_put_client(rbd_dev);
2480 2584
@@ -2483,6 +2587,10 @@ static void rbd_dev_release(struct device *dev)
2483 unregister_blkdev(rbd_dev->major, rbd_dev->name); 2587 unregister_blkdev(rbd_dev->major, rbd_dev->name);
2484 2588
2485 /* done with the id, and with the rbd_dev */ 2589 /* done with the id, and with the rbd_dev */
2590 kfree(rbd_dev->snap_name);
2591 kfree(rbd_dev->header_name);
2592 kfree(rbd_dev->pool_name);
2593 kfree(rbd_dev->image_name);
2486 rbd_id_put(rbd_dev); 2594 rbd_id_put(rbd_dev);
2487 kfree(rbd_dev); 2595 kfree(rbd_dev);
2488 2596
@@ -2544,7 +2652,7 @@ static ssize_t rbd_snap_add(struct device *dev,
2544 if (ret < 0) 2652 if (ret < 0)
2545 goto err_unlock; 2653 goto err_unlock;
2546 2654
2547 ret = __rbd_refresh_header(rbd_dev); 2655 ret = __rbd_refresh_header(rbd_dev, NULL);
2548 if (ret < 0) 2656 if (ret < 0)
2549 goto err_unlock; 2657 goto err_unlock;
2550 2658
@@ -2553,7 +2661,7 @@ static ssize_t rbd_snap_add(struct device *dev,
2553 mutex_unlock(&ctl_mutex); 2661 mutex_unlock(&ctl_mutex);
2554 2662
2555 /* make a best effort, don't error if failed */ 2663 /* make a best effort, don't error if failed */
2556 rbd_req_sync_notify(rbd_dev, rbd_dev->obj_md_name); 2664 rbd_req_sync_notify(rbd_dev);
2557 2665
2558 ret = count; 2666 ret = count;
2559 kfree(name); 2667 kfree(name);
diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h
index 950708688f17..0924e9e41a60 100644
--- a/drivers/block/rbd_types.h
+++ b/drivers/block/rbd_types.h
@@ -31,7 +31,6 @@
31#define RBD_MIN_OBJ_ORDER 16 31#define RBD_MIN_OBJ_ORDER 16
32#define RBD_MAX_OBJ_ORDER 30 32#define RBD_MAX_OBJ_ORDER 30
33 33
34#define RBD_MAX_OBJ_NAME_LEN 96
35#define RBD_MAX_SEG_NAME_LEN 128 34#define RBD_MAX_SEG_NAME_LEN 128
36 35
37#define RBD_COMP_NONE 0 36#define RBD_COMP_NONE 0
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index aa2712060bfb..eb0d8216f557 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -513,6 +513,21 @@ static void process_page(unsigned long data)
513 } 513 }
514} 514}
515 515
516static void mm_unplug(struct blk_plug_cb *cb, bool from_schedule)
517{
518 struct cardinfo *card = cb->data;
519
520 spin_lock_irq(&card->lock);
521 activate(card);
522 spin_unlock_irq(&card->lock);
523 kfree(cb);
524}
525
526static int mm_check_plugged(struct cardinfo *card)
527{
528 return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb));
529}
530
516static void mm_make_request(struct request_queue *q, struct bio *bio) 531static void mm_make_request(struct request_queue *q, struct bio *bio)
517{ 532{
518 struct cardinfo *card = q->queuedata; 533 struct cardinfo *card = q->queuedata;
@@ -523,6 +538,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
523 *card->biotail = bio; 538 *card->biotail = bio;
524 bio->bi_next = NULL; 539 bio->bi_next = NULL;
525 card->biotail = &bio->bi_next; 540 card->biotail = &bio->bi_next;
541 if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
542 activate(card);
526 spin_unlock_irq(&card->lock); 543 spin_unlock_irq(&card->lock);
527 544
528 return; 545 return;
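The umem change defers hardware activation while a block plug is active: mm_check_plugged() registers mm_unplug() on the current task's plug via blk_check_plugged(), so queued bios get a single activate() when the plug is released, while REQ_SYNC bios still kick the card immediately. A toy model of that batch-on-unplug behavior (all types and state here are stand-ins):

#include <stdbool.h>
#include <stdio.h>

static int queued;
static bool plugged;

static void activate(void)
{
	printf("activate: start DMA for %d queued bios\n", queued);
	queued = 0;
}

static void submit_bio(bool sync)
{
	queued++;
	if (sync || !plugged)
		activate();	/* unplugged, or urgent: kick immediately */
}

static void unplug(void)
{
	plugged = false;
	if (queued)
		activate();	/* one kick for the whole batch */
}

int main(void)
{
	plugged = true;
	submit_bio(false);
	submit_bio(false);
	submit_bio(false);
	unplug();		/* -> single activate for three bios */
	submit_bio(false);	/* no plug: immediate activate */
	return 0;
}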
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 693187df7601..c0bbeb470754 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -21,8 +21,6 @@ struct workqueue_struct *virtblk_wq;
21 21
22struct virtio_blk 22struct virtio_blk
23{ 23{
24 spinlock_t lock;
25
26 struct virtio_device *vdev; 24 struct virtio_device *vdev;
27 struct virtqueue *vq; 25 struct virtqueue *vq;
28 26
@@ -65,7 +63,7 @@ static void blk_done(struct virtqueue *vq)
65 unsigned int len; 63 unsigned int len;
66 unsigned long flags; 64 unsigned long flags;
67 65
68 spin_lock_irqsave(&vblk->lock, flags); 66 spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
69 while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) { 67 while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
70 int error; 68 int error;
71 69
@@ -99,7 +97,7 @@ static void blk_done(struct virtqueue *vq)
99 } 97 }
100 /* In case queue is stopped waiting for more buffers. */ 98 /* In case queue is stopped waiting for more buffers. */
101 blk_start_queue(vblk->disk->queue); 99 blk_start_queue(vblk->disk->queue);
102 spin_unlock_irqrestore(&vblk->lock, flags); 100 spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags);
103} 101}
104 102
105static bool do_req(struct request_queue *q, struct virtio_blk *vblk, 103static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
@@ -397,6 +395,83 @@ static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
397 return 0; 395 return 0;
398} 396}
399 397
398static int virtblk_get_cache_mode(struct virtio_device *vdev)
399{
400 u8 writeback;
401 int err;
402
403 err = virtio_config_val(vdev, VIRTIO_BLK_F_CONFIG_WCE,
404 offsetof(struct virtio_blk_config, wce),
405 &writeback);
406 if (err)
407 writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);
408
409 return writeback;
410}
411
412static void virtblk_update_cache_mode(struct virtio_device *vdev)
413{
414 u8 writeback = virtblk_get_cache_mode(vdev);
415 struct virtio_blk *vblk = vdev->priv;
416
417 if (writeback)
418 blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
419 else
420 blk_queue_flush(vblk->disk->queue, 0);
421
422 revalidate_disk(vblk->disk);
423}
424
425static const char *const virtblk_cache_types[] = {
426 "write through", "write back"
427};
428
429static ssize_t
430virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
431 const char *buf, size_t count)
432{
433 struct gendisk *disk = dev_to_disk(dev);
434 struct virtio_blk *vblk = disk->private_data;
435 struct virtio_device *vdev = vblk->vdev;
436 int i;
437 u8 writeback;
438
439 BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
440 for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
441 if (sysfs_streq(buf, virtblk_cache_types[i]))
442 break;
443
444 if (i < 0)
445 return -EINVAL;
446
447 writeback = i;
448 vdev->config->set(vdev,
449 offsetof(struct virtio_blk_config, wce),
450 &writeback, sizeof(writeback));
451
452 virtblk_update_cache_mode(vdev);
453 return count;
454}
455
456static ssize_t
457virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
458 char *buf)
459{
460 struct gendisk *disk = dev_to_disk(dev);
461 struct virtio_blk *vblk = disk->private_data;
462 u8 writeback = virtblk_get_cache_mode(vblk->vdev);
463
464 BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
465 return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
466}
467
468static const struct device_attribute dev_attr_cache_type_ro =
469 __ATTR(cache_type, S_IRUGO,
470 virtblk_cache_type_show, NULL);
471static const struct device_attribute dev_attr_cache_type_rw =
472 __ATTR(cache_type, S_IRUGO|S_IWUSR,
473 virtblk_cache_type_show, virtblk_cache_type_store);
474
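virtblk_get_cache_mode() encodes a two-level fallback: when the device offers VIRTIO_BLK_F_CONFIG_WCE, the writeback byte in config space is authoritative (and writable through the cache_type attribute above); otherwise the negotiated VIRTIO_BLK_F_WCE feature bit decides. A sketch of just that decision, with illustrative feature masks rather than the real virtio bit numbers:

#include <stdbool.h>
#include <stdio.h>

enum { F_WCE = 1 << 0, F_CONFIG_WCE = 1 << 1 };	/* stand-in masks */

static bool writeback_cache(unsigned int features, unsigned char config_wce)
{
	if (features & F_CONFIG_WCE)
		return config_wce;		/* config space is authoritative */
	return features & F_WCE;		/* legacy feature-bit fallback */
}

int main(void)
{
	/* config byte overrides the feature bit: write through */
	printf("%d\n", writeback_cache(F_CONFIG_WCE | F_WCE, 0));
	/* no config support, feature negotiated: write back */
	printf("%d\n", writeback_cache(F_WCE, 0));
	/* neither: write through */
	printf("%d\n", writeback_cache(0, 0));
	return 0;
}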
400static int __devinit virtblk_probe(struct virtio_device *vdev) 475static int __devinit virtblk_probe(struct virtio_device *vdev)
401{ 476{
402 struct virtio_blk *vblk; 477 struct virtio_blk *vblk;
@@ -431,7 +506,6 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
431 goto out_free_index; 506 goto out_free_index;
432 } 507 }
433 508
434 spin_lock_init(&vblk->lock);
435 vblk->vdev = vdev; 509 vblk->vdev = vdev;
436 vblk->sg_elems = sg_elems; 510 vblk->sg_elems = sg_elems;
437 sg_init_table(vblk->sg, vblk->sg_elems); 511 sg_init_table(vblk->sg, vblk->sg_elems);
@@ -456,7 +530,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
456 goto out_mempool; 530 goto out_mempool;
457 } 531 }
458 532
459 q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock); 533 q = vblk->disk->queue = blk_init_queue(do_virtblk_request, NULL);
460 if (!q) { 534 if (!q) {
461 err = -ENOMEM; 535 err = -ENOMEM;
462 goto out_put_disk; 536 goto out_put_disk;
@@ -474,8 +548,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
474 vblk->index = index; 548 vblk->index = index;
475 549
476 /* configure queue flush support */ 550 /* configure queue flush support */
477 if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) 551 virtblk_update_cache_mode(vdev);
478 blk_queue_flush(q, REQ_FLUSH);
479 552
480 /* If disk is read-only in the host, the guest should obey */ 553 /* If disk is read-only in the host, the guest should obey */
481 if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) 554 if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
@@ -553,6 +626,14 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
553 if (err) 626 if (err)
554 goto out_del_disk; 627 goto out_del_disk;
555 628
629 if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
630 err = device_create_file(disk_to_dev(vblk->disk),
631 &dev_attr_cache_type_rw);
632 else
633 err = device_create_file(disk_to_dev(vblk->disk),
634 &dev_attr_cache_type_ro);
635 if (err)
636 goto out_del_disk;
556 return 0; 637 return 0;
557 638
558out_del_disk: 639out_del_disk:
@@ -576,30 +657,20 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
576{ 657{
577 struct virtio_blk *vblk = vdev->priv; 658 struct virtio_blk *vblk = vdev->priv;
578 int index = vblk->index; 659 int index = vblk->index;
579 struct virtblk_req *vbr;
580 unsigned long flags;
581 660
582 /* Prevent config work handler from accessing the device. */ 661 /* Prevent config work handler from accessing the device. */
583 mutex_lock(&vblk->config_lock); 662 mutex_lock(&vblk->config_lock);
584 vblk->config_enable = false; 663 vblk->config_enable = false;
585 mutex_unlock(&vblk->config_lock); 664 mutex_unlock(&vblk->config_lock);
586 665
666 del_gendisk(vblk->disk);
667 blk_cleanup_queue(vblk->disk->queue);
668
587 /* Stop all the virtqueues. */ 669 /* Stop all the virtqueues. */
588 vdev->config->reset(vdev); 670 vdev->config->reset(vdev);
589 671
590 flush_work(&vblk->config_work); 672 flush_work(&vblk->config_work);
591 673
592 del_gendisk(vblk->disk);
593
594 /* Abort requests dispatched to driver. */
595 spin_lock_irqsave(&vblk->lock, flags);
596 while ((vbr = virtqueue_detach_unused_buf(vblk->vq))) {
597 __blk_end_request_all(vbr->req, -EIO);
598 mempool_free(vbr, vblk->pool);
599 }
600 spin_unlock_irqrestore(&vblk->lock, flags);
601
602 blk_cleanup_queue(vblk->disk->queue);
603 put_disk(vblk->disk); 674 put_disk(vblk->disk);
604 mempool_destroy(vblk->pool); 675 mempool_destroy(vblk->pool);
605 vdev->config->del_vqs(vdev); 676 vdev->config->del_vqs(vdev);
@@ -655,7 +726,7 @@ static const struct virtio_device_id id_table[] = {
655static unsigned int features[] = { 726static unsigned int features[] = {
656 VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, 727 VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
657 VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI, 728 VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
658 VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY 729 VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE
659}; 730};
660 731
661/* 732/*
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 773cf27dc23f..9ad3b5ec1dc1 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -257,6 +257,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
257 break; 257 break;
258 case BLKIF_OP_DISCARD: 258 case BLKIF_OP_DISCARD:
259 dst->u.discard.flag = src->u.discard.flag; 259 dst->u.discard.flag = src->u.discard.flag;
260 dst->u.discard.id = src->u.discard.id;
260 dst->u.discard.sector_number = src->u.discard.sector_number; 261 dst->u.discard.sector_number = src->u.discard.sector_number;
261 dst->u.discard.nr_sectors = src->u.discard.nr_sectors; 262 dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
262 break; 263 break;
@@ -287,6 +288,7 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
287 break; 288 break;
288 case BLKIF_OP_DISCARD: 289 case BLKIF_OP_DISCARD:
289 dst->u.discard.flag = src->u.discard.flag; 290 dst->u.discard.flag = src->u.discard.flag;
291 dst->u.discard.id = src->u.discard.id;
290 dst->u.discard.sector_number = src->u.discard.sector_number; 292 dst->u.discard.sector_number = src->u.discard.sector_number;
291 dst->u.discard.nr_sectors = src->u.discard.nr_sectors; 293 dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
292 break; 294 break;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 60eed4bdd2e4..2c2d2e5c1597 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -141,14 +141,36 @@ static int get_id_from_freelist(struct blkfront_info *info)
141 return free; 141 return free;
142} 142}
143 143
144static void add_id_to_freelist(struct blkfront_info *info, 144static int add_id_to_freelist(struct blkfront_info *info,
145 unsigned long id) 145 unsigned long id)
146{ 146{
147 if (info->shadow[id].req.u.rw.id != id)
148 return -EINVAL;
149 if (info->shadow[id].request == NULL)
150 return -EINVAL;
147 info->shadow[id].req.u.rw.id = info->shadow_free; 151 info->shadow[id].req.u.rw.id = info->shadow_free;
148 info->shadow[id].request = NULL; 152 info->shadow[id].request = NULL;
149 info->shadow_free = id; 153 info->shadow_free = id;
154 return 0;
150} 155}
151 156
157static const char *op_name(int op)
158{
159 static const char *const names[] = {
160 [BLKIF_OP_READ] = "read",
161 [BLKIF_OP_WRITE] = "write",
162 [BLKIF_OP_WRITE_BARRIER] = "barrier",
163 [BLKIF_OP_FLUSH_DISKCACHE] = "flush",
164 [BLKIF_OP_DISCARD] = "discard" };
165
166 if (op < 0 || op >= ARRAY_SIZE(names))
167 return "unknown";
168
169 if (!names[op])
170 return "reserved";
171
172 return names[op];
173}
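op_name() relies on a C99 designated-initializer table: indices never initialized stay NULL, which is how an opcode value that is defined but deliberately absent from the table (the gap between BLKIF_OP_FLUSH_DISKCACHE and BLKIF_OP_DISCARD) comes back as "reserved" instead of dereferencing a null pointer in printk. A standalone sketch of the same pattern; the numeric opcode values are illustrative:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *op_name(int op)
{
	static const char *const names[] = {
		[0] = "read",
		[1] = "write",
		[2] = "barrier",
		[3] = "flush",
		[5] = "discard",	/* index 4 left NULL on purpose */
	};

	if (op < 0 || op >= (int)ARRAY_SIZE(names))
		return "unknown";	/* outside the table entirely */
	if (!names[op])
		return "reserved";	/* hole in the table */
	return names[op];
}

int main(void)
{
	int op;

	for (op = -1; op <= 6; op++)
		printf("%d -> %s\n", op, op_name(op));
	return 0;
}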
152static int xlbd_reserve_minors(unsigned int minor, unsigned int nr) 174static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
153{ 175{
154 unsigned int end = minor + nr; 176 unsigned int end = minor + nr;
@@ -746,20 +768,36 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
746 768
747 bret = RING_GET_RESPONSE(&info->ring, i); 769 bret = RING_GET_RESPONSE(&info->ring, i);
748 id = bret->id; 770 id = bret->id;
771 /*
772 * The backend has messed up and given us an id that we would
773 * never have given to it (we stamp it up to BLK_RING_SIZE -
 774 * look in get_id_from_freelist).
775 */
776 if (id >= BLK_RING_SIZE) {
777 WARN(1, "%s: response to %s has incorrect id (%ld)\n",
778 info->gd->disk_name, op_name(bret->operation), id);
779 /* We can't safely get the 'struct request' as
780 * the id is busted. */
781 continue;
782 }
749 req = info->shadow[id].request; 783 req = info->shadow[id].request;
750 784
751 if (bret->operation != BLKIF_OP_DISCARD) 785 if (bret->operation != BLKIF_OP_DISCARD)
752 blkif_completion(&info->shadow[id]); 786 blkif_completion(&info->shadow[id]);
753 787
754 add_id_to_freelist(info, id); 788 if (add_id_to_freelist(info, id)) {
789 WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
790 info->gd->disk_name, op_name(bret->operation), id);
791 continue;
792 }
755 793
756 error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; 794 error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
757 switch (bret->operation) { 795 switch (bret->operation) {
758 case BLKIF_OP_DISCARD: 796 case BLKIF_OP_DISCARD:
759 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { 797 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
760 struct request_queue *rq = info->rq; 798 struct request_queue *rq = info->rq;
761 printk(KERN_WARNING "blkfront: %s: discard op failed\n", 799 printk(KERN_WARNING "blkfront: %s: %s op failed\n",
762 info->gd->disk_name); 800 info->gd->disk_name, op_name(bret->operation));
763 error = -EOPNOTSUPP; 801 error = -EOPNOTSUPP;
764 info->feature_discard = 0; 802 info->feature_discard = 0;
765 info->feature_secdiscard = 0; 803 info->feature_secdiscard = 0;
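Both new checks treat the response id as untrusted input: it must be below BLK_RING_SIZE, and add_id_to_freelist() now verifies the slot is genuinely in flight before recycling it, so a buggy or malicious backend cannot corrupt the shadow free list with an out-of-range or duplicated id. A compact model of that free list and both rejection cases (sizes and struct layout simplified for the sketch):

#include <stdio.h>

#define BLK_RING_SIZE 32

struct shadow_entry {
	unsigned long id;	/* next-free link, or own index while busy */
	void *request;		/* non-NULL while the slot is in flight */
};

static struct shadow_entry shadow[BLK_RING_SIZE];
static unsigned long shadow_free;

static unsigned long get_id(void *req)
{
	unsigned long id = shadow_free;

	shadow_free = shadow[id].id;	/* follow the free list */
	shadow[id].id = id;		/* stamp: "this slot is busy" */
	shadow[id].request = req;
	return id;
}

static int put_id(unsigned long id)
{
	if (id >= BLK_RING_SIZE || shadow[id].id != id || !shadow[id].request)
		return -1;		/* bogus or duplicate completion */
	shadow[id].request = NULL;
	shadow[id].id = shadow_free;
	shadow_free = id;
	return 0;
}

int main(void)
{
	unsigned long i, id;
	int dummy;

	for (i = 0; i < BLK_RING_SIZE; i++)
		shadow[i].id = i + 1;	/* build the initial free list */

	id = get_id(&dummy);
	printf("put once: %d\n", put_id(id));		/* 0: accepted */
	printf("put twice: %d\n", put_id(id));		/* -1: rejected */
	printf("out of range: %d\n", put_id(BLK_RING_SIZE)); /* -1 */
	return 0;
}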
@@ -771,18 +809,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
771 case BLKIF_OP_FLUSH_DISKCACHE: 809 case BLKIF_OP_FLUSH_DISKCACHE:
772 case BLKIF_OP_WRITE_BARRIER: 810 case BLKIF_OP_WRITE_BARRIER:
773 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { 811 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
774 printk(KERN_WARNING "blkfront: %s: write %s op failed\n", 812 printk(KERN_WARNING "blkfront: %s: %s op failed\n",
775 info->flush_op == BLKIF_OP_WRITE_BARRIER ? 813 info->gd->disk_name, op_name(bret->operation));
776 "barrier" : "flush disk cache",
777 info->gd->disk_name);
778 error = -EOPNOTSUPP; 814 error = -EOPNOTSUPP;
779 } 815 }
780 if (unlikely(bret->status == BLKIF_RSP_ERROR && 816 if (unlikely(bret->status == BLKIF_RSP_ERROR &&
781 info->shadow[id].req.u.rw.nr_segments == 0)) { 817 info->shadow[id].req.u.rw.nr_segments == 0)) {
782 printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n", 818 printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
783 info->flush_op == BLKIF_OP_WRITE_BARRIER ? 819 info->gd->disk_name, op_name(bret->operation));
784 "barrier" : "flush disk cache",
785 info->gd->disk_name);
786 error = -EOPNOTSUPP; 820 error = -EOPNOTSUPP;
787 } 821 }
788 if (unlikely(error)) { 822 if (unlikely(error)) {
@@ -854,9 +888,8 @@ static int setup_blkring(struct xenbus_device *dev,
854 if (err) 888 if (err)
855 goto fail; 889 goto fail;
856 890
857 err = bind_evtchn_to_irqhandler(info->evtchn, 891 err = bind_evtchn_to_irqhandler(info->evtchn, blkif_interrupt, 0,
858 blkif_interrupt, 892 "blkif", info);
859 IRQF_SAMPLE_RANDOM, "blkif", info);
860 if (err <= 0) { 893 if (err <= 0) {
861 xenbus_dev_fatal(dev, err, 894 xenbus_dev_fatal(dev, err,
862 "bind_evtchn_to_irqhandler failed"); 895 "bind_evtchn_to_irqhandler failed");
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 5ccf142ef0b8..e9f203eadb1f 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -81,6 +81,18 @@ config BT_HCIUART_LL
81 81
82 Say Y here to compile support for HCILL protocol. 82 Say Y here to compile support for HCILL protocol.
83 83
84config BT_HCIUART_3WIRE
85 bool "Three-wire UART (H5) protocol support"
86 depends on BT_HCIUART
87 help
88 The HCI Three-wire UART Transport Layer makes it possible to
 89 use the Bluetooth HCI over a serial port interface. The HCI
90 Three-wire UART Transport Layer assumes that the UART
91 communication may have bit errors, overrun errors or burst
 92 errors, thereby making CTS/RTS lines unnecessary.
93
94 Say Y here to compile support for Three-wire UART protocol.
95
84config BT_HCIBCM203X 96config BT_HCIBCM203X
85 tristate "HCI BCM203x USB driver" 97 tristate "HCI BCM203x USB driver"
86 depends on USB 98 depends on USB
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index f4460f4f4b78..4afae20df512 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -28,4 +28,5 @@ hci_uart-$(CONFIG_BT_HCIUART_H4) += hci_h4.o
28hci_uart-$(CONFIG_BT_HCIUART_BCSP) += hci_bcsp.o 28hci_uart-$(CONFIG_BT_HCIUART_BCSP) += hci_bcsp.o
29hci_uart-$(CONFIG_BT_HCIUART_LL) += hci_ll.o 29hci_uart-$(CONFIG_BT_HCIUART_LL) += hci_ll.o
30hci_uart-$(CONFIG_BT_HCIUART_ATH3K) += hci_ath.o 30hci_uart-$(CONFIG_BT_HCIUART_ATH3K) += hci_ath.o
31hci_uart-$(CONFIG_BT_HCIUART_3WIRE) += hci_h5.o
31hci_uart-objs := $(hci_uart-y) 32hci_uart-objs := $(hci_uart-y)
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index ad591bd240ec..11f36e502136 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -63,6 +63,7 @@ static struct usb_device_id ath3k_table[] = {
63 63
64 /* Atheros AR3011 with sflash firmware*/ 64 /* Atheros AR3011 with sflash firmware*/
65 { USB_DEVICE(0x0CF3, 0x3002) }, 65 { USB_DEVICE(0x0CF3, 0x3002) },
66 { USB_DEVICE(0x0CF3, 0xE019) },
66 { USB_DEVICE(0x13d3, 0x3304) }, 67 { USB_DEVICE(0x13d3, 0x3304) },
67 { USB_DEVICE(0x0930, 0x0215) }, 68 { USB_DEVICE(0x0930, 0x0215) },
68 { USB_DEVICE(0x0489, 0xE03D) }, 69 { USB_DEVICE(0x0489, 0xE03D) },
@@ -77,6 +78,8 @@ static struct usb_device_id ath3k_table[] = {
77 { USB_DEVICE(0x04CA, 0x3005) }, 78 { USB_DEVICE(0x04CA, 0x3005) },
78 { USB_DEVICE(0x13d3, 0x3362) }, 79 { USB_DEVICE(0x13d3, 0x3362) },
79 { USB_DEVICE(0x0CF3, 0xE004) }, 80 { USB_DEVICE(0x0CF3, 0xE004) },
81 { USB_DEVICE(0x0930, 0x0219) },
82 { USB_DEVICE(0x0489, 0xe057) },
80 83
81 /* Atheros AR5BBU12 with sflash firmware */ 84 /* Atheros AR5BBU12 with sflash firmware */
82 { USB_DEVICE(0x0489, 0xE02C) }, 85 { USB_DEVICE(0x0489, 0xE02C) },
@@ -101,6 +104,8 @@ static struct usb_device_id ath3k_blist_tbl[] = {
101 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 104 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
102 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 105 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
103 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 106 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
107 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
108 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
104 109
105 /* Atheros AR5BBU22 with sflash firmware */ 110 /* Atheros AR5BBU22 with sflash firmware */
106 { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, 111 { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 1fcd92380356..66c3a6770c41 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -231,12 +231,12 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
231 } 231 }
232 232
233 do { 233 do {
234 register unsigned int iobase = info->p_dev->resource[0]->start; 234 unsigned int iobase = info->p_dev->resource[0]->start;
235 register unsigned int offset; 235 unsigned int offset;
236 register unsigned char command; 236 unsigned char command;
237 register unsigned long ready_bit; 237 unsigned long ready_bit;
238 register struct sk_buff *skb; 238 register struct sk_buff *skb;
239 register int len; 239 int len;
240 240
241 clear_bit(XMIT_WAKEUP, &(info->tx_state)); 241 clear_bit(XMIT_WAKEUP, &(info->tx_state));
242 242
@@ -621,7 +621,6 @@ static int bluecard_hci_flush(struct hci_dev *hdev)
621static int bluecard_hci_open(struct hci_dev *hdev) 621static int bluecard_hci_open(struct hci_dev *hdev)
622{ 622{
623 bluecard_info_t *info = hci_get_drvdata(hdev); 623 bluecard_info_t *info = hci_get_drvdata(hdev);
624 unsigned int iobase = info->p_dev->resource[0]->start;
625 624
626 if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) 625 if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
627 bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE); 626 bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE);
@@ -630,6 +629,8 @@ static int bluecard_hci_open(struct hci_dev *hdev)
630 return 0; 629 return 0;
631 630
632 if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) { 631 if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {
632 unsigned int iobase = info->p_dev->resource[0]->start;
633
633 /* Enable LED */ 634 /* Enable LED */
634 outb(0x08 | 0x20, iobase + 0x30); 635 outb(0x08 | 0x20, iobase + 0x30);
635 } 636 }
@@ -641,7 +642,6 @@ static int bluecard_hci_open(struct hci_dev *hdev)
641static int bluecard_hci_close(struct hci_dev *hdev) 642static int bluecard_hci_close(struct hci_dev *hdev)
642{ 643{
643 bluecard_info_t *info = hci_get_drvdata(hdev); 644 bluecard_info_t *info = hci_get_drvdata(hdev);
644 unsigned int iobase = info->p_dev->resource[0]->start;
645 645
646 if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags))) 646 if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
647 return 0; 647 return 0;
@@ -649,6 +649,8 @@ static int bluecard_hci_close(struct hci_dev *hdev)
649 bluecard_hci_flush(hdev); 649 bluecard_hci_flush(hdev);
650 650
651 if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) { 651 if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {
652 unsigned int iobase = info->p_dev->resource[0]->start;
653
652 /* Disable LED */ 654 /* Disable LED */
653 outb(0x00, iobase + 0x30); 655 outb(0x00, iobase + 0x30);
654 } 656 }
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 609861a53c28..29caaed2d715 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -470,7 +470,7 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
470 hdev->flush = bpa10x_flush; 470 hdev->flush = bpa10x_flush;
471 hdev->send = bpa10x_send_frame; 471 hdev->send = bpa10x_send_frame;
472 472
473 set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); 473 set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
474 474
475 err = hci_register_dev(hdev); 475 err = hci_register_dev(hdev);
476 if (err < 0) { 476 if (err < 0) {
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 308c8599ab55..8925b6d672a6 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -186,9 +186,9 @@ static void bt3c_write_wakeup(bt3c_info_t *info)
186 return; 186 return;
187 187
188 do { 188 do {
189 register unsigned int iobase = info->p_dev->resource[0]->start; 189 unsigned int iobase = info->p_dev->resource[0]->start;
190 register struct sk_buff *skb; 190 register struct sk_buff *skb;
191 register int len; 191 int len;
192 192
193 if (!pcmcia_dev_present(info->p_dev)) 193 if (!pcmcia_dev_present(info->p_dev))
194 break; 194 break;
@@ -664,7 +664,7 @@ static int bt3c_check_config(struct pcmcia_device *p_dev, void *priv_data)
664{ 664{
665 int *try = priv_data; 665 int *try = priv_data;
666 666
667 if (try == 0) 667 if (!try)
668 p_dev->io_lines = 16; 668 p_dev->io_lines = 16;
669 669
670 if ((p_dev->resource[0]->end != 8) || (p_dev->resource[0]->start == 0)) 670 if ((p_dev->resource[0]->end != 8) || (p_dev->resource[0]->start == 0))
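
bt3c_check_config() above, like btuart_check_config() below, also switches its pointer test from 'try == 0' to '!try'. Both are valid C, since a literal 0 is a null pointer constant, but '!try' is the idiomatic kernel spelling and cannot be misread as testing the pointed-to value. A tiny illustration with made-up names:

#include <stddef.h>
#include <stdio.h>

/* '!ptr' reads as "nothing was supplied"; 'ptr == 0' is legal but
 * looks like an integer comparison at a glance. */
static void check_config(int *try)
{
        if (!try)
                printf("first pass: default to 16 io lines\n");
        else
                printf("retry, attempt %d\n", *try);
}

int main(void)
{
        int attempt = 1;

        check_config(NULL);
        check_config(&attempt);
        return 0;
}
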
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 94f2d65131c4..27068d149380 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -136,7 +136,7 @@ int btmrvl_remove_card(struct btmrvl_private *priv);
136 136
137void btmrvl_interrupt(struct btmrvl_private *priv); 137void btmrvl_interrupt(struct btmrvl_private *priv);
138 138
139void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb); 139bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
140int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb); 140int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
141 141
142int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd); 142int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd);
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 681ca9d18e12..3a4343b3bd6d 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -44,23 +44,35 @@ void btmrvl_interrupt(struct btmrvl_private *priv)
44} 44}
45EXPORT_SYMBOL_GPL(btmrvl_interrupt); 45EXPORT_SYMBOL_GPL(btmrvl_interrupt);
46 46
47void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb) 47bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
48{ 48{
49 struct hci_event_hdr *hdr = (void *) skb->data; 49 struct hci_event_hdr *hdr = (void *) skb->data;
50 struct hci_ev_cmd_complete *ec;
51 u16 opcode, ocf;
52 50
53 if (hdr->evt == HCI_EV_CMD_COMPLETE) { 51 if (hdr->evt == HCI_EV_CMD_COMPLETE) {
52 struct hci_ev_cmd_complete *ec;
53 u16 opcode, ocf, ogf;
54
54 ec = (void *) (skb->data + HCI_EVENT_HDR_SIZE); 55 ec = (void *) (skb->data + HCI_EVENT_HDR_SIZE);
55 opcode = __le16_to_cpu(ec->opcode); 56 opcode = __le16_to_cpu(ec->opcode);
56 ocf = hci_opcode_ocf(opcode); 57 ocf = hci_opcode_ocf(opcode);
58 ogf = hci_opcode_ogf(opcode);
59
57 if (ocf == BT_CMD_MODULE_CFG_REQ && 60 if (ocf == BT_CMD_MODULE_CFG_REQ &&
58 priv->btmrvl_dev.sendcmdflag) { 61 priv->btmrvl_dev.sendcmdflag) {
59 priv->btmrvl_dev.sendcmdflag = false; 62 priv->btmrvl_dev.sendcmdflag = false;
60 priv->adapter->cmd_complete = true; 63 priv->adapter->cmd_complete = true;
61 wake_up_interruptible(&priv->adapter->cmd_wait_q); 64 wake_up_interruptible(&priv->adapter->cmd_wait_q);
62 } 65 }
66
67 if (ogf == OGF) {
68 BT_DBG("vendor event skipped: ogf 0x%4.4x ocf 0x%4.4x",
69 ogf, ocf);
70 kfree_skb(skb);
71 return false;
72 }
63 } 73 }
74
75 return true;
64} 76}
65EXPORT_SYMBOL_GPL(btmrvl_check_evtpkt); 77EXPORT_SYMBOL_GPL(btmrvl_check_evtpkt);
66 78
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index a853244e7fd7..6a9e9717d3ab 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -110,6 +110,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
110 /* Marvell SD8787 Bluetooth device */ 110 /* Marvell SD8787 Bluetooth device */
111 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A), 111 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
112 .driver_data = (unsigned long) &btmrvl_sdio_sd8787 }, 112 .driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
113 /* Marvell SD8787 Bluetooth AMP device */
114 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911B),
115 .driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
113 /* Marvell SD8797 Bluetooth device */ 116 /* Marvell SD8797 Bluetooth device */
114 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A), 117 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
115 .driver_data = (unsigned long) &btmrvl_sdio_sd8797 }, 118 .driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
@@ -562,10 +565,13 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
562 skb_put(skb, buf_len); 565 skb_put(skb, buf_len);
563 skb_pull(skb, SDIO_HEADER_LEN); 566 skb_pull(skb, SDIO_HEADER_LEN);
564 567
565 if (type == HCI_EVENT_PKT) 568 if (type == HCI_EVENT_PKT) {
566 btmrvl_check_evtpkt(priv, skb); 569 if (btmrvl_check_evtpkt(priv, skb))
570 hci_recv_frame(skb);
571 } else {
572 hci_recv_frame(skb);
573 }
567 574
568 hci_recv_frame(skb);
569 hdev->stat.byte_rx += buf_len; 575 hdev->stat.byte_rx += buf_len;
570 break; 576 break;
571 577
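
btmrvl_check_evtpkt() now tells its caller whether the event survived: vendor-specific command-complete events (the new OGF test) are logged, freed with kfree_skb() and reported as consumed, and btmrvl_sdio_card_to_host() forwards a frame to the HCI core only when the helper returns true. That replaces the old unconditional hci_recv_frame() call, which would have delivered an already-freed skb once the helper started dropping packets. A minimal userspace sketch of the consume-or-deliver contract (hypothetical names, plain malloc/free standing in for skbs):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt {
        int vendor;
};

/* Returns false when the packet was consumed (and freed) here;
 * true when ownership stays with the caller. */
static bool filter_pkt(struct pkt *p)
{
        if (p->vendor) {
                printf("vendor event skipped\n");
                free(p);        /* consumed: caller must not touch p */
                return false;
        }
        return true;
}

static void deliver(struct pkt *p)
{
        printf("delivered\n");
        free(p);
}

int main(void)
{
        struct pkt *p = malloc(sizeof(*p));

        if (!p)
                return 1;
        p->vendor = 0;
        if (filter_pkt(p))      /* forward only if not consumed */
                deliver(p);
        return 0;
}
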
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index c4fc2f3fc32c..21e803a6a281 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -140,9 +140,9 @@ static void btuart_write_wakeup(btuart_info_t *info)
140 } 140 }
141 141
142 do { 142 do {
143 register unsigned int iobase = info->p_dev->resource[0]->start; 143 unsigned int iobase = info->p_dev->resource[0]->start;
144 register struct sk_buff *skb; 144 register struct sk_buff *skb;
145 register int len; 145 int len;
146 146
147 clear_bit(XMIT_WAKEUP, &(info->tx_state)); 147 clear_bit(XMIT_WAKEUP, &(info->tx_state));
148 148
@@ -593,7 +593,7 @@ static int btuart_check_config(struct pcmcia_device *p_dev, void *priv_data)
593{ 593{
594 int *try = priv_data; 594 int *try = priv_data;
595 595
596 if (try == 0) 596 if (!try)
597 p_dev->io_lines = 16; 597 p_dev->io_lines = 16;
598 598
599 if ((p_dev->resource[0]->end != 8) || (p_dev->resource[0]->start == 0)) 599 if ((p_dev->resource[0]->end != 8) || (p_dev->resource[0]->start == 0))
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index c9463af8e564..cef3bac1a543 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -21,15 +21,7 @@
21 * 21 *
22 */ 22 */
23 23
24#include <linux/kernel.h>
25#include <linux/module.h> 24#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/slab.h>
28#include <linux/types.h>
29#include <linux/sched.h>
30#include <linux/errno.h>
31#include <linux/skbuff.h>
32
33#include <linux/usb.h> 25#include <linux/usb.h>
34 26
35#include <net/bluetooth/bluetooth.h> 27#include <net/bluetooth/bluetooth.h>
@@ -106,6 +98,7 @@ static struct usb_device_id btusb_table[] = {
106 { USB_DEVICE(0x0a5c, 0x21e6) }, 98 { USB_DEVICE(0x0a5c, 0x21e6) },
107 { USB_DEVICE(0x0a5c, 0x21e8) }, 99 { USB_DEVICE(0x0a5c, 0x21e8) },
108 { USB_DEVICE(0x0a5c, 0x21f3) }, 100 { USB_DEVICE(0x0a5c, 0x21f3) },
101 { USB_DEVICE(0x0a5c, 0x21f4) },
109 { USB_DEVICE(0x413c, 0x8197) }, 102 { USB_DEVICE(0x413c, 0x8197) },
110 103
111 /* Foxconn - Hon Hai */ 104 /* Foxconn - Hon Hai */
@@ -125,6 +118,7 @@ static struct usb_device_id blacklist_table[] = {
125 118
126 /* Atheros 3011 with sflash firmware */ 119 /* Atheros 3011 with sflash firmware */
127 { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, 120 { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
121 { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
128 { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, 122 { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
129 { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, 123 { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
130 { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, 124 { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
@@ -139,6 +133,8 @@ static struct usb_device_id blacklist_table[] = {
139 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 133 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
140 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 134 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
141 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 135 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
136 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
137 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
142 138
143 /* Atheros AR5BBU12 with sflash firmware */ 139 /* Atheros AR5BBU12 with sflash firmware */
144 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, 140 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -1026,7 +1022,7 @@ static int btusb_probe(struct usb_interface *intf,
1026 data->isoc = usb_ifnum_to_if(data->udev, 1); 1022 data->isoc = usb_ifnum_to_if(data->udev, 1);
1027 1023
1028 if (!reset) 1024 if (!reset)
1029 set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); 1025 set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
1030 1026
1031 if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) { 1027 if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) {
1032 if (!disable_scofix) 1028 if (!disable_scofix)
@@ -1038,7 +1034,7 @@ static int btusb_probe(struct usb_interface *intf,
1038 1034
1039 if (id->driver_info & BTUSB_DIGIANSWER) { 1035 if (id->driver_info & BTUSB_DIGIANSWER) {
1040 data->cmdreq_type = USB_TYPE_VENDOR; 1036 data->cmdreq_type = USB_TYPE_VENDOR;
1041 set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); 1037 set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
1042 } 1038 }
1043 1039
1044 if (id->driver_info & BTUSB_CSR) { 1040 if (id->driver_info & BTUSB_CSR) {
@@ -1046,7 +1042,7 @@ static int btusb_probe(struct usb_interface *intf,
1046 1042
1047 /* Old firmware would otherwise execute USB reset */ 1043 /* Old firmware would otherwise execute USB reset */
1048 if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117) 1044 if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117)
1049 set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); 1045 set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
1050 } 1046 }
1051 1047
1052 if (id->driver_info & BTUSB_SNIFFER) { 1048 if (id->driver_info & BTUSB_SNIFFER) {
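
Besides the new device IDs here (a 0x0a5c:0x21f4 Broadcom entry, one more AR3011 handed off to ath3k via BTUSB_IGNORE, and two extra AR3012 variants), the quirk bit HCI_QUIRK_NO_RESET is renamed HCI_QUIRK_RESET_ON_CLOSE across bpa10x.c, btusb.c and hci_ldisc.c; the set_bit() call sites are otherwise untouched. The blacklist acts as a second lookup after the generic table match, with driver_info selecting per-device handling. A userspace sketch of that lookup (simplified types and made-up flag values, not the kernel's usb_device_id machinery):

#include <stdio.h>

#define BTUSB_IGNORE    0x01    /* illustrative values only */
#define BTUSB_ATH3012   0x02

struct dev_id {
        unsigned short vid, pid;
        unsigned long driver_info;
};

/* Mirrors the idea: a generic match picks the driver, then a second
 * table refines how a specific vid:pid must be treated. */
static const struct dev_id blacklist[] = {
        { 0x0cf3, 0xe019, BTUSB_IGNORE },       /* AR3011, ath3k loads fw */
        { 0x0930, 0x0219, BTUSB_ATH3012 },      /* AR3012, needs patching */
        { 0, 0, 0 }
};

static unsigned long lookup(unsigned short vid, unsigned short pid)
{
        const struct dev_id *id;

        for (id = blacklist; id->vid; id++)
                if (id->vid == vid && id->pid == pid)
                        return id->driver_info;
        return 0;
}

int main(void)
{
        printf("0x0cf3:0xe019 -> flags %#lx\n", lookup(0x0cf3, 0xe019));
        return 0;
}
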
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 6e8d96189684..97a7784db4a2 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -144,9 +144,9 @@ static void dtl1_write_wakeup(dtl1_info_t *info)
144 } 144 }
145 145
146 do { 146 do {
147 register unsigned int iobase = info->p_dev->resource[0]->start; 147 unsigned int iobase = info->p_dev->resource[0]->start;
148 register struct sk_buff *skb; 148 register struct sk_buff *skb;
149 register int len; 149 int len;
150 150
151 clear_bit(XMIT_WAKEUP, &(info->tx_state)); 151 clear_bit(XMIT_WAKEUP, &(info->tx_state));
152 152
@@ -586,29 +586,31 @@ static int dtl1_confcheck(struct pcmcia_device *p_dev, void *priv_data)
586static int dtl1_config(struct pcmcia_device *link) 586static int dtl1_config(struct pcmcia_device *link)
587{ 587{
588 dtl1_info_t *info = link->priv; 588 dtl1_info_t *info = link->priv;
589 int i; 589 int ret;
590 590
591 /* Look for a generic full-sized window */ 591 /* Look for a generic full-sized window */
592 link->resource[0]->end = 8; 592 link->resource[0]->end = 8;
593 if (pcmcia_loop_config(link, dtl1_confcheck, NULL) < 0) 593 ret = pcmcia_loop_config(link, dtl1_confcheck, NULL);
594 if (ret)
594 goto failed; 595 goto failed;
595 596
596 i = pcmcia_request_irq(link, dtl1_interrupt); 597 ret = pcmcia_request_irq(link, dtl1_interrupt);
597 if (i != 0) 598 if (ret)
598 goto failed; 599 goto failed;
599 600
600 i = pcmcia_enable_device(link); 601 ret = pcmcia_enable_device(link);
601 if (i != 0) 602 if (ret)
602 goto failed; 603 goto failed;
603 604
604 if (dtl1_open(info) != 0) 605 ret = dtl1_open(info);
606 if (ret)
605 goto failed; 607 goto failed;
606 608
607 return 0; 609 return 0;
608 610
609failed: 611failed:
610 dtl1_detach(link); 612 dtl1_detach(link);
611 return -ENODEV; 613 return ret;
612} 614}
613 615
614static const struct pcmcia_device_id dtl1_ids[] = { 616static const struct pcmcia_device_id dtl1_ids[] = {
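
dtl1_config() used to collapse every failure into -ENODEV; with 'i' renamed to 'ret' it now propagates the exact error from whichever of pcmcia_loop_config(), pcmcia_request_irq(), pcmcia_enable_device() or dtl1_open() failed, while still funnelling cleanup through a single label. The pattern in isolation (stand-in step functions):

#include <errno.h>
#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return -EBUSY; }      /* pretend this fails */

/* Return the first failing step's own error code instead of
 * flattening everything to one generic value. */
static int configure(void)
{
        int ret;

        ret = step_a();
        if (ret)
                goto failed;

        ret = step_b();
        if (ret)
                goto failed;

        return 0;

failed:
        /* teardown goes here (dtl1_detach() in the driver) */
        return ret;
}

int main(void)
{
        printf("configure() = %d\n", configure());
        return 0;
}
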
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 661a8dc4d2f8..57e502e06080 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -552,7 +552,7 @@ static u16 bscp_get_crc(struct bcsp_struct *bcsp)
552static int bcsp_recv(struct hci_uart *hu, void *data, int count) 552static int bcsp_recv(struct hci_uart *hu, void *data, int count)
553{ 553{
554 struct bcsp_struct *bcsp = hu->priv; 554 struct bcsp_struct *bcsp = hu->priv;
555 register unsigned char *ptr; 555 unsigned char *ptr;
556 556
557 BT_DBG("hu %p count %d rx_state %d rx_count %ld", 557 BT_DBG("hu %p count %d rx_state %d rx_count %ld",
558 hu, count, bcsp->rx_state, bcsp->rx_count); 558 hu, count, bcsp->rx_state, bcsp->rx_count);
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 748329468d26..c60623f206d4 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -126,7 +126,7 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
126 126
127static inline int h4_check_data_len(struct h4_struct *h4, int len) 127static inline int h4_check_data_len(struct h4_struct *h4, int len)
128{ 128{
129 register int room = skb_tailroom(h4->rx_skb); 129 int room = skb_tailroom(h4->rx_skb);
130 130
131 BT_DBG("len %d room %d", len, room); 131 BT_DBG("len %d room %d", len, room);
132 132
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
new file mode 100644
index 000000000000..b6154d5a07a5
--- /dev/null
+++ b/drivers/bluetooth/hci_h5.c
@@ -0,0 +1,747 @@
1/*
2 *
3 * Bluetooth HCI Three-wire UART driver
4 *
5 * Copyright (C) 2012 Intel Corporation
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/kernel.h>
25#include <linux/errno.h>
26#include <linux/skbuff.h>
27
28#include <net/bluetooth/bluetooth.h>
29#include <net/bluetooth/hci_core.h>
30
31#include "hci_uart.h"
32
33#define HCI_3WIRE_ACK_PKT 0
34#define HCI_3WIRE_LINK_PKT 15
35
36/* Sliding window size */
37#define H5_TX_WIN_MAX 4
38
39#define H5_ACK_TIMEOUT msecs_to_jiffies(250)
40#define H5_SYNC_TIMEOUT msecs_to_jiffies(100)
41
42/*
43 * Maximum Three-wire packet:
44 * 4 byte header + max value for 12-bit length + 2 bytes for CRC
45 */
46#define H5_MAX_LEN (4 + 0xfff + 2)
47
48/* Convenience macros for reading Three-wire header values */
49#define H5_HDR_SEQ(hdr) ((hdr)[0] & 0x07)
50#define H5_HDR_ACK(hdr) (((hdr)[0] >> 3) & 0x07)
51#define H5_HDR_CRC(hdr) (((hdr)[0] >> 6) & 0x01)
52#define H5_HDR_RELIABLE(hdr) (((hdr)[0] >> 7) & 0x01)
53#define H5_HDR_PKT_TYPE(hdr) ((hdr)[1] & 0x0f)
54#define H5_HDR_LEN(hdr) ((((hdr)[1] >> 4) & 0xff) + ((hdr)[2] << 4))
55
56#define SLIP_DELIMITER 0xc0
57#define SLIP_ESC 0xdb
58#define SLIP_ESC_DELIM 0xdc
59#define SLIP_ESC_ESC 0xdd
60
61/* H5 state flags */
62enum {
63 H5_RX_ESC, /* SLIP escape mode */
64 H5_TX_ACK_REQ, /* Pending ack to send */
65};
66
67struct h5 {
68 struct sk_buff_head unack; /* Unack'ed packets queue */
69 struct sk_buff_head rel; /* Reliable packets queue */
70 struct sk_buff_head unrel; /* Unreliable packets queue */
71
72 unsigned long flags;
73
74 struct sk_buff *rx_skb; /* Receive buffer */
75 size_t rx_pending; /* Expecting more bytes */
76 u8 rx_ack; /* Last ack number received */
77
78 int (*rx_func) (struct hci_uart *hu, u8 c);
79
80 struct timer_list timer; /* Retransmission timer */
81
82 u8 tx_seq; /* Next seq number to send */
83 u8 tx_ack; /* Next ack number to send */
84 u8 tx_win; /* Sliding window size */
85
86 enum {
87 H5_UNINITIALIZED,
88 H5_INITIALIZED,
89 H5_ACTIVE,
90 } state;
91
92 enum {
93 H5_AWAKE,
94 H5_SLEEPING,
95 H5_WAKING_UP,
96 } sleep;
97};
98
99static void h5_reset_rx(struct h5 *h5);
100
101static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
102{
103 struct h5 *h5 = hu->priv;
104 struct sk_buff *nskb;
105
106 nskb = alloc_skb(3, GFP_ATOMIC);
107 if (!nskb)
108 return;
109
110 bt_cb(nskb)->pkt_type = HCI_3WIRE_LINK_PKT;
111
112 memcpy(skb_put(nskb, len), data, len);
113
114 skb_queue_tail(&h5->unrel, nskb);
115}
116
117static u8 h5_cfg_field(struct h5 *h5)
118{
119 u8 field = 0;
120
121 /* Sliding window size (first 3 bits) */
122 field |= (h5->tx_win & 7);
123
124 return field;
125}
126
127static void h5_timed_event(unsigned long arg)
128{
129 const unsigned char sync_req[] = { 0x01, 0x7e };
130 unsigned char conf_req[] = { 0x03, 0xfc, 0x01 };
131 struct hci_uart *hu = (struct hci_uart *) arg;
132 struct h5 *h5 = hu->priv;
133 struct sk_buff *skb;
134 unsigned long flags;
135
136 BT_DBG("%s", hu->hdev->name);
137
138 if (h5->state == H5_UNINITIALIZED)
139 h5_link_control(hu, sync_req, sizeof(sync_req));
140
141 if (h5->state == H5_INITIALIZED) {
142 conf_req[2] = h5_cfg_field(h5);
143 h5_link_control(hu, conf_req, sizeof(conf_req));
144 }
145
146 if (h5->state != H5_ACTIVE) {
147 mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
148 goto wakeup;
149 }
150
151 if (h5->sleep != H5_AWAKE) {
152 h5->sleep = H5_SLEEPING;
153 goto wakeup;
154 }
155
156 BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);
157
158 spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);
159
160 while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
161 h5->tx_seq = (h5->tx_seq - 1) & 0x07;
162 skb_queue_head(&h5->rel, skb);
163 }
164
165 spin_unlock_irqrestore(&h5->unack.lock, flags);
166
167wakeup:
168 hci_uart_tx_wakeup(hu);
169}
170
171static int h5_open(struct hci_uart *hu)
172{
173 struct h5 *h5;
174 const unsigned char sync[] = { 0x01, 0x7e };
175
176 BT_DBG("hu %p", hu);
177
178 h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
179 if (!h5)
180 return -ENOMEM;
181
182 hu->priv = h5;
183
184 skb_queue_head_init(&h5->unack);
185 skb_queue_head_init(&h5->rel);
186 skb_queue_head_init(&h5->unrel);
187
188 h5_reset_rx(h5);
189
190 init_timer(&h5->timer);
191 h5->timer.function = h5_timed_event;
192 h5->timer.data = (unsigned long) hu;
193
194 h5->tx_win = H5_TX_WIN_MAX;
195
196 set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);
197
198 /* Send initial sync request */
199 h5_link_control(hu, sync, sizeof(sync));
200 mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
201
202 return 0;
203}
204
205static int h5_close(struct hci_uart *hu)
206{
207 struct h5 *h5 = hu->priv;
208
209 skb_queue_purge(&h5->unack);
210 skb_queue_purge(&h5->rel);
211 skb_queue_purge(&h5->unrel);
212
213 del_timer(&h5->timer);
214
215 kfree(h5);
216
217 return 0;
218}
219
220static void h5_pkt_cull(struct h5 *h5)
221{
222 struct sk_buff *skb, *tmp;
223 unsigned long flags;
224 int i, to_remove;
225 u8 seq;
226
227 spin_lock_irqsave(&h5->unack.lock, flags);
228
229 to_remove = skb_queue_len(&h5->unack);
230 if (to_remove == 0)
231 goto unlock;
232
233 seq = h5->tx_seq;
234
235 while (to_remove > 0) {
236 if (h5->rx_ack == seq)
237 break;
238
239 to_remove--;
240 seq = (seq - 1) % 8;
241 }
242
243 if (seq != h5->rx_ack)
244 BT_ERR("Controller acked invalid packet");
245
246 i = 0;
247 skb_queue_walk_safe(&h5->unack, skb, tmp) {
248 if (i++ >= to_remove)
249 break;
250
251 __skb_unlink(skb, &h5->unack);
252 kfree_skb(skb);
253 }
254
255 if (skb_queue_empty(&h5->unack))
256 del_timer(&h5->timer);
257
258unlock:
259 spin_unlock_irqrestore(&h5->unack.lock, flags);
260}
261
262static void h5_handle_internal_rx(struct hci_uart *hu)
263{
264 struct h5 *h5 = hu->priv;
265 const unsigned char sync_req[] = { 0x01, 0x7e };
266 const unsigned char sync_rsp[] = { 0x02, 0x7d };
267 unsigned char conf_req[] = { 0x03, 0xfc, 0x01 };
268 const unsigned char conf_rsp[] = { 0x04, 0x7b };
269 const unsigned char wakeup_req[] = { 0x05, 0xfa };
270 const unsigned char woken_req[] = { 0x06, 0xf9 };
271 const unsigned char sleep_req[] = { 0x07, 0x78 };
272 const unsigned char *hdr = h5->rx_skb->data;
273 const unsigned char *data = &h5->rx_skb->data[4];
274
275 BT_DBG("%s", hu->hdev->name);
276
277 if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
278 return;
279
280 if (H5_HDR_LEN(hdr) < 2)
281 return;
282
283 conf_req[2] = h5_cfg_field(h5);
284
285 if (memcmp(data, sync_req, 2) == 0) {
286 h5_link_control(hu, sync_rsp, 2);
287 } else if (memcmp(data, sync_rsp, 2) == 0) {
288 h5->state = H5_INITIALIZED;
289 h5_link_control(hu, conf_req, 3);
290 } else if (memcmp(data, conf_req, 2) == 0) {
291 h5_link_control(hu, conf_rsp, 2);
292 h5_link_control(hu, conf_req, 3);
293 } else if (memcmp(data, conf_rsp, 2) == 0) {
294 if (H5_HDR_LEN(hdr) > 2)
295 h5->tx_win = (data[2] & 7);
296 BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
297 h5->state = H5_ACTIVE;
298 hci_uart_init_ready(hu);
299 return;
300 } else if (memcmp(data, sleep_req, 2) == 0) {
301 BT_DBG("Peer went to sleep");
302 h5->sleep = H5_SLEEPING;
303 return;
304 } else if (memcmp(data, woken_req, 2) == 0) {
305 BT_DBG("Peer woke up");
306 h5->sleep = H5_AWAKE;
307 } else if (memcmp(data, wakeup_req, 2) == 0) {
308 BT_DBG("Peer requested wakeup");
309 h5_link_control(hu, woken_req, 2);
310 h5->sleep = H5_AWAKE;
311 } else {
312 BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
313 return;
314 }
315
316 hci_uart_tx_wakeup(hu);
317}
318
319static void h5_complete_rx_pkt(struct hci_uart *hu)
320{
321 struct h5 *h5 = hu->priv;
322 const unsigned char *hdr = h5->rx_skb->data;
323
324 if (H5_HDR_RELIABLE(hdr)) {
325 h5->tx_ack = (h5->tx_ack + 1) % 8;
326 set_bit(H5_TX_ACK_REQ, &h5->flags);
327 hci_uart_tx_wakeup(hu);
328 }
329
330 h5->rx_ack = H5_HDR_ACK(hdr);
331
332 h5_pkt_cull(h5);
333
334 switch (H5_HDR_PKT_TYPE(hdr)) {
335 case HCI_EVENT_PKT:
336 case HCI_ACLDATA_PKT:
337 case HCI_SCODATA_PKT:
338 bt_cb(h5->rx_skb)->pkt_type = H5_HDR_PKT_TYPE(hdr);
339
340 /* Remove Three-wire header */
341 skb_pull(h5->rx_skb, 4);
342
343 hci_recv_frame(h5->rx_skb);
344 h5->rx_skb = NULL;
345
346 break;
347
348 default:
349 h5_handle_internal_rx(hu);
350 break;
351 }
352
353 h5_reset_rx(h5);
354}
355
356static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
357{
358 struct h5 *h5 = hu->priv;
359
360 h5_complete_rx_pkt(hu);
361 h5_reset_rx(h5);
362
363 return 0;
364}
365
366static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
367{
368 struct h5 *h5 = hu->priv;
369 const unsigned char *hdr = h5->rx_skb->data;
370
371 if (H5_HDR_CRC(hdr)) {
372 h5->rx_func = h5_rx_crc;
373 h5->rx_pending = 2;
374 } else {
375 h5_complete_rx_pkt(hu);
376 h5_reset_rx(h5);
377 }
378
379 return 0;
380}
381
382static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
383{
384 struct h5 *h5 = hu->priv;
385 const unsigned char *hdr = h5->rx_skb->data;
386
387 BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
388 hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
389 H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
390 H5_HDR_LEN(hdr));
391
392 if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
393 BT_ERR("Invalid header checksum");
394 h5_reset_rx(h5);
395 return 0;
396 }
397
398 if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
399 BT_ERR("Out-of-order packet arrived (%u != %u)",
400 H5_HDR_SEQ(hdr), h5->tx_ack);
401 h5_reset_rx(h5);
402 return 0;
403 }
404
405 if (h5->state != H5_ACTIVE &&
406 H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
407 BT_ERR("Non-link packet received in non-active state");
408 h5_reset_rx(h5);
409 }
410
411 h5->rx_func = h5_rx_payload;
412 h5->rx_pending = H5_HDR_LEN(hdr);
413
414 return 0;
415}
416
417static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
418{
419 struct h5 *h5 = hu->priv;
420
421 if (c == SLIP_DELIMITER)
422 return 1;
423
424 h5->rx_func = h5_rx_3wire_hdr;
425 h5->rx_pending = 4;
426
427 h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
428 if (!h5->rx_skb) {
429 BT_ERR("Can't allocate mem for new packet");
430 h5_reset_rx(h5);
431 return -ENOMEM;
432 }
433
434 h5->rx_skb->dev = (void *) hu->hdev;
435
436 return 0;
437}
438
439static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
440{
441 struct h5 *h5 = hu->priv;
442
443 if (c == SLIP_DELIMITER)
444 h5->rx_func = h5_rx_pkt_start;
445
446 return 1;
447}
448
449static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
450{
451 const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
452 const u8 *byte = &c;
453
454 if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
455 set_bit(H5_RX_ESC, &h5->flags);
456 return;
457 }
458
459 if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
460 switch (c) {
461 case SLIP_ESC_DELIM:
462 byte = &delim;
463 break;
464 case SLIP_ESC_ESC:
465 byte = &esc;
466 break;
467 default:
468 BT_ERR("Invalid esc byte 0x%02hhx", c);
469 h5_reset_rx(h5);
470 return;
471 }
472 }
473
474 memcpy(skb_put(h5->rx_skb, 1), byte, 1);
475 h5->rx_pending--;
476
477 BT_DBG("unsliped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
478}
479
480static void h5_reset_rx(struct h5 *h5)
481{
482 if (h5->rx_skb) {
483 kfree_skb(h5->rx_skb);
484 h5->rx_skb = NULL;
485 }
486
487 h5->rx_func = h5_rx_delimiter;
488 h5->rx_pending = 0;
489 clear_bit(H5_RX_ESC, &h5->flags);
490}
491
492static int h5_recv(struct hci_uart *hu, void *data, int count)
493{
494 struct h5 *h5 = hu->priv;
495 unsigned char *ptr = data;
496
497 BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
498 count);
499
500 while (count > 0) {
501 int processed;
502
503 if (h5->rx_pending > 0) {
504 if (*ptr == SLIP_DELIMITER) {
505 BT_ERR("Too short H5 packet");
506 h5_reset_rx(h5);
507 continue;
508 }
509
510 h5_unslip_one_byte(h5, *ptr);
511
512 ptr++; count--;
513 continue;
514 }
515
516 processed = h5->rx_func(hu, *ptr);
517 if (processed < 0)
518 return processed;
519
520 ptr += processed;
521 count -= processed;
522 }
523
524 return 0;
525}
526
527static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
528{
529 struct h5 *h5 = hu->priv;
530
531 if (skb->len > 0xfff) {
532 BT_ERR("Packet too long (%u bytes)", skb->len);
533 kfree_skb(skb);
534 return 0;
535 }
536
537 if (h5->state != H5_ACTIVE) {
538 BT_ERR("Ignoring HCI data in non-active state");
539 kfree_skb(skb);
540 return 0;
541 }
542
543 switch (bt_cb(skb)->pkt_type) {
544 case HCI_ACLDATA_PKT:
545 case HCI_COMMAND_PKT:
546 skb_queue_tail(&h5->rel, skb);
547 break;
548
549 case HCI_SCODATA_PKT:
550 skb_queue_tail(&h5->unrel, skb);
551 break;
552
553 default:
554 BT_ERR("Unknown packet type %u", bt_cb(skb)->pkt_type);
555 kfree_skb(skb);
556 break;
557 }
558
559 return 0;
560}
561
562static void h5_slip_delim(struct sk_buff *skb)
563{
564 const char delim = SLIP_DELIMITER;
565
566 memcpy(skb_put(skb, 1), &delim, 1);
567}
568
569static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
570{
571 const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
572 const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };
573
574 switch (c) {
575 case SLIP_DELIMITER:
576 memcpy(skb_put(skb, 2), &esc_delim, 2);
577 break;
578 case SLIP_ESC:
579 memcpy(skb_put(skb, 2), &esc_esc, 2);
580 break;
581 default:
582 memcpy(skb_put(skb, 1), &c, 1);
583 }
584}
585
586static bool valid_packet_type(u8 type)
587{
588 switch (type) {
589 case HCI_ACLDATA_PKT:
590 case HCI_COMMAND_PKT:
591 case HCI_SCODATA_PKT:
592 case HCI_3WIRE_LINK_PKT:
593 case HCI_3WIRE_ACK_PKT:
594 return true;
595 default:
596 return false;
597 }
598}
599
600static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
601 const u8 *data, size_t len)
602{
603 struct h5 *h5 = hu->priv;
604 struct sk_buff *nskb;
605 u8 hdr[4];
606 int i;
607
608 if (!valid_packet_type(pkt_type)) {
609 BT_ERR("Unknown packet type %u", pkt_type);
610 return NULL;
611 }
612
613 /*
614 * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
615 * (because bytes 0xc0 and 0xdb are escaped, worst case is when
616 * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
617 * delimiters at start and end).
618 */
619 nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
620 if (!nskb)
621 return NULL;
622
623 bt_cb(nskb)->pkt_type = pkt_type;
624
625 h5_slip_delim(nskb);
626
627 hdr[0] = h5->tx_ack << 3;
628 clear_bit(H5_TX_ACK_REQ, &h5->flags);
629
630 /* Reliable packet? */
631 if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
632 hdr[0] |= 1 << 7;
633 hdr[0] |= h5->tx_seq;
634 h5->tx_seq = (h5->tx_seq + 1) % 8;
635 }
636
637 hdr[1] = pkt_type | ((len & 0x0f) << 4);
638 hdr[2] = len >> 4;
639 hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);
640
641 BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
642 hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
643 H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
644 H5_HDR_LEN(hdr));
645
646 for (i = 0; i < 4; i++)
647 h5_slip_one_byte(nskb, hdr[i]);
648
649 for (i = 0; i < len; i++)
650 h5_slip_one_byte(nskb, data[i]);
651
652 h5_slip_delim(nskb);
653
654 return nskb;
655}
656
657static struct sk_buff *h5_dequeue(struct hci_uart *hu)
658{
659 struct h5 *h5 = hu->priv;
660 unsigned long flags;
661 struct sk_buff *skb, *nskb;
662
663 if (h5->sleep != H5_AWAKE) {
664 const unsigned char wakeup_req[] = { 0x05, 0xfa };
665
666 if (h5->sleep == H5_WAKING_UP)
667 return NULL;
668
669 h5->sleep = H5_WAKING_UP;
670 BT_DBG("Sending wakeup request");
671
672 mod_timer(&h5->timer, jiffies + HZ / 100);
673 return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
674 }
675
676 if ((skb = skb_dequeue(&h5->unrel)) != NULL) {
677 nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
678 skb->data, skb->len);
679 if (nskb) {
680 kfree_skb(skb);
681 return nskb;
682 }
683
684 skb_queue_head(&h5->unrel, skb);
685 BT_ERR("Could not dequeue pkt because alloc_skb failed");
686 }
687
688 spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);
689
690 if (h5->unack.qlen >= h5->tx_win)
691 goto unlock;
692
693 if ((skb = skb_dequeue(&h5->rel)) != NULL) {
694 nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
695 skb->data, skb->len);
696 if (nskb) {
697 __skb_queue_tail(&h5->unack, skb);
698 mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
699 spin_unlock_irqrestore(&h5->unack.lock, flags);
700 return nskb;
701 }
702
703 skb_queue_head(&h5->rel, skb);
704 BT_ERR("Could not dequeue pkt because alloc_skb failed");
705 }
706
707unlock:
708 spin_unlock_irqrestore(&h5->unack.lock, flags);
709
710 if (test_bit(H5_TX_ACK_REQ, &h5->flags))
711 return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);
712
713 return NULL;
714}
715
716static int h5_flush(struct hci_uart *hu)
717{
718 BT_DBG("hu %p", hu);
719 return 0;
720}
721
722static struct hci_uart_proto h5p = {
723 .id = HCI_UART_3WIRE,
724 .open = h5_open,
725 .close = h5_close,
726 .recv = h5_recv,
727 .enqueue = h5_enqueue,
728 .dequeue = h5_dequeue,
729 .flush = h5_flush,
730};
731
732int __init h5_init(void)
733{
734 int err = hci_uart_register_proto(&h5p);
735
736 if (!err)
737 BT_INFO("HCI Three-wire UART (H5) protocol initialized");
738 else
739 BT_ERR("HCI Three-wire UART (H5) protocol init failed");
740
741 return err;
742}
743
744int __exit h5_deinit(void)
745{
746 return hci_uart_unregister_proto(&h5p);
747}
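
The new hci_h5.c implements the three-wire (H5) UART transport: SLIP framing with 0xc0 delimiters and 0xdb escapes, a sliding window of up to four unacked reliable packets backed by a 250 ms retransmit timer, and a sync/config link-establishment exchange before the transport goes active. Every frame starts with the 4-byte header decoded by the H5_HDR_* macros: 3-bit seq and ack numbers, CRC and reliable flags, a 4-bit packet type, a 12-bit length, and a final byte chosen so that all four header bytes sum to 0xff, which is precisely the check in h5_rx_3wire_hdr(). A standalone encode/verify sketch of that header layout (helper names are made up):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same layout as the H5_HDR_* macros above. */
#define HDR_SEQ(h)      ((h)[0] & 0x07)
#define HDR_ACK(h)      (((h)[0] >> 3) & 0x07)
#define HDR_LEN(h)      ((((h)[1] >> 4) & 0xff) + ((h)[2] << 4))

static void pack_hdr(uint8_t hdr[4], int seq, int ack, int reliable,
                     int type, size_t len)
{
        hdr[0] = ack << 3;
        if (reliable)
                hdr[0] |= (1 << 7) | seq;       /* seq only counts here */
        hdr[1] = type | ((len & 0x0f) << 4);
        hdr[2] = len >> 4;
        /* Checksum byte: the four header bytes must sum to 0xff. */
        hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);
}

static int hdr_ok(const uint8_t hdr[4])
{
        return ((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) == 0xff;
}

int main(void)
{
        uint8_t hdr[4];

        pack_hdr(hdr, 5, 2, 1, 2 /* ACL */, 0x123);
        printf("seq %u ack %u len %u checksum %s\n",
               HDR_SEQ(hdr), HDR_ACK(hdr), HDR_LEN(hdr),
               hdr_ok(hdr) ? "ok" : "bad");
        return 0;
}
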
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index e564579a6115..74e0966b3ead 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -156,6 +156,35 @@ restart:
156 return 0; 156 return 0;
157} 157}
158 158
159static void hci_uart_init_work(struct work_struct *work)
160{
161 struct hci_uart *hu = container_of(work, struct hci_uart, init_ready);
162 int err;
163
164 if (!test_and_clear_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
165 return;
166
167 err = hci_register_dev(hu->hdev);
168 if (err < 0) {
169 BT_ERR("Can't register HCI device");
170 hci_free_dev(hu->hdev);
171 hu->hdev = NULL;
172 hu->proto->close(hu);
173 }
174
175 set_bit(HCI_UART_REGISTERED, &hu->flags);
176}
177
178int hci_uart_init_ready(struct hci_uart *hu)
179{
180 if (!test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
181 return -EALREADY;
182
183 schedule_work(&hu->init_ready);
184
185 return 0;
186}
187
159/* ------- Interface to HCI layer ------ */ 188/* ------- Interface to HCI layer ------ */
160/* Initialize device */ 189/* Initialize device */
161static int hci_uart_open(struct hci_dev *hdev) 190static int hci_uart_open(struct hci_dev *hdev)
@@ -264,6 +293,8 @@ static int hci_uart_tty_open(struct tty_struct *tty)
264 hu->tty = tty; 293 hu->tty = tty;
265 tty->receive_room = 65536; 294 tty->receive_room = 65536;
266 295
296 INIT_WORK(&hu->init_ready, hci_uart_init_work);
297
267 spin_lock_init(&hu->rx_lock); 298 spin_lock_init(&hu->rx_lock);
268 299
269 /* Flush any pending characters in the driver and line discipline. */ 300 /* Flush any pending characters in the driver and line discipline. */
@@ -286,28 +317,30 @@ static int hci_uart_tty_open(struct tty_struct *tty)
286static void hci_uart_tty_close(struct tty_struct *tty) 317static void hci_uart_tty_close(struct tty_struct *tty)
287{ 318{
288 struct hci_uart *hu = (void *)tty->disc_data; 319 struct hci_uart *hu = (void *)tty->disc_data;
320 struct hci_dev *hdev;
289 321
290 BT_DBG("tty %p", tty); 322 BT_DBG("tty %p", tty);
291 323
292 /* Detach from the tty */ 324 /* Detach from the tty */
293 tty->disc_data = NULL; 325 tty->disc_data = NULL;
294 326
295 if (hu) { 327 if (!hu)
296 struct hci_dev *hdev = hu->hdev; 328 return;
297 329
298 if (hdev) 330 hdev = hu->hdev;
299 hci_uart_close(hdev); 331 if (hdev)
332 hci_uart_close(hdev);
300 333
301 if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) { 334 if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
302 if (hdev) { 335 if (hdev) {
336 if (test_bit(HCI_UART_REGISTERED, &hu->flags))
303 hci_unregister_dev(hdev); 337 hci_unregister_dev(hdev);
304 hci_free_dev(hdev); 338 hci_free_dev(hdev);
305 }
306 hu->proto->close(hu);
307 } 339 }
308 340 hu->proto->close(hu);
309 kfree(hu);
310 } 341 }
342
343 kfree(hu);
311} 344}
312 345
313/* hci_uart_tty_wakeup() 346/* hci_uart_tty_wakeup()
@@ -394,19 +427,24 @@ static int hci_uart_register_dev(struct hci_uart *hu)
394 set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); 427 set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
395 428
396 if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags)) 429 if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
397 set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); 430 set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
398 431
399 if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags)) 432 if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
400 hdev->dev_type = HCI_AMP; 433 hdev->dev_type = HCI_AMP;
401 else 434 else
402 hdev->dev_type = HCI_BREDR; 435 hdev->dev_type = HCI_BREDR;
403 436
437 if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
438 return 0;
439
404 if (hci_register_dev(hdev) < 0) { 440 if (hci_register_dev(hdev) < 0) {
405 BT_ERR("Can't register HCI device"); 441 BT_ERR("Can't register HCI device");
406 hci_free_dev(hdev); 442 hci_free_dev(hdev);
407 return -ENODEV; 443 return -ENODEV;
408 } 444 }
409 445
446 set_bit(HCI_UART_REGISTERED, &hu->flags);
447
410 return 0; 448 return 0;
411} 449}
412 450
@@ -558,6 +596,9 @@ static int __init hci_uart_init(void)
558#ifdef CONFIG_BT_HCIUART_ATH3K 596#ifdef CONFIG_BT_HCIUART_ATH3K
559 ath_init(); 597 ath_init();
560#endif 598#endif
599#ifdef CONFIG_BT_HCIUART_3WIRE
600 h5_init();
601#endif
561 602
562 return 0; 603 return 0;
563} 604}
@@ -578,6 +619,9 @@ static void __exit hci_uart_exit(void)
578#ifdef CONFIG_BT_HCIUART_ATH3K 619#ifdef CONFIG_BT_HCIUART_ATH3K
579 ath_deinit(); 620 ath_deinit();
580#endif 621#endif
622#ifdef CONFIG_BT_HCIUART_3WIRE
623 h5_deinit();
624#endif
581 625
582 /* Release tty registration of line discipline */ 626 /* Release tty registration of line discipline */
583 if ((err = tty_unregister_ldisc(N_HCI))) 627 if ((err = tty_unregister_ldisc(N_HCI)))
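
The hci_ldisc.c changes exist for H5's benefit: a transport that needs a link handshake cannot register its hci_dev at line-discipline setup time. hci_uart_register_dev() now bails out early when the proto has set HCI_UART_INIT_PENDING, and registration instead runs from the init_ready work item once the proto calls hci_uart_init_ready() (h5_handle_internal_rx() does exactly that when the config exchange completes). The new HCI_UART_REGISTERED flag then lets hci_uart_tty_close() skip hci_unregister_dev() for a device that never finished registering. A single-threaded model of that flow (the kernel defers through a workqueue; these names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool init_pending;
static bool registered;

static void register_dev(void)
{
        printf("hci device registered\n");
        registered = true;
}

static void open_ldisc(bool needs_handshake)
{
        if (needs_handshake) {
                init_pending = true;    /* H5: wait for the link */
                return;
        }
        register_dev();                 /* H4 and friends: register now */
}

static void link_established(void)      /* the proto's init_ready() call */
{
        if (!init_pending)
                return;                 /* -EALREADY in the driver */
        init_pending = false;
        register_dev();
}

static void close_ldisc(void)
{
        if (registered)
                printf("hci device unregistered\n");
        printf("hdev freed\n");         /* freed either way */
}

int main(void)
{
        open_ldisc(true);
        link_established();
        close_ldisc();
        return 0;
}
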
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index b874c0efde24..ff6d589c34a5 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -348,7 +348,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
348 348
349static inline int ll_check_data_len(struct ll_struct *ll, int len) 349static inline int ll_check_data_len(struct ll_struct *ll, int len)
350{ 350{
351 register int room = skb_tailroom(ll->rx_skb); 351 int room = skb_tailroom(ll->rx_skb);
352 352
353 BT_DBG("len %d room %d", len, room); 353 BT_DBG("len %d room %d", len, room);
354 354
@@ -374,11 +374,11 @@ static inline int ll_check_data_len(struct ll_struct *ll, int len)
374static int ll_recv(struct hci_uart *hu, void *data, int count) 374static int ll_recv(struct hci_uart *hu, void *data, int count)
375{ 375{
376 struct ll_struct *ll = hu->priv; 376 struct ll_struct *ll = hu->priv;
377 register char *ptr; 377 char *ptr;
378 struct hci_event_hdr *eh; 378 struct hci_event_hdr *eh;
379 struct hci_acl_hdr *ah; 379 struct hci_acl_hdr *ah;
380 struct hci_sco_hdr *sh; 380 struct hci_sco_hdr *sh;
381 register int len, type, dlen; 381 int len, type, dlen;
382 382
383 BT_DBG("hu %p count %d rx_state %ld rx_count %ld", hu, count, ll->rx_state, ll->rx_count); 383 BT_DBG("hu %p count %d rx_state %ld rx_count %ld", hu, count, ll->rx_state, ll->rx_count);
384 384
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 6cf6ab22ad21..fffa61ff5cb1 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -47,6 +47,7 @@
47#define HCI_UART_RAW_DEVICE 0 47#define HCI_UART_RAW_DEVICE 0
48#define HCI_UART_RESET_ON_INIT 1 48#define HCI_UART_RESET_ON_INIT 1
49#define HCI_UART_CREATE_AMP 2 49#define HCI_UART_CREATE_AMP 2
50#define HCI_UART_INIT_PENDING 3
50 51
51struct hci_uart; 52struct hci_uart;
52 53
@@ -66,6 +67,8 @@ struct hci_uart {
66 unsigned long flags; 67 unsigned long flags;
67 unsigned long hdev_flags; 68 unsigned long hdev_flags;
68 69
70 struct work_struct init_ready;
71
69 struct hci_uart_proto *proto; 72 struct hci_uart_proto *proto;
70 void *priv; 73 void *priv;
71 74
@@ -76,6 +79,7 @@ struct hci_uart {
76 79
77/* HCI_UART proto flag bits */ 80/* HCI_UART proto flag bits */
78#define HCI_UART_PROTO_SET 0 81#define HCI_UART_PROTO_SET 0
82#define HCI_UART_REGISTERED 1
79 83
80/* TX states */ 84/* TX states */
81#define HCI_UART_SENDING 1 85#define HCI_UART_SENDING 1
@@ -84,6 +88,7 @@ struct hci_uart {
84int hci_uart_register_proto(struct hci_uart_proto *p); 88int hci_uart_register_proto(struct hci_uart_proto *p);
85int hci_uart_unregister_proto(struct hci_uart_proto *p); 89int hci_uart_unregister_proto(struct hci_uart_proto *p);
86int hci_uart_tx_wakeup(struct hci_uart *hu); 90int hci_uart_tx_wakeup(struct hci_uart *hu);
91int hci_uart_init_ready(struct hci_uart *hu);
87 92
88#ifdef CONFIG_BT_HCIUART_H4 93#ifdef CONFIG_BT_HCIUART_H4
89int h4_init(void); 94int h4_init(void);
@@ -104,3 +109,8 @@ int ll_deinit(void);
104int ath_init(void); 109int ath_init(void);
105int ath_deinit(void); 110int ath_deinit(void);
106#endif 111#endif
112
113#ifdef CONFIG_BT_HCIUART_3WIRE
114int h5_init(void);
115int h5_deinit(void);
116#endif
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 0a4185279417..b130df0a1958 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -12,6 +12,7 @@
12#include <asm/smp.h> 12#include <asm/smp.h>
13#include "agp.h" 13#include "agp.h"
14#include "intel-agp.h" 14#include "intel-agp.h"
15#include <drm/intel-gtt.h>
15 16
16int intel_agp_enabled; 17int intel_agp_enabled;
17EXPORT_SYMBOL(intel_agp_enabled); 18EXPORT_SYMBOL(intel_agp_enabled);
@@ -747,7 +748,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
747 748
748 bridge->capndx = cap_ptr; 749 bridge->capndx = cap_ptr;
749 750
750 if (intel_gmch_probe(pdev, bridge)) 751 if (intel_gmch_probe(pdev, NULL, bridge))
751 goto found_gmch; 752 goto found_gmch;
752 753
753 for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { 754 for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
@@ -824,7 +825,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
824 825
825 agp_remove_bridge(bridge); 826 agp_remove_bridge(bridge);
826 827
827 intel_gmch_remove(pdev); 828 intel_gmch_remove();
828 829
829 agp_put_bridge(bridge); 830 agp_put_bridge(bridge);
830} 831}
@@ -902,17 +903,6 @@ static struct pci_device_id agp_intel_pci_table[] = {
902 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), 903 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
903 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), 904 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
904 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), 905 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
905 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
906 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
907 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
908 ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB),
909 ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
910 ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
911 ID(PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB),
912 ID(PCI_DEVICE_ID_INTEL_HASWELL_HB),
913 ID(PCI_DEVICE_ID_INTEL_HASWELL_M_HB),
914 ID(PCI_DEVICE_ID_INTEL_HASWELL_S_HB),
915 ID(PCI_DEVICE_ID_INTEL_HASWELL_E_HB),
916 { } 906 { }
917}; 907};
918 908
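
intel_gmch_probe() gains a gpu_pdev argument so drm/i915 can set up the GTT directly from the GPU's PCI device, while this fake-AGP path keeps passing the host bridge and NULL for the GPU; intel_gmch_remove() loses its argument entirely. The Sandybridge-and-later host-bridge IDs drop out of the AGP table because those generations are now driven purely through drm/i915. With two possible owners the GTT core becomes refcounted, as the intel-gtt.c hunks below show. A compact model of the dual-entry, refcounted probe (simplified types, illustrative only):

#include <stdio.h>

struct pci_dev {
        unsigned short device;
};

static int refcount;

/* Callable from the fake-AGP driver (bridge_pdev set, gpu_pdev NULL)
 * or from drm/i915 (gpu_pdev set); a repeat probe only takes a ref. */
static int gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev)
{
        if (refcount++)
                return 1;               /* already set up by the other user */

        if (gpu_pdev)
                printf("probing via gpu 0x%04x\n", gpu_pdev->device);
        else if (bridge_pdev)
                printf("probing via bridge 0x%04x\n", bridge_pdev->device);
        return 1;
}

static void gmch_remove(void)
{
        if (--refcount)
                return;                 /* another user still holds a ref */
        printf("gtt torn down\n");
}

int main(void)
{
        struct pci_dev gpu = { 0x0412 };

        gmch_probe(NULL, &gpu);         /* i915-style entry */
        gmch_remove();
        return 0;
}
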
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 8e2d9140f300..6ec0fff79bc2 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -64,6 +64,7 @@
64#define I830_PTE_SYSTEM_CACHED 0x00000006 64#define I830_PTE_SYSTEM_CACHED 0x00000006
65/* GT PTE cache control fields */ 65/* GT PTE cache control fields */
66#define GEN6_PTE_UNCACHED 0x00000002 66#define GEN6_PTE_UNCACHED 0x00000002
67#define HSW_PTE_UNCACHED 0x00000000
67#define GEN6_PTE_LLC 0x00000004 68#define GEN6_PTE_LLC 0x00000004
68#define GEN6_PTE_LLC_MLC 0x00000006 69#define GEN6_PTE_LLC_MLC 0x00000006
69#define GEN6_PTE_GFDT 0x00000008 70#define GEN6_PTE_GFDT 0x00000008
@@ -239,19 +240,45 @@
239#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A 240#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
240#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */ 241#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */
241#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30 242#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30
242#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */ 243#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
243#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402 244#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402
244#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412 245#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412
245#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */ 246#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422
247#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
246#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406 248#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406
247#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416 249#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416
248#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */ 250#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426
251#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
249#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a 252#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a
250#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a 253#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a
251#define PCI_DEVICE_ID_INTEL_HASWELL_SDV 0x0c16 /* SDV */ 254#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a
252#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04 255#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
256#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02
257#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12
258#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22
259#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06
260#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16
261#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26
262#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A
263#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A
264#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A
265#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02
266#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12
267#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22
268#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06
269#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16
270#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26
271#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A
272#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A
273#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A
274#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12
275#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22
276#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32
277#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16
278#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26
279#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36
280#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A
281#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A
282#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A
253 283
254int intel_gmch_probe(struct pci_dev *pdev,
255 struct agp_bridge_data *bridge);
256void intel_gmch_remove(struct pci_dev *pdev);
257#endif 284#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 1237e7575c3f..58e32f7c3229 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -66,7 +66,6 @@ static struct _intel_private {
66 struct pci_dev *bridge_dev; 66 struct pci_dev *bridge_dev;
67 u8 __iomem *registers; 67 u8 __iomem *registers;
68 phys_addr_t gtt_bus_addr; 68 phys_addr_t gtt_bus_addr;
69 phys_addr_t gma_bus_addr;
70 u32 PGETBL_save; 69 u32 PGETBL_save;
71 u32 __iomem *gtt; /* I915G */ 70 u32 __iomem *gtt; /* I915G */
72 bool clear_fake_agp; /* on first access via agp, fill with scratch */ 71 bool clear_fake_agp; /* on first access via agp, fill with scratch */
@@ -76,6 +75,7 @@ static struct _intel_private {
76 struct resource ifp_resource; 75 struct resource ifp_resource;
77 int resource_valid; 76 int resource_valid;
78 struct page *scratch_page; 77 struct page *scratch_page;
78 int refcount;
79} intel_private; 79} intel_private;
80 80
81#define INTEL_GTT_GEN intel_private.driver->gen 81#define INTEL_GTT_GEN intel_private.driver->gen
@@ -648,6 +648,7 @@ static void intel_gtt_cleanup(void)
648 648
649static int intel_gtt_init(void) 649static int intel_gtt_init(void)
650{ 650{
651 u32 gma_addr;
651 u32 gtt_map_size; 652 u32 gtt_map_size;
652 int ret; 653 int ret;
653 654
@@ -694,6 +695,15 @@ static int intel_gtt_init(void)
694 return ret; 695 return ret;
695 } 696 }
696 697
698 if (INTEL_GTT_GEN <= 2)
699 pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
700 &gma_addr);
701 else
702 pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
703 &gma_addr);
704
705 intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
706
697 return 0; 707 return 0;
698} 708}
699 709
@@ -767,20 +777,10 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
767 writel(addr | pte_flags, intel_private.gtt + entry); 777 writel(addr | pte_flags, intel_private.gtt + entry);
768} 778}
769 779
770static bool intel_enable_gtt(void) 780bool intel_enable_gtt(void)
771{ 781{
772 u32 gma_addr;
773 u8 __iomem *reg; 782 u8 __iomem *reg;
774 783
775 if (INTEL_GTT_GEN <= 2)
776 pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
777 &gma_addr);
778 else
779 pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
780 &gma_addr);
781
782 intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
783
784 if (INTEL_GTT_GEN >= 6) 784 if (INTEL_GTT_GEN >= 6)
785 return true; 785 return true;
786 786
@@ -823,6 +823,7 @@ static bool intel_enable_gtt(void)
823 823
824 return true; 824 return true;
825} 825}
826EXPORT_SYMBOL(intel_enable_gtt);
826 827
827static int i830_setup(void) 828static int i830_setup(void)
828{ 829{
@@ -860,7 +861,7 @@ static int intel_fake_agp_configure(void)
860 return -EIO; 861 return -EIO;
861 862
862 intel_private.clear_fake_agp = true; 863 intel_private.clear_fake_agp = true;
863 agp_bridge->gart_bus_addr = intel_private.gma_bus_addr; 864 agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr;
864 865
865 return 0; 866 return 0;
866} 867}
@@ -1155,6 +1156,30 @@ static bool gen6_check_flags(unsigned int flags)
1155 return true; 1156 return true;
1156} 1157}
1157 1158
1159static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
1160 unsigned int flags)
1161{
1162 unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
1163 unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
1164 u32 pte_flags;
1165
1166 if (type_mask == AGP_USER_MEMORY)
1167 pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
1168 else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
1169 pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
1170 if (gfdt)
1171 pte_flags |= GEN6_PTE_GFDT;
1172 } else { /* set 'normal'/'cached' to LLC by default */
1173 pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
1174 if (gfdt)
1175 pte_flags |= GEN6_PTE_GFDT;
1176 }
1177
1178 /* gen6 has bit11-4 for physical addr bit39-32 */
1179 addr |= (addr >> 28) & 0xff0;
1180 writel(addr | pte_flags, intel_private.gtt + entry);
1181}
1182
1158static void gen6_write_entry(dma_addr_t addr, unsigned int entry, 1183static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
1159 unsigned int flags) 1184 unsigned int flags)
1160{ 1185{
@@ -1182,9 +1207,17 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
1182static void valleyview_write_entry(dma_addr_t addr, unsigned int entry, 1207static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
1183 unsigned int flags) 1208 unsigned int flags)
1184{ 1209{
1210 unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
1211 unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
1185 u32 pte_flags; 1212 u32 pte_flags;
1186 1213
1187 pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; 1214 if (type_mask == AGP_USER_MEMORY)
1215 pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
1216 else {
1217 pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
1218 if (gfdt)
1219 pte_flags |= GEN6_PTE_GFDT;
1220 }
1188 1221
1189 /* gen6 has bit11-4 for physical addr bit39-32 */ 1222 /* gen6 has bit11-4 for physical addr bit39-32 */
1190 addr |= (addr >> 28) & 0xff0; 1223 addr |= (addr >> 28) & 0xff0;
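
haswell_write_entry() mirrors gen6_write_entry() except for the uncached encoding (HSW_PTE_UNCACHED is 0x0 where GEN6_PTE_UNCACHED is 0x2, which is why the Haswell IDs get their own haswell_gtt_driver below instead of reusing the Sandybridge one), and valleyview_write_entry() now honours cached mappings too. All of them finish with the same fold: GEN6+ GTT PTEs keep physical address bits 39:32 in PTE bits 11:4, so 'addr |= (addr >> 28) & 0xff0;' moves the high byte of a 40-bit address into that field before the low 32 bits are written. A standalone check of the folding (helper name made up):

#include <stdint.h>
#include <stdio.h>

#define PTE_VALID 0x1u

/* GEN6+ GTT PTE: address bits 31:12 sit in their natural place,
 * address bits 39:32 are stored in PTE bits 11:4. */
static uint32_t fold_addr(uint64_t addr)
{
        addr |= (addr >> 28) & 0xff0;   /* bits 39:32 -> bits 11:4 */
        return (uint32_t)addr | PTE_VALID;
}

int main(void)
{
        uint64_t addr = (0x23ull << 32) | 0xabcde000ull;  /* 40-bit addr */
        uint32_t pte = fold_addr(addr);

        /* (pte >> 4) & 0xff recovers 0x23, the high address byte. */
        printf("pte %#x, high-bits field %#x\n", pte, (pte >> 4) & 0xff);
        return 0;
}
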
@@ -1244,6 +1277,7 @@ static int i9xx_setup(void)
1244 switch (INTEL_GTT_GEN) { 1277 switch (INTEL_GTT_GEN) {
1245 case 5: 1278 case 5:
1246 case 6: 1279 case 6:
1280 case 7:
1247 gtt_offset = MB(2); 1281 gtt_offset = MB(2);
1248 break; 1282 break;
1249 case 4: 1283 case 4:
@@ -1372,6 +1406,15 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = {
1372 .check_flags = gen6_check_flags, 1406 .check_flags = gen6_check_flags,
1373 .chipset_flush = i9xx_chipset_flush, 1407 .chipset_flush = i9xx_chipset_flush,
1374}; 1408};
1409static const struct intel_gtt_driver haswell_gtt_driver = {
1410 .gen = 6,
1411 .setup = i9xx_setup,
1412 .cleanup = gen6_cleanup,
1413 .write_entry = haswell_write_entry,
1414 .dma_mask_size = 40,
1415 .check_flags = gen6_check_flags,
1416 .chipset_flush = i9xx_chipset_flush,
1417};
1375static const struct intel_gtt_driver valleyview_gtt_driver = { 1418static const struct intel_gtt_driver valleyview_gtt_driver = {
1376 .gen = 7, 1419 .gen = 7,
1377 .setup = i9xx_setup, 1420 .setup = i9xx_setup,
@@ -1379,7 +1422,6 @@ static const struct intel_gtt_driver valleyview_gtt_driver = {
1379 .write_entry = valleyview_write_entry, 1422 .write_entry = valleyview_write_entry,
1380 .dma_mask_size = 40, 1423 .dma_mask_size = 40,
1381 .check_flags = gen6_check_flags, 1424 .check_flags = gen6_check_flags,
1382 .chipset_flush = i9xx_chipset_flush,
1383}; 1425};
1384 1426
1385/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of 1427/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
@@ -1490,19 +1532,77 @@ static const struct intel_gtt_driver_description {
1490 { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG, 1532 { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
1491 "ValleyView", &valleyview_gtt_driver }, 1533 "ValleyView", &valleyview_gtt_driver },
1492 { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG, 1534 { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
1493 "Haswell", &sandybridge_gtt_driver }, 1535 "Haswell", &haswell_gtt_driver },
1494 { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG, 1536 { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
1495 "Haswell", &sandybridge_gtt_driver }, 1537 "Haswell", &haswell_gtt_driver },
1538 { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
1539 "Haswell", &haswell_gtt_driver },
1496 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG, 1540 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
1497 "Haswell", &sandybridge_gtt_driver }, 1541 "Haswell", &haswell_gtt_driver },
1498 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG, 1542 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
1499 "Haswell", &sandybridge_gtt_driver }, 1543 "Haswell", &haswell_gtt_driver },
1544 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
1545 "Haswell", &haswell_gtt_driver },
1500 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG, 1546 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
1501 "Haswell", &sandybridge_gtt_driver }, 1547 "Haswell", &haswell_gtt_driver },
1502 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG, 1548 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
1503 "Haswell", &sandybridge_gtt_driver }, 1549 "Haswell", &haswell_gtt_driver },
1504 { PCI_DEVICE_ID_INTEL_HASWELL_SDV, 1550 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
1505 "Haswell", &sandybridge_gtt_driver }, 1551 "Haswell", &haswell_gtt_driver },
1552 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
1553 "Haswell", &haswell_gtt_driver },
1554 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
1555 "Haswell", &haswell_gtt_driver },
1556 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
1557 "Haswell", &haswell_gtt_driver },
1558 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
1559 "Haswell", &haswell_gtt_driver },
1560 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
1561 "Haswell", &haswell_gtt_driver },
1562 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
1563 "Haswell", &haswell_gtt_driver },
1564 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
1565 "Haswell", &haswell_gtt_driver },
1566 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
1567 "Haswell", &haswell_gtt_driver },
1568 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
1569 "Haswell", &haswell_gtt_driver },
1570 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
1571 "Haswell", &haswell_gtt_driver },
1572 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
1573 "Haswell", &haswell_gtt_driver },
1574 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
1575 "Haswell", &haswell_gtt_driver },
1576 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
1577 "Haswell", &haswell_gtt_driver },
1578 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
1579 "Haswell", &haswell_gtt_driver },
1580 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
1581 "Haswell", &haswell_gtt_driver },
1582 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
1583 "Haswell", &haswell_gtt_driver },
1584 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
1585 "Haswell", &haswell_gtt_driver },
1586 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
1587 "Haswell", &haswell_gtt_driver },
1588 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
1589 "Haswell", &haswell_gtt_driver },
1590 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
1591 "Haswell", &haswell_gtt_driver },
1592 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
1593 "Haswell", &haswell_gtt_driver },
1594 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
1595 "Haswell", &haswell_gtt_driver },
1596 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
1597 "Haswell", &haswell_gtt_driver },
1598 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
1599 "Haswell", &haswell_gtt_driver },
1600 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
1601 "Haswell", &haswell_gtt_driver },
1602 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
1603 "Haswell", &haswell_gtt_driver },
1604 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
1605 "Haswell", &haswell_gtt_driver },
1506 { 0, NULL, NULL } 1606 { 0, NULL, NULL }
1507}; 1607};
1508 1608
@@ -1523,14 +1623,32 @@ static int find_gmch(u16 device)
1523 return 1; 1623 return 1;
1524} 1624}
1525 1625
1526int intel_gmch_probe(struct pci_dev *pdev, 1626int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
1527 struct agp_bridge_data *bridge) 1627 struct agp_bridge_data *bridge)
1528{ 1628{
1529 int i, mask; 1629 int i, mask;
1530 intel_private.driver = NULL; 1630
1631 /*
1632 * Can be called from the fake agp driver but also directly from
1633 * drm/i915.ko. Hence we need to check whether everything is set up
1634 * already.
1635 */
1636 if (intel_private.driver) {
1637 intel_private.refcount++;
1638 return 1;
1639 }
1531 1640
1532 for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) { 1641 for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
1533 if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) { 1642 if (gpu_pdev) {
1643 if (gpu_pdev->device ==
1644 intel_gtt_chipsets[i].gmch_chip_id) {
1645 intel_private.pcidev = pci_dev_get(gpu_pdev);
1646 intel_private.driver =
1647 intel_gtt_chipsets[i].gtt_driver;
1648
1649 break;
1650 }
1651 } else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1534 intel_private.driver = 1652 intel_private.driver =
1535 intel_gtt_chipsets[i].gtt_driver; 1653 intel_gtt_chipsets[i].gtt_driver;
1536 break; 1654 break;
@@ -1540,13 +1658,17 @@ int intel_gmch_probe(struct pci_dev *pdev,
1540 if (!intel_private.driver) 1658 if (!intel_private.driver)
1541 return 0; 1659 return 0;
1542 1660
1543 bridge->driver = &intel_fake_agp_driver; 1661 intel_private.refcount++;
1544 bridge->dev_private_data = &intel_private; 1662
1545 bridge->dev = pdev; 1663 if (bridge) {
1664 bridge->driver = &intel_fake_agp_driver;
1665 bridge->dev_private_data = &intel_private;
1666 bridge->dev = bridge_pdev;
1667 }
1546 1668
1547 intel_private.bridge_dev = pci_dev_get(pdev); 1669 intel_private.bridge_dev = pci_dev_get(bridge_pdev);
1548 1670
1549 dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name); 1671 dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
1550 1672
1551 mask = intel_private.driver->dma_mask_size; 1673 mask = intel_private.driver->dma_mask_size;
1552 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask))) 1674 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
@@ -1556,11 +1678,11 @@ int intel_gmch_probe(struct pci_dev *pdev,
1556 pci_set_consistent_dma_mask(intel_private.pcidev, 1678 pci_set_consistent_dma_mask(intel_private.pcidev,
1557 DMA_BIT_MASK(mask)); 1679 DMA_BIT_MASK(mask));
1558 1680
1559 /*if (bridge->driver == &intel_810_driver) 1681 if (intel_gtt_init() != 0) {
1560 return 1;*/ 1682 intel_gmch_remove();
1561 1683
1562 if (intel_gtt_init() != 0)
1563 return 0; 1684 return 0;
1685 }
1564 1686
1565 return 1; 1687 return 1;
1566} 1688}
@@ -1579,12 +1701,16 @@ void intel_gtt_chipset_flush(void)
1579} 1701}
1580EXPORT_SYMBOL(intel_gtt_chipset_flush); 1702EXPORT_SYMBOL(intel_gtt_chipset_flush);
1581 1703
1582void intel_gmch_remove(struct pci_dev *pdev) 1704void intel_gmch_remove(void)
1583{ 1705{
1706 if (--intel_private.refcount)
1707 return;
1708
1584 if (intel_private.pcidev) 1709 if (intel_private.pcidev)
1585 pci_dev_put(intel_private.pcidev); 1710 pci_dev_put(intel_private.pcidev);
1586 if (intel_private.bridge_dev) 1711 if (intel_private.bridge_dev)
1587 pci_dev_put(intel_private.bridge_dev); 1712 pci_dev_put(intel_private.bridge_dev);
1713 intel_private.driver = NULL;
1588} 1714}
1589EXPORT_SYMBOL(intel_gmch_remove); 1715EXPORT_SYMBOL(intel_gmch_remove);
1590 1716
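The hunk above turns intel_gmch_probe() and intel_gmch_remove() into a refcounted pair, so the fake AGP driver and drm/i915 can share one set of GTT state: the first probe initializes, later probes only bump the count, and only the last remove tears down. A minimal user-space sketch of that guard pattern, with all names hypothetical and the real hardware setup elided:

	#include <stdio.h>

	/* Hypothetical stand-in for the shared driver state. */
	struct shared_state {
		int refcount;
		const char *driver;	/* NULL means "not probed yet" */
	};

	static struct shared_state state;

	/* Returns 1 on success, mirroring intel_gmch_probe()'s convention. */
	static int shared_probe(const char *driver)
	{
		if (state.driver) {	/* already set up: just take a ref */
			state.refcount++;
			return 1;
		}
		state.driver = driver;	/* first caller does the real init */
		state.refcount++;
		return 1;
	}

	static void shared_remove(void)
	{
		if (--state.refcount)	/* not the last user: keep state */
			return;
		state.driver = NULL;	/* last ref gone: tear down */
	}

	int main(void)
	{
		shared_probe("fake-agp");
		shared_probe("i915");
		shared_remove();
		printf("after one remove: %s\n",
		       state.driver ? state.driver : "(none)");
		shared_remove();
		printf("after both removes: %s\n",
		       state.driver ? state.driver : "(none)");
		return 0;
	}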
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 0c688232aab3..97467053a01b 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -297,7 +297,6 @@ static int __init bsr_init(void)
297 struct device_node *np; 297 struct device_node *np;
298 dev_t bsr_dev; 298 dev_t bsr_dev;
299 int ret = -ENODEV; 299 int ret = -ENODEV;
300 int result;
301 300
302 np = of_find_compatible_node(NULL, NULL, "ibm,bsr"); 301 np = of_find_compatible_node(NULL, NULL, "ibm,bsr");
303 if (!np) 302 if (!np)
@@ -306,13 +305,14 @@ static int __init bsr_init(void)
306 bsr_class = class_create(THIS_MODULE, "bsr"); 305 bsr_class = class_create(THIS_MODULE, "bsr");
307 if (IS_ERR(bsr_class)) { 306 if (IS_ERR(bsr_class)) {
308 printk(KERN_ERR "class_create() failed for bsr_class\n"); 307 printk(KERN_ERR "class_create() failed for bsr_class\n");
308 ret = PTR_ERR(bsr_class);
309 goto out_err_1; 309 goto out_err_1;
310 } 310 }
311 bsr_class->dev_attrs = bsr_dev_attrs; 311 bsr_class->dev_attrs = bsr_dev_attrs;
312 312
313 result = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr"); 313 ret = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr");
314 bsr_major = MAJOR(bsr_dev); 314 bsr_major = MAJOR(bsr_dev);
315 if (result < 0) { 315 if (ret < 0) {
316 printk(KERN_ERR "alloc_chrdev_region() failed for bsr\n"); 316 printk(KERN_ERR "alloc_chrdev_region() failed for bsr\n");
317 goto out_err_2; 317 goto out_err_2;
318 } 318 }
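The bsr fix above is about error propagation: bsr_init() used to leave ret at -ENODEV even when class_create() or alloc_chrdev_region() failed for a different reason, because the latter's result went into a separate variable. A rough user-space model of the IS_ERR/PTR_ERR convention involved (macros simplified from the kernel's, stub names hypothetical):

	#include <errno.h>
	#include <stdio.h>

	/* Simplified models of the kernel's pointer-encoded errors. */
	#define MAX_ERRNO 4095
	#define IS_ERR(p)  ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
	#define PTR_ERR(p) ((long)(p))

	static void *class_create_stub(int fail)
	{
		/* On failure the error code is encoded in the pointer. */
		return fail ? (void *)(long)-ENOMEM : (void *)0x1000UL;
	}

	int main(void)
	{
		int ret = -ENODEV;	/* default, as in bsr_init() */
		void *cls = class_create_stub(1);

		if (IS_ERR(cls)) {
			/* The fix: report the real reason, not -ENODEV. */
			ret = PTR_ERR(cls);
			goto out;
		}
		ret = 0;
	out:
		printf("ret = %d\n", ret); /* -12 (ENOMEM), not -19 */
		return 0;
	}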
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index f45dad39a18b..7c0d391996b5 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -73,6 +73,20 @@ config HW_RANDOM_ATMEL
73 73
74 If unsure, say Y. 74 If unsure, say Y.
75 75
76config HW_RANDOM_BCM63XX
77 tristate "Broadcom BCM63xx Random Number Generator support"
78 depends on HW_RANDOM && BCM63XX
79 default HW_RANDOM
80 ---help---
81 This driver provides kernel-side support for the Random Number
82 Generator hardware found on the Broadcom BCM63xx SoCs.
83
84 To compile this driver as a module, choose M here: the
 84 To compile this driver as a module, choose M here: the
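 85 module will be called bcm63xx-rng.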
86
 87 If unsure, say Y.
88
89
76config HW_RANDOM_GEODE 90config HW_RANDOM_GEODE
77 tristate "AMD Geode HW Random Number Generator support" 91 tristate "AMD Geode HW Random Number Generator support"
78 depends on HW_RANDOM && X86_32 && PCI 92 depends on HW_RANDOM && X86_32 && PCI
@@ -263,3 +277,15 @@ config HW_RANDOM_PSERIES
263 module will be called pseries-rng. 277 module will be called pseries-rng.
264 278
265 If unsure, say Y. 279 If unsure, say Y.
280
281config HW_RANDOM_EXYNOS
282 tristate "EXYNOS HW random number generator support"
283 depends on HW_RANDOM && HAS_IOMEM && HAVE_CLK
284 ---help---
285 This driver provides kernel-side support for the Random Number
 286 Generator hardware found on EXYNOS SoCs.
287
288 To compile this driver as a module, choose M here: the
289 module will be called exynos-rng.
290
291 If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index d901dfa30321..39a757ca15b6 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
8obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o 8obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
9obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o 9obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
10obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o 10obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
11obj-$(CONFIG_HW_RANDOM_BCM63XX) += bcm63xx-rng.o
11obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o 12obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
12obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o 13obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o
13n2-rng-y := n2-drv.o n2-asm.o 14n2-rng-y := n2-drv.o n2-asm.o
@@ -23,3 +24,4 @@ obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
23obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o 24obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
24obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o 25obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
25obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o 26obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
27obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index f518b99f53f5..731c9046cf7b 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -34,8 +34,15 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
34 u32 *data = buf; 34 u32 *data = buf;
35 35
36 /* data ready? */ 36 /* data ready? */
37 if (readl(trng->base + TRNG_ODATA) & 1) { 37 if (readl(trng->base + TRNG_ISR) & 1) {
38 *data = readl(trng->base + TRNG_ODATA); 38 *data = readl(trng->base + TRNG_ODATA);
 39 /*
 40  * Ensure "data ready" is only set again AFTER the next data
 41  * word is ready, in case it got set between checking ISR
 42  * and reading ODATA; otherwise we would risk re-reading the
 43  * same word.
 44  */
45 readl(trng->base + TRNG_ISR);
39 return 4; 46 return 4;
40 } else 47 } else
41 return 0; 48 return 0;
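The extra readl(TRNG_ISR) in the hunk above guards against a race: the ready bit may be set again between the first ISR check and the ODATA read, which would let the next call hand back the same word twice. A toy user-space model of that read-to-clear ordering, assuming (as on this hardware) that reading ISR clears the ready bit:

	#include <stdint.h>
	#include <stdio.h>

	/* Toy model: a read-to-clear status register gating a data register. */
	struct trng_model {
		uint32_t isr;	/* bit 0: data ready; cleared on read */
		uint32_t odata;	/* current random word */
	};

	static uint32_t read_isr(struct trng_model *t)
	{
		uint32_t v = t->isr;
		t->isr = 0;	/* reading ISR clears the ready bit */
		return v;
	}

	static int trng_read(struct trng_model *t, uint32_t *out)
	{
		if (read_isr(t) & 1) {
			*out = t->odata;
			/*
			 * Dummy ISR read: discard a ready bit raised between
			 * the check and the data read, so the *next* call
			 * cannot return this same word again.
			 */
			(void)read_isr(t);
			return 4;
		}
		return 0;
	}

	int main(void)
	{
		struct trng_model t = { 1, 0xdeadbeef };
		uint32_t w = 0;

		printf("read %d bytes: %08x\n", trng_read(&t, &w), (unsigned)w);
		printf("second read returns %d (no fresh data)\n",
		       trng_read(&t, &w));
		return 0;
	}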
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
new file mode 100644
index 000000000000..aec6a4277caa
--- /dev/null
+++ b/drivers/char/hw_random/bcm63xx-rng.c
@@ -0,0 +1,175 @@
1/*
2 * Broadcom BCM63xx Random Number Generator support
3 *
4 * Copyright (C) 2011, Florian Fainelli <florian@openwrt.org>
5 * Copyright (C) 2009, Broadcom Corporation
6 *
7 */
8#include <linux/module.h>
9#include <linux/slab.h>
10#include <linux/io.h>
11#include <linux/err.h>
12#include <linux/clk.h>
13#include <linux/platform_device.h>
14#include <linux/hw_random.h>
15
16#include <bcm63xx_io.h>
17#include <bcm63xx_regs.h>
18
19struct bcm63xx_rng_priv {
20 struct clk *clk;
21 void __iomem *regs;
22};
23
24#define to_rng_priv(rng) ((struct bcm63xx_rng_priv *)rng->priv)
25
26static int bcm63xx_rng_init(struct hwrng *rng)
27{
28 struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
29 u32 val;
30
31 val = bcm_readl(priv->regs + RNG_CTRL);
32 val |= RNG_EN;
33 bcm_writel(val, priv->regs + RNG_CTRL);
34
35 return 0;
36}
37
38static void bcm63xx_rng_cleanup(struct hwrng *rng)
39{
40 struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
41 u32 val;
42
43 val = bcm_readl(priv->regs + RNG_CTRL);
44 val &= ~RNG_EN;
45 bcm_writel(val, priv->regs + RNG_CTRL);
46}
47
48static int bcm63xx_rng_data_present(struct hwrng *rng, int wait)
49{
50 struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
51
52 return bcm_readl(priv->regs + RNG_STAT) & RNG_AVAIL_MASK;
53}
54
55static int bcm63xx_rng_data_read(struct hwrng *rng, u32 *data)
56{
57 struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
58
59 *data = bcm_readl(priv->regs + RNG_DATA);
60
61 return 4;
62}
63
64static int __devinit bcm63xx_rng_probe(struct platform_device *pdev)
65{
66 struct resource *r;
67 struct clk *clk;
68 int ret;
69 struct bcm63xx_rng_priv *priv;
70 struct hwrng *rng;
71
72 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
73 if (!r) {
74 dev_err(&pdev->dev, "no iomem resource\n");
75 ret = -ENXIO;
76 goto out;
77 }
78
79 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
80 if (!priv) {
81 dev_err(&pdev->dev, "no memory for private structure\n");
82 ret = -ENOMEM;
83 goto out;
84 }
85
86 rng = kzalloc(sizeof(*rng), GFP_KERNEL);
87 if (!rng) {
88 dev_err(&pdev->dev, "no memory for rng structure\n");
89 ret = -ENOMEM;
90 goto out_free_priv;
91 }
92
93 platform_set_drvdata(pdev, rng);
94 rng->priv = (unsigned long)priv;
95 rng->name = pdev->name;
96 rng->init = bcm63xx_rng_init;
97 rng->cleanup = bcm63xx_rng_cleanup;
98 rng->data_present = bcm63xx_rng_data_present;
99 rng->data_read = bcm63xx_rng_data_read;
100
101 clk = clk_get(&pdev->dev, "ipsec");
102 if (IS_ERR(clk)) {
103 dev_err(&pdev->dev, "no clock for device\n");
104 ret = PTR_ERR(clk);
105 goto out_free_rng;
106 }
107
108 priv->clk = clk;
109
110 if (!devm_request_mem_region(&pdev->dev, r->start,
111 resource_size(r), pdev->name)) {
112 dev_err(&pdev->dev, "request mem failed");
113 ret = -ENOMEM;
114 goto out_free_rng;
115 }
116
117 priv->regs = devm_ioremap_nocache(&pdev->dev, r->start,
118 resource_size(r));
119 if (!priv->regs) {
120 dev_err(&pdev->dev, "ioremap failed");
121 ret = -ENOMEM;
122 goto out_free_rng;
123 }
124
125 clk_enable(clk);
126
127 ret = hwrng_register(rng);
128 if (ret) {
129 dev_err(&pdev->dev, "failed to register rng device\n");
130 goto out_clk_disable;
131 }
132
133 dev_info(&pdev->dev, "registered RNG driver\n");
134
135 return 0;
136
137out_clk_disable:
138 clk_disable(clk);
139out_free_rng:
140 platform_set_drvdata(pdev, NULL);
141 kfree(rng);
142out_free_priv:
143 kfree(priv);
144out:
145 return ret;
146}
147
148static int __devexit bcm63xx_rng_remove(struct platform_device *pdev)
149{
150 struct hwrng *rng = platform_get_drvdata(pdev);
151 struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
152
153 hwrng_unregister(rng);
154 clk_disable(priv->clk);
155 kfree(priv);
156 kfree(rng);
157 platform_set_drvdata(pdev, NULL);
158
159 return 0;
160}
161
162static struct platform_driver bcm63xx_rng_driver = {
163 .probe = bcm63xx_rng_probe,
164 .remove = __devexit_p(bcm63xx_rng_remove),
165 .driver = {
166 .name = "bcm63xx-rng",
167 .owner = THIS_MODULE,
168 },
169};
170
171module_platform_driver(bcm63xx_rng_driver);
172
173MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
174MODULE_DESCRIPTION("Broadcom BCM63xx RNG driver");
175MODULE_LICENSE("GPL");
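bcm63xx-rng uses the hwrng core's older callback pair: data_present() polls for availability and data_read() pulls one 32-bit word. A compressed sketch of how a consumer drives that contract (the mini_* names are hypothetical, not the real core):

	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>
	#include <stdio.h>

	struct mini_hwrng {
		int  (*data_present)(void *priv, int wait);
		int  (*data_read)(void *priv, uint32_t *data);
		void *priv;
	};

	/* Core-side loop: poll, then pull one word at a time. */
	static size_t mini_hwrng_fill(struct mini_hwrng *rng, void *buf,
				      size_t len)
	{
		size_t got = 0;

		while (got + 4 <= len && rng->data_present(rng->priv, 1)) {
			uint32_t w;
			int n = rng->data_read(rng->priv, &w);

			memcpy((char *)buf + got, &w, n);
			got += n;
		}
		return got;
	}

	/* Fake backend standing in for the BCM63xx FIFO. */
	static int fifo_words = 3;

	static int fake_present(void *priv, int wait)
	{
		(void)priv; (void)wait;
		return fifo_words > 0;
	}

	static int fake_read(void *priv, uint32_t *data)
	{
		(void)priv;
		fifo_words--;
		*data = 0x12345678;
		return 4;
	}

	int main(void)
	{
		struct mini_hwrng rng = { fake_present, fake_read, NULL };
		unsigned char buf[16];

		printf("filled %zu bytes\n",
		       mini_hwrng_fill(&rng, buf, sizeof(buf)));
		return 0;
	}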
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
new file mode 100644
index 000000000000..232ba9ce579c
--- /dev/null
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -0,0 +1,182 @@
1/*
2 * exynos-rng.c - Random Number Generator driver for the exynos
3 *
4 * Copyright (C) 2012 Samsung Electronics
 5 * Jonghwa Lee <jonghwa3.lee@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation;
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#include <linux/hw_random.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/io.h>
27#include <linux/platform_device.h>
28#include <linux/clk.h>
29#include <linux/pm_runtime.h>
30#include <linux/err.h>
31
32#define EXYNOS_PRNG_STATUS_OFFSET 0x10
33#define EXYNOS_PRNG_SEED_OFFSET 0x140
34#define EXYNOS_PRNG_OUT1_OFFSET 0x160
35#define SEED_SETTING_DONE BIT(1)
36#define PRNG_START 0x18
37#define PRNG_DONE BIT(5)
38#define EXYNOS_AUTOSUSPEND_DELAY 100
39
40struct exynos_rng {
41 struct device *dev;
42 struct hwrng rng;
43 void __iomem *mem;
44 struct clk *clk;
45};
46
47static u32 exynos_rng_readl(struct exynos_rng *rng, u32 offset)
48{
49 return __raw_readl(rng->mem + offset);
50}
51
52static void exynos_rng_writel(struct exynos_rng *rng, u32 val, u32 offset)
53{
54 __raw_writel(val, rng->mem + offset);
55}
56
57static int exynos_init(struct hwrng *rng)
58{
59 struct exynos_rng *exynos_rng = container_of(rng,
60 struct exynos_rng, rng);
61 int i;
62 int ret = 0;
63
64 pm_runtime_get_sync(exynos_rng->dev);
65
66 for (i = 0 ; i < 5 ; i++)
67 exynos_rng_writel(exynos_rng, jiffies,
68 EXYNOS_PRNG_SEED_OFFSET + 4*i);
69
70 if (!(exynos_rng_readl(exynos_rng, EXYNOS_PRNG_STATUS_OFFSET)
71 & SEED_SETTING_DONE))
72 ret = -EIO;
73
74 pm_runtime_put_noidle(exynos_rng->dev);
75
76 return ret;
77}
78
79static int exynos_read(struct hwrng *rng, void *buf,
80 size_t max, bool wait)
81{
82 struct exynos_rng *exynos_rng = container_of(rng,
83 struct exynos_rng, rng);
84 u32 *data = buf;
85
86 pm_runtime_get_sync(exynos_rng->dev);
87
88 exynos_rng_writel(exynos_rng, PRNG_START, 0);
89
90 while (!(exynos_rng_readl(exynos_rng,
91 EXYNOS_PRNG_STATUS_OFFSET) & PRNG_DONE))
92 cpu_relax();
93
94 exynos_rng_writel(exynos_rng, PRNG_DONE, EXYNOS_PRNG_STATUS_OFFSET);
95
96 *data = exynos_rng_readl(exynos_rng, EXYNOS_PRNG_OUT1_OFFSET);
97
98 pm_runtime_mark_last_busy(exynos_rng->dev);
99 pm_runtime_autosuspend(exynos_rng->dev);
100
101 return 4;
102}
103
104static int __devinit exynos_rng_probe(struct platform_device *pdev)
105{
106 struct exynos_rng *exynos_rng;
107
108 exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng),
109 GFP_KERNEL);
110 if (!exynos_rng)
111 return -ENOMEM;
112
113 exynos_rng->dev = &pdev->dev;
114 exynos_rng->rng.name = "exynos";
115 exynos_rng->rng.init = exynos_init;
116 exynos_rng->rng.read = exynos_read;
117 exynos_rng->clk = devm_clk_get(&pdev->dev, "secss");
118 if (IS_ERR(exynos_rng->clk)) {
119 dev_err(&pdev->dev, "Couldn't get clock.\n");
120 return -ENOENT;
121 }
122
123 exynos_rng->mem = devm_request_and_ioremap(&pdev->dev,
124 platform_get_resource(pdev, IORESOURCE_MEM, 0));
125 if (!exynos_rng->mem)
126 return -EBUSY;
127
128 platform_set_drvdata(pdev, exynos_rng);
129
130 pm_runtime_set_autosuspend_delay(&pdev->dev, EXYNOS_AUTOSUSPEND_DELAY);
131 pm_runtime_use_autosuspend(&pdev->dev);
132 pm_runtime_enable(&pdev->dev);
133
134 return hwrng_register(&exynos_rng->rng);
135}
136
137static int __devexit exynos_rng_remove(struct platform_device *pdev)
138{
139 struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
140
141 hwrng_unregister(&exynos_rng->rng);
142
143 return 0;
144}
145
146static int exynos_rng_runtime_suspend(struct device *dev)
147{
148 struct platform_device *pdev = to_platform_device(dev);
149 struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
150
151 clk_disable_unprepare(exynos_rng->clk);
152
153 return 0;
154}
155
156static int exynos_rng_runtime_resume(struct device *dev)
157{
158 struct platform_device *pdev = to_platform_device(dev);
159 struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
160
161 return clk_prepare_enable(exynos_rng->clk);
162}
163
164
165UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend,
166 exynos_rng_runtime_resume, NULL);
167
168static struct platform_driver exynos_rng_driver = {
169 .driver = {
170 .name = "exynos-rng",
171 .owner = THIS_MODULE,
172 .pm = &exynos_rng_pm_ops,
173 },
174 .probe = exynos_rng_probe,
175 .remove = __devexit_p(exynos_rng_remove),
176};
177
178module_platform_driver(exynos_rng_driver);
179
180MODULE_DESCRIPTION("EXYNOS 4 H/W Random Number Generator driver");
181MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>");
182MODULE_LICENSE("GPL");
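exynos_read() brackets every register access with pm_runtime_get_sync() and releases it via pm_runtime_mark_last_busy()/pm_runtime_autosuspend(), so the block is only clocked while in use and can power down EXYNOS_AUTOSUSPEND_DELAY ms after the last read. A toy model of that bracketing, with the autosuspend delay collapsed to an immediate put and all names hypothetical:

	#include <stdio.h>

	struct rpm_dev {
		int usage;	/* usage counter */
		int powered;
	};

	static void rpm_get_sync(struct rpm_dev *d)
	{
		if (d->usage++ == 0)
			d->powered = 1;	/* clock on before touching regs */
	}

	static void rpm_put_autosuspend(struct rpm_dev *d)
	{
		if (--d->usage == 0)
			d->powered = 0;	/* real code waits out the delay */
	}

	static unsigned int read_rng_word(struct rpm_dev *d)
	{
		unsigned int w;

		rpm_get_sync(d);
		w = 0x5eed;	/* stand-in for the PRNG register read */
		rpm_put_autosuspend(d);
		return w;
	}

	int main(void)
	{
		struct rpm_dev d = { 0, 0 };

		printf("word=0x%x powered=%d\n", read_rng_word(&d), d.powered);
		return 0;
	}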
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
index 187c6be80f43..85074de5042e 100644
--- a/drivers/char/hw_random/mxc-rnga.c
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -24,6 +24,7 @@
24#include <linux/ioport.h> 24#include <linux/ioport.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/hw_random.h> 26#include <linux/hw_random.h>
27#include <linux/delay.h>
27#include <linux/io.h> 28#include <linux/io.h>
28 29
29/* RNGA Registers */ 30/* RNGA Registers */
@@ -60,16 +61,20 @@
60 61
61static struct platform_device *rng_dev; 62static struct platform_device *rng_dev;
62 63
63static int mxc_rnga_data_present(struct hwrng *rng) 64static int mxc_rnga_data_present(struct hwrng *rng, int wait)
64{ 65{
65 int level;
66 void __iomem *rng_base = (void __iomem *)rng->priv; 66 void __iomem *rng_base = (void __iomem *)rng->priv;
67 67 int i;
68 /* how many random numbers is in FIFO? [0-16] */ 68
69 level = ((__raw_readl(rng_base + RNGA_STATUS) & 69 for (i = 0; i < 20; i++) {
70 RNGA_STATUS_LEVEL_MASK) >> 8); 70 /* how many random numbers are in FIFO? [0-16] */
71 71 int level = (__raw_readl(rng_base + RNGA_STATUS) &
72 return level > 0 ? 1 : 0; 72 RNGA_STATUS_LEVEL_MASK) >> 8;
73 if (level || !wait)
74 return !!level;
75 udelay(10);
76 }
77 return 0;
73} 78}
74 79
75static int mxc_rnga_data_read(struct hwrng *rng, u32 * data) 80static int mxc_rnga_data_read(struct hwrng *rng, u32 * data)
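The mxc-rnga change teaches data_present() about the wait flag: instead of a single FIFO-level check it now polls up to 20 times with a 10 µs delay between attempts. A stand-alone model of that bounded-poll shape, with the FIFO simulated by a countdown and the loop bounds copied from the hunk:

	#include <stdio.h>

	static int polls_until_data;	/* simulated FIFO fill latency */

	static int fifo_has_data(void)
	{
		return polls_until_data-- <= 0;
	}

	static int data_present(int wait)
	{
		int i;

		for (i = 0; i < 20; i++) {
			int level = fifo_has_data();

			if (level || !wait)
				return !!level;
			/* the driver does udelay(10) here; elided */
		}
		return 0;
	}

	int main(void)
	{
		polls_until_data = 5;	/* data shows up on the 6th poll */
		printf("wait=1 -> %d\n", data_present(1)); /* polls until ready */
		polls_until_data = 5;
		printf("wait=0 -> %d\n", data_present(0)); /* single check */
		return 0;
	}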
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 1412565c01af..4fbdceb6f773 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -160,24 +160,26 @@ static int __exit omap_rng_remove(struct platform_device *pdev)
160 return 0; 160 return 0;
161} 161}
162 162
163#ifdef CONFIG_PM 163#ifdef CONFIG_PM_SLEEP
164 164
165static int omap_rng_suspend(struct platform_device *pdev, pm_message_t message) 165static int omap_rng_suspend(struct device *dev)
166{ 166{
167 omap_rng_write_reg(RNG_MASK_REG, 0x0); 167 omap_rng_write_reg(RNG_MASK_REG, 0x0);
168 return 0; 168 return 0;
169} 169}
170 170
171static int omap_rng_resume(struct platform_device *pdev) 171static int omap_rng_resume(struct device *dev)
172{ 172{
173 omap_rng_write_reg(RNG_MASK_REG, 0x1); 173 omap_rng_write_reg(RNG_MASK_REG, 0x1);
174 return 0; 174 return 0;
175} 175}
176 176
177static SIMPLE_DEV_PM_OPS(omap_rng_pm, omap_rng_suspend, omap_rng_resume);
178#define OMAP_RNG_PM (&omap_rng_pm)
179
177#else 180#else
178 181
179#define omap_rng_suspend NULL 182#define OMAP_RNG_PM NULL
180#define omap_rng_resume NULL
181 183
182#endif 184#endif
183 185
@@ -188,11 +190,10 @@ static struct platform_driver omap_rng_driver = {
188 .driver = { 190 .driver = {
189 .name = "omap_rng", 191 .name = "omap_rng",
190 .owner = THIS_MODULE, 192 .owner = THIS_MODULE,
193 .pm = OMAP_RNG_PM,
191 }, 194 },
192 .probe = omap_rng_probe, 195 .probe = omap_rng_probe,
193 .remove = __exit_p(omap_rng_remove), 196 .remove = __exit_p(omap_rng_remove),
194 .suspend = omap_rng_suspend,
195 .resume = omap_rng_resume
196}; 197};
197 198
198static int __init omap_rng_init(void) 199static int __init omap_rng_init(void)
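The omap_rng conversion replaces the legacy platform_driver .suspend/.resume fields with a dev_pm_ops table built by SIMPLE_DEV_PM_OPS(). Roughly, that macro publishes one const ops structure whose system-sleep slots all route to the same suspend/resume pair; a small stand-alone model of the shape (struct layout and names simplified, not the real dev_pm_ops):

	#include <stdio.h>

	struct device_like { const char *name; };

	struct pm_ops_like {
		int (*suspend)(struct device_like *);
		int (*resume)(struct device_like *);
		/* freeze/thaw/poweroff/restore would reuse the same pair */
	};

	static int rng_suspend(struct device_like *dev)
	{
		printf("%s: mask RNG interrupts\n", dev->name);
		return 0;
	}

	static int rng_resume(struct device_like *dev)
	{
		printf("%s: unmask RNG interrupts\n", dev->name);
		return 0;
	}

	static const struct pm_ops_like rng_pm = { rng_suspend, rng_resume };

	int main(void)
	{
		struct device_like dev = { "omap_rng-like" };

		rng_pm.suspend(&dev);
		rng_pm.resume(&dev);
		return 0;
	}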
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 723725bbb96b..5708299507d0 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -55,6 +55,7 @@ static void register_buffer(u8 *buf, size_t size)
55 55
56static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait) 56static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
57{ 57{
58 int ret;
58 59
59 if (!busy) { 60 if (!busy) {
60 busy = true; 61 busy = true;
@@ -65,7 +66,9 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
65 if (!wait) 66 if (!wait)
66 return 0; 67 return 0;
67 68
68 wait_for_completion(&have_data); 69 ret = wait_for_completion_killable(&have_data);
70 if (ret < 0)
71 return ret;
69 72
70 busy = false; 73 busy = false;
71 74
@@ -85,7 +88,7 @@ static struct hwrng virtio_hwrng = {
85 .read = virtio_read, 88 .read = virtio_read,
86}; 89};
87 90
88static int virtrng_probe(struct virtio_device *vdev) 91static int probe_common(struct virtio_device *vdev)
89{ 92{
90 int err; 93 int err;
91 94
@@ -103,13 +106,37 @@ static int virtrng_probe(struct virtio_device *vdev)
103 return 0; 106 return 0;
104} 107}
105 108
106static void __devexit virtrng_remove(struct virtio_device *vdev) 109static void remove_common(struct virtio_device *vdev)
107{ 110{
108 vdev->config->reset(vdev); 111 vdev->config->reset(vdev);
112 busy = false;
109 hwrng_unregister(&virtio_hwrng); 113 hwrng_unregister(&virtio_hwrng);
110 vdev->config->del_vqs(vdev); 114 vdev->config->del_vqs(vdev);
111} 115}
112 116
117static int virtrng_probe(struct virtio_device *vdev)
118{
119 return probe_common(vdev);
120}
121
122static void __devexit virtrng_remove(struct virtio_device *vdev)
123{
124 remove_common(vdev);
125}
126
127#ifdef CONFIG_PM
128static int virtrng_freeze(struct virtio_device *vdev)
129{
130 remove_common(vdev);
131 return 0;
132}
133
134static int virtrng_restore(struct virtio_device *vdev)
135{
136 return probe_common(vdev);
137}
138#endif
139
113static struct virtio_device_id id_table[] = { 140static struct virtio_device_id id_table[] = {
114 { VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID }, 141 { VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID },
115 { 0 }, 142 { 0 },
@@ -121,6 +148,10 @@ static struct virtio_driver virtio_rng_driver = {
121 .id_table = id_table, 148 .id_table = id_table,
122 .probe = virtrng_probe, 149 .probe = virtrng_probe,
123 .remove = __devexit_p(virtrng_remove), 150 .remove = __devexit_p(virtrng_remove),
151#ifdef CONFIG_PM
152 .freeze = virtrng_freeze,
153 .restore = virtrng_restore,
154#endif
124}; 155};
125 156
126static int __init init(void) 157static int __init init(void)
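Factoring virtio-rng's probe/remove bodies into probe_common()/remove_common() is what makes the new freeze/restore hooks nearly free: hibernation is handled as a full teardown plus re-setup, so the device comes back in a known state. A minimal model of that sharing:

	#include <stdio.h>

	static int registered;

	static int probe_common(void)   { registered = 1; return 0; }
	static void remove_common(void) { registered = 0; }

	/* PM hooks reuse the exact same setup/teardown paths. */
	static int freeze(void)  { remove_common(); return 0; }
	static int restore(void) { return probe_common(); }

	int main(void)
	{
		probe_common();
		freeze();
		printf("after freeze: registered=%d\n", registered);
		restore();
		printf("after restore: registered=%d\n", registered);
		return 0;
	}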
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 1e638fff40ea..83f85cf7fb1b 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2503,18 +2503,6 @@ static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
2503 cleanup_one_si(info); 2503 cleanup_one_si(info);
2504} 2504}
2505 2505
2506#ifdef CONFIG_PM
2507static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2508{
2509 return 0;
2510}
2511
2512static int ipmi_pci_resume(struct pci_dev *pdev)
2513{
2514 return 0;
2515}
2516#endif
2517
2518static struct pci_device_id ipmi_pci_devices[] = { 2506static struct pci_device_id ipmi_pci_devices[] = {
2519 { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) }, 2507 { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2520 { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) }, 2508 { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
@@ -2527,10 +2515,6 @@ static struct pci_driver ipmi_pci_driver = {
2527 .id_table = ipmi_pci_devices, 2515 .id_table = ipmi_pci_devices,
2528 .probe = ipmi_pci_probe, 2516 .probe = ipmi_pci_probe,
2529 .remove = __devexit_p(ipmi_pci_remove), 2517 .remove = __devexit_p(ipmi_pci_remove),
2530#ifdef CONFIG_PM
2531 .suspend = ipmi_pci_suspend,
2532 .resume = ipmi_pci_resume,
2533#endif
2534}; 2518};
2535#endif /* CONFIG_PCI */ 2519#endif /* CONFIG_PCI */
2536 2520
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 7ed356e52035..37b8be7cba95 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -141,17 +141,6 @@
141 141
142#define IPMI_WDOG_TIMER_NOT_INIT_RESP 0x80 142#define IPMI_WDOG_TIMER_NOT_INIT_RESP 0x80
143 143
144/* These are here until the real ones get into the watchdog.h interface. */
145#ifndef WDIOC_GETTIMEOUT
146#define WDIOC_GETTIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 20, int)
147#endif
148#ifndef WDIOC_SET_PRETIMEOUT
149#define WDIOC_SET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 21, int)
150#endif
151#ifndef WDIOC_GET_PRETIMEOUT
152#define WDIOC_GET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 22, int)
153#endif
154
155static DEFINE_MUTEX(ipmi_watchdog_mutex); 144static DEFINE_MUTEX(ipmi_watchdog_mutex);
156static bool nowayout = WATCHDOG_NOWAYOUT; 145static bool nowayout = WATCHDOG_NOWAYOUT;
157 146
@@ -732,7 +721,6 @@ static int ipmi_ioctl(struct file *file,
732 return -EFAULT; 721 return -EFAULT;
733 return 0; 722 return 0;
734 723
735 case WDIOC_SET_PRETIMEOUT:
736 case WDIOC_SETPRETIMEOUT: 724 case WDIOC_SETPRETIMEOUT:
737 i = copy_from_user(&val, argp, sizeof(int)); 725 i = copy_from_user(&val, argp, sizeof(int));
738 if (i) 726 if (i)
@@ -740,7 +728,6 @@ static int ipmi_ioctl(struct file *file,
740 pretimeout = val; 728 pretimeout = val;
741 return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); 729 return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
742 730
743 case WDIOC_GET_PRETIMEOUT:
744 case WDIOC_GETPRETIMEOUT: 731 case WDIOC_GETPRETIMEOUT:
745 i = copy_to_user(argp, &pretimeout, sizeof(pretimeout)); 732 i = copy_to_user(argp, &pretimeout, sizeof(pretimeout));
746 if (i) 733 if (i)
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 67c3371723cc..e5eedfa24c91 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -27,14 +27,16 @@
27#include <linux/splice.h> 27#include <linux/splice.h>
28#include <linux/pfn.h> 28#include <linux/pfn.h>
29#include <linux/export.h> 29#include <linux/export.h>
30#include <linux/io.h>
30 31
31#include <asm/uaccess.h> 32#include <asm/uaccess.h>
32#include <asm/io.h>
33 33
34#ifdef CONFIG_IA64 34#ifdef CONFIG_IA64
35# include <linux/efi.h> 35# include <linux/efi.h>
36#endif 36#endif
37 37
38#define DEVPORT_MINOR 4
39
38static inline unsigned long size_inside_page(unsigned long start, 40static inline unsigned long size_inside_page(unsigned long start,
39 unsigned long size) 41 unsigned long size)
40{ 42{
@@ -894,6 +896,13 @@ static int __init chr_dev_init(void)
894 for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) { 896 for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
895 if (!devlist[minor].name) 897 if (!devlist[minor].name)
896 continue; 898 continue;
899
900 /*
901 * Create /dev/port?
902 */
903 if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
904 continue;
905
897 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor), 906 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
898 NULL, devlist[minor].name); 907 NULL, devlist[minor].name);
899 } 908 }
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 8b78750f1efe..845f97fd1832 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -283,7 +283,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
283 vdata->flags = flags; 283 vdata->flags = flags;
284 vdata->type = type; 284 vdata->type = type;
285 spin_lock_init(&vdata->lock); 285 spin_lock_init(&vdata->lock);
286 vdata->refcnt = ATOMIC_INIT(1); 286 atomic_set(&vdata->refcnt, 1);
287 vma->vm_private_data = vdata; 287 vma->vm_private_data = vdata;
288 288
289 vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND); 289 vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 4ec04a754733..b86eae9b77df 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -125,21 +125,26 @@
125 * The current exported interfaces for gathering environmental noise 125 * The current exported interfaces for gathering environmental noise
126 * from the devices are: 126 * from the devices are:
127 * 127 *
128 * void add_device_randomness(const void *buf, unsigned int size);
128 * void add_input_randomness(unsigned int type, unsigned int code, 129 * void add_input_randomness(unsigned int type, unsigned int code,
129 * unsigned int value); 130 * unsigned int value);
130 * void add_interrupt_randomness(int irq); 131 * void add_interrupt_randomness(int irq, int irq_flags);
131 * void add_disk_randomness(struct gendisk *disk); 132 * void add_disk_randomness(struct gendisk *disk);
132 * 133 *
134 * add_device_randomness() is for adding data to the random pool that
135 * is likely to differ between two devices (or possibly even per boot).
136 * This would be things like MAC addresses or serial numbers, or the
137 * read-out of the RTC. This does *not* add any actual entropy to the
138 * pool, but it initializes the pool to different values for devices
139 * that might otherwise be identical and have very little entropy
140 * available to them (particularly common in the embedded world).
141 *
133 * add_input_randomness() uses the input layer interrupt timing, as well as 142 * add_input_randomness() uses the input layer interrupt timing, as well as
134 * the event type information from the hardware. 143 * the event type information from the hardware.
135 * 144 *
136 * add_interrupt_randomness() uses the inter-interrupt timing as random 145 * add_interrupt_randomness() uses the interrupt timing as random
137 * inputs to the entropy pool. Note that not all interrupts are good 146 * inputs to the entropy pool. Using the cycle counters and the irq source
138 * sources of randomness! For example, the timer interrupts is not a 147 * as inputs, it feeds the randomness roughly once a second.
139 * good choice, because the periodicity of the interrupts is too
140 * regular, and hence predictable to an attacker. Network Interface
141 * Controller interrupts are a better measure, since the timing of the
142 * NIC interrupts are more unpredictable.
143 * 148 *
144 * add_disk_randomness() uses what amounts to the seek time of block 149 * add_disk_randomness() uses what amounts to the seek time of block
145 * layer request events, on a per-disk_devt basis, as input to the 150 * layer request events, on a per-disk_devt basis, as input to the
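The new add_device_randomness() is for seeding, not crediting: a driver can stir in device-unique bytes such as a MAC address so that otherwise-identical embedded boxes start from different pool states, while the entropy estimate stays untouched. A toy model of that distinction (the mixing function and pool here are stand-ins, not the real ones):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t pool[4];
	static int entropy_count;

	static void mix(const void *buf, unsigned int size)
	{
		const unsigned char *p = buf;
		unsigned int i;

		for (i = 0; i < size; i++)
			pool[i & 3] = (pool[i & 3] << 5 |
				       pool[i & 3] >> 27) ^ p[i];
	}

	static void add_device_randomness_model(const void *buf,
						unsigned int size)
	{
		mix(buf, size);	/* differentiates otherwise-identical boxes */
		/* entropy_count deliberately NOT incremented */
	}

	int main(void)
	{
		unsigned char mac[6] = { 0x00, 0x16, 0x3e, 0x12, 0x34, 0x56 };

		add_device_randomness_model(mac, sizeof(mac));
		printf("pool[0]=%08x entropy_count=%d\n",
		       (unsigned)pool[0], entropy_count);
		return 0;
	}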
@@ -248,6 +253,8 @@
248#include <linux/percpu.h> 253#include <linux/percpu.h>
249#include <linux/cryptohash.h> 254#include <linux/cryptohash.h>
250#include <linux/fips.h> 255#include <linux/fips.h>
256#include <linux/ptrace.h>
257#include <linux/kmemcheck.h>
251 258
252#ifdef CONFIG_GENERIC_HARDIRQS 259#ifdef CONFIG_GENERIC_HARDIRQS
253# include <linux/irq.h> 260# include <linux/irq.h>
@@ -256,8 +263,12 @@
256#include <asm/processor.h> 263#include <asm/processor.h>
257#include <asm/uaccess.h> 264#include <asm/uaccess.h>
258#include <asm/irq.h> 265#include <asm/irq.h>
266#include <asm/irq_regs.h>
259#include <asm/io.h> 267#include <asm/io.h>
260 268
269#define CREATE_TRACE_POINTS
270#include <trace/events/random.h>
271
261/* 272/*
262 * Configuration information 273 * Configuration information
263 */ 274 */
@@ -266,6 +277,8 @@
266#define SEC_XFER_SIZE 512 277#define SEC_XFER_SIZE 512
267#define EXTRACT_SIZE 10 278#define EXTRACT_SIZE 10
268 279
280#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
281
269/* 282/*
270 * The minimum number of bits of entropy before we wake up a read on 283 * The minimum number of bits of entropy before we wake up a read on
271 * /dev/random. Should be enough to do a significant reseed. 284 * /dev/random. Should be enough to do a significant reseed.
@@ -420,8 +433,10 @@ struct entropy_store {
420 /* read-write data: */ 433 /* read-write data: */
421 spinlock_t lock; 434 spinlock_t lock;
422 unsigned add_ptr; 435 unsigned add_ptr;
436 unsigned input_rotate;
423 int entropy_count; 437 int entropy_count;
424 int input_rotate; 438 int entropy_total;
439 unsigned int initialized:1;
425 __u8 last_data[EXTRACT_SIZE]; 440 __u8 last_data[EXTRACT_SIZE];
426}; 441};
427 442
@@ -454,6 +469,10 @@ static struct entropy_store nonblocking_pool = {
454 .pool = nonblocking_pool_data 469 .pool = nonblocking_pool_data
455}; 470};
456 471
472static __u32 const twist_table[8] = {
473 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
474 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
475
457/* 476/*
458 * This function adds bytes into the entropy "pool". It does not 477 * This function adds bytes into the entropy "pool". It does not
459 * update the entropy estimate. The caller should call 478 * update the entropy estimate. The caller should call
@@ -464,29 +483,24 @@ static struct entropy_store nonblocking_pool = {
464 * it's cheap to do so and helps slightly in the expected case where 483 * it's cheap to do so and helps slightly in the expected case where
465 * the entropy is concentrated in the low-order bits. 484 * the entropy is concentrated in the low-order bits.
466 */ 485 */
467static void mix_pool_bytes_extract(struct entropy_store *r, const void *in, 486static void _mix_pool_bytes(struct entropy_store *r, const void *in,
468 int nbytes, __u8 out[64]) 487 int nbytes, __u8 out[64])
469{ 488{
470 static __u32 const twist_table[8] = {
471 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
472 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
473 unsigned long i, j, tap1, tap2, tap3, tap4, tap5; 489 unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
474 int input_rotate; 490 int input_rotate;
475 int wordmask = r->poolinfo->poolwords - 1; 491 int wordmask = r->poolinfo->poolwords - 1;
476 const char *bytes = in; 492 const char *bytes = in;
477 __u32 w; 493 __u32 w;
478 unsigned long flags;
479 494
480 /* Taps are constant, so we can load them without holding r->lock. */
481 tap1 = r->poolinfo->tap1; 495 tap1 = r->poolinfo->tap1;
482 tap2 = r->poolinfo->tap2; 496 tap2 = r->poolinfo->tap2;
483 tap3 = r->poolinfo->tap3; 497 tap3 = r->poolinfo->tap3;
484 tap4 = r->poolinfo->tap4; 498 tap4 = r->poolinfo->tap4;
485 tap5 = r->poolinfo->tap5; 499 tap5 = r->poolinfo->tap5;
486 500
487 spin_lock_irqsave(&r->lock, flags); 501 smp_rmb();
488 input_rotate = r->input_rotate; 502 input_rotate = ACCESS_ONCE(r->input_rotate);
489 i = r->add_ptr; 503 i = ACCESS_ONCE(r->add_ptr);
490 504
491 /* mix one byte at a time to simplify size handling and churn faster */ 505 /* mix one byte at a time to simplify size handling and churn faster */
492 while (nbytes--) { 506 while (nbytes--) {
@@ -513,19 +527,61 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
513 input_rotate += i ? 7 : 14; 527 input_rotate += i ? 7 : 14;
514 } 528 }
515 529
516 r->input_rotate = input_rotate; 530 ACCESS_ONCE(r->input_rotate) = input_rotate;
517 r->add_ptr = i; 531 ACCESS_ONCE(r->add_ptr) = i;
532 smp_wmb();
518 533
519 if (out) 534 if (out)
520 for (j = 0; j < 16; j++) 535 for (j = 0; j < 16; j++)
521 ((__u32 *)out)[j] = r->pool[(i - j) & wordmask]; 536 ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
537}
538
539static void __mix_pool_bytes(struct entropy_store *r, const void *in,
540 int nbytes, __u8 out[64])
541{
542 trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
543 _mix_pool_bytes(r, in, nbytes, out);
544}
545
546static void mix_pool_bytes(struct entropy_store *r, const void *in,
547 int nbytes, __u8 out[64])
548{
549 unsigned long flags;
522 550
551 trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
552 spin_lock_irqsave(&r->lock, flags);
553 _mix_pool_bytes(r, in, nbytes, out);
523 spin_unlock_irqrestore(&r->lock, flags); 554 spin_unlock_irqrestore(&r->lock, flags);
524} 555}
525 556
526static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes) 557struct fast_pool {
558 __u32 pool[4];
559 unsigned long last;
560 unsigned short count;
561 unsigned char rotate;
562 unsigned char last_timer_intr;
563};
564
565/*
566 * This is a fast mixing routine used by the interrupt randomness
 567 * collector. It's hardcoded for a 128-bit pool and assumes that any
568 * locks that might be needed are taken by the caller.
569 */
570static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
527{ 571{
528 mix_pool_bytes_extract(r, in, bytes, NULL); 572 const char *bytes = in;
573 __u32 w;
574 unsigned i = f->count;
575 unsigned input_rotate = f->rotate;
576
577 while (nbytes--) {
578 w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
579 f->pool[(i + 1) & 3];
580 f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
581 input_rotate += (i++ & 3) ? 7 : 14;
582 }
583 f->count = i;
584 f->rotate = input_rotate;
529} 585}
530 586
531/* 587/*
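Since fast_mix() has no kernel dependencies beyond rol32() and the twist_table hoisted to file scope earlier in this patch, it can be lifted almost verbatim into user space and exercised; a sketch of that (rol32 reimplemented, input values arbitrary):

	#include <stdint.h>
	#include <stdio.h>

	static const uint32_t twist_table[8] = {
		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

	static uint32_t rol32(uint32_t w, unsigned s)
	{
		s &= 31;
		return s ? (w << s) | (w >> (32 - s)) : w;
	}

	struct fast_pool {
		uint32_t pool[4];
		unsigned long last;
		unsigned short count;
		unsigned char rotate;
		unsigned char last_timer_intr;
	};

	/* Body copied from the hunk above. */
	static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
	{
		const char *bytes = in;
		uint32_t w;
		unsigned i = f->count;
		unsigned input_rotate = f->rotate;

		while (nbytes--) {
			w = rol32(*bytes++, input_rotate & 31) ^
			    f->pool[i & 3] ^ f->pool[(i + 1) & 3];
			f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
			input_rotate += (i++ & 3) ? 7 : 14;
		}
		f->count = i;
		f->rotate = input_rotate;
	}

	int main(void)
	{
		struct fast_pool f = { { 0 } };
		uint32_t input[4] = { 0x1234, 42, 0xc0de, 0 };

		fast_mix(&f, input, sizeof(input));
		printf("pool: %08x %08x %08x %08x\n",
		       (unsigned)f.pool[0], (unsigned)f.pool[1],
		       (unsigned)f.pool[2], (unsigned)f.pool[3]);
		return 0;
	}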
@@ -533,30 +589,38 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
533 */ 589 */
534static void credit_entropy_bits(struct entropy_store *r, int nbits) 590static void credit_entropy_bits(struct entropy_store *r, int nbits)
535{ 591{
536 unsigned long flags; 592 int entropy_count, orig;
537 int entropy_count;
538 593
539 if (!nbits) 594 if (!nbits)
540 return; 595 return;
541 596
542 spin_lock_irqsave(&r->lock, flags);
543
544 DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name); 597 DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
545 entropy_count = r->entropy_count; 598retry:
599 entropy_count = orig = ACCESS_ONCE(r->entropy_count);
546 entropy_count += nbits; 600 entropy_count += nbits;
601
547 if (entropy_count < 0) { 602 if (entropy_count < 0) {
548 DEBUG_ENT("negative entropy/overflow\n"); 603 DEBUG_ENT("negative entropy/overflow\n");
549 entropy_count = 0; 604 entropy_count = 0;
550 } else if (entropy_count > r->poolinfo->POOLBITS) 605 } else if (entropy_count > r->poolinfo->POOLBITS)
551 entropy_count = r->poolinfo->POOLBITS; 606 entropy_count = r->poolinfo->POOLBITS;
552 r->entropy_count = entropy_count; 607 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
608 goto retry;
609
610 if (!r->initialized && nbits > 0) {
611 r->entropy_total += nbits;
612 if (r->entropy_total > 128)
613 r->initialized = 1;
614 }
615
616 trace_credit_entropy_bits(r->name, nbits, entropy_count,
617 r->entropy_total, _RET_IP_);
553 618
554 /* should we wake readers? */ 619 /* should we wake readers? */
555 if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) { 620 if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
556 wake_up_interruptible(&random_read_wait); 621 wake_up_interruptible(&random_read_wait);
557 kill_fasync(&fasync, SIGIO, POLL_IN); 622 kill_fasync(&fasync, SIGIO, POLL_IN);
558 } 623 }
559 spin_unlock_irqrestore(&r->lock, flags);
560} 624}
561 625
562/********************************************************************* 626/*********************************************************************
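credit_entropy_bits() now updates entropy_count without taking r->lock: read the counter, compute the clamped new value, and publish it with cmpxchg(), retrying if another CPU raced in between. A user-space rendering of the same retry loop using GCC/Clang __atomic builtins (the POOLBITS value is made up for the demo):

	#include <stdio.h>

	#define POOLBITS 4096

	static int entropy_count;

	static void credit_entropy_bits_model(int nbits)
	{
		int orig, new;

		if (!nbits)
			return;
	retry:
		orig = __atomic_load_n(&entropy_count, __ATOMIC_RELAXED);
		new = orig + nbits;
		if (new < 0)
			new = 0;		/* underflow clamps to zero */
		else if (new > POOLBITS)
			new = POOLBITS;		/* pool can't hold more */
		if (!__atomic_compare_exchange_n(&entropy_count, &orig, new,
						 0, __ATOMIC_RELAXED,
						 __ATOMIC_RELAXED))
			goto retry;		/* lost a race: recompute */
	}

	int main(void)
	{
		credit_entropy_bits_model(100);
		credit_entropy_bits_model(-200);
		printf("entropy_count=%d\n", entropy_count); /* 0 */
		return 0;
	}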
@@ -572,42 +636,24 @@ struct timer_rand_state {
572 unsigned dont_count_entropy:1; 636 unsigned dont_count_entropy:1;
573}; 637};
574 638
575#ifndef CONFIG_GENERIC_HARDIRQS 639/*
576 640 * Add device- or boot-specific data to the input and nonblocking
577static struct timer_rand_state *irq_timer_state[NR_IRQS]; 641 * pools to help initialize them to unique values.
578 642 *
579static struct timer_rand_state *get_timer_rand_state(unsigned int irq) 643 * None of this adds any entropy, it is meant to avoid the
580{ 644 * problem of the nonblocking pool having similar initial state
581 return irq_timer_state[irq]; 645 * across largely identical devices.
582} 646 */
583 647void add_device_randomness(const void *buf, unsigned int size)
584static void set_timer_rand_state(unsigned int irq,
585 struct timer_rand_state *state)
586{
587 irq_timer_state[irq] = state;
588}
589
590#else
591
592static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
593{
594 struct irq_desc *desc;
595
596 desc = irq_to_desc(irq);
597
598 return desc->timer_rand_state;
599}
600
601static void set_timer_rand_state(unsigned int irq,
602 struct timer_rand_state *state)
603{ 648{
604 struct irq_desc *desc; 649 unsigned long time = get_cycles() ^ jiffies;
605 650
606 desc = irq_to_desc(irq); 651 mix_pool_bytes(&input_pool, buf, size, NULL);
607 652 mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
608 desc->timer_rand_state = state; 653 mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
654 mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
609} 655}
610#endif 656EXPORT_SYMBOL(add_device_randomness);
611 657
612static struct timer_rand_state input_timer_state; 658static struct timer_rand_state input_timer_state;
613 659
@@ -637,13 +683,9 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
637 goto out; 683 goto out;
638 684
639 sample.jiffies = jiffies; 685 sample.jiffies = jiffies;
640 686 sample.cycles = get_cycles();
641 /* Use arch random value, fall back to cycles */
642 if (!arch_get_random_int(&sample.cycles))
643 sample.cycles = get_cycles();
644
645 sample.num = num; 687 sample.num = num;
646 mix_pool_bytes(&input_pool, &sample, sizeof(sample)); 688 mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
647 689
648 /* 690 /*
649 * Calculate number of bits of randomness we probably added. 691 * Calculate number of bits of randomness we probably added.
@@ -700,17 +742,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
700} 742}
701EXPORT_SYMBOL_GPL(add_input_randomness); 743EXPORT_SYMBOL_GPL(add_input_randomness);
702 744
703void add_interrupt_randomness(int irq) 745static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
746
747void add_interrupt_randomness(int irq, int irq_flags)
704{ 748{
705 struct timer_rand_state *state; 749 struct entropy_store *r;
750 struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
751 struct pt_regs *regs = get_irq_regs();
752 unsigned long now = jiffies;
753 __u32 input[4], cycles = get_cycles();
754
755 input[0] = cycles ^ jiffies;
756 input[1] = irq;
757 if (regs) {
758 __u64 ip = instruction_pointer(regs);
759 input[2] = ip;
760 input[3] = ip >> 32;
761 }
706 762
707 state = get_timer_rand_state(irq); 763 fast_mix(fast_pool, input, sizeof(input));
708 764
709 if (state == NULL) 765 if ((fast_pool->count & 1023) &&
766 !time_after(now, fast_pool->last + HZ))
710 return; 767 return;
711 768
712 DEBUG_ENT("irq event %d\n", irq); 769 fast_pool->last = now;
713 add_timer_randomness(state, 0x100 + irq); 770
771 r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
772 __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
773 /*
774 * If we don't have a valid cycle counter, and we see
775 * back-to-back timer interrupts, then skip giving credit for
776 * any entropy.
777 */
778 if (cycles == 0) {
779 if (irq_flags & __IRQF_TIMER) {
780 if (fast_pool->last_timer_intr)
781 return;
782 fast_pool->last_timer_intr = 1;
783 } else
784 fast_pool->last_timer_intr = 0;
785 }
786 credit_entropy_bits(r, 1);
714} 787}
715 788
716#ifdef CONFIG_BLOCK 789#ifdef CONFIG_BLOCK
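The per-CPU fast pool spills into a main pool only when the event count hits a multiple of 1024 or at most once per second, which is what the count & 1023 / time_after() test above implements. A stand-alone model of that gate (HZ and the tick-per-event timing are artificial):

	#include <stdio.h>

	#define HZ 100

	static unsigned long jiffies_now;
	static unsigned short count;
	static unsigned long last;

	static int should_spill(void)
	{
		count++;
		/* neither 1024 events accumulated nor a second elapsed */
		if ((count & 1023) && !(jiffies_now > last + HZ))
			return 0;
		last = jiffies_now;
		return 1;
	}

	int main(void)
	{
		int spills = 0, i;

		for (i = 0; i < 5000; i++) {
			jiffies_now++;	/* one tick per event, for the demo */
			spills += should_spill();
		}
		printf("%d spills over 5000 events\n", spills);
		return 0;
	}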
@@ -742,7 +815,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
742 */ 815 */
743static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes) 816static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
744{ 817{
745 __u32 tmp[OUTPUT_POOL_WORDS]; 818 __u32 tmp[OUTPUT_POOL_WORDS];
746 819
747 if (r->pull && r->entropy_count < nbytes * 8 && 820 if (r->pull && r->entropy_count < nbytes * 8 &&
748 r->entropy_count < r->poolinfo->POOLBITS) { 821 r->entropy_count < r->poolinfo->POOLBITS) {
@@ -761,7 +834,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
761 834
762 bytes = extract_entropy(r->pull, tmp, bytes, 835 bytes = extract_entropy(r->pull, tmp, bytes,
763 random_read_wakeup_thresh / 8, rsvd); 836 random_read_wakeup_thresh / 8, rsvd);
764 mix_pool_bytes(r, tmp, bytes); 837 mix_pool_bytes(r, tmp, bytes, NULL);
765 credit_entropy_bits(r, bytes*8); 838 credit_entropy_bits(r, bytes*8);
766 } 839 }
767} 840}
@@ -820,13 +893,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
820static void extract_buf(struct entropy_store *r, __u8 *out) 893static void extract_buf(struct entropy_store *r, __u8 *out)
821{ 894{
822 int i; 895 int i;
823 __u32 hash[5], workspace[SHA_WORKSPACE_WORDS]; 896 union {
897 __u32 w[5];
898 unsigned long l[LONGS(EXTRACT_SIZE)];
899 } hash;
900 __u32 workspace[SHA_WORKSPACE_WORDS];
824 __u8 extract[64]; 901 __u8 extract[64];
902 unsigned long flags;
825 903
826 /* Generate a hash across the pool, 16 words (512 bits) at a time */ 904 /* Generate a hash across the pool, 16 words (512 bits) at a time */
827 sha_init(hash); 905 sha_init(hash.w);
906 spin_lock_irqsave(&r->lock, flags);
828 for (i = 0; i < r->poolinfo->poolwords; i += 16) 907 for (i = 0; i < r->poolinfo->poolwords; i += 16)
829 sha_transform(hash, (__u8 *)(r->pool + i), workspace); 908 sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
830 909
831 /* 910 /*
832 * We mix the hash back into the pool to prevent backtracking 911 * We mix the hash back into the pool to prevent backtracking
@@ -837,13 +916,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
837 * brute-forcing the feedback as hard as brute-forcing the 916 * brute-forcing the feedback as hard as brute-forcing the
838 * hash. 917 * hash.
839 */ 918 */
840 mix_pool_bytes_extract(r, hash, sizeof(hash), extract); 919 __mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
920 spin_unlock_irqrestore(&r->lock, flags);
841 921
842 /* 922 /*
843 * To avoid duplicates, we atomically extract a portion of the 923 * To avoid duplicates, we atomically extract a portion of the
844 * pool while mixing, and hash one final time. 924 * pool while mixing, and hash one final time.
845 */ 925 */
846 sha_transform(hash, extract, workspace); 926 sha_transform(hash.w, extract, workspace);
847 memset(extract, 0, sizeof(extract)); 927 memset(extract, 0, sizeof(extract));
848 memset(workspace, 0, sizeof(workspace)); 928 memset(workspace, 0, sizeof(workspace));
849 929
@@ -852,20 +932,32 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
852 * pattern, we fold it in half. Thus, we always feed back 932 * pattern, we fold it in half. Thus, we always feed back
853 * twice as much data as we output. 933 * twice as much data as we output.
854 */ 934 */
855 hash[0] ^= hash[3]; 935 hash.w[0] ^= hash.w[3];
856 hash[1] ^= hash[4]; 936 hash.w[1] ^= hash.w[4];
857 hash[2] ^= rol32(hash[2], 16); 937 hash.w[2] ^= rol32(hash.w[2], 16);
858 memcpy(out, hash, EXTRACT_SIZE); 938
859 memset(hash, 0, sizeof(hash)); 939 /*
 940 * If we have an architectural hardware random number
941 * generator, mix that in, too.
942 */
943 for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
944 unsigned long v;
945 if (!arch_get_random_long(&v))
946 break;
947 hash.l[i] ^= v;
948 }
949
950 memcpy(out, &hash, EXTRACT_SIZE);
951 memset(&hash, 0, sizeof(hash));
860} 952}
861 953
862static ssize_t extract_entropy(struct entropy_store *r, void *buf, 954static ssize_t extract_entropy(struct entropy_store *r, void *buf,
863 size_t nbytes, int min, int reserved) 955 size_t nbytes, int min, int reserved)
864{ 956{
865 ssize_t ret = 0, i; 957 ssize_t ret = 0, i;
866 __u8 tmp[EXTRACT_SIZE]; 958 __u8 tmp[EXTRACT_SIZE];
867 unsigned long flags;
868 959
960 trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
869 xfer_secondary_pool(r, nbytes); 961 xfer_secondary_pool(r, nbytes);
870 nbytes = account(r, nbytes, min, reserved); 962 nbytes = account(r, nbytes, min, reserved);
871 963
@@ -873,6 +965,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
873 extract_buf(r, tmp); 965 extract_buf(r, tmp);
874 966
875 if (fips_enabled) { 967 if (fips_enabled) {
968 unsigned long flags;
969
876 spin_lock_irqsave(&r->lock, flags); 970 spin_lock_irqsave(&r->lock, flags);
877 if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) 971 if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
878 panic("Hardware RNG duplicated output!\n"); 972 panic("Hardware RNG duplicated output!\n");
@@ -898,6 +992,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
898 ssize_t ret = 0, i; 992 ssize_t ret = 0, i;
899 __u8 tmp[EXTRACT_SIZE]; 993 __u8 tmp[EXTRACT_SIZE];
900 994
995 trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_);
901 xfer_secondary_pool(r, nbytes); 996 xfer_secondary_pool(r, nbytes);
902 nbytes = account(r, nbytes, 0, 0); 997 nbytes = account(r, nbytes, 0, 0);
903 998
@@ -931,17 +1026,35 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
931 1026
932/* 1027/*
933 * This function is the exported kernel interface. It returns some 1028 * This function is the exported kernel interface. It returns some
934 * number of good random numbers, suitable for seeding TCP sequence 1029 * number of good random numbers, suitable for key generation, seeding
935 * numbers, etc. 1030 * TCP sequence numbers, etc. It does not use the hw random number
1031 * generator, if available; use get_random_bytes_arch() for that.
936 */ 1032 */
937void get_random_bytes(void *buf, int nbytes) 1033void get_random_bytes(void *buf, int nbytes)
938{ 1034{
1035 extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
1036}
1037EXPORT_SYMBOL(get_random_bytes);
1038
1039/*
1040 * This function will use the architecture-specific hardware random
1041 * number generator if it is available. The arch-specific hw RNG will
1042 * almost certainly be faster than what we can do in software, but it
1043 * is impossible to verify that it is implemented securely (as
 1044 * opposed to, say, the AES encryption of a sequence number using a
1045 * key known by the NSA). So it's useful if we need the speed, but
1046 * only if we're willing to trust the hardware manufacturer not to
1047 * have put in a back door.
1048 */
1049void get_random_bytes_arch(void *buf, int nbytes)
1050{
939 char *p = buf; 1051 char *p = buf;
940 1052
1053 trace_get_random_bytes(nbytes, _RET_IP_);
941 while (nbytes) { 1054 while (nbytes) {
942 unsigned long v; 1055 unsigned long v;
943 int chunk = min(nbytes, (int)sizeof(unsigned long)); 1056 int chunk = min(nbytes, (int)sizeof(unsigned long));
944 1057
945 if (!arch_get_random_long(&v)) 1058 if (!arch_get_random_long(&v))
946 break; 1059 break;
947 1060
@@ -950,9 +1063,11 @@ void get_random_bytes(void *buf, int nbytes)
950 nbytes -= chunk; 1063 nbytes -= chunk;
951 } 1064 }
952 1065
953 extract_entropy(&nonblocking_pool, p, nbytes, 0, 0); 1066 if (nbytes)
1067 extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
954} 1068}
955EXPORT_SYMBOL(get_random_bytes); 1069EXPORT_SYMBOL(get_random_bytes_arch);
1070
956 1071
957/* 1072/*
958 * init_std_data - initialize pool with system data 1073 * init_std_data - initialize pool with system data
@@ -966,23 +1081,30 @@ EXPORT_SYMBOL(get_random_bytes);
966static void init_std_data(struct entropy_store *r) 1081static void init_std_data(struct entropy_store *r)
967{ 1082{
968 int i; 1083 int i;
969 ktime_t now; 1084 ktime_t now = ktime_get_real();
970 unsigned long flags; 1085 unsigned long rv;
971 1086
972 spin_lock_irqsave(&r->lock, flags);
973 r->entropy_count = 0; 1087 r->entropy_count = 0;
974 spin_unlock_irqrestore(&r->lock, flags); 1088 r->entropy_total = 0;
975 1089 mix_pool_bytes(r, &now, sizeof(now), NULL);
976 now = ktime_get_real(); 1090 for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
977 mix_pool_bytes(r, &now, sizeof(now)); 1091 if (!arch_get_random_long(&rv))
978 for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof flags) {
979 if (!arch_get_random_long(&flags))
980 break; 1092 break;
981 mix_pool_bytes(r, &flags, sizeof(flags)); 1093 mix_pool_bytes(r, &rv, sizeof(rv), NULL);
982 } 1094 }
983 mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); 1095 mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
984} 1096}
985 1097
1098/*
1099 * Note that setup_arch() may call add_device_randomness()
1100 * long before we get here. This allows seeding of the pools
1101 * with some platform dependent data very early in the boot
1102 * process. But it limits our options here. We must use
1103 * statically allocated structures that already have all
1104 * initializations complete at compile time. We should also
1105 * take care not to overwrite the precious per platform data
1106 * we were given.
1107 */
986static int rand_initialize(void) 1108static int rand_initialize(void)
987{ 1109{
988 init_std_data(&input_pool); 1110 init_std_data(&input_pool);
@@ -992,24 +1114,6 @@ static int rand_initialize(void)
992} 1114}
993module_init(rand_initialize); 1115module_init(rand_initialize);
994 1116
995void rand_initialize_irq(int irq)
996{
997 struct timer_rand_state *state;
998
999 state = get_timer_rand_state(irq);
1000
1001 if (state)
1002 return;
1003
1004 /*
1005 * If kzalloc returns null, we just won't use that entropy
1006 * source.
1007 */
1008 state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
1009 if (state)
1010 set_timer_rand_state(irq, state);
1011}
1012
1013#ifdef CONFIG_BLOCK 1117#ifdef CONFIG_BLOCK
1014void rand_initialize_disk(struct gendisk *disk) 1118void rand_initialize_disk(struct gendisk *disk)
1015{ 1119{
@@ -1117,7 +1221,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
1117 count -= bytes; 1221 count -= bytes;
1118 p += bytes; 1222 p += bytes;
1119 1223
1120 mix_pool_bytes(r, buf, bytes); 1224 mix_pool_bytes(r, buf, bytes, NULL);
1121 cond_resched(); 1225 cond_resched();
1122 } 1226 }
1123 1227
@@ -1279,6 +1383,7 @@ static int proc_do_uuid(ctl_table *table, int write,
1279} 1383}
1280 1384
1281static int sysctl_poolsize = INPUT_POOL_WORDS * 32; 1385static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
1386extern ctl_table random_table[];
1282ctl_table random_table[] = { 1387ctl_table random_table[] = {
1283 { 1388 {
1284 .procname = "poolsize", 1389 .procname = "poolsize",
@@ -1344,7 +1449,7 @@ late_initcall(random_int_secret_init);
1344 * value is not cryptographically secure but for several uses the cost of 1449 * value is not cryptographically secure but for several uses the cost of
1345 * depleting entropy is too high 1450 * depleting entropy is too high
1346 */ 1451 */
1347DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash); 1452static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
1348unsigned int get_random_int(void) 1453unsigned int get_random_int(void)
1349{ 1454{
1350 __u32 *hash; 1455 __u32 *hash;
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 45713f0e7d61..f87780502b41 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -1459,7 +1459,7 @@ static int __devexit sonypi_remove(struct platform_device *dev)
1459#ifdef CONFIG_PM 1459#ifdef CONFIG_PM
1460static int old_camera_power; 1460static int old_camera_power;
1461 1461
1462static int sonypi_suspend(struct platform_device *dev, pm_message_t state) 1462static int sonypi_suspend(struct device *dev)
1463{ 1463{
1464 old_camera_power = sonypi_device.camera_power; 1464 old_camera_power = sonypi_device.camera_power;
1465 sonypi_disable(); 1465 sonypi_disable();
@@ -1467,14 +1467,16 @@ static int sonypi_suspend(struct platform_device *dev, pm_message_t state)
1467 return 0; 1467 return 0;
1468} 1468}
1469 1469
1470static int sonypi_resume(struct platform_device *dev) 1470static int sonypi_resume(struct device *dev)
1471{ 1471{
1472 sonypi_enable(old_camera_power); 1472 sonypi_enable(old_camera_power);
1473 return 0; 1473 return 0;
1474} 1474}
1475
1476static SIMPLE_DEV_PM_OPS(sonypi_pm, sonypi_suspend, sonypi_resume);
1477#define SONYPI_PM (&sonypi_pm)
1475#else 1478#else
1476#define sonypi_suspend NULL 1479#define SONYPI_PM NULL
1477#define sonypi_resume NULL
1478#endif 1480#endif
1479 1481
1480static void sonypi_shutdown(struct platform_device *dev) 1482static void sonypi_shutdown(struct platform_device *dev)
@@ -1486,12 +1488,11 @@ static struct platform_driver sonypi_driver = {
1486 .driver = { 1488 .driver = {
1487 .name = "sonypi", 1489 .name = "sonypi",
1488 .owner = THIS_MODULE, 1490 .owner = THIS_MODULE,
1491 .pm = SONYPI_PM,
1489 }, 1492 },
1490 .probe = sonypi_probe, 1493 .probe = sonypi_probe,
1491 .remove = __devexit_p(sonypi_remove), 1494 .remove = __devexit_p(sonypi_remove),
1492 .shutdown = sonypi_shutdown, 1495 .shutdown = sonypi_shutdown,
1493 .suspend = sonypi_suspend,
1494 .resume = sonypi_resume,
1495}; 1496};
1496 1497
1497static struct platform_device *sonypi_platform_device; 1498static struct platform_device *sonypi_platform_device;
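
The sonypi change is the same conversion repeated in the TPM drivers below: the legacy platform_driver .suspend/.resume callbacks, which took a platform_device and a pm_message_t, are replaced by a struct dev_pm_ops built with SIMPLE_DEV_PM_OPS() and hung off .driver.pm. A minimal sketch of the pattern, using a hypothetical "foo" driver:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware; dev replaces the old (pdev, pm_message_t) pair */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore whatever state foo_suspend() saved */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm,	/* instead of driver-level .suspend/.resume */
	},
};

With CONFIG_PM_SLEEP disabled the macro defines an ops structure with no sleep callbacks, which is why the sonypi #else branch shrinks to a single SONYPI_PM fallback.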
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index ad7c7320dd1b..817f0ee202b6 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -827,10 +827,10 @@ EXPORT_SYMBOL_GPL(tpm_pcr_extend);
827int tpm_do_selftest(struct tpm_chip *chip) 827int tpm_do_selftest(struct tpm_chip *chip)
828{ 828{
829 int rc; 829 int rc;
830 u8 digest[TPM_DIGEST_SIZE];
831 unsigned int loops; 830 unsigned int loops;
832 unsigned int delay_msec = 1000; 831 unsigned int delay_msec = 1000;
833 unsigned long duration; 832 unsigned long duration;
833 struct tpm_cmd_t cmd;
834 834
835 duration = tpm_calc_ordinal_duration(chip, 835 duration = tpm_calc_ordinal_duration(chip,
836 TPM_ORD_CONTINUE_SELFTEST); 836 TPM_ORD_CONTINUE_SELFTEST);
@@ -845,7 +845,15 @@ int tpm_do_selftest(struct tpm_chip *chip)
845 return rc; 845 return rc;
846 846
847 do { 847 do {
848 rc = __tpm_pcr_read(chip, 0, digest); 848 /* Attempt to read a PCR value */
849 cmd.header.in = pcrread_header;
850 cmd.params.pcrread_in.pcr_idx = cpu_to_be32(0);
851 rc = tpm_transmit(chip, (u8 *) &cmd, READ_PCR_RESULT_SIZE);
852
853 if (rc < TPM_HEADER_SIZE)
854 return -EFAULT;
855
856 rc = be32_to_cpu(cmd.header.out.return_code);
849 if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) { 857 if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
850 dev_info(chip->dev, 858 dev_info(chip->dev,
851 "TPM is disabled/deactivated (0x%X)\n", rc); 859 "TPM is disabled/deactivated (0x%X)\n", rc);
@@ -1274,7 +1282,7 @@ static struct tpm_input_header savestate_header = {
1274 * We are about to suspend. Save the TPM state 1282 * We are about to suspend. Save the TPM state
1275 * so that it can be restored. 1283 * so that it can be restored.
1276 */ 1284 */
1277int tpm_pm_suspend(struct device *dev, pm_message_t pm_state) 1285int tpm_pm_suspend(struct device *dev)
1278{ 1286{
1279 struct tpm_chip *chip = dev_get_drvdata(dev); 1287 struct tpm_chip *chip = dev_get_drvdata(dev);
1280 struct tpm_cmd_t cmd; 1288 struct tpm_cmd_t cmd;
@@ -1322,6 +1330,9 @@ EXPORT_SYMBOL_GPL(tpm_pm_resume);
1322 1330
1323void tpm_dev_vendor_release(struct tpm_chip *chip) 1331void tpm_dev_vendor_release(struct tpm_chip *chip)
1324{ 1332{
1333 if (!chip)
1334 return;
1335
1325 if (chip->vendor.release) 1336 if (chip->vendor.release)
1326 chip->vendor.release(chip->dev); 1337 chip->vendor.release(chip->dev);
1327 1338
@@ -1339,6 +1350,9 @@ void tpm_dev_release(struct device *dev)
1339{ 1350{
1340 struct tpm_chip *chip = dev_get_drvdata(dev); 1351 struct tpm_chip *chip = dev_get_drvdata(dev);
1341 1352
1353 if (!chip)
1354 return;
1355
1342 tpm_dev_vendor_release(chip); 1356 tpm_dev_vendor_release(chip);
1343 1357
1344 chip->release(dev); 1358 chip->release(dev);
@@ -1405,15 +1419,12 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
1405 "unable to misc_register %s, minor %d\n", 1419 "unable to misc_register %s, minor %d\n",
1406 chip->vendor.miscdev.name, 1420 chip->vendor.miscdev.name,
1407 chip->vendor.miscdev.minor); 1421 chip->vendor.miscdev.minor);
1408 put_device(chip->dev); 1422 goto put_device;
1409 return NULL;
1410 } 1423 }
1411 1424
1412 if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) { 1425 if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) {
1413 misc_deregister(&chip->vendor.miscdev); 1426 misc_deregister(&chip->vendor.miscdev);
1414 put_device(chip->dev); 1427 goto put_device;
1415
1416 return NULL;
1417 } 1428 }
1418 1429
1419 chip->bios_dir = tpm_bios_log_setup(devname); 1430 chip->bios_dir = tpm_bios_log_setup(devname);
@@ -1425,6 +1436,8 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
1425 1436
1426 return chip; 1437 return chip;
1427 1438
1439put_device:
1440 put_device(chip->dev);
1428out_free: 1441out_free:
1429 kfree(chip); 1442 kfree(chip);
1430 kfree(devname); 1443 kfree(devname);
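
The tpm_register_hardware() hunk above folds two duplicated failure paths (put_device() followed by return NULL) into a single goto label. The general shape of that unwinding pattern, as a sketch with a hypothetical struct thing and helpers:

struct thing *register_thing(void)
{
	struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return NULL;

	if (acquire_resource(t))	/* hypothetical helper */
		goto out_free;
	if (expose_interfaces(t))	/* hypothetical helper */
		goto put_resource;

	return t;

put_resource:
	release_resource(t);	/* undo acquire_resource(), like put_device() above */
out_free:
	kfree(t);
	return NULL;
}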
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index b1c5280ac159..917f727e6740 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -299,7 +299,7 @@ extern ssize_t tpm_write(struct file *, const char __user *, size_t,
299 loff_t *); 299 loff_t *);
300extern ssize_t tpm_read(struct file *, char __user *, size_t, loff_t *); 300extern ssize_t tpm_read(struct file *, char __user *, size_t, loff_t *);
301extern void tpm_remove_hardware(struct device *); 301extern void tpm_remove_hardware(struct device *);
302extern int tpm_pm_suspend(struct device *, pm_message_t); 302extern int tpm_pm_suspend(struct device *);
303extern int tpm_pm_resume(struct device *); 303extern int tpm_pm_resume(struct device *);
304extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long, 304extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long,
305 wait_queue_head_t *); 305 wait_queue_head_t *);
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index c64a1bc65349..678d57019dc4 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -168,22 +168,14 @@ static void atml_plat_remove(void)
168 } 168 }
169} 169}
170 170
171static int tpm_atml_suspend(struct platform_device *dev, pm_message_t msg) 171static SIMPLE_DEV_PM_OPS(tpm_atml_pm, tpm_pm_suspend, tpm_pm_resume);
172{
173 return tpm_pm_suspend(&dev->dev, msg);
174}
175 172
176static int tpm_atml_resume(struct platform_device *dev)
177{
178 return tpm_pm_resume(&dev->dev);
179}
180static struct platform_driver atml_drv = { 173static struct platform_driver atml_drv = {
181 .driver = { 174 .driver = {
182 .name = "tpm_atmel", 175 .name = "tpm_atmel",
183 .owner = THIS_MODULE, 176 .owner = THIS_MODULE,
177 .pm = &tpm_atml_pm,
184 }, 178 },
185 .suspend = tpm_atml_suspend,
186 .resume = tpm_atml_resume,
187}; 179};
188 180
189static int __init init_atmel(void) 181static int __init init_atmel(void)
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 76da32e11f18..3251a44e8ceb 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -4,8 +4,8 @@
4 * SLD 9630 TT 1.1 and SLB 9635 TT 1.2 Trusted Platform Module 4 * SLD 9630 TT 1.1 and SLB 9635 TT 1.2 Trusted Platform Module
5 * Specifications at www.trustedcomputinggroup.org 5 * Specifications at www.trustedcomputinggroup.org
6 * 6 *
7 * Copyright (C) 2005, Marcel Selhorst <m.selhorst@sirrix.com> 7 * Copyright (C) 2005, Marcel Selhorst <tpmdd@selhorst.net>
8 * Sirrix AG - security technologies, http://www.sirrix.com and 8 * Sirrix AG - security technologies <tpmdd@sirrix.com> and
9 * Applied Data Security Group, Ruhr-University Bochum, Germany 9 * Applied Data Security Group, Ruhr-University Bochum, Germany
10 * Project-Homepage: http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/ 10 * Project-Homepage: http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/
11 * 11 *
@@ -671,7 +671,7 @@ static void __exit cleanup_inf(void)
671module_init(init_inf); 671module_init(init_inf);
672module_exit(cleanup_inf); 672module_exit(cleanup_inf);
673 673
674MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>"); 674MODULE_AUTHOR("Marcel Selhorst <tpmdd@sirrix.com>");
675MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2"); 675MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
676MODULE_VERSION("1.9.2"); 676MODULE_VERSION("1.9.2");
677MODULE_LICENSE("GPL"); 677MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 4d2464871ada..640c9a427b59 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -274,22 +274,13 @@ static void tpm_nsc_remove(struct device *dev)
274 } 274 }
275} 275}
276 276
277static int tpm_nsc_suspend(struct platform_device *dev, pm_message_t msg) 277static SIMPLE_DEV_PM_OPS(tpm_nsc_pm, tpm_pm_suspend, tpm_pm_resume);
278{
279 return tpm_pm_suspend(&dev->dev, msg);
280}
281
282static int tpm_nsc_resume(struct platform_device *dev)
283{
284 return tpm_pm_resume(&dev->dev);
285}
286 278
287static struct platform_driver nsc_drv = { 279static struct platform_driver nsc_drv = {
288 .suspend = tpm_nsc_suspend,
289 .resume = tpm_nsc_resume,
290 .driver = { 280 .driver = {
291 .name = "tpm_nsc", 281 .name = "tpm_nsc",
292 .owner = THIS_MODULE, 282 .owner = THIS_MODULE,
283 .pm = &tpm_nsc_pm,
293 }, 284 },
294}; 285};
295 286
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index d2a70cae76df..c4be3519a587 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -750,7 +750,7 @@ static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
750 750
751static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg) 751static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
752{ 752{
753 return tpm_pm_suspend(&dev->dev, msg); 753 return tpm_pm_suspend(&dev->dev);
754} 754}
755 755
756static int tpm_tis_pnp_resume(struct pnp_dev *dev) 756static int tpm_tis_pnp_resume(struct pnp_dev *dev)
@@ -806,27 +806,27 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
806 sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444); 806 sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
807MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe"); 807MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
808#endif 808#endif
809static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
810{
811 return tpm_pm_suspend(&dev->dev, msg);
812}
813 809
814static int tpm_tis_resume(struct platform_device *dev) 810#ifdef CONFIG_PM_SLEEP
811static int tpm_tis_resume(struct device *dev)
815{ 812{
816 struct tpm_chip *chip = dev_get_drvdata(&dev->dev); 813 struct tpm_chip *chip = dev_get_drvdata(dev);
817 814
818 if (chip->vendor.irq) 815 if (chip->vendor.irq)
819 tpm_tis_reenable_interrupts(chip); 816 tpm_tis_reenable_interrupts(chip);
820 817
821 return tpm_pm_resume(&dev->dev); 818 return tpm_pm_resume(dev);
822} 819}
820#endif
821
822static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
823
823static struct platform_driver tis_drv = { 824static struct platform_driver tis_drv = {
824 .driver = { 825 .driver = {
825 .name = "tpm_tis", 826 .name = "tpm_tis",
826 .owner = THIS_MODULE, 827 .owner = THIS_MODULE,
828 .pm = &tpm_tis_pm,
827 }, 829 },
828 .suspend = tpm_tis_suspend,
829 .resume = tpm_tis_resume,
830}; 830};
831 831
832static struct platform_device *pdev; 832static struct platform_device *pdev;
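
tpm_tis is the one TPM driver that cannot point .pm at tpm_pm_suspend/tpm_pm_resume directly, since interrupts must be re-enabled before the common resume path runs, so it keeps a wrapper but moves it to the dev_pm_ops prototype under #ifdef CONFIG_PM_SLEEP. SIMPLE_DEV_PM_OPS() only references the callbacks when CONFIG_PM_SLEEP is set, so the guarded definition compiles either way. The shape of the pattern, with hypothetical names:

#ifdef CONFIG_PM_SLEEP
static int foo_resume(struct device *dev)
{
	struct foo_chip *chip = dev_get_drvdata(dev);	/* hypothetical driver data */

	foo_reenable_interrupts(chip);	/* device-specific fixup first... */
	return foo_common_resume(dev);	/* ...then the shared resume path */
}
#endif

static SIMPLE_DEV_PM_OPS(foo_pm, foo_common_suspend, foo_resume);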
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 4864407e3fc4..7f0b5ca78516 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -25,7 +25,6 @@ menu "Common Clock Framework"
25 25
26config COMMON_CLK_DEBUG 26config COMMON_CLK_DEBUG
27 bool "DebugFS representation of clock tree" 27 bool "DebugFS representation of clock tree"
28 depends on COMMON_CLK
29 select DEBUG_FS 28 select DEBUG_FS
30 ---help--- 29 ---help---
 31	 Creates a directory hierarchy in debugfs for visualizing the clk	 30	 Creates a directory hierarchy in debugfs for visualizing the clk
@@ -34,4 +33,11 @@ config COMMON_CLK_DEBUG
34 clk_flags, clk_prepare_count, clk_enable_count & 33 clk_flags, clk_prepare_count, clk_enable_count &
35 clk_notifier_count. 34 clk_notifier_count.
36 35
36config COMMON_CLK_WM831X
37 tristate "Clock driver for WM831x/2x PMICs"
38 depends on MFD_WM831X
39 ---help---
40 Supports the clocking subsystem of the WM831x/2x series of
 41	 PMICs from Wolfson Microelectronics.
42
37endmenu 43endmenu
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index b9a5158a30b1..5869ea387054 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -1,7 +1,15 @@
1 1# common clock types
2obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o 2obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o
3obj-$(CONFIG_COMMON_CLK) += clk.o clk-fixed-rate.o clk-gate.o \ 3obj-$(CONFIG_COMMON_CLK) += clk.o clk-fixed-rate.o clk-gate.o \
4 clk-mux.o clk-divider.o clk-fixed-factor.o 4 clk-mux.o clk-divider.o clk-fixed-factor.o
5# SoCs specific 5# SoCs specific
6obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
7obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
6obj-$(CONFIG_ARCH_MXS) += mxs/ 8obj-$(CONFIG_ARCH_MXS) += mxs/
9obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
7obj-$(CONFIG_PLAT_SPEAR) += spear/ 10obj-$(CONFIG_PLAT_SPEAR) += spear/
11obj-$(CONFIG_ARCH_U300) += clk-u300.o
12obj-$(CONFIG_ARCH_INTEGRATOR) += versatile/
13
14# Chip specific
15obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 8ea11b444528..a9204c69148d 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -30,18 +30,89 @@
30#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw) 30#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
31 31
32#define div_mask(d) ((1 << (d->width)) - 1) 32#define div_mask(d) ((1 << (d->width)) - 1)
33#define is_power_of_two(i) !(i & ~i)
34
35static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
36{
37 unsigned int maxdiv = 0;
38 const struct clk_div_table *clkt;
39
40 for (clkt = table; clkt->div; clkt++)
41 if (clkt->div > maxdiv)
42 maxdiv = clkt->div;
43 return maxdiv;
44}
45
46static unsigned int _get_maxdiv(struct clk_divider *divider)
47{
48 if (divider->flags & CLK_DIVIDER_ONE_BASED)
49 return div_mask(divider);
50 if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
51 return 1 << div_mask(divider);
52 if (divider->table)
53 return _get_table_maxdiv(divider->table);
54 return div_mask(divider) + 1;
55}
56
57static unsigned int _get_table_div(const struct clk_div_table *table,
58 unsigned int val)
59{
60 const struct clk_div_table *clkt;
61
62 for (clkt = table; clkt->div; clkt++)
63 if (clkt->val == val)
64 return clkt->div;
65 return 0;
66}
67
68static unsigned int _get_div(struct clk_divider *divider, unsigned int val)
69{
70 if (divider->flags & CLK_DIVIDER_ONE_BASED)
71 return val;
72 if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
73 return 1 << val;
74 if (divider->table)
75 return _get_table_div(divider->table, val);
76 return val + 1;
77}
78
79static unsigned int _get_table_val(const struct clk_div_table *table,
80 unsigned int div)
81{
82 const struct clk_div_table *clkt;
83
84 for (clkt = table; clkt->div; clkt++)
85 if (clkt->div == div)
86 return clkt->val;
87 return 0;
88}
89
90static unsigned int _get_val(struct clk_divider *divider, u8 div)
91{
92 if (divider->flags & CLK_DIVIDER_ONE_BASED)
93 return div;
94 if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
95 return __ffs(div);
96 if (divider->table)
97 return _get_table_val(divider->table, div);
98 return div - 1;
99}
33 100
34static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, 101static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
35 unsigned long parent_rate) 102 unsigned long parent_rate)
36{ 103{
37 struct clk_divider *divider = to_clk_divider(hw); 104 struct clk_divider *divider = to_clk_divider(hw);
38 unsigned int div; 105 unsigned int div, val;
39 106
40 div = readl(divider->reg) >> divider->shift; 107 val = readl(divider->reg) >> divider->shift;
41 div &= div_mask(divider); 108 val &= div_mask(divider);
42 109
43 if (!(divider->flags & CLK_DIVIDER_ONE_BASED)) 110 div = _get_div(divider, val);
44 div++; 111 if (!div) {
112 WARN(1, "%s: Invalid divisor for clock %s\n", __func__,
113 __clk_get_name(hw->clk));
114 return parent_rate;
115 }
45 116
46 return parent_rate / div; 117 return parent_rate / div;
47} 118}
@@ -52,6 +123,26 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
52 */ 123 */
53#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1) 124#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1)
54 125
126static bool _is_valid_table_div(const struct clk_div_table *table,
127 unsigned int div)
128{
129 const struct clk_div_table *clkt;
130
131 for (clkt = table; clkt->div; clkt++)
132 if (clkt->div == div)
133 return true;
134 return false;
135}
136
137static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
138{
139 if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
140 return is_power_of_two(div);
141 if (divider->table)
142 return _is_valid_table_div(divider->table, div);
143 return true;
144}
145
55static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, 146static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
56 unsigned long *best_parent_rate) 147 unsigned long *best_parent_rate)
57{ 148{
@@ -62,10 +153,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
62 if (!rate) 153 if (!rate)
63 rate = 1; 154 rate = 1;
64 155
65 maxdiv = (1 << divider->width); 156 maxdiv = _get_maxdiv(divider);
66
67 if (divider->flags & CLK_DIVIDER_ONE_BASED)
68 maxdiv--;
69 157
70 if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) { 158 if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
71 parent_rate = *best_parent_rate; 159 parent_rate = *best_parent_rate;
@@ -82,6 +170,8 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
82 maxdiv = min(ULONG_MAX / rate, maxdiv); 170 maxdiv = min(ULONG_MAX / rate, maxdiv);
83 171
84 for (i = 1; i <= maxdiv; i++) { 172 for (i = 1; i <= maxdiv; i++) {
173 if (!_is_valid_div(divider, i))
174 continue;
85 parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 175 parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
86 MULT_ROUND_UP(rate, i)); 176 MULT_ROUND_UP(rate, i));
87 now = parent_rate / i; 177 now = parent_rate / i;
@@ -93,9 +183,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
93 } 183 }
94 184
95 if (!bestdiv) { 185 if (!bestdiv) {
96 bestdiv = (1 << divider->width); 186 bestdiv = _get_maxdiv(divider);
97 if (divider->flags & CLK_DIVIDER_ONE_BASED)
98 bestdiv--;
99 *best_parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 1); 187 *best_parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 1);
100 } 188 }
101 189
@@ -115,24 +203,22 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
115 unsigned long parent_rate) 203 unsigned long parent_rate)
116{ 204{
117 struct clk_divider *divider = to_clk_divider(hw); 205 struct clk_divider *divider = to_clk_divider(hw);
118 unsigned int div; 206 unsigned int div, value;
119 unsigned long flags = 0; 207 unsigned long flags = 0;
120 u32 val; 208 u32 val;
121 209
122 div = parent_rate / rate; 210 div = parent_rate / rate;
211 value = _get_val(divider, div);
123 212
124 if (!(divider->flags & CLK_DIVIDER_ONE_BASED)) 213 if (value > div_mask(divider))
125 div--; 214 value = div_mask(divider);
126
127 if (div > div_mask(divider))
128 div = div_mask(divider);
129 215
130 if (divider->lock) 216 if (divider->lock)
131 spin_lock_irqsave(divider->lock, flags); 217 spin_lock_irqsave(divider->lock, flags);
132 218
133 val = readl(divider->reg); 219 val = readl(divider->reg);
134 val &= ~(div_mask(divider) << divider->shift); 220 val &= ~(div_mask(divider) << divider->shift);
135 val |= div << divider->shift; 221 val |= value << divider->shift;
136 writel(val, divider->reg); 222 writel(val, divider->reg);
137 223
138 if (divider->lock) 224 if (divider->lock)
@@ -148,22 +234,11 @@ const struct clk_ops clk_divider_ops = {
148}; 234};
149EXPORT_SYMBOL_GPL(clk_divider_ops); 235EXPORT_SYMBOL_GPL(clk_divider_ops);
150 236
151/** 237static struct clk *_register_divider(struct device *dev, const char *name,
152 * clk_register_divider - register a divider clock with the clock framework
153 * @dev: device registering this clock
154 * @name: name of this clock
155 * @parent_name: name of clock's parent
156 * @flags: framework-specific flags
157 * @reg: register address to adjust divider
158 * @shift: number of bits to shift the bitfield
159 * @width: width of the bitfield
160 * @clk_divider_flags: divider-specific flags for this clock
161 * @lock: shared register lock for this clock
162 */
163struct clk *clk_register_divider(struct device *dev, const char *name,
164 const char *parent_name, unsigned long flags, 238 const char *parent_name, unsigned long flags,
165 void __iomem *reg, u8 shift, u8 width, 239 void __iomem *reg, u8 shift, u8 width,
166 u8 clk_divider_flags, spinlock_t *lock) 240 u8 clk_divider_flags, const struct clk_div_table *table,
241 spinlock_t *lock)
167{ 242{
168 struct clk_divider *div; 243 struct clk_divider *div;
169 struct clk *clk; 244 struct clk *clk;
@@ -178,7 +253,7 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
178 253
179 init.name = name; 254 init.name = name;
180 init.ops = &clk_divider_ops; 255 init.ops = &clk_divider_ops;
181 init.flags = flags; 256 init.flags = flags | CLK_IS_BASIC;
182 init.parent_names = (parent_name ? &parent_name: NULL); 257 init.parent_names = (parent_name ? &parent_name: NULL);
183 init.num_parents = (parent_name ? 1 : 0); 258 init.num_parents = (parent_name ? 1 : 0);
184 259
@@ -189,6 +264,7 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
189 div->flags = clk_divider_flags; 264 div->flags = clk_divider_flags;
190 div->lock = lock; 265 div->lock = lock;
191 div->hw.init = &init; 266 div->hw.init = &init;
267 div->table = table;
192 268
193 /* register the clock */ 269 /* register the clock */
194 clk = clk_register(dev, &div->hw); 270 clk = clk_register(dev, &div->hw);
@@ -198,3 +274,48 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
198 274
199 return clk; 275 return clk;
200} 276}
277
278/**
279 * clk_register_divider - register a divider clock with the clock framework
280 * @dev: device registering this clock
281 * @name: name of this clock
282 * @parent_name: name of clock's parent
283 * @flags: framework-specific flags
284 * @reg: register address to adjust divider
285 * @shift: number of bits to shift the bitfield
286 * @width: width of the bitfield
287 * @clk_divider_flags: divider-specific flags for this clock
288 * @lock: shared register lock for this clock
289 */
290struct clk *clk_register_divider(struct device *dev, const char *name,
291 const char *parent_name, unsigned long flags,
292 void __iomem *reg, u8 shift, u8 width,
293 u8 clk_divider_flags, spinlock_t *lock)
294{
295 return _register_divider(dev, name, parent_name, flags, reg, shift,
296 width, clk_divider_flags, NULL, lock);
297}
298
299/**
300 * clk_register_divider_table - register a table based divider clock with
301 * the clock framework
302 * @dev: device registering this clock
303 * @name: name of this clock
304 * @parent_name: name of clock's parent
305 * @flags: framework-specific flags
306 * @reg: register address to adjust divider
307 * @shift: number of bits to shift the bitfield
308 * @width: width of the bitfield
309 * @clk_divider_flags: divider-specific flags for this clock
310 * @table: array of divider/value pairs ending with a div set to 0
311 * @lock: shared register lock for this clock
312 */
313struct clk *clk_register_divider_table(struct device *dev, const char *name,
314 const char *parent_name, unsigned long flags,
315 void __iomem *reg, u8 shift, u8 width,
316 u8 clk_divider_flags, const struct clk_div_table *table,
317 spinlock_t *lock)
318{
319 return _register_divider(dev, name, parent_name, flags, reg, shift,
320 width, clk_divider_flags, table, lock);
321}
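
A hypothetical caller of the new table-based registration, showing the termination convention documented above (the array ends with an entry whose .div is 0):

/* Register field values 0..3 select divisors 1, 2, 4 and 6. */
static const struct clk_div_table foo_div_table[] = {
	{ .val = 0, .div = 1 },
	{ .val = 1, .div = 2 },
	{ .val = 2, .div = 4 },
	{ .val = 3, .div = 6 },
	{ .div = 0 },			/* sentinel */
};

static struct clk * __init foo_div_register(void __iomem *base,
					    spinlock_t *lock)
{
	/* shift 0, width 2: the two low bits of the register at base + 0x10 */
	return clk_register_divider_table(NULL, "foo_div", "foo_parent", 0,
					  base + 0x10, 0, 2, 0,
					  foo_div_table, lock);
}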
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index c8c003e217ad..a4899855c0f6 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -82,7 +82,7 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
82 82
83 init.name = name; 83 init.name = name;
84 init.ops = &clk_fixed_factor_ops; 84 init.ops = &clk_fixed_factor_ops;
85 init.flags = flags; 85 init.flags = flags | CLK_IS_BASIC;
86 init.parent_names = &parent_name; 86 init.parent_names = &parent_name;
87 init.num_parents = 1; 87 init.num_parents = 1;
88 88
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index cbd246229786..f5ec0eebd4d7 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -14,6 +14,7 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/io.h> 15#include <linux/io.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/of.h>
17 18
18/* 19/*
19 * DOC: basic fixed-rate clock that cannot gate 20 * DOC: basic fixed-rate clock that cannot gate
@@ -63,7 +64,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
63 64
64 init.name = name; 65 init.name = name;
65 init.ops = &clk_fixed_rate_ops; 66 init.ops = &clk_fixed_rate_ops;
66 init.flags = flags; 67 init.flags = flags | CLK_IS_BASIC;
67 init.parent_names = (parent_name ? &parent_name: NULL); 68 init.parent_names = (parent_name ? &parent_name: NULL);
68 init.num_parents = (parent_name ? 1 : 0); 69 init.num_parents = (parent_name ? 1 : 0);
69 70
@@ -79,3 +80,25 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
79 80
80 return clk; 81 return clk;
81} 82}
83
84#ifdef CONFIG_OF
85/**
86 * of_fixed_clk_setup() - Setup function for simple fixed rate clock
87 */
88void __init of_fixed_clk_setup(struct device_node *node)
89{
90 struct clk *clk;
91 const char *clk_name = node->name;
92 u32 rate;
93
94 if (of_property_read_u32(node, "clock-frequency", &rate))
95 return;
96
97 of_property_read_string(node, "clock-output-names", &clk_name);
98
99 clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate);
100 if (clk)
101 of_clk_add_provider(node, of_clk_src_simple_get, clk);
102}
103EXPORT_SYMBOL_GPL(of_fixed_clk_setup);
104#endif
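
of_fixed_clk_setup() expects a node carrying a clock-frequency property and, optionally, clock-output-names. A sketch of a matching device-tree node (the node name and rate are made up for illustration), followed by the equivalent direct registration that non-DT platforms such as Nomadik below perform by hand:

/*
 * osc26m: oscillator {
 *	compatible = "fixed-clock";
 *	clock-frequency = <26000000>;
 *	clock-output-names = "osc26m";
 * };
 */

static void __init foo_clocks_init(void)
{
	struct clk *clk;

	clk = clk_register_fixed_rate(NULL, "osc26m", NULL, CLK_IS_ROOT,
				      26000000);
	clk_register_clkdev(clk, NULL, "foo-uart");	/* hypothetical consumer */
}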
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 578465e04be6..15114febfd92 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -130,7 +130,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
130 130
131 init.name = name; 131 init.name = name;
132 init.ops = &clk_gate_ops; 132 init.ops = &clk_gate_ops;
133 init.flags = flags; 133 init.flags = flags | CLK_IS_BASIC;
134 init.parent_names = (parent_name ? &parent_name: NULL); 134 init.parent_names = (parent_name ? &parent_name: NULL);
135 init.num_parents = (parent_name ? 1 : 0); 135 init.num_parents = (parent_name ? 1 : 0);
136 136
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
new file mode 100644
index 000000000000..52fecadf004a
--- /dev/null
+++ b/drivers/clk/clk-highbank.c
@@ -0,0 +1,346 @@
1/*
2 * Copyright 2011-2012 Calxeda, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/kernel.h>
18#include <linux/slab.h>
19#include <linux/err.h>
20#include <linux/clk-provider.h>
21#include <linux/io.h>
22#include <linux/of.h>
23
24extern void __iomem *sregs_base;
25
26#define HB_PLL_LOCK_500 0x20000000
27#define HB_PLL_LOCK 0x10000000
28#define HB_PLL_DIVF_SHIFT 20
29#define HB_PLL_DIVF_MASK 0x0ff00000
30#define HB_PLL_DIVQ_SHIFT 16
31#define HB_PLL_DIVQ_MASK 0x00070000
32#define HB_PLL_DIVR_SHIFT 8
33#define HB_PLL_DIVR_MASK 0x00001f00
34#define HB_PLL_RANGE_SHIFT 4
35#define HB_PLL_RANGE_MASK 0x00000070
36#define HB_PLL_BYPASS 0x00000008
37#define HB_PLL_RESET 0x00000004
38#define HB_PLL_EXT_BYPASS 0x00000002
39#define HB_PLL_EXT_ENA 0x00000001
40
41#define HB_PLL_VCO_MIN_FREQ 2133000000
42#define HB_PLL_MAX_FREQ HB_PLL_VCO_MIN_FREQ
43#define HB_PLL_MIN_FREQ (HB_PLL_VCO_MIN_FREQ / 64)
44
45#define HB_A9_BCLK_DIV_MASK 0x00000006
46#define HB_A9_BCLK_DIV_SHIFT 1
47#define HB_A9_PCLK_DIV 0x00000001
48
49struct hb_clk {
50 struct clk_hw hw;
51 void __iomem *reg;
52 char *parent_name;
53};
54#define to_hb_clk(p) container_of(p, struct hb_clk, hw)
55
56static int clk_pll_prepare(struct clk_hw *hwclk)
57 {
58 struct hb_clk *hbclk = to_hb_clk(hwclk);
59 u32 reg;
60
61 reg = readl(hbclk->reg);
62 reg &= ~HB_PLL_RESET;
63 writel(reg, hbclk->reg);
64
65 while ((readl(hbclk->reg) & HB_PLL_LOCK) == 0)
66 ;
67 while ((readl(hbclk->reg) & HB_PLL_LOCK_500) == 0)
68 ;
69
70 return 0;
71}
72
73static void clk_pll_unprepare(struct clk_hw *hwclk)
74{
75 struct hb_clk *hbclk = to_hb_clk(hwclk);
76 u32 reg;
77
78 reg = readl(hbclk->reg);
79 reg |= HB_PLL_RESET;
80 writel(reg, hbclk->reg);
81}
82
83static int clk_pll_enable(struct clk_hw *hwclk)
84{
85 struct hb_clk *hbclk = to_hb_clk(hwclk);
86 u32 reg;
87
88 reg = readl(hbclk->reg);
89 reg |= HB_PLL_EXT_ENA;
90 writel(reg, hbclk->reg);
91
92 return 0;
93}
94
95static void clk_pll_disable(struct clk_hw *hwclk)
96{
97 struct hb_clk *hbclk = to_hb_clk(hwclk);
98 u32 reg;
99
100 reg = readl(hbclk->reg);
101 reg &= ~HB_PLL_EXT_ENA;
102 writel(reg, hbclk->reg);
103}
104
105static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
106 unsigned long parent_rate)
107{
108 struct hb_clk *hbclk = to_hb_clk(hwclk);
109 unsigned long divf, divq, vco_freq, reg;
110
111 reg = readl(hbclk->reg);
112 if (reg & HB_PLL_EXT_BYPASS)
113 return parent_rate;
114
115 divf = (reg & HB_PLL_DIVF_MASK) >> HB_PLL_DIVF_SHIFT;
116 divq = (reg & HB_PLL_DIVQ_MASK) >> HB_PLL_DIVQ_SHIFT;
117 vco_freq = parent_rate * (divf + 1);
118
119 return vco_freq / (1 << divq);
120}
121
122static void clk_pll_calc(unsigned long rate, unsigned long ref_freq,
123 u32 *pdivq, u32 *pdivf)
124{
125 u32 divq, divf;
126 unsigned long vco_freq;
127
128 if (rate < HB_PLL_MIN_FREQ)
129 rate = HB_PLL_MIN_FREQ;
130 if (rate > HB_PLL_MAX_FREQ)
131 rate = HB_PLL_MAX_FREQ;
132
133 for (divq = 1; divq <= 6; divq++) {
134 if ((rate * (1 << divq)) >= HB_PLL_VCO_MIN_FREQ)
135 break;
136 }
137
138 vco_freq = rate * (1 << divq);
139 divf = (vco_freq + (ref_freq / 2)) / ref_freq;
140 divf--;
141
142 *pdivq = divq;
143 *pdivf = divf;
144}
145
146static long clk_pll_round_rate(struct clk_hw *hwclk, unsigned long rate,
147 unsigned long *parent_rate)
148{
149 u32 divq, divf;
150 unsigned long ref_freq = *parent_rate;
151
152 clk_pll_calc(rate, ref_freq, &divq, &divf);
153
154 return (ref_freq * (divf + 1)) / (1 << divq);
155}
156
157static int clk_pll_set_rate(struct clk_hw *hwclk, unsigned long rate,
158 unsigned long parent_rate)
159{
160 struct hb_clk *hbclk = to_hb_clk(hwclk);
161 u32 divq, divf;
162 u32 reg;
163
164 clk_pll_calc(rate, parent_rate, &divq, &divf);
165
166 reg = readl(hbclk->reg);
167 if (divf != ((reg & HB_PLL_DIVF_MASK) >> HB_PLL_DIVF_SHIFT)) {
168 /* Need to re-lock PLL, so put it into bypass mode */
169 reg |= HB_PLL_EXT_BYPASS;
170 writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);
171
172 writel(reg | HB_PLL_RESET, hbclk->reg);
173 reg &= ~(HB_PLL_DIVF_MASK | HB_PLL_DIVQ_MASK);
174 reg |= (divf << HB_PLL_DIVF_SHIFT) | (divq << HB_PLL_DIVQ_SHIFT);
175 writel(reg | HB_PLL_RESET, hbclk->reg);
176 writel(reg, hbclk->reg);
177
178 while ((readl(hbclk->reg) & HB_PLL_LOCK) == 0)
179 ;
180 while ((readl(hbclk->reg) & HB_PLL_LOCK_500) == 0)
181 ;
182 reg |= HB_PLL_EXT_ENA;
183 reg &= ~HB_PLL_EXT_BYPASS;
184 } else {
185 reg &= ~HB_PLL_DIVQ_MASK;
186 reg |= divq << HB_PLL_DIVQ_SHIFT;
187 }
188 writel(reg, hbclk->reg);
189
190 return 0;
191}
192
193static const struct clk_ops clk_pll_ops = {
194 .prepare = clk_pll_prepare,
195 .unprepare = clk_pll_unprepare,
196 .enable = clk_pll_enable,
197 .disable = clk_pll_disable,
198 .recalc_rate = clk_pll_recalc_rate,
199 .round_rate = clk_pll_round_rate,
200 .set_rate = clk_pll_set_rate,
201};
202
203static unsigned long clk_cpu_periphclk_recalc_rate(struct clk_hw *hwclk,
204 unsigned long parent_rate)
205{
206 struct hb_clk *hbclk = to_hb_clk(hwclk);
207 u32 div = (readl(hbclk->reg) & HB_A9_PCLK_DIV) ? 8 : 4;
208 return parent_rate / div;
209}
210
211static const struct clk_ops a9periphclk_ops = {
212 .recalc_rate = clk_cpu_periphclk_recalc_rate,
213};
214
215static unsigned long clk_cpu_a9bclk_recalc_rate(struct clk_hw *hwclk,
216 unsigned long parent_rate)
217{
218 struct hb_clk *hbclk = to_hb_clk(hwclk);
219 u32 div = (readl(hbclk->reg) & HB_A9_BCLK_DIV_MASK) >> HB_A9_BCLK_DIV_SHIFT;
220
221 return parent_rate / (div + 2);
222}
223
224static const struct clk_ops a9bclk_ops = {
225 .recalc_rate = clk_cpu_a9bclk_recalc_rate,
226};
227
228static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
229 unsigned long parent_rate)
230{
231 struct hb_clk *hbclk = to_hb_clk(hwclk);
232 u32 div;
233
234 div = readl(hbclk->reg) & 0x1f;
235 div++;
236 div *= 2;
237
238 return parent_rate / div;
239}
240
241static long clk_periclk_round_rate(struct clk_hw *hwclk, unsigned long rate,
242 unsigned long *parent_rate)
243{
244 u32 div;
245
246 div = *parent_rate / rate;
247 div++;
248 div &= ~0x1;
249
250 return *parent_rate / div;
251}
252
253static int clk_periclk_set_rate(struct clk_hw *hwclk, unsigned long rate,
254 unsigned long parent_rate)
255{
256 struct hb_clk *hbclk = to_hb_clk(hwclk);
257 u32 div;
258
259 div = parent_rate / rate;
260 if (div & 0x1)
261 return -EINVAL;
262
263 writel(div >> 1, hbclk->reg);
264 return 0;
265}
266
267static const struct clk_ops periclk_ops = {
268 .recalc_rate = clk_periclk_recalc_rate,
269 .round_rate = clk_periclk_round_rate,
270 .set_rate = clk_periclk_set_rate,
271};
272
273static __init struct clk *hb_clk_init(struct device_node *node, const struct clk_ops *ops)
274{
275 u32 reg;
276 struct clk *clk;
277 struct hb_clk *hb_clk;
278 const char *clk_name = node->name;
279 const char *parent_name;
280 struct clk_init_data init;
281 int rc;
282
283 rc = of_property_read_u32(node, "reg", &reg);
284 if (WARN_ON(rc))
285 return NULL;
286
287 hb_clk = kzalloc(sizeof(*hb_clk), GFP_KERNEL);
288 if (WARN_ON(!hb_clk))
289 return NULL;
290
291 hb_clk->reg = sregs_base + reg;
292
293 of_property_read_string(node, "clock-output-names", &clk_name);
294
295 init.name = clk_name;
296 init.ops = ops;
297 init.flags = 0;
298 parent_name = of_clk_get_parent_name(node, 0);
299 init.parent_names = &parent_name;
300 init.num_parents = 1;
301
302 hb_clk->hw.init = &init;
303
304 clk = clk_register(NULL, &hb_clk->hw);
305 if (WARN_ON(IS_ERR(clk))) {
306 kfree(hb_clk);
307 return NULL;
308 }
309 rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
310 return clk;
311}
312
313static void __init hb_pll_init(struct device_node *node)
314{
315 hb_clk_init(node, &clk_pll_ops);
316}
317
318static void __init hb_a9periph_init(struct device_node *node)
319{
320 hb_clk_init(node, &a9periphclk_ops);
321}
322
323static void __init hb_a9bus_init(struct device_node *node)
324{
325 struct clk *clk = hb_clk_init(node, &a9bclk_ops);
326 clk_prepare_enable(clk);
327}
328
329static void __init hb_emmc_init(struct device_node *node)
330{
331 hb_clk_init(node, &periclk_ops);
332}
333
334static const __initconst struct of_device_id clk_match[] = {
335 { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
336 { .compatible = "calxeda,hb-pll-clock", .data = hb_pll_init, },
337 { .compatible = "calxeda,hb-a9periph-clock", .data = hb_a9periph_init, },
338 { .compatible = "calxeda,hb-a9bus-clock", .data = hb_a9bus_init, },
339 { .compatible = "calxeda,hb-emmc-clock", .data = hb_emmc_init, },
340 {}
341};
342
343void __init highbank_clocks_init(void)
344{
345 of_clk_init(clk_match);
346}
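
The PLL arithmetic in clk_pll_recalc_rate() above is rate = parent_rate * (divf + 1) / 2^divq when the external bypass is off. A worked check, assuming a 33.333 MHz reference: divf = 63 gives a VCO of roughly 2133 MHz (the documented minimum), and divq = 4 then divides that down to about 133 MHz:

static unsigned long hb_pll_rate_sketch(unsigned long parent_rate,
					unsigned long divf, unsigned long divq)
{
	unsigned long vco_freq = parent_rate * (divf + 1);

	return vco_freq / (1UL << divq);
}

/* hb_pll_rate_sketch(33333333, 63, 0) == 2133333312 (~2133 MHz, the VCO min)
 * hb_pll_rate_sketch(33333333, 63, 4) == 133333332  (~133 MHz)              */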
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index fd36a8ea73d9..508c032edce4 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -106,7 +106,7 @@ struct clk *clk_register_mux(struct device *dev, const char *name,
106 106
107 init.name = name; 107 init.name = name;
108 init.ops = &clk_mux_ops; 108 init.ops = &clk_mux_ops;
109 init.flags = flags; 109 init.flags = flags | CLK_IS_BASIC;
110 init.parent_names = parent_names; 110 init.parent_names = parent_names;
111 init.num_parents = num_parents; 111 init.num_parents = num_parents;
112 112
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
new file mode 100644
index 000000000000..517a8ff7121e
--- /dev/null
+++ b/drivers/clk/clk-nomadik.c
@@ -0,0 +1,47 @@
1#include <linux/clk.h>
2#include <linux/clkdev.h>
3#include <linux/err.h>
4#include <linux/io.h>
5#include <linux/clk-provider.h>
6
7/*
8 * The Nomadik clock tree is described in the STN8815A12 DB V4.2
9 * reference manual for the chip, page 94 ff.
10 */
11
12void __init nomadik_clk_init(void)
13{
14 struct clk *clk;
15
16 clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
17 clk_register_clkdev(clk, "apb_pclk", NULL);
18 clk_register_clkdev(clk, NULL, "gpio.0");
19 clk_register_clkdev(clk, NULL, "gpio.1");
20 clk_register_clkdev(clk, NULL, "gpio.2");
21 clk_register_clkdev(clk, NULL, "gpio.3");
22 clk_register_clkdev(clk, NULL, "rng");
23
24 /*
25 * The 2.4 MHz TIMCLK reference clock is active at boot time, this is
26 * actually the MXTALCLK @19.2 MHz divided by 8. This clock is used
27 * by the timers and watchdog. See page 105 ff.
28 */
29 clk = clk_register_fixed_rate(NULL, "TIMCLK", NULL, CLK_IS_ROOT,
30 2400000);
31 clk_register_clkdev(clk, NULL, "mtu0");
32 clk_register_clkdev(clk, NULL, "mtu1");
33
34 /*
35 * At boot time, PLL2 is set to generate a set of fixed clocks,
36 * one of them is CLK48, the 48 MHz clock, routed to the UART, MMC/SD
37 * I2C, IrDA, USB and SSP blocks.
38 */
39 clk = clk_register_fixed_rate(NULL, "CLK48", NULL, CLK_IS_ROOT,
40 48000000);
41 clk_register_clkdev(clk, NULL, "uart0");
42 clk_register_clkdev(clk, NULL, "uart1");
43 clk_register_clkdev(clk, NULL, "mmci");
44 clk_register_clkdev(clk, NULL, "ssp");
45 clk_register_clkdev(clk, NULL, "nmk-i2c.0");
46 clk_register_clkdev(clk, NULL, "nmk-i2c.1");
47}
diff --git a/drivers/clk/clk-u300.c b/drivers/clk/clk-u300.c
new file mode 100644
index 000000000000..a15f7928fb11
--- /dev/null
+++ b/drivers/clk/clk-u300.c
@@ -0,0 +1,746 @@
1/*
2 * U300 clock implementation
3 * Copyright (C) 2007-2012 ST-Ericsson AB
4 * License terms: GNU General Public License (GPL) version 2
5 * Author: Linus Walleij <linus.walleij@stericsson.com>
6 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
7 */
8#include <linux/clk.h>
9#include <linux/clkdev.h>
10#include <linux/err.h>
11#include <linux/io.h>
12#include <linux/clk-provider.h>
13#include <linux/spinlock.h>
14#include <mach/syscon.h>
15
16/*
17 * The clocking hierarchy currently looks like this.
18 * NOTE: the idea is NOT to show how the clocks are routed on the chip!
 19 * The idea is to show dependencies, so a clock higher up in the
20 * hierarchy has to be on in order for another clock to be on. Now,
21 * both CPU and DMA can actually be on top of the hierarchy, and that
22 * is not modeled currently. Instead we have the backbone AMBA bus on
23 * top. This bus cannot be programmed in any way but conceptually it
24 * needs to be active for the bridges and devices to transport data.
25 *
 26 * Please be aware that a few clocks are hw controlled, which means that
27 * the hw itself can turn on/off or change the rate of the clock when
28 * needed!
29 *
30 * AMBA bus
31 * |
32 * +- CPU
33 * +- FSMC NANDIF NAND Flash interface
34 * +- SEMI Shared Memory interface
35 * +- ISP Image Signal Processor (U335 only)
36 * +- CDS (U335 only)
37 * +- DMA Direct Memory Access Controller
 38 * +- AAIF APP/ACC Interface (Mobile Scalable Link, MSL)
39 * +- APEX
40 * +- VIDEO_ENC AVE2/3 Video Encoder
41 * +- XGAM Graphics Accelerator Controller
42 * +- AHB
43 * |
44 * +- ahb:0 AHB Bridge
45 * | |
46 * | +- ahb:1 INTCON Interrupt controller
47 * | +- ahb:3 MSPRO Memory Stick Pro controller
48 * | +- ahb:4 EMIF External Memory interface
49 * |
50 * +- fast:0 FAST bridge
51 * | |
52 * | +- fast:1 MMCSD MMC/SD card reader controller
53 * | +- fast:2 I2S0 PCM I2S channel 0 controller
54 * | +- fast:3 I2S1 PCM I2S channel 1 controller
55 * | +- fast:4 I2C0 I2C channel 0 controller
56 * | +- fast:5 I2C1 I2C channel 1 controller
57 * | +- fast:6 SPI SPI controller
58 * | +- fast:7 UART1 Secondary UART (U335 only)
59 * |
60 * +- slow:0 SLOW bridge
61 * |
62 * +- slow:1 SYSCON (not possible to control)
63 * +- slow:2 WDOG Watchdog
64 * +- slow:3 UART0 primary UART
65 * +- slow:4 TIMER_APP Application timer - used in Linux
66 * +- slow:5 KEYPAD controller
67 * +- slow:6 GPIO controller
68 * +- slow:7 RTC controller
69 * +- slow:8 BT Bus Tracer (not used currently)
70 * +- slow:9 EH Event Handler (not used currently)
71 * +- slow:a TIMER_ACC Access style timer (not used currently)
72 * +- slow:b PPM (U335 only, what is that?)
73 */
74
75/* Global syscon virtual base */
76static void __iomem *syscon_vbase;
77
78/**
79 * struct clk_syscon - U300 syscon clock
80 * @hw: corresponding clock hardware entry
81 * @hw_ctrld: whether this clock is hardware controlled (for refcount etc)
82 * and does not need any magic pokes to be enabled/disabled
83 * @reset: state holder, whether this block's reset line is asserted or not
84 * @res_reg: reset line enable/disable flag register
85 * @res_bit: bit for resetting or taking this consumer out of reset
86 * @en_reg: clock line enable/disable flag register
87 * @en_bit: bit for enabling/disabling this consumer clock line
88 * @clk_val: magic value to poke in the register to enable/disable
89 * this one clock
90 */
91struct clk_syscon {
92 struct clk_hw hw;
93 bool hw_ctrld;
94 bool reset;
95 void __iomem *res_reg;
96 u8 res_bit;
97 void __iomem *en_reg;
98 u8 en_bit;
99 u16 clk_val;
100};
101
102#define to_syscon(_hw) container_of(_hw, struct clk_syscon, hw)
103
104static DEFINE_SPINLOCK(syscon_resetreg_lock);
105
106/*
107 * Reset control functions. We remember if a block has been
108 * taken out of reset and don't remove the reset assertion again
109 * and vice versa. Currently we only remove resets so the
110 * enablement function is defined out.
111 */
112static void syscon_block_reset_enable(struct clk_syscon *sclk)
113{
114 unsigned long iflags;
115 u16 val;
116
117 /* Not all blocks support resetting */
118 if (!sclk->res_reg)
119 return;
120 spin_lock_irqsave(&syscon_resetreg_lock, iflags);
121 val = readw(sclk->res_reg);
122 val |= BIT(sclk->res_bit);
123 writew(val, sclk->res_reg);
124 spin_unlock_irqrestore(&syscon_resetreg_lock, iflags);
125 sclk->reset = true;
126}
127
128static void syscon_block_reset_disable(struct clk_syscon *sclk)
129{
130 unsigned long iflags;
131 u16 val;
132
133 /* Not all blocks support resetting */
134 if (!sclk->res_reg)
135 return;
136 spin_lock_irqsave(&syscon_resetreg_lock, iflags);
137 val = readw(sclk->res_reg);
138 val &= ~BIT(sclk->res_bit);
139 writew(val, sclk->res_reg);
140 spin_unlock_irqrestore(&syscon_resetreg_lock, iflags);
141 sclk->reset = false;
142}
143
144static int syscon_clk_prepare(struct clk_hw *hw)
145{
146 struct clk_syscon *sclk = to_syscon(hw);
147
148 /* If the block is in reset, bring it out */
149 if (sclk->reset)
150 syscon_block_reset_disable(sclk);
151 return 0;
152}
153
154static void syscon_clk_unprepare(struct clk_hw *hw)
155{
156 struct clk_syscon *sclk = to_syscon(hw);
157
158 /* Please don't force the console into reset */
159 if (sclk->clk_val == U300_SYSCON_SBCER_UART_CLK_EN)
160 return;
161 /* When unpreparing, force block into reset */
162 if (!sclk->reset)
163 syscon_block_reset_enable(sclk);
164}
165
166static int syscon_clk_enable(struct clk_hw *hw)
167{
168 struct clk_syscon *sclk = to_syscon(hw);
169
170 /* Don't touch the hardware controlled clocks */
171 if (sclk->hw_ctrld)
172 return 0;
173 /* These cannot be controlled */
174 if (sclk->clk_val == 0xFFFFU)
175 return 0;
176
177 writew(sclk->clk_val, syscon_vbase + U300_SYSCON_SBCER);
178 return 0;
179}
180
181static void syscon_clk_disable(struct clk_hw *hw)
182{
183 struct clk_syscon *sclk = to_syscon(hw);
184
185 /* Don't touch the hardware controlled clocks */
186 if (sclk->hw_ctrld)
187 return;
188 if (sclk->clk_val == 0xFFFFU)
189 return;
190 /* Please don't disable the console port */
191 if (sclk->clk_val == U300_SYSCON_SBCER_UART_CLK_EN)
192 return;
193
194 writew(sclk->clk_val, syscon_vbase + U300_SYSCON_SBCDR);
195}
196
197static int syscon_clk_is_enabled(struct clk_hw *hw)
198{
199 struct clk_syscon *sclk = to_syscon(hw);
200 u16 val;
201
202 /* If no enable register defined, it's always-on */
203 if (!sclk->en_reg)
204 return 1;
205
206 val = readw(sclk->en_reg);
207 val &= BIT(sclk->en_bit);
208
209 return val ? 1 : 0;
210}
211
212static u16 syscon_get_perf(void)
213{
214 u16 val;
215
216 val = readw(syscon_vbase + U300_SYSCON_CCR);
217 val &= U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK;
218 return val;
219}
220
221static unsigned long
222syscon_clk_recalc_rate(struct clk_hw *hw,
223 unsigned long parent_rate)
224{
225 struct clk_syscon *sclk = to_syscon(hw);
226 u16 perf = syscon_get_perf();
227
228 switch(sclk->clk_val) {
229 case U300_SYSCON_SBCER_FAST_BRIDGE_CLK_EN:
230 case U300_SYSCON_SBCER_I2C0_CLK_EN:
231 case U300_SYSCON_SBCER_I2C1_CLK_EN:
232 case U300_SYSCON_SBCER_MMC_CLK_EN:
233 case U300_SYSCON_SBCER_SPI_CLK_EN:
234 /* The FAST clocks have one progression */
235 switch(perf) {
236 case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
237 case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
238 return 13000000;
239 default:
240 return parent_rate; /* 26 MHz */
241 }
242 case U300_SYSCON_SBCER_DMAC_CLK_EN:
243 case U300_SYSCON_SBCER_NANDIF_CLK_EN:
244 case U300_SYSCON_SBCER_XGAM_CLK_EN:
245 /* AMBA interconnect peripherals */
246 switch(perf) {
247 case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
248 case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
249 return 6500000;
250 case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
251 return 26000000;
252 default:
253 return parent_rate; /* 52 MHz */
254 }
255 case U300_SYSCON_SBCER_SEMI_CLK_EN:
256 case U300_SYSCON_SBCER_EMIF_CLK_EN:
257 /* EMIF speeds */
258 switch(perf) {
259 case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
260 case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
261 return 13000000;
262 case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
263 return 52000000;
264 default:
265 return 104000000;
266 }
267 case U300_SYSCON_SBCER_CPU_CLK_EN:
268 /* And the fast CPU clock */
269 switch(perf) {
270 case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
271 case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
272 return 13000000;
273 case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
274 return 52000000;
275 case U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH:
276 return 104000000;
277 default:
278 return parent_rate; /* 208 MHz */
279 }
280 default:
281 /*
282 * The SLOW clocks and default just inherit the rate of
283 * their parent (typically PLL13 13 MHz).
284 */
285 return parent_rate;
286 }
287}
288
289static long
290syscon_clk_round_rate(struct clk_hw *hw, unsigned long rate,
291 unsigned long *prate)
292{
293 struct clk_syscon *sclk = to_syscon(hw);
294
295 if (sclk->clk_val != U300_SYSCON_SBCER_CPU_CLK_EN)
296 return *prate;
297 /* We really only support setting the rate of the CPU clock */
298 if (rate <= 13000000)
299 return 13000000;
300 if (rate <= 52000000)
301 return 52000000;
302 if (rate <= 104000000)
303 return 104000000;
304 return 208000000;
305}
306
307static int syscon_clk_set_rate(struct clk_hw *hw, unsigned long rate,
308 unsigned long parent_rate)
309{
310 struct clk_syscon *sclk = to_syscon(hw);
311 u16 val;
312
313 /* We only support setting the rate of the CPU clock */
314 if (sclk->clk_val != U300_SYSCON_SBCER_CPU_CLK_EN)
315 return -EINVAL;
316 switch (rate) {
317 case 13000000:
318 val = U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER;
319 break;
320 case 52000000:
321 val = U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE;
322 break;
323 case 104000000:
324 val = U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH;
325 break;
326 case 208000000:
327 val = U300_SYSCON_CCR_CLKING_PERFORMANCE_BEST;
328 break;
329 default:
330 return -EINVAL;
331 }
332 val |= readw(syscon_vbase + U300_SYSCON_CCR) &
333 ~U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK ;
334 writew(val, syscon_vbase + U300_SYSCON_CCR);
335 return 0;
336}
337
338static const struct clk_ops syscon_clk_ops = {
339 .prepare = syscon_clk_prepare,
340 .unprepare = syscon_clk_unprepare,
341 .enable = syscon_clk_enable,
342 .disable = syscon_clk_disable,
343 .is_enabled = syscon_clk_is_enabled,
344 .recalc_rate = syscon_clk_recalc_rate,
345 .round_rate = syscon_clk_round_rate,
346 .set_rate = syscon_clk_set_rate,
347};
348
349static struct clk * __init
350syscon_clk_register(struct device *dev, const char *name,
351 const char *parent_name, unsigned long flags,
352 bool hw_ctrld,
353 void __iomem *res_reg, u8 res_bit,
354 void __iomem *en_reg, u8 en_bit,
355 u16 clk_val)
356{
357 struct clk *clk;
358 struct clk_syscon *sclk;
359 struct clk_init_data init;
360
361 sclk = kzalloc(sizeof(struct clk_syscon), GFP_KERNEL);
362 if (!sclk) {
363 pr_err("could not allocate syscon clock %s\n",
364 name);
365 return ERR_PTR(-ENOMEM);
366 }
367 init.name = name;
368 init.ops = &syscon_clk_ops;
369 init.flags = flags;
370 init.parent_names = (parent_name ? &parent_name : NULL);
371 init.num_parents = (parent_name ? 1 : 0);
372 sclk->hw.init = &init;
373 sclk->hw_ctrld = hw_ctrld;
374 /* Assume the block is in reset at registration */
375 sclk->reset = true;
376 sclk->res_reg = res_reg;
377 sclk->res_bit = res_bit;
378 sclk->en_reg = en_reg;
379 sclk->en_bit = en_bit;
380 sclk->clk_val = clk_val;
381
382 clk = clk_register(dev, &sclk->hw);
383 if (IS_ERR(clk))
384 kfree(sclk);
385
386 return clk;
387}
388
389/**
390 * struct clk_mclk - U300 MCLK clock (MMC/SD clock)
391 * @hw: corresponding clock hardware entry
392 * @is_mspro: if this is the memory stick clock rather than MMC/SD
393 */
394struct clk_mclk {
395 struct clk_hw hw;
396 bool is_mspro;
397};
398
399#define to_mclk(_hw) container_of(_hw, struct clk_mclk, hw)
400
401static int mclk_clk_prepare(struct clk_hw *hw)
402{
403 struct clk_mclk *mclk = to_mclk(hw);
404 u16 val;
405
406 /* The MMC and MSPRO clocks need some special set-up */
407 if (!mclk->is_mspro) {
408 /* Set default MMC clock divisor to 18.9 MHz */
409 writew(0x0054U, syscon_vbase + U300_SYSCON_MMF0R);
410 val = readw(syscon_vbase + U300_SYSCON_MMCR);
411 /* Disable the MMC feedback clock */
412 val &= ~U300_SYSCON_MMCR_MMC_FB_CLK_SEL_ENABLE;
413 /* Disable MSPRO frequency */
414 val &= ~U300_SYSCON_MMCR_MSPRO_FREQSEL_ENABLE;
415 writew(val, syscon_vbase + U300_SYSCON_MMCR);
416 } else {
417 val = readw(syscon_vbase + U300_SYSCON_MMCR);
418 /* Disable the MMC feedback clock */
419 val &= ~U300_SYSCON_MMCR_MMC_FB_CLK_SEL_ENABLE;
420 /* Enable MSPRO frequency */
421 val |= U300_SYSCON_MMCR_MSPRO_FREQSEL_ENABLE;
422 writew(val, syscon_vbase + U300_SYSCON_MMCR);
423 }
424
425 return 0;
426}
427
428static unsigned long
429mclk_clk_recalc_rate(struct clk_hw *hw,
430 unsigned long parent_rate)
431{
432 u16 perf = syscon_get_perf();
433
434 switch (perf) {
435 case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
436 /*
437 * Here, the 208 MHz PLL gets shut down and the always
438 * on 13 MHz PLL used for RTC etc kicks into use
439 * instead.
440 */
441 return 13000000;
442 case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
443 case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
444 case U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH:
445 case U300_SYSCON_CCR_CLKING_PERFORMANCE_BEST:
446 {
447 /*
448 * This clock is under program control. The register is
449 * divided in two nybbles, bit 7-4 gives cycles-1 to count
450 * high, bit 3-0 gives cycles-1 to count low. Distribute
451 * these with no more than 1 cycle difference between
452 * low and high and add low and high to get the actual
453 * divisor. The base PLL is 208 MHz. Writing 0x00 will
454 * divide by 1 and 1 so the highest frequency possible
455 * is 104 MHz.
456 *
457 * e.g. 0x54 =>
458 * f = 208 / ((5+1) + (4+1)) = 208 / 11 = 18.9 MHz
459 */
460 u16 val = readw(syscon_vbase + U300_SYSCON_MMF0R) &
461 U300_SYSCON_MMF0R_MASK;
462 switch (val) {
463 case 0x0054:
464 return 18900000;
465 case 0x0044:
466 return 20800000;
467 case 0x0043:
468 return 23100000;
469 case 0x0033:
470 return 26000000;
471 case 0x0032:
472 return 29700000;
473 case 0x0022:
474 return 34700000;
475 case 0x0021:
476 return 41600000;
477 case 0x0011:
478 return 52000000;
479 case 0x0000:
480 return 104000000;
481 default:
482 break;
483 }
484 }
485 default:
486 break;
487 }
488 return parent_rate;
489}
490
491static long
492mclk_clk_round_rate(struct clk_hw *hw, unsigned long rate,
493 unsigned long *prate)
494{
495 if (rate <= 18900000)
496 return 18900000;
497 if (rate <= 20800000)
498 return 20800000;
499 if (rate <= 23100000)
500 return 23100000;
501 if (rate <= 26000000)
502 return 26000000;
503 if (rate <= 29700000)
504 return 29700000;
505 if (rate <= 34700000)
506 return 34700000;
507 if (rate <= 41600000)
508 return 41600000;
509 /* Highest rate */
510 return 52000000;
511}
512
513static int mclk_clk_set_rate(struct clk_hw *hw, unsigned long rate,
514 unsigned long parent_rate)
515{
516 u16 val;
517 u16 reg;
518
519 switch (rate) {
520 case 18900000:
521 val = 0x0054;
522 break;
523 case 20800000:
524 val = 0x0044;
525 break;
526 case 23100000:
527 val = 0x0043;
528 break;
529 case 26000000:
530 val = 0x0033;
531 break;
532 case 29700000:
533 val = 0x0032;
534 break;
535 case 34700000:
536 val = 0x0022;
537 break;
538 case 41600000:
539 val = 0x0021;
540 break;
541 case 52000000:
542 val = 0x0011;
543 break;
544 case 104000000:
545 val = 0x0000;
546 break;
547 default:
548 return -EINVAL;
549 }
550
551 reg = readw(syscon_vbase + U300_SYSCON_MMF0R) &
552 ~U300_SYSCON_MMF0R_MASK;
553 writew(reg | val, syscon_vbase + U300_SYSCON_MMF0R);
554 return 0;
555}
556
557static const struct clk_ops mclk_ops = {
558 .prepare = mclk_clk_prepare,
559 .recalc_rate = mclk_clk_recalc_rate,
560 .round_rate = mclk_clk_round_rate,
561 .set_rate = mclk_clk_set_rate,
562};
563
564static struct clk * __init
565mclk_clk_register(struct device *dev, const char *name,
566 const char *parent_name, bool is_mspro)
567{
568 struct clk *clk;
569 struct clk_mclk *mclk;
570 struct clk_init_data init;
571
572 mclk = kzalloc(sizeof(struct clk_mclk), GFP_KERNEL);
573 if (!mclk) {
574 pr_err("could not allocate MMC/SD clock %s\n",
575 name);
576 return ERR_PTR(-ENOMEM);
577 }
578 init.name = "mclk";
579 init.ops = &mclk_ops;
580 init.flags = 0;
581 init.parent_names = (parent_name ? &parent_name : NULL);
582 init.num_parents = (parent_name ? 1 : 0);
583 mclk->hw.init = &init;
584 mclk->is_mspro = is_mspro;
585
586 clk = clk_register(dev, &mclk->hw);
587 if (IS_ERR(clk))
588 kfree(mclk);
589
590 return clk;
591}
592
593void __init u300_clk_init(void __iomem *base)
594{
595 u16 val;
596 struct clk *clk;
597
598 syscon_vbase = base;
599
600 /* Set system to run at PLL208, max performance, a known state. */
601 val = readw(syscon_vbase + U300_SYSCON_CCR);
602 val &= ~U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK;
603 writew(val, syscon_vbase + U300_SYSCON_CCR);
604 /* Wait for the PLL208 to lock if not locked in yet */
605 while (!(readw(syscon_vbase + U300_SYSCON_CSR) &
606 U300_SYSCON_CSR_PLL208_LOCK_IND));
607
608 /* Power management enable */
609 val = readw(syscon_vbase + U300_SYSCON_PMCR);
610 val |= U300_SYSCON_PMCR_PWR_MGNT_ENABLE;
611 writew(val, syscon_vbase + U300_SYSCON_PMCR);
612
613 /* These are always available (RTC and PLL13) */
614 clk = clk_register_fixed_rate(NULL, "app_32_clk", NULL,
615 CLK_IS_ROOT, 32768);
616 /* The watchdog sits directly on the 32 kHz clock */
617 clk_register_clkdev(clk, NULL, "coh901327_wdog");
618 clk = clk_register_fixed_rate(NULL, "pll13", NULL,
619 CLK_IS_ROOT, 13000000);
620
621 /* These derive from PLL208 */
622 clk = clk_register_fixed_rate(NULL, "pll208", NULL,
623 CLK_IS_ROOT, 208000000);
624 clk = clk_register_fixed_factor(NULL, "app_208_clk", "pll208",
625 0, 1, 1);
626 clk = clk_register_fixed_factor(NULL, "app_104_clk", "pll208",
627 0, 1, 2);
628 clk = clk_register_fixed_factor(NULL, "app_52_clk", "pll208",
629 0, 1, 4);
630 /* The 52 MHz is divided down to 26 MHz */
631 clk = clk_register_fixed_factor(NULL, "app_26_clk", "app_52_clk",
632 0, 1, 2);
633
634 /* Directly on the AMBA interconnect */
635 clk = syscon_clk_register(NULL, "cpu_clk", "app_208_clk", 0, true,
636 syscon_vbase + U300_SYSCON_RRR, 3,
637 syscon_vbase + U300_SYSCON_CERR, 3,
638 U300_SYSCON_SBCER_CPU_CLK_EN);
639 clk = syscon_clk_register(NULL, "dmac_clk", "app_52_clk", 0, true,
640 syscon_vbase + U300_SYSCON_RRR, 4,
641 syscon_vbase + U300_SYSCON_CERR, 4,
642 U300_SYSCON_SBCER_DMAC_CLK_EN);
643 clk_register_clkdev(clk, NULL, "dma");
644 clk = syscon_clk_register(NULL, "fsmc_clk", "app_52_clk", 0, false,
645 syscon_vbase + U300_SYSCON_RRR, 6,
646 syscon_vbase + U300_SYSCON_CERR, 6,
647 U300_SYSCON_SBCER_NANDIF_CLK_EN);
648 clk_register_clkdev(clk, NULL, "fsmc-nand");
649 clk = syscon_clk_register(NULL, "xgam_clk", "app_52_clk", 0, true,
650 syscon_vbase + U300_SYSCON_RRR, 8,
651 syscon_vbase + U300_SYSCON_CERR, 8,
652 U300_SYSCON_SBCER_XGAM_CLK_EN);
653 clk_register_clkdev(clk, NULL, "xgam");
654 clk = syscon_clk_register(NULL, "semi_clk", "app_104_clk", 0, false,
655 syscon_vbase + U300_SYSCON_RRR, 9,
656 syscon_vbase + U300_SYSCON_CERR, 9,
657 U300_SYSCON_SBCER_SEMI_CLK_EN);
658 clk_register_clkdev(clk, NULL, "semi");
659
660 /* AHB bridge clocks */
661 clk = syscon_clk_register(NULL, "ahb_subsys_clk", "app_52_clk", 0, true,
662 syscon_vbase + U300_SYSCON_RRR, 10,
663 syscon_vbase + U300_SYSCON_CERR, 10,
664 U300_SYSCON_SBCER_AHB_SUBSYS_BRIDGE_CLK_EN);
665 clk = syscon_clk_register(NULL, "intcon_clk", "ahb_subsys_clk", 0, false,
666 syscon_vbase + U300_SYSCON_RRR, 12,
667 syscon_vbase + U300_SYSCON_CERR, 12,
668 /* Cannot be enabled, just taken out of reset */
669 0xFFFFU);
670 clk_register_clkdev(clk, NULL, "intcon");
671 clk = syscon_clk_register(NULL, "emif_clk", "ahb_subsys_clk", 0, false,
672 syscon_vbase + U300_SYSCON_RRR, 5,
673 syscon_vbase + U300_SYSCON_CERR, 5,
674 U300_SYSCON_SBCER_EMIF_CLK_EN);
675 clk_register_clkdev(clk, NULL, "pl172");
676
677 /* FAST bridge clocks */
678 clk = syscon_clk_register(NULL, "fast_clk", "app_26_clk", 0, true,
679 syscon_vbase + U300_SYSCON_RFR, 0,
680 syscon_vbase + U300_SYSCON_CEFR, 0,
681 U300_SYSCON_SBCER_FAST_BRIDGE_CLK_EN);
682 clk = syscon_clk_register(NULL, "i2c0_p_clk", "fast_clk", 0, false,
683 syscon_vbase + U300_SYSCON_RFR, 1,
684 syscon_vbase + U300_SYSCON_CEFR, 1,
685 U300_SYSCON_SBCER_I2C0_CLK_EN);
686 clk_register_clkdev(clk, NULL, "stu300.0");
687 clk = syscon_clk_register(NULL, "i2c1_p_clk", "fast_clk", 0, false,
688 syscon_vbase + U300_SYSCON_RFR, 2,
689 syscon_vbase + U300_SYSCON_CEFR, 2,
690 U300_SYSCON_SBCER_I2C1_CLK_EN);
691 clk_register_clkdev(clk, NULL, "stu300.1");
692 clk = syscon_clk_register(NULL, "mmc_p_clk", "fast_clk", 0, false,
693 syscon_vbase + U300_SYSCON_RFR, 5,
694 syscon_vbase + U300_SYSCON_CEFR, 5,
695 U300_SYSCON_SBCER_MMC_CLK_EN);
696 clk_register_clkdev(clk, "apb_pclk", "mmci");
697 clk = syscon_clk_register(NULL, "spi_p_clk", "fast_clk", 0, false,
698 syscon_vbase + U300_SYSCON_RFR, 6,
699 syscon_vbase + U300_SYSCON_CEFR, 6,
700 U300_SYSCON_SBCER_SPI_CLK_EN);
701 /* The SPI has no external clock for the outward bus, uses the pclk */
702 clk_register_clkdev(clk, NULL, "pl022");
703 clk_register_clkdev(clk, "apb_pclk", "pl022");
704
705 /* SLOW bridge clocks */
706 clk = syscon_clk_register(NULL, "slow_clk", "pll13", 0, true,
707 syscon_vbase + U300_SYSCON_RSR, 0,
708 syscon_vbase + U300_SYSCON_CESR, 0,
709 U300_SYSCON_SBCER_SLOW_BRIDGE_CLK_EN);
710 clk = syscon_clk_register(NULL, "uart0_clk", "slow_clk", 0, false,
711 syscon_vbase + U300_SYSCON_RSR, 1,
712 syscon_vbase + U300_SYSCON_CESR, 1,
713 U300_SYSCON_SBCER_UART_CLK_EN);
714 /* Same clock is used for APB and outward bus */
715 clk_register_clkdev(clk, NULL, "uart0");
716 clk_register_clkdev(clk, "apb_pclk", "uart0");
717 clk = syscon_clk_register(NULL, "gpio_clk", "slow_clk", 0, false,
718 syscon_vbase + U300_SYSCON_RSR, 4,
719 syscon_vbase + U300_SYSCON_CESR, 4,
720 U300_SYSCON_SBCER_GPIO_CLK_EN);
721 clk_register_clkdev(clk, NULL, "u300-gpio");
722 clk = syscon_clk_register(NULL, "keypad_clk", "slow_clk", 0, false,
723 syscon_vbase + U300_SYSCON_RSR, 5,
724 syscon_vbase + U300_SYSCON_CESR, 6,
725 U300_SYSCON_SBCER_KEYPAD_CLK_EN);
726 clk_register_clkdev(clk, NULL, "coh901461-keypad");
727 clk = syscon_clk_register(NULL, "rtc_clk", "slow_clk", 0, true,
728 syscon_vbase + U300_SYSCON_RSR, 6,
729 /* No clock enable register bit */
730 NULL, 0, 0xFFFFU);
731 clk_register_clkdev(clk, NULL, "rtc-coh901331");
732 clk = syscon_clk_register(NULL, "app_tmr_clk", "slow_clk", 0, false,
733 syscon_vbase + U300_SYSCON_RSR, 7,
734 syscon_vbase + U300_SYSCON_CESR, 7,
735 U300_SYSCON_SBCER_APP_TMR_CLK_EN);
736 clk_register_clkdev(clk, NULL, "apptimer");
737 clk = syscon_clk_register(NULL, "acc_tmr_clk", "slow_clk", 0, false,
738 syscon_vbase + U300_SYSCON_RSR, 8,
739 syscon_vbase + U300_SYSCON_CESR, 8,
740 U300_SYSCON_SBCER_ACC_TMR_CLK_EN);
741 clk_register_clkdev(clk, NULL, "timer");
742
743 /* Then this special MMC/SD clock */
744 clk = mclk_clk_register(NULL, "mmc_clk", "mmc_p_clk", false);
745 clk_register_clkdev(clk, NULL, "mmci");
746}
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
new file mode 100644
index 000000000000..e7b7765e85f3
--- /dev/null
+++ b/drivers/clk/clk-wm831x.c
@@ -0,0 +1,428 @@
1/*
2 * WM831x clock control
3 *
4 * Copyright 2011-2 Wolfson Microelectronics PLC.
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14
15#include <linux/clk.h>
16#include <linux/clk-provider.h>
17#include <linux/delay.h>
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/platform_device.h>
21#include <linux/mfd/wm831x/core.h>
22
23struct wm831x_clk {
24 struct wm831x *wm831x;
25 struct clk_hw xtal_hw;
26 struct clk_hw fll_hw;
27 struct clk_hw clkout_hw;
28 struct clk *xtal;
29 struct clk *fll;
30 struct clk *clkout;
31 bool xtal_ena;
32};
33
34static int wm831x_xtal_is_enabled(struct clk_hw *hw)
35{
36 struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
37 xtal_hw);
38
39 return clkdata->xtal_ena;
40}
41
42static unsigned long wm831x_xtal_recalc_rate(struct clk_hw *hw,
43 unsigned long parent_rate)
44{
45 struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
46 xtal_hw);
47
48 if (clkdata->xtal_ena)
49 return 32768;
50 else
51 return 0;
52}
53
54static const struct clk_ops wm831x_xtal_ops = {
55 .is_enabled = wm831x_xtal_is_enabled,
56 .recalc_rate = wm831x_xtal_recalc_rate,
57};
58
59static struct clk_init_data wm831x_xtal_init = {
60 .name = "xtal",
61 .ops = &wm831x_xtal_ops,
62 .flags = CLK_IS_ROOT,
63};
64
65static const unsigned long wm831x_fll_auto_rates[] = {
66 2048000,
67 11289600,
68 12000000,
69 12288000,
70 19200000,
71 22579600,
72 24000000,
73 24576000,
74};
75
76static int wm831x_fll_is_enabled(struct clk_hw *hw)
77{
78 struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
79 fll_hw);
80 struct wm831x *wm831x = clkdata->wm831x;
81 int ret;
82
83 ret = wm831x_reg_read(wm831x, WM831X_FLL_CONTROL_1);
84 if (ret < 0) {
85 dev_err(wm831x->dev, "Unable to read FLL_CONTROL_1: %d\n",
86 ret);
87 return true;
88 }
89
90 return (ret & WM831X_FLL_ENA) != 0;
91}
92
93static int wm831x_fll_prepare(struct clk_hw *hw)
94{
95 struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
96 fll_hw);
97 struct wm831x *wm831x = clkdata->wm831x;
98 int ret;
99
100 ret = wm831x_set_bits(wm831x, WM831X_FLL_CONTROL_2,
101 WM831X_FLL_ENA, WM831X_FLL_ENA);
102 if (ret != 0)
103 dev_crit(wm831x->dev, "Failed to enable FLL: %d\n", ret);
104
105 usleep_range(2000, 2000);
106
107 return ret;
108}
109
110static void wm831x_fll_unprepare(struct clk_hw *hw)
111{
112 struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
113 fll_hw);
114 struct wm831x *wm831x = clkdata->wm831x;
115 int ret;
116
117 ret = wm831x_set_bits(wm831x, WM831X_FLL_CONTROL_2, WM831X_FLL_ENA, 0);
118 if (ret != 0)
119 dev_crit(wm831x->dev, "Failed to disable FLL: %d\n", ret);
120}
121
122static unsigned long wm831x_fll_recalc_rate(struct clk_hw *hw,
123 unsigned long parent_rate)
124{
125 struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
126 fll_hw);
127 struct wm831x *wm831x = clkdata->wm831x;
128 int ret;
129
130 ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
131 if (ret < 0) {
132 dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_2: %d\n",
133 ret);
134 return 0;
135 }
136
137 if (ret & WM831X_FLL_AUTO)
138 return wm831x_fll_auto_rates[ret & WM831X_FLL_AUTO_FREQ_MASK];
139
140 dev_err(wm831x->dev, "FLL only supported in AUTO mode\n");
141
142 return 0;
143}
144
145static long wm831x_fll_round_rate(struct clk_hw *hw, unsigned long rate,
146 unsigned long *unused)
147{
148 int best = 0;
149 int i;
150
151 for (i = 0; i < ARRAY_SIZE(wm831x_fll_auto_rates); i++)
152 if (abs(wm831x_fll_auto_rates[i] - rate) <
153 abs(wm831x_fll_auto_rates[best] - rate))
154 best = i;
155
156 return wm831x_fll_auto_rates[best];
157}
158
159static int wm831x_fll_set_rate(struct clk_hw *hw, unsigned long rate,
160 unsigned long parent_rate)
161{
162 struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
163 fll_hw);
164 struct wm831x *wm831x = clkdata->wm831x;
165 int i;
166
167 for (i = 0; i < ARRAY_SIZE(wm831x_fll_auto_rates); i++)
168 if (wm831x_fll_auto_rates[i] == rate)
169 break;
170 if (i == ARRAY_SIZE(wm831x_fll_auto_rates))
171 return -EINVAL;
172
173 if (wm831x_fll_is_enabled(hw))
174 return -EPERM;
175
176 return wm831x_set_bits(wm831x, WM831X_CLOCK_CONTROL_2,
177 WM831X_FLL_AUTO_FREQ_MASK, i);
178}
179
180static const char *wm831x_fll_parents[] = {
181 "xtal",
182 "clkin",
183};
184
185static u8 wm831x_fll_get_parent(struct clk_hw *hw)
186{
187 struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
188 fll_hw);
189 struct wm831x *wm831x = clkdata->wm831x;
190 int ret;
191
192 /* AUTO mode is always clocked from the crystal */
193 ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
194 if (ret < 0) {
195 dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_2: %d\n",
196 ret);
197 return 0;
198 }
199
200 if (ret & WM831X_FLL_AUTO)
201 return 0;
202
203 ret = wm831x_reg_read(wm831x, WM831X_FLL_CONTROL_5);
204 if (ret < 0) {
205 dev_err(wm831x->dev, "Unable to read FLL_CONTROL_5: %d\n",
206 ret);
207 return 0;
208 }
209
210 switch (ret & WM831X_FLL_CLK_SRC_MASK) {
211 case 0:
212 return 0;
213 case 1:
214 return 1;
215 default:
216 dev_err(wm831x->dev, "Unsupported FLL clock source %d\n",
217 ret & WM831X_FLL_CLK_SRC_MASK);
218 return 0;
219 }
220}
221
222static const struct clk_ops wm831x_fll_ops = {
223 .is_enabled = wm831x_fll_is_enabled,
224 .prepare = wm831x_fll_prepare,
225 .unprepare = wm831x_fll_unprepare,
226 .round_rate = wm831x_fll_round_rate,
227 .recalc_rate = wm831x_fll_recalc_rate,
228 .set_rate = wm831x_fll_set_rate,
229 .get_parent = wm831x_fll_get_parent,
230};
231
232static struct clk_init_data wm831x_fll_init = {
233 .name = "fll",
234 .ops = &wm831x_fll_ops,
235 .parent_names = wm831x_fll_parents,
236 .num_parents = ARRAY_SIZE(wm831x_fll_parents),
237 .flags = CLK_SET_RATE_GATE,
238};
239
240static int wm831x_clkout_is_enabled(struct clk_hw *hw)
241{
242 struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
243 clkout_hw);
244 struct wm831x *wm831x = clkdata->wm831x;
245 int ret;
246
247 ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_1);
248 if (ret < 0) {
249 dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_1: %d\n",
250 ret);
251 return true;
252 }
253
254 return (ret & WM831X_CLKOUT_ENA) != 0;
255}
256
257static int wm831x_clkout_prepare(struct clk_hw *hw)
258{
259 struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
260 clkout_hw);
261 struct wm831x *wm831x = clkdata->wm831x;
262 int ret;
263
264 ret = wm831x_reg_unlock(wm831x);
265 if (ret != 0) {
266 dev_crit(wm831x->dev, "Failed to unlock registers: %d\n", ret);
267 return ret;
268 }
269
270 ret = wm831x_set_bits(wm831x, WM831X_CLOCK_CONTROL_1,
271 WM831X_CLKOUT_ENA, WM831X_CLKOUT_ENA);
272 if (ret != 0)
273 dev_crit(wm831x->dev, "Failed to enable CLKOUT: %d\n", ret);
274
275 wm831x_reg_lock(wm831x);
276
277 return ret;
278}
279
280static void wm831x_clkout_unprepare(struct clk_hw *hw)
281{
282 struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
283 clkout_hw);
284 struct wm831x *wm831x = clkdata->wm831x;
285 int ret;
286
287 ret = wm831x_reg_unlock(wm831x);
288 if (ret != 0) {
289 dev_crit(wm831x->dev, "Failed to unlock registers: %d\n", ret);
290 return;
291 }
292
293 ret = wm831x_set_bits(wm831x, WM831X_CLOCK_CONTROL_1,
294 WM831X_CLKOUT_ENA, 0);
295 if (ret != 0)
296 dev_crit(wm831x->dev, "Failed to disable CLKOUT: %d\n", ret);
297
298 wm831x_reg_lock(wm831x);
299}
300
301static const char *wm831x_clkout_parents[] = {
302 "xtal",
303 "fll",
304};
305
306static u8 wm831x_clkout_get_parent(struct clk_hw *hw)
307{
308 struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
309 clkout_hw);
310 struct wm831x *wm831x = clkdata->wm831x;
311 int ret;
312
313 ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_1);
314 if (ret < 0) {
315 dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_1: %d\n",
316 ret);
317 return 0;
318 }
319
320 if (ret & WM831X_CLKOUT_SRC)
321 return 0;
322 else
323 return 1;
324}
325
326static int wm831x_clkout_set_parent(struct clk_hw *hw, u8 parent)
327{
328 struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
329 clkout_hw);
330 struct wm831x *wm831x = clkdata->wm831x;
331
332 return wm831x_set_bits(wm831x, WM831X_CLOCK_CONTROL_1,
333 WM831X_CLKOUT_SRC,
334 parent << WM831X_CLKOUT_SRC_SHIFT);
335}
336
337static const struct clk_ops wm831x_clkout_ops = {
338 .is_enabled = wm831x_clkout_is_enabled,
339 .prepare = wm831x_clkout_prepare,
340 .unprepare = wm831x_clkout_unprepare,
341 .get_parent = wm831x_clkout_get_parent,
342 .set_parent = wm831x_clkout_set_parent,
343};
344
345static struct clk_init_data wm831x_clkout_init = {
346 .name = "clkout",
347 .ops = &wm831x_clkout_ops,
348 .parent_names = wm831x_clkout_parents,
349 .num_parents = ARRAY_SIZE(wm831x_clkout_parents),
350 .flags = CLK_SET_RATE_PARENT,
351};
352
353static __devinit int wm831x_clk_probe(struct platform_device *pdev)
354{
355 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
356 struct wm831x_clk *clkdata;
357 int ret;
358
359 clkdata = devm_kzalloc(&pdev->dev, sizeof(*clkdata), GFP_KERNEL);
360 if (!clkdata)
361 return -ENOMEM;
362
363 /* XTAL_ENA can only be set via OTP/InstantConfig so just read once */
364 ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
365 if (ret < 0) {
366 dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_2: %d\n",
367 ret);
368 return ret;
369 }
370 clkdata->xtal_ena = ret & WM831X_XTAL_ENA;
371
372 clkdata->xtal_hw.init = &wm831x_xtal_init;
373 clkdata->xtal = clk_register(&pdev->dev, &clkdata->xtal_hw);
374 if (IS_ERR(clkdata->xtal))
375 return PTR_ERR(clkdata->xtal);
376
377 clkdata->fll_hw.init = &wm831x_fll_init;
378 clkdata->fll = clk_register(&pdev->dev, &clkdata->fll_hw);
379 if (IS_ERR(clkdata->fll)) {
380 ret = PTR_ERR(clkdata->fll);
381 goto err_xtal;
382 }
383
384 clkdata->clkout_hw.init = &wm831x_clkout_init;
385 clkdata->clkout = clk_register(&pdev->dev, &clkdata->clkout_hw);
386 if (IS_ERR(clkdata->clkout)) {
387 ret = PTR_ERR(clkdata->clkout);
388 goto err_fll;
389 }
390
391 dev_set_drvdata(&pdev->dev, clkdata);
392
393 return 0;
394
395err_fll:
396 clk_unregister(clkdata->fll);
397err_xtal:
398 clk_unregister(clkdata->xtal);
399 return ret;
400}
401
402static int __devexit wm831x_clk_remove(struct platform_device *pdev)
403{
404 struct wm831x_clk *clkdata = dev_get_drvdata(&pdev->dev);
405
406 clk_unregister(clkdata->clkout);
407 clk_unregister(clkdata->fll);
408 clk_unregister(clkdata->xtal);
409
410 return 0;
411}
412
413static struct platform_driver wm831x_clk_driver = {
414 .probe = wm831x_clk_probe,
415 .remove = __devexit_p(wm831x_clk_remove),
416 .driver = {
417 .name = "wm831x-clk",
418 .owner = THIS_MODULE,
419 },
420};
421
422module_platform_driver(wm831x_clk_driver);
423
424/* Module information */
425MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
426MODULE_DESCRIPTION("WM831x clock driver");
427MODULE_LICENSE("GPL");
428MODULE_ALIAS("platform:wm831x-clk");
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 687b00d67c8a..efdfd009c270 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -16,6 +16,7 @@
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/list.h> 17#include <linux/list.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/of.h>
19 20
20static DEFINE_SPINLOCK(enable_lock); 21static DEFINE_SPINLOCK(enable_lock);
21static DEFINE_MUTEX(prepare_lock); 22static DEFINE_MUTEX(prepare_lock);
@@ -464,6 +465,9 @@ static void __clk_disable(struct clk *clk)
464 if (!clk) 465 if (!clk)
465 return; 466 return;
466 467
468 if (WARN_ON(IS_ERR(clk)))
469 return;
470
467 if (WARN_ON(clk->enable_count == 0)) 471 if (WARN_ON(clk->enable_count == 0))
468 return; 472 return;
469 473
@@ -850,18 +854,21 @@ static void clk_change_rate(struct clk *clk)
850{ 854{
851 struct clk *child; 855 struct clk *child;
852 unsigned long old_rate; 856 unsigned long old_rate;
857 unsigned long best_parent_rate = 0;
853 struct hlist_node *tmp; 858 struct hlist_node *tmp;
854 859
855 old_rate = clk->rate; 860 old_rate = clk->rate;
856 861
862 if (clk->parent)
863 best_parent_rate = clk->parent->rate;
864
857 if (clk->ops->set_rate) 865 if (clk->ops->set_rate)
858 clk->ops->set_rate(clk->hw, clk->new_rate, clk->parent->rate); 866 clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
859 867
860 if (clk->ops->recalc_rate) 868 if (clk->ops->recalc_rate)
861 clk->rate = clk->ops->recalc_rate(clk->hw, 869 clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
862 clk->parent->rate);
863 else 870 else
864 clk->rate = clk->parent->rate; 871 clk->rate = best_parent_rate;
865 872
866 if (clk->notifier_count && old_rate != clk->rate) 873 if (clk->notifier_count && old_rate != clk->rate)
867 __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate); 874 __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
@@ -999,7 +1006,7 @@ static struct clk *__clk_init_parent(struct clk *clk)
999 1006
1000 if (!clk->parents) 1007 if (!clk->parents)
1001 clk->parents = 1008 clk->parents =
1002 kmalloc((sizeof(struct clk*) * clk->num_parents), 1009 kzalloc((sizeof(struct clk*) * clk->num_parents),
1003 GFP_KERNEL); 1010 GFP_KERNEL);
1004 1011
1005 if (!clk->parents) 1012 if (!clk->parents)
@@ -1064,21 +1071,24 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
1064 1071
1065 old_parent = clk->parent; 1072 old_parent = clk->parent;
1066 1073
1067 /* find index of new parent clock using cached parent ptrs */ 1074 if (!clk->parents)
1068 for (i = 0; i < clk->num_parents; i++) 1075 clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
1069 if (clk->parents[i] == parent) 1076 GFP_KERNEL);
1070 break;
1071 1077
1072 /* 1078 /*
1073 * find index of new parent clock using string name comparison 1079 * find index of new parent clock using cached parent ptrs,
1074 * also try to cache the parent to avoid future calls to __clk_lookup 1080 * or if not yet cached, use string name comparison and cache
1081 * them now to avoid future calls to __clk_lookup.
1075 */ 1082 */
1076 if (i == clk->num_parents) 1083 for (i = 0; i < clk->num_parents; i++) {
1077 for (i = 0; i < clk->num_parents; i++) 1084 if (clk->parents && clk->parents[i] == parent)
1078 if (!strcmp(clk->parent_names[i], parent->name)) { 1085 break;
1086 else if (!strcmp(clk->parent_names[i], parent->name)) {
1087 if (clk->parents)
1079 clk->parents[i] = __clk_lookup(parent->name); 1088 clk->parents[i] = __clk_lookup(parent->name);
1080 break; 1089 break;
1081 } 1090 }
1091 }
1082 1092
1083 if (i == clk->num_parents) { 1093 if (i == clk->num_parents) {
1084 pr_debug("%s: clock %s is not a possible parent of clock %s\n", 1094 pr_debug("%s: clock %s is not a possible parent of clock %s\n",
@@ -1229,8 +1239,8 @@ int __clk_init(struct device *dev, struct clk *clk)
1229 * If clk->parents is not NULL we skip this entire block. This allows 1239 * If clk->parents is not NULL we skip this entire block. This allows
1230 * for clock drivers to statically initialize clk->parents. 1240 * for clock drivers to statically initialize clk->parents.
1231 */ 1241 */
1232 if (clk->num_parents && !clk->parents) { 1242 if (clk->num_parents > 1 && !clk->parents) {
1233 clk->parents = kmalloc((sizeof(struct clk*) * clk->num_parents), 1243 clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
1234 GFP_KERNEL); 1244 GFP_KERNEL);
1235 /* 1245 /*
1236 * __clk_lookup returns NULL for parents that have not been 1246 * __clk_lookup returns NULL for parents that have not been
@@ -1544,3 +1554,142 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
1544 return ret; 1554 return ret;
1545} 1555}
1546EXPORT_SYMBOL_GPL(clk_notifier_unregister); 1556EXPORT_SYMBOL_GPL(clk_notifier_unregister);
1557
1558#ifdef CONFIG_OF
1559/**
1560 * struct of_clk_provider - Clock provider registration structure
1561 * @link: Entry in global list of clock providers
1562 * @node: Pointer to device tree node of clock provider
1563 * @get: Get clock callback. Returns NULL or a struct clk for the
1564 * given clock specifier
1565 * @data: context pointer to be passed into @get callback
1566 */
1567struct of_clk_provider {
1568 struct list_head link;
1569
1570 struct device_node *node;
1571 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
1572 void *data;
1573};
1574
1575static LIST_HEAD(of_clk_providers);
1576static DEFINE_MUTEX(of_clk_lock);
1577
1578struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
1579 void *data)
1580{
1581 return data;
1582}
1583EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
1584
1585/**
1586 * of_clk_add_provider() - Register a clock provider for a node
1587 * @np: Device node pointer associated with clock provider
1588 * @clk_src_get: callback for decoding clock
1589 * @data: context pointer for @clk_src_get callback.
1590 */
1591int of_clk_add_provider(struct device_node *np,
1592 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
1593 void *data),
1594 void *data)
1595{
1596 struct of_clk_provider *cp;
1597
1598 cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
1599 if (!cp)
1600 return -ENOMEM;
1601
1602 cp->node = of_node_get(np);
1603 cp->data = data;
1604 cp->get = clk_src_get;
1605
1606 mutex_lock(&of_clk_lock);
1607 list_add(&cp->link, &of_clk_providers);
1608 mutex_unlock(&of_clk_lock);
1609 pr_debug("Added clock from %s\n", np->full_name);
1610
1611 return 0;
1612}
1613EXPORT_SYMBOL_GPL(of_clk_add_provider);
1614
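(Editorial aside, not part of the patch: a minimal provider registration could look like the sketch below. The setup function, clock name and rate are hypothetical; of_clk_src_simple_get() simply hands the same struct clk back for every specifier, which suits single-output providers.)

static void __init foo_osc_setup(struct device_node *np)
{
	struct clk *clk;

	/* One fixed-rate root clock for this DT node (hypothetical) */
	clk = clk_register_fixed_rate(NULL, "foo_osc", NULL,
				      CLK_IS_ROOT, 24000000);
	if (!IS_ERR(clk))
		of_clk_add_provider(np, of_clk_src_simple_get, clk);
}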
1615/**
1616 * of_clk_del_provider() - Remove a previously registered clock provider
1617 * @np: Device node pointer associated with clock provider
1618 */
1619void of_clk_del_provider(struct device_node *np)
1620{
1621 struct of_clk_provider *cp;
1622
1623 mutex_lock(&of_clk_lock);
1624 list_for_each_entry(cp, &of_clk_providers, link) {
1625 if (cp->node == np) {
1626 list_del(&cp->link);
1627 of_node_put(cp->node);
1628 kfree(cp);
1629 break;
1630 }
1631 }
1632 mutex_unlock(&of_clk_lock);
1633}
1634EXPORT_SYMBOL_GPL(of_clk_del_provider);
1635
1636struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
1637{
1638 struct of_clk_provider *provider;
1639 struct clk *clk = ERR_PTR(-ENOENT);
1640
1641 /* Check if we have such a provider in our array */
1642 mutex_lock(&of_clk_lock);
1643 list_for_each_entry(provider, &of_clk_providers, link) {
1644 if (provider->node == clkspec->np)
1645 clk = provider->get(clkspec, provider->data);
1646 if (!IS_ERR(clk))
1647 break;
1648 }
1649 mutex_unlock(&of_clk_lock);
1650
1651 return clk;
1652}
1653
1654const char *of_clk_get_parent_name(struct device_node *np, int index)
1655{
1656 struct of_phandle_args clkspec;
1657 const char *clk_name;
1658 int rc;
1659
1660 if (index < 0)
1661 return NULL;
1662
1663 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
1664 &clkspec);
1665 if (rc)
1666 return NULL;
1667
1668 if (of_property_read_string_index(clkspec.np, "clock-output-names",
1669 clkspec.args_count ? clkspec.args[0] : 0,
1670 &clk_name) < 0)
1671 clk_name = clkspec.np->name;
1672
1673 of_node_put(clkspec.np);
1674 return clk_name;
1675}
1676EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
1677
1678/**
1679 * of_clk_init() - Scan and init clock providers from the DT
1680 * @matches: array of compatible values and init functions for providers.
1681 *
1682 * This function scans the device tree for matching clock providers and
1683 * calls their initialization functions.
1684 */
1685void __init of_clk_init(const struct of_device_id *matches)
1686{
1687 struct device_node *np;
1688
1689 for_each_matching_node(np, matches) {
1690 const struct of_device_id *match = of_match_node(matches, np);
1691 of_clk_init_cb_t clk_init_cb = match->data;
1692 clk_init_cb(np);
1693 }
1694}
1695#endif
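(Editorial aside, not part of the patch: a platform would typically pair of_clk_init() with a match table whose .data members are the init callbacks, continuing the hypothetical foo_osc_setup() sketch above.)

static const struct of_device_id foo_clk_match[] __initconst = {
	{ .compatible = "foo,osc-clock", .data = foo_osc_setup, },
	{ /* sentinel */ }
};

void __init foo_init_clocks(void)
{
	of_clk_init(foo_clk_match);
}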
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index c535cf8c5770..d423c9bdd71a 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -19,10 +19,80 @@
19#include <linux/mutex.h> 19#include <linux/mutex.h>
20#include <linux/clk.h> 20#include <linux/clk.h>
21#include <linux/clkdev.h> 21#include <linux/clkdev.h>
22#include <linux/of.h>
22 23
23static LIST_HEAD(clocks); 24static LIST_HEAD(clocks);
24static DEFINE_MUTEX(clocks_mutex); 25static DEFINE_MUTEX(clocks_mutex);
25 26
27#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
28struct clk *of_clk_get(struct device_node *np, int index)
29{
30 struct of_phandle_args clkspec;
31 struct clk *clk;
32 int rc;
33
34 if (index < 0)
35 return ERR_PTR(-EINVAL);
36
37 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
38 &clkspec);
39 if (rc)
40 return ERR_PTR(rc);
41
42 clk = of_clk_get_from_provider(&clkspec);
43 of_node_put(clkspec.np);
44 return clk;
45}
46EXPORT_SYMBOL(of_clk_get);
47
48/**
49 * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
50 * @np: pointer to clock consumer node
51 * @name: name of consumer's clock input, or NULL for the first clock reference
52 *
53 * This function parses the clocks and clock-names properties,
54 * and uses them to look up the struct clk from the registered list of clock
55 * providers.
56 */
57struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
58{
59 struct clk *clk = ERR_PTR(-ENOENT);
60
61 /* Walk up the tree of devices looking for a clock that matches */
62 while (np) {
63 int index = 0;
64
65 /*
66 * For named clocks, first look up the name in the
67 * "clock-names" property. If it cannot be found, then
68 * index will be an error code, and of_clk_get() will fail.
69 */
70 if (name)
71 index = of_property_match_string(np, "clock-names", name);
72 clk = of_clk_get(np, index);
73 if (!IS_ERR(clk))
74 break;
75 else if (name && index >= 0) {
76 pr_err("ERROR: could not get clock %s:%s(%i)\n",
77 np->full_name, name ? name : "", index);
78 return clk;
79 }
80
81 /*
82 * No matching clock found on this node. If the parent node
83 * has a "clock-ranges" property, then we can try one of its
84 * clocks.
85 */
86 np = np->parent;
87 if (np && !of_get_property(np, "clock-ranges", NULL))
88 break;
89 }
90
91 return clk;
92}
93EXPORT_SYMBOL(of_clk_get_by_name);
94#endif
95
26/* 96/*
27 * Find the correct struct clk for the device and connection ID. 97 * Find the correct struct clk for the device and connection ID.
28 * We do slightly fuzzy matching here: 98 * We do slightly fuzzy matching here:
@@ -83,6 +153,13 @@ EXPORT_SYMBOL(clk_get_sys);
83struct clk *clk_get(struct device *dev, const char *con_id) 153struct clk *clk_get(struct device *dev, const char *con_id)
84{ 154{
85 const char *dev_id = dev ? dev_name(dev) : NULL; 155 const char *dev_id = dev ? dev_name(dev) : NULL;
156 struct clk *clk;
157
158 if (dev) {
159 clk = of_clk_get_by_name(dev->of_node, con_id);
160 if (!IS_ERR(clk) && __clk_get(clk))
161 return clk;
162 }
86 163
87 return clk_get_sys(dev_id, con_id); 164 return clk_get_sys(dev_id, con_id);
88} 165}
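(Editorial aside, not part of the patch: with this fallback in place, a DT-probed consumer keeps using the plain clk API; the lookup through "clocks"/"clock-names" happens transparently. The driver and connection names below are hypothetical.)

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* Resolved via the device node's "clock-names" entry "bus"
	 * first, falling back to the clkdev table otherwise. */
	clk = clk_get(&pdev->dev, "bus");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}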
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
index f7be225f544c..844043ad0fe4 100644
--- a/drivers/clk/mxs/clk-imx23.c
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -71,7 +71,7 @@ static void __init clk_misc_init(void)
71 __mxs_setl(30 << BP_FRAC_IOFRAC, FRAC); 71 __mxs_setl(30 << BP_FRAC_IOFRAC, FRAC);
72} 72}
73 73
74static struct clk_lookup uart_lookups[] __initdata = { 74static struct clk_lookup uart_lookups[] = {
75 { .dev_id = "duart", }, 75 { .dev_id = "duart", },
76 { .dev_id = "mxs-auart.0", }, 76 { .dev_id = "mxs-auart.0", },
77 { .dev_id = "mxs-auart.1", }, 77 { .dev_id = "mxs-auart.1", },
@@ -80,33 +80,33 @@ static struct clk_lookup uart_lookups[] __initdata = {
80 { .dev_id = "80070000.serial", }, 80 { .dev_id = "80070000.serial", },
81}; 81};
82 82
83static struct clk_lookup hbus_lookups[] __initdata = { 83static struct clk_lookup hbus_lookups[] = {
84 { .dev_id = "imx23-dma-apbh", }, 84 { .dev_id = "imx23-dma-apbh", },
85 { .dev_id = "80004000.dma-apbh", }, 85 { .dev_id = "80004000.dma-apbh", },
86}; 86};
87 87
88static struct clk_lookup xbus_lookups[] __initdata = { 88static struct clk_lookup xbus_lookups[] = {
89 { .dev_id = "duart", .con_id = "apb_pclk"}, 89 { .dev_id = "duart", .con_id = "apb_pclk"},
90 { .dev_id = "80070000.serial", .con_id = "apb_pclk"}, 90 { .dev_id = "80070000.serial", .con_id = "apb_pclk"},
91 { .dev_id = "imx23-dma-apbx", }, 91 { .dev_id = "imx23-dma-apbx", },
92 { .dev_id = "80024000.dma-apbx", }, 92 { .dev_id = "80024000.dma-apbx", },
93}; 93};
94 94
95static struct clk_lookup ssp_lookups[] __initdata = { 95static struct clk_lookup ssp_lookups[] = {
96 { .dev_id = "imx23-mmc.0", }, 96 { .dev_id = "imx23-mmc.0", },
97 { .dev_id = "imx23-mmc.1", }, 97 { .dev_id = "imx23-mmc.1", },
98 { .dev_id = "80010000.ssp", }, 98 { .dev_id = "80010000.ssp", },
99 { .dev_id = "80034000.ssp", }, 99 { .dev_id = "80034000.ssp", },
100}; 100};
101 101
102static struct clk_lookup lcdif_lookups[] __initdata = { 102static struct clk_lookup lcdif_lookups[] = {
103 { .dev_id = "imx23-fb", }, 103 { .dev_id = "imx23-fb", },
104 { .dev_id = "80030000.lcdif", }, 104 { .dev_id = "80030000.lcdif", },
105}; 105};
106 106
107static struct clk_lookup gpmi_lookups[] __initdata = { 107static struct clk_lookup gpmi_lookups[] = {
108 { .dev_id = "imx23-gpmi-nand", }, 108 { .dev_id = "imx23-gpmi-nand", },
109 { .dev_id = "8000c000.gpmi", }, 109 { .dev_id = "8000c000.gpmi-nand", },
110}; 110};
111 111
112static const char *sel_pll[] __initconst = { "pll", "ref_xtal", }; 112static const char *sel_pll[] __initconst = { "pll", "ref_xtal", };
@@ -189,6 +189,7 @@ int __init mx23_clocks_init(void)
189 } 189 }
190 190
191 clk_register_clkdev(clks[clk32k], NULL, "timrot"); 191 clk_register_clkdev(clks[clk32k], NULL, "timrot");
192 clk_register_clkdev(clks[pwm], NULL, "80064000.pwm");
192 clk_register_clkdevs(clks[hbus], hbus_lookups, ARRAY_SIZE(hbus_lookups)); 193 clk_register_clkdevs(clks[hbus], hbus_lookups, ARRAY_SIZE(hbus_lookups));
193 clk_register_clkdevs(clks[xbus], xbus_lookups, ARRAY_SIZE(xbus_lookups)); 194 clk_register_clkdevs(clks[xbus], xbus_lookups, ARRAY_SIZE(xbus_lookups));
194 clk_register_clkdevs(clks[uart], uart_lookups, ARRAY_SIZE(uart_lookups)); 195 clk_register_clkdevs(clks[uart], uart_lookups, ARRAY_SIZE(uart_lookups));
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
index 2826a2606a29..e3aab67b3eb7 100644
--- a/drivers/clk/mxs/clk-imx28.c
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -112,15 +112,15 @@ static void __init clk_misc_init(void)
112 112
113 /* 113 /*
114 * 480 MHz seems too high to be ssp clock source directly, 114 * 480 MHz seems too high to be ssp clock source directly,
115 * so set frac0 to get a 288 MHz ref_io0. 115 * so set frac0 to get a 288 MHz ref_io0 and ref_io1.
116 */ 116 */
117 val = readl_relaxed(FRAC0); 117 val = readl_relaxed(FRAC0);
118 val &= ~(0x3f << BP_FRAC0_IO0FRAC); 118 val &= ~((0x3f << BP_FRAC0_IO0FRAC) | (0x3f << BP_FRAC0_IO1FRAC));
119 val |= 30 << BP_FRAC0_IO0FRAC; 119 val |= (30 << BP_FRAC0_IO0FRAC) | (30 << BP_FRAC0_IO1FRAC);
120 writel_relaxed(val, FRAC0); 120 writel_relaxed(val, FRAC0);
121} 121}
122 122
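(Editorial aside, not part of the patch: the 288 MHz in the comment follows if the MXS ref clocks run at parent * 18 / frac, which is our reading of mxs_clk_ref() rather than something stated here; with IOFRAC = 30 on the 480 MHz PLL:)

#include <stdio.h>

int main(void)
{
	unsigned long pll = 480000000UL;	/* pll0 */
	unsigned int frac = 30;			/* IO0FRAC/IO1FRAC value */

	/* divide before multiplying to stay within 32 bits */
	printf("%lu\n", pll / frac * 18);	/* 288000000 */
	return 0;
}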
123static struct clk_lookup uart_lookups[] __initdata = { 123static struct clk_lookup uart_lookups[] = {
124 { .dev_id = "duart", }, 124 { .dev_id = "duart", },
125 { .dev_id = "mxs-auart.0", }, 125 { .dev_id = "mxs-auart.0", },
126 { .dev_id = "mxs-auart.1", }, 126 { .dev_id = "mxs-auart.1", },
@@ -135,71 +135,71 @@ static struct clk_lookup uart_lookups[] __initdata = {
135 { .dev_id = "80074000.serial", }, 135 { .dev_id = "80074000.serial", },
136}; 136};
137 137
138static struct clk_lookup hbus_lookups[] __initdata = { 138static struct clk_lookup hbus_lookups[] = {
139 { .dev_id = "imx28-dma-apbh", }, 139 { .dev_id = "imx28-dma-apbh", },
140 { .dev_id = "80004000.dma-apbh", }, 140 { .dev_id = "80004000.dma-apbh", },
141}; 141};
142 142
143static struct clk_lookup xbus_lookups[] __initdata = { 143static struct clk_lookup xbus_lookups[] = {
144 { .dev_id = "duart", .con_id = "apb_pclk"}, 144 { .dev_id = "duart", .con_id = "apb_pclk"},
145 { .dev_id = "80074000.serial", .con_id = "apb_pclk"}, 145 { .dev_id = "80074000.serial", .con_id = "apb_pclk"},
146 { .dev_id = "imx28-dma-apbx", }, 146 { .dev_id = "imx28-dma-apbx", },
147 { .dev_id = "80024000.dma-apbx", }, 147 { .dev_id = "80024000.dma-apbx", },
148}; 148};
149 149
150static struct clk_lookup ssp0_lookups[] __initdata = { 150static struct clk_lookup ssp0_lookups[] = {
151 { .dev_id = "imx28-mmc.0", }, 151 { .dev_id = "imx28-mmc.0", },
152 { .dev_id = "80010000.ssp", }, 152 { .dev_id = "80010000.ssp", },
153}; 153};
154 154
155static struct clk_lookup ssp1_lookups[] __initdata = { 155static struct clk_lookup ssp1_lookups[] = {
156 { .dev_id = "imx28-mmc.1", }, 156 { .dev_id = "imx28-mmc.1", },
157 { .dev_id = "80012000.ssp", }, 157 { .dev_id = "80012000.ssp", },
158}; 158};
159 159
160static struct clk_lookup ssp2_lookups[] __initdata = { 160static struct clk_lookup ssp2_lookups[] = {
161 { .dev_id = "imx28-mmc.2", }, 161 { .dev_id = "imx28-mmc.2", },
162 { .dev_id = "80014000.ssp", }, 162 { .dev_id = "80014000.ssp", },
163}; 163};
164 164
165static struct clk_lookup ssp3_lookups[] __initdata = { 165static struct clk_lookup ssp3_lookups[] = {
166 { .dev_id = "imx28-mmc.3", }, 166 { .dev_id = "imx28-mmc.3", },
167 { .dev_id = "80016000.ssp", }, 167 { .dev_id = "80016000.ssp", },
168}; 168};
169 169
170static struct clk_lookup lcdif_lookups[] __initdata = { 170static struct clk_lookup lcdif_lookups[] = {
171 { .dev_id = "imx28-fb", }, 171 { .dev_id = "imx28-fb", },
172 { .dev_id = "80030000.lcdif", }, 172 { .dev_id = "80030000.lcdif", },
173}; 173};
174 174
175static struct clk_lookup gpmi_lookups[] __initdata = { 175static struct clk_lookup gpmi_lookups[] = {
176 { .dev_id = "imx28-gpmi-nand", }, 176 { .dev_id = "imx28-gpmi-nand", },
177 { .dev_id = "8000c000.gpmi", }, 177 { .dev_id = "8000c000.gpmi-nand", },
178}; 178};
179 179
180static struct clk_lookup fec_lookups[] __initdata = { 180static struct clk_lookup fec_lookups[] = {
181 { .dev_id = "imx28-fec.0", }, 181 { .dev_id = "imx28-fec.0", },
182 { .dev_id = "imx28-fec.1", }, 182 { .dev_id = "imx28-fec.1", },
183 { .dev_id = "800f0000.ethernet", }, 183 { .dev_id = "800f0000.ethernet", },
184 { .dev_id = "800f4000.ethernet", }, 184 { .dev_id = "800f4000.ethernet", },
185}; 185};
186 186
187static struct clk_lookup can0_lookups[] __initdata = { 187static struct clk_lookup can0_lookups[] = {
188 { .dev_id = "flexcan.0", }, 188 { .dev_id = "flexcan.0", },
189 { .dev_id = "80032000.can", }, 189 { .dev_id = "80032000.can", },
190}; 190};
191 191
192static struct clk_lookup can1_lookups[] __initdata = { 192static struct clk_lookup can1_lookups[] = {
193 { .dev_id = "flexcan.1", }, 193 { .dev_id = "flexcan.1", },
194 { .dev_id = "80034000.can", }, 194 { .dev_id = "80034000.can", },
195}; 195};
196 196
197static struct clk_lookup saif0_lookups[] __initdata = { 197static struct clk_lookup saif0_lookups[] = {
198 { .dev_id = "mxs-saif.0", }, 198 { .dev_id = "mxs-saif.0", },
199 { .dev_id = "80042000.saif", }, 199 { .dev_id = "80042000.saif", },
200}; 200};
201 201
202static struct clk_lookup saif1_lookups[] __initdata = { 202static struct clk_lookup saif1_lookups[] = {
203 { .dev_id = "mxs-saif.1", }, 203 { .dev_id = "mxs-saif.1", },
204 { .dev_id = "80046000.saif", }, 204 { .dev_id = "80046000.saif", },
205}; 205};
@@ -245,8 +245,8 @@ int __init mx28_clocks_init(void)
245 clks[pll2] = mxs_clk_pll("pll2", "ref_xtal", PLL2CTRL0, 23, 50000000); 245 clks[pll2] = mxs_clk_pll("pll2", "ref_xtal", PLL2CTRL0, 23, 50000000);
246 clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll0", FRAC0, 0); 246 clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll0", FRAC0, 0);
247 clks[ref_emi] = mxs_clk_ref("ref_emi", "pll0", FRAC0, 1); 247 clks[ref_emi] = mxs_clk_ref("ref_emi", "pll0", FRAC0, 1);
248 clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 2); 248 clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 2);
249 clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 3); 249 clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 3);
250 clks[ref_pix] = mxs_clk_ref("ref_pix", "pll0", FRAC1, 0); 250 clks[ref_pix] = mxs_clk_ref("ref_pix", "pll0", FRAC1, 0);
251 clks[ref_hsadc] = mxs_clk_ref("ref_hsadc", "pll0", FRAC1, 1); 251 clks[ref_hsadc] = mxs_clk_ref("ref_hsadc", "pll0", FRAC1, 1);
252 clks[ref_gpmi] = mxs_clk_ref("ref_gpmi", "pll0", FRAC1, 2); 252 clks[ref_gpmi] = mxs_clk_ref("ref_gpmi", "pll0", FRAC1, 2);
@@ -314,6 +314,7 @@ int __init mx28_clocks_init(void)
314 314
315 clk_register_clkdev(clks[clk32k], NULL, "timrot"); 315 clk_register_clkdev(clks[clk32k], NULL, "timrot");
316 clk_register_clkdev(clks[enet_out], NULL, "enet_out"); 316 clk_register_clkdev(clks[enet_out], NULL, "enet_out");
317 clk_register_clkdev(clks[pwm], NULL, "80064000.pwm");
317 clk_register_clkdevs(clks[hbus], hbus_lookups, ARRAY_SIZE(hbus_lookups)); 318 clk_register_clkdevs(clks[hbus], hbus_lookups, ARRAY_SIZE(hbus_lookups));
318 clk_register_clkdevs(clks[xbus], xbus_lookups, ARRAY_SIZE(xbus_lookups)); 319 clk_register_clkdevs(clks[xbus], xbus_lookups, ARRAY_SIZE(xbus_lookups));
319 clk_register_clkdevs(clks[uart], uart_lookups, ARRAY_SIZE(uart_lookups)); 320 clk_register_clkdevs(clks[uart], uart_lookups, ARRAY_SIZE(uart_lookups));
@@ -328,6 +329,10 @@ int __init mx28_clocks_init(void)
328 clk_register_clkdevs(clks[fec], fec_lookups, ARRAY_SIZE(fec_lookups)); 329 clk_register_clkdevs(clks[fec], fec_lookups, ARRAY_SIZE(fec_lookups));
329 clk_register_clkdevs(clks[can0], can0_lookups, ARRAY_SIZE(can0_lookups)); 330 clk_register_clkdevs(clks[can0], can0_lookups, ARRAY_SIZE(can0_lookups));
330 clk_register_clkdevs(clks[can1], can1_lookups, ARRAY_SIZE(can1_lookups)); 331 clk_register_clkdevs(clks[can1], can1_lookups, ARRAY_SIZE(can1_lookups));
332 clk_register_clkdev(clks[usb0_pwr], NULL, "8007c000.usbphy");
333 clk_register_clkdev(clks[usb1_pwr], NULL, "8007e000.usbphy");
334 clk_register_clkdev(clks[usb0], NULL, "80080000.usb");
335 clk_register_clkdev(clks[usb1], NULL, "80090000.usb");
331 336
332 for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) 337 for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
333 clk_prepare_enable(clks[clks_init_on[i]]); 338 clk_prepare_enable(clks[clks_init_on[i]]);
diff --git a/drivers/clk/socfpga/Makefile b/drivers/clk/socfpga/Makefile
new file mode 100644
index 000000000000..0303c0b99cd0
--- /dev/null
+++ b/drivers/clk/socfpga/Makefile
@@ -0,0 +1 @@
obj-y += clk.o
diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
new file mode 100644
index 000000000000..2c855a6394ff
--- /dev/null
+++ b/drivers/clk/socfpga/clk.c
@@ -0,0 +1,51 @@
1/*
2 * Copyright (C) 2012 Altera Corporation <www.altera.com>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17#include <linux/clk.h>
18#include <linux/clkdev.h>
19#include <linux/clk-provider.h>
20
21#define SOCFPGA_OSC1_CLK 10000000
22#define SOCFPGA_MPU_CLK 800000000
23#define SOCFPGA_MAIN_QSPI_CLK 432000000
24#define SOCFPGA_MAIN_NAND_SDMMC_CLK 250000000
25#define SOCFPGA_S2F_USR_CLK 125000000
26
27void __init socfpga_init_clocks(void)
28{
29 struct clk *clk;
30
31 clk = clk_register_fixed_rate(NULL, "osc1_clk", NULL, CLK_IS_ROOT, SOCFPGA_OSC1_CLK);
32 clk_register_clkdev(clk, "osc1_clk", NULL);
33
34 clk = clk_register_fixed_rate(NULL, "mpu_clk", NULL, CLK_IS_ROOT, SOCFPGA_MPU_CLK);
35 clk_register_clkdev(clk, "mpu_clk", NULL);
36
37 clk = clk_register_fixed_rate(NULL, "main_clk", NULL, CLK_IS_ROOT, SOCFPGA_MPU_CLK/2);
38 clk_register_clkdev(clk, "main_clk", NULL);
39
40 clk = clk_register_fixed_rate(NULL, "dbg_base_clk", NULL, CLK_IS_ROOT, SOCFPGA_MPU_CLK/2);
41 clk_register_clkdev(clk, "dbg_base_clk", NULL);
42
43 clk = clk_register_fixed_rate(NULL, "main_qspi_clk", NULL, CLK_IS_ROOT, SOCFPGA_MAIN_QSPI_CLK);
44 clk_register_clkdev(clk, "main_qspi_clk", NULL);
45
46 clk = clk_register_fixed_rate(NULL, "main_nand_sdmmc_clk", NULL, CLK_IS_ROOT, SOCFPGA_MAIN_NAND_SDMMC_CLK);
47 clk_register_clkdev(clk, "main_nand_sdmmc_clk", NULL);
48
49 clk = clk_register_fixed_rate(NULL, "s2f_usr_clk", NULL, CLK_IS_ROOT, SOCFPGA_S2F_USR_CLK);
50 clk_register_clkdev(clk, "s2f_usr_clk", NULL);
51}
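(Editorial aside, not part of the patch: because every clock above is registered with a connection ID and a NULL device, any kernel code can resolve one by con_id alone, as in this sketch.)

static void foo_report_osc1(void)
{
	struct clk *clk = clk_get(NULL, "osc1_clk");

	if (IS_ERR(clk))
		return;

	pr_info("osc1_clk runs at %lu Hz\n", clk_get_rate(clk));
	clk_put(clk);
}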
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
index af34074e702b..6756e7c3bc07 100644
--- a/drivers/clk/spear/clk-aux-synth.c
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com> 3 * Viresh Kumar <viresh.linux@gmail.com>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
index 4dbdb3fe18e0..958aa3ad1d60 100644
--- a/drivers/clk/spear/clk-frac-synth.c
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com> 3 * Viresh Kumar <viresh.linux@gmail.com>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
index b471c9762a97..1afc18c4effc 100644
--- a/drivers/clk/spear/clk-gpt-synth.c
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com> 3 * Viresh Kumar <viresh.linux@gmail.com>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
index dcd4bdf4b0d9..5f1b6badeb15 100644
--- a/drivers/clk/spear/clk-vco-pll.c
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com> 3 * Viresh Kumar <viresh.linux@gmail.com>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk.c b/drivers/clk/spear/clk.c
index 376d4e5ff326..7cd63788d546 100644
--- a/drivers/clk/spear/clk.c
+++ b/drivers/clk/spear/clk.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com> 3 * Viresh Kumar <viresh.linux@gmail.com>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h
index 3321c46a071c..931737677dfa 100644
--- a/drivers/clk/spear/clk.h
+++ b/drivers/clk/spear/clk.h
@@ -2,7 +2,7 @@
2 * Clock framework definitions for SPEAr platform 2 * Clock framework definitions for SPEAr platform
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 42b68df9aeef..0fcec2aae19c 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -4,7 +4,7 @@
4 * SPEAr1310 machine clock framework source file 4 * SPEAr1310 machine clock framework source file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
@@ -345,31 +345,30 @@ static struct frac_rate_tbl gen_rtbl[] = {
345/* clock parents */ 345/* clock parents */
346static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", }; 346static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
347static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", }; 347static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
348static const char *uart0_parents[] = { "pll5_clk", "uart_synth_gate_clk", }; 348static const char *uart0_parents[] = { "pll5_clk", "uart_syn_gclk", };
349static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", }; 349static const char *c3_parents[] = { "pll5_clk", "c3_syn_gclk", };
350static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk", 350static const char *gmac_phy_input_parents[] = { "gmii_pad_clk", "pll2_clk",
351 "osc_25m_clk", }; 351 "osc_25m_clk", };
352static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk", 352static const char *gmac_phy_parents[] = { "phy_input_mclk", "phy_syn_gclk", };
353 "gmac_phy_synth_gate_clk", };
354static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", }; 353static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
355static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", }; 354static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_syn_clk", };
356static const char *i2s_src_parents[] = { "vco1div2_clk", "none", "pll3_clk", 355static const char *i2s_src_parents[] = { "vco1div2_clk", "none", "pll3_clk",
357 "i2s_src_pad_clk", }; 356 "i2s_src_pad_clk", };
358static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", }; 357static const char *i2s_ref_parents[] = { "i2s_src_mclk", "i2s_prs1_clk", };
359static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk", 358static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
360 "pll3_clk", }; 359 "pll3_clk", };
361static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk", 360static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk",
362 "pll2_clk", }; 361 "pll2_clk", };
363static const char *rmii_phy_parents[] = { "ras_tx50_clk", "none", 362static const char *rmii_phy_parents[] = { "ras_tx50_clk", "none",
364 "ras_pll2_clk", "ras_synth0_clk", }; 363 "ras_pll2_clk", "ras_syn0_clk", };
365static const char *smii_rgmii_phy_parents[] = { "none", "ras_tx125_clk", 364static const char *smii_rgmii_phy_parents[] = { "none", "ras_tx125_clk",
366 "ras_pll2_clk", "ras_synth0_clk", }; 365 "ras_pll2_clk", "ras_syn0_clk", };
367static const char *uart_parents[] = { "ras_apb_clk", "gen_synth3_clk", }; 366static const char *uart_parents[] = { "ras_apb_clk", "gen_syn3_clk", };
368static const char *i2c_parents[] = { "ras_apb_clk", "gen_synth1_clk", }; 367static const char *i2c_parents[] = { "ras_apb_clk", "gen_syn1_clk", };
369static const char *ssp1_parents[] = { "ras_apb_clk", "gen_synth1_clk", 368static const char *ssp1_parents[] = { "ras_apb_clk", "gen_syn1_clk",
370 "ras_plclk0_clk", }; 369 "ras_plclk0_clk", };
371static const char *pci_parents[] = { "ras_pll3_clk", "gen_synth2_clk", }; 370static const char *pci_parents[] = { "ras_pll3_clk", "gen_syn2_clk", };
372static const char *tdm_parents[] = { "ras_pll3_clk", "gen_synth1_clk", }; 371static const char *tdm_parents[] = { "ras_pll3_clk", "gen_syn1_clk", };
373 372
374void __init spear1310_clk_init(void) 373void __init spear1310_clk_init(void)
375{ 374{
@@ -390,9 +389,9 @@ void __init spear1310_clk_init(void)
390 25000000); 389 25000000);
391 clk_register_clkdev(clk, "osc_25m_clk", NULL); 390 clk_register_clkdev(clk, "osc_25m_clk", NULL);
392 391
393 clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL, 392 clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT,
394 CLK_IS_ROOT, 125000000); 393 125000000);
395 clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL); 394 clk_register_clkdev(clk, "gmii_pad_clk", NULL);
396 395
397 clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL, 396 clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
398 CLK_IS_ROOT, 12288000); 397 CLK_IS_ROOT, 12288000);
@@ -406,34 +405,34 @@ void __init spear1310_clk_init(void)
406 405
407 /* clock derived from 24 or 25 MHz osc clk */ 406 /* clock derived from 24 or 25 MHz osc clk */
408 /* vco-pll */ 407 /* vco-pll */
409 clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents, 408 clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
410 ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG, 409 ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
411 SPEAR1310_PLL1_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0, 410 SPEAR1310_PLL1_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
412 &_lock); 411 &_lock);
413 clk_register_clkdev(clk, "vco1_mux_clk", NULL); 412 clk_register_clkdev(clk, "vco1_mclk", NULL);
414 clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk", 413 clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk",
415 0, SPEAR1310_PLL1_CTR, SPEAR1310_PLL1_FRQ, pll_rtbl, 414 0, SPEAR1310_PLL1_CTR, SPEAR1310_PLL1_FRQ, pll_rtbl,
416 ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL); 415 ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
417 clk_register_clkdev(clk, "vco1_clk", NULL); 416 clk_register_clkdev(clk, "vco1_clk", NULL);
418 clk_register_clkdev(clk1, "pll1_clk", NULL); 417 clk_register_clkdev(clk1, "pll1_clk", NULL);
419 418
420 clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents, 419 clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
421 ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG, 420 ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
422 SPEAR1310_PLL2_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0, 421 SPEAR1310_PLL2_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
423 &_lock); 422 &_lock);
424 clk_register_clkdev(clk, "vco2_mux_clk", NULL); 423 clk_register_clkdev(clk, "vco2_mclk", NULL);
425 clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk", 424 clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk",
426 0, SPEAR1310_PLL2_CTR, SPEAR1310_PLL2_FRQ, pll_rtbl, 425 0, SPEAR1310_PLL2_CTR, SPEAR1310_PLL2_FRQ, pll_rtbl,
427 ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL); 426 ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
428 clk_register_clkdev(clk, "vco2_clk", NULL); 427 clk_register_clkdev(clk, "vco2_clk", NULL);
429 clk_register_clkdev(clk1, "pll2_clk", NULL); 428 clk_register_clkdev(clk1, "pll2_clk", NULL);
430 429
431 clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents, 430 clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
432 ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG, 431 ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
433 SPEAR1310_PLL3_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0, 432 SPEAR1310_PLL3_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
434 &_lock); 433 &_lock);
435 clk_register_clkdev(clk, "vco3_mux_clk", NULL); 434 clk_register_clkdev(clk, "vco3_mclk", NULL);
436 clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk", 435 clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk",
437 0, SPEAR1310_PLL3_CTR, SPEAR1310_PLL3_FRQ, pll_rtbl, 436 0, SPEAR1310_PLL3_CTR, SPEAR1310_PLL3_FRQ, pll_rtbl,
438 ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL); 437 ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
439 clk_register_clkdev(clk, "vco3_clk", NULL); 438 clk_register_clkdev(clk, "vco3_clk", NULL);
@@ -473,7 +472,7 @@ void __init spear1310_clk_init(void)
473 /* peripherals */ 472 /* peripherals */
474 clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1, 473 clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
475 128); 474 128);
476 clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0, 475 clk = clk_register_gate(NULL, "thermal_gclk", "thermal_clk", 0,
477 SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_THSENS_CLK_ENB, 0, 476 SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_THSENS_CLK_ENB, 0,
478 &_lock); 477 &_lock);
479 clk_register_clkdev(clk, NULL, "spear_thermal"); 478 clk_register_clkdev(clk, NULL, "spear_thermal");
@@ -500,177 +499,176 @@ void __init spear1310_clk_init(void)
500 clk_register_clkdev(clk, "apb_clk", NULL); 499 clk_register_clkdev(clk, "apb_clk", NULL);
501 500
502 /* gpt clocks */ 501 /* gpt clocks */
503 clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents, 502 clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
504 ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG, 503 ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
505 SPEAR1310_GPT0_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0, 504 SPEAR1310_GPT0_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
506 &_lock); 505 &_lock);
507 clk_register_clkdev(clk, "gpt0_mux_clk", NULL); 506 clk_register_clkdev(clk, "gpt0_mclk", NULL);
508 clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0, 507 clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
509 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT0_CLK_ENB, 0, 508 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT0_CLK_ENB, 0,
510 &_lock); 509 &_lock);
511 clk_register_clkdev(clk, NULL, "gpt0"); 510 clk_register_clkdev(clk, NULL, "gpt0");
512 511
513 clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents, 512 clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
514 ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG, 513 ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
515 SPEAR1310_GPT1_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0, 514 SPEAR1310_GPT1_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
516 &_lock); 515 &_lock);
517 clk_register_clkdev(clk, "gpt1_mux_clk", NULL); 516 clk_register_clkdev(clk, "gpt1_mclk", NULL);
518 clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0, 517 clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
519 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT1_CLK_ENB, 0, 518 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT1_CLK_ENB, 0,
520 &_lock); 519 &_lock);
521 clk_register_clkdev(clk, NULL, "gpt1"); 520 clk_register_clkdev(clk, NULL, "gpt1");
522 521
523 clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents, 522 clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
524 ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG, 523 ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
525 SPEAR1310_GPT2_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0, 524 SPEAR1310_GPT2_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
526 &_lock); 525 &_lock);
527 clk_register_clkdev(clk, "gpt2_mux_clk", NULL); 526 clk_register_clkdev(clk, "gpt2_mclk", NULL);
528 clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0, 527 clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
529 SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT2_CLK_ENB, 0, 528 SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT2_CLK_ENB, 0,
530 &_lock); 529 &_lock);
531 clk_register_clkdev(clk, NULL, "gpt2"); 530 clk_register_clkdev(clk, NULL, "gpt2");
532 531
533 clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents, 532 clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
534 ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG, 533 ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
535 SPEAR1310_GPT3_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0, 534 SPEAR1310_GPT3_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
536 &_lock); 535 &_lock);
537 clk_register_clkdev(clk, "gpt3_mux_clk", NULL); 536 clk_register_clkdev(clk, "gpt3_mclk", NULL);
538 clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0, 537 clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
539 SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT3_CLK_ENB, 0, 538 SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT3_CLK_ENB, 0,
540 &_lock); 539 &_lock);
541 clk_register_clkdev(clk, NULL, "gpt3"); 540 clk_register_clkdev(clk, NULL, "gpt3");
542 541
543 /* others */ 542 /* others */
544 clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk", 543 clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "vco1div2_clk",
545 "vco1div2_clk", 0, SPEAR1310_UART_CLK_SYNT, NULL, 544 0, SPEAR1310_UART_CLK_SYNT, NULL, aux_rtbl,
546 aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); 545 ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
547 clk_register_clkdev(clk, "uart_synth_clk", NULL); 546 clk_register_clkdev(clk, "uart_syn_clk", NULL);
548 clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL); 547 clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
549 548
550 clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents, 549 clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
551 ARRAY_SIZE(uart0_parents), 0, SPEAR1310_PERIP_CLK_CFG, 550 ARRAY_SIZE(uart0_parents), 0, SPEAR1310_PERIP_CLK_CFG,
552 SPEAR1310_UART_CLK_SHIFT, SPEAR1310_UART_CLK_MASK, 0, 551 SPEAR1310_UART_CLK_SHIFT, SPEAR1310_UART_CLK_MASK, 0,
553 &_lock); 552 &_lock);
554 clk_register_clkdev(clk, "uart0_mux_clk", NULL); 553 clk_register_clkdev(clk, "uart0_mclk", NULL);
555 554
556 clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0, 555 clk = clk_register_gate(NULL, "uart0_clk", "uart0_mclk", 0,
557 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UART_CLK_ENB, 0, 556 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UART_CLK_ENB, 0,
558 &_lock); 557 &_lock);
559 clk_register_clkdev(clk, NULL, "e0000000.serial"); 558 clk_register_clkdev(clk, NULL, "e0000000.serial");
560 559
561 clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk", 560 clk = clk_register_aux("sdhci_syn_clk", "sdhci_syn_gclk",
562 "vco1div2_clk", 0, SPEAR1310_SDHCI_CLK_SYNT, NULL, 561 "vco1div2_clk", 0, SPEAR1310_SDHCI_CLK_SYNT, NULL,
563 aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); 562 aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
564 clk_register_clkdev(clk, "sdhci_synth_clk", NULL); 563 clk_register_clkdev(clk, "sdhci_syn_clk", NULL);
565 clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL); 564 clk_register_clkdev(clk1, "sdhci_syn_gclk", NULL);
566 565
567 clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0, 566 clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_syn_gclk", 0,
568 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SDHCI_CLK_ENB, 0, 567 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SDHCI_CLK_ENB, 0,
569 &_lock); 568 &_lock);
570 clk_register_clkdev(clk, NULL, "b3000000.sdhci"); 569 clk_register_clkdev(clk, NULL, "b3000000.sdhci");
571 570
572 clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk", 571 clk = clk_register_aux("cfxd_syn_clk", "cfxd_syn_gclk", "vco1div2_clk",
573 "vco1div2_clk", 0, SPEAR1310_CFXD_CLK_SYNT, NULL, 572 0, SPEAR1310_CFXD_CLK_SYNT, NULL, aux_rtbl,
574 aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); 573 ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
575 clk_register_clkdev(clk, "cfxd_synth_clk", NULL); 574 clk_register_clkdev(clk, "cfxd_syn_clk", NULL);
576 clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL); 575 clk_register_clkdev(clk1, "cfxd_syn_gclk", NULL);
577 576
578 clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0, 577 clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_syn_gclk", 0,
579 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CFXD_CLK_ENB, 0, 578 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CFXD_CLK_ENB, 0,
580 &_lock); 579 &_lock);
581 clk_register_clkdev(clk, NULL, "b2800000.cf"); 580 clk_register_clkdev(clk, NULL, "b2800000.cf");
582 clk_register_clkdev(clk, NULL, "arasan_xd"); 581 clk_register_clkdev(clk, NULL, "arasan_xd");
583 582
584 clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk", 583 clk = clk_register_aux("c3_syn_clk", "c3_syn_gclk", "vco1div2_clk",
585 "vco1div2_clk", 0, SPEAR1310_C3_CLK_SYNT, NULL, 584 0, SPEAR1310_C3_CLK_SYNT, NULL, aux_rtbl,
586 aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); 585 ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
587 clk_register_clkdev(clk, "c3_synth_clk", NULL); 586 clk_register_clkdev(clk, "c3_syn_clk", NULL);
588 clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL); 587 clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
589 588
590 clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents, 589 clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
591 ARRAY_SIZE(c3_parents), 0, SPEAR1310_PERIP_CLK_CFG, 590 ARRAY_SIZE(c3_parents), 0, SPEAR1310_PERIP_CLK_CFG,
592 SPEAR1310_C3_CLK_SHIFT, SPEAR1310_C3_CLK_MASK, 0, 591 SPEAR1310_C3_CLK_SHIFT, SPEAR1310_C3_CLK_MASK, 0,
593 &_lock); 592 &_lock);
594 clk_register_clkdev(clk, "c3_mux_clk", NULL); 593 clk_register_clkdev(clk, "c3_mclk", NULL);
595 594
596 clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0, 595 clk = clk_register_gate(NULL, "c3_clk", "c3_mclk", 0,
597 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_C3_CLK_ENB, 0, 596 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_C3_CLK_ENB, 0,
598 &_lock); 597 &_lock);
599 clk_register_clkdev(clk, NULL, "c3"); 598 clk_register_clkdev(clk, NULL, "c3");
600 599
601 /* gmac */ 600 /* gmac */
602 clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk", 601 clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
603 gmac_phy_input_parents,
604 ARRAY_SIZE(gmac_phy_input_parents), 0, 602 ARRAY_SIZE(gmac_phy_input_parents), 0,
605 SPEAR1310_GMAC_CLK_CFG, 603 SPEAR1310_GMAC_CLK_CFG,
606 SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT, 604 SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT,
607 SPEAR1310_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock); 605 SPEAR1310_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
608 clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL); 606 clk_register_clkdev(clk, "phy_input_mclk", NULL);
609 607
610 clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk", 608 clk = clk_register_aux("phy_syn_clk", "phy_syn_gclk", "phy_input_mclk",
611 "gmac_phy_input_mux_clk", 0, SPEAR1310_GMAC_CLK_SYNT, 609 0, SPEAR1310_GMAC_CLK_SYNT, NULL, gmac_rtbl,
612 NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1); 610 ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
613 clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL); 611 clk_register_clkdev(clk, "phy_syn_clk", NULL);
614 clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL); 612 clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
615 613
616 clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents, 614 clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
617 ARRAY_SIZE(gmac_phy_parents), 0, 615 ARRAY_SIZE(gmac_phy_parents), 0,
618 SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GMAC_PHY_CLK_SHIFT, 616 SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GMAC_PHY_CLK_SHIFT,
619 SPEAR1310_GMAC_PHY_CLK_MASK, 0, &_lock); 617 SPEAR1310_GMAC_PHY_CLK_MASK, 0, &_lock);
620 clk_register_clkdev(clk, NULL, "stmmacphy.0"); 618 clk_register_clkdev(clk, NULL, "stmmacphy.0");
621 619
622 /* clcd */ 620 /* clcd */
623 clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents, 621 clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
624 ARRAY_SIZE(clcd_synth_parents), 0, 622 ARRAY_SIZE(clcd_synth_parents), 0,
625 SPEAR1310_CLCD_CLK_SYNT, SPEAR1310_CLCD_SYNT_CLK_SHIFT, 623 SPEAR1310_CLCD_CLK_SYNT, SPEAR1310_CLCD_SYNT_CLK_SHIFT,
626 SPEAR1310_CLCD_SYNT_CLK_MASK, 0, &_lock); 624 SPEAR1310_CLCD_SYNT_CLK_MASK, 0, &_lock);
627 clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL); 625 clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
628 626
629 clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0, 627 clk = clk_register_frac("clcd_syn_clk", "clcd_syn_mclk", 0,
630 SPEAR1310_CLCD_CLK_SYNT, clcd_rtbl, 628 SPEAR1310_CLCD_CLK_SYNT, clcd_rtbl,
631 ARRAY_SIZE(clcd_rtbl), &_lock); 629 ARRAY_SIZE(clcd_rtbl), &_lock);
632 clk_register_clkdev(clk, "clcd_synth_clk", NULL); 630 clk_register_clkdev(clk, "clcd_syn_clk", NULL);
633 631
634 clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents, 632 clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
635 ARRAY_SIZE(clcd_pixel_parents), 0, 633 ARRAY_SIZE(clcd_pixel_parents), 0,
636 SPEAR1310_PERIP_CLK_CFG, SPEAR1310_CLCD_CLK_SHIFT, 634 SPEAR1310_PERIP_CLK_CFG, SPEAR1310_CLCD_CLK_SHIFT,
637 SPEAR1310_CLCD_CLK_MASK, 0, &_lock); 635 SPEAR1310_CLCD_CLK_MASK, 0, &_lock);
638 clk_register_clkdev(clk, "clcd_pixel_clk", NULL); 636 clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
639 637
640 clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0, 638 clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mclk", 0,
641 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CLCD_CLK_ENB, 0, 639 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CLCD_CLK_ENB, 0,
642 &_lock); 640 &_lock);
643 clk_register_clkdev(clk, "clcd_clk", NULL); 641 clk_register_clkdev(clk, "clcd_clk", NULL);
644 642
645 /* i2s */ 643 /* i2s */
646 clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents, 644 clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
647 ARRAY_SIZE(i2s_src_parents), 0, SPEAR1310_I2S_CLK_CFG, 645 ARRAY_SIZE(i2s_src_parents), 0, SPEAR1310_I2S_CLK_CFG,
648 SPEAR1310_I2S_SRC_CLK_SHIFT, SPEAR1310_I2S_SRC_CLK_MASK, 646 SPEAR1310_I2S_SRC_CLK_SHIFT, SPEAR1310_I2S_SRC_CLK_MASK,
649 0, &_lock); 647 0, &_lock);
650 clk_register_clkdev(clk, "i2s_src_clk", NULL); 648 clk_register_clkdev(clk, "i2s_src_clk", NULL);
651 649
652 clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0, 650 clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk", 0,
653 SPEAR1310_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl, 651 SPEAR1310_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
654 ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL); 652 ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
655 clk_register_clkdev(clk, "i2s_prs1_clk", NULL); 653 clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
656 654
657 clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents, 655 clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
658 ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1310_I2S_CLK_CFG, 656 ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1310_I2S_CLK_CFG,
659 SPEAR1310_I2S_REF_SHIFT, SPEAR1310_I2S_REF_SEL_MASK, 0, 657 SPEAR1310_I2S_REF_SHIFT, SPEAR1310_I2S_REF_SEL_MASK, 0,
660 &_lock); 658 &_lock);
661 clk_register_clkdev(clk, "i2s_ref_clk", NULL); 659 clk_register_clkdev(clk, "i2s_ref_clk", NULL);
662 660
663 clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0, 661 clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mclk", 0,
664 SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_I2S_REF_PAD_CLK_ENB, 662 SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_I2S_REF_PAD_CLK_ENB,
665 0, &_lock); 663 0, &_lock);
666 clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL); 664 clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
667 665
668 clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk", 666 clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gclk",
669 "i2s_ref_pad_clk", 0, SPEAR1310_I2S_CLK_CFG, 667 "i2s_ref_pad_clk", 0, SPEAR1310_I2S_CLK_CFG,
670 &i2s_sclk_masks, i2s_sclk_rtbl, 668 &i2s_sclk_masks, i2s_sclk_rtbl,
671 ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1); 669 ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
672 clk_register_clkdev(clk, "i2s_sclk_clk", NULL); 670 clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
673 clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL); 671 clk_register_clkdev(clk1, "i2s_sclk_gclk", NULL);
674 672
675 /* clock derived from ahb clk */ 673 /* clock derived from ahb clk */
676 clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0, 674 clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
@@ -747,13 +745,13 @@ void __init spear1310_clk_init(void)
747 &_lock); 745 &_lock);
748 clk_register_clkdev(clk, "sysram1_clk", NULL); 746 clk_register_clkdev(clk, "sysram1_clk", NULL);
749 747
750 clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk", 748 clk = clk_register_aux("adc_syn_clk", "adc_syn_gclk", "ahb_clk",
751 0, SPEAR1310_ADC_CLK_SYNT, NULL, adc_rtbl, 749 0, SPEAR1310_ADC_CLK_SYNT, NULL, adc_rtbl,
752 ARRAY_SIZE(adc_rtbl), &_lock, &clk1); 750 ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
753 clk_register_clkdev(clk, "adc_synth_clk", NULL); 751 clk_register_clkdev(clk, "adc_syn_clk", NULL);
754 clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL); 752 clk_register_clkdev(clk1, "adc_syn_gclk", NULL);
755 753
756 clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0, 754 clk = clk_register_gate(NULL, "adc_clk", "adc_syn_gclk", 0,
757 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_ADC_CLK_ENB, 0, 755 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_ADC_CLK_ENB, 0,
758 &_lock); 756 &_lock);
759 clk_register_clkdev(clk, NULL, "adc_clk"); 757 clk_register_clkdev(clk, NULL, "adc_clk");
@@ -790,37 +788,37 @@ void __init spear1310_clk_init(void)
790 clk_register_clkdev(clk, NULL, "e0300000.kbd"); 788 clk_register_clkdev(clk, NULL, "e0300000.kbd");
791 789
792 /* RAS clks */ 790 /* RAS clks */
793 clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk", 791 clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
794 gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents), 792 ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1310_PLL_CFG,
795 0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT0_1_CLK_SHIFT, 793 SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
796 SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock); 794 SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
797 clk_register_clkdev(clk, "gen_synth0_1_clk", NULL); 795 clk_register_clkdev(clk, "gen_syn0_1_clk", NULL);
798 796
799 clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk", 797 clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
800 gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents), 798 ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1310_PLL_CFG,
801 0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT2_3_CLK_SHIFT, 799 SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
802 SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock); 800 SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
803 clk_register_clkdev(clk, "gen_synth2_3_clk", NULL); 801 clk_register_clkdev(clk, "gen_syn2_3_clk", NULL);
804 802
805 clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0, 803 clk = clk_register_frac("gen_syn0_clk", "gen_syn0_1_clk", 0,
806 SPEAR1310_RAS_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl), 804 SPEAR1310_RAS_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
807 &_lock); 805 &_lock);
808 clk_register_clkdev(clk, "gen_synth0_clk", NULL); 806 clk_register_clkdev(clk, "gen_syn0_clk", NULL);
809 807
810 clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0, 808 clk = clk_register_frac("gen_syn1_clk", "gen_syn0_1_clk", 0,
811 SPEAR1310_RAS_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl), 809 SPEAR1310_RAS_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
812 &_lock); 810 &_lock);
813 clk_register_clkdev(clk, "gen_synth1_clk", NULL); 811 clk_register_clkdev(clk, "gen_syn1_clk", NULL);
814 812
815 clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0, 813 clk = clk_register_frac("gen_syn2_clk", "gen_syn2_3_clk", 0,
816 SPEAR1310_RAS_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl), 814 SPEAR1310_RAS_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
817 &_lock); 815 &_lock);
818 clk_register_clkdev(clk, "gen_synth2_clk", NULL); 816 clk_register_clkdev(clk, "gen_syn2_clk", NULL);
819 817
820 clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0, 818 clk = clk_register_frac("gen_syn3_clk", "gen_syn2_3_clk", 0,
821 SPEAR1310_RAS_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl), 819 SPEAR1310_RAS_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
822 &_lock); 820 &_lock);
823 clk_register_clkdev(clk, "gen_synth3_clk", NULL); 821 clk_register_clkdev(clk, "gen_syn3_clk", NULL);
824 822
825 clk = clk_register_gate(NULL, "ras_osc_24m_clk", "osc_24m_clk", 0, 823 clk = clk_register_gate(NULL, "ras_osc_24m_clk", "osc_24m_clk", 0,
826 SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_24M_CLK_ENB, 0, 824 SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_24M_CLK_ENB, 0,
@@ -847,7 +845,7 @@ void __init spear1310_clk_init(void)
847 &_lock); 845 &_lock);
848 clk_register_clkdev(clk, "ras_pll3_clk", NULL); 846 clk_register_clkdev(clk, "ras_pll3_clk", NULL);
849 847
850 clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_125m_pad_clk", 0, 848 clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_pad_clk", 0,
851 SPEAR1310_RAS_CLK_ENB, SPEAR1310_C125M_PAD_CLK_ENB, 0, 849 SPEAR1310_RAS_CLK_ENB, SPEAR1310_C125M_PAD_CLK_ENB, 0,
852 &_lock); 850 &_lock);
853 clk_register_clkdev(clk, "ras_tx125_clk", NULL); 851 clk_register_clkdev(clk, "ras_tx125_clk", NULL);
@@ -912,7 +910,7 @@ void __init spear1310_clk_init(void)
912 &_lock); 910 &_lock);
913 clk_register_clkdev(clk, NULL, "5c700000.eth"); 911 clk_register_clkdev(clk, NULL, "5c700000.eth");
914 912
915 clk = clk_register_mux(NULL, "smii_rgmii_phy_mux_clk", 913 clk = clk_register_mux(NULL, "smii_rgmii_phy_mclk",
916 smii_rgmii_phy_parents, 914 smii_rgmii_phy_parents,
917 ARRAY_SIZE(smii_rgmii_phy_parents), 0, 915 ARRAY_SIZE(smii_rgmii_phy_parents), 0,
918 SPEAR1310_RAS_CTRL_REG1, 916 SPEAR1310_RAS_CTRL_REG1,
@@ -922,184 +920,184 @@ void __init spear1310_clk_init(void)
922 clk_register_clkdev(clk, NULL, "stmmacphy.2"); 920 clk_register_clkdev(clk, NULL, "stmmacphy.2");
923 clk_register_clkdev(clk, NULL, "stmmacphy.4"); 921 clk_register_clkdev(clk, NULL, "stmmacphy.4");
924 922
925 clk = clk_register_mux(NULL, "rmii_phy_mux_clk", rmii_phy_parents, 923 clk = clk_register_mux(NULL, "rmii_phy_mclk", rmii_phy_parents,
926 ARRAY_SIZE(rmii_phy_parents), 0, 924 ARRAY_SIZE(rmii_phy_parents), 0,
927 SPEAR1310_RAS_CTRL_REG1, SPEAR1310_RMII_PHY_CLK_SHIFT, 925 SPEAR1310_RAS_CTRL_REG1, SPEAR1310_RMII_PHY_CLK_SHIFT,
928 SPEAR1310_PHY_CLK_MASK, 0, &_lock); 926 SPEAR1310_PHY_CLK_MASK, 0, &_lock);
929 clk_register_clkdev(clk, NULL, "stmmacphy.3"); 927 clk_register_clkdev(clk, NULL, "stmmacphy.3");
930 928
931 clk = clk_register_mux(NULL, "uart1_mux_clk", uart_parents, 929 clk = clk_register_mux(NULL, "uart1_mclk", uart_parents,
932 ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0, 930 ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
933 SPEAR1310_UART1_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK, 931 SPEAR1310_UART1_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
934 0, &_lock); 932 0, &_lock);
935 clk_register_clkdev(clk, "uart1_mux_clk", NULL); 933 clk_register_clkdev(clk, "uart1_mclk", NULL);
936 934
937 clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0, 935 clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
938 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART1_CLK_ENB, 0, 936 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART1_CLK_ENB, 0,
939 &_lock); 937 &_lock);
940 clk_register_clkdev(clk, NULL, "5c800000.serial"); 938 clk_register_clkdev(clk, NULL, "5c800000.serial");
941 939
942 clk = clk_register_mux(NULL, "uart2_mux_clk", uart_parents, 940 clk = clk_register_mux(NULL, "uart2_mclk", uart_parents,
943 ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0, 941 ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
944 SPEAR1310_UART2_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK, 942 SPEAR1310_UART2_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
945 0, &_lock); 943 0, &_lock);
946 clk_register_clkdev(clk, "uart2_mux_clk", NULL); 944 clk_register_clkdev(clk, "uart2_mclk", NULL);
947 945
948 clk = clk_register_gate(NULL, "uart2_clk", "uart2_mux_clk", 0, 946 clk = clk_register_gate(NULL, "uart2_clk", "uart2_mclk", 0,
949 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART2_CLK_ENB, 0, 947 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART2_CLK_ENB, 0,
950 &_lock); 948 &_lock);
951 clk_register_clkdev(clk, NULL, "5c900000.serial"); 949 clk_register_clkdev(clk, NULL, "5c900000.serial");
952 950
953 clk = clk_register_mux(NULL, "uart3_mux_clk", uart_parents, 951 clk = clk_register_mux(NULL, "uart3_mclk", uart_parents,
954 ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0, 952 ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
955 SPEAR1310_UART3_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK, 953 SPEAR1310_UART3_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
956 0, &_lock); 954 0, &_lock);
957 clk_register_clkdev(clk, "uart3_mux_clk", NULL); 955 clk_register_clkdev(clk, "uart3_mclk", NULL);
958 956
959 clk = clk_register_gate(NULL, "uart3_clk", "uart3_mux_clk", 0, 957 clk = clk_register_gate(NULL, "uart3_clk", "uart3_mclk", 0,
960 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART3_CLK_ENB, 0, 958 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART3_CLK_ENB, 0,
961 &_lock); 959 &_lock);
962 clk_register_clkdev(clk, NULL, "5ca00000.serial"); 960 clk_register_clkdev(clk, NULL, "5ca00000.serial");
963 961
964 clk = clk_register_mux(NULL, "uart4_mux_clk", uart_parents, 962 clk = clk_register_mux(NULL, "uart4_mclk", uart_parents,
965 ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0, 963 ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
966 SPEAR1310_UART4_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK, 964 SPEAR1310_UART4_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
967 0, &_lock); 965 0, &_lock);
968 clk_register_clkdev(clk, "uart4_mux_clk", NULL); 966 clk_register_clkdev(clk, "uart4_mclk", NULL);
969 967
970 clk = clk_register_gate(NULL, "uart4_clk", "uart4_mux_clk", 0, 968 clk = clk_register_gate(NULL, "uart4_clk", "uart4_mclk", 0,
971 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART4_CLK_ENB, 0, 969 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART4_CLK_ENB, 0,
972 &_lock); 970 &_lock);
973 clk_register_clkdev(clk, NULL, "5cb00000.serial"); 971 clk_register_clkdev(clk, NULL, "5cb00000.serial");
974 972
975 clk = clk_register_mux(NULL, "uart5_mux_clk", uart_parents, 973 clk = clk_register_mux(NULL, "uart5_mclk", uart_parents,
976 ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0, 974 ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
977 SPEAR1310_UART5_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK, 975 SPEAR1310_UART5_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
978 0, &_lock); 976 0, &_lock);
979 clk_register_clkdev(clk, "uart5_mux_clk", NULL); 977 clk_register_clkdev(clk, "uart5_mclk", NULL);
980 978
981 clk = clk_register_gate(NULL, "uart5_clk", "uart5_mux_clk", 0, 979 clk = clk_register_gate(NULL, "uart5_clk", "uart5_mclk", 0,
982 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART5_CLK_ENB, 0, 980 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART5_CLK_ENB, 0,
983 &_lock); 981 &_lock);
984 clk_register_clkdev(clk, NULL, "5cc00000.serial"); 982 clk_register_clkdev(clk, NULL, "5cc00000.serial");
985 983
986 clk = clk_register_mux(NULL, "i2c1_mux_clk", i2c_parents, 984 clk = clk_register_mux(NULL, "i2c1_mclk", i2c_parents,
987 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0, 985 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
988 SPEAR1310_I2C1_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0, 986 SPEAR1310_I2C1_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
989 &_lock); 987 &_lock);
990 clk_register_clkdev(clk, "i2c1_mux_clk", NULL); 988 clk_register_clkdev(clk, "i2c1_mclk", NULL);
991 989
992 clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mux_clk", 0, 990 clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mclk", 0,
993 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C1_CLK_ENB, 0, 991 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C1_CLK_ENB, 0,
994 &_lock); 992 &_lock);
995 clk_register_clkdev(clk, NULL, "5cd00000.i2c"); 993 clk_register_clkdev(clk, NULL, "5cd00000.i2c");
996 994
997 clk = clk_register_mux(NULL, "i2c2_mux_clk", i2c_parents, 995 clk = clk_register_mux(NULL, "i2c2_mclk", i2c_parents,
998 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0, 996 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
999 SPEAR1310_I2C2_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0, 997 SPEAR1310_I2C2_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
1000 &_lock); 998 &_lock);
1001 clk_register_clkdev(clk, "i2c2_mux_clk", NULL); 999 clk_register_clkdev(clk, "i2c2_mclk", NULL);
1002 1000
1003 clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mux_clk", 0, 1001 clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mclk", 0,
1004 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C2_CLK_ENB, 0, 1002 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C2_CLK_ENB, 0,
1005 &_lock); 1003 &_lock);
1006 clk_register_clkdev(clk, NULL, "5ce00000.i2c"); 1004 clk_register_clkdev(clk, NULL, "5ce00000.i2c");
1007 1005
1008 clk = clk_register_mux(NULL, "i2c3_mux_clk", i2c_parents, 1006 clk = clk_register_mux(NULL, "i2c3_mclk", i2c_parents,
1009 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0, 1007 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
1010 SPEAR1310_I2C3_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0, 1008 SPEAR1310_I2C3_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
1011 &_lock); 1009 &_lock);
1012 clk_register_clkdev(clk, "i2c3_mux_clk", NULL); 1010 clk_register_clkdev(clk, "i2c3_mclk", NULL);
1013 1011
1014 clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mux_clk", 0, 1012 clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mclk", 0,
1015 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C3_CLK_ENB, 0, 1013 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C3_CLK_ENB, 0,
1016 &_lock); 1014 &_lock);
1017 clk_register_clkdev(clk, NULL, "5cf00000.i2c"); 1015 clk_register_clkdev(clk, NULL, "5cf00000.i2c");
1018 1016
1019 clk = clk_register_mux(NULL, "i2c4_mux_clk", i2c_parents, 1017 clk = clk_register_mux(NULL, "i2c4_mclk", i2c_parents,
1020 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0, 1018 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
1021 SPEAR1310_I2C4_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0, 1019 SPEAR1310_I2C4_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
1022 &_lock); 1020 &_lock);
1023 clk_register_clkdev(clk, "i2c4_mux_clk", NULL); 1021 clk_register_clkdev(clk, "i2c4_mclk", NULL);
1024 1022
1025 clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mux_clk", 0, 1023 clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mclk", 0,
1026 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C4_CLK_ENB, 0, 1024 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C4_CLK_ENB, 0,
1027 &_lock); 1025 &_lock);
1028 clk_register_clkdev(clk, NULL, "5d000000.i2c"); 1026 clk_register_clkdev(clk, NULL, "5d000000.i2c");
1029 1027
1030 clk = clk_register_mux(NULL, "i2c5_mux_clk", i2c_parents, 1028 clk = clk_register_mux(NULL, "i2c5_mclk", i2c_parents,
1031 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0, 1029 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
1032 SPEAR1310_I2C5_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0, 1030 SPEAR1310_I2C5_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
1033 &_lock); 1031 &_lock);
1034 clk_register_clkdev(clk, "i2c5_mux_clk", NULL); 1032 clk_register_clkdev(clk, "i2c5_mclk", NULL);
1035 1033
1036 clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mux_clk", 0, 1034 clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mclk", 0,
1037 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C5_CLK_ENB, 0, 1035 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C5_CLK_ENB, 0,
1038 &_lock); 1036 &_lock);
1039 clk_register_clkdev(clk, NULL, "5d100000.i2c"); 1037 clk_register_clkdev(clk, NULL, "5d100000.i2c");
1040 1038
1041 clk = clk_register_mux(NULL, "i2c6_mux_clk", i2c_parents, 1039 clk = clk_register_mux(NULL, "i2c6_mclk", i2c_parents,
1042 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0, 1040 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
1043 SPEAR1310_I2C6_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0, 1041 SPEAR1310_I2C6_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
1044 &_lock); 1042 &_lock);
1045 clk_register_clkdev(clk, "i2c6_mux_clk", NULL); 1043 clk_register_clkdev(clk, "i2c6_mclk", NULL);
1046 1044
1047 clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mux_clk", 0, 1045 clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mclk", 0,
1048 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C6_CLK_ENB, 0, 1046 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C6_CLK_ENB, 0,
1049 &_lock); 1047 &_lock);
1050 clk_register_clkdev(clk, NULL, "5d200000.i2c"); 1048 clk_register_clkdev(clk, NULL, "5d200000.i2c");
1051 1049
1052 clk = clk_register_mux(NULL, "i2c7_mux_clk", i2c_parents, 1050 clk = clk_register_mux(NULL, "i2c7_mclk", i2c_parents,
1053 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0, 1051 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
1054 SPEAR1310_I2C7_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0, 1052 SPEAR1310_I2C7_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
1055 &_lock); 1053 &_lock);
1056 clk_register_clkdev(clk, "i2c7_mux_clk", NULL); 1054 clk_register_clkdev(clk, "i2c7_mclk", NULL);
1057 1055
1058 clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mux_clk", 0, 1056 clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mclk", 0,
1059 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C7_CLK_ENB, 0, 1057 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C7_CLK_ENB, 0,
1060 &_lock); 1058 &_lock);
1061 clk_register_clkdev(clk, NULL, "5d300000.i2c"); 1059 clk_register_clkdev(clk, NULL, "5d300000.i2c");
1062 1060
1063 clk = clk_register_mux(NULL, "ssp1_mux_clk", ssp1_parents, 1061 clk = clk_register_mux(NULL, "ssp1_mclk", ssp1_parents,
1064 ARRAY_SIZE(ssp1_parents), 0, SPEAR1310_RAS_CTRL_REG0, 1062 ARRAY_SIZE(ssp1_parents), 0, SPEAR1310_RAS_CTRL_REG0,
1065 SPEAR1310_SSP1_CLK_SHIFT, SPEAR1310_SSP1_CLK_MASK, 0, 1063 SPEAR1310_SSP1_CLK_SHIFT, SPEAR1310_SSP1_CLK_MASK, 0,
1066 &_lock); 1064 &_lock);
1067 clk_register_clkdev(clk, "ssp1_mux_clk", NULL); 1065 clk_register_clkdev(clk, "ssp1_mclk", NULL);
1068 1066
1069 clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mux_clk", 0, 1067 clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mclk", 0,
1070 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_SSP1_CLK_ENB, 0, 1068 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_SSP1_CLK_ENB, 0,
1071 &_lock); 1069 &_lock);
1072 clk_register_clkdev(clk, NULL, "5d400000.spi"); 1070 clk_register_clkdev(clk, NULL, "5d400000.spi");
1073 1071
1074 clk = clk_register_mux(NULL, "pci_mux_clk", pci_parents, 1072 clk = clk_register_mux(NULL, "pci_mclk", pci_parents,
1075 ARRAY_SIZE(pci_parents), 0, SPEAR1310_RAS_CTRL_REG0, 1073 ARRAY_SIZE(pci_parents), 0, SPEAR1310_RAS_CTRL_REG0,
1076 SPEAR1310_PCI_CLK_SHIFT, SPEAR1310_PCI_CLK_MASK, 0, 1074 SPEAR1310_PCI_CLK_SHIFT, SPEAR1310_PCI_CLK_MASK, 0,
1077 &_lock); 1075 &_lock);
1078 clk_register_clkdev(clk, "pci_mux_clk", NULL); 1076 clk_register_clkdev(clk, "pci_mclk", NULL);
1079 1077
1080 clk = clk_register_gate(NULL, "pci_clk", "pci_mux_clk", 0, 1078 clk = clk_register_gate(NULL, "pci_clk", "pci_mclk", 0,
1081 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_PCI_CLK_ENB, 0, 1079 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_PCI_CLK_ENB, 0,
1082 &_lock); 1080 &_lock);
1083 clk_register_clkdev(clk, NULL, "pci"); 1081 clk_register_clkdev(clk, NULL, "pci");
1084 1082
1085 clk = clk_register_mux(NULL, "tdm1_mux_clk", tdm_parents, 1083 clk = clk_register_mux(NULL, "tdm1_mclk", tdm_parents,
1086 ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0, 1084 ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
1087 SPEAR1310_TDM1_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0, 1085 SPEAR1310_TDM1_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
1088 &_lock); 1086 &_lock);
1089 clk_register_clkdev(clk, "tdm1_mux_clk", NULL); 1087 clk_register_clkdev(clk, "tdm1_mclk", NULL);
1090 1088
1091 clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mux_clk", 0, 1089 clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mclk", 0,
1092 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM1_CLK_ENB, 0, 1090 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM1_CLK_ENB, 0,
1093 &_lock); 1091 &_lock);
1094 clk_register_clkdev(clk, NULL, "tdm_hdlc.0"); 1092 clk_register_clkdev(clk, NULL, "tdm_hdlc.0");
1095 1093
1096 clk = clk_register_mux(NULL, "tdm2_mux_clk", tdm_parents, 1094 clk = clk_register_mux(NULL, "tdm2_mclk", tdm_parents,
1097 ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0, 1095 ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
1098 SPEAR1310_TDM2_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0, 1096 SPEAR1310_TDM2_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
1099 &_lock); 1097 &_lock);
1100 clk_register_clkdev(clk, "tdm2_mux_clk", NULL); 1098 clk_register_clkdev(clk, "tdm2_mclk", NULL);
1101 1099
1102 clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mux_clk", 0, 1100 clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mclk", 0,
1103 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM2_CLK_ENB, 0, 1101 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM2_CLK_ENB, 0,
1104 &_lock); 1102 &_lock);
1105 clk_register_clkdev(clk, NULL, "tdm_hdlc.1"); 1103 clk_register_clkdev(clk, NULL, "tdm_hdlc.1");
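
Taken together, the spear1310 hunks above are a mechanical rename: "synth" becomes "syn", "gate_clk" becomes "gclk", "mux_clk" becomes "mclk", and "gmii_125m_pad_clk" becomes "gmii_pad_clk". A plausible motivation (an assumption; this page shows only the diff, not the commit message) is clkdev's fixed-size con_id buffer: in drivers/clk/clkdev.c of this era, clk_register_clkdev copies the con_id into a MAX_CON_ID-sized array of 16 bytes, so 20-character names such as "sdhci_synth_gate_clk" would be truncated. A minimal user-space sketch of that length check follows; MAX_CON_ID mirrors the kernel definition and the sample names come from the hunks above:

#include <stdio.h>
#include <string.h>

/* Mirrors MAX_CON_ID from drivers/clk/clkdev.c of this era (assumption). */
#define MAX_CON_ID 16

/* Old name from the left column, new name from the right column. */
static const char *pairs[][2] = {
	{ "sdhci_synth_gate_clk",   "sdhci_syn_gclk" },
	{ "uart_synth_gate_clk",    "uart_syn_gclk"  },
	{ "gmac_phy_input_mux_clk", "phy_input_mclk" },
};

int main(void)
{
	size_t i;

	/* A con_id fits clkdev's buffer only if strlen(name) < MAX_CON_ID. */
	for (i = 0; i < sizeof(pairs) / sizeof(pairs[0]); i++)
		printf("%-24s (%2zu) %-9s -> %-16s (%2zu) %s\n",
		       pairs[i][0], strlen(pairs[i][0]),
		       strlen(pairs[i][0]) < MAX_CON_ID ? "fits" : "truncated",
		       pairs[i][1], strlen(pairs[i][1]),
		       strlen(pairs[i][1]) < MAX_CON_ID ? "fits" : "truncated");
	return 0;
}

Every shortened name in both files lands under that 16-character bound, while lookups that already fit ("pll1_clk", "cpu_clk", and friends) are left untouched.
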
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index f130919d5bf8..2352cee7f645 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -4,7 +4,7 @@
4 * SPEAr1340 machine clock framework source file 4 * SPEAr1340 machine clock framework source file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
@@ -369,27 +369,25 @@ static struct frac_rate_tbl gen_rtbl[] = {
369 369
370/* clock parents */ 370/* clock parents */
371static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", }; 371static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
372static const char *sys_parents[] = { "none", "pll1_clk", "none", "none", 372static const char *sys_parents[] = { "pll1_clk", "pll1_clk", "pll1_clk",
373 "sys_synth_clk", "none", "pll2_clk", "pll3_clk", }; 373 "pll1_clk", "sys_synth_clk", "sys_synth_clk", "pll2_clk", "pll3_clk", };
374static const char *ahb_parents[] = { "cpu_div3_clk", "amba_synth_clk", }; 374static const char *ahb_parents[] = { "cpu_div3_clk", "amba_syn_clk", };
375static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", }; 375static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
376static const char *uart0_parents[] = { "pll5_clk", "osc_24m_clk", 376static const char *uart0_parents[] = { "pll5_clk", "osc_24m_clk",
377 "uart0_synth_gate_clk", }; 377 "uart0_syn_gclk", };
378static const char *uart1_parents[] = { "pll5_clk", "osc_24m_clk", 378static const char *uart1_parents[] = { "pll5_clk", "osc_24m_clk",
379 "uart1_synth_gate_clk", }; 379 "uart1_syn_gclk", };
380static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", }; 380static const char *c3_parents[] = { "pll5_clk", "c3_syn_gclk", };
381static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk", 381static const char *gmac_phy_input_parents[] = { "gmii_pad_clk", "pll2_clk",
382 "osc_25m_clk", }; 382 "osc_25m_clk", };
383static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk", 383static const char *gmac_phy_parents[] = { "phy_input_mclk", "phy_syn_gclk", };
384 "gmac_phy_synth_gate_clk", };
385static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", }; 384static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
386static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", }; 385static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_syn_clk", };
387static const char *i2s_src_parents[] = { "vco1div2_clk", "pll2_clk", "pll3_clk", 386static const char *i2s_src_parents[] = { "vco1div2_clk", "pll2_clk", "pll3_clk",
388 "i2s_src_pad_clk", }; 387 "i2s_src_pad_clk", };
389static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", }; 388static const char *i2s_ref_parents[] = { "i2s_src_mclk", "i2s_prs1_clk", };
390static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_synth2_clk", 389static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_syn2_clk", };
391}; 390static const char *spdif_in_parents[] = { "pll2_clk", "gen_syn3_clk", };
392static const char *spdif_in_parents[] = { "pll2_clk", "gen_synth3_clk", };
393 391
394static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk", 392static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
395 "pll3_clk", }; 393 "pll3_clk", };
@@ -415,9 +413,9 @@ void __init spear1340_clk_init(void)
415 25000000); 413 25000000);
416 clk_register_clkdev(clk, "osc_25m_clk", NULL); 414 clk_register_clkdev(clk, "osc_25m_clk", NULL);
417 415
418 clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL, 416 clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT,
419 CLK_IS_ROOT, 125000000); 417 125000000);
420 clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL); 418 clk_register_clkdev(clk, "gmii_pad_clk", NULL);
421 419
422 clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL, 420 clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
423 CLK_IS_ROOT, 12288000); 421 CLK_IS_ROOT, 12288000);
@@ -431,35 +429,35 @@ void __init spear1340_clk_init(void)
431 429
432 /* clock derived from 24 or 25 MHz osc clk */ 430 /* clock derived from 24 or 25 MHz osc clk */
433 /* vco-pll */ 431 /* vco-pll */
434 clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents, 432 clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
435 ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG, 433 ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
436 SPEAR1340_PLL1_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0, 434 SPEAR1340_PLL1_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
437 &_lock); 435 &_lock);
438 clk_register_clkdev(clk, "vco1_mux_clk", NULL); 436 clk_register_clkdev(clk, "vco1_mclk", NULL);
439 clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk", 437 clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk", 0,
440 0, SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl, 438 SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
441 ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL); 439 ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
442 clk_register_clkdev(clk, "vco1_clk", NULL); 440 clk_register_clkdev(clk, "vco1_clk", NULL);
443 clk_register_clkdev(clk1, "pll1_clk", NULL); 441 clk_register_clkdev(clk1, "pll1_clk", NULL);
444 442
445 clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents, 443 clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
446 ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG, 444 ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
447 SPEAR1340_PLL2_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0, 445 SPEAR1340_PLL2_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
448 &_lock); 446 &_lock);
449 clk_register_clkdev(clk, "vco2_mux_clk", NULL); 447 clk_register_clkdev(clk, "vco2_mclk", NULL);
450 clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk", 448 clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk", 0,
451 0, SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl, 449 SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
452 ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL); 450 ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
453 clk_register_clkdev(clk, "vco2_clk", NULL); 451 clk_register_clkdev(clk, "vco2_clk", NULL);
454 clk_register_clkdev(clk1, "pll2_clk", NULL); 452 clk_register_clkdev(clk1, "pll2_clk", NULL);
455 453
456 clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents, 454 clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
457 ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG, 455 ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
458 SPEAR1340_PLL3_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0, 456 SPEAR1340_PLL3_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
459 &_lock); 457 &_lock);
460 clk_register_clkdev(clk, "vco3_mux_clk", NULL); 458 clk_register_clkdev(clk, "vco3_mclk", NULL);
461 clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk", 459 clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk", 0,
462 0, SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl, 460 SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
463 ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL); 461 ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
464 clk_register_clkdev(clk, "vco3_clk", NULL); 462 clk_register_clkdev(clk, "vco3_clk", NULL);
465 clk_register_clkdev(clk1, "pll3_clk", NULL); 463 clk_register_clkdev(clk1, "pll3_clk", NULL);
@@ -498,7 +496,7 @@ void __init spear1340_clk_init(void)
498 /* peripherals */ 496 /* peripherals */
499 clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1, 497 clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
500 128); 498 128);
501 clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0, 499 clk = clk_register_gate(NULL, "thermal_gclk", "thermal_clk", 0,
502 SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_THSENS_CLK_ENB, 0, 500 SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_THSENS_CLK_ENB, 0,
503 &_lock); 501 &_lock);
504 clk_register_clkdev(clk, NULL, "spear_thermal"); 502 clk_register_clkdev(clk, NULL, "spear_thermal");
@@ -509,23 +507,23 @@ void __init spear1340_clk_init(void)
509 clk_register_clkdev(clk, "ddr_clk", NULL); 507 clk_register_clkdev(clk, "ddr_clk", NULL);
510 508
511 /* clock derived from pll1 clk */ 509 /* clock derived from pll1 clk */
512 clk = clk_register_frac("sys_synth_clk", "vco1div2_clk", 0, 510 clk = clk_register_frac("sys_syn_clk", "vco1div2_clk", 0,
513 SPEAR1340_SYS_CLK_SYNT, sys_synth_rtbl, 511 SPEAR1340_SYS_CLK_SYNT, sys_synth_rtbl,
514 ARRAY_SIZE(sys_synth_rtbl), &_lock); 512 ARRAY_SIZE(sys_synth_rtbl), &_lock);
515 clk_register_clkdev(clk, "sys_synth_clk", NULL); 513 clk_register_clkdev(clk, "sys_syn_clk", NULL);
516 514
517 clk = clk_register_frac("amba_synth_clk", "vco1div2_clk", 0, 515 clk = clk_register_frac("amba_syn_clk", "vco1div2_clk", 0,
518 SPEAR1340_AMBA_CLK_SYNT, amba_synth_rtbl, 516 SPEAR1340_AMBA_CLK_SYNT, amba_synth_rtbl,
519 ARRAY_SIZE(amba_synth_rtbl), &_lock); 517 ARRAY_SIZE(amba_synth_rtbl), &_lock);
520 clk_register_clkdev(clk, "amba_synth_clk", NULL); 518 clk_register_clkdev(clk, "amba_syn_clk", NULL);
521 519
522 clk = clk_register_mux(NULL, "sys_mux_clk", sys_parents, 520 clk = clk_register_mux(NULL, "sys_mclk", sys_parents,
523 ARRAY_SIZE(sys_parents), 0, SPEAR1340_SYS_CLK_CTRL, 521 ARRAY_SIZE(sys_parents), 0, SPEAR1340_SYS_CLK_CTRL,
524 SPEAR1340_SCLK_SRC_SEL_SHIFT, 522 SPEAR1340_SCLK_SRC_SEL_SHIFT,
525 SPEAR1340_SCLK_SRC_SEL_MASK, 0, &_lock); 523 SPEAR1340_SCLK_SRC_SEL_MASK, 0, &_lock);
526 clk_register_clkdev(clk, "sys_clk", NULL); 524 clk_register_clkdev(clk, "sys_clk", NULL);
527 525
528 clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mux_clk", 0, 1, 526 clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mclk", 0, 1,
529 2); 527 2);
530 clk_register_clkdev(clk, "cpu_clk", NULL); 528 clk_register_clkdev(clk, "cpu_clk", NULL);
531 529
@@ -548,194 +546,193 @@ void __init spear1340_clk_init(void)
548 clk_register_clkdev(clk, "apb_clk", NULL); 546 clk_register_clkdev(clk, "apb_clk", NULL);
549 547
550 /* gpt clocks */ 548 /* gpt clocks */
551 clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents, 549 clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
552 ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG, 550 ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
553 SPEAR1340_GPT0_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0, 551 SPEAR1340_GPT0_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
554 &_lock); 552 &_lock);
555 clk_register_clkdev(clk, "gpt0_mux_clk", NULL); 553 clk_register_clkdev(clk, "gpt0_mclk", NULL);
556 clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0, 554 clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
557 SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT0_CLK_ENB, 0, 555 SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT0_CLK_ENB, 0,
558 &_lock); 556 &_lock);
559 clk_register_clkdev(clk, NULL, "gpt0"); 557 clk_register_clkdev(clk, NULL, "gpt0");
560 558
561 clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents, 559 clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
562 ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG, 560 ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
563 SPEAR1340_GPT1_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0, 561 SPEAR1340_GPT1_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
564 &_lock); 562 &_lock);
565 clk_register_clkdev(clk, "gpt1_mux_clk", NULL); 563 clk_register_clkdev(clk, "gpt1_mclk", NULL);
566 clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0, 564 clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
567 SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT1_CLK_ENB, 0, 565 SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT1_CLK_ENB, 0,
568 &_lock); 566 &_lock);
569 clk_register_clkdev(clk, NULL, "gpt1"); 567 clk_register_clkdev(clk, NULL, "gpt1");
570 568
571 clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents, 569 clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
572 ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG, 570 ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
573 SPEAR1340_GPT2_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0, 571 SPEAR1340_GPT2_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
574 &_lock); 572 &_lock);
575 clk_register_clkdev(clk, "gpt2_mux_clk", NULL); 573 clk_register_clkdev(clk, "gpt2_mclk", NULL);
576 clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0, 574 clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
577 SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT2_CLK_ENB, 0, 575 SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT2_CLK_ENB, 0,
578 &_lock); 576 &_lock);
579 clk_register_clkdev(clk, NULL, "gpt2"); 577 clk_register_clkdev(clk, NULL, "gpt2");
580 578
581 clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents, 579 clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
582 ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG, 580 ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
583 SPEAR1340_GPT3_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0, 581 SPEAR1340_GPT3_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
584 &_lock); 582 &_lock);
585 clk_register_clkdev(clk, "gpt3_mux_clk", NULL); 583 clk_register_clkdev(clk, "gpt3_mclk", NULL);
586 clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0, 584 clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
587 SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT3_CLK_ENB, 0, 585 SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT3_CLK_ENB, 0,
588 &_lock); 586 &_lock);
589 clk_register_clkdev(clk, NULL, "gpt3"); 587 clk_register_clkdev(clk, NULL, "gpt3");
590 588
591 /* others */ 589 /* others */
592 clk = clk_register_aux("uart0_synth_clk", "uart0_synth_gate_clk", 590 clk = clk_register_aux("uart0_syn_clk", "uart0_syn_gclk",
593 "vco1div2_clk", 0, SPEAR1340_UART0_CLK_SYNT, NULL, 591 "vco1div2_clk", 0, SPEAR1340_UART0_CLK_SYNT, NULL,
594 aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); 592 aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
595 clk_register_clkdev(clk, "uart0_synth_clk", NULL); 593 clk_register_clkdev(clk, "uart0_syn_clk", NULL);
596 clk_register_clkdev(clk1, "uart0_synth_gate_clk", NULL); 594 clk_register_clkdev(clk1, "uart0_syn_gclk", NULL);
597 595
598 clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents, 596 clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
599 ARRAY_SIZE(uart0_parents), 0, SPEAR1340_PERIP_CLK_CFG, 597 ARRAY_SIZE(uart0_parents), 0, SPEAR1340_PERIP_CLK_CFG,
600 SPEAR1340_UART0_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0, 598 SPEAR1340_UART0_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
601 &_lock); 599 &_lock);
602 clk_register_clkdev(clk, "uart0_mux_clk", NULL); 600 clk_register_clkdev(clk, "uart0_mclk", NULL);
603 601
604 clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0, 602 clk = clk_register_gate(NULL, "uart0_clk", "uart0_mclk", 0,
605 SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART0_CLK_ENB, 0, 603 SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART0_CLK_ENB, 0,
606 &_lock); 604 &_lock);
607 clk_register_clkdev(clk, NULL, "e0000000.serial"); 605 clk_register_clkdev(clk, NULL, "e0000000.serial");
608 606
609 clk = clk_register_aux("uart1_synth_clk", "uart1_synth_gate_clk", 607 clk = clk_register_aux("uart1_syn_clk", "uart1_syn_gclk",
610 "vco1div2_clk", 0, SPEAR1340_UART1_CLK_SYNT, NULL, 608 "vco1div2_clk", 0, SPEAR1340_UART1_CLK_SYNT, NULL,
611 aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); 609 aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
612 clk_register_clkdev(clk, "uart1_synth_clk", NULL); 610 clk_register_clkdev(clk, "uart1_syn_clk", NULL);
613 clk_register_clkdev(clk1, "uart1_synth_gate_clk", NULL); 611 clk_register_clkdev(clk1, "uart1_syn_gclk", NULL);
614 612
615 clk = clk_register_mux(NULL, "uart1_mux_clk", uart1_parents, 613 clk = clk_register_mux(NULL, "uart1_mclk", uart1_parents,
616 ARRAY_SIZE(uart1_parents), 0, SPEAR1340_PERIP_CLK_CFG, 614 ARRAY_SIZE(uart1_parents), 0, SPEAR1340_PERIP_CLK_CFG,
617 SPEAR1340_UART1_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0, 615 SPEAR1340_UART1_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
618 &_lock); 616 &_lock);
619 clk_register_clkdev(clk, "uart1_mux_clk", NULL); 617 clk_register_clkdev(clk, "uart1_mclk", NULL);
620 618
621 clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0, 619 clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
622 SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0, 620 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0,
623 &_lock); 621 &_lock);
624 clk_register_clkdev(clk, NULL, "b4100000.serial"); 622 clk_register_clkdev(clk, NULL, "b4100000.serial");
625 623
626 clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk", 624 clk = clk_register_aux("sdhci_syn_clk", "sdhci_syn_gclk",
627 "vco1div2_clk", 0, SPEAR1340_SDHCI_CLK_SYNT, NULL, 625 "vco1div2_clk", 0, SPEAR1340_SDHCI_CLK_SYNT, NULL,
628 aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); 626 aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
629 clk_register_clkdev(clk, "sdhci_synth_clk", NULL); 627 clk_register_clkdev(clk, "sdhci_syn_clk", NULL);
630 clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL); 628 clk_register_clkdev(clk1, "sdhci_syn_gclk", NULL);
631 629
632 clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0, 630 clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_syn_gclk", 0,
633 SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SDHCI_CLK_ENB, 0, 631 SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SDHCI_CLK_ENB, 0,
634 &_lock); 632 &_lock);
635 clk_register_clkdev(clk, NULL, "b3000000.sdhci"); 633 clk_register_clkdev(clk, NULL, "b3000000.sdhci");
636 634
637 clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk", 635 clk = clk_register_aux("cfxd_syn_clk", "cfxd_syn_gclk", "vco1div2_clk",
638 "vco1div2_clk", 0, SPEAR1340_CFXD_CLK_SYNT, NULL, 636 0, SPEAR1340_CFXD_CLK_SYNT, NULL, aux_rtbl,
639 aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); 637 ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
640 clk_register_clkdev(clk, "cfxd_synth_clk", NULL); 638 clk_register_clkdev(clk, "cfxd_syn_clk", NULL);
641 clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL); 639 clk_register_clkdev(clk1, "cfxd_syn_gclk", NULL);
642 640
643 clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0, 641 clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_syn_gclk", 0,
644 SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CFXD_CLK_ENB, 0, 642 SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CFXD_CLK_ENB, 0,
645 &_lock); 643 &_lock);
646 clk_register_clkdev(clk, NULL, "b2800000.cf"); 644 clk_register_clkdev(clk, NULL, "b2800000.cf");
647 clk_register_clkdev(clk, NULL, "arasan_xd"); 645 clk_register_clkdev(clk, NULL, "arasan_xd");
648 646
649 clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk", 647 clk = clk_register_aux("c3_syn_clk", "c3_syn_gclk", "vco1div2_clk", 0,
650 "vco1div2_clk", 0, SPEAR1340_C3_CLK_SYNT, NULL, 648 SPEAR1340_C3_CLK_SYNT, NULL, aux_rtbl,
651 aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); 649 ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
652 clk_register_clkdev(clk, "c3_synth_clk", NULL); 650 clk_register_clkdev(clk, "c3_syn_clk", NULL);
653 clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL); 651 clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
654 652
655 clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents, 653 clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
656 ARRAY_SIZE(c3_parents), 0, SPEAR1340_PERIP_CLK_CFG, 654 ARRAY_SIZE(c3_parents), 0, SPEAR1340_PERIP_CLK_CFG,
657 SPEAR1340_C3_CLK_SHIFT, SPEAR1340_C3_CLK_MASK, 0, 655 SPEAR1340_C3_CLK_SHIFT, SPEAR1340_C3_CLK_MASK, 0,
658 &_lock); 656 &_lock);
659 clk_register_clkdev(clk, "c3_mux_clk", NULL); 657 clk_register_clkdev(clk, "c3_mclk", NULL);
660 658
661 clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0, 659 clk = clk_register_gate(NULL, "c3_clk", "c3_mclk", 0,
662 SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_C3_CLK_ENB, 0, 660 SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_C3_CLK_ENB, 0,
663 &_lock); 661 &_lock);
664 clk_register_clkdev(clk, NULL, "c3"); 662 clk_register_clkdev(clk, NULL, "c3");
665 663
666 /* gmac */ 664 /* gmac */
667 clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk", 665 clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
668 gmac_phy_input_parents,
669 ARRAY_SIZE(gmac_phy_input_parents), 0, 666 ARRAY_SIZE(gmac_phy_input_parents), 0,
670 SPEAR1340_GMAC_CLK_CFG, 667 SPEAR1340_GMAC_CLK_CFG,
671 SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT, 668 SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT,
672 SPEAR1340_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock); 669 SPEAR1340_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
673 clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL); 670 clk_register_clkdev(clk, "phy_input_mclk", NULL);
674 671
675 clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk", 672 clk = clk_register_aux("phy_syn_clk", "phy_syn_gclk", "phy_input_mclk",
676 "gmac_phy_input_mux_clk", 0, SPEAR1340_GMAC_CLK_SYNT, 673 0, SPEAR1340_GMAC_CLK_SYNT, NULL, gmac_rtbl,
677 NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1); 674 ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
678 clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL); 675 clk_register_clkdev(clk, "phy_syn_clk", NULL);
679 clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL); 676 clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
680 677
681 clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents, 678 clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
682 ARRAY_SIZE(gmac_phy_parents), 0, 679 ARRAY_SIZE(gmac_phy_parents), 0,
683 SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GMAC_PHY_CLK_SHIFT, 680 SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GMAC_PHY_CLK_SHIFT,
684 SPEAR1340_GMAC_PHY_CLK_MASK, 0, &_lock); 681 SPEAR1340_GMAC_PHY_CLK_MASK, 0, &_lock);
685 clk_register_clkdev(clk, NULL, "stmmacphy.0"); 682 clk_register_clkdev(clk, NULL, "stmmacphy.0");
686 683
687 /* clcd */ 684 /* clcd */
688 clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents, 685 clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
689 ARRAY_SIZE(clcd_synth_parents), 0, 686 ARRAY_SIZE(clcd_synth_parents), 0,
690 SPEAR1340_CLCD_CLK_SYNT, SPEAR1340_CLCD_SYNT_CLK_SHIFT, 687 SPEAR1340_CLCD_CLK_SYNT, SPEAR1340_CLCD_SYNT_CLK_SHIFT,
691 SPEAR1340_CLCD_SYNT_CLK_MASK, 0, &_lock); 688 SPEAR1340_CLCD_SYNT_CLK_MASK, 0, &_lock);
692 clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL); 689 clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
693 690
694 clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0, 691 clk = clk_register_frac("clcd_syn_clk", "clcd_syn_mclk", 0,
695 SPEAR1340_CLCD_CLK_SYNT, clcd_rtbl, 692 SPEAR1340_CLCD_CLK_SYNT, clcd_rtbl,
696 ARRAY_SIZE(clcd_rtbl), &_lock); 693 ARRAY_SIZE(clcd_rtbl), &_lock);
697 clk_register_clkdev(clk, "clcd_synth_clk", NULL); 694 clk_register_clkdev(clk, "clcd_syn_clk", NULL);
698 695
699 clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents, 696 clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
700 ARRAY_SIZE(clcd_pixel_parents), 0, 697 ARRAY_SIZE(clcd_pixel_parents), 0,
701 SPEAR1340_PERIP_CLK_CFG, SPEAR1340_CLCD_CLK_SHIFT, 698 SPEAR1340_PERIP_CLK_CFG, SPEAR1340_CLCD_CLK_SHIFT,
702 SPEAR1340_CLCD_CLK_MASK, 0, &_lock); 699 SPEAR1340_CLCD_CLK_MASK, 0, &_lock);
703 clk_register_clkdev(clk, "clcd_pixel_clk", NULL); 700 clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
704 701
705 clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0, 702 clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mclk", 0,
706 SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CLCD_CLK_ENB, 0, 703 SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CLCD_CLK_ENB, 0,
707 &_lock); 704 &_lock);
708 clk_register_clkdev(clk, "clcd_clk", NULL); 705 clk_register_clkdev(clk, "clcd_clk", NULL);
709 706
710 /* i2s */ 707 /* i2s */
711 clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents, 708 clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
712 ARRAY_SIZE(i2s_src_parents), 0, SPEAR1340_I2S_CLK_CFG, 709 ARRAY_SIZE(i2s_src_parents), 0, SPEAR1340_I2S_CLK_CFG,
713 SPEAR1340_I2S_SRC_CLK_SHIFT, SPEAR1340_I2S_SRC_CLK_MASK, 710 SPEAR1340_I2S_SRC_CLK_SHIFT, SPEAR1340_I2S_SRC_CLK_MASK,
714 0, &_lock); 711 0, &_lock);
715 clk_register_clkdev(clk, "i2s_src_clk", NULL); 712 clk_register_clkdev(clk, "i2s_src_clk", NULL);
716 713
717 clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0, 714 clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk", 0,
718 SPEAR1340_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl, 715 SPEAR1340_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
719 ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL); 716 ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
720 clk_register_clkdev(clk, "i2s_prs1_clk", NULL); 717 clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
721 718
722 clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents, 719 clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
723 ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1340_I2S_CLK_CFG, 720 ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1340_I2S_CLK_CFG,
724 SPEAR1340_I2S_REF_SHIFT, SPEAR1340_I2S_REF_SEL_MASK, 0, 721 SPEAR1340_I2S_REF_SHIFT, SPEAR1340_I2S_REF_SEL_MASK, 0,
725 &_lock); 722 &_lock);
726 clk_register_clkdev(clk, "i2s_ref_clk", NULL); 723 clk_register_clkdev(clk, "i2s_ref_clk", NULL);
727 724
728 clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0, 725 clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mclk", 0,
729 SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_I2S_REF_PAD_CLK_ENB, 726 SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_I2S_REF_PAD_CLK_ENB,
730 0, &_lock); 727 0, &_lock);
731 clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL); 728 clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
732 729
733 clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk", 730 clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gclk", "i2s_ref_mclk",
734 "i2s_ref_mux_clk", 0, SPEAR1340_I2S_CLK_CFG, 731 0, SPEAR1340_I2S_CLK_CFG, &i2s_sclk_masks,
735 &i2s_sclk_masks, i2s_sclk_rtbl, 732 i2s_sclk_rtbl, ARRAY_SIZE(i2s_sclk_rtbl), &_lock,
736 ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1); 733 &clk1);
737 clk_register_clkdev(clk, "i2s_sclk_clk", NULL); 734 clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
738 clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL); 735 clk_register_clkdev(clk1, "i2s_sclk_gclk", NULL);
739 736
740 /* clock derived from ahb clk */ 737 /* clock derived from ahb clk */
741 clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0, 738 clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
@@ -744,7 +741,7 @@ void __init spear1340_clk_init(void)
744 clk_register_clkdev(clk, NULL, "e0280000.i2c"); 741 clk_register_clkdev(clk, NULL, "e0280000.i2c");
745 742
746 clk = clk_register_gate(NULL, "i2c1_clk", "ahb_clk", 0, 743 clk = clk_register_gate(NULL, "i2c1_clk", "ahb_clk", 0,
747 SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0, 744 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0,
748 &_lock); 745 &_lock);
749 clk_register_clkdev(clk, NULL, "b4000000.i2c"); 746 clk_register_clkdev(clk, NULL, "b4000000.i2c");
750 747
@@ -800,13 +797,13 @@ void __init spear1340_clk_init(void)
800 &_lock); 797 &_lock);
801 clk_register_clkdev(clk, "sysram1_clk", NULL); 798 clk_register_clkdev(clk, "sysram1_clk", NULL);
802 799
803 clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk", 800 clk = clk_register_aux("adc_syn_clk", "adc_syn_gclk", "ahb_clk",
804 0, SPEAR1340_ADC_CLK_SYNT, NULL, adc_rtbl, 801 0, SPEAR1340_ADC_CLK_SYNT, NULL, adc_rtbl,
805 ARRAY_SIZE(adc_rtbl), &_lock, &clk1); 802 ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
806 clk_register_clkdev(clk, "adc_synth_clk", NULL); 803 clk_register_clkdev(clk, "adc_syn_clk", NULL);
807 clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL); 804 clk_register_clkdev(clk1, "adc_syn_gclk", NULL);
808 805
809 clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0, 806 clk = clk_register_gate(NULL, "adc_clk", "adc_syn_gclk", 0,
810 SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_ADC_CLK_ENB, 0, 807 SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_ADC_CLK_ENB, 0,
811 &_lock); 808 &_lock);
812 clk_register_clkdev(clk, NULL, "adc_clk"); 809 clk_register_clkdev(clk, NULL, "adc_clk");
@@ -843,39 +840,39 @@ void __init spear1340_clk_init(void)
843 clk_register_clkdev(clk, NULL, "e0300000.kbd"); 840 clk_register_clkdev(clk, NULL, "e0300000.kbd");
844 841
845 /* RAS clks */ 842 /* RAS clks */
846 clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk", 843 clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
847 gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents), 844 ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1340_PLL_CFG,
848 0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT0_1_CLK_SHIFT, 845 SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
849 SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock); 846 SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
850 clk_register_clkdev(clk, "gen_synth0_1_clk", NULL); 847 clk_register_clkdev(clk, "gen_syn0_1_clk", NULL);
851 848
852 clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk", 849 clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
853 gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents), 850 ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1340_PLL_CFG,
854 0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT2_3_CLK_SHIFT, 851 SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
855 SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock); 852 SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
856 clk_register_clkdev(clk, "gen_synth2_3_clk", NULL); 853 clk_register_clkdev(clk, "gen_syn2_3_clk", NULL);
857 854
858 clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0, 855 clk = clk_register_frac("gen_syn0_clk", "gen_syn0_1_clk", 0,
859 SPEAR1340_GEN_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl), 856 SPEAR1340_GEN_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
860 &_lock); 857 &_lock);
861 clk_register_clkdev(clk, "gen_synth0_clk", NULL); 858 clk_register_clkdev(clk, "gen_syn0_clk", NULL);
862 859
863 clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0, 860 clk = clk_register_frac("gen_syn1_clk", "gen_syn0_1_clk", 0,
864 SPEAR1340_GEN_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl), 861 SPEAR1340_GEN_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
865 &_lock); 862 &_lock);
866 clk_register_clkdev(clk, "gen_synth1_clk", NULL); 863 clk_register_clkdev(clk, "gen_syn1_clk", NULL);
867 864
868 clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0, 865 clk = clk_register_frac("gen_syn2_clk", "gen_syn2_3_clk", 0,
869 SPEAR1340_GEN_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl), 866 SPEAR1340_GEN_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
870 &_lock); 867 &_lock);
871 clk_register_clkdev(clk, "gen_synth2_clk", NULL); 868 clk_register_clkdev(clk, "gen_syn2_clk", NULL);
872 869
873 clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0, 870 clk = clk_register_frac("gen_syn3_clk", "gen_syn2_3_clk", 0,
874 SPEAR1340_GEN_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl), 871 SPEAR1340_GEN_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
875 &_lock); 872 &_lock);
876 clk_register_clkdev(clk, "gen_synth3_clk", NULL); 873 clk_register_clkdev(clk, "gen_syn3_clk", NULL);
877 874
878 clk = clk_register_gate(NULL, "mali_clk", "gen_synth3_clk", 0, 875 clk = clk_register_gate(NULL, "mali_clk", "gen_syn3_clk", 0,
879 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_MALI_CLK_ENB, 0, 876 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_MALI_CLK_ENB, 0,
880 &_lock); 877 &_lock);
881 clk_register_clkdev(clk, NULL, "mali"); 878 clk_register_clkdev(clk, NULL, "mali");
@@ -890,74 +887,74 @@ void __init spear1340_clk_init(void)
890 &_lock); 887 &_lock);
891 clk_register_clkdev(clk, NULL, "spear_cec.1"); 888 clk_register_clkdev(clk, NULL, "spear_cec.1");
892 889
893 clk = clk_register_mux(NULL, "spdif_out_mux_clk", spdif_out_parents, 890 clk = clk_register_mux(NULL, "spdif_out_mclk", spdif_out_parents,
894 ARRAY_SIZE(spdif_out_parents), 0, 891 ARRAY_SIZE(spdif_out_parents), 0,
895 SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_OUT_CLK_SHIFT, 892 SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_OUT_CLK_SHIFT,
896 SPEAR1340_SPDIF_CLK_MASK, 0, &_lock); 893 SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
897 clk_register_clkdev(clk, "spdif_out_mux_clk", NULL); 894 clk_register_clkdev(clk, "spdif_out_mclk", NULL);
898 895
899 clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mux_clk", 0, 896 clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mclk", 0,
900 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_OUT_CLK_ENB, 897 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_OUT_CLK_ENB,
901 0, &_lock); 898 0, &_lock);
902 clk_register_clkdev(clk, NULL, "spdif-out"); 899 clk_register_clkdev(clk, NULL, "spdif-out");
903 900
904 clk = clk_register_mux(NULL, "spdif_in_mux_clk", spdif_in_parents, 901 clk = clk_register_mux(NULL, "spdif_in_mclk", spdif_in_parents,
905 ARRAY_SIZE(spdif_in_parents), 0, 902 ARRAY_SIZE(spdif_in_parents), 0,
906 SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_IN_CLK_SHIFT, 903 SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_IN_CLK_SHIFT,
907 SPEAR1340_SPDIF_CLK_MASK, 0, &_lock); 904 SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
908 clk_register_clkdev(clk, "spdif_in_mux_clk", NULL); 905 clk_register_clkdev(clk, "spdif_in_mclk", NULL);
909 906
910 clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mux_clk", 0, 907 clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mclk", 0,
911 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_IN_CLK_ENB, 0, 908 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_IN_CLK_ENB, 0,
912 &_lock); 909 &_lock);
913 clk_register_clkdev(clk, NULL, "spdif-in"); 910 clk_register_clkdev(clk, NULL, "spdif-in");
914 911
915 clk = clk_register_gate(NULL, "acp_clk", "acp_mux_clk", 0, 912 clk = clk_register_gate(NULL, "acp_clk", "acp_mclk", 0,
916 SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_ACP_CLK_ENB, 0, 913 SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_ACP_CLK_ENB, 0,
917 &_lock); 914 &_lock);
918 clk_register_clkdev(clk, NULL, "acp_clk"); 915 clk_register_clkdev(clk, NULL, "acp_clk");
919 916
920 clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mux_clk", 0, 917 clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mclk", 0,
921 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PLGPIO_CLK_ENB, 0, 918 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PLGPIO_CLK_ENB, 0,
922 &_lock); 919 &_lock);
923 clk_register_clkdev(clk, NULL, "plgpio"); 920 clk_register_clkdev(clk, NULL, "plgpio");
924 921
925 clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mux_clk", 0, 922 clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mclk", 0,
926 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_DEC_CLK_ENB, 923 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_DEC_CLK_ENB,
927 0, &_lock); 924 0, &_lock);
928 clk_register_clkdev(clk, NULL, "video_dec"); 925 clk_register_clkdev(clk, NULL, "video_dec");
929 926
930 clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mux_clk", 0, 927 clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mclk", 0,
931 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_ENC_CLK_ENB, 928 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_ENC_CLK_ENB,
932 0, &_lock); 929 0, &_lock);
933 clk_register_clkdev(clk, NULL, "video_enc"); 930 clk_register_clkdev(clk, NULL, "video_enc");
934 931
935 clk = clk_register_gate(NULL, "video_in_clk", "video_in_mux_clk", 0, 932 clk = clk_register_gate(NULL, "video_in_clk", "video_in_mclk", 0,
936 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_IN_CLK_ENB, 0, 933 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_IN_CLK_ENB, 0,
937 &_lock); 934 &_lock);
938 clk_register_clkdev(clk, NULL, "spear_vip"); 935 clk_register_clkdev(clk, NULL, "spear_vip");
939 936
940 clk = clk_register_gate(NULL, "cam0_clk", "cam0_mux_clk", 0, 937 clk = clk_register_gate(NULL, "cam0_clk", "cam0_mclk", 0,
941 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM0_CLK_ENB, 0, 938 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM0_CLK_ENB, 0,
942 &_lock); 939 &_lock);
943 clk_register_clkdev(clk, NULL, "spear_camif.0"); 940 clk_register_clkdev(clk, NULL, "spear_camif.0");
944 941
945 clk = clk_register_gate(NULL, "cam1_clk", "cam1_mux_clk", 0, 942 clk = clk_register_gate(NULL, "cam1_clk", "cam1_mclk", 0,
946 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM1_CLK_ENB, 0, 943 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM1_CLK_ENB, 0,
947 &_lock); 944 &_lock);
948 clk_register_clkdev(clk, NULL, "spear_camif.1"); 945 clk_register_clkdev(clk, NULL, "spear_camif.1");
949 946
950 clk = clk_register_gate(NULL, "cam2_clk", "cam2_mux_clk", 0, 947 clk = clk_register_gate(NULL, "cam2_clk", "cam2_mclk", 0,
951 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM2_CLK_ENB, 0, 948 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM2_CLK_ENB, 0,
952 &_lock); 949 &_lock);
953 clk_register_clkdev(clk, NULL, "spear_camif.2"); 950 clk_register_clkdev(clk, NULL, "spear_camif.2");
954 951
955 clk = clk_register_gate(NULL, "cam3_clk", "cam3_mux_clk", 0, 952 clk = clk_register_gate(NULL, "cam3_clk", "cam3_mclk", 0,
956 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM3_CLK_ENB, 0, 953 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM3_CLK_ENB, 0,
957 &_lock); 954 &_lock);
958 clk_register_clkdev(clk, NULL, "spear_camif.3"); 955 clk_register_clkdev(clk, NULL, "spear_camif.3");
959 956
960 clk = clk_register_gate(NULL, "pwm_clk", "pwm_mux_clk", 0, 957 clk = clk_register_gate(NULL, "pwm_clk", "pwm_mclk", 0,
961 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PWM_CLK_ENB, 0, 958 SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PWM_CLK_ENB, 0,
962 &_lock); 959 &_lock);
963 clk_register_clkdev(clk, NULL, "pwm"); 960 clk_register_clkdev(clk, NULL, "pwm");
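A minimal sketch of the consumer side, not from this patch: clkdev entries are matched by exact con_id/dev_id strings at clk_get() time, which is why every lookup string renamed above must be mirrored by its users. The foo_probe() driver below is hypothetical; only the "adc_clk" dev_id lookup comes from the code above.

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* hypothetical consumer for clk_register_clkdev(clk, NULL, "adc_clk") */
	static int foo_probe(struct device *dev)
	{
		struct clk *clk;

		/* NULL con_id: matched purely against dev_name(dev) == "adc_clk" */
		clk = clk_get(dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* ungates the clock via the enable bit registered above */
		return clk_prepare_enable(clk);
	}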
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index 440bb3e4c971..c3157454bb3f 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -2,7 +2,7 @@
2 * SPEAr3xx machines clock framework source file 2 * SPEAr3xx machines clock framework source file
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -122,12 +122,12 @@ static struct gpt_rate_tbl gpt_rtbl[] = {
122}; 122};
123 123
124/* clock parents */ 124/* clock parents */
125static const char *uart0_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", }; 125static const char *uart0_parents[] = { "pll3_clk", "uart_syn_gclk", };
126static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk", 126static const char *firda_parents[] = { "pll3_clk", "firda_syn_gclk",
127}; 127};
128static const char *gpt0_parents[] = { "pll3_48m_clk", "gpt0_synth_clk", }; 128static const char *gpt0_parents[] = { "pll3_clk", "gpt0_syn_clk", };
129static const char *gpt1_parents[] = { "pll3_48m_clk", "gpt1_synth_clk", }; 129static const char *gpt1_parents[] = { "pll3_clk", "gpt1_syn_clk", };
130static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", }; 130static const char *gpt2_parents[] = { "pll3_clk", "gpt2_syn_clk", };
131static const char *gen2_3_parents[] = { "pll1_clk", "pll2_clk", }; 131static const char *gen2_3_parents[] = { "pll1_clk", "pll2_clk", };
132static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none", 132static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
133 "pll2_clk", }; 133 "pll2_clk", };
@@ -137,7 +137,7 @@ static void __init spear300_clk_init(void)
137{ 137{
138 struct clk *clk; 138 struct clk *clk;
139 139
140 clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0, 140 clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0,
141 1, 1); 141 1, 1);
142 clk_register_clkdev(clk, NULL, "60000000.clcd"); 142 clk_register_clkdev(clk, NULL, "60000000.clcd");
143 143
@@ -219,15 +219,11 @@ static void __init spear310_clk_init(void)
219 #define SPEAR320_UARTX_PCLK_VAL_SYNTH1 0x0 219 #define SPEAR320_UARTX_PCLK_VAL_SYNTH1 0x0
220 #define SPEAR320_UARTX_PCLK_VAL_APB 0x1 220 #define SPEAR320_UARTX_PCLK_VAL_APB 0x1
221 221
222static const char *i2s_ref_parents[] = { "ras_pll2_clk", 222static const char *i2s_ref_parents[] = { "ras_pll2_clk", "ras_syn2_gclk", };
223 "ras_gen2_synth_gate_clk", }; 223static const char *sdhci_parents[] = { "ras_pll3_clk", "ras_syn3_gclk", };
224static const char *sdhci_parents[] = { "ras_pll3_48m_clk",
225 "ras_gen3_synth_gate_clk",
226};
227static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk", 224static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk",
228 "ras_gen0_synth_gate_clk", }; 225 "ras_syn0_gclk", };
229static const char *uartx_parents[] = { "ras_gen1_synth_gate_clk", "ras_apb_clk", 226static const char *uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", };
230};
231 227
232static void __init spear320_clk_init(void) 228static void __init spear320_clk_init(void)
233{ 229{
@@ -237,7 +233,7 @@ static void __init spear320_clk_init(void)
237 CLK_IS_ROOT, 125000000); 233 CLK_IS_ROOT, 125000000);
238 clk_register_clkdev(clk, "smii_125m_pad", NULL); 234 clk_register_clkdev(clk, "smii_125m_pad", NULL);
239 235
240 clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0, 236 clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0,
241 1, 1); 237 1, 1);
242 clk_register_clkdev(clk, NULL, "90000000.clcd"); 238 clk_register_clkdev(clk, NULL, "90000000.clcd");
243 239
@@ -363,9 +359,9 @@ void __init spear3xx_clk_init(void)
363 clk_register_clkdev(clk, NULL, "fc900000.rtc"); 359 clk_register_clkdev(clk, NULL, "fc900000.rtc");
364 360
365 /* clock derived from 24 MHz osc clk */ 361 /* clock derived from 24 MHz osc clk */
366 clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0, 362 clk = clk_register_fixed_rate(NULL, "pll3_clk", "osc_24m_clk", 0,
367 48000000); 363 48000000);
368 clk_register_clkdev(clk, "pll3_48m_clk", NULL); 364 clk_register_clkdev(clk, "pll3_clk", NULL);
369 365
370 clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_24m_clk", 0, 1, 366 clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_24m_clk", 0, 1,
371 1); 367 1);
@@ -392,98 +388,98 @@ void __init spear3xx_clk_init(void)
392 HCLK_RATIO_MASK, 0, &_lock); 388 HCLK_RATIO_MASK, 0, &_lock);
393 clk_register_clkdev(clk, "ahb_clk", NULL); 389 clk_register_clkdev(clk, "ahb_clk", NULL);
394 390
395 clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk", 391 clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "pll1_clk", 0,
396 "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl, 392 UART_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
397 ARRAY_SIZE(aux_rtbl), &_lock, &clk1); 393 &_lock, &clk1);
398 clk_register_clkdev(clk, "uart_synth_clk", NULL); 394 clk_register_clkdev(clk, "uart_syn_clk", NULL);
399 clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL); 395 clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
400 396
401 clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents, 397 clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
402 ARRAY_SIZE(uart0_parents), 0, PERIP_CLK_CFG, 398 ARRAY_SIZE(uart0_parents), 0, PERIP_CLK_CFG,
403 UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock); 399 UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
404 clk_register_clkdev(clk, "uart0_mux_clk", NULL); 400 clk_register_clkdev(clk, "uart0_mclk", NULL);
405 401
406 clk = clk_register_gate(NULL, "uart0", "uart0_mux_clk", 0, 402 clk = clk_register_gate(NULL, "uart0", "uart0_mclk", 0, PERIP1_CLK_ENB,
407 PERIP1_CLK_ENB, UART_CLK_ENB, 0, &_lock); 403 UART_CLK_ENB, 0, &_lock);
408 clk_register_clkdev(clk, NULL, "d0000000.serial"); 404 clk_register_clkdev(clk, NULL, "d0000000.serial");
409 405
410 clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk", 406 clk = clk_register_aux("firda_syn_clk", "firda_syn_gclk", "pll1_clk", 0,
411 "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl, 407 FIRDA_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
412 ARRAY_SIZE(aux_rtbl), &_lock, &clk1); 408 &_lock, &clk1);
413 clk_register_clkdev(clk, "firda_synth_clk", NULL); 409 clk_register_clkdev(clk, "firda_syn_clk", NULL);
414 clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL); 410 clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
415 411
416 clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents, 412 clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
417 ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG, 413 ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
418 FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock); 414 FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
419 clk_register_clkdev(clk, "firda_mux_clk", NULL); 415 clk_register_clkdev(clk, "firda_mclk", NULL);
420 416
421 clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0, 417 clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", 0,
422 PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock); 418 PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
423 clk_register_clkdev(clk, NULL, "firda"); 419 clk_register_clkdev(clk, NULL, "firda");
424 420
425 /* gpt clocks */ 421 /* gpt clocks */
426 clk_register_gpt("gpt0_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG, 422 clk_register_gpt("gpt0_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG, gpt_rtbl,
427 gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock); 423 ARRAY_SIZE(gpt_rtbl), &_lock);
428 clk = clk_register_mux(NULL, "gpt0_clk", gpt0_parents, 424 clk = clk_register_mux(NULL, "gpt0_clk", gpt0_parents,
429 ARRAY_SIZE(gpt0_parents), 0, PERIP_CLK_CFG, 425 ARRAY_SIZE(gpt0_parents), 0, PERIP_CLK_CFG,
430 GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); 426 GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
431 clk_register_clkdev(clk, NULL, "gpt0"); 427 clk_register_clkdev(clk, NULL, "gpt0");
432 428
433 clk_register_gpt("gpt1_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG, 429 clk_register_gpt("gpt1_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG, gpt_rtbl,
434 gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock); 430 ARRAY_SIZE(gpt_rtbl), &_lock);
435 clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt1_parents, 431 clk = clk_register_mux(NULL, "gpt1_mclk", gpt1_parents,
436 ARRAY_SIZE(gpt1_parents), 0, PERIP_CLK_CFG, 432 ARRAY_SIZE(gpt1_parents), 0, PERIP_CLK_CFG,
437 GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); 433 GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
438 clk_register_clkdev(clk, "gpt1_mux_clk", NULL); 434 clk_register_clkdev(clk, "gpt1_mclk", NULL);
439 clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0, 435 clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
440 PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock); 436 PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
441 clk_register_clkdev(clk, NULL, "gpt1"); 437 clk_register_clkdev(clk, NULL, "gpt1");
442 438
443 clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG, 439 clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG, gpt_rtbl,
444 gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock); 440 ARRAY_SIZE(gpt_rtbl), &_lock);
445 clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents, 441 clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
446 ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG, 442 ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
447 GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); 443 GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
448 clk_register_clkdev(clk, "gpt2_mux_clk", NULL); 444 clk_register_clkdev(clk, "gpt2_mclk", NULL);
449 clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0, 445 clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
450 PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock); 446 PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
451 clk_register_clkdev(clk, NULL, "gpt2"); 447 clk_register_clkdev(clk, NULL, "gpt2");
452 448
453 /* general synths clocks */ 449 /* general synths clocks */
454 clk = clk_register_aux("gen0_synth_clk", "gen0_synth_gate_clk", 450 clk = clk_register_aux("gen0_syn_clk", "gen0_syn_gclk", "pll1_clk",
455 "pll1_clk", 0, GEN0_CLK_SYNT, NULL, aux_rtbl, 451 0, GEN0_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
456 ARRAY_SIZE(aux_rtbl), &_lock, &clk1); 452 &_lock, &clk1);
457 clk_register_clkdev(clk, "gen0_synth_clk", NULL); 453 clk_register_clkdev(clk, "gen0_syn_clk", NULL);
458 clk_register_clkdev(clk1, "gen0_synth_gate_clk", NULL); 454 clk_register_clkdev(clk1, "gen0_syn_gclk", NULL);
459 455
460 clk = clk_register_aux("gen1_synth_clk", "gen1_synth_gate_clk", 456 clk = clk_register_aux("gen1_syn_clk", "gen1_syn_gclk", "pll1_clk",
461 "pll1_clk", 0, GEN1_CLK_SYNT, NULL, aux_rtbl, 457 0, GEN1_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
462 ARRAY_SIZE(aux_rtbl), &_lock, &clk1); 458 &_lock, &clk1);
463 clk_register_clkdev(clk, "gen1_synth_clk", NULL); 459 clk_register_clkdev(clk, "gen1_syn_clk", NULL);
464 clk_register_clkdev(clk1, "gen1_synth_gate_clk", NULL); 460 clk_register_clkdev(clk1, "gen1_syn_gclk", NULL);
465 461
466 clk = clk_register_mux(NULL, "gen2_3_parent_clk", gen2_3_parents, 462 clk = clk_register_mux(NULL, "gen2_3_par_clk", gen2_3_parents,
467 ARRAY_SIZE(gen2_3_parents), 0, CORE_CLK_CFG, 463 ARRAY_SIZE(gen2_3_parents), 0, CORE_CLK_CFG,
468 GEN_SYNTH2_3_CLK_SHIFT, GEN_SYNTH2_3_CLK_MASK, 0, 464 GEN_SYNTH2_3_CLK_SHIFT, GEN_SYNTH2_3_CLK_MASK, 0,
469 &_lock); 465 &_lock);
470 clk_register_clkdev(clk, "gen2_3_parent_clk", NULL); 466 clk_register_clkdev(clk, "gen2_3_par_clk", NULL);
471 467
472 clk = clk_register_aux("gen2_synth_clk", "gen2_synth_gate_clk", 468 clk = clk_register_aux("gen2_syn_clk", "gen2_syn_gclk",
473 "gen2_3_parent_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl, 469 "gen2_3_par_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl,
474 ARRAY_SIZE(aux_rtbl), &_lock, &clk1); 470 ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
475 clk_register_clkdev(clk, "gen2_synth_clk", NULL); 471 clk_register_clkdev(clk, "gen2_syn_clk", NULL);
476 clk_register_clkdev(clk1, "gen2_synth_gate_clk", NULL); 472 clk_register_clkdev(clk1, "gen2_syn_gclk", NULL);
477 473
478 clk = clk_register_aux("gen3_synth_clk", "gen3_synth_gate_clk", 474 clk = clk_register_aux("gen3_syn_clk", "gen3_syn_gclk",
479 "gen2_3_parent_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl, 475 "gen2_3_par_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl,
480 ARRAY_SIZE(aux_rtbl), &_lock, &clk1); 476 ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
481 clk_register_clkdev(clk, "gen3_synth_clk", NULL); 477 clk_register_clkdev(clk, "gen3_syn_clk", NULL);
482 clk_register_clkdev(clk1, "gen3_synth_gate_clk", NULL); 478 clk_register_clkdev(clk1, "gen3_syn_gclk", NULL);
483 479
484 /* clock derived from pll3 clk */ 480 /* clock derived from pll3 clk */
485 clk = clk_register_gate(NULL, "usbh_clk", "pll3_48m_clk", 0, 481 clk = clk_register_gate(NULL, "usbh_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
486 PERIP1_CLK_ENB, USBH_CLK_ENB, 0, &_lock); 482 USBH_CLK_ENB, 0, &_lock);
487 clk_register_clkdev(clk, "usbh_clk", NULL); 483 clk_register_clkdev(clk, "usbh_clk", NULL);
488 484
489 clk = clk_register_fixed_factor(NULL, "usbh.0_clk", "usbh_clk", 0, 1, 485 clk = clk_register_fixed_factor(NULL, "usbh.0_clk", "usbh_clk", 0, 1,
@@ -494,8 +490,8 @@ void __init spear3xx_clk_init(void)
494 1); 490 1);
495 clk_register_clkdev(clk, "usbh.1_clk", NULL); 491 clk_register_clkdev(clk, "usbh.1_clk", NULL);
496 492
497 clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0, 493 clk = clk_register_gate(NULL, "usbd_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
498 PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock); 494 USBD_CLK_ENB, 0, &_lock);
499 clk_register_clkdev(clk, NULL, "designware_udc"); 495 clk_register_clkdev(clk, NULL, "designware_udc");
500 496
501 /* clock derived from ahb clk */ 497 /* clock derived from ahb clk */
@@ -579,29 +575,25 @@ void __init spear3xx_clk_init(void)
579 RAS_CLK_ENB, RAS_PLL2_CLK_ENB, 0, &_lock); 575 RAS_CLK_ENB, RAS_PLL2_CLK_ENB, 0, &_lock);
580 clk_register_clkdev(clk, "ras_pll2_clk", NULL); 576 clk_register_clkdev(clk, "ras_pll2_clk", NULL);
581 577
582 clk = clk_register_gate(NULL, "ras_pll3_48m_clk", "pll3_48m_clk", 0, 578 clk = clk_register_gate(NULL, "ras_pll3_clk", "pll3_clk", 0,
583 RAS_CLK_ENB, RAS_48M_CLK_ENB, 0, &_lock); 579 RAS_CLK_ENB, RAS_48M_CLK_ENB, 0, &_lock);
584 clk_register_clkdev(clk, "ras_pll3_48m_clk", NULL); 580 clk_register_clkdev(clk, "ras_pll3_clk", NULL);
585 581
586 clk = clk_register_gate(NULL, "ras_gen0_synth_gate_clk", 582 clk = clk_register_gate(NULL, "ras_syn0_gclk", "gen0_syn_gclk", 0,
587 "gen0_synth_gate_clk", 0, RAS_CLK_ENB, 583 RAS_CLK_ENB, RAS_SYNT0_CLK_ENB, 0, &_lock);
588 RAS_SYNT0_CLK_ENB, 0, &_lock); 584 clk_register_clkdev(clk, "ras_syn0_gclk", NULL);
589 clk_register_clkdev(clk, "ras_gen0_synth_gate_clk", NULL); 585
590 586 clk = clk_register_gate(NULL, "ras_syn1_gclk", "gen1_syn_gclk", 0,
591 clk = clk_register_gate(NULL, "ras_gen1_synth_gate_clk", 587 RAS_CLK_ENB, RAS_SYNT1_CLK_ENB, 0, &_lock);
592 "gen1_synth_gate_clk", 0, RAS_CLK_ENB, 588 clk_register_clkdev(clk, "ras_syn1_gclk", NULL);
593 RAS_SYNT1_CLK_ENB, 0, &_lock); 589
594 clk_register_clkdev(clk, "ras_gen1_synth_gate_clk", NULL); 590 clk = clk_register_gate(NULL, "ras_syn2_gclk", "gen2_syn_gclk", 0,
595 591 RAS_CLK_ENB, RAS_SYNT2_CLK_ENB, 0, &_lock);
596 clk = clk_register_gate(NULL, "ras_gen2_synth_gate_clk", 592 clk_register_clkdev(clk, "ras_syn2_gclk", NULL);
597 "gen2_synth_gate_clk", 0, RAS_CLK_ENB, 593
598 RAS_SYNT2_CLK_ENB, 0, &_lock); 594 clk = clk_register_gate(NULL, "ras_syn3_gclk", "gen3_syn_gclk", 0,
599 clk_register_clkdev(clk, "ras_gen2_synth_gate_clk", NULL); 595 RAS_CLK_ENB, RAS_SYNT3_CLK_ENB, 0, &_lock);
600 596 clk_register_clkdev(clk, "ras_syn3_gclk", NULL);
601 clk = clk_register_gate(NULL, "ras_gen3_synth_gate_clk",
602 "gen3_synth_gate_clk", 0, RAS_CLK_ENB,
603 RAS_SYNT3_CLK_ENB, 0, &_lock);
604 clk_register_clkdev(clk, "ras_gen3_synth_gate_clk", NULL);
605 597
606 if (of_machine_is_compatible("st,spear300")) 598 if (of_machine_is_compatible("st,spear300"))
607 spear300_clk_init(); 599 spear300_clk_init();
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index f9a20b382304..a98d0866f541 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -2,7 +2,7 @@
2 * SPEAr6xx machines clock framework source file 2 * SPEAr6xx machines clock framework source file
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -97,13 +97,12 @@ static struct aux_rate_tbl aux_rtbl[] = {
97 {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */ 97 {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
98}; 98};
99 99
100static const char *clcd_parents[] = { "pll3_48m_clk", "clcd_synth_gate_clk", }; 100static const char *clcd_parents[] = { "pll3_clk", "clcd_syn_gclk", };
101static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk", 101static const char *firda_parents[] = { "pll3_clk", "firda_syn_gclk", };
102}; 102static const char *uart_parents[] = { "pll3_clk", "uart_syn_gclk", };
103static const char *uart_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", }; 103static const char *gpt0_1_parents[] = { "pll3_clk", "gpt0_1_syn_clk", };
104static const char *gpt0_1_parents[] = { "pll3_48m_clk", "gpt0_1_synth_clk", }; 104static const char *gpt2_parents[] = { "pll3_clk", "gpt2_syn_clk", };
105static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", }; 105static const char *gpt3_parents[] = { "pll3_clk", "gpt3_syn_clk", };
106static const char *gpt3_parents[] = { "pll3_48m_clk", "gpt3_synth_clk", };
107static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none", 106static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
108 "pll2_clk", }; 107 "pll2_clk", };
109 108
@@ -136,9 +135,9 @@ void __init spear6xx_clk_init(void)
136 clk_register_clkdev(clk, NULL, "rtc-spear"); 135 clk_register_clkdev(clk, NULL, "rtc-spear");
137 136
138 /* clock derived from 30 MHz osc clk */ 137 /* clock derived from 30 MHz osc clk */
139 clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0, 138 clk = clk_register_fixed_rate(NULL, "pll3_clk", "osc_24m_clk", 0,
140 48000000); 139 48000000);
141 clk_register_clkdev(clk, "pll3_48m_clk", NULL); 140 clk_register_clkdev(clk, "pll3_clk", NULL);
142 141
143 clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "osc_30m_clk", 142 clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "osc_30m_clk",
144 0, PLL1_CTR, PLL1_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl), 143 0, PLL1_CTR, PLL1_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl),
@@ -146,9 +145,9 @@ void __init spear6xx_clk_init(void)
146 clk_register_clkdev(clk, "vco1_clk", NULL); 145 clk_register_clkdev(clk, "vco1_clk", NULL);
147 clk_register_clkdev(clk1, "pll1_clk", NULL); 146 clk_register_clkdev(clk1, "pll1_clk", NULL);
148 147
149 clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, 148 clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "osc_30m_clk",
150 "osc_30m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl, 149 0, PLL2_CTR, PLL2_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl),
151 ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL); 150 &_lock, &clk1, NULL);
152 clk_register_clkdev(clk, "vco2_clk", NULL); 151 clk_register_clkdev(clk, "vco2_clk", NULL);
153 clk_register_clkdev(clk1, "pll2_clk", NULL); 152 clk_register_clkdev(clk1, "pll2_clk", NULL);
154 153
@@ -165,111 +164,111 @@ void __init spear6xx_clk_init(void)
165 HCLK_RATIO_MASK, 0, &_lock); 164 HCLK_RATIO_MASK, 0, &_lock);
166 clk_register_clkdev(clk, "ahb_clk", NULL); 165 clk_register_clkdev(clk, "ahb_clk", NULL);
167 166
168 clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk", 167 clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "pll1_clk", 0,
169 "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl, 168 UART_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
170 ARRAY_SIZE(aux_rtbl), &_lock, &clk1); 169 &_lock, &clk1);
171 clk_register_clkdev(clk, "uart_synth_clk", NULL); 170 clk_register_clkdev(clk, "uart_syn_clk", NULL);
172 clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL); 171 clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
173 172
174 clk = clk_register_mux(NULL, "uart_mux_clk", uart_parents, 173 clk = clk_register_mux(NULL, "uart_mclk", uart_parents,
175 ARRAY_SIZE(uart_parents), 0, PERIP_CLK_CFG, 174 ARRAY_SIZE(uart_parents), 0, PERIP_CLK_CFG,
176 UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock); 175 UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
177 clk_register_clkdev(clk, "uart_mux_clk", NULL); 176 clk_register_clkdev(clk, "uart_mclk", NULL);
178 177
179 clk = clk_register_gate(NULL, "uart0", "uart_mux_clk", 0, 178 clk = clk_register_gate(NULL, "uart0", "uart_mclk", 0, PERIP1_CLK_ENB,
180 PERIP1_CLK_ENB, UART0_CLK_ENB, 0, &_lock); 179 UART0_CLK_ENB, 0, &_lock);
181 clk_register_clkdev(clk, NULL, "d0000000.serial"); 180 clk_register_clkdev(clk, NULL, "d0000000.serial");
182 181
183 clk = clk_register_gate(NULL, "uart1", "uart_mux_clk", 0, 182 clk = clk_register_gate(NULL, "uart1", "uart_mclk", 0, PERIP1_CLK_ENB,
184 PERIP1_CLK_ENB, UART1_CLK_ENB, 0, &_lock); 183 UART1_CLK_ENB, 0, &_lock);
185 clk_register_clkdev(clk, NULL, "d0080000.serial"); 184 clk_register_clkdev(clk, NULL, "d0080000.serial");
186 185
187 clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk", 186 clk = clk_register_aux("firda_syn_clk", "firda_syn_gclk", "pll1_clk",
188 "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl, 187 0, FIRDA_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
189 ARRAY_SIZE(aux_rtbl), &_lock, &clk1); 188 &_lock, &clk1);
190 clk_register_clkdev(clk, "firda_synth_clk", NULL); 189 clk_register_clkdev(clk, "firda_syn_clk", NULL);
191 clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL); 190 clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
192 191
193 clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents, 192 clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
194 ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG, 193 ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
195 FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock); 194 FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
196 clk_register_clkdev(clk, "firda_mux_clk", NULL); 195 clk_register_clkdev(clk, "firda_mclk", NULL);
197 196
198 clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0, 197 clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", 0,
199 PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock); 198 PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
200 clk_register_clkdev(clk, NULL, "firda"); 199 clk_register_clkdev(clk, NULL, "firda");
201 200
202 clk = clk_register_aux("clcd_synth_clk", "clcd_synth_gate_clk", 201 clk = clk_register_aux("clcd_syn_clk", "clcd_syn_gclk", "pll1_clk",
203 "pll1_clk", 0, CLCD_CLK_SYNT, NULL, aux_rtbl, 202 0, CLCD_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
204 ARRAY_SIZE(aux_rtbl), &_lock, &clk1); 203 &_lock, &clk1);
205 clk_register_clkdev(clk, "clcd_synth_clk", NULL); 204 clk_register_clkdev(clk, "clcd_syn_clk", NULL);
206 clk_register_clkdev(clk1, "clcd_synth_gate_clk", NULL); 205 clk_register_clkdev(clk1, "clcd_syn_gclk", NULL);
207 206
208 clk = clk_register_mux(NULL, "clcd_mux_clk", clcd_parents, 207 clk = clk_register_mux(NULL, "clcd_mclk", clcd_parents,
209 ARRAY_SIZE(clcd_parents), 0, PERIP_CLK_CFG, 208 ARRAY_SIZE(clcd_parents), 0, PERIP_CLK_CFG,
210 CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0, &_lock); 209 CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0, &_lock);
211 clk_register_clkdev(clk, "clcd_mux_clk", NULL); 210 clk_register_clkdev(clk, "clcd_mclk", NULL);
212 211
213 clk = clk_register_gate(NULL, "clcd_clk", "clcd_mux_clk", 0, 212 clk = clk_register_gate(NULL, "clcd_clk", "clcd_mclk", 0,
214 PERIP1_CLK_ENB, CLCD_CLK_ENB, 0, &_lock); 213 PERIP1_CLK_ENB, CLCD_CLK_ENB, 0, &_lock);
215 clk_register_clkdev(clk, NULL, "clcd"); 214 clk_register_clkdev(clk, NULL, "clcd");
216 215
217 /* gpt clocks */ 216 /* gpt clocks */
218 clk = clk_register_gpt("gpt0_1_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG, 217 clk = clk_register_gpt("gpt0_1_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
219 gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock); 218 gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
220 clk_register_clkdev(clk, "gpt0_1_synth_clk", NULL); 219 clk_register_clkdev(clk, "gpt0_1_syn_clk", NULL);
221 220
222 clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt0_1_parents, 221 clk = clk_register_mux(NULL, "gpt0_mclk", gpt0_1_parents,
223 ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG, 222 ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
224 GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); 223 GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
225 clk_register_clkdev(clk, NULL, "gpt0"); 224 clk_register_clkdev(clk, NULL, "gpt0");
226 225
227 clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt0_1_parents, 226 clk = clk_register_mux(NULL, "gpt1_mclk", gpt0_1_parents,
228 ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG, 227 ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
229 GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); 228 GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
230 clk_register_clkdev(clk, "gpt1_mux_clk", NULL); 229 clk_register_clkdev(clk, "gpt1_mclk", NULL);
231 230
232 clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0, 231 clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
233 PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock); 232 PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
234 clk_register_clkdev(clk, NULL, "gpt1"); 233 clk_register_clkdev(clk, NULL, "gpt1");
235 234
236 clk = clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG, 235 clk = clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
237 gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock); 236 gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
238 clk_register_clkdev(clk, "gpt2_synth_clk", NULL); 237 clk_register_clkdev(clk, "gpt2_syn_clk", NULL);
239 238
240 clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents, 239 clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
241 ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG, 240 ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
242 GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); 241 GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
243 clk_register_clkdev(clk, "gpt2_mux_clk", NULL); 242 clk_register_clkdev(clk, "gpt2_mclk", NULL);
244 243
245 clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0, 244 clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
246 PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock); 245 PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
247 clk_register_clkdev(clk, NULL, "gpt2"); 246 clk_register_clkdev(clk, NULL, "gpt2");
248 247
249 clk = clk_register_gpt("gpt3_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG, 248 clk = clk_register_gpt("gpt3_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
250 gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock); 249 gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
251 clk_register_clkdev(clk, "gpt3_synth_clk", NULL); 250 clk_register_clkdev(clk, "gpt3_syn_clk", NULL);
252 251
253 clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt3_parents, 252 clk = clk_register_mux(NULL, "gpt3_mclk", gpt3_parents,
254 ARRAY_SIZE(gpt3_parents), 0, PERIP_CLK_CFG, 253 ARRAY_SIZE(gpt3_parents), 0, PERIP_CLK_CFG,
255 GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); 254 GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
256 clk_register_clkdev(clk, "gpt3_mux_clk", NULL); 255 clk_register_clkdev(clk, "gpt3_mclk", NULL);
257 256
258 clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0, 257 clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
259 PERIP1_CLK_ENB, GPT3_CLK_ENB, 0, &_lock); 258 PERIP1_CLK_ENB, GPT3_CLK_ENB, 0, &_lock);
260 clk_register_clkdev(clk, NULL, "gpt3"); 259 clk_register_clkdev(clk, NULL, "gpt3");
261 260
262 /* clock derived from pll3 clk */ 261 /* clock derived from pll3 clk */
263 clk = clk_register_gate(NULL, "usbh0_clk", "pll3_48m_clk", 0, 262 clk = clk_register_gate(NULL, "usbh0_clk", "pll3_clk", 0,
264 PERIP1_CLK_ENB, USBH0_CLK_ENB, 0, &_lock); 263 PERIP1_CLK_ENB, USBH0_CLK_ENB, 0, &_lock);
265 clk_register_clkdev(clk, NULL, "usbh.0_clk"); 264 clk_register_clkdev(clk, NULL, "usbh.0_clk");
266 265
267 clk = clk_register_gate(NULL, "usbh1_clk", "pll3_48m_clk", 0, 266 clk = clk_register_gate(NULL, "usbh1_clk", "pll3_clk", 0,
268 PERIP1_CLK_ENB, USBH1_CLK_ENB, 0, &_lock); 267 PERIP1_CLK_ENB, USBH1_CLK_ENB, 0, &_lock);
269 clk_register_clkdev(clk, NULL, "usbh.1_clk"); 268 clk_register_clkdev(clk, NULL, "usbh.1_clk");
270 269
271 clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0, 270 clk = clk_register_gate(NULL, "usbd_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
272 PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock); 271 USBD_CLK_ENB, 0, &_lock);
273 clk_register_clkdev(clk, NULL, "designware_udc"); 272 clk_register_clkdev(clk, NULL, "designware_udc");
274 273
275 /* clock derived from ahb clk */ 274 /* clock derived from ahb clk */
@@ -278,9 +277,8 @@ void __init spear6xx_clk_init(void)
278 clk_register_clkdev(clk, "ahbmult2_clk", NULL); 277 clk_register_clkdev(clk, "ahbmult2_clk", NULL);
279 278
280 clk = clk_register_mux(NULL, "ddr_clk", ddr_parents, 279 clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
281 ARRAY_SIZE(ddr_parents), 280 ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT,
282 0, PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0, 281 MCTR_CLK_MASK, 0, &_lock);
283 &_lock);
284 clk_register_clkdev(clk, "ddr_clk", NULL); 282 clk_register_clkdev(clk, "ddr_clk", NULL);
285 283
286 clk = clk_register_divider(NULL, "apb_clk", "ahb_clk", 284 clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
@@ -298,7 +296,7 @@ void __init spear6xx_clk_init(void)
298 296
299 clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB, 297 clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
300 GMAC_CLK_ENB, 0, &_lock); 298 GMAC_CLK_ENB, 0, &_lock);
301 clk_register_clkdev(clk, NULL, "gmac"); 299 clk_register_clkdev(clk, NULL, "e0800000.ethernet");
302 300
303 clk = clk_register_gate(NULL, "i2c_clk", "ahb_clk", 0, PERIP1_CLK_ENB, 301 clk = clk_register_gate(NULL, "i2c_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
304 I2C_CLK_ENB, 0, &_lock); 302 I2C_CLK_ENB, 0, &_lock);
diff --git a/drivers/clk/versatile/Makefile b/drivers/clk/versatile/Makefile
new file mode 100644
index 000000000000..50cf6a2ee693
--- /dev/null
+++ b/drivers/clk/versatile/Makefile
@@ -0,0 +1,3 @@
1# Makefile for Versatile-specific clocks
2obj-$(CONFIG_ICST) += clk-icst.o
3obj-$(CONFIG_ARCH_INTEGRATOR) += clk-integrator.o
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
new file mode 100644
index 000000000000..f555b50a5fa5
--- /dev/null
+++ b/drivers/clk/versatile/clk-icst.c
@@ -0,0 +1,100 @@
1/*
2 * Driver for the ICST307 VCO clock found in the ARM Reference designs.
3 * We wrap the custom interface from <asm/hardware/icst.h> into the generic
4 * clock framework.
5 *
6 * TODO: when all ARM reference designs are migrated to generic clocks, the
7 * ICST clock code from the ARM tree should probably be merged into this
8 * file.
9 */
10#include <linux/clk.h>
11#include <linux/clkdev.h>
12#include <linux/err.h>
13#include <linux/clk-provider.h>
14
15#include "clk-icst.h"
16
17/**
18 * struct clk_icst - ICST VCO clock wrapper
19 * @hw: corresponding clock hardware entry
20 * @params: parameters for this ICST instance
21 * @rate: current rate
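 * @getvco: function to read the current ICST settings from hardware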
22 * @setvco: function to commit ICST settings to hardware
23 */
24struct clk_icst {
25 struct clk_hw hw;
26 const struct icst_params *params;
27 unsigned long rate;
28 struct icst_vco (*getvco)(void);
29 void (*setvco)(struct icst_vco);
30};
31
32#define to_icst(_hw) container_of(_hw, struct clk_icst, hw)
33
34static unsigned long icst_recalc_rate(struct clk_hw *hw,
35 unsigned long parent_rate)
36{
37 struct clk_icst *icst = to_icst(hw);
38 struct icst_vco vco;
39
40 vco = icst->getvco();
41 icst->rate = icst_hz(icst->params, vco);
42 return icst->rate;
43}
44
45static long icst_round_rate(struct clk_hw *hw, unsigned long rate,
46 unsigned long *prate)
47{
48 struct clk_icst *icst = to_icst(hw);
49 struct icst_vco vco;
50
51 vco = icst_hz_to_vco(icst->params, rate);
52 return icst_hz(icst->params, vco);
53}
54
55static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
56 unsigned long parent_rate)
57{
58 struct clk_icst *icst = to_icst(hw);
59 struct icst_vco vco;
60
61 vco = icst_hz_to_vco(icst->params, rate);
62 icst->rate = icst_hz(icst->params, vco);
63 icst->setvco(vco);
64 return 0;
65}
66
67static const struct clk_ops icst_ops = {
68 .recalc_rate = icst_recalc_rate,
69 .round_rate = icst_round_rate,
70 .set_rate = icst_set_rate,
71};
72
73struct clk * __init icst_clk_register(struct device *dev,
74 const struct clk_icst_desc *desc)
75{
76 struct clk *clk;
77 struct clk_icst *icst;
78 struct clk_init_data init;
79
80 icst = kzalloc(sizeof(struct clk_icst), GFP_KERNEL);
81 if (!icst) {
82 pr_err("could not allocate ICST clock!\n");
83 return ERR_PTR(-ENOMEM);
84 }
85 init.name = "icst";
86 init.ops = &icst_ops;
87 init.flags = CLK_IS_ROOT;
88 init.parent_names = NULL;
89 init.num_parents = 0;
90 icst->hw.init = &init;
91 icst->params = desc->params;
92 icst->getvco = desc->getvco;
93 icst->setvco = desc->setvco;
94
95 clk = clk_register(dev, &icst->hw);
96 if (IS_ERR(clk))
97 kfree(icst);
98
99 return clk;
100}
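A hedged sketch of how a platform would feed icst_clk_register(): the board_* names, the register layout and the parameter values below are assumptions for illustration; only the clk_icst_desc plumbing mirrors the driver above.

	#include <linux/io.h>

	#include "clk-icst.h"

	static void __iomem *board_osc;	/* mapped by assumed board code */

	static struct icst_vco board_vco_get(void)
	{
		struct icst_vco vco;
		u32 val = readl(board_osc);

		/* assumed field layout: v in [8:0], r in [15:9], s in [18:16] */
		vco.v = val & 0x1ff;
		vco.r = (val >> 9) & 0x7f;
		vco.s = (val >> 16) & 0x7;
		return vco;
	}

	static void board_vco_set(struct icst_vco vco)
	{
		writel(vco.v | (vco.r << 9) | (vco.s << 16), board_osc);
	}

	static const struct icst_params board_vco_params = {
		.ref		= 24000000,	/* assumed 24 MHz reference */
		.vco_max	= ICST307_VCO_MAX,
		.vco_min	= ICST307_VCO_MIN,
		.vd_min		= 4,
		.vd_max		= 511,
		.rd_min		= 1,
		.rd_max		= 127,
		.s2div		= icst307_s2div,
		.idx2s		= icst307_idx2s,
	};

	static const struct clk_icst_desc board_icst_desc = {
		.params	= &board_vco_params,
		.getvco	= board_vco_get,
		.setvco	= board_vco_set,
	};

	/* then, from the board's clock init: */
	/* clk = icst_clk_register(NULL, &board_icst_desc); */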
diff --git a/drivers/clk/versatile/clk-icst.h b/drivers/clk/versatile/clk-icst.h
new file mode 100644
index 000000000000..71b4c56c1410
--- /dev/null
+++ b/drivers/clk/versatile/clk-icst.h
@@ -0,0 +1,10 @@
1#include <asm/hardware/icst.h>
2
3struct clk_icst_desc {
4 const struct icst_params *params;
5 struct icst_vco (*getvco)(void);
6 void (*setvco)(struct icst_vco);
7};
8
9struct clk *icst_clk_register(struct device *dev,
10 const struct clk_icst_desc *desc);
diff --git a/drivers/clk/versatile/clk-integrator.c b/drivers/clk/versatile/clk-integrator.c
new file mode 100644
index 000000000000..a5053921bf7f
--- /dev/null
+++ b/drivers/clk/versatile/clk-integrator.c
@@ -0,0 +1,111 @@
1#include <linux/clk.h>
2#include <linux/clkdev.h>
3#include <linux/err.h>
4#include <linux/io.h>
5#include <linux/clk-provider.h>
6
7#include <mach/hardware.h>
8#include <mach/platform.h>
9
10#include "clk-icst.h"
11
12/*
13 * Implementation of the ARM Integrator/AP and Integrator/CP clock tree.
14 * Inspired by portions of:
15 * plat-versatile/clock.c and plat-versatile/include/plat/clock.h
16 */
17#define CM_LOCK (__io_address(INTEGRATOR_HDR_BASE)+INTEGRATOR_HDR_LOCK_OFFSET)
18#define CM_AUXOSC (__io_address(INTEGRATOR_HDR_BASE)+0x1c)
19
20/**
21 * cp_auxvco_get() - get ICST VCO settings for the Integrator/CP
 22 * Returns the ICST VCO parameters currently programmed in hardware. 22 * Returns the ICST VCO parameters currently programmed in hardware.
23 */
24static struct icst_vco cp_auxvco_get(void)
25{
26 u32 val;
27 struct icst_vco vco;
28
29 val = readl(CM_AUXOSC);
30 vco.v = val & 0x1ff;
31 vco.r = (val >> 9) & 0x7f;
32 vco.s = (val >> 16) & 03;
33 return vco;
34}
35
36/**
37 * cp_auxvco_set() - commit changes to Integrator/CP ICST VCO
38 * @vco: ICST VCO parameters to commit
39 */
40static void cp_auxvco_set(struct icst_vco vco)
41{
42 u32 val;
43
44 val = readl(CM_AUXOSC) & ~0x7ffff;
45 val |= vco.v | (vco.r << 9) | (vco.s << 16);
46
47 /* This magic unlocks the CM VCO so it can be controlled */
48 writel(0xa05f, CM_LOCK);
49 writel(val, CM_AUXOSC);
50 /* This locks the CM again */
51 writel(0, CM_LOCK);
52}
53
54static const struct icst_params cp_auxvco_params = {
55 .ref = 24000000,
56 .vco_max = ICST525_VCO_MAX_5V,
57 .vco_min = ICST525_VCO_MIN,
58 .vd_min = 8,
59 .vd_max = 263,
60 .rd_min = 3,
61 .rd_max = 65,
62 .s2div = icst525_s2div,
63 .idx2s = icst525_idx2s,
64};
65
66static const struct clk_icst_desc __initdata cp_icst_desc = {
67 .params = &cp_auxvco_params,
68 .getvco = cp_auxvco_get,
69 .setvco = cp_auxvco_set,
70};
71
72/*
73 * integrator_clk_init() - set up the integrator clock tree
 74 * @is_cp: pass true for the Integrator/CP; otherwise the AP is assumed 74 * @is_cp: pass true for the Integrator/CP; otherwise the AP is assumed
75 */
76void __init integrator_clk_init(bool is_cp)
77{
78 struct clk *clk;
79
80 /* APB clock dummy */
81 clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
82 clk_register_clkdev(clk, "apb_pclk", NULL);
83
84 /* UART reference clock */
85 clk = clk_register_fixed_rate(NULL, "uartclk", NULL, CLK_IS_ROOT,
86 14745600);
87 clk_register_clkdev(clk, NULL, "uart0");
88 clk_register_clkdev(clk, NULL, "uart1");
89 if (is_cp)
90 clk_register_clkdev(clk, NULL, "mmci");
91
92 /* 24 MHz clock */
93 clk = clk_register_fixed_rate(NULL, "clk24mhz", NULL, CLK_IS_ROOT,
94 24000000);
95 clk_register_clkdev(clk, NULL, "kmi0");
96 clk_register_clkdev(clk, NULL, "kmi1");
97 if (!is_cp)
98 clk_register_clkdev(clk, NULL, "ap_timer");
99
100 if (!is_cp)
101 return;
102
103 /* 1 MHz clock */
104 clk = clk_register_fixed_rate(NULL, "clk1mhz", NULL, CLK_IS_ROOT,
105 1000000);
106 clk_register_clkdev(clk, NULL, "sp804");
107
108 /* ICST VCO clock used on the Integrator/CP CLCD */
109 clk = icst_clk_register(NULL, &cp_icst_desc);
110 clk_register_clkdev(clk, NULL, "clcd");
111}
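And a sketch of the consumer end, assuming the probing AMBA devices carry the names used as dev_id above ("clcd", "mmci", ...); clcd_setup_clock() and the 25 MHz pixel rate are arbitrary examples.

	#include <linux/clk.h>
	#include <linux/err.h>

	/* hypothetical helper in the CLCD driver's probe path */
	static int clcd_setup_clock(struct device *dev)
	{
		struct clk *clk = clk_get(dev, NULL);	/* resolves dev_id "clcd" */
		int ret;

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* routed to icst_set_rate(), which programs the nearest VCO setting */
		ret = clk_set_rate(clk, 25000000);
		if (ret) {
			clk_put(clk);
			return ret;
		}

		return clk_prepare_enable(clk);
	}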
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 99c6b203e6cd..d53cd0afc200 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -16,6 +16,12 @@ config CLKSRC_MMIO
16config DW_APB_TIMER 16config DW_APB_TIMER
17 bool 17 bool
18 18
19config DW_APB_TIMER_OF
20 bool
21
22config ARMADA_370_XP_TIMER
23 bool
24
19config CLKSRC_DBX500_PRCMU 25config CLKSRC_DBX500_PRCMU
20 bool "Clocksource PRCMU Timer" 26 bool "Clocksource PRCMU Timer"
21 depends on UX500_SOC_DB8500 27 depends on UX500_SOC_DB8500
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index dd3e661a124d..b65d0c56ab35 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -10,4 +10,6 @@ obj-$(CONFIG_EM_TIMER_STI) += em_sti.o
10obj-$(CONFIG_CLKBLD_I8253) += i8253.o 10obj-$(CONFIG_CLKBLD_I8253) += i8253.o
11obj-$(CONFIG_CLKSRC_MMIO) += mmio.o 11obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
12obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o 12obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
13obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o \ No newline at end of file 13obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
14obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
15obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index 540795cd0760..d9279385304d 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -53,7 +53,7 @@ static struct cs5535_mfgpt_timer *cs5535_event_clock;
53#define MFGPT_PERIODIC (MFGPT_HZ / HZ) 53#define MFGPT_PERIODIC (MFGPT_HZ / HZ)
54 54
55/* 55/*
56 * The MFPGT timers on the CS5536 provide us with suitable timers to use 56 * The MFGPT timers on the CS5536 provide us with suitable timers to use
57 * as clock event sources - not as good as a HPET or APIC, but certainly 57 * as clock event sources - not as good as a HPET or APIC, but certainly
58 * better than the PIT. This isn't a general purpose MFGPT driver, but 58 * better than the PIT. This isn't a general purpose MFGPT driver, but
59 * a simplified one designed specifically to act as a clock event source. 59 * a simplified one designed specifically to act as a clock event source.
@@ -144,7 +144,7 @@ static int __init cs5535_mfgpt_init(void)
144 144
145 timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING); 145 timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
146 if (!timer) { 146 if (!timer) {
147 printk(KERN_ERR DRV_NAME ": Could not allocate MFPGT timer\n"); 147 printk(KERN_ERR DRV_NAME ": Could not allocate MFGPT timer\n");
148 return -ENODEV; 148 return -ENODEV;
149 } 149 }
150 cs5535_event_clock = timer; 150 cs5535_event_clock = timer;
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
new file mode 100644
index 000000000000..f7dba5b79b44
--- /dev/null
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -0,0 +1,131 @@
1/*
2 * Copyright (C) 2012 Altera Corporation
3 * Copyright (c) 2011 Picochip Ltd., Jamie Iles
4 *
5 * Modified from mach-picoxcell/time.c
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#include <linux/dw_apb_timer.h>
20#include <linux/of.h>
21#include <linux/of_address.h>
22#include <linux/of_irq.h>
23
24#include <asm/mach/time.h>
25#include <asm/sched_clock.h>
26
27static void timer_get_base_and_rate(struct device_node *np,
28 void __iomem **base, u32 *rate)
29{
30 *base = of_iomap(np, 0);
31
32 if (!*base)
33 panic("Unable to map regs for %s", np->name);
34
35 if (of_property_read_u32(np, "clock-freq", rate) &&
36 of_property_read_u32(np, "clock-frequency", rate))
37 panic("No clock-frequency property for %s", np->name);
38}
39
40static void add_clockevent(struct device_node *event_timer)
41{
42 void __iomem *iobase;
43 struct dw_apb_clock_event_device *ced;
44 u32 irq, rate;
45
46 irq = irq_of_parse_and_map(event_timer, 0);
47 if (irq == NO_IRQ)
48 panic("No IRQ for clock event timer");
49
50 timer_get_base_and_rate(event_timer, &iobase, &rate);
51
52 ced = dw_apb_clockevent_init(0, event_timer->name, 300, iobase, irq,
53 rate);
54 if (!ced)
55 panic("Unable to initialise clockevent device");
56
57 dw_apb_clockevent_register(ced);
58}
59
60static void add_clocksource(struct device_node *source_timer)
61{
62 void __iomem *iobase;
63 struct dw_apb_clocksource *cs;
64 u32 rate;
65
66 timer_get_base_and_rate(source_timer, &iobase, &rate);
67
68 cs = dw_apb_clocksource_init(300, source_timer->name, iobase, rate);
69 if (!cs)
70 panic("Unable to initialise clocksource device");
71
72 dw_apb_clocksource_start(cs);
73 dw_apb_clocksource_register(cs);
74}
75
76static void __iomem *sched_io_base;
77
78static u32 read_sched_clock(void)
79{
80 return __raw_readl(sched_io_base);
81}
82
83static const struct of_device_id sptimer_ids[] __initconst = {
84 { .compatible = "picochip,pc3x2-rtc" },
85 { .compatible = "snps,dw-apb-timer-sp" },
86 { /* Sentinel */ },
87};
88
89static void init_sched_clock(void)
90{
91 struct device_node *sched_timer;
92 u32 rate;
93
94 sched_timer = of_find_matching_node(NULL, sptimer_ids);
95 if (!sched_timer)
96 panic("No RTC for sched clock to use");
97
98 timer_get_base_and_rate(sched_timer, &sched_io_base, &rate);
99 of_node_put(sched_timer);
100
101 setup_sched_clock(read_sched_clock, 32, rate);
102}
103
104static const struct of_device_id osctimer_ids[] __initconst = {
105 { .compatible = "picochip,pc3x2-timer" },
106 { .compatible = "snps,dw-apb-timer-osc" },
107 {},
108};
109
110static void __init timer_init(void)
111{
112 struct device_node *event_timer, *source_timer;
113
114 event_timer = of_find_matching_node(NULL, osctimer_ids);
115 if (!event_timer)
116 panic("No timer for clockevent");
117 add_clockevent(event_timer);
118
119 source_timer = of_find_matching_node(event_timer, osctimer_ids);
120 if (!source_timer)
121 panic("No timer for clocksource");
122 add_clocksource(source_timer);
123
124 of_node_put(source_timer);
125
126 init_sched_clock();
127}
128
129struct sys_timer dw_apb_timer = {
130 .init = timer_init,
131};
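
One detail worth calling out in timer_init() above: the clocksource node is found by passing the clockevent node as the 'from' argument to of_find_matching_node(), which continues the search after that node and also drops the reference held on it. That is why only source_timer gets an explicit of_node_put(). A minimal sketch of the pattern, with my_ids as a hypothetical match table:

	struct device_node *first, *second;

	first = of_find_matching_node(NULL, my_ids);	/* takes a ref on first */
	second = of_find_matching_node(first, my_ids);	/* puts first, refs second */
	of_node_put(second);				/* only second still needs a put */
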
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 32fe9ef5cc5c..98b06baafcc6 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -48,13 +48,13 @@ struct sh_cmt_priv {
48 unsigned long next_match_value; 48 unsigned long next_match_value;
49 unsigned long max_match_value; 49 unsigned long max_match_value;
50 unsigned long rate; 50 unsigned long rate;
51 spinlock_t lock; 51 raw_spinlock_t lock;
52 struct clock_event_device ced; 52 struct clock_event_device ced;
53 struct clocksource cs; 53 struct clocksource cs;
54 unsigned long total_cycles; 54 unsigned long total_cycles;
55}; 55};
56 56
57static DEFINE_SPINLOCK(sh_cmt_lock); 57static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
58 58
59#define CMSTR -1 /* shared register */ 59#define CMSTR -1 /* shared register */
60#define CMCSR 0 /* channel register */ 60#define CMCSR 0 /* channel register */
@@ -139,7 +139,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
139 unsigned long flags, value; 139 unsigned long flags, value;
140 140
141 /* start stop register shared by multiple timer channels */ 141 /* start stop register shared by multiple timer channels */
142 spin_lock_irqsave(&sh_cmt_lock, flags); 142 raw_spin_lock_irqsave(&sh_cmt_lock, flags);
143 value = sh_cmt_read(p, CMSTR); 143 value = sh_cmt_read(p, CMSTR);
144 144
145 if (start) 145 if (start)
@@ -148,7 +148,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
148 value &= ~(1 << cfg->timer_bit); 148 value &= ~(1 << cfg->timer_bit);
149 149
150 sh_cmt_write(p, CMSTR, value); 150 sh_cmt_write(p, CMSTR, value);
151 spin_unlock_irqrestore(&sh_cmt_lock, flags); 151 raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
152} 152}
153 153
154static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) 154static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
@@ -328,9 +328,9 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
328{ 328{
329 unsigned long flags; 329 unsigned long flags;
330 330
331 spin_lock_irqsave(&p->lock, flags); 331 raw_spin_lock_irqsave(&p->lock, flags);
332 __sh_cmt_set_next(p, delta); 332 __sh_cmt_set_next(p, delta);
333 spin_unlock_irqrestore(&p->lock, flags); 333 raw_spin_unlock_irqrestore(&p->lock, flags);
334} 334}
335 335
336static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) 336static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
@@ -385,7 +385,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
385 int ret = 0; 385 int ret = 0;
386 unsigned long flags; 386 unsigned long flags;
387 387
388 spin_lock_irqsave(&p->lock, flags); 388 raw_spin_lock_irqsave(&p->lock, flags);
389 389
390 if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) 390 if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
391 ret = sh_cmt_enable(p, &p->rate); 391 ret = sh_cmt_enable(p, &p->rate);
@@ -398,7 +398,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
398 if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) 398 if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
399 __sh_cmt_set_next(p, p->max_match_value); 399 __sh_cmt_set_next(p, p->max_match_value);
400 out: 400 out:
401 spin_unlock_irqrestore(&p->lock, flags); 401 raw_spin_unlock_irqrestore(&p->lock, flags);
402 402
403 return ret; 403 return ret;
404} 404}
@@ -408,7 +408,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
408 unsigned long flags; 408 unsigned long flags;
409 unsigned long f; 409 unsigned long f;
410 410
411 spin_lock_irqsave(&p->lock, flags); 411 raw_spin_lock_irqsave(&p->lock, flags);
412 412
413 f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); 413 f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
414 p->flags &= ~flag; 414 p->flags &= ~flag;
@@ -420,7 +420,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
420 if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) 420 if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
421 __sh_cmt_set_next(p, p->max_match_value); 421 __sh_cmt_set_next(p, p->max_match_value);
422 422
423 spin_unlock_irqrestore(&p->lock, flags); 423 raw_spin_unlock_irqrestore(&p->lock, flags);
424} 424}
425 425
426static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs) 426static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
@@ -435,13 +435,13 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
435 unsigned long value; 435 unsigned long value;
436 int has_wrapped; 436 int has_wrapped;
437 437
438 spin_lock_irqsave(&p->lock, flags); 438 raw_spin_lock_irqsave(&p->lock, flags);
439 value = p->total_cycles; 439 value = p->total_cycles;
440 raw = sh_cmt_get_counter(p, &has_wrapped); 440 raw = sh_cmt_get_counter(p, &has_wrapped);
441 441
442 if (unlikely(has_wrapped)) 442 if (unlikely(has_wrapped))
443 raw += p->match_value + 1; 443 raw += p->match_value + 1;
444 spin_unlock_irqrestore(&p->lock, flags); 444 raw_spin_unlock_irqrestore(&p->lock, flags);
445 445
446 return value + raw; 446 return value + raw;
447} 447}
@@ -591,7 +591,7 @@ static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
591 p->max_match_value = (1 << p->width) - 1; 591 p->max_match_value = (1 << p->width) - 1;
592 592
593 p->match_value = p->max_match_value; 593 p->match_value = p->max_match_value;
594 spin_lock_init(&p->lock); 594 raw_spin_lock_init(&p->lock);
595 595
596 if (clockevent_rating) 596 if (clockevent_rating)
597 sh_cmt_register_clockevent(p, name, clockevent_rating); 597 sh_cmt_register_clockevent(p, name, clockevent_rating);
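
The lock conversions in this file (and in the sh_mtu2 and sh_tmu hunks below) switch to raw_spinlock_t because, under PREEMPT_RT, a plain spinlock_t turns into a sleeping lock, while these locks are taken in clockevent/clocksource paths that run with interrupts off and must not sleep. A minimal sketch of the pattern, with foo_lock and foo_hw_write() as hypothetical stand-ins:

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(foo_lock);

	static void foo_hw_write(int start)
	{
		/* touch the start/stop register shared between channels */
	}

	static void foo_start_stop(int start)
	{
		unsigned long flags;

		/* raw_spin_lock_irqsave() keeps true spinning semantics on RT */
		raw_spin_lock_irqsave(&foo_lock, flags);
		foo_hw_write(start);
		raw_spin_unlock_irqrestore(&foo_lock, flags);
	}
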
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index a2172f690418..d9b76ca64a61 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -43,7 +43,7 @@ struct sh_mtu2_priv {
43 struct clock_event_device ced; 43 struct clock_event_device ced;
44}; 44};
45 45
46static DEFINE_SPINLOCK(sh_mtu2_lock); 46static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);
47 47
48#define TSTR -1 /* shared register */ 48#define TSTR -1 /* shared register */
49#define TCR 0 /* channel register */ 49#define TCR 0 /* channel register */
@@ -107,7 +107,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
107 unsigned long flags, value; 107 unsigned long flags, value;
108 108
109 /* start stop register shared by multiple timer channels */ 109 /* start stop register shared by multiple timer channels */
110 spin_lock_irqsave(&sh_mtu2_lock, flags); 110 raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
111 value = sh_mtu2_read(p, TSTR); 111 value = sh_mtu2_read(p, TSTR);
112 112
113 if (start) 113 if (start)
@@ -116,7 +116,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
116 value &= ~(1 << cfg->timer_bit); 116 value &= ~(1 << cfg->timer_bit);
117 117
118 sh_mtu2_write(p, TSTR, value); 118 sh_mtu2_write(p, TSTR, value);
119 spin_unlock_irqrestore(&sh_mtu2_lock, flags); 119 raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
120} 120}
121 121
122static int sh_mtu2_enable(struct sh_mtu2_priv *p) 122static int sh_mtu2_enable(struct sh_mtu2_priv *p)
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 97f54b634be4..c1b51d49d106 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -45,7 +45,7 @@ struct sh_tmu_priv {
45 struct clocksource cs; 45 struct clocksource cs;
46}; 46};
47 47
48static DEFINE_SPINLOCK(sh_tmu_lock); 48static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
49 49
50#define TSTR -1 /* shared register */ 50#define TSTR -1 /* shared register */
51#define TCOR 0 /* channel register */ 51#define TCOR 0 /* channel register */
@@ -95,7 +95,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
95 unsigned long flags, value; 95 unsigned long flags, value;
96 96
97 /* start stop register shared by multiple timer channels */ 97 /* start stop register shared by multiple timer channels */
98 spin_lock_irqsave(&sh_tmu_lock, flags); 98 raw_spin_lock_irqsave(&sh_tmu_lock, flags);
99 value = sh_tmu_read(p, TSTR); 99 value = sh_tmu_read(p, TSTR);
100 100
101 if (start) 101 if (start)
@@ -104,7 +104,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
104 value &= ~(1 << cfg->timer_bit); 104 value &= ~(1 << cfg->timer_bit);
105 105
106 sh_tmu_write(p, TSTR, value); 106 sh_tmu_write(p, TSTR, value);
107 spin_unlock_irqrestore(&sh_tmu_lock, flags); 107 raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
108} 108}
109 109
110static int sh_tmu_enable(struct sh_tmu_priv *p) 110static int sh_tmu_enable(struct sh_tmu_priv *p)
@@ -245,12 +245,7 @@ static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
245 245
246 sh_tmu_enable(p); 246 sh_tmu_enable(p);
247 247
248 /* TODO: calculate good shift from rate and counter bit width */ 248 clockevents_config(ced, p->rate);
249
250 ced->shift = 32;
251 ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
252 ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced);
253 ced->min_delta_ns = 5000;
254 249
255 if (periodic) { 250 if (periodic) {
256 p->periodic = (p->rate + HZ/2) / HZ; 251 p->periodic = (p->rate + HZ/2) / HZ;
@@ -323,7 +318,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
323 ced->set_mode = sh_tmu_clock_event_mode; 318 ced->set_mode = sh_tmu_clock_event_mode;
324 319
325 dev_info(&p->pdev->dev, "used for clock events\n"); 320 dev_info(&p->pdev->dev, "used for clock events\n");
326 clockevents_register_device(ced); 321
322 clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);
327 323
328 ret = setup_irq(p->irqaction.irq, &p->irqaction); 324 ret = setup_irq(p->irqaction.irq, &p->irqaction);
329 if (ret) { 325 if (ret) {
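
The sh_tmu change above drops the open-coded shift/mult/delta setup in favor of the core helpers: clockevents_config_and_register(ced, 1, 0x300, 0xffffffff) registers the device with its tick limits (the rate of 1 is a placeholder, since the real rate is only known after the timer clock is enabled, at which point clockevents_config(ced, p->rate) fills it in). Roughly, the helper stores the tick limits, derives mult/shift from the frequency, and converts the limits to nanoseconds; a simplified sketch under those assumptions (not the actual kernel/time/clockevents.c code):

	static void config_and_register_sketch(struct clock_event_device *dev,
					       u32 freq, unsigned long min_delta,
					       unsigned long max_delta)
	{
		dev->min_delta_ticks = min_delta;
		dev->max_delta_ticks = max_delta;
		/* choose mult/shift for ns<->ticks conversion at freq */
		clockevents_calc_mult_shift(dev, freq, 600 /* max secs, clamped */);
		dev->min_delta_ns = clockevent_delta2ns(min_delta, dev);
		dev->max_delta_ns = clockevent_delta2ns(max_delta, dev);
		clockevents_register_device(dev);
	}
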
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
new file mode 100644
index 000000000000..4674f94957cd
--- /dev/null
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -0,0 +1,226 @@
1/*
2 * Marvell Armada 370/XP SoC timer handling.
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Lior Amsalem <alior@marvell.com>
7 * Gregory CLEMENT <gregory.clement@free-electrons.com>
8 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
9 *
10 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any
12 * warranty of any kind, whether express or implied.
13 *
14 * Timer 0 is used as free-running clocksource, while timer 1 is
15 * used as clock_event_device.
16 */
17
18#include <linux/init.h>
19#include <linux/platform_device.h>
20#include <linux/kernel.h>
21#include <linux/timer.h>
22#include <linux/clockchips.h>
23#include <linux/interrupt.h>
24#include <linux/of.h>
25#include <linux/of_irq.h>
26#include <linux/of_address.h>
27#include <linux/irq.h>
28#include <linux/module.h>
29#include <asm/sched_clock.h>
30
31/*
32 * Timer block registers.
33 */
34#define TIMER_CTRL_OFF 0x0000
35#define TIMER0_EN 0x0001
36#define TIMER0_RELOAD_EN 0x0002
37#define TIMER0_25MHZ 0x0800
38#define TIMER0_DIV(div) ((div) << 19)
39#define TIMER1_EN 0x0004
40#define TIMER1_RELOAD_EN 0x0008
41#define TIMER1_25MHZ 0x1000
42#define TIMER1_DIV(div) ((div) << 22)
43#define TIMER_EVENTS_STATUS 0x0004
44#define TIMER0_CLR_MASK (~0x1)
45#define TIMER1_CLR_MASK (~0x100)
46#define TIMER0_RELOAD_OFF 0x0010
47#define TIMER0_VAL_OFF 0x0014
48#define TIMER1_RELOAD_OFF 0x0018
49#define TIMER1_VAL_OFF 0x001c
50
51/* Global timers are connected to the coherency fabric clock, and the
52 below divider reduces their incrementing frequency. */
53#define TIMER_DIVIDER_SHIFT 5
54#define TIMER_DIVIDER (1 << TIMER_DIVIDER_SHIFT)
55
56/*
57 * SoC-specific data.
58 */
59static void __iomem *timer_base;
60static int timer_irq;
61
62/*
63 * Number of timer ticks per jiffy.
64 */
65static u32 ticks_per_jiffy;
66
67static u32 notrace armada_370_xp_read_sched_clock(void)
68{
69 return ~readl(timer_base + TIMER0_VAL_OFF);
70}
71
72/*
73 * Clockevent handling.
74 */
75static int
76armada_370_xp_clkevt_next_event(unsigned long delta,
77 struct clock_event_device *dev)
78{
79 u32 u;
80
81 /*
82 * Clear clockevent timer interrupt.
83 */
84 writel(TIMER1_CLR_MASK, timer_base + TIMER_EVENTS_STATUS);
85
86 /*
87 * Setup new clockevent timer value.
88 */
89 writel(delta, timer_base + TIMER1_VAL_OFF);
90
91 /*
92 * Enable the timer.
93 */
94 u = readl(timer_base + TIMER_CTRL_OFF);
95 u = ((u & ~TIMER1_RELOAD_EN) | TIMER1_EN |
96 TIMER1_DIV(TIMER_DIVIDER_SHIFT));
97 writel(u, timer_base + TIMER_CTRL_OFF);
98
99 return 0;
100}
101
102static void
103armada_370_xp_clkevt_mode(enum clock_event_mode mode,
104 struct clock_event_device *dev)
105{
106 u32 u;
107
108 if (mode == CLOCK_EVT_MODE_PERIODIC) {
109 /*
110 * Setup timer to fire at 1/HZ intervals.
111 */
112 writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD_OFF);
113 writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL_OFF);
114
115 /*
116 * Enable timer.
117 */
118 u = readl(timer_base + TIMER_CTRL_OFF);
119
120 writel((u | TIMER1_EN | TIMER1_RELOAD_EN |
121 TIMER1_DIV(TIMER_DIVIDER_SHIFT)),
122 timer_base + TIMER_CTRL_OFF);
123 } else {
124 /*
125 * Disable timer.
126 */
127 u = readl(timer_base + TIMER_CTRL_OFF);
128 writel(u & ~TIMER1_EN, timer_base + TIMER_CTRL_OFF);
129
130 /*
131 * ACK pending timer interrupt.
132 */
133 writel(TIMER1_CLR_MASK, timer_base + TIMER_EVENTS_STATUS);
134
135 }
136}
137
138static struct clock_event_device armada_370_xp_clkevt = {
139 .name = "armada_370_xp_tick",
140 .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
141 .shift = 32,
142 .rating = 300,
143 .set_next_event = armada_370_xp_clkevt_next_event,
144 .set_mode = armada_370_xp_clkevt_mode,
145};
146
147static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
148{
149 /*
150 * ACK timer interrupt and call event handler.
151 */
152
153 writel(TIMER1_CLR_MASK, timer_base + TIMER_EVENTS_STATUS);
154 armada_370_xp_clkevt.event_handler(&armada_370_xp_clkevt);
155
156 return IRQ_HANDLED;
157}
158
159static struct irqaction armada_370_xp_timer_irq = {
160 .name = "armada_370_xp_tick",
161 .flags = IRQF_DISABLED | IRQF_TIMER,
162 .handler = armada_370_xp_timer_interrupt
163};
164
165void __init armada_370_xp_timer_init(void)
166{
167 u32 u;
168 struct device_node *np;
169 unsigned int timer_clk;
170 int ret;
171 np = of_find_compatible_node(NULL, NULL, "marvell,armada-370-xp-timer");
172 timer_base = of_iomap(np, 0);
173 WARN_ON(!timer_base);
174
175 if (of_find_property(np, "marvell,timer-25Mhz", NULL)) {
176 /* The fixed 25MHz timer is available so let's use it */
177 u = readl(timer_base + TIMER_CTRL_OFF);
178 writel(u | TIMER0_25MHZ | TIMER1_25MHZ,
179 timer_base + TIMER_CTRL_OFF);
180 timer_clk = 25000000;
181 } else {
182 u32 clk = 0;
183 ret = of_property_read_u32(np, "clock-frequency", &clk);
184 WARN_ON(!clk || ret < 0);
185 u = readl(timer_base + TIMER_CTRL_OFF);
186 writel(u & ~(TIMER0_25MHZ | TIMER1_25MHZ),
187 timer_base + TIMER_CTRL_OFF);
188 timer_clk = clk / TIMER_DIVIDER;
189 }
190
191 /* We use timer 0 as clocksource, and timer 1 for
192 clockevents */
193 timer_irq = irq_of_parse_and_map(np, 1);
194
195 ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
196
197 /*
198 * Set scale and timer for sched_clock.
199 */
200 setup_sched_clock(armada_370_xp_read_sched_clock, 32, timer_clk);
201
202 /*
203 * Setup free-running clocksource timer (interrupts
204 * disabled).
205 */
206 writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
207 writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
208
209 u = readl(timer_base + TIMER_CTRL_OFF);
210
211 writel((u | TIMER0_EN | TIMER0_RELOAD_EN |
212 TIMER0_DIV(TIMER_DIVIDER_SHIFT)), timer_base + TIMER_CTRL_OFF);
213
214 clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
215 "armada_370_xp_clocksource",
216 timer_clk, 300, 32, clocksource_mmio_readl_down);
217
218 /*
219 * Setup clockevent timer (interrupt-driven).
220 */
221 setup_irq(timer_irq, &armada_370_xp_timer_irq);
222 armada_370_xp_clkevt.cpumask = cpumask_of(0);
223 clockevents_config_and_register(&armada_370_xp_clkevt,
224 timer_clk, 1, 0xfffffffe);
225}
226
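A quick worked example of the divider arithmetic above, assuming a 600 MHz coherency-fabric clock and HZ=100 (both figures are illustrative assumptions, not from the patch):

	/* TIMER_DIVIDER = 1 << 5 = 32 */
	timer_clk = 600000000 / 32;			/* = 18750000, i.e. 18.75 MHz */
	ticks_per_jiffy = (18750000 + 100 / 2) / 100;	/* = 187500, rounded to nearest */

Note that TIMER0_DIV()/TIMER1_DIV() are passed TIMER_DIVIDER_SHIFT (the exponent, 5) rather than TIMER_DIVIDER itself, so the hardware divider field evidently encodes the shift, not the divisor.
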
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 77e1e6cd66ce..3e92b7d3fcd2 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -46,7 +46,7 @@ static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
46static inline void get_seq(__u32 *ts, int *cpu) 46static inline void get_seq(__u32 *ts, int *cpu)
47{ 47{
48 preempt_disable(); 48 preempt_disable();
49 *ts = __this_cpu_inc_return(proc_event_counts) -1; 49 *ts = __this_cpu_inc_return(proc_event_counts) - 1;
50 *cpu = smp_processor_id(); 50 *cpu = smp_processor_id();
51 preempt_enable(); 51 preempt_enable();
52} 52}
@@ -62,8 +62,8 @@ void proc_fork_connector(struct task_struct *task)
62 if (atomic_read(&proc_event_num_listeners) < 1) 62 if (atomic_read(&proc_event_num_listeners) < 1)
63 return; 63 return;
64 64
65 msg = (struct cn_msg*)buffer; 65 msg = (struct cn_msg *)buffer;
66 ev = (struct proc_event*)msg->data; 66 ev = (struct proc_event *)msg->data;
67 get_seq(&msg->seq, &ev->cpu); 67 get_seq(&msg->seq, &ev->cpu);
68 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 68 ktime_get_ts(&ts); /* get high res monotonic timestamp */
69 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 69 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -93,8 +93,8 @@ void proc_exec_connector(struct task_struct *task)
93 if (atomic_read(&proc_event_num_listeners) < 1) 93 if (atomic_read(&proc_event_num_listeners) < 1)
94 return; 94 return;
95 95
96 msg = (struct cn_msg*)buffer; 96 msg = (struct cn_msg *)buffer;
97 ev = (struct proc_event*)msg->data; 97 ev = (struct proc_event *)msg->data;
98 get_seq(&msg->seq, &ev->cpu); 98 get_seq(&msg->seq, &ev->cpu);
99 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 99 ktime_get_ts(&ts); /* get high res monotonic timestamp */
100 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 100 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -119,8 +119,8 @@ void proc_id_connector(struct task_struct *task, int which_id)
119 if (atomic_read(&proc_event_num_listeners) < 1) 119 if (atomic_read(&proc_event_num_listeners) < 1)
120 return; 120 return;
121 121
122 msg = (struct cn_msg*)buffer; 122 msg = (struct cn_msg *)buffer;
123 ev = (struct proc_event*)msg->data; 123 ev = (struct proc_event *)msg->data;
124 ev->what = which_id; 124 ev->what = which_id;
125 ev->event_data.id.process_pid = task->pid; 125 ev->event_data.id.process_pid = task->pid;
126 ev->event_data.id.process_tgid = task->tgid; 126 ev->event_data.id.process_tgid = task->tgid;
@@ -134,7 +134,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
134 ev->event_data.id.e.egid = cred->egid; 134 ev->event_data.id.e.egid = cred->egid;
135 } else { 135 } else {
136 rcu_read_unlock(); 136 rcu_read_unlock();
137 return; 137 return;
138 } 138 }
139 rcu_read_unlock(); 139 rcu_read_unlock();
140 get_seq(&msg->seq, &ev->cpu); 140 get_seq(&msg->seq, &ev->cpu);
@@ -241,8 +241,8 @@ void proc_exit_connector(struct task_struct *task)
241 if (atomic_read(&proc_event_num_listeners) < 1) 241 if (atomic_read(&proc_event_num_listeners) < 1)
242 return; 242 return;
243 243
244 msg = (struct cn_msg*)buffer; 244 msg = (struct cn_msg *)buffer;
245 ev = (struct proc_event*)msg->data; 245 ev = (struct proc_event *)msg->data;
246 get_seq(&msg->seq, &ev->cpu); 246 get_seq(&msg->seq, &ev->cpu);
247 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 247 ktime_get_ts(&ts); /* get high res monotonic timestamp */
248 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 248 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -276,8 +276,8 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
276 if (atomic_read(&proc_event_num_listeners) < 1) 276 if (atomic_read(&proc_event_num_listeners) < 1)
277 return; 277 return;
278 278
279 msg = (struct cn_msg*)buffer; 279 msg = (struct cn_msg *)buffer;
280 ev = (struct proc_event*)msg->data; 280 ev = (struct proc_event *)msg->data;
281 msg->seq = rcvd_seq; 281 msg->seq = rcvd_seq;
282 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 282 ktime_get_ts(&ts); /* get high res monotonic timestamp */
283 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 283 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -303,7 +303,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
303 if (msg->len != sizeof(*mc_op)) 303 if (msg->len != sizeof(*mc_op))
304 return; 304 return;
305 305
306 mc_op = (enum proc_cn_mcast_op*)msg->data; 306 mc_op = (enum proc_cn_mcast_op *)msg->data;
307 switch (*mc_op) { 307 switch (*mc_op) {
308 case PROC_CN_MCAST_LISTEN: 308 case PROC_CN_MCAST_LISTEN:
309 atomic_inc(&proc_event_num_listeners); 309 atomic_inc(&proc_event_num_listeners);
@@ -325,11 +325,11 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
325 */ 325 */
326static int __init cn_proc_init(void) 326static int __init cn_proc_init(void)
327{ 327{
328 int err; 328 int err = cn_add_callback(&cn_proc_event_id,
329 329 "cn_proc",
330 if ((err = cn_add_callback(&cn_proc_event_id, "cn_proc", 330 &cn_proc_mcast_ctl);
331 &cn_proc_mcast_ctl))) { 331 if (err) {
332 printk(KERN_WARNING "cn_proc failed to register\n"); 332 pr_warn("cn_proc failed to register\n");
333 return err; 333 return err;
334 } 334 }
335 return 0; 335 return 0;
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index c42c9d517790..1f8bf054d11c 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * cn_queue.c 2 * cn_queue.c
3 * 3 *
4 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net> 4 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
5 * All rights reserved. 5 * All rights reserved.
@@ -34,13 +34,14 @@
34static struct cn_callback_entry * 34static struct cn_callback_entry *
35cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name, 35cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
36 struct cb_id *id, 36 struct cb_id *id,
37 void (*callback)(struct cn_msg *, struct netlink_skb_parms *)) 37 void (*callback)(struct cn_msg *,
38 struct netlink_skb_parms *))
38{ 39{
39 struct cn_callback_entry *cbq; 40 struct cn_callback_entry *cbq;
40 41
41 cbq = kzalloc(sizeof(*cbq), GFP_KERNEL); 42 cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
42 if (!cbq) { 43 if (!cbq) {
43 printk(KERN_ERR "Failed to create new callback queue.\n"); 44 pr_err("Failed to create new callback queue.\n");
44 return NULL; 45 return NULL;
45 } 46 }
46 47
@@ -71,7 +72,8 @@ int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
71 72
72int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name, 73int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
73 struct cb_id *id, 74 struct cb_id *id,
74 void (*callback)(struct cn_msg *, struct netlink_skb_parms *)) 75 void (*callback)(struct cn_msg *,
76 struct netlink_skb_parms *))
75{ 77{
76 struct cn_callback_entry *cbq, *__cbq; 78 struct cn_callback_entry *cbq, *__cbq;
77 int found = 0; 79 int found = 0;
@@ -149,7 +151,7 @@ void cn_queue_free_dev(struct cn_queue_dev *dev)
149 spin_unlock_bh(&dev->queue_lock); 151 spin_unlock_bh(&dev->queue_lock);
150 152
151 while (atomic_read(&dev->refcnt)) { 153 while (atomic_read(&dev->refcnt)) {
152 printk(KERN_INFO "Waiting for %s to become free: refcnt=%d.\n", 154 pr_info("Waiting for %s to become free: refcnt=%d.\n",
153 dev->name, atomic_read(&dev->refcnt)); 155 dev->name, atomic_read(&dev->refcnt));
154 msleep(1000); 156 msleep(1000);
155 } 157 }
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index dde6a0fad408..82fa4f0f91d6 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * connector.c 2 * connector.c
3 * 3 *
4 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net> 4 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
5 * All rights reserved. 5 * All rights reserved.
@@ -101,19 +101,19 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
101 if (!skb) 101 if (!skb)
102 return -ENOMEM; 102 return -ENOMEM;
103 103
104 nlh = NLMSG_PUT(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh)); 104 nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh), 0);
105 if (!nlh) {
106 kfree_skb(skb);
107 return -EMSGSIZE;
108 }
105 109
106 data = NLMSG_DATA(nlh); 110 data = nlmsg_data(nlh);
107 111
108 memcpy(data, msg, sizeof(*data) + msg->len); 112 memcpy(data, msg, sizeof(*data) + msg->len);
109 113
110 NETLINK_CB(skb).dst_group = group; 114 NETLINK_CB(skb).dst_group = group;
111 115
112 return netlink_broadcast(dev->nls, skb, 0, group, gfp_mask); 116 return netlink_broadcast(dev->nls, skb, 0, group, gfp_mask);
113
114nlmsg_failure:
115 kfree_skb(skb);
116 return -EINVAL;
117} 117}
118EXPORT_SYMBOL_GPL(cn_netlink_send); 118EXPORT_SYMBOL_GPL(cn_netlink_send);
119 119
@@ -185,7 +185,8 @@ static void cn_rx_skb(struct sk_buff *__skb)
185 * May sleep. 185 * May sleep.
186 */ 186 */
187int cn_add_callback(struct cb_id *id, const char *name, 187int cn_add_callback(struct cb_id *id, const char *name,
188 void (*callback)(struct cn_msg *, struct netlink_skb_parms *)) 188 void (*callback)(struct cn_msg *,
189 struct netlink_skb_parms *))
189{ 190{
190 int err; 191 int err;
191 struct cn_dev *dev = &cdev; 192 struct cn_dev *dev = &cdev;
@@ -251,15 +252,20 @@ static const struct file_operations cn_file_ops = {
251 .release = single_release 252 .release = single_release
252}; 253};
253 254
255static struct cn_dev cdev = {
256 .input = cn_rx_skb,
257};
258
254static int __devinit cn_init(void) 259static int __devinit cn_init(void)
255{ 260{
256 struct cn_dev *dev = &cdev; 261 struct cn_dev *dev = &cdev;
257 262 struct netlink_kernel_cfg cfg = {
258 dev->input = cn_rx_skb; 263 .groups = CN_NETLINK_USERS + 0xf,
264 .input = dev->input,
265 };
259 266
260 dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, 267 dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR,
261 CN_NETLINK_USERS + 0xf, 268 THIS_MODULE, &cfg);
262 dev->input, NULL, THIS_MODULE);
263 if (!dev->nls) 269 if (!dev->nls)
264 return -EIO; 270 return -EIO;
265 271
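
The connector.c hunks track two netlink API changes: nlmsg_put() signals failure by returning NULL instead of NLMSG_PUT()'s hidden jump to an nlmsg_failure label (hence the removed label and the explicit -EMSGSIZE path), and netlink_kernel_create() now takes its parameters through struct netlink_kernel_cfg. A minimal sketch of the new creation pattern, with my_input() as a hypothetical callback:

	#include <linux/netlink.h>
	#include <net/net_namespace.h>

	static void my_input(struct sk_buff *skb)
	{
		/* consume the skb */
	}

	static struct sock *my_create(void)
	{
		struct netlink_kernel_cfg cfg = {
			.groups = 1,
			.input  = my_input,
		};

		return netlink_kernel_create(&init_net, NETLINK_CONNECTOR,
					     THIS_MODULE, &cfg);
	}
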
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 7f2f149ae40f..fb8a5279c5d8 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -138,7 +138,7 @@ void disable_cpufreq(void)
138static LIST_HEAD(cpufreq_governor_list); 138static LIST_HEAD(cpufreq_governor_list);
139static DEFINE_MUTEX(cpufreq_governor_mutex); 139static DEFINE_MUTEX(cpufreq_governor_mutex);
140 140
141struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) 141static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
142{ 142{
143 struct cpufreq_policy *data; 143 struct cpufreq_policy *data;
144 unsigned long flags; 144 unsigned long flags;
@@ -162,7 +162,7 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
162 if (!data) 162 if (!data)
163 goto err_out_put_module; 163 goto err_out_put_module;
164 164
165 if (!kobject_get(&data->kobj)) 165 if (!sysfs && !kobject_get(&data->kobj))
166 goto err_out_put_module; 166 goto err_out_put_module;
167 167
168 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 168 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -175,16 +175,35 @@ err_out_unlock:
175err_out: 175err_out:
176 return NULL; 176 return NULL;
177} 177}
178
179struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
180{
181 return __cpufreq_cpu_get(cpu, false);
182}
178EXPORT_SYMBOL_GPL(cpufreq_cpu_get); 183EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
179 184
185static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
186{
187 return __cpufreq_cpu_get(cpu, true);
188}
180 189
181void cpufreq_cpu_put(struct cpufreq_policy *data) 190static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
182{ 191{
183 kobject_put(&data->kobj); 192 if (!sysfs)
193 kobject_put(&data->kobj);
184 module_put(cpufreq_driver->owner); 194 module_put(cpufreq_driver->owner);
185} 195}
196
197void cpufreq_cpu_put(struct cpufreq_policy *data)
198{
199 __cpufreq_cpu_put(data, false);
200}
186EXPORT_SYMBOL_GPL(cpufreq_cpu_put); 201EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
187 202
203static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
204{
205 __cpufreq_cpu_put(data, true);
206}
188 207
189/********************************************************************* 208/*********************************************************************
190 * EXTERNALLY AFFECTING FREQUENCY CHANGES * 209 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
@@ -617,7 +636,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
617 struct cpufreq_policy *policy = to_policy(kobj); 636 struct cpufreq_policy *policy = to_policy(kobj);
618 struct freq_attr *fattr = to_attr(attr); 637 struct freq_attr *fattr = to_attr(attr);
619 ssize_t ret = -EINVAL; 638 ssize_t ret = -EINVAL;
620 policy = cpufreq_cpu_get(policy->cpu); 639 policy = cpufreq_cpu_get_sysfs(policy->cpu);
621 if (!policy) 640 if (!policy)
622 goto no_policy; 641 goto no_policy;
623 642
@@ -631,7 +650,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
631 650
632 unlock_policy_rwsem_read(policy->cpu); 651 unlock_policy_rwsem_read(policy->cpu);
633fail: 652fail:
634 cpufreq_cpu_put(policy); 653 cpufreq_cpu_put_sysfs(policy);
635no_policy: 654no_policy:
636 return ret; 655 return ret;
637} 656}
@@ -642,7 +661,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
642 struct cpufreq_policy *policy = to_policy(kobj); 661 struct cpufreq_policy *policy = to_policy(kobj);
643 struct freq_attr *fattr = to_attr(attr); 662 struct freq_attr *fattr = to_attr(attr);
644 ssize_t ret = -EINVAL; 663 ssize_t ret = -EINVAL;
645 policy = cpufreq_cpu_get(policy->cpu); 664 policy = cpufreq_cpu_get_sysfs(policy->cpu);
646 if (!policy) 665 if (!policy)
647 goto no_policy; 666 goto no_policy;
648 667
@@ -656,7 +675,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
656 675
657 unlock_policy_rwsem_write(policy->cpu); 676 unlock_policy_rwsem_write(policy->cpu);
658fail: 677fail:
659 cpufreq_cpu_put(policy); 678 cpufreq_cpu_put_sysfs(policy);
660no_policy: 679no_policy:
661 return ret; 680 return ret;
662} 681}
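
The cpufreq.c split above gives the sysfs show()/store() paths dedicated _sysfs variants of cpufreq_cpu_get()/cpufreq_cpu_put() that skip the kobject reference, apparently so an attribute callback can never drop the last reference to the policy kobject (and so start tearing it down) from inside one of that kobject's own sysfs methods while a hotplug is in flight. The resulting pairing rule, sketched with a hypothetical cpu variable:

	/* inside show()/store() on the policy kobject: no kobject_get()/put() */
	policy = cpufreq_cpu_get_sysfs(cpu);
	/* ... read or update the policy ... */
	cpufreq_cpu_put_sysfs(policy);

	/* everywhere else: the reference-taking variants */
	policy = cpufreq_cpu_get(cpu);
	/* ... */
	cpufreq_cpu_put(policy);
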
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index b243a7ee01f6..af2d81e10f71 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -62,8 +62,18 @@ static int exynos_target(struct cpufreq_policy *policy,
62 goto out; 62 goto out;
63 } 63 }
64 64
65 if (cpufreq_frequency_table_target(policy, freq_table, 65 /*
 66 freqs.old, relation, &old_index)) { 66 * The policy max may have been changed so that we cannot get the
 67 * proper old_index with cpufreq_frequency_table_target(). Thus,
 68 * ignore the policy and get the index from the raw frequency table.
69 */
70 for (old_index = 0;
71 freq_table[old_index].frequency != CPUFREQ_TABLE_END;
72 old_index++)
73 if (freq_table[old_index].frequency == freqs.old)
74 break;
75
76 if (freq_table[old_index].frequency == CPUFREQ_TABLE_END) {
67 ret = -EINVAL; 77 ret = -EINVAL;
68 goto out; 78 goto out;
69 } 79 }
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index a88331644ebf..e64c253cb169 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -65,20 +65,20 @@ static unsigned int clkdiv_cpu0_5250[CPUFREQ_LEVEL_END][8] = {
65 * Clock divider value for following 65 * Clock divider value for following
66 * { ARM, CPUD, ACP, PERIPH, ATB, PCLK_DBG, APLL, ARM2 } 66 * { ARM, CPUD, ACP, PERIPH, ATB, PCLK_DBG, APLL, ARM2 }
67 */ 67 */
68 { 0, 3, 7, 7, 6, 1, 3, 0 }, /* 1700 MHz - N/A */ 68 { 0, 3, 7, 7, 7, 3, 5, 0 }, /* 1700 MHz */
69 { 0, 3, 7, 7, 6, 1, 3, 0 }, /* 1600 MHz - N/A */ 69 { 0, 3, 7, 7, 7, 1, 4, 0 }, /* 1600 MHz */
70 { 0, 3, 7, 7, 5, 1, 3, 0 }, /* 1500 MHz - N/A */ 70 { 0, 2, 7, 7, 7, 1, 4, 0 }, /* 1500 MHz */
71 { 0, 3, 7, 7, 6, 1, 3, 0 }, /* 1400 MHz */ 71 { 0, 2, 7, 7, 6, 1, 4, 0 }, /* 1400 MHz */
72 { 0, 3, 7, 7, 6, 1, 3, 0 }, /* 1300 MHz */ 72 { 0, 2, 7, 7, 6, 1, 3, 0 }, /* 1300 MHz */
73 { 0, 3, 7, 7, 5, 1, 3, 0 }, /* 1200 MHz */ 73 { 0, 2, 7, 7, 5, 1, 3, 0 }, /* 1200 MHz */
74 { 0, 2, 7, 7, 5, 1, 2, 0 }, /* 1100 MHz */ 74 { 0, 3, 7, 7, 5, 1, 3, 0 }, /* 1100 MHz */
75 { 0, 2, 7, 7, 4, 1, 2, 0 }, /* 1000 MHz */ 75 { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 1000 MHz */
76 { 0, 2, 7, 7, 4, 1, 2, 0 }, /* 900 MHz */ 76 { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 900 MHz */
77 { 0, 2, 7, 7, 3, 1, 1, 0 }, /* 800 MHz */ 77 { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 800 MHz */
78 { 0, 1, 7, 7, 3, 1, 1, 0 }, /* 700 MHz */ 78 { 0, 1, 7, 7, 3, 1, 1, 0 }, /* 700 MHz */
79 { 0, 1, 7, 7, 2, 1, 1, 0 }, /* 600 MHz */ 79 { 0, 1, 7, 7, 3, 1, 1, 0 }, /* 600 MHz */
80 { 0, 1, 7, 7, 2, 1, 1, 0 }, /* 500 MHz */ 80 { 0, 1, 7, 7, 2, 1, 1, 0 }, /* 500 MHz */
81 { 0, 1, 7, 7, 1, 1, 1, 0 }, /* 400 MHz */ 81 { 0, 1, 7, 7, 2, 1, 1, 0 }, /* 400 MHz */
82 { 0, 1, 7, 7, 1, 1, 1, 0 }, /* 300 MHz */ 82 { 0, 1, 7, 7, 1, 1, 1, 0 }, /* 300 MHz */
83 { 0, 1, 7, 7, 1, 1, 1, 0 }, /* 200 MHz */ 83 { 0, 1, 7, 7, 1, 1, 1, 0 }, /* 200 MHz */
84}; 84};
@@ -87,9 +87,9 @@ static unsigned int clkdiv_cpu1_5250[CPUFREQ_LEVEL_END][2] = {
87 /* Clock divider value for following 87 /* Clock divider value for following
88 * { COPY, HPM } 88 * { COPY, HPM }
89 */ 89 */
90 { 0, 2 }, /* 1700 MHz - N/A */ 90 { 0, 2 }, /* 1700 MHz */
91 { 0, 2 }, /* 1600 MHz - N/A */ 91 { 0, 2 }, /* 1600 MHz */
92 { 0, 2 }, /* 1500 MHz - N/A */ 92 { 0, 2 }, /* 1500 MHz */
93 { 0, 2 }, /* 1400 MHz */ 93 { 0, 2 }, /* 1400 MHz */
94 { 0, 2 }, /* 1300 MHz */ 94 { 0, 2 }, /* 1300 MHz */
95 { 0, 2 }, /* 1200 MHz */ 95 { 0, 2 }, /* 1200 MHz */
@@ -106,10 +106,10 @@ static unsigned int clkdiv_cpu1_5250[CPUFREQ_LEVEL_END][2] = {
106}; 106};
107 107
108static unsigned int exynos5_apll_pms_table[CPUFREQ_LEVEL_END] = { 108static unsigned int exynos5_apll_pms_table[CPUFREQ_LEVEL_END] = {
109 (0), /* 1700 MHz - N/A */ 109 ((425 << 16) | (6 << 8) | 0), /* 1700 MHz */
110 (0), /* 1600 MHz - N/A */ 110 ((200 << 16) | (3 << 8) | 0), /* 1600 MHz */
111 (0), /* 1500 MHz - N/A */ 111 ((250 << 16) | (4 << 8) | 0), /* 1500 MHz */
112 (0), /* 1400 MHz */ 112 ((175 << 16) | (3 << 8) | 0), /* 1400 MHz */
113 ((325 << 16) | (6 << 8) | 0), /* 1300 MHz */ 113 ((325 << 16) | (6 << 8) | 0), /* 1300 MHz */
114 ((200 << 16) | (4 << 8) | 0), /* 1200 MHz */ 114 ((200 << 16) | (4 << 8) | 0), /* 1200 MHz */
115 ((275 << 16) | (6 << 8) | 0), /* 1100 MHz */ 115 ((275 << 16) | (6 << 8) | 0), /* 1100 MHz */
@@ -126,9 +126,10 @@ static unsigned int exynos5_apll_pms_table[CPUFREQ_LEVEL_END] = {
126 126
127/* ASV group voltage table */ 127/* ASV group voltage table */
128static const unsigned int asv_voltage_5250[CPUFREQ_LEVEL_END] = { 128static const unsigned int asv_voltage_5250[CPUFREQ_LEVEL_END] = {
129 0, 0, 0, 0, 0, 0, 0, /* 1700 MHz ~ 1100 MHz Not supported */ 129 1300000, 1250000, 1225000, 1200000, 1150000,
130 1175000, 1125000, 1075000, 1050000, 1000000, 130 1125000, 1100000, 1075000, 1050000, 1025000,
131 950000, 925000, 925000, 900000 131 1012500, 1000000, 975000, 950000, 937500,
132 925000
132}; 133};
133 134
134static void set_clkdiv(unsigned int div_index) 135static void set_clkdiv(unsigned int div_index)
@@ -248,15 +249,7 @@ static void __init set_volt_table(void)
248{ 249{
249 unsigned int i; 250 unsigned int i;
250 251
251 exynos5250_freq_table[L0].frequency = CPUFREQ_ENTRY_INVALID; 252 max_support_idx = L0;
252 exynos5250_freq_table[L1].frequency = CPUFREQ_ENTRY_INVALID;
253 exynos5250_freq_table[L2].frequency = CPUFREQ_ENTRY_INVALID;
254 exynos5250_freq_table[L3].frequency = CPUFREQ_ENTRY_INVALID;
255 exynos5250_freq_table[L4].frequency = CPUFREQ_ENTRY_INVALID;
256 exynos5250_freq_table[L5].frequency = CPUFREQ_ENTRY_INVALID;
257 exynos5250_freq_table[L6].frequency = CPUFREQ_ENTRY_INVALID;
258
259 max_support_idx = L7;
260 253
261 for (i = 0 ; i < CPUFREQ_LEVEL_END ; i++) 254 for (i = 0 ; i < CPUFREQ_LEVEL_END ; i++)
262 exynos5250_volt_table[i] = asv_voltage_5250[i]; 255 exynos5250_volt_table[i] = asv_voltage_5250[i];
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 17fa04d08be9..b47034e650a5 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -218,7 +218,7 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
218 218
219 policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu); 219 policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu);
220 220
221 if (atomic_inc_return(&freq_table_users) == 1) 221 if (!freq_table)
222 result = opp_init_cpufreq_table(mpu_dev, &freq_table); 222 result = opp_init_cpufreq_table(mpu_dev, &freq_table);
223 223
224 if (result) { 224 if (result) {
@@ -227,6 +227,8 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
227 goto fail_ck; 227 goto fail_ck;
228 } 228 }
229 229
230 atomic_inc_return(&freq_table_users);
231
230 result = cpufreq_frequency_table_cpuinfo(policy, freq_table); 232 result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
231 if (result) 233 if (result)
232 goto fail_table; 234 goto fail_table;
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index cdc02ac8f41a..503996a94a6a 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -454,6 +454,7 @@ static int __init pcc_cpufreq_probe(void)
454 mem_resource->address_length); 454 mem_resource->address_length);
455 if (pcch_virt_addr == NULL) { 455 if (pcch_virt_addr == NULL) {
456 pr_debug("probe: could not map shared mem region\n"); 456 pr_debug("probe: could not map shared mem region\n");
457 ret = -ENOMEM;
457 goto out_free; 458 goto out_free;
458 } 459 }
459 pcch_hdr = pcch_virt_addr; 460 pcch_hdr = pcch_virt_addr;
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 50d2f15a3c8a..bcc053bc02c4 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -153,7 +153,7 @@ static int s3c2416_cpufreq_enter_dvs(struct s3c2416_data *s3c_freq, int idx)
153 if (s3c_freq->vddarm) { 153 if (s3c_freq->vddarm) {
154 dvfs = &s3c2416_dvfs_table[idx]; 154 dvfs = &s3c2416_dvfs_table[idx];
155 155
156 pr_debug("cpufreq: setting regultor to %d-%d\n", 156 pr_debug("cpufreq: setting regulator to %d-%d\n",
157 dvfs->vddarm_min, dvfs->vddarm_max); 157 dvfs->vddarm_min, dvfs->vddarm_max);
158 ret = regulator_set_voltage(s3c_freq->vddarm, 158 ret = regulator_set_voltage(s3c_freq->vddarm,
159 dvfs->vddarm_min, 159 dvfs->vddarm_min,
@@ -186,7 +186,7 @@ static int s3c2416_cpufreq_leave_dvs(struct s3c2416_data *s3c_freq, int idx)
186 if (s3c_freq->vddarm) { 186 if (s3c_freq->vddarm) {
187 dvfs = &s3c2416_dvfs_table[idx]; 187 dvfs = &s3c2416_dvfs_table[idx];
188 188
189 pr_debug("cpufreq: setting regultor to %d-%d\n", 189 pr_debug("cpufreq: setting regulator to %d-%d\n",
190 dvfs->vddarm_min, dvfs->vddarm_max); 190 dvfs->vddarm_min, dvfs->vddarm_max);
191 ret = regulator_set_voltage(s3c_freq->vddarm, 191 ret = regulator_set_voltage(s3c_freq->vddarm,
192 dvfs->vddarm_min, 192 dvfs->vddarm_min,
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index 7432b3a72cd4..e29b59aa68a8 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -203,7 +203,7 @@ static unsigned int speedstep_detect_chipset(void)
203 if (speedstep_chipset_dev) { 203 if (speedstep_chipset_dev) {
204 /* speedstep.c causes lockups on Dell Inspirons 8000 and 204 /* speedstep.c causes lockups on Dell Inspirons 8000 and
205 * 8100 which use a pretty old revision of the 82815 205 * 8100 which use a pretty old revision of the 82815
206 * host brige. Abort on these systems. 206 * host bridge. Abort on these systems.
207 */ 207 */
208 static struct pci_dev *hostbridge; 208 static struct pci_dev *hostbridge;
209 209
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 78a666d1e5f5..a76b689e553b 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -18,3 +18,6 @@ config CPU_IDLE_GOV_MENU
18 bool 18 bool
19 depends on CPU_IDLE && NO_HZ 19 depends on CPU_IDLE && NO_HZ
20 default y 20 default y
21
22config ARCH_NEEDS_CPU_IDLE_COUPLED
23 def_bool n
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 5634f88379df..38c8f69f30cf 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -3,3 +3,4 @@
3# 3#
4 4
5obj-y += cpuidle.o driver.o governor.o sysfs.o governors/ 5obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
6obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
new file mode 100644
index 000000000000..3265844839bf
--- /dev/null
+++ b/drivers/cpuidle/coupled.c
@@ -0,0 +1,727 @@
1/*
2 * coupled.c - helper functions to enter the same idle state on multiple cpus
3 *
4 * Copyright (c) 2011 Google, Inc.
5 *
6 * Author: Colin Cross <ccross@android.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 */
18
19#include <linux/kernel.h>
20#include <linux/cpu.h>
21#include <linux/cpuidle.h>
22#include <linux/mutex.h>
23#include <linux/sched.h>
24#include <linux/slab.h>
25#include <linux/spinlock.h>
26
27#include "cpuidle.h"
28
29/**
30 * DOC: Coupled cpuidle states
31 *
32 * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the
33 * cpus cannot be independently powered down, either due to
34 * sequencing restrictions (on Tegra 2, cpu 0 must be the last to
35 * power down), or due to HW bugs (on OMAP4460, a cpu powering up
36 * will corrupt the gic state unless the other cpu runs a work
37 * around). Each cpu has a power state that it can enter without
38 * coordinating with the other cpu (usually Wait For Interrupt, or
39 * WFI), and one or more "coupled" power states that affect blocks
40 * shared between the cpus (L2 cache, interrupt controller, and
41 * sometimes the whole SoC). Entering a coupled power state must
42 * be tightly controlled on both cpus.
43 *
44 * This file implements a solution, where each cpu will wait in the
45 * WFI state until all cpus are ready to enter a coupled state, at
46 * which point the coupled state function will be called on all
47 * cpus at approximately the same time.
48 *
49 * Once all cpus are ready to enter idle, they are woken by an smp
50 * cross call. At this point, there is a chance that one of the
51 * cpus will find work to do, and choose not to enter idle. A
52 * final pass is needed to guarantee that all cpus will call the
53 * power state enter function at the same time. During this pass,
54 * each cpu will increment the ready counter, and continue once the
55 * ready counter matches the number of online coupled cpus. If any
56 * cpu exits idle, the other cpus will decrement their counter and
57 * retry.
58 *
59 * requested_state stores the deepest coupled idle state each cpu
60 * is ready for. It is assumed that the states are indexed from
61 * shallowest (highest power, lowest exit latency) to deepest
62 * (lowest power, highest exit latency). The requested_state
63 * variable is not locked. It is only written from the cpu that
64 * it stores (or by the on/offlining cpu if that cpu is offline),
65 * and only read after all the cpus are ready for the coupled idle
 66 * state and are no longer updating it.
67 *
68 * Three atomic counters are used. alive_count tracks the number
69 * of cpus in the coupled set that are currently or soon will be
70 * online. waiting_count tracks the number of cpus that are in
71 * the waiting loop, in the ready loop, or in the coupled idle state.
72 * ready_count tracks the number of cpus that are in the ready loop
73 * or in the coupled idle state.
74 *
75 * To use coupled cpuidle states, a cpuidle driver must:
76 *
77 * Set struct cpuidle_device.coupled_cpus to the mask of all
78 * coupled cpus, usually the same as cpu_possible_mask if all cpus
79 * are part of the same cluster. The coupled_cpus mask must be
80 * set in the struct cpuidle_device for each cpu.
81 *
82 * Set struct cpuidle_device.safe_state to a state that is not a
83 * coupled state. This is usually WFI.
84 *
85 * Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each
86 * state that affects multiple cpus.
87 *
88 * Provide a struct cpuidle_state.enter function for each state
89 * that affects multiple cpus. This function is guaranteed to be
90 * called on all cpus at approximately the same time. The driver
91 * should ensure that the cpus all abort together if any cpu tries
92 * to abort once the function is called. The function should return
93 * with interrupts still disabled.
94 */
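/*
 * Illustrative sketch of the driver-side setup described above; the
 * foo_* names are hypothetical and not part of this file:
 *
 *	drv->safe_state_index = 0;		(e.g. plain WFI)
 *	drv->states[1].flags |= CPUIDLE_FLAG_COUPLED;
 *	drv->states[1].enter = foo_coupled_enter;
 *	for_each_possible_cpu(cpu) {
 *		dev = &per_cpu(foo_cpuidle_device, cpu);
 *		cpumask_copy(&dev->coupled_cpus, cpu_possible_mask);
 *		cpuidle_register_device(dev);
 *	}
 */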
95
96/**
97 * struct cpuidle_coupled - data for set of cpus that share a coupled idle state
98 * @coupled_cpus: mask of cpus that are part of the coupled set
99 * @requested_state: array of requested states for cpus in the coupled set
100 * @ready_waiting_counts: combined count of cpus in ready or waiting loops
101 * @online_count: count of cpus that are online
102 * @refcnt: reference count of cpuidle devices that are using this struct
103 * @prevent: flag to prevent coupled idle while a cpu is hotplugging
104 */
105struct cpuidle_coupled {
106 cpumask_t coupled_cpus;
107 int requested_state[NR_CPUS];
108 atomic_t ready_waiting_counts;
109 int online_count;
110 int refcnt;
111 int prevent;
112};
113
114#define WAITING_BITS 16
115#define MAX_WAITING_CPUS (1 << WAITING_BITS)
116#define WAITING_MASK (MAX_WAITING_CPUS - 1)
117#define READY_MASK (~WAITING_MASK)
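/*
 * The two counts share one atomic_t: bits 0..WAITING_BITS-1 hold the
 * waiting count and the bits above hold the ready count, so adding
 * MAX_WAITING_CPUS (1 << 16) bumps the ready count without touching
 * the waiting count. For example, three cpus waiting with one of them
 * also ready gives a value of (1 << 16) | 3.
 */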
118
119#define CPUIDLE_COUPLED_NOT_IDLE (-1)
120
121static DEFINE_MUTEX(cpuidle_coupled_lock);
122static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
123
124/*
125 * The cpuidle_coupled_poked_mask mask is used to avoid calling
126 * __smp_call_function_single with the per cpu call_single_data struct already
127 * in use. This prevents a deadlock where two cpus are waiting for each others
128 * call_single_data struct to be available
129 */
130static cpumask_t cpuidle_coupled_poked_mask;
131
132/**
133 * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
134 * @dev: cpuidle_device of the calling cpu
135 * @a: atomic variable to hold the barrier
136 *
137 * No caller to this function will return from this function until all online
138 * cpus in the same coupled group have called this function. Once any caller
139 * has returned from this function, the barrier is immediately available for
140 * reuse.
141 *
142 * The atomic variable a must be initialized to 0 before any cpu calls
 143 * this function, and will be reset to 0 before any cpu returns from this function.
144 *
145 * Must only be called from within a coupled idle state handler
146 * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
147 *
148 * Provides full smp barrier semantics before and after calling.
149 */
150void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
151{
152 int n = dev->coupled->online_count;
153
154 smp_mb__before_atomic_inc();
155 atomic_inc(a);
156
157 while (atomic_read(a) < n)
158 cpu_relax();
159
160 if (atomic_inc_return(a) == n * 2) {
161 atomic_set(a, 0);
162 return;
163 }
164
165 while (atomic_read(a) > n)
166 cpu_relax();
167}
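/*
 * Worked example with n = 2: each cpu increments the barrier twice.
 * The first increments take the value to 2 and both cpus spin until
 * they see it; the second increments head toward 4, and the cpu whose
 * atomic_inc_return() hits 4 (== n * 2) resets the value to 0 while
 * the other spins until the value is no longer above n. The second
 * pass keeps a fast cpu from re-entering the barrier and seeing a
 * stale nonzero value from the previous round.
 */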
168
169/**
170 * cpuidle_state_is_coupled - check if a state is part of a coupled set
171 * @dev: struct cpuidle_device for the current cpu
172 * @drv: struct cpuidle_driver for the platform
173 * @state: index of the target state in drv->states
174 *
175 * Returns true if the target state is coupled with cpus besides this one
176 */
177bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
178 struct cpuidle_driver *drv, int state)
179{
180 return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
181}
182
183/**
184 * cpuidle_coupled_set_ready - mark a cpu as ready
185 * @coupled: the struct coupled that contains the current cpu
186 */
187static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled)
188{
189 atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
190}
191
192/**
193 * cpuidle_coupled_set_not_ready - mark a cpu as not ready
194 * @coupled: the struct coupled that contains the current cpu
195 *
196 * Decrements the ready counter, unless the ready (and thus the waiting) counter
197 * is equal to the number of online cpus. Prevents a race where one cpu
198 * decrements the waiting counter and then re-increments it just before another
199 * cpu has decremented its ready counter, leading to the ready counter going
200 * down from the number of online cpus without going through the coupled idle
201 * state.
202 *
203 * Returns 0 if the counter was decremented successfully, -EINVAL if the ready
204 * counter was equal to the number of online cpus.
205 */
206static
207inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
208{
209 int all;
210 int ret;
211
 212 all = coupled->online_count | (coupled->online_count << WAITING_BITS);
213 ret = atomic_add_unless(&coupled->ready_waiting_counts,
214 -MAX_WAITING_CPUS, all);
215
216 return ret ? 0 : -EINVAL;
217}
218
219/**
220 * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready
221 * @coupled: the struct coupled that contains the current cpu
222 *
223 * Returns true if all of the cpus in a coupled set are out of the ready loop.
224 */
225static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled)
226{
227 int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
228 return r == 0;
229}
230
231/**
232 * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready
233 * @coupled: the struct coupled that contains the current cpu
234 *
235 * Returns true if all cpus coupled to this target state are in the ready loop
236 */
237static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled)
238{
239 int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
240 return r == coupled->online_count;
241}
242
243/**
244 * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting
245 * @coupled: the struct coupled that contains the current cpu
246 *
247 * Returns true if all cpus coupled to this target state are in the wait loop
248 */
249static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled)
250{
251 int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
252 return w == coupled->online_count;
253}
254
255/**
256 * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting
257 * @coupled: the struct coupled that contains the current cpu
258 *
259 * Returns true if all of the cpus in a coupled set are out of the waiting loop.
260 */
261static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled)
262{
263 int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
264 return w == 0;
265}
266
267/**
268 * cpuidle_coupled_get_state - determine the deepest idle state
269 * @dev: struct cpuidle_device for this cpu
270 * @coupled: the struct coupled that contains the current cpu
271 *
272 * Returns the deepest idle state that all coupled cpus can enter
273 */
274static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
275 struct cpuidle_coupled *coupled)
276{
277 int i;
278 int state = INT_MAX;
279
280 /*
281 * Read barrier ensures that read of requested_state is ordered after
282 * reads of ready_count. Matches the write barriers
283 * cpuidle_set_state_waiting.
284 */
285 smp_rmb();
286
287 for_each_cpu_mask(i, coupled->coupled_cpus)
288 if (cpu_online(i) && coupled->requested_state[i] < state)
289 state = coupled->requested_state[i];
290
291 return state;
292}
293
294static void cpuidle_coupled_poked(void *info)
295{
296 int cpu = (unsigned long)info;
297 cpumask_clear_cpu(cpu, &cpuidle_coupled_poked_mask);
298}
299
300/**
301 * cpuidle_coupled_poke - wake up a cpu that may be waiting
302 * @cpu: target cpu
303 *
 304 * Ensures that the target cpu exits its waiting idle state (if it is in it)
 305 * and will see updates to waiting_count before it re-enters its waiting idle
306 * state.
307 *
308 * If cpuidle_coupled_poked_mask is already set for the target cpu, that cpu
309 * either has or will soon have a pending IPI that will wake it out of idle,
310 * or it is currently processing the IPI and is not in idle.
311 */
312static void cpuidle_coupled_poke(int cpu)
313{
314 struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
315
316 if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask))
317 __smp_call_function_single(cpu, csd, 0);
318}
319
320/**
321 * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
 322 * @this_cpu: the current cpu
323 * @coupled: the struct coupled that contains the current cpu
324 *
325 * Calls cpuidle_coupled_poke on all other online cpus.
326 */
327static void cpuidle_coupled_poke_others(int this_cpu,
328 struct cpuidle_coupled *coupled)
329{
330 int cpu;
331
332 for_each_cpu_mask(cpu, coupled->coupled_cpus)
333 if (cpu != this_cpu && cpu_online(cpu))
334 cpuidle_coupled_poke(cpu);
335}
336
337/**
338 * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
 339 * @cpu: the current cpu
340 * @coupled: the struct coupled that contains the current cpu
341 * @next_state: the index in drv->states of the requested state for this cpu
342 *
343 * Updates the requested idle state for the specified cpuidle device,
344 * poking all coupled cpus out of idle if necessary to let them see the new
345 * state.
346 */
347static void cpuidle_coupled_set_waiting(int cpu,
348 struct cpuidle_coupled *coupled, int next_state)
349{
350 int w;
351
352 coupled->requested_state[cpu] = next_state;
353
354 /*
355 * If this is the last cpu to enter the waiting state, poke
356 * all the other cpus out of their waiting state so they can
357 * enter a deeper state. This can race with one of the cpus
358 * exiting the waiting state due to an interrupt and
359 * decrementing waiting_count, see comment below.
360 *
361 * The atomic_inc_return provides a write barrier to order the write
362 * to requested_state with the later write that increments ready_count.
363 */
364 w = atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
365 if (w == coupled->online_count)
366 cpuidle_coupled_poke_others(cpu, coupled);
367}
368
369/**
370 * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
371	 * @cpu: the current cpu
372 * @coupled: the struct coupled that contains the current cpu
373 *
374 * Removes the requested idle state for the specified cpuidle device.
375 */
376static void cpuidle_coupled_set_not_waiting(int cpu,
377 struct cpuidle_coupled *coupled)
378{
379 /*
380 * Decrementing waiting count can race with incrementing it in
381 * cpuidle_coupled_set_waiting, but that's OK. Worst case, some
382 * cpus will increment ready_count and then spin until they
383	 * notice that this cpu has cleared its requested_state.
384 */
385 atomic_dec(&coupled->ready_waiting_counts);
386
387 coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
388}
389
390/**
391 * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
392 * @cpu: the current cpu
393 * @coupled: the struct coupled that contains the current cpu
394 *
395 * Marks this cpu as no longer in the ready and waiting loops. Decrements
396 * the waiting count first to prevent another cpu looping back in and seeing
397 * this cpu as waiting just before it exits idle.
398 */
399static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
400{
401 cpuidle_coupled_set_not_waiting(cpu, coupled);
402 atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
403}
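/*
 * Informational sketch (not part of the original file): ready_waiting_counts
 * packs both counters into a single atomic_t so they can be updated
 * together. Assuming the definitions near the top of this file look like
 *
 *	#define WAITING_BITS 16
 *	#define MAX_WAITING_CPUS (1 << WAITING_BITS)
 *	#define WAITING_MASK (MAX_WAITING_CPUS - 1)
 *
 * the low bits count waiting cpus and the high bits count ready cpus:
 * atomic_inc_return() in cpuidle_coupled_set_waiting() bumps the waiting
 * field, while subtracting MAX_WAITING_CPUS, as done above, drops the
 * ready field by one without disturbing the waiting field.
 */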
404
405/**
406 * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
407	 * @cpu: this cpu
408 *
409 * Turns on interrupts and spins until any outstanding poke interrupts have
410 * been processed and the poke bit has been cleared.
411 *
412 * Other interrupts may also be processed while interrupts are enabled, so
413 * need_resched() must be tested after turning interrupts off again to make sure
414 * the interrupt didn't schedule work that should take the cpu out of idle.
415 *
416 * Returns 0 if need_resched was false, -EINTR if need_resched was true.
417 */
418static int cpuidle_coupled_clear_pokes(int cpu)
419{
420 local_irq_enable();
421 while (cpumask_test_cpu(cpu, &cpuidle_coupled_poked_mask))
422 cpu_relax();
423 local_irq_disable();
424
425 return need_resched() ? -EINTR : 0;
426}
427
428/**
429 * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
430 * @dev: struct cpuidle_device for the current cpu
431 * @drv: struct cpuidle_driver for the platform
432 * @next_state: index of the requested state in drv->states
433 *
434	 * Coordinate with coupled cpus to enter the target state. This is a two-
435 * stage process. In the first stage, the cpus are operating independently,
436 * and may call into cpuidle_enter_state_coupled at completely different times.
437 * To save as much power as possible, the first cpus to call this function will
438 * go to an intermediate state (the cpuidle_device's safe state), and wait for
439 * all the other cpus to call this function. Once all coupled cpus are idle,
440 * the second stage will start. Each coupled cpu will spin until all cpus have
441	 * guaranteed that they will enter the target state.
442 *
443 * This function must be called with interrupts disabled. It may enable
444 * interrupts while preparing for idle, and it will always return with
445 * interrupts enabled.
446 */
447int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
448 struct cpuidle_driver *drv, int next_state)
449{
450 int entered_state = -1;
451 struct cpuidle_coupled *coupled = dev->coupled;
452
453 if (!coupled)
454 return -EINVAL;
455
456 while (coupled->prevent) {
457 if (cpuidle_coupled_clear_pokes(dev->cpu)) {
458 local_irq_enable();
459 return entered_state;
460 }
461 entered_state = cpuidle_enter_state(dev, drv,
462 dev->safe_state_index);
463 }
464
465 /* Read barrier ensures online_count is read after prevent is cleared */
466 smp_rmb();
467
468 cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
469
470retry:
471 /*
472 * Wait for all coupled cpus to be idle, using the deepest state
473 * allowed for a single cpu.
474 */
475 while (!cpuidle_coupled_cpus_waiting(coupled)) {
476 if (cpuidle_coupled_clear_pokes(dev->cpu)) {
477 cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
478 goto out;
479 }
480
481 if (coupled->prevent) {
482 cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
483 goto out;
484 }
485
486 entered_state = cpuidle_enter_state(dev, drv,
487 dev->safe_state_index);
488 }
489
490 if (cpuidle_coupled_clear_pokes(dev->cpu)) {
491 cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
492 goto out;
493 }
494
495 /*
496 * All coupled cpus are probably idle. There is a small chance that
497 * one of the other cpus just became active. Increment the ready count,
498 * and spin until all coupled cpus have incremented the counter. Once a
499 * cpu has incremented the ready counter, it cannot abort idle and must
500 * spin until either all cpus have incremented the ready counter, or
501 * another cpu leaves idle and decrements the waiting counter.
502 */
503
504 cpuidle_coupled_set_ready(coupled);
505 while (!cpuidle_coupled_cpus_ready(coupled)) {
506 /* Check if any other cpus bailed out of idle. */
507 if (!cpuidle_coupled_cpus_waiting(coupled))
508 if (!cpuidle_coupled_set_not_ready(coupled))
509 goto retry;
510
511 cpu_relax();
512 }
513
514 /* all cpus have acked the coupled state */
515 next_state = cpuidle_coupled_get_state(dev, coupled);
516
517 entered_state = cpuidle_enter_state(dev, drv, next_state);
518
519 cpuidle_coupled_set_done(dev->cpu, coupled);
520
521out:
522 /*
523 * Normal cpuidle states are expected to return with irqs enabled.
524 * That leads to an inefficiency where a cpu receiving an interrupt
525 * that brings it out of idle will process that interrupt before
526 * exiting the idle enter function and decrementing ready_count. All
527 * other cpus will need to spin waiting for the cpu that is processing
528 * the interrupt. If the driver returns with interrupts disabled,
529 * all other cpus will loop back into the safe idle state instead of
530 * spinning, saving power.
531 *
532 * Calling local_irq_enable here allows coupled states to return with
533 * interrupts disabled, but won't cause problems for drivers that
534 * exit with interrupts enabled.
535 */
536 local_irq_enable();
537
538 /*
539 * Wait until all coupled cpus have exited idle. There is no risk that
540 * a cpu exits and re-enters the ready state because this cpu has
541 * already decremented its waiting_count.
542 */
543 while (!cpuidle_coupled_no_cpus_ready(coupled))
544 cpu_relax();
545
546 return entered_state;
547}
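/*
 * Usage sketch (hypothetical platform code, not part of the original file):
 * a driver opts a state into this two-stage protocol by flagging the state
 * and describing the shared power domain on each device before registering
 * it. CPUIDLE_FLAG_COUPLED, coupled_cpus and safe_state_index come from
 * this patch series; the state indices below are made up.
 *
 *	drv->states[1].flags |= CPUIDLE_FLAG_COUPLED;
 *	dev->safe_state_index = 0;
 *	cpumask_copy(&dev->coupled_cpus, cpu_possible_mask);
 *	cpuidle_register_device(dev);
 */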
548
549static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled)
550{
551 cpumask_t cpus;
552 cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
553 coupled->online_count = cpumask_weight(&cpus);
554}
555
556/**
557 * cpuidle_coupled_register_device - register a coupled cpuidle device
558 * @dev: struct cpuidle_device for the current cpu
559 *
560 * Called from cpuidle_register_device to handle coupled idle init. Finds the
561 * cpuidle_coupled struct for this set of coupled cpus, or creates one if none
562 * exists yet.
563 */
564int cpuidle_coupled_register_device(struct cpuidle_device *dev)
565{
566 int cpu;
567 struct cpuidle_device *other_dev;
568 struct call_single_data *csd;
569 struct cpuidle_coupled *coupled;
570
571 if (cpumask_empty(&dev->coupled_cpus))
572 return 0;
573
574 for_each_cpu_mask(cpu, dev->coupled_cpus) {
575 other_dev = per_cpu(cpuidle_devices, cpu);
576 if (other_dev && other_dev->coupled) {
577 coupled = other_dev->coupled;
578 goto have_coupled;
579 }
580 }
581
582 /* No existing coupled info found, create a new one */
583 coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL);
584 if (!coupled)
585 return -ENOMEM;
586
587 coupled->coupled_cpus = dev->coupled_cpus;
588
589have_coupled:
590 dev->coupled = coupled;
591 if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus)))
592 coupled->prevent++;
593
594 cpuidle_coupled_update_online_cpus(coupled);
595
596 coupled->refcnt++;
597
598 csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
599 csd->func = cpuidle_coupled_poked;
600 csd->info = (void *)(unsigned long)dev->cpu;
601
602 return 0;
603}
604
605/**
606 * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device
607 * @dev: struct cpuidle_device for the current cpu
608 *
609 * Called from cpuidle_unregister_device to tear down coupled idle. Removes the
610	 * cpu from the coupled idle set, and frees the cpuidle_coupled struct if
611 * this was the last cpu in the set.
612 */
613void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
614{
615 struct cpuidle_coupled *coupled = dev->coupled;
616
617 if (cpumask_empty(&dev->coupled_cpus))
618 return;
619
620	if (!--coupled->refcnt)
621 kfree(coupled);
622 dev->coupled = NULL;
623}
624
625/**
626 * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state
627 * @coupled: the struct coupled that contains the cpu that is changing state
628 *
629 * Disables coupled cpuidle on a coupled set of cpus. Used to ensure that
630 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
631 */
632static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled)
633{
634 int cpu = get_cpu();
635
636 /* Force all cpus out of the waiting loop. */
637 coupled->prevent++;
638 cpuidle_coupled_poke_others(cpu, coupled);
639 put_cpu();
640 while (!cpuidle_coupled_no_cpus_waiting(coupled))
641 cpu_relax();
642}
643
644/**
645 * cpuidle_coupled_allow_idle - allows cpus to enter a coupled state
646 * @coupled: the struct coupled that contains the cpu that is changing state
647 *
648 * Enables coupled cpuidle on a coupled set of cpus. Used to ensure that
649 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
650 */
651static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
652{
653 int cpu = get_cpu();
654
655 /*
656 * Write barrier ensures readers see the new online_count when they
657 * see prevent == 0.
658 */
659 smp_wmb();
660 coupled->prevent--;
661 /* Force cpus out of the prevent loop. */
662 cpuidle_coupled_poke_others(cpu, coupled);
663 put_cpu();
664}
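/*
 * Barrier pairing sketch (informational, not part of the original file):
 * the smp_wmb() above pairs with the smp_rmb() in
 * cpuidle_enter_state_coupled():
 *
 *	writer (hotplug path)            reader (idle entry path)
 *	update coupled->online_count     while (coupled->prevent) ;
 *	smp_wmb();                       smp_rmb();
 *	coupled->prevent--;              read coupled->online_count
 *
 * so a cpu that sees prevent drop to zero is guaranteed to also see the
 * online_count written before it.
 */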
665
666/**
667 * cpuidle_coupled_cpu_notify - notifier called during hotplug transitions
668 * @nb: notifier block
669 * @action: hotplug transition
670 * @hcpu: target cpu number
671 *
672	 * Called when a cpu is brought online or taken offline using hotplug.
673	 * Updates the coupled cpu set appropriately.
674 */
675static int cpuidle_coupled_cpu_notify(struct notifier_block *nb,
676 unsigned long action, void *hcpu)
677{
678 int cpu = (unsigned long)hcpu;
679 struct cpuidle_device *dev;
680
681 switch (action & ~CPU_TASKS_FROZEN) {
682 case CPU_UP_PREPARE:
683 case CPU_DOWN_PREPARE:
684 case CPU_ONLINE:
685 case CPU_DEAD:
686 case CPU_UP_CANCELED:
687 case CPU_DOWN_FAILED:
688 break;
689 default:
690 return NOTIFY_OK;
691 }
692
693 mutex_lock(&cpuidle_lock);
694
695 dev = per_cpu(cpuidle_devices, cpu);
696 if (!dev || !dev->coupled)
697 goto out;
698
699 switch (action & ~CPU_TASKS_FROZEN) {
700 case CPU_UP_PREPARE:
701 case CPU_DOWN_PREPARE:
702 cpuidle_coupled_prevent_idle(dev->coupled);
703 break;
704 case CPU_ONLINE:
705 case CPU_DEAD:
706 cpuidle_coupled_update_online_cpus(dev->coupled);
707 /* Fall through */
708 case CPU_UP_CANCELED:
709 case CPU_DOWN_FAILED:
710 cpuidle_coupled_allow_idle(dev->coupled);
711 break;
712 }
713
714out:
715 mutex_unlock(&cpuidle_lock);
716 return NOTIFY_OK;
717}
718
719static struct notifier_block cpuidle_coupled_cpu_notifier = {
720 .notifier_call = cpuidle_coupled_cpu_notify,
721};
722
723static int __init cpuidle_coupled_init(void)
724{
725 return register_cpu_notifier(&cpuidle_coupled_cpu_notifier);
726}
727core_initcall(cpuidle_coupled_init);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index d90519cec880..e28f6ea46f1a 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -92,6 +92,34 @@ int cpuidle_play_dead(void)
92} 92}
93 93
94/** 94/**
95 * cpuidle_enter_state - enter the state and update stats
96 * @dev: cpuidle device for this cpu
97 * @drv: cpuidle driver for this cpu
98 * @next_state: index into drv->states of the state to enter
99 */
100int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
101 int next_state)
102{
103 int entered_state;
104
105 entered_state = cpuidle_enter_ops(dev, drv, next_state);
106
107 if (entered_state >= 0) {
108 /* Update cpuidle counters */
109 /* This can be moved to within driver enter routine
110 * but that results in multiple copies of same code.
111 */
112 dev->states_usage[entered_state].time +=
113 (unsigned long long)dev->last_residency;
114 dev->states_usage[entered_state].usage++;
115 } else {
116 dev->last_residency = 0;
117 }
118
119 return entered_state;
120}
121
122/**
95 * cpuidle_idle_call - the main idle loop 123 * cpuidle_idle_call - the main idle loop
96 * 124 *
97 * NOTE: no locks or semaphores should be used here 125 * NOTE: no locks or semaphores should be used here
@@ -113,15 +141,6 @@ int cpuidle_idle_call(void)
113 if (!dev || !dev->enabled) 141 if (!dev || !dev->enabled)
114 return -EBUSY; 142 return -EBUSY;
115 143
116#if 0
117 /* shows regressions, re-enable for 2.6.29 */
118 /*
119 * run any timers that can be run now, at this point
120 * before calculating the idle duration etc.
121 */
122 hrtimer_peek_ahead_timers();
123#endif
124
125 /* ask the governor for the next state */ 144 /* ask the governor for the next state */
126 next_state = cpuidle_curr_governor->select(drv, dev); 145 next_state = cpuidle_curr_governor->select(drv, dev);
127 if (need_resched()) { 146 if (need_resched()) {
@@ -132,23 +151,15 @@ int cpuidle_idle_call(void)
132 trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu); 151 trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
133 trace_cpu_idle_rcuidle(next_state, dev->cpu); 152 trace_cpu_idle_rcuidle(next_state, dev->cpu);
134 153
135 entered_state = cpuidle_enter_ops(dev, drv, next_state); 154 if (cpuidle_state_is_coupled(dev, drv, next_state))
155 entered_state = cpuidle_enter_state_coupled(dev, drv,
156 next_state);
157 else
158 entered_state = cpuidle_enter_state(dev, drv, next_state);
136 159
137 trace_power_end_rcuidle(dev->cpu); 160 trace_power_end_rcuidle(dev->cpu);
138 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); 161 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
139 162
140 if (entered_state >= 0) {
141 /* Update cpuidle counters */
142 /* This can be moved to within driver enter routine
143 * but that results in multiple copies of same code.
144 */
145 dev->states_usage[entered_state].time +=
146 (unsigned long long)dev->last_residency;
147 dev->states_usage[entered_state].usage++;
148 } else {
149 dev->last_residency = 0;
150 }
151
152 /* give the governor an opportunity to reflect on the outcome */ 163 /* give the governor an opportunity to reflect on the outcome */
153 if (cpuidle_curr_governor->reflect) 164 if (cpuidle_curr_governor->reflect)
154 cpuidle_curr_governor->reflect(dev, entered_state); 165 cpuidle_curr_governor->reflect(dev, entered_state);
@@ -201,6 +212,22 @@ void cpuidle_resume_and_unlock(void)
201 212
202EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); 213EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
203 214
215/* Currently used in suspend/resume path to suspend cpuidle */
216void cpuidle_pause(void)
217{
218 mutex_lock(&cpuidle_lock);
219 cpuidle_uninstall_idle_handler();
220 mutex_unlock(&cpuidle_lock);
221}
222
223/* Currently used in suspend/resume path to resume cpuidle */
224void cpuidle_resume(void)
225{
226 mutex_lock(&cpuidle_lock);
227 cpuidle_install_idle_handler();
228 mutex_unlock(&cpuidle_lock);
229}
230
204/** 231/**
205 * cpuidle_wrap_enter - performs timekeeping and irqen around enter function 232 * cpuidle_wrap_enter - performs timekeeping and irqen around enter function
206 * @dev: pointer to a valid cpuidle_device object 233 * @dev: pointer to a valid cpuidle_device object
@@ -265,7 +292,7 @@ static void poll_idle_init(struct cpuidle_driver *drv)
265 state->power_usage = -1; 292 state->power_usage = -1;
266 state->flags = 0; 293 state->flags = 0;
267 state->enter = poll_idle; 294 state->enter = poll_idle;
268 state->disable = 0; 295 state->disabled = false;
269} 296}
270#else 297#else
271static void poll_idle_init(struct cpuidle_driver *drv) {} 298static void poll_idle_init(struct cpuidle_driver *drv) {}
@@ -283,6 +310,9 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
283 int ret, i; 310 int ret, i;
284 struct cpuidle_driver *drv = cpuidle_get_driver(); 311 struct cpuidle_driver *drv = cpuidle_get_driver();
285 312
313 if (!dev)
314 return -EINVAL;
315
286 if (dev->enabled) 316 if (dev->enabled)
287 return 0; 317 return 0;
288 if (!drv || !cpuidle_curr_governor) 318 if (!drv || !cpuidle_curr_governor)
@@ -367,8 +397,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
367 struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); 397 struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
368 struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); 398 struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
369 399
370 if (!dev)
371 return -EINVAL;
372 if (!try_module_get(cpuidle_driver->owner)) 400 if (!try_module_get(cpuidle_driver->owner))
373 return -EINVAL; 401 return -EINVAL;
374 402
@@ -376,13 +404,25 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
376 404
377 per_cpu(cpuidle_devices, dev->cpu) = dev; 405 per_cpu(cpuidle_devices, dev->cpu) = dev;
378 list_add(&dev->device_list, &cpuidle_detected_devices); 406 list_add(&dev->device_list, &cpuidle_detected_devices);
379 if ((ret = cpuidle_add_sysfs(cpu_dev))) { 407 ret = cpuidle_add_sysfs(cpu_dev);
380 module_put(cpuidle_driver->owner); 408 if (ret)
381 return ret; 409 goto err_sysfs;
382 } 410
411 ret = cpuidle_coupled_register_device(dev);
412 if (ret)
413 goto err_coupled;
383 414
384 dev->registered = 1; 415 dev->registered = 1;
385 return 0; 416 return 0;
417
418err_coupled:
419 cpuidle_remove_sysfs(cpu_dev);
420 wait_for_completion(&dev->kobj_unregister);
421err_sysfs:
422 list_del(&dev->device_list);
423 per_cpu(cpuidle_devices, dev->cpu) = NULL;
424 module_put(cpuidle_driver->owner);
425 return ret;
386} 426}
387 427
388/** 428/**
@@ -393,6 +433,9 @@ int cpuidle_register_device(struct cpuidle_device *dev)
393{ 433{
394 int ret; 434 int ret;
395 435
436 if (!dev)
437 return -EINVAL;
438
396 mutex_lock(&cpuidle_lock); 439 mutex_lock(&cpuidle_lock);
397 440
398 if ((ret = __cpuidle_register_device(dev))) { 441 if ((ret = __cpuidle_register_device(dev))) {
@@ -432,6 +475,8 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
432 wait_for_completion(&dev->kobj_unregister); 475 wait_for_completion(&dev->kobj_unregister);
433 per_cpu(cpuidle_devices, dev->cpu) = NULL; 476 per_cpu(cpuidle_devices, dev->cpu) = NULL;
434 477
478 cpuidle_coupled_unregister_device(dev);
479
435 cpuidle_resume_and_unlock(); 480 cpuidle_resume_and_unlock();
436 481
437 module_put(cpuidle_driver->owner); 482 module_put(cpuidle_driver->owner);
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 7db186685c27..76e7f696ad8c 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -14,6 +14,8 @@ extern struct list_head cpuidle_detected_devices;
14extern struct mutex cpuidle_lock; 14extern struct mutex cpuidle_lock;
15extern spinlock_t cpuidle_driver_lock; 15extern spinlock_t cpuidle_driver_lock;
16extern int cpuidle_disabled(void); 16extern int cpuidle_disabled(void);
17extern int cpuidle_enter_state(struct cpuidle_device *dev,
18 struct cpuidle_driver *drv, int next_state);
17 19
18/* idle loop */ 20/* idle loop */
19extern void cpuidle_install_idle_handler(void); 21extern void cpuidle_install_idle_handler(void);
@@ -30,4 +32,34 @@ extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device);
30extern int cpuidle_add_sysfs(struct device *dev); 32extern int cpuidle_add_sysfs(struct device *dev);
31extern void cpuidle_remove_sysfs(struct device *dev); 33extern void cpuidle_remove_sysfs(struct device *dev);
32 34
35#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
36bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
37 struct cpuidle_driver *drv, int state);
38int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
39 struct cpuidle_driver *drv, int next_state);
40int cpuidle_coupled_register_device(struct cpuidle_device *dev);
41void cpuidle_coupled_unregister_device(struct cpuidle_device *dev);
42#else
43static inline bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
44 struct cpuidle_driver *drv, int state)
45{
46 return false;
47}
48
49static inline int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
50 struct cpuidle_driver *drv, int next_state)
51{
52 return -1;
53}
54
55static inline int cpuidle_coupled_register_device(struct cpuidle_device *dev)
56{
57 return 0;
58}
59
60static inline void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
61{
62}
63#endif
64
33#endif /* __DRIVER_CPUIDLE_H */ 65#endif /* __DRIVER_CPUIDLE_H */
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 40cd3f3024df..58bf3b1ac9c4 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -16,6 +16,7 @@
16 16
17static struct cpuidle_driver *cpuidle_curr_driver; 17static struct cpuidle_driver *cpuidle_curr_driver;
18DEFINE_SPINLOCK(cpuidle_driver_lock); 18DEFINE_SPINLOCK(cpuidle_driver_lock);
19int cpuidle_driver_refcount;
19 20
20static void __cpuidle_register_driver(struct cpuidle_driver *drv) 21static void __cpuidle_register_driver(struct cpuidle_driver *drv)
21{ 22{
@@ -89,8 +90,34 @@ void cpuidle_unregister_driver(struct cpuidle_driver *drv)
89 } 90 }
90 91
91 spin_lock(&cpuidle_driver_lock); 92 spin_lock(&cpuidle_driver_lock);
92 cpuidle_curr_driver = NULL; 93
94 if (!WARN_ON(cpuidle_driver_refcount > 0))
95 cpuidle_curr_driver = NULL;
96
93 spin_unlock(&cpuidle_driver_lock); 97 spin_unlock(&cpuidle_driver_lock);
94} 98}
95 99
96EXPORT_SYMBOL_GPL(cpuidle_unregister_driver); 100EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
101
102struct cpuidle_driver *cpuidle_driver_ref(void)
103{
104 struct cpuidle_driver *drv;
105
106 spin_lock(&cpuidle_driver_lock);
107
108 drv = cpuidle_curr_driver;
109 cpuidle_driver_refcount++;
110
111 spin_unlock(&cpuidle_driver_lock);
112 return drv;
113}
114
115void cpuidle_driver_unref(void)
116{
117 spin_lock(&cpuidle_driver_lock);
118
119 if (!WARN_ON(cpuidle_driver_refcount <= 0))
120 cpuidle_driver_refcount--;
121
122 spin_unlock(&cpuidle_driver_lock);
123}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 06335756ea14..5b1f2c372c1f 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -281,7 +281,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
281 * unless the timer is happening really really soon. 281 * unless the timer is happening really really soon.
282 */ 282 */
283 if (data->expected_us > 5 && 283 if (data->expected_us > 5 &&
284 drv->states[CPUIDLE_DRIVER_STATE_START].disable == 0) 284 !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
285 dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
285 data->last_state_idx = CPUIDLE_DRIVER_STATE_START; 286 data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
286 287
287 /* 288 /*
@@ -290,8 +291,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
290 */ 291 */
291 for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) { 292 for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
292 struct cpuidle_state *s = &drv->states[i]; 293 struct cpuidle_state *s = &drv->states[i];
294 struct cpuidle_state_usage *su = &dev->states_usage[i];
293 295
294 if (s->disable) 296 if (s->disabled || su->disable)
295 continue; 297 continue;
296 if (s->target_residency > data->predicted_us) 298 if (s->target_residency > data->predicted_us)
297 continue; 299 continue;
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 88032b4dc6d2..5f809e337b89 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -217,7 +217,8 @@ struct cpuidle_state_attr {
217 struct attribute attr; 217 struct attribute attr;
218 ssize_t (*show)(struct cpuidle_state *, \ 218 ssize_t (*show)(struct cpuidle_state *, \
219 struct cpuidle_state_usage *, char *); 219 struct cpuidle_state_usage *, char *);
220 ssize_t (*store)(struct cpuidle_state *, const char *, size_t); 220 ssize_t (*store)(struct cpuidle_state *, \
221 struct cpuidle_state_usage *, const char *, size_t);
221}; 222};
222 223
223#define define_one_state_ro(_name, show) \ 224#define define_one_state_ro(_name, show) \
@@ -233,21 +234,22 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
233 return sprintf(buf, "%u\n", state->_name);\ 234 return sprintf(buf, "%u\n", state->_name);\
234} 235}
235 236
236#define define_store_state_function(_name) \ 237#define define_store_state_ull_function(_name) \
237static ssize_t store_state_##_name(struct cpuidle_state *state, \ 238static ssize_t store_state_##_name(struct cpuidle_state *state, \
239 struct cpuidle_state_usage *state_usage, \
238 const char *buf, size_t size) \ 240 const char *buf, size_t size) \
239{ \ 241{ \
240 long value; \ 242 unsigned long long value; \
241 int err; \ 243 int err; \
242 if (!capable(CAP_SYS_ADMIN)) \ 244 if (!capable(CAP_SYS_ADMIN)) \
243 return -EPERM; \ 245 return -EPERM; \
244 err = kstrtol(buf, 0, &value); \ 246 err = kstrtoull(buf, 0, &value); \
245 if (err) \ 247 if (err) \
246 return err; \ 248 return err; \
247 if (value) \ 249 if (value) \
248 state->disable = 1; \ 250 state_usage->_name = 1; \
249 else \ 251 else \
250 state->disable = 0; \ 252 state_usage->_name = 0; \
251 return size; \ 253 return size; \
252} 254}
253 255
@@ -273,8 +275,8 @@ define_show_state_ull_function(usage)
273define_show_state_ull_function(time) 275define_show_state_ull_function(time)
274define_show_state_str_function(name) 276define_show_state_str_function(name)
275define_show_state_str_function(desc) 277define_show_state_str_function(desc)
276define_show_state_function(disable) 278define_show_state_ull_function(disable)
277define_store_state_function(disable) 279define_store_state_ull_function(disable)
278 280
279define_one_state_ro(name, show_state_name); 281define_one_state_ro(name, show_state_name);
280define_one_state_ro(desc, show_state_desc); 282define_one_state_ro(desc, show_state_desc);
@@ -318,10 +320,11 @@ static ssize_t cpuidle_state_store(struct kobject *kobj,
318{ 320{
319 int ret = -EIO; 321 int ret = -EIO;
320 struct cpuidle_state *state = kobj_to_state(kobj); 322 struct cpuidle_state *state = kobj_to_state(kobj);
323 struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj);
321 struct cpuidle_state_attr *cattr = attr_to_stateattr(attr); 324 struct cpuidle_state_attr *cattr = attr_to_stateattr(attr);
322 325
323 if (cattr->store) 326 if (cattr->store)
324 ret = cattr->store(state, buf, size); 327 ret = cattr->store(state, state_usage, buf, size);
325 328
326 return ret; 329 return ret;
327} 330}
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 1092a770482e..7d74d092aa8f 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -298,7 +298,7 @@ config CRYPTO_DEV_TEGRA_AES
298 will be called tegra-aes. 298 will be called tegra-aes.
299 299
300config CRYPTO_DEV_NX 300config CRYPTO_DEV_NX
301 tristate "Support for Power7+ in-Nest cryptographic accleration" 301 tristate "Support for Power7+ in-Nest cryptographic acceleration"
302 depends on PPC64 && IBMVIO 302 depends on PPC64 && IBMVIO
303 select CRYPTO_AES 303 select CRYPTO_AES
304 select CRYPTO_CBC 304 select CRYPTO_CBC
@@ -325,4 +325,58 @@ if CRYPTO_DEV_UX500
325 source "drivers/crypto/ux500/Kconfig" 325 source "drivers/crypto/ux500/Kconfig"
326endif # if CRYPTO_DEV_UX500 326endif # if CRYPTO_DEV_UX500
327 327
328config CRYPTO_DEV_BFIN_CRC
329 tristate "Support for Blackfin CRC hardware"
330 depends on BF60x
331 help
332 Newer Blackfin processors have CRC hardware. Select this if you
333 want to use the Blackfin CRC module.
334
335config CRYPTO_DEV_ATMEL_AES
336 tristate "Support for Atmel AES hw accelerator"
337 depends on ARCH_AT91
338 select CRYPTO_CBC
339 select CRYPTO_ECB
340 select CRYPTO_AES
341 select CRYPTO_ALGAPI
342 select CRYPTO_BLKCIPHER
343	select AT_HDMAC
344 help
345	  Some Atmel processors have an AES hw accelerator.
346 Select this if you want to use the Atmel module for
347 AES algorithms.
348
349 To compile this driver as a module, choose M here: the module
350 will be called atmel-aes.
351
352config CRYPTO_DEV_ATMEL_TDES
353 tristate "Support for Atmel DES/TDES hw accelerator"
354 depends on ARCH_AT91
355 select CRYPTO_DES
356 select CRYPTO_CBC
357 select CRYPTO_ECB
358 select CRYPTO_ALGAPI
359 select CRYPTO_BLKCIPHER
360 help
361	  Some Atmel processors have a DES/TDES hw accelerator.
362 Select this if you want to use the Atmel module for
363 DES/TDES algorithms.
364
365 To compile this driver as a module, choose M here: the module
366 will be called atmel-tdes.
367
368config CRYPTO_DEV_ATMEL_SHA
369 tristate "Support for Atmel SHA1/SHA256 hw accelerator"
370 depends on ARCH_AT91
371 select CRYPTO_SHA1
372 select CRYPTO_SHA256
373 select CRYPTO_ALGAPI
374 help
375	  Some Atmel processors have a SHA1/SHA256 hw accelerator.
376 Select this if you want to use the Atmel module for
377 SHA1/SHA256 algorithms.
378
379 To compile this driver as a module, choose M here: the module
380 will be called atmel-sha.
381
328endif # CRYPTO_HW 382endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 01390325d72d..880a47b0b023 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -14,4 +14,9 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
14obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o 14obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
15obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o 15obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
16obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o 16obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
17obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/ \ No newline at end of file 17obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
18obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
19obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
20obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
21obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
22obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h
new file mode 100644
index 000000000000..2786bb1a5aa0
--- /dev/null
+++ b/drivers/crypto/atmel-aes-regs.h
@@ -0,0 +1,62 @@
1#ifndef __ATMEL_AES_REGS_H__
2#define __ATMEL_AES_REGS_H__
3
4#define AES_CR 0x00
5#define AES_CR_START (1 << 0)
6#define AES_CR_SWRST (1 << 8)
7#define AES_CR_LOADSEED (1 << 16)
8
9#define AES_MR 0x04
10#define AES_MR_CYPHER_DEC (0 << 0)
11#define AES_MR_CYPHER_ENC (1 << 0)
12#define AES_MR_DUALBUFF (1 << 3)
13#define AES_MR_PROCDLY_MASK (0xF << 4)
14#define AES_MR_PROCDLY_OFFSET 4
15#define AES_MR_SMOD_MASK (0x3 << 8)
16#define AES_MR_SMOD_MANUAL (0x0 << 8)
17#define AES_MR_SMOD_AUTO (0x1 << 8)
18#define AES_MR_SMOD_IDATAR0 (0x2 << 8)
19#define AES_MR_KEYSIZE_MASK (0x3 << 10)
20#define AES_MR_KEYSIZE_128 (0x0 << 10)
21#define AES_MR_KEYSIZE_192 (0x1 << 10)
22#define AES_MR_KEYSIZE_256 (0x2 << 10)
23#define AES_MR_OPMOD_MASK (0x7 << 12)
24#define AES_MR_OPMOD_ECB (0x0 << 12)
25#define AES_MR_OPMOD_CBC (0x1 << 12)
26#define AES_MR_OPMOD_OFB (0x2 << 12)
27#define AES_MR_OPMOD_CFB (0x3 << 12)
28#define AES_MR_OPMOD_CTR (0x4 << 12)
29#define AES_MR_LOD (0x1 << 15)
30#define AES_MR_CFBS_MASK (0x7 << 16)
31#define AES_MR_CFBS_128b (0x0 << 16)
32#define AES_MR_CFBS_64b (0x1 << 16)
33#define AES_MR_CFBS_32b (0x2 << 16)
34#define AES_MR_CFBS_16b (0x3 << 16)
35#define AES_MR_CFBS_8b (0x4 << 16)
36#define AES_MR_CKEY_MASK (0xF << 20)
37#define AES_MR_CKEY_OFFSET 20
38#define AES_MR_CMTYP_MASK (0x1F << 24)
39#define AES_MR_CMTYP_OFFSET 24
40
41#define AES_IER 0x10
42#define AES_IDR 0x14
43#define AES_IMR 0x18
44#define AES_ISR 0x1C
45#define AES_INT_DATARDY (1 << 0)
46#define AES_INT_URAD (1 << 8)
47#define AES_ISR_URAT_MASK (0xF << 12)
48#define AES_ISR_URAT_IDR_WR_PROC (0x0 << 12)
49#define AES_ISR_URAT_ODR_RD_PROC (0x1 << 12)
50#define AES_ISR_URAT_MR_WR_PROC (0x2 << 12)
51#define AES_ISR_URAT_ODR_RD_SUBK (0x3 << 12)
52#define AES_ISR_URAT_MR_WR_SUBK (0x4 << 12)
53#define AES_ISR_URAT_WOR_RD (0x5 << 12)
54
55#define AES_KEYWR(x) (0x20 + ((x) * 0x04))
56#define AES_IDATAR(x) (0x40 + ((x) * 0x04))
57#define AES_ODATAR(x) (0x50 + ((x) * 0x04))
58#define AES_IVR(x) (0x60 + ((x) * 0x04))
59
60#define AES_HW_VERSION 0xFC
61
62#endif /* __ATMEL_AES_REGS_H__ */
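/*
 * Illustrative example (not part of the original header): a typical mode
 * register value for CBC-mode, 128-bit-key encryption with automatic start,
 * composed the same way the driver's atmel_aes_write_ctrl() builds valmr:
 *
 *	u32 mr = AES_MR_CYPHER_ENC | AES_MR_KEYSIZE_128 |
 *		 AES_MR_OPMOD_CBC | AES_MR_SMOD_AUTO;
 */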
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
new file mode 100644
index 000000000000..6bb20fffbf49
--- /dev/null
+++ b/drivers/crypto/atmel-aes.c
@@ -0,0 +1,1206 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for ATMEL AES HW acceleration.
5 *
6 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
7 * Author: Nicolas Royer <nicolas@eukrea.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 *
13 * Some ideas are from omap-aes.c driver.
14 */
15
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/err.h>
21#include <linux/clk.h>
22#include <linux/io.h>
23#include <linux/hw_random.h>
24#include <linux/platform_device.h>
25
26#include <linux/device.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/errno.h>
30#include <linux/interrupt.h>
31#include <linux/kernel.h>
32#include <linux/clk.h>
33#include <linux/irq.h>
34#include <linux/io.h>
35#include <linux/platform_device.h>
36#include <linux/scatterlist.h>
37#include <linux/dma-mapping.h>
38#include <linux/delay.h>
39#include <linux/crypto.h>
40#include <linux/cryptohash.h>
41#include <crypto/scatterwalk.h>
42#include <crypto/algapi.h>
43#include <crypto/aes.h>
44#include <crypto/hash.h>
45#include <crypto/internal/hash.h>
46#include <linux/platform_data/atmel-aes.h>
47#include "atmel-aes-regs.h"
48
49#define CFB8_BLOCK_SIZE 1
50#define CFB16_BLOCK_SIZE 2
51#define CFB32_BLOCK_SIZE 4
52#define CFB64_BLOCK_SIZE 8
53
54/* AES flags */
55#define AES_FLAGS_MODE_MASK 0x01ff
56#define AES_FLAGS_ENCRYPT BIT(0)
57#define AES_FLAGS_CBC BIT(1)
58#define AES_FLAGS_CFB BIT(2)
59#define AES_FLAGS_CFB8 BIT(3)
60#define AES_FLAGS_CFB16 BIT(4)
61#define AES_FLAGS_CFB32 BIT(5)
62#define AES_FLAGS_CFB64 BIT(6)
63#define AES_FLAGS_OFB BIT(7)
64#define AES_FLAGS_CTR BIT(8)
65
66#define AES_FLAGS_INIT BIT(16)
67#define AES_FLAGS_DMA BIT(17)
68#define AES_FLAGS_BUSY BIT(18)
69
70#define AES_FLAGS_DUALBUFF BIT(24)
71
72#define ATMEL_AES_QUEUE_LENGTH 1
73#define ATMEL_AES_CACHE_SIZE 0
74
75#define ATMEL_AES_DMA_THRESHOLD 16
76
77
78struct atmel_aes_dev;
79
80struct atmel_aes_ctx {
81 struct atmel_aes_dev *dd;
82
83 int keylen;
84 u32 key[AES_KEYSIZE_256 / sizeof(u32)];
85};
86
87struct atmel_aes_reqctx {
88 unsigned long mode;
89};
90
91struct atmel_aes_dma {
92 struct dma_chan *chan;
93 struct dma_slave_config dma_conf;
94};
95
96struct atmel_aes_dev {
97 struct list_head list;
98 unsigned long phys_base;
99 void __iomem *io_base;
100
101 struct atmel_aes_ctx *ctx;
102 struct device *dev;
103 struct clk *iclk;
104 int irq;
105
106 unsigned long flags;
107 int err;
108
109 spinlock_t lock;
110 struct crypto_queue queue;
111
112 struct tasklet_struct done_task;
113 struct tasklet_struct queue_task;
114
115 struct ablkcipher_request *req;
116 size_t total;
117
118 struct scatterlist *in_sg;
119 unsigned int nb_in_sg;
120
121 struct scatterlist *out_sg;
122 unsigned int nb_out_sg;
123
124 size_t bufcnt;
125
126 u8 buf_in[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
127 int dma_in;
128 struct atmel_aes_dma dma_lch_in;
129
130 u8 buf_out[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
131 int dma_out;
132 struct atmel_aes_dma dma_lch_out;
133
134 u32 hw_version;
135};
136
137struct atmel_aes_drv {
138 struct list_head dev_list;
139 spinlock_t lock;
140};
141
142static struct atmel_aes_drv atmel_aes = {
143 .dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
144 .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
145};
146
147static int atmel_aes_sg_length(struct ablkcipher_request *req,
148 struct scatterlist *sg)
149{
150	unsigned int total;
151 int sg_nb;
152 unsigned int len;
153 struct scatterlist *sg_list;
154
155 sg_nb = 0;
156 sg_list = sg;
157 total = req->nbytes;
158
159 while (total) {
160 len = min(sg_list->length, total);
161
162 sg_nb++;
163 total -= len;
164
165 sg_list = sg_next(sg_list);
166 if (!sg_list)
167 total = 0;
168 }
169
170 return sg_nb;
171}
172
173static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
174{
175 return readl_relaxed(dd->io_base + offset);
176}
177
178static inline void atmel_aes_write(struct atmel_aes_dev *dd,
179 u32 offset, u32 value)
180{
181 writel_relaxed(value, dd->io_base + offset);
182}
183
184static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
185 u32 *value, int count)
186{
187 for (; count--; value++, offset += 4)
188 *value = atmel_aes_read(dd, offset);
189}
190
191static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
192 u32 *value, int count)
193{
194 for (; count--; value++, offset += 4)
195 atmel_aes_write(dd, offset, *value);
196}
197
198static void atmel_aes_dualbuff_test(struct atmel_aes_dev *dd)
199{
200 atmel_aes_write(dd, AES_MR, AES_MR_DUALBUFF);
201
202 if (atmel_aes_read(dd, AES_MR) & AES_MR_DUALBUFF)
203 dd->flags |= AES_FLAGS_DUALBUFF;
204}
205
206static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
207{
208 struct atmel_aes_dev *aes_dd = NULL;
209 struct atmel_aes_dev *tmp;
210
211 spin_lock_bh(&atmel_aes.lock);
212 if (!ctx->dd) {
213 list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
214 aes_dd = tmp;
215 break;
216 }
217 ctx->dd = aes_dd;
218 } else {
219 aes_dd = ctx->dd;
220 }
221
222 spin_unlock_bh(&atmel_aes.lock);
223
224 return aes_dd;
225}
226
227static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
228{
229 clk_prepare_enable(dd->iclk);
230
231 if (!(dd->flags & AES_FLAGS_INIT)) {
232 atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
233 atmel_aes_dualbuff_test(dd);
234 dd->flags |= AES_FLAGS_INIT;
235 dd->err = 0;
236 }
237
238 return 0;
239}
240
241static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
242{
243 atmel_aes_hw_init(dd);
244
245 dd->hw_version = atmel_aes_read(dd, AES_HW_VERSION);
246
247 clk_disable_unprepare(dd->iclk);
248}
249
250static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
251{
252 struct ablkcipher_request *req = dd->req;
253
254 clk_disable_unprepare(dd->iclk);
255 dd->flags &= ~AES_FLAGS_BUSY;
256
257 req->base.complete(&req->base, err);
258}
259
260static void atmel_aes_dma_callback(void *data)
261{
262 struct atmel_aes_dev *dd = data;
263
264 /* dma_lch_out - completed */
265 tasklet_schedule(&dd->done_task);
266}
267
268static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd)
269{
270 struct dma_async_tx_descriptor *in_desc, *out_desc;
271 int nb_dma_sg_in, nb_dma_sg_out;
272
273 dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
274 if (!dd->nb_in_sg)
275 goto exit_err;
276
277 nb_dma_sg_in = dma_map_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
278 DMA_TO_DEVICE);
279 if (!nb_dma_sg_in)
280 goto exit_err;
281
282 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, dd->in_sg,
283 nb_dma_sg_in, DMA_MEM_TO_DEV,
284 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
285
286 if (!in_desc)
287 goto unmap_in;
288
289 /* callback not needed */
290
291 dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
292 if (!dd->nb_out_sg)
293 goto unmap_in;
294
295 nb_dma_sg_out = dma_map_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
296 DMA_FROM_DEVICE);
297 if (!nb_dma_sg_out)
298		goto unmap_in;
299
300 out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, dd->out_sg,
301 nb_dma_sg_out, DMA_DEV_TO_MEM,
302 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
303
304 if (!out_desc)
305 goto unmap_out;
306
307 out_desc->callback = atmel_aes_dma_callback;
308 out_desc->callback_param = dd;
309
310 dd->total -= dd->req->nbytes;
311
312 dmaengine_submit(out_desc);
313 dma_async_issue_pending(dd->dma_lch_out.chan);
314
315 dmaengine_submit(in_desc);
316 dma_async_issue_pending(dd->dma_lch_in.chan);
317
318 return 0;
319
320unmap_out:
321 dma_unmap_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
322 DMA_FROM_DEVICE);
323unmap_in:
324 dma_unmap_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
325 DMA_TO_DEVICE);
326exit_err:
327 return -EINVAL;
328}
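/*
 * Note on ordering (informational, not part of the original file): the
 * device-to-memory descriptor is submitted and issued before the
 * memory-to-device one so the receive channel is armed before the
 * peripheral starts producing output, and only the out channel's callback
 * signals completion through the done tasklet.
 */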
329
330static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
331{
332 dd->flags &= ~AES_FLAGS_DMA;
333
334 /* use cache buffers */
335 dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
336 if (!dd->nb_in_sg)
337 return -EINVAL;
338
339 dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
340	if (!dd->nb_out_sg)
341 return -EINVAL;
342
343 dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
344 dd->buf_in, dd->total);
345
346 if (!dd->bufcnt)
347 return -EINVAL;
348
349 dd->total -= dd->bufcnt;
350
351 atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
352 atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in,
353 dd->bufcnt >> 2);
354
355 return 0;
356}
357
358static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
359{
360 int err;
361
362 if (dd->flags & AES_FLAGS_CFB8) {
363 dd->dma_lch_in.dma_conf.dst_addr_width =
364 DMA_SLAVE_BUSWIDTH_1_BYTE;
365 dd->dma_lch_out.dma_conf.src_addr_width =
366 DMA_SLAVE_BUSWIDTH_1_BYTE;
367 } else if (dd->flags & AES_FLAGS_CFB16) {
368 dd->dma_lch_in.dma_conf.dst_addr_width =
369 DMA_SLAVE_BUSWIDTH_2_BYTES;
370 dd->dma_lch_out.dma_conf.src_addr_width =
371 DMA_SLAVE_BUSWIDTH_2_BYTES;
372 } else {
373 dd->dma_lch_in.dma_conf.dst_addr_width =
374 DMA_SLAVE_BUSWIDTH_4_BYTES;
375 dd->dma_lch_out.dma_conf.src_addr_width =
376 DMA_SLAVE_BUSWIDTH_4_BYTES;
377 }
378
379 dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
380 dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
381
382 dd->flags |= AES_FLAGS_DMA;
383 err = atmel_aes_crypt_dma(dd);
384
385 return err;
386}
387
388static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
389{
390 int err;
391 u32 valcr = 0, valmr = 0;
392
393 err = atmel_aes_hw_init(dd);
394
395 if (err)
396 return err;
397
398 /* MR register must be set before IV registers */
399 if (dd->ctx->keylen == AES_KEYSIZE_128)
400 valmr |= AES_MR_KEYSIZE_128;
401 else if (dd->ctx->keylen == AES_KEYSIZE_192)
402 valmr |= AES_MR_KEYSIZE_192;
403 else
404 valmr |= AES_MR_KEYSIZE_256;
405
406 if (dd->flags & AES_FLAGS_CBC) {
407 valmr |= AES_MR_OPMOD_CBC;
408 } else if (dd->flags & AES_FLAGS_CFB) {
409 valmr |= AES_MR_OPMOD_CFB;
410 if (dd->flags & AES_FLAGS_CFB8)
411 valmr |= AES_MR_CFBS_8b;
412 else if (dd->flags & AES_FLAGS_CFB16)
413 valmr |= AES_MR_CFBS_16b;
414 else if (dd->flags & AES_FLAGS_CFB32)
415 valmr |= AES_MR_CFBS_32b;
416 else if (dd->flags & AES_FLAGS_CFB64)
417 valmr |= AES_MR_CFBS_64b;
418 } else if (dd->flags & AES_FLAGS_OFB) {
419 valmr |= AES_MR_OPMOD_OFB;
420 } else if (dd->flags & AES_FLAGS_CTR) {
421 valmr |= AES_MR_OPMOD_CTR;
422 } else {
423 valmr |= AES_MR_OPMOD_ECB;
424 }
425
426 if (dd->flags & AES_FLAGS_ENCRYPT)
427 valmr |= AES_MR_CYPHER_ENC;
428
429 if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
430 valmr |= AES_MR_SMOD_IDATAR0;
431 if (dd->flags & AES_FLAGS_DUALBUFF)
432 valmr |= AES_MR_DUALBUFF;
433 } else {
434 valmr |= AES_MR_SMOD_AUTO;
435 }
436
437 atmel_aes_write(dd, AES_CR, valcr);
438 atmel_aes_write(dd, AES_MR, valmr);
439
440 atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
441 dd->ctx->keylen >> 2);
442
443 if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) ||
444 (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) &&
445 dd->req->info) {
446 atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4);
447 }
448
449 return 0;
450}
451
452static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
453 struct ablkcipher_request *req)
454{
455 struct crypto_async_request *async_req, *backlog;
456 struct atmel_aes_ctx *ctx;
457 struct atmel_aes_reqctx *rctx;
458 unsigned long flags;
459 int err, ret = 0;
460
461 spin_lock_irqsave(&dd->lock, flags);
462 if (req)
463 ret = ablkcipher_enqueue_request(&dd->queue, req);
464 if (dd->flags & AES_FLAGS_BUSY) {
465 spin_unlock_irqrestore(&dd->lock, flags);
466 return ret;
467 }
468 backlog = crypto_get_backlog(&dd->queue);
469 async_req = crypto_dequeue_request(&dd->queue);
470 if (async_req)
471 dd->flags |= AES_FLAGS_BUSY;
472 spin_unlock_irqrestore(&dd->lock, flags);
473
474 if (!async_req)
475 return ret;
476
477 if (backlog)
478 backlog->complete(backlog, -EINPROGRESS);
479
480 req = ablkcipher_request_cast(async_req);
481
482 /* assign new request to device */
483 dd->req = req;
484 dd->total = req->nbytes;
485 dd->in_sg = req->src;
486 dd->out_sg = req->dst;
487
488 rctx = ablkcipher_request_ctx(req);
489 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
490 rctx->mode &= AES_FLAGS_MODE_MASK;
491 dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;
492 dd->ctx = ctx;
493 ctx->dd = dd;
494
495 err = atmel_aes_write_ctrl(dd);
496 if (!err) {
497 if (dd->total > ATMEL_AES_DMA_THRESHOLD)
498 err = atmel_aes_crypt_dma_start(dd);
499 else
500 err = atmel_aes_crypt_cpu_start(dd);
501 }
502 if (err) {
503 /* aes_task will not finish it, so do it here */
504 atmel_aes_finish_req(dd, err);
505 tasklet_schedule(&dd->queue_task);
506 }
507
508 return ret;
509}
510
511static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
512{
513 int err = -EINVAL;
514
515 if (dd->flags & AES_FLAGS_DMA) {
516 dma_unmap_sg(dd->dev, dd->out_sg,
517 dd->nb_out_sg, DMA_FROM_DEVICE);
518 dma_unmap_sg(dd->dev, dd->in_sg,
519 dd->nb_in_sg, DMA_TO_DEVICE);
520 err = 0;
521 }
522
523 return err;
524}
525
526static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
527{
528 struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
529 crypto_ablkcipher_reqtfm(req));
530 struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
531 struct atmel_aes_dev *dd;
532
533 if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
534		pr_err("request size is not an exact number of AES blocks\n");
535 return -EINVAL;
536 }
537
538 dd = atmel_aes_find_dev(ctx);
539 if (!dd)
540 return -ENODEV;
541
542 rctx->mode = mode;
543
544 return atmel_aes_handle_queue(dd, req);
545}
546
547static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
548{
549 struct at_dma_slave *sl = slave;
550
551 if (sl && sl->dma_dev == chan->device->dev) {
552 chan->private = sl;
553 return true;
554 } else {
555 return false;
556 }
557}
558
559static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
560{
561 int err = -ENOMEM;
562 struct aes_platform_data *pdata;
563 dma_cap_mask_t mask_in, mask_out;
564
565 pdata = dd->dev->platform_data;
566
567 if (pdata && pdata->dma_slave->txdata.dma_dev &&
568 pdata->dma_slave->rxdata.dma_dev) {
569
570 /* Try to grab 2 DMA channels */
571 dma_cap_zero(mask_in);
572 dma_cap_set(DMA_SLAVE, mask_in);
573
574 dd->dma_lch_in.chan = dma_request_channel(mask_in,
575 atmel_aes_filter, &pdata->dma_slave->rxdata);
576 if (!dd->dma_lch_in.chan)
577 goto err_dma_in;
578
579 dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
580 dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
581 AES_IDATAR(0);
582 dd->dma_lch_in.dma_conf.src_maxburst = 1;
583 dd->dma_lch_in.dma_conf.dst_maxburst = 1;
584 dd->dma_lch_in.dma_conf.device_fc = false;
585
586 dma_cap_zero(mask_out);
587 dma_cap_set(DMA_SLAVE, mask_out);
588 dd->dma_lch_out.chan = dma_request_channel(mask_out,
589 atmel_aes_filter, &pdata->dma_slave->txdata);
590 if (!dd->dma_lch_out.chan)
591 goto err_dma_out;
592
593 dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
594 dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
595 AES_ODATAR(0);
596 dd->dma_lch_out.dma_conf.src_maxburst = 1;
597 dd->dma_lch_out.dma_conf.dst_maxburst = 1;
598 dd->dma_lch_out.dma_conf.device_fc = false;
599
600 return 0;
601 } else {
602 return -ENODEV;
603 }
604
605err_dma_out:
606 dma_release_channel(dd->dma_lch_in.chan);
607err_dma_in:
608 return err;
609}
610
611static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
612{
613 dma_release_channel(dd->dma_lch_in.chan);
614 dma_release_channel(dd->dma_lch_out.chan);
615}
616
617static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
618 unsigned int keylen)
619{
620 struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
621
622 if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
623 keylen != AES_KEYSIZE_256) {
624 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
625 return -EINVAL;
626 }
627
628 memcpy(ctx->key, key, keylen);
629 ctx->keylen = keylen;
630
631 return 0;
632}
633
634static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
635{
636 return atmel_aes_crypt(req,
637 AES_FLAGS_ENCRYPT);
638}
639
640static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
641{
642 return atmel_aes_crypt(req,
643 0);
644}
645
646static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
647{
648 return atmel_aes_crypt(req,
649 AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
650}
651
652static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
653{
654 return atmel_aes_crypt(req,
655 AES_FLAGS_CBC);
656}
657
658static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
659{
660 return atmel_aes_crypt(req,
661 AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
662}
663
664static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
665{
666 return atmel_aes_crypt(req,
667 AES_FLAGS_OFB);
668}
669
670static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
671{
672 return atmel_aes_crypt(req,
673 AES_FLAGS_ENCRYPT | AES_FLAGS_CFB);
674}
675
676static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
677{
678 return atmel_aes_crypt(req,
679 AES_FLAGS_CFB);
680}
681
682static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
683{
684 return atmel_aes_crypt(req,
685 AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64);
686}
687
688static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
689{
690 return atmel_aes_crypt(req,
691 AES_FLAGS_CFB | AES_FLAGS_CFB64);
692}
693
694static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
695{
696 return atmel_aes_crypt(req,
697 AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32);
698}
699
700static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
701{
702 return atmel_aes_crypt(req,
703 AES_FLAGS_CFB | AES_FLAGS_CFB32);
704}
705
706static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
707{
708 return atmel_aes_crypt(req,
709 AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16);
710}
711
712static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
713{
714 return atmel_aes_crypt(req,
715 AES_FLAGS_CFB | AES_FLAGS_CFB16);
716}
717
718static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
719{
720 return atmel_aes_crypt(req,
721 AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8);
722}
723
724static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
725{
726 return atmel_aes_crypt(req,
727 AES_FLAGS_CFB | AES_FLAGS_CFB8);
728}
729
730static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
731{
732 return atmel_aes_crypt(req,
733 AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
734}
735
736static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
737{
738 return atmel_aes_crypt(req,
739 AES_FLAGS_CTR);
740}
741
742static int atmel_aes_cra_init(struct crypto_tfm *tfm)
743{
744 tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
745
746 return 0;
747}
748
749static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
750{
751}
752
753static struct crypto_alg aes_algs[] = {
754{
755 .cra_name = "ecb(aes)",
756 .cra_driver_name = "atmel-ecb-aes",
757 .cra_priority = 100,
758 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
759 .cra_blocksize = AES_BLOCK_SIZE,
760 .cra_ctxsize = sizeof(struct atmel_aes_ctx),
761 .cra_alignmask = 0x0,
762 .cra_type = &crypto_ablkcipher_type,
763 .cra_module = THIS_MODULE,
764 .cra_init = atmel_aes_cra_init,
765 .cra_exit = atmel_aes_cra_exit,
766 .cra_u.ablkcipher = {
767 .min_keysize = AES_MIN_KEY_SIZE,
768 .max_keysize = AES_MAX_KEY_SIZE,
769 .setkey = atmel_aes_setkey,
770 .encrypt = atmel_aes_ecb_encrypt,
771 .decrypt = atmel_aes_ecb_decrypt,
772 }
773},
774{
775 .cra_name = "cbc(aes)",
776 .cra_driver_name = "atmel-cbc-aes",
777 .cra_priority = 100,
778 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
779 .cra_blocksize = AES_BLOCK_SIZE,
780 .cra_ctxsize = sizeof(struct atmel_aes_ctx),
781 .cra_alignmask = 0x0,
782 .cra_type = &crypto_ablkcipher_type,
783 .cra_module = THIS_MODULE,
784 .cra_init = atmel_aes_cra_init,
785 .cra_exit = atmel_aes_cra_exit,
786 .cra_u.ablkcipher = {
787 .min_keysize = AES_MIN_KEY_SIZE,
788 .max_keysize = AES_MAX_KEY_SIZE,
789 .ivsize = AES_BLOCK_SIZE,
790 .setkey = atmel_aes_setkey,
791 .encrypt = atmel_aes_cbc_encrypt,
792 .decrypt = atmel_aes_cbc_decrypt,
793 }
794},
795{
796 .cra_name = "ofb(aes)",
797 .cra_driver_name = "atmel-ofb-aes",
798 .cra_priority = 100,
799 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
800 .cra_blocksize = AES_BLOCK_SIZE,
801 .cra_ctxsize = sizeof(struct atmel_aes_ctx),
802 .cra_alignmask = 0x0,
803 .cra_type = &crypto_ablkcipher_type,
804 .cra_module = THIS_MODULE,
805 .cra_init = atmel_aes_cra_init,
806 .cra_exit = atmel_aes_cra_exit,
807 .cra_u.ablkcipher = {
808 .min_keysize = AES_MIN_KEY_SIZE,
809 .max_keysize = AES_MAX_KEY_SIZE,
810 .ivsize = AES_BLOCK_SIZE,
811 .setkey = atmel_aes_setkey,
812 .encrypt = atmel_aes_ofb_encrypt,
813 .decrypt = atmel_aes_ofb_decrypt,
814 }
815},
816{
817 .cra_name = "cfb(aes)",
818 .cra_driver_name = "atmel-cfb-aes",
819 .cra_priority = 100,
820 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
821 .cra_blocksize = AES_BLOCK_SIZE,
822 .cra_ctxsize = sizeof(struct atmel_aes_ctx),
823 .cra_alignmask = 0x0,
824 .cra_type = &crypto_ablkcipher_type,
825 .cra_module = THIS_MODULE,
826 .cra_init = atmel_aes_cra_init,
827 .cra_exit = atmel_aes_cra_exit,
828 .cra_u.ablkcipher = {
829 .min_keysize = AES_MIN_KEY_SIZE,
830 .max_keysize = AES_MAX_KEY_SIZE,
831 .ivsize = AES_BLOCK_SIZE,
832 .setkey = atmel_aes_setkey,
833 .encrypt = atmel_aes_cfb_encrypt,
834 .decrypt = atmel_aes_cfb_decrypt,
835 }
836},
837{
838 .cra_name = "cfb32(aes)",
839 .cra_driver_name = "atmel-cfb32-aes",
840 .cra_priority = 100,
841 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
842 .cra_blocksize = CFB32_BLOCK_SIZE,
843 .cra_ctxsize = sizeof(struct atmel_aes_ctx),
844 .cra_alignmask = 0x0,
845 .cra_type = &crypto_ablkcipher_type,
846 .cra_module = THIS_MODULE,
847 .cra_init = atmel_aes_cra_init,
848 .cra_exit = atmel_aes_cra_exit,
849 .cra_u.ablkcipher = {
850 .min_keysize = AES_MIN_KEY_SIZE,
851 .max_keysize = AES_MAX_KEY_SIZE,
852 .ivsize = AES_BLOCK_SIZE,
853 .setkey = atmel_aes_setkey,
854 .encrypt = atmel_aes_cfb32_encrypt,
855 .decrypt = atmel_aes_cfb32_decrypt,
856 }
857},
858{
859 .cra_name = "cfb16(aes)",
860 .cra_driver_name = "atmel-cfb16-aes",
861 .cra_priority = 100,
862 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
863 .cra_blocksize = CFB16_BLOCK_SIZE,
864 .cra_ctxsize = sizeof(struct atmel_aes_ctx),
865 .cra_alignmask = 0x0,
866 .cra_type = &crypto_ablkcipher_type,
867 .cra_module = THIS_MODULE,
868 .cra_init = atmel_aes_cra_init,
869 .cra_exit = atmel_aes_cra_exit,
870 .cra_u.ablkcipher = {
871 .min_keysize = AES_MIN_KEY_SIZE,
872 .max_keysize = AES_MAX_KEY_SIZE,
873 .ivsize = AES_BLOCK_SIZE,
874 .setkey = atmel_aes_setkey,
875 .encrypt = atmel_aes_cfb16_encrypt,
876 .decrypt = atmel_aes_cfb16_decrypt,
877 }
878},
879{
880 .cra_name = "cfb8(aes)",
881 .cra_driver_name = "atmel-cfb8-aes",
882 .cra_priority = 100,
883 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
884	.cra_blocksize		= CFB8_BLOCK_SIZE,
885 .cra_ctxsize = sizeof(struct atmel_aes_ctx),
886 .cra_alignmask = 0x0,
887 .cra_type = &crypto_ablkcipher_type,
888 .cra_module = THIS_MODULE,
889 .cra_init = atmel_aes_cra_init,
890 .cra_exit = atmel_aes_cra_exit,
891 .cra_u.ablkcipher = {
892 .min_keysize = AES_MIN_KEY_SIZE,
893 .max_keysize = AES_MAX_KEY_SIZE,
894 .ivsize = AES_BLOCK_SIZE,
895 .setkey = atmel_aes_setkey,
896 .encrypt = atmel_aes_cfb8_encrypt,
897 .decrypt = atmel_aes_cfb8_decrypt,
898 }
899},
900{
901 .cra_name = "ctr(aes)",
902 .cra_driver_name = "atmel-ctr-aes",
903 .cra_priority = 100,
904 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
905 .cra_blocksize = AES_BLOCK_SIZE,
906 .cra_ctxsize = sizeof(struct atmel_aes_ctx),
907 .cra_alignmask = 0x0,
908 .cra_type = &crypto_ablkcipher_type,
909 .cra_module = THIS_MODULE,
910 .cra_init = atmel_aes_cra_init,
911 .cra_exit = atmel_aes_cra_exit,
912 .cra_u.ablkcipher = {
913 .min_keysize = AES_MIN_KEY_SIZE,
914 .max_keysize = AES_MAX_KEY_SIZE,
915 .ivsize = AES_BLOCK_SIZE,
916 .setkey = atmel_aes_setkey,
917 .encrypt = atmel_aes_ctr_encrypt,
918 .decrypt = atmel_aes_ctr_decrypt,
919 }
920},
921};
922
923static struct crypto_alg aes_cfb64_alg[] = {
924{
925 .cra_name = "cfb64(aes)",
926 .cra_driver_name = "atmel-cfb64-aes",
927 .cra_priority = 100,
928 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
929 .cra_blocksize = CFB64_BLOCK_SIZE,
930 .cra_ctxsize = sizeof(struct atmel_aes_ctx),
931 .cra_alignmask = 0x0,
932 .cra_type = &crypto_ablkcipher_type,
933 .cra_module = THIS_MODULE,
934 .cra_init = atmel_aes_cra_init,
935 .cra_exit = atmel_aes_cra_exit,
936 .cra_u.ablkcipher = {
937 .min_keysize = AES_MIN_KEY_SIZE,
938 .max_keysize = AES_MAX_KEY_SIZE,
939 .ivsize = AES_BLOCK_SIZE,
940 .setkey = atmel_aes_setkey,
941 .encrypt = atmel_aes_cfb64_encrypt,
942 .decrypt = atmel_aes_cfb64_decrypt,
943 }
944},
945};
946
947static void atmel_aes_queue_task(unsigned long data)
948{
949 struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
950
951 atmel_aes_handle_queue(dd, NULL);
952}
953
954static void atmel_aes_done_task(unsigned long data)
955{
956 struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data;
957 int err;
958
959 if (!(dd->flags & AES_FLAGS_DMA)) {
960 atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out,
961 dd->bufcnt >> 2);
962
963 if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg,
964 dd->buf_out, dd->bufcnt))
965 err = 0;
966 else
967 err = -EINVAL;
968
969 goto cpu_end;
970 }
971
972 err = atmel_aes_crypt_dma_stop(dd);
973
974 err = dd->err ? : err;
975
976 if (dd->total && !err) {
977 err = atmel_aes_crypt_dma_start(dd);
978 if (!err)
979			return; /* DMA started. Not finishing. */
980 }
981
982cpu_end:
983 atmel_aes_finish_req(dd, err);
984 atmel_aes_handle_queue(dd, NULL);
985}
986
987static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
988{
989 struct atmel_aes_dev *aes_dd = dev_id;
990 u32 reg;
991
992 reg = atmel_aes_read(aes_dd, AES_ISR);
993 if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
994 atmel_aes_write(aes_dd, AES_IDR, reg);
995 if (AES_FLAGS_BUSY & aes_dd->flags)
996 tasklet_schedule(&aes_dd->done_task);
997 else
998 dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
999 return IRQ_HANDLED;
1000 }
1001
1002 return IRQ_NONE;
1003}
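
The handler only claims the interrupt when a raised status bit is also enabled in the mask register, and acknowledges by writing the status back to the disable register. A minimal userspace sketch of that mask-and-acknowledge idiom (the register model and values here are invented for illustration, not the real AES block):

#include <stdio.h>
#include <stdint.h>

/* Toy register model -- the values are invented for illustration. */
static uint32_t isr = 0x00000101;	/* raw status: two sources raised */
static uint32_t imr = 0x00000001;	/* only bit 0 is enabled */
static uint32_t idr;			/* "interrupt disable" register */

int main(void)
{
	uint32_t pending = isr & imr;	/* only enabled sources count */

	if (!pending)
		return 0;		/* IRQ_NONE: not our interrupt */

	idr = pending;			/* mask what we are about to handle */
	printf("handling 0x%08x, ignoring raw 0x%08x\n",
	       (unsigned)pending, (unsigned)(isr & ~imr));
	return 0;			/* IRQ_HANDLED */
}
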
1004
1005static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
1006{
1007 int i;
1008
1009 for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1010 crypto_unregister_alg(&aes_algs[i]);
1011 if (dd->hw_version >= 0x130)
1012 crypto_unregister_alg(&aes_cfb64_alg[0]);
1013}
1014
1015static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
1016{
1017 int err, i, j;
1018
1019 for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1020 INIT_LIST_HEAD(&aes_algs[i].cra_list);
1021 err = crypto_register_alg(&aes_algs[i]);
1022 if (err)
1023 goto err_aes_algs;
1024 }
1025
1026 atmel_aes_hw_version_init(dd);
1027
1028 if (dd->hw_version >= 0x130) {
1029 INIT_LIST_HEAD(&aes_cfb64_alg[0].cra_list);
1030 err = crypto_register_alg(&aes_cfb64_alg[0]);
1031 if (err)
1032 goto err_aes_cfb64_alg;
1033 }
1034
1035 return 0;
1036
1037err_aes_cfb64_alg:
1038 i = ARRAY_SIZE(aes_algs);
1039err_aes_algs:
1040 for (j = 0; j < i; j++)
1041 crypto_unregister_alg(&aes_algs[j]);
1042
1043 return err;
1044}
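
The error labels above implement the usual register-then-unwind idiom: on failure only the algorithms that were already registered are unregistered, and the cfb64 failure path reuses the same loop by first setting i to the full array size. A self-contained sketch of the idiom with stubbed register/unregister calls (fake_register and fake_unregister are invented names standing in for the crypto API):

#include <stdio.h>

#define N 3

static int fake_register(int idx)
{
	if (idx == 2)
		return -1;	/* simulate a failure on the third entry */
	printf("registered %d\n", idx);
	return 0;
}

static void fake_unregister(int idx)
{
	printf("unregistered %d\n", idx);
}

int main(void)
{
	int err = 0, i, j;

	for (i = 0; i < N; i++) {
		err = fake_register(i);
		if (err)
			goto err_algs;
	}
	return 0;

err_algs:
	for (j = 0; j < i; j++)	/* unwind only what succeeded */
		fake_unregister(j);
	return err;
}
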
1045
1046static int __devinit atmel_aes_probe(struct platform_device *pdev)
1047{
1048 struct atmel_aes_dev *aes_dd;
1049 struct aes_platform_data *pdata;
1050 struct device *dev = &pdev->dev;
1051 struct resource *aes_res;
1052 unsigned long aes_phys_size;
1053 int err;
1054
1055 pdata = pdev->dev.platform_data;
1056 if (!pdata) {
1057 err = -ENXIO;
1058 goto aes_dd_err;
1059 }
1060
1061 aes_dd = kzalloc(sizeof(struct atmel_aes_dev), GFP_KERNEL);
1062 if (aes_dd == NULL) {
1063 dev_err(dev, "unable to alloc data struct.\n");
1064 err = -ENOMEM;
1065 goto aes_dd_err;
1066 }
1067
1068 aes_dd->dev = dev;
1069
1070 platform_set_drvdata(pdev, aes_dd);
1071
1072 INIT_LIST_HEAD(&aes_dd->list);
1073
1074 tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
1075 (unsigned long)aes_dd);
1076 tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
1077 (unsigned long)aes_dd);
1078
1079 crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
1080
1081 aes_dd->irq = -1;
1082
1083 /* Get the base address */
1084 aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1085 if (!aes_res) {
1086 dev_err(dev, "no MEM resource info\n");
1087 err = -ENODEV;
1088 goto res_err;
1089 }
1090 aes_dd->phys_base = aes_res->start;
1091 aes_phys_size = resource_size(aes_res);
1092
1093 /* Get the IRQ */
1094 aes_dd->irq = platform_get_irq(pdev, 0);
1095 if (aes_dd->irq < 0) {
1096 dev_err(dev, "no IRQ resource info\n");
1097 err = aes_dd->irq;
1098 goto aes_irq_err;
1099 }
1100
1101 err = request_irq(aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes",
1102 aes_dd);
1103 if (err) {
1104 dev_err(dev, "unable to request aes irq.\n");
1105 goto aes_irq_err;
1106 }
1107
1108 /* Initializing the clock */
1109 aes_dd->iclk = clk_get(&pdev->dev, NULL);
1110 if (IS_ERR(aes_dd->iclk)) {
1111		dev_err(dev, "clock initialization failed.\n");
1112 err = PTR_ERR(aes_dd->iclk);
1113 goto clk_err;
1114 }
1115
1116 aes_dd->io_base = ioremap(aes_dd->phys_base, aes_phys_size);
1117 if (!aes_dd->io_base) {
1118 dev_err(dev, "can't ioremap\n");
1119 err = -ENOMEM;
1120 goto aes_io_err;
1121 }
1122
1123 err = atmel_aes_dma_init(aes_dd);
1124 if (err)
1125 goto err_aes_dma;
1126
1127 spin_lock(&atmel_aes.lock);
1128 list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
1129 spin_unlock(&atmel_aes.lock);
1130
1131 err = atmel_aes_register_algs(aes_dd);
1132 if (err)
1133 goto err_algs;
1134
1135 dev_info(dev, "Atmel AES\n");
1136
1137 return 0;
1138
1139err_algs:
1140 spin_lock(&atmel_aes.lock);
1141 list_del(&aes_dd->list);
1142 spin_unlock(&atmel_aes.lock);
1143 atmel_aes_dma_cleanup(aes_dd);
1144err_aes_dma:
1145 iounmap(aes_dd->io_base);
1146aes_io_err:
1147 clk_put(aes_dd->iclk);
1148clk_err:
1149 free_irq(aes_dd->irq, aes_dd);
1150aes_irq_err:
1151res_err:
1152 tasklet_kill(&aes_dd->done_task);
1153 tasklet_kill(&aes_dd->queue_task);
1154 kfree(aes_dd);
1155 aes_dd = NULL;
1156aes_dd_err:
1157 dev_err(dev, "initialization failed.\n");
1158
1159 return err;
1160}
1161
1162static int __devexit atmel_aes_remove(struct platform_device *pdev)
1163{
1164	struct atmel_aes_dev *aes_dd;
1165
1166 aes_dd = platform_get_drvdata(pdev);
1167 if (!aes_dd)
1168 return -ENODEV;
1169 spin_lock(&atmel_aes.lock);
1170 list_del(&aes_dd->list);
1171 spin_unlock(&atmel_aes.lock);
1172
1173 atmel_aes_unregister_algs(aes_dd);
1174
1175 tasklet_kill(&aes_dd->done_task);
1176 tasklet_kill(&aes_dd->queue_task);
1177
1178 atmel_aes_dma_cleanup(aes_dd);
1179
1180 iounmap(aes_dd->io_base);
1181
1182 clk_put(aes_dd->iclk);
1183
1184 if (aes_dd->irq > 0)
1185 free_irq(aes_dd->irq, aes_dd);
1186
1187 kfree(aes_dd);
1188 aes_dd = NULL;
1189
1190 return 0;
1191}
1192
1193static struct platform_driver atmel_aes_driver = {
1194 .probe = atmel_aes_probe,
1195 .remove = __devexit_p(atmel_aes_remove),
1196 .driver = {
1197 .name = "atmel_aes",
1198 .owner = THIS_MODULE,
1199 },
1200};
1201
1202module_platform_driver(atmel_aes_driver);
1203
1204MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
1205MODULE_LICENSE("GPL v2");
1206MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
diff --git a/drivers/crypto/atmel-sha-regs.h b/drivers/crypto/atmel-sha-regs.h
new file mode 100644
index 000000000000..dc53a20d7da1
--- /dev/null
+++ b/drivers/crypto/atmel-sha-regs.h
@@ -0,0 +1,46 @@
1#ifndef __ATMEL_SHA_REGS_H__
2#define __ATMEL_SHA_REGS_H__
3
4#define SHA_REG_DIGEST(x) (0x80 + ((x) * 0x04))
5#define SHA_REG_DIN(x) (0x40 + ((x) * 0x04))
6
7#define SHA_CR 0x00
8#define SHA_CR_START (1 << 0)
9#define SHA_CR_FIRST (1 << 4)
10#define SHA_CR_SWRST (1 << 8)
11
12#define SHA_MR 0x04
13#define SHA_MR_MODE_MASK (0x3 << 0)
14#define SHA_MR_MODE_MANUAL 0x0
15#define SHA_MR_MODE_AUTO 0x1
16#define SHA_MR_MODE_PDC 0x2
17#define SHA_MR_DUALBUFF (1 << 3)
18#define SHA_MR_PROCDLY (1 << 4)
19#define SHA_MR_ALGO_SHA1 (0 << 8)
20#define SHA_MR_ALGO_SHA256 (1 << 8)
21
22#define SHA_IER 0x10
23#define SHA_IDR 0x14
24#define SHA_IMR 0x18
25#define SHA_ISR 0x1C
26#define SHA_INT_DATARDY (1 << 0)
27#define SHA_INT_ENDTX (1 << 1)
28#define SHA_INT_TXBUFE (1 << 2)
29#define SHA_INT_URAD (1 << 8)
30#define SHA_ISR_URAT_MASK (0x7 << 12)
31#define SHA_ISR_URAT_IDR (0x0 << 12)
32#define SHA_ISR_URAT_ODR (0x1 << 12)
33#define SHA_ISR_URAT_MR (0x2 << 12)
34#define SHA_ISR_URAT_WO (0x5 << 12)
35
36#define SHA_TPR 0x108
37#define SHA_TCR 0x10C
38#define SHA_TNPR 0x118
39#define SHA_TNCR 0x11C
40#define SHA_PTCR 0x120
41#define SHA_PTCR_TXTEN (1 << 8)
42#define SHA_PTCR_TXTDIS (1 << 9)
43#define SHA_PTSR 0x124
44#define SHA_PTSR_TXTEN (1 << 8)
45
46#endif /* __ATMEL_SHA_REGS_H__ */
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
new file mode 100644
index 000000000000..f938b9d79b66
--- /dev/null
+++ b/drivers/crypto/atmel-sha.c
@@ -0,0 +1,1112 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for ATMEL SHA1/SHA256 HW acceleration.
5 *
6 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
7 * Author: Nicolas Royer <nicolas@eukrea.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 *
13 * Some ideas are from omap-sham.c drivers.
14 */
15
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/err.h>
21#include <linux/clk.h>
22#include <linux/io.h>
23#include <linux/hw_random.h>
24#include <linux/platform_device.h>
25
26#include <linux/device.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/errno.h>
30#include <linux/interrupt.h>
31#include <linux/kernel.h>
32#include <linux/clk.h>
33#include <linux/irq.h>
34#include <linux/io.h>
35#include <linux/platform_device.h>
36#include <linux/scatterlist.h>
37#include <linux/dma-mapping.h>
38#include <linux/delay.h>
39#include <linux/crypto.h>
40#include <linux/cryptohash.h>
41#include <crypto/scatterwalk.h>
42#include <crypto/algapi.h>
43#include <crypto/sha.h>
44#include <crypto/hash.h>
45#include <crypto/internal/hash.h>
46#include "atmel-sha-regs.h"
47
48/* SHA flags */
49#define SHA_FLAGS_BUSY BIT(0)
50#define SHA_FLAGS_FINAL BIT(1)
51#define SHA_FLAGS_DMA_ACTIVE BIT(2)
52#define SHA_FLAGS_OUTPUT_READY BIT(3)
53#define SHA_FLAGS_INIT BIT(4)
54#define SHA_FLAGS_CPU BIT(5)
55#define SHA_FLAGS_DMA_READY BIT(6)
56
57#define SHA_FLAGS_FINUP BIT(16)
58#define SHA_FLAGS_SG BIT(17)
59#define SHA_FLAGS_SHA1 BIT(18)
60#define SHA_FLAGS_SHA256 BIT(19)
61#define SHA_FLAGS_ERROR BIT(20)
62#define SHA_FLAGS_PAD BIT(21)
63
64#define SHA_FLAGS_DUALBUFF BIT(24)
65
66#define SHA_OP_UPDATE 1
67#define SHA_OP_FINAL 2
68
69#define SHA_BUFFER_LEN PAGE_SIZE
70
71#define ATMEL_SHA_DMA_THRESHOLD 56
72
73
74struct atmel_sha_dev;
75
76struct atmel_sha_reqctx {
77 struct atmel_sha_dev *dd;
78 unsigned long flags;
79 unsigned long op;
80
81 u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
82 size_t digcnt;
83 size_t bufcnt;
84 size_t buflen;
85 dma_addr_t dma_addr;
86
87 /* walk state */
88 struct scatterlist *sg;
89 unsigned int offset; /* offset in current sg */
90 unsigned int total; /* total request */
91
92 u8 buffer[0] __aligned(sizeof(u32));
93};
94
95struct atmel_sha_ctx {
96 struct atmel_sha_dev *dd;
97
98 unsigned long flags;
99
100 /* fallback stuff */
101 struct crypto_shash *fallback;
102
103};
104
105#define ATMEL_SHA_QUEUE_LENGTH 1
106
107struct atmel_sha_dev {
108 struct list_head list;
109 unsigned long phys_base;
110 struct device *dev;
111 struct clk *iclk;
112 int irq;
113 void __iomem *io_base;
114
115 spinlock_t lock;
116 int err;
117 struct tasklet_struct done_task;
118
119 unsigned long flags;
120 struct crypto_queue queue;
121 struct ahash_request *req;
122};
123
124struct atmel_sha_drv {
125 struct list_head dev_list;
126 spinlock_t lock;
127};
128
129static struct atmel_sha_drv atmel_sha = {
130 .dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
131 .lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
132};
133
134static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
135{
136 return readl_relaxed(dd->io_base + offset);
137}
138
139static inline void atmel_sha_write(struct atmel_sha_dev *dd,
140 u32 offset, u32 value)
141{
142 writel_relaxed(value, dd->io_base + offset);
143}
144
145static void atmel_sha_dualbuff_test(struct atmel_sha_dev *dd)
146{
147 atmel_sha_write(dd, SHA_MR, SHA_MR_DUALBUFF);
148
149 if (atmel_sha_read(dd, SHA_MR) & SHA_MR_DUALBUFF)
150 dd->flags |= SHA_FLAGS_DUALBUFF;
151}
152
153static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
154{
155 size_t count;
156
157 while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
158 count = min(ctx->sg->length - ctx->offset, ctx->total);
159 count = min(count, ctx->buflen - ctx->bufcnt);
160
161		if (!count)
162 break;
163
164 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
165 ctx->offset, count, 0);
166
167 ctx->bufcnt += count;
168 ctx->offset += count;
169 ctx->total -= count;
170
171 if (ctx->offset == ctx->sg->length) {
172 ctx->sg = sg_next(ctx->sg);
173 if (ctx->sg)
174 ctx->offset = 0;
175 else
176 ctx->total = 0;
177 }
178 }
179
180 return 0;
181}
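
atmel_sha_append_sg() drains the scatterlist into the request buffer by clamping each copy twice: once to what remains in the current sg entry (and in the request), once to the space left in the buffer. A standalone sketch of that double-clamp walk over plain C strings instead of scatterlists (a toy model, not the kernel API):

#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* Three input chunks standing in for scatterlist entries. */
	const char *chunks[] = { "abcdef", "ghi", "jklmnop" };
	size_t nchunks = 3, cur = 0, offset = 0, total = 16;
	char buf[8];
	size_t bufcnt = 0;

	while (bufcnt < sizeof(buf) && total) {
		size_t count = MIN(strlen(chunks[cur]) - offset, total);

		count = MIN(count, sizeof(buf) - bufcnt);
		if (!count)
			break;
		memcpy(buf + bufcnt, chunks[cur] + offset, count);
		bufcnt += count;
		offset += count;
		total -= count;
		if (offset == strlen(chunks[cur])) {
			/* advance to the next chunk, like sg_next() */
			offset = 0;
			if (++cur == nchunks)
				total = 0;
		}
	}
	printf("buffered %zu bytes: %.*s\n", bufcnt, (int)bufcnt, buf);
	return 0;
}
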
182
183/*
184 * The purpose of this padding is to ensure that the padded message
185 * length is a multiple of 512 bits. The bit "1" is appended at the end
186 * of the message, followed by "padlen-1" zero bits. Then a 64-bit block
187 * containing the message length in bits is appended.
188 *
189 * padlen is calculated as follows (index = message length mod 64):
190 * - if index < 56 then padlen = 56 - index
191 * - else padlen = 64 + 56 - index
192 */
193static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
194{
195 unsigned int index, padlen;
196 u64 bits;
197 u64 size;
198
199 bits = (ctx->bufcnt + ctx->digcnt + length) << 3;
200 size = cpu_to_be64(bits);
201
202 index = ctx->bufcnt & 0x3f;
203 padlen = (index < 56) ? (56 - index) : ((64+56) - index);
204 *(ctx->buffer + ctx->bufcnt) = 0x80;
205 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
206 memcpy(ctx->buffer + ctx->bufcnt + padlen, &size, 8);
207 ctx->bufcnt += padlen + 8;
208 ctx->flags |= SHA_FLAGS_PAD;
209}
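
The invariant behind the formula is easy to check: the data so far, plus the 0x80/zero padding, plus the 8-byte length word must land exactly on a 64-byte boundary. A small userspace check of the padlen arithmetic used above:

#include <stdio.h>

int main(void)
{
	unsigned int len;

	for (len = 0; len < 128; len++) {
		unsigned int index = len & 0x3f;
		unsigned int padlen = (index < 56) ? (56 - index)
						   : ((64 + 56) - index);

		/* the 0x80 byte and padlen-1 zeros are 'padlen' bytes,
		 * followed by the 8-byte big-endian bit count */
		if ((len + padlen + 8) % 64)
			printf("broken for len=%u\n", len);
	}
	printf("padding lands on a 64-byte boundary for all lengths\n");
	return 0;
}
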
210
211static int atmel_sha_init(struct ahash_request *req)
212{
213 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
214 struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
215 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
216 struct atmel_sha_dev *dd = NULL;
217 struct atmel_sha_dev *tmp;
218
219 spin_lock_bh(&atmel_sha.lock);
220 if (!tctx->dd) {
221 list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
222 dd = tmp;
223 break;
224 }
225 tctx->dd = dd;
226 } else {
227 dd = tctx->dd;
228 }
229
230 spin_unlock_bh(&atmel_sha.lock);
231
232 ctx->dd = dd;
233
234 ctx->flags = 0;
235
236 dev_dbg(dd->dev, "init: digest size: %d\n",
237 crypto_ahash_digestsize(tfm));
238
239 if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
240 ctx->flags |= SHA_FLAGS_SHA1;
241 else if (crypto_ahash_digestsize(tfm) == SHA256_DIGEST_SIZE)
242 ctx->flags |= SHA_FLAGS_SHA256;
243
244 ctx->bufcnt = 0;
245 ctx->digcnt = 0;
246 ctx->buflen = SHA_BUFFER_LEN;
247
248 return 0;
249}
250
251static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
252{
253 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
254 u32 valcr = 0, valmr = SHA_MR_MODE_AUTO;
255
256 if (likely(dma)) {
257 atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
258 valmr = SHA_MR_MODE_PDC;
259 if (dd->flags & SHA_FLAGS_DUALBUFF)
260 valmr = SHA_MR_DUALBUFF;
261 } else {
262 atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
263 }
264
265 if (ctx->flags & SHA_FLAGS_SHA256)
266 valmr |= SHA_MR_ALGO_SHA256;
267
268 /* Setting CR_FIRST only for the first iteration */
269 if (!ctx->digcnt)
270 valcr = SHA_CR_FIRST;
271
272 atmel_sha_write(dd, SHA_CR, valcr);
273 atmel_sha_write(dd, SHA_MR, valmr);
274}
275
276static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
277 size_t length, int final)
278{
279 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
280 int count, len32;
281 const u32 *buffer = (const u32 *)buf;
282
283 dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
284 ctx->digcnt, length, final);
285
286 atmel_sha_write_ctrl(dd, 0);
287
288	/* update digcnt before the transfer completes and finish_req() runs */
289 ctx->digcnt += length;
290
291 if (final)
292 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
293
294 len32 = DIV_ROUND_UP(length, sizeof(u32));
295
296 dd->flags |= SHA_FLAGS_CPU;
297
298 for (count = 0; count < len32; count++)
299 atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);
300
301 return -EINPROGRESS;
302}
303
304static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
305 size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
306{
307 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
308 int len32;
309
310 dev_dbg(dd->dev, "xmit_pdc: digcnt: %d, length: %d, final: %d\n",
311 ctx->digcnt, length1, final);
312
313 len32 = DIV_ROUND_UP(length1, sizeof(u32));
314 atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
315 atmel_sha_write(dd, SHA_TPR, dma_addr1);
316 atmel_sha_write(dd, SHA_TCR, len32);
317
318 len32 = DIV_ROUND_UP(length2, sizeof(u32));
319 atmel_sha_write(dd, SHA_TNPR, dma_addr2);
320 atmel_sha_write(dd, SHA_TNCR, len32);
321
322 atmel_sha_write_ctrl(dd, 1);
323
324	/* update digcnt before the transfer completes and finish_req() runs */
325 ctx->digcnt += length1;
326
327 if (final)
328 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
329
330 dd->flags |= SHA_FLAGS_DMA_ACTIVE;
331
332 /* Start DMA transfer */
333 atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);
334
335 return -EINPROGRESS;
336}
337
338static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
339{
340 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
341 int bufcnt;
342
343 atmel_sha_append_sg(ctx);
344 atmel_sha_fill_padding(ctx, 0);
345
346 bufcnt = ctx->bufcnt;
347 ctx->bufcnt = 0;
348
349 return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
350}
351
352static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
353 struct atmel_sha_reqctx *ctx,
354 size_t length, int final)
355{
356 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
357 ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
358 if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
359 dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen +
360 SHA1_BLOCK_SIZE);
361 return -EINVAL;
362 }
363
364 ctx->flags &= ~SHA_FLAGS_SG;
365
366 /* next call does not fail... so no unmap in the case of error */
367 return atmel_sha_xmit_pdc(dd, ctx->dma_addr, length, 0, 0, final);
368}
369
370static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
371{
372 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
373 unsigned int final;
374 size_t count;
375
376 atmel_sha_append_sg(ctx);
377
378 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
379
380 dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
381 ctx->bufcnt, ctx->digcnt, final);
382
383 if (final)
384 atmel_sha_fill_padding(ctx, 0);
385
386 if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
387 count = ctx->bufcnt;
388 ctx->bufcnt = 0;
389 return atmel_sha_xmit_dma_map(dd, ctx, count, final);
390 }
391
392 return 0;
393}
394
395static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
396{
397 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
398 unsigned int length, final, tail;
399 struct scatterlist *sg;
400 unsigned int count;
401
402 if (!ctx->total)
403 return 0;
404
405 if (ctx->bufcnt || ctx->offset)
406 return atmel_sha_update_dma_slow(dd);
407
408 dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
409 ctx->digcnt, ctx->bufcnt, ctx->total);
410
411 sg = ctx->sg;
412
413 if (!IS_ALIGNED(sg->offset, sizeof(u32)))
414 return atmel_sha_update_dma_slow(dd);
415
416 if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, SHA1_BLOCK_SIZE))
417 /* size is not SHA1_BLOCK_SIZE aligned */
418 return atmel_sha_update_dma_slow(dd);
419
420 length = min(ctx->total, sg->length);
421
422 if (sg_is_last(sg)) {
423 if (!(ctx->flags & SHA_FLAGS_FINUP)) {
424 /* not last sg must be SHA1_BLOCK_SIZE aligned */
425 tail = length & (SHA1_BLOCK_SIZE - 1);
426 length -= tail;
427 if (length == 0) {
428 /* offset where to start slow */
429 ctx->offset = length;
430 return atmel_sha_update_dma_slow(dd);
431 }
432 }
433 }
434
435 ctx->total -= length;
436 ctx->offset = length; /* offset where to start slow */
437
438 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
439
440 /* Add padding */
441 if (final) {
442 tail = length & (SHA1_BLOCK_SIZE - 1);
443 length -= tail;
444 ctx->total += tail;
445 ctx->offset = length; /* offset where to start slow */
446
447 sg = ctx->sg;
448 atmel_sha_append_sg(ctx);
449
450 atmel_sha_fill_padding(ctx, length);
451
452 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
453 ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
454 if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
455 dev_err(dd->dev, "dma %u bytes error\n",
456 ctx->buflen + SHA1_BLOCK_SIZE);
457 return -EINVAL;
458 }
459
460 if (length == 0) {
461 ctx->flags &= ~SHA_FLAGS_SG;
462 count = ctx->bufcnt;
463 ctx->bufcnt = 0;
464 return atmel_sha_xmit_pdc(dd, ctx->dma_addr, count, 0,
465 0, final);
466 } else {
467 ctx->sg = sg;
468 if (!dma_map_sg(dd->dev, ctx->sg, 1,
469 DMA_TO_DEVICE)) {
470 dev_err(dd->dev, "dma_map_sg error\n");
471 return -EINVAL;
472 }
473
474 ctx->flags |= SHA_FLAGS_SG;
475
476 count = ctx->bufcnt;
477 ctx->bufcnt = 0;
478 return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg),
479 length, ctx->dma_addr, count, final);
480 }
481 }
482
483 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
484 dev_err(dd->dev, "dma_map_sg error\n");
485 return -EINVAL;
486 }
487
488 ctx->flags |= SHA_FLAGS_SG;
489
490 /* next call does not fail... so no unmap in the case of error */
491 return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg), length, 0,
492 0, final);
493}
494
495static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
496{
497 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
498
499 if (ctx->flags & SHA_FLAGS_SG) {
500 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
501 if (ctx->sg->length == ctx->offset) {
502 ctx->sg = sg_next(ctx->sg);
503 if (ctx->sg)
504 ctx->offset = 0;
505 }
506 if (ctx->flags & SHA_FLAGS_PAD)
507 dma_unmap_single(dd->dev, ctx->dma_addr,
508 ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
509 } else {
510 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
511 SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
512 }
513
514 return 0;
515}
516
517static int atmel_sha_update_req(struct atmel_sha_dev *dd)
518{
519 struct ahash_request *req = dd->req;
520 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
521 int err;
522
523 dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
524 ctx->total, ctx->digcnt, (ctx->flags & SHA_FLAGS_FINUP) != 0);
525
526 if (ctx->flags & SHA_FLAGS_CPU)
527 err = atmel_sha_update_cpu(dd);
528 else
529 err = atmel_sha_update_dma_start(dd);
530
531	/* wait for DMA completion before we can take more data */
532 dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n",
533 err, ctx->digcnt);
534
535 return err;
536}
537
538static int atmel_sha_final_req(struct atmel_sha_dev *dd)
539{
540 struct ahash_request *req = dd->req;
541 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
542 int err = 0;
543 int count;
544
545 if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
546 atmel_sha_fill_padding(ctx, 0);
547 count = ctx->bufcnt;
548 ctx->bufcnt = 0;
549 err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
550 }
551 /* faster to handle last block with cpu */
552 else {
553 atmel_sha_fill_padding(ctx, 0);
554 count = ctx->bufcnt;
555 ctx->bufcnt = 0;
556 err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
557 }
558
559 dev_dbg(dd->dev, "final_req: err: %d\n", err);
560
561 return err;
562}
563
564static void atmel_sha_copy_hash(struct ahash_request *req)
565{
566 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
567 u32 *hash = (u32 *)ctx->digest;
568 int i;
569
570 if (likely(ctx->flags & SHA_FLAGS_SHA1))
571 for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
572 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
573 else
574 for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++)
575 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
576}
577
578static void atmel_sha_copy_ready_hash(struct ahash_request *req)
579{
580 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
581
582 if (!req->result)
583 return;
584
585 if (likely(ctx->flags & SHA_FLAGS_SHA1))
586 memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
587 else
588 memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
589}
590
591static int atmel_sha_finish(struct ahash_request *req)
592{
593 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
594 struct atmel_sha_dev *dd = ctx->dd;
595 int err = 0;
596
597 if (ctx->digcnt)
598 atmel_sha_copy_ready_hash(req);
599
600 dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt,
601 ctx->bufcnt);
602
603 return err;
604}
605
606static void atmel_sha_finish_req(struct ahash_request *req, int err)
607{
608 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
609 struct atmel_sha_dev *dd = ctx->dd;
610
611 if (!err) {
612 atmel_sha_copy_hash(req);
613 if (SHA_FLAGS_FINAL & dd->flags)
614 err = atmel_sha_finish(req);
615 } else {
616 ctx->flags |= SHA_FLAGS_ERROR;
617 }
618
619 /* atomic operation is not needed here */
620 dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
621 SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
622
623 clk_disable_unprepare(dd->iclk);
624
625 if (req->base.complete)
626 req->base.complete(&req->base, err);
627
628 /* handle new request */
629 tasklet_schedule(&dd->done_task);
630}
631
632static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
633{
634 clk_prepare_enable(dd->iclk);
635
636	if (!(SHA_FLAGS_INIT & dd->flags)) {
637 atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
638 atmel_sha_dualbuff_test(dd);
639 dd->flags |= SHA_FLAGS_INIT;
640 dd->err = 0;
641 }
642
643 return 0;
644}
645
646static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
647 struct ahash_request *req)
648{
649 struct crypto_async_request *async_req, *backlog;
650 struct atmel_sha_reqctx *ctx;
651 unsigned long flags;
652 int err = 0, ret = 0;
653
654 spin_lock_irqsave(&dd->lock, flags);
655 if (req)
656 ret = ahash_enqueue_request(&dd->queue, req);
657
658 if (SHA_FLAGS_BUSY & dd->flags) {
659 spin_unlock_irqrestore(&dd->lock, flags);
660 return ret;
661 }
662
663 backlog = crypto_get_backlog(&dd->queue);
664 async_req = crypto_dequeue_request(&dd->queue);
665 if (async_req)
666 dd->flags |= SHA_FLAGS_BUSY;
667
668 spin_unlock_irqrestore(&dd->lock, flags);
669
670 if (!async_req)
671 return ret;
672
673 if (backlog)
674 backlog->complete(backlog, -EINPROGRESS);
675
676 req = ahash_request_cast(async_req);
677 dd->req = req;
678 ctx = ahash_request_ctx(req);
679
680 dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
681 ctx->op, req->nbytes);
682
683 err = atmel_sha_hw_init(dd);
684
685 if (err)
686 goto err1;
687
688 if (ctx->op == SHA_OP_UPDATE) {
689 err = atmel_sha_update_req(dd);
690 if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) {
691 /* no final() after finup() */
692 err = atmel_sha_final_req(dd);
693 }
694 } else if (ctx->op == SHA_OP_FINAL) {
695 err = atmel_sha_final_req(dd);
696 }
697
698err1:
699 if (err != -EINPROGRESS)
700 /* done_task will not finish it, so do it here */
701 atmel_sha_finish_req(req, err);
702
703 dev_dbg(dd->dev, "exit, err: %d\n", err);
704
705 return ret;
706}
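
The queue handler is entered both by callers and by the done tasklet: enqueue, bail out if the engine is already busy, otherwise dequeue one request and mark the device busy until its completion re-runs the queue. A single-threaded toy model of that state machine (locking and backlog notification are omitted; all names are invented):

#include <stdio.h>

#define EINPROGRESS	115
#define BUSY		0x1

struct toy_dev {
	unsigned long flags;
	int queue[4];
	int head, tail;
};

static int toy_handle_queue(struct toy_dev *d, int req)
{
	int ret = 0;

	if (req) {
		d->queue[d->tail++ & 3] = req;
		ret = -EINPROGRESS;	/* caller waits for completion */
	}
	if (d->flags & BUSY)
		return ret;		/* completion will re-run us */
	if (d->head == d->tail)
		return ret;		/* queue empty */
	d->flags |= BUSY;
	printf("start req %d\n", d->queue[d->head++ & 3]);
	return ret;
}

static void toy_complete(struct toy_dev *d)
{
	d->flags &= ~BUSY;
	toy_handle_queue(d, 0);		/* like scheduling done_task */
}

int main(void)
{
	struct toy_dev d = { 0 };

	toy_handle_queue(&d, 1);	/* starts immediately */
	toy_handle_queue(&d, 2);	/* queued: engine busy */
	toy_complete(&d);		/* req 1 done, req 2 starts */
	toy_complete(&d);
	return 0;
}
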
707
708static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
709{
710 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
711 struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
712 struct atmel_sha_dev *dd = tctx->dd;
713
714 ctx->op = op;
715
716 return atmel_sha_handle_queue(dd, req);
717}
718
719static int atmel_sha_update(struct ahash_request *req)
720{
721 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
722
723 if (!req->nbytes)
724 return 0;
725
726 ctx->total = req->nbytes;
727 ctx->sg = req->src;
728 ctx->offset = 0;
729
730 if (ctx->flags & SHA_FLAGS_FINUP) {
731 if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
732 /* faster to use CPU for short transfers */
733 ctx->flags |= SHA_FLAGS_CPU;
734 } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
735 atmel_sha_append_sg(ctx);
736 return 0;
737 }
738 return atmel_sha_enqueue(req, SHA_OP_UPDATE);
739}
740
741static int atmel_sha_final(struct ahash_request *req)
742{
743 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
744 struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
745 struct atmel_sha_dev *dd = tctx->dd;
746
747 int err = 0;
748
749 ctx->flags |= SHA_FLAGS_FINUP;
750
751 if (ctx->flags & SHA_FLAGS_ERROR)
752 return 0; /* uncompleted hash is not needed */
753
754 if (ctx->bufcnt) {
755 return atmel_sha_enqueue(req, SHA_OP_FINAL);
756 } else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */
757 err = atmel_sha_hw_init(dd);
758 if (err)
759 goto err1;
760
761 dd->flags |= SHA_FLAGS_BUSY;
762 err = atmel_sha_final_req(dd);
763 } else {
764		/* copy ready hash */
765 return atmel_sha_finish(req);
766 }
767
768err1:
769 if (err != -EINPROGRESS)
770 /* done_task will not finish it, so do it here */
771 atmel_sha_finish_req(req, err);
772
773 return err;
774}
775
776static int atmel_sha_finup(struct ahash_request *req)
777{
778 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
779 int err1, err2;
780
781 ctx->flags |= SHA_FLAGS_FINUP;
782
783 err1 = atmel_sha_update(req);
784 if (err1 == -EINPROGRESS || err1 == -EBUSY)
785 return err1;
786
787 /*
788 * final() has to be always called to cleanup resources
789	 * even if update() failed, except for EINPROGRESS
790 */
791 err2 = atmel_sha_final(req);
792
793 return err1 ?: err2;
794}
795
796static int atmel_sha_digest(struct ahash_request *req)
797{
798 return atmel_sha_init(req) ?: atmel_sha_finup(req);
799}
800
801static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
802{
803 struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);
804 const char *alg_name = crypto_tfm_alg_name(tfm);
805
806 /* Allocate a fallback and abort if it failed. */
807 tctx->fallback = crypto_alloc_shash(alg_name, 0,
808 CRYPTO_ALG_NEED_FALLBACK);
809 if (IS_ERR(tctx->fallback)) {
810 pr_err("atmel-sha: fallback driver '%s' could not be loaded.\n",
811 alg_name);
812 return PTR_ERR(tctx->fallback);
813 }
814 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
815 sizeof(struct atmel_sha_reqctx) +
816 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
817
818 return 0;
819}
820
821static int atmel_sha_cra_init(struct crypto_tfm *tfm)
822{
823 return atmel_sha_cra_init_alg(tfm, NULL);
824}
825
826static void atmel_sha_cra_exit(struct crypto_tfm *tfm)
827{
828 struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);
829
830 crypto_free_shash(tctx->fallback);
831 tctx->fallback = NULL;
832}
833
834static struct ahash_alg sha_algs[] = {
835{
836 .init = atmel_sha_init,
837 .update = atmel_sha_update,
838 .final = atmel_sha_final,
839 .finup = atmel_sha_finup,
840 .digest = atmel_sha_digest,
841 .halg = {
842 .digestsize = SHA1_DIGEST_SIZE,
843 .base = {
844 .cra_name = "sha1",
845 .cra_driver_name = "atmel-sha1",
846 .cra_priority = 100,
847 .cra_flags = CRYPTO_ALG_ASYNC |
848 CRYPTO_ALG_NEED_FALLBACK,
849 .cra_blocksize = SHA1_BLOCK_SIZE,
850 .cra_ctxsize = sizeof(struct atmel_sha_ctx),
851 .cra_alignmask = 0,
852 .cra_module = THIS_MODULE,
853 .cra_init = atmel_sha_cra_init,
854 .cra_exit = atmel_sha_cra_exit,
855 }
856 }
857},
858{
859 .init = atmel_sha_init,
860 .update = atmel_sha_update,
861 .final = atmel_sha_final,
862 .finup = atmel_sha_finup,
863 .digest = atmel_sha_digest,
864 .halg = {
865 .digestsize = SHA256_DIGEST_SIZE,
866 .base = {
867 .cra_name = "sha256",
868 .cra_driver_name = "atmel-sha256",
869 .cra_priority = 100,
870 .cra_flags = CRYPTO_ALG_ASYNC |
871 CRYPTO_ALG_NEED_FALLBACK,
872 .cra_blocksize = SHA256_BLOCK_SIZE,
873 .cra_ctxsize = sizeof(struct atmel_sha_ctx),
874 .cra_alignmask = 0,
875 .cra_module = THIS_MODULE,
876 .cra_init = atmel_sha_cra_init,
877 .cra_exit = atmel_sha_cra_exit,
878 }
879 }
880},
881};
882
883static void atmel_sha_done_task(unsigned long data)
884{
885 struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
886 int err = 0;
887
888 if (!(SHA_FLAGS_BUSY & dd->flags)) {
889 atmel_sha_handle_queue(dd, NULL);
890 return;
891 }
892
893 if (SHA_FLAGS_CPU & dd->flags) {
894 if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
895 dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
896 goto finish;
897 }
898 } else if (SHA_FLAGS_DMA_READY & dd->flags) {
899 if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
900 dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
901 atmel_sha_update_dma_stop(dd);
902 if (dd->err) {
903 err = dd->err;
904 goto finish;
905 }
906 }
907 if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
908 /* hash or semi-hash ready */
909 dd->flags &= ~(SHA_FLAGS_DMA_READY |
910 SHA_FLAGS_OUTPUT_READY);
911 err = atmel_sha_update_dma_start(dd);
912 if (err != -EINPROGRESS)
913 goto finish;
914 }
915 }
916 return;
917
918finish:
919	/* finish current request */
920 atmel_sha_finish_req(dd->req, err);
921}
922
923static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
924{
925 struct atmel_sha_dev *sha_dd = dev_id;
926 u32 reg;
927
928 reg = atmel_sha_read(sha_dd, SHA_ISR);
929 if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
930 atmel_sha_write(sha_dd, SHA_IDR, reg);
931 if (SHA_FLAGS_BUSY & sha_dd->flags) {
932 sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
933 if (!(SHA_FLAGS_CPU & sha_dd->flags))
934 sha_dd->flags |= SHA_FLAGS_DMA_READY;
935 tasklet_schedule(&sha_dd->done_task);
936 } else {
937 dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
938 }
939 return IRQ_HANDLED;
940 }
941
942 return IRQ_NONE;
943}
944
945static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
946{
947 int i;
948
949 for (i = 0; i < ARRAY_SIZE(sha_algs); i++)
950 crypto_unregister_ahash(&sha_algs[i]);
951}
952
953static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
954{
955 int err, i, j;
956
957 for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
958 err = crypto_register_ahash(&sha_algs[i]);
959 if (err)
960 goto err_sha_algs;
961 }
962
963 return 0;
964
965err_sha_algs:
966 for (j = 0; j < i; j++)
967 crypto_unregister_ahash(&sha_algs[j]);
968
969 return err;
970}
971
972static int __devinit atmel_sha_probe(struct platform_device *pdev)
973{
974 struct atmel_sha_dev *sha_dd;
975 struct device *dev = &pdev->dev;
976 struct resource *sha_res;
977 unsigned long sha_phys_size;
978 int err;
979
980 sha_dd = kzalloc(sizeof(struct atmel_sha_dev), GFP_KERNEL);
981 if (sha_dd == NULL) {
982 dev_err(dev, "unable to alloc data struct.\n");
983 err = -ENOMEM;
984 goto sha_dd_err;
985 }
986
987 sha_dd->dev = dev;
988
989 platform_set_drvdata(pdev, sha_dd);
990
991 INIT_LIST_HEAD(&sha_dd->list);
992
993 tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
994 (unsigned long)sha_dd);
995
996 crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
997
998 sha_dd->irq = -1;
999
1000 /* Get the base address */
1001 sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1002 if (!sha_res) {
1003 dev_err(dev, "no MEM resource info\n");
1004 err = -ENODEV;
1005 goto res_err;
1006 }
1007 sha_dd->phys_base = sha_res->start;
1008 sha_phys_size = resource_size(sha_res);
1009
1010 /* Get the IRQ */
1011 sha_dd->irq = platform_get_irq(pdev, 0);
1012 if (sha_dd->irq < 0) {
1013 dev_err(dev, "no IRQ resource info\n");
1014 err = sha_dd->irq;
1015 goto res_err;
1016 }
1017
1018 err = request_irq(sha_dd->irq, atmel_sha_irq, IRQF_SHARED, "atmel-sha",
1019 sha_dd);
1020 if (err) {
1021 dev_err(dev, "unable to request sha irq.\n");
1022 goto res_err;
1023 }
1024
1025 /* Initializing the clock */
1026 sha_dd->iclk = clk_get(&pdev->dev, NULL);
1027 if (IS_ERR(sha_dd->iclk)) {
1028		dev_err(dev, "clock initialization failed.\n");
1029 err = PTR_ERR(sha_dd->iclk);
1030 goto clk_err;
1031 }
1032
1033 sha_dd->io_base = ioremap(sha_dd->phys_base, sha_phys_size);
1034 if (!sha_dd->io_base) {
1035 dev_err(dev, "can't ioremap\n");
1036 err = -ENOMEM;
1037 goto sha_io_err;
1038 }
1039
1040 spin_lock(&atmel_sha.lock);
1041 list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
1042 spin_unlock(&atmel_sha.lock);
1043
1044 err = atmel_sha_register_algs(sha_dd);
1045 if (err)
1046 goto err_algs;
1047
1048 dev_info(dev, "Atmel SHA1/SHA256\n");
1049
1050 return 0;
1051
1052err_algs:
1053 spin_lock(&atmel_sha.lock);
1054 list_del(&sha_dd->list);
1055 spin_unlock(&atmel_sha.lock);
1056 iounmap(sha_dd->io_base);
1057sha_io_err:
1058 clk_put(sha_dd->iclk);
1059clk_err:
1060 free_irq(sha_dd->irq, sha_dd);
1061res_err:
1062 tasklet_kill(&sha_dd->done_task);
1063 kfree(sha_dd);
1064 sha_dd = NULL;
1065sha_dd_err:
1066 dev_err(dev, "initialization failed.\n");
1067
1068 return err;
1069}
1070
1071static int __devexit atmel_sha_remove(struct platform_device *pdev)
1072{
1073	struct atmel_sha_dev *sha_dd;
1074
1075 sha_dd = platform_get_drvdata(pdev);
1076 if (!sha_dd)
1077 return -ENODEV;
1078 spin_lock(&atmel_sha.lock);
1079 list_del(&sha_dd->list);
1080 spin_unlock(&atmel_sha.lock);
1081
1082 atmel_sha_unregister_algs(sha_dd);
1083
1084 tasklet_kill(&sha_dd->done_task);
1085
1086 iounmap(sha_dd->io_base);
1087
1088 clk_put(sha_dd->iclk);
1089
1090 if (sha_dd->irq >= 0)
1091 free_irq(sha_dd->irq, sha_dd);
1092
1093 kfree(sha_dd);
1094 sha_dd = NULL;
1095
1096 return 0;
1097}
1098
1099static struct platform_driver atmel_sha_driver = {
1100 .probe = atmel_sha_probe,
1101 .remove = __devexit_p(atmel_sha_remove),
1102 .driver = {
1103 .name = "atmel_sha",
1104 .owner = THIS_MODULE,
1105 },
1106};
1107
1108module_platform_driver(atmel_sha_driver);
1109
1110MODULE_DESCRIPTION("Atmel SHA1/SHA256 hw acceleration support.");
1111MODULE_LICENSE("GPL v2");
1112MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
diff --git a/drivers/crypto/atmel-tdes-regs.h b/drivers/crypto/atmel-tdes-regs.h
new file mode 100644
index 000000000000..5ac2a900d80c
--- /dev/null
+++ b/drivers/crypto/atmel-tdes-regs.h
@@ -0,0 +1,89 @@
1#ifndef __ATMEL_TDES_REGS_H__
2#define __ATMEL_TDES_REGS_H__
3
4#define TDES_CR 0x00
5#define TDES_CR_START (1 << 0)
6#define TDES_CR_SWRST (1 << 8)
7#define TDES_CR_LOADSEED (1 << 16)
8
9#define TDES_MR 0x04
10#define TDES_MR_CYPHER_DEC (0 << 0)
11#define TDES_MR_CYPHER_ENC (1 << 0)
12#define TDES_MR_TDESMOD_MASK (0x3 << 1)
13#define TDES_MR_TDESMOD_DES (0x0 << 1)
14#define TDES_MR_TDESMOD_TDES (0x1 << 1)
15#define TDES_MR_TDESMOD_XTEA (0x2 << 1)
16#define TDES_MR_KEYMOD_3KEY (0 << 4)
17#define TDES_MR_KEYMOD_2KEY (1 << 4)
18#define TDES_MR_SMOD_MASK (0x3 << 8)
19#define TDES_MR_SMOD_MANUAL (0x0 << 8)
20#define TDES_MR_SMOD_AUTO (0x1 << 8)
21#define TDES_MR_SMOD_PDC (0x2 << 8)
22#define TDES_MR_OPMOD_MASK (0x3 << 12)
23#define TDES_MR_OPMOD_ECB (0x0 << 12)
24#define TDES_MR_OPMOD_CBC (0x1 << 12)
25#define TDES_MR_OPMOD_OFB (0x2 << 12)
26#define TDES_MR_OPMOD_CFB (0x3 << 12)
27#define TDES_MR_LOD (0x1 << 15)
28#define TDES_MR_CFBS_MASK (0x3 << 16)
29#define TDES_MR_CFBS_64b (0x0 << 16)
30#define TDES_MR_CFBS_32b (0x1 << 16)
31#define TDES_MR_CFBS_16b (0x2 << 16)
32#define TDES_MR_CFBS_8b (0x3 << 16)
33#define TDES_MR_CKEY_MASK (0xF << 20)
34#define TDES_MR_CKEY_OFFSET 20
35#define TDES_MR_CTYPE_MASK (0x3F << 24)
36#define TDES_MR_CTYPE_OFFSET 24
37
38#define TDES_IER 0x10
39#define TDES_IDR 0x14
40#define TDES_IMR 0x18
41#define TDES_ISR 0x1C
42#define TDES_INT_DATARDY (1 << 0)
43#define TDES_INT_ENDRX (1 << 1)
44#define TDES_INT_ENDTX (1 << 2)
45#define TDES_INT_RXBUFF (1 << 3)
46#define TDES_INT_TXBUFE (1 << 4)
47#define TDES_INT_URAD (1 << 8)
48#define TDES_ISR_URAT_MASK (0x3 << 12)
49#define TDES_ISR_URAT_IDR (0x0 << 12)
50#define TDES_ISR_URAT_ODR (0x1 << 12)
51#define TDES_ISR_URAT_MR (0x2 << 12)
52#define TDES_ISR_URAT_WO (0x3 << 12)
53
54
55#define TDES_KEY1W1R 0x20
56#define TDES_KEY1W2R 0x24
57#define TDES_KEY2W1R 0x28
58#define TDES_KEY2W2R 0x2C
59#define TDES_KEY3W1R 0x30
60#define TDES_KEY3W2R 0x34
61#define TDES_IDATA1R 0x40
62#define TDES_IDATA2R 0x44
63#define TDES_ODATA1R 0x50
64#define TDES_ODATA2R 0x54
65#define TDES_IV1R 0x60
66#define TDES_IV2R 0x64
67
68#define TDES_XTEARNDR 0x70
69#define TDES_XTEARNDR_XTEA_RNDS_MASK (0x3F << 0)
70#define TDES_XTEARNDR_XTEA_RNDS_OFFSET 0
71
72#define TDES_RPR 0x100
73#define TDES_RCR 0x104
74#define TDES_TPR 0x108
75#define TDES_TCR 0x10C
76#define TDES_RNPR		0x110
77#define TDES_RNCR		0x114
78#define TDES_TNPR 0x118
79#define TDES_TNCR 0x11C
80#define TDES_PTCR 0x120
81#define TDES_PTCR_RXTEN (1 << 0)
82#define TDES_PTCR_RXTDIS (1 << 1)
83#define TDES_PTCR_TXTEN (1 << 8)
84#define TDES_PTCR_TXTDIS (1 << 9)
85#define TDES_PTSR 0x124
86#define TDES_PTSR_RXTEN (1 << 0)
87#define TDES_PTSR_TXTEN (1 << 8)
88
89#endif /* __ATMEL_TDES_REGS_H__ */
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
new file mode 100644
index 000000000000..eb2b61e57e2d
--- /dev/null
+++ b/drivers/crypto/atmel-tdes.c
@@ -0,0 +1,1215 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for ATMEL DES/TDES HW acceleration.
5 *
6 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
7 * Author: Nicolas Royer <nicolas@eukrea.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 *
13 * Some ideas are from omap-aes.c drivers.
14 */
15
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/err.h>
21#include <linux/clk.h>
22#include <linux/io.h>
23#include <linux/hw_random.h>
24#include <linux/platform_device.h>
25
26#include <linux/device.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/errno.h>
30#include <linux/interrupt.h>
31#include <linux/kernel.h>
32#include <linux/clk.h>
33#include <linux/irq.h>
34#include <linux/io.h>
35#include <linux/platform_device.h>
36#include <linux/scatterlist.h>
37#include <linux/dma-mapping.h>
38#include <linux/delay.h>
39#include <linux/crypto.h>
40#include <linux/cryptohash.h>
41#include <crypto/scatterwalk.h>
42#include <crypto/algapi.h>
43#include <crypto/des.h>
44#include <crypto/hash.h>
45#include <crypto/internal/hash.h>
46#include "atmel-tdes-regs.h"
47
48/* TDES flags */
49#define TDES_FLAGS_MODE_MASK 0x007f
50#define TDES_FLAGS_ENCRYPT BIT(0)
51#define TDES_FLAGS_CBC BIT(1)
52#define TDES_FLAGS_CFB BIT(2)
53#define TDES_FLAGS_CFB8 BIT(3)
54#define TDES_FLAGS_CFB16 BIT(4)
55#define TDES_FLAGS_CFB32 BIT(5)
56#define TDES_FLAGS_OFB BIT(6)
57
58#define TDES_FLAGS_INIT BIT(16)
59#define TDES_FLAGS_FAST BIT(17)
60#define TDES_FLAGS_BUSY BIT(18)
61
62#define ATMEL_TDES_QUEUE_LENGTH 1
63
64#define CFB8_BLOCK_SIZE 1
65#define CFB16_BLOCK_SIZE 2
66#define CFB32_BLOCK_SIZE 4
67#define CFB64_BLOCK_SIZE 8
68
69
70struct atmel_tdes_dev;
71
72struct atmel_tdes_ctx {
73 struct atmel_tdes_dev *dd;
74
75 int keylen;
76 u32 key[3*DES_KEY_SIZE / sizeof(u32)];
77 unsigned long flags;
78};
79
80struct atmel_tdes_reqctx {
81 unsigned long mode;
82};
83
84struct atmel_tdes_dev {
85 struct list_head list;
86 unsigned long phys_base;
87 void __iomem *io_base;
88
89 struct atmel_tdes_ctx *ctx;
90 struct device *dev;
91 struct clk *iclk;
92 int irq;
93
94 unsigned long flags;
95 int err;
96
97 spinlock_t lock;
98 struct crypto_queue queue;
99
100 struct tasklet_struct done_task;
101 struct tasklet_struct queue_task;
102
103 struct ablkcipher_request *req;
104 size_t total;
105
106 struct scatterlist *in_sg;
107 size_t in_offset;
108 struct scatterlist *out_sg;
109 size_t out_offset;
110
111 size_t buflen;
112 size_t dma_size;
113
114 void *buf_in;
115 int dma_in;
116 dma_addr_t dma_addr_in;
117
118 void *buf_out;
119 int dma_out;
120 dma_addr_t dma_addr_out;
121};
122
123struct atmel_tdes_drv {
124 struct list_head dev_list;
125 spinlock_t lock;
126};
127
128static struct atmel_tdes_drv atmel_tdes = {
129 .dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list),
130 .lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock),
131};
132
133static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset,
134 void *buf, size_t buflen, size_t total, int out)
135{
136 unsigned int count, off = 0;
137
138 while (buflen && total) {
139 count = min((*sg)->length - *offset, total);
140 count = min(count, buflen);
141
142 if (!count)
143 return off;
144
145 scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);
146
147 off += count;
148 buflen -= count;
149 *offset += count;
150 total -= count;
151
152 if (*offset == (*sg)->length) {
153 *sg = sg_next(*sg);
154 if (*sg)
155 *offset = 0;
156 else
157 total = 0;
158 }
159 }
160
161 return off;
162}
163
164static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
165{
166 return readl_relaxed(dd->io_base + offset);
167}
168
169static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
170 u32 offset, u32 value)
171{
172 writel_relaxed(value, dd->io_base + offset);
173}
174
175static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
176 u32 *value, int count)
177{
178 for (; count--; value++, offset += 4)
179 atmel_tdes_write(dd, offset, *value);
180}
181
182static struct atmel_tdes_dev *atmel_tdes_find_dev(struct atmel_tdes_ctx *ctx)
183{
184 struct atmel_tdes_dev *tdes_dd = NULL;
185 struct atmel_tdes_dev *tmp;
186
187 spin_lock_bh(&atmel_tdes.lock);
188 if (!ctx->dd) {
189 list_for_each_entry(tmp, &atmel_tdes.dev_list, list) {
190 tdes_dd = tmp;
191 break;
192 }
193 ctx->dd = tdes_dd;
194 } else {
195 tdes_dd = ctx->dd;
196 }
197 spin_unlock_bh(&atmel_tdes.lock);
198
199 return tdes_dd;
200}
201
202static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
203{
204 clk_prepare_enable(dd->iclk);
205
206 if (!(dd->flags & TDES_FLAGS_INIT)) {
207 atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
208 dd->flags |= TDES_FLAGS_INIT;
209 dd->err = 0;
210 }
211
212 return 0;
213}
214
215static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
216{
217 int err;
218 u32 valcr = 0, valmr = TDES_MR_SMOD_PDC;
219
220 err = atmel_tdes_hw_init(dd);
221
222 if (err)
223 return err;
224
225 atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
226
227 /* MR register must be set before IV registers */
228 if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
229 valmr |= TDES_MR_KEYMOD_3KEY;
230 valmr |= TDES_MR_TDESMOD_TDES;
231 } else if (dd->ctx->keylen > DES_KEY_SIZE) {
232 valmr |= TDES_MR_KEYMOD_2KEY;
233 valmr |= TDES_MR_TDESMOD_TDES;
234 } else {
235 valmr |= TDES_MR_TDESMOD_DES;
236 }
237
238 if (dd->flags & TDES_FLAGS_CBC) {
239 valmr |= TDES_MR_OPMOD_CBC;
240 } else if (dd->flags & TDES_FLAGS_CFB) {
241 valmr |= TDES_MR_OPMOD_CFB;
242
243 if (dd->flags & TDES_FLAGS_CFB8)
244 valmr |= TDES_MR_CFBS_8b;
245 else if (dd->flags & TDES_FLAGS_CFB16)
246 valmr |= TDES_MR_CFBS_16b;
247 else if (dd->flags & TDES_FLAGS_CFB32)
248 valmr |= TDES_MR_CFBS_32b;
249 } else if (dd->flags & TDES_FLAGS_OFB) {
250 valmr |= TDES_MR_OPMOD_OFB;
251 }
252
253 if ((dd->flags & TDES_FLAGS_ENCRYPT) || (dd->flags & TDES_FLAGS_OFB))
254 valmr |= TDES_MR_CYPHER_ENC;
255
256 atmel_tdes_write(dd, TDES_CR, valcr);
257 atmel_tdes_write(dd, TDES_MR, valmr);
258
259 atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
260 dd->ctx->keylen >> 2);
261
262 if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) ||
263 (dd->flags & TDES_FLAGS_OFB)) && dd->req->info) {
264 atmel_tdes_write_n(dd, TDES_IV1R, dd->req->info, 2);
265 }
266
267 return 0;
268}
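
The mode word is assembled purely from the TDES_MR_* field macros defined in atmel-tdes-regs.h. As a sanity check, this small sketch composes the value atmel_tdes_write_ctrl() would build for 3-key TDES in CBC mode, encrypting via PDC transfers (macro values copied from the header above):

#include <stdio.h>
#include <stdint.h>

/* Field values copied from atmel-tdes-regs.h. */
#define TDES_MR_CYPHER_ENC	(1 << 0)
#define TDES_MR_TDESMOD_TDES	(0x1 << 1)
#define TDES_MR_KEYMOD_3KEY	(0 << 4)
#define TDES_MR_SMOD_PDC	(0x2 << 8)
#define TDES_MR_OPMOD_CBC	(0x1 << 12)

int main(void)
{
	/* Mode word for 3-key TDES, CBC, encrypt, PDC transfers. */
	uint32_t valmr = TDES_MR_SMOD_PDC
		       | TDES_MR_KEYMOD_3KEY
		       | TDES_MR_TDESMOD_TDES
		       | TDES_MR_OPMOD_CBC
		       | TDES_MR_CYPHER_ENC;

	printf("TDES_MR = 0x%08x\n", (unsigned)valmr);	/* 0x00001203 */
	return 0;
}
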
269
270static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
271{
272 int err = 0;
273 size_t count;
274
275 atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
276
277 if (dd->flags & TDES_FLAGS_FAST) {
278 dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
279 dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
280 } else {
281 dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
282 dd->dma_size, DMA_FROM_DEVICE);
283
284 /* copy data */
285 count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
286 dd->buf_out, dd->buflen, dd->dma_size, 1);
287 if (count != dd->dma_size) {
288 err = -EINVAL;
289			pr_err("not all data converted: %zu\n", count);
290 }
291 }
292
293 return err;
294}
295
296static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
297{
298 int err = -ENOMEM;
299
300 dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
301 dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
302 dd->buflen = PAGE_SIZE;
303 dd->buflen &= ~(DES_BLOCK_SIZE - 1);
304
305 if (!dd->buf_in || !dd->buf_out) {
306 dev_err(dd->dev, "unable to alloc pages.\n");
307 goto err_alloc;
308 }
309
310 /* MAP here */
311 dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
312 dd->buflen, DMA_TO_DEVICE);
313 if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
314 dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
315 err = -EINVAL;
316 goto err_map_in;
317 }
318
319 dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
320 dd->buflen, DMA_FROM_DEVICE);
321 if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
322 dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
323 err = -EINVAL;
324 goto err_map_out;
325 }
326
327 return 0;
328
329err_map_out:
330 dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
331 DMA_TO_DEVICE);
332err_map_in:
333 free_page((unsigned long)dd->buf_out);
334 free_page((unsigned long)dd->buf_in);
335err_alloc:
336 if (err)
337 pr_err("error: %d\n", err);
338 return err;
339}
340
341static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
342{
343 dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
344 DMA_FROM_DEVICE);
345 dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
346 DMA_TO_DEVICE);
347 free_page((unsigned long)dd->buf_out);
348 free_page((unsigned long)dd->buf_in);
349}
350
351static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
352 dma_addr_t dma_addr_out, int length)
353{
354 struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
355 struct atmel_tdes_dev *dd = ctx->dd;
356 int len32;
357
358 dd->dma_size = length;
359
360 if (!(dd->flags & TDES_FLAGS_FAST)) {
361 dma_sync_single_for_device(dd->dev, dma_addr_in, length,
362 DMA_TO_DEVICE);
363 }
364
365 if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB8))
366 len32 = DIV_ROUND_UP(length, sizeof(u8));
367 else if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB16))
368 len32 = DIV_ROUND_UP(length, sizeof(u16));
369 else
370 len32 = DIV_ROUND_UP(length, sizeof(u32));
371
372 atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
373 atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
374 atmel_tdes_write(dd, TDES_TCR, len32);
375 atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
376 atmel_tdes_write(dd, TDES_RCR, len32);
377
378 /* Enable Interrupt */
379 atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);
380
381 /* Start DMA transfer */
382 atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);
383
384 return 0;
385}
386
387static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd)
388{
389 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
390 crypto_ablkcipher_reqtfm(dd->req));
391 int err, fast = 0, in, out;
392 size_t count;
393 dma_addr_t addr_in, addr_out;
394
395 if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
396 /* check for alignment */
397 in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
398 out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));
399
400 fast = in && out;
401 }
402
403 if (fast) {
404 count = min(dd->total, sg_dma_len(dd->in_sg));
405 count = min(count, sg_dma_len(dd->out_sg));
406
407 if (count != dd->total) {
408 pr_err("request length != buffer length\n");
409 return -EINVAL;
410 }
411
412 err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
413 if (!err) {
414 dev_err(dd->dev, "dma_map_sg() error\n");
415 return -EINVAL;
416 }
417
418 err = dma_map_sg(dd->dev, dd->out_sg, 1,
419 DMA_FROM_DEVICE);
420 if (!err) {
421 dev_err(dd->dev, "dma_map_sg() error\n");
422 dma_unmap_sg(dd->dev, dd->in_sg, 1,
423 DMA_TO_DEVICE);
424 return -EINVAL;
425 }
426
427 addr_in = sg_dma_address(dd->in_sg);
428 addr_out = sg_dma_address(dd->out_sg);
429
430 dd->flags |= TDES_FLAGS_FAST;
431
432 } else {
433 /* use cache buffers */
434 count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
435 dd->buf_in, dd->buflen, dd->total, 0);
436
437 addr_in = dd->dma_addr_in;
438 addr_out = dd->dma_addr_out;
439
440 dd->flags &= ~TDES_FLAGS_FAST;
441
442 }
443
444 dd->total -= count;
445
446 err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count);
447	if (err && (dd->flags & TDES_FLAGS_FAST)) {
448		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
449		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
450 }
451
452 return err;
453}
454
455
456static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
457{
458 struct ablkcipher_request *req = dd->req;
459
460 clk_disable_unprepare(dd->iclk);
461
462 dd->flags &= ~TDES_FLAGS_BUSY;
463
464 req->base.complete(&req->base, err);
465}
466
467static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
468 struct ablkcipher_request *req)
469{
470 struct crypto_async_request *async_req, *backlog;
471 struct atmel_tdes_ctx *ctx;
472 struct atmel_tdes_reqctx *rctx;
473 unsigned long flags;
474 int err, ret = 0;
475
476 spin_lock_irqsave(&dd->lock, flags);
477 if (req)
478 ret = ablkcipher_enqueue_request(&dd->queue, req);
479 if (dd->flags & TDES_FLAGS_BUSY) {
480 spin_unlock_irqrestore(&dd->lock, flags);
481 return ret;
482 }
483 backlog = crypto_get_backlog(&dd->queue);
484 async_req = crypto_dequeue_request(&dd->queue);
485 if (async_req)
486 dd->flags |= TDES_FLAGS_BUSY;
487 spin_unlock_irqrestore(&dd->lock, flags);
488
489 if (!async_req)
490 return ret;
491
492 if (backlog)
493 backlog->complete(backlog, -EINPROGRESS);
494
495 req = ablkcipher_request_cast(async_req);
496
497 /* assign new request to device */
498 dd->req = req;
499 dd->total = req->nbytes;
500 dd->in_offset = 0;
501 dd->in_sg = req->src;
502 dd->out_offset = 0;
503 dd->out_sg = req->dst;
504
505 rctx = ablkcipher_request_ctx(req);
506 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
507 rctx->mode &= TDES_FLAGS_MODE_MASK;
508 dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
509 dd->ctx = ctx;
510 ctx->dd = dd;
511
512 err = atmel_tdes_write_ctrl(dd);
513 if (!err)
514 err = atmel_tdes_crypt_dma_start(dd);
515 if (err) {
516		/* done_task will not finish it, so do it here */
517 atmel_tdes_finish_req(dd, err);
518 tasklet_schedule(&dd->queue_task);
519 }
520
521 return ret;
522}
523
524
525static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode)
526{
527 struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(
528 crypto_ablkcipher_reqtfm(req));
529 struct atmel_tdes_reqctx *rctx = ablkcipher_request_ctx(req);
530 struct atmel_tdes_dev *dd;
531
532 if (mode & TDES_FLAGS_CFB8) {
533 if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
534			pr_err("request size is not a whole number of CFB8 blocks\n");
535 return -EINVAL;
536 }
537 } else if (mode & TDES_FLAGS_CFB16) {
538 if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
539			pr_err("request size is not a whole number of CFB16 blocks\n");
540 return -EINVAL;
541 }
542 } else if (mode & TDES_FLAGS_CFB32) {
543 if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
544			pr_err("request size is not a whole number of CFB32 blocks\n");
545 return -EINVAL;
546 }
547 } else if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
548		pr_err("request size is not a whole number of DES blocks\n");
549 return -EINVAL;
550 }
551
552 dd = atmel_tdes_find_dev(ctx);
553 if (!dd)
554 return -ENODEV;
555
556 rctx->mode = mode;
557
558 return atmel_tdes_handle_queue(dd, req);
559}
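
Each CFB variant insists that the request length be a whole number of its blocks, which IS_ALIGNED() can test with a simple mask because every block size involved is a power of two. A quick userspace illustration (using a simplified IS_ALIGNED, not the kernel's exact macro):

#include <stdio.h>

/* Same idea as the kernel's IS_ALIGNED(); valid because every block
 * size here (1, 2, 4, 8 bytes) is a power of two. */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

int main(void)
{
	unsigned int nbytes = 20;

	printf("CFB8:  %s\n", IS_ALIGNED(nbytes, 1) ? "ok" : "reject");
	printf("CFB16: %s\n", IS_ALIGNED(nbytes, 2) ? "ok" : "reject");
	printf("CFB32: %s\n", IS_ALIGNED(nbytes, 4) ? "ok" : "reject");
	printf("DES:   %s\n", IS_ALIGNED(nbytes, 8) ? "ok" : "reject");
	return 0;
}
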
560
561static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
562 unsigned int keylen)
563{
564 u32 tmp[DES_EXPKEY_WORDS];
565 int err;
566 struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
567
568 struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
569
570 if (keylen != DES_KEY_SIZE) {
571 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
572 return -EINVAL;
573 }
574
575 err = des_ekey(tmp, key);
576 if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
577 ctfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
578 return -EINVAL;
579 }
580
581 memcpy(ctx->key, key, keylen);
582 ctx->keylen = keylen;
583
584 return 0;
585}
586
587static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
588 unsigned int keylen)
589{
590 struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
591 const char *alg_name;
592
593 alg_name = crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm));
594
595	/*
596	 * HW bug: cfb does not work in 3-key mode, so accept 2-key only.
597	 */
598 if (strstr(alg_name, "cfb") && (keylen != 2*DES_KEY_SIZE)) {
599 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
600 return -EINVAL;
601 } else if ((keylen != 2*DES_KEY_SIZE) && (keylen != 3*DES_KEY_SIZE)) {
602 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
603 return -EINVAL;
604 }
605
606 memcpy(ctx->key, key, keylen);
607 ctx->keylen = keylen;
608
609 return 0;
610}
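
Taken together, the two checks above mean the key length alone selects 2-key versus 3-key operation. A caller-side summary (illustrative, not part of the patch):

	/*
	 * keylen == 2 * DES_KEY_SIZE (16 bytes): accepted by every
	 *     des3_ede mode registered below;
	 * keylen == 3 * DES_KEY_SIZE (24 bytes): accepted by ecb/cbc/ofb,
	 *     rejected with -EINVAL by the cfb* modes because of the
	 *     hardware bug noted above.
	 */
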
611
612static int atmel_tdes_ecb_encrypt(struct ablkcipher_request *req)
613{
614 return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT);
615}
616
617static int atmel_tdes_ecb_decrypt(struct ablkcipher_request *req)
618{
619 return atmel_tdes_crypt(req, 0);
620}
621
622static int atmel_tdes_cbc_encrypt(struct ablkcipher_request *req)
623{
624 return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC);
625}
626
627static int atmel_tdes_cbc_decrypt(struct ablkcipher_request *req)
628{
629 return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
630}
631static int atmel_tdes_cfb_encrypt(struct ablkcipher_request *req)
632{
633 return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB);
634}
635
636static int atmel_tdes_cfb_decrypt(struct ablkcipher_request *req)
637{
638 return atmel_tdes_crypt(req, TDES_FLAGS_CFB);
639}
640
641static int atmel_tdes_cfb8_encrypt(struct ablkcipher_request *req)
642{
643 return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
644 TDES_FLAGS_CFB8);
645}
646
647static int atmel_tdes_cfb8_decrypt(struct ablkcipher_request *req)
648{
649 return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8);
650}
651
652static int atmel_tdes_cfb16_encrypt(struct ablkcipher_request *req)
653{
654 return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
655 TDES_FLAGS_CFB16);
656}
657
658static int atmel_tdes_cfb16_decrypt(struct ablkcipher_request *req)
659{
660 return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16);
661}
662
663static int atmel_tdes_cfb32_encrypt(struct ablkcipher_request *req)
664{
665 return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
666 TDES_FLAGS_CFB32);
667}
668
669static int atmel_tdes_cfb32_decrypt(struct ablkcipher_request *req)
670{
671 return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32);
672}
673
674static int atmel_tdes_ofb_encrypt(struct ablkcipher_request *req)
675{
676 return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB);
677}
678
679static int atmel_tdes_ofb_decrypt(struct ablkcipher_request *req)
680{
681 return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
682}
683
684static int atmel_tdes_cra_init(struct crypto_tfm *tfm)
685{
686 tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_tdes_reqctx);
687
688 return 0;
689}
690
691static void atmel_tdes_cra_exit(struct crypto_tfm *tfm)
692{
693}
694
695static struct crypto_alg tdes_algs[] = {
696{
697 .cra_name = "ecb(des)",
698 .cra_driver_name = "atmel-ecb-des",
699 .cra_priority = 100,
700 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
701 .cra_blocksize = DES_BLOCK_SIZE,
702 .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
703 .cra_alignmask = 0,
704 .cra_type = &crypto_ablkcipher_type,
705 .cra_module = THIS_MODULE,
706 .cra_init = atmel_tdes_cra_init,
707 .cra_exit = atmel_tdes_cra_exit,
708 .cra_u.ablkcipher = {
709 .min_keysize = DES_KEY_SIZE,
710 .max_keysize = DES_KEY_SIZE,
711 .setkey = atmel_des_setkey,
712 .encrypt = atmel_tdes_ecb_encrypt,
713 .decrypt = atmel_tdes_ecb_decrypt,
714 }
715},
716{
717 .cra_name = "cbc(des)",
718 .cra_driver_name = "atmel-cbc-des",
719 .cra_priority = 100,
720 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
721 .cra_blocksize = DES_BLOCK_SIZE,
722 .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
723 .cra_alignmask = 0,
724 .cra_type = &crypto_ablkcipher_type,
725 .cra_module = THIS_MODULE,
726 .cra_init = atmel_tdes_cra_init,
727 .cra_exit = atmel_tdes_cra_exit,
728 .cra_u.ablkcipher = {
729 .min_keysize = DES_KEY_SIZE,
730 .max_keysize = DES_KEY_SIZE,
731 .ivsize = DES_BLOCK_SIZE,
732 .setkey = atmel_des_setkey,
733 .encrypt = atmel_tdes_cbc_encrypt,
734 .decrypt = atmel_tdes_cbc_decrypt,
735 }
736},
737{
738 .cra_name = "cfb(des)",
739 .cra_driver_name = "atmel-cfb-des",
740 .cra_priority = 100,
741 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
742 .cra_blocksize = DES_BLOCK_SIZE,
743 .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
744 .cra_alignmask = 0,
745 .cra_type = &crypto_ablkcipher_type,
746 .cra_module = THIS_MODULE,
747 .cra_init = atmel_tdes_cra_init,
748 .cra_exit = atmel_tdes_cra_exit,
749 .cra_u.ablkcipher = {
750 .min_keysize = DES_KEY_SIZE,
751 .max_keysize = DES_KEY_SIZE,
752 .ivsize = DES_BLOCK_SIZE,
753 .setkey = atmel_des_setkey,
754 .encrypt = atmel_tdes_cfb_encrypt,
755 .decrypt = atmel_tdes_cfb_decrypt,
756 }
757},
758{
759 .cra_name = "cfb8(des)",
760 .cra_driver_name = "atmel-cfb8-des",
761 .cra_priority = 100,
762 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
763 .cra_blocksize = CFB8_BLOCK_SIZE,
764 .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
765 .cra_alignmask = 0,
766 .cra_type = &crypto_ablkcipher_type,
767 .cra_module = THIS_MODULE,
768 .cra_init = atmel_tdes_cra_init,
769 .cra_exit = atmel_tdes_cra_exit,
770 .cra_u.ablkcipher = {
771 .min_keysize = DES_KEY_SIZE,
772 .max_keysize = DES_KEY_SIZE,
773 .ivsize = DES_BLOCK_SIZE,
774 .setkey = atmel_des_setkey,
775 .encrypt = atmel_tdes_cfb8_encrypt,
776 .decrypt = atmel_tdes_cfb8_decrypt,
777 }
778},
779{
780 .cra_name = "cfb16(des)",
781 .cra_driver_name = "atmel-cfb16-des",
782 .cra_priority = 100,
783 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
784 .cra_blocksize = CFB16_BLOCK_SIZE,
785 .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
786 .cra_alignmask = 0,
787 .cra_type = &crypto_ablkcipher_type,
788 .cra_module = THIS_MODULE,
789 .cra_init = atmel_tdes_cra_init,
790 .cra_exit = atmel_tdes_cra_exit,
791 .cra_u.ablkcipher = {
792 .min_keysize = DES_KEY_SIZE,
793 .max_keysize = DES_KEY_SIZE,
794 .ivsize = DES_BLOCK_SIZE,
795 .setkey = atmel_des_setkey,
796 .encrypt = atmel_tdes_cfb16_encrypt,
797 .decrypt = atmel_tdes_cfb16_decrypt,
798 }
799},
800{
801 .cra_name = "cfb32(des)",
802 .cra_driver_name = "atmel-cfb32-des",
803 .cra_priority = 100,
804 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
805 .cra_blocksize = CFB32_BLOCK_SIZE,
806 .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
807 .cra_alignmask = 0,
808 .cra_type = &crypto_ablkcipher_type,
809 .cra_module = THIS_MODULE,
810 .cra_init = atmel_tdes_cra_init,
811 .cra_exit = atmel_tdes_cra_exit,
812 .cra_u.ablkcipher = {
813 .min_keysize = DES_KEY_SIZE,
814 .max_keysize = DES_KEY_SIZE,
815 .ivsize = DES_BLOCK_SIZE,
816 .setkey = atmel_des_setkey,
817 .encrypt = atmel_tdes_cfb32_encrypt,
818 .decrypt = atmel_tdes_cfb32_decrypt,
819 }
820},
821{
822 .cra_name = "ofb(des)",
823 .cra_driver_name = "atmel-ofb-des",
824 .cra_priority = 100,
825 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
826 .cra_blocksize = DES_BLOCK_SIZE,
827 .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
828 .cra_alignmask = 0,
829 .cra_type = &crypto_ablkcipher_type,
830 .cra_module = THIS_MODULE,
831 .cra_init = atmel_tdes_cra_init,
832 .cra_exit = atmel_tdes_cra_exit,
833 .cra_u.ablkcipher = {
834 .min_keysize = DES_KEY_SIZE,
835 .max_keysize = DES_KEY_SIZE,
836 .ivsize = DES_BLOCK_SIZE,
837 .setkey = atmel_des_setkey,
838 .encrypt = atmel_tdes_ofb_encrypt,
839 .decrypt = atmel_tdes_ofb_decrypt,
840 }
841},
842{
843 .cra_name = "ecb(des3_ede)",
844 .cra_driver_name = "atmel-ecb-tdes",
845 .cra_priority = 100,
846 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
847 .cra_blocksize = DES_BLOCK_SIZE,
848 .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
849 .cra_alignmask = 0,
850 .cra_type = &crypto_ablkcipher_type,
851 .cra_module = THIS_MODULE,
852 .cra_init = atmel_tdes_cra_init,
853 .cra_exit = atmel_tdes_cra_exit,
854 .cra_u.ablkcipher = {
855 .min_keysize = 2 * DES_KEY_SIZE,
856 .max_keysize = 3 * DES_KEY_SIZE,
857 .setkey = atmel_tdes_setkey,
858 .encrypt = atmel_tdes_ecb_encrypt,
859 .decrypt = atmel_tdes_ecb_decrypt,
860 }
861},
862{
863 .cra_name = "cbc(des3_ede)",
864 .cra_driver_name = "atmel-cbc-tdes",
865 .cra_priority = 100,
866 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
867 .cra_blocksize = DES_BLOCK_SIZE,
868 .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
869 .cra_alignmask = 0,
870 .cra_type = &crypto_ablkcipher_type,
871 .cra_module = THIS_MODULE,
872 .cra_init = atmel_tdes_cra_init,
873 .cra_exit = atmel_tdes_cra_exit,
874 .cra_u.ablkcipher = {
875 .min_keysize = 2*DES_KEY_SIZE,
876 .max_keysize = 3*DES_KEY_SIZE,
877 .ivsize = DES_BLOCK_SIZE,
878 .setkey = atmel_tdes_setkey,
879 .encrypt = atmel_tdes_cbc_encrypt,
880 .decrypt = atmel_tdes_cbc_decrypt,
881 }
882},
883{
884 .cra_name = "cfb(des3_ede)",
885 .cra_driver_name = "atmel-cfb-tdes",
886 .cra_priority = 100,
887 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
888 .cra_blocksize = DES_BLOCK_SIZE,
889 .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
890 .cra_alignmask = 0,
891 .cra_type = &crypto_ablkcipher_type,
892 .cra_module = THIS_MODULE,
893 .cra_init = atmel_tdes_cra_init,
894 .cra_exit = atmel_tdes_cra_exit,
895 .cra_u.ablkcipher = {
896 .min_keysize = 2*DES_KEY_SIZE,
897 .max_keysize = 2*DES_KEY_SIZE,
898 .ivsize = DES_BLOCK_SIZE,
899 .setkey = atmel_tdes_setkey,
900 .encrypt = atmel_tdes_cfb_encrypt,
901 .decrypt = atmel_tdes_cfb_decrypt,
902 }
903},
904{
905 .cra_name = "cfb8(des3_ede)",
906 .cra_driver_name = "atmel-cfb8-tdes",
907 .cra_priority = 100,
908 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
909 .cra_blocksize = CFB8_BLOCK_SIZE,
910 .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
911 .cra_alignmask = 0,
912 .cra_type = &crypto_ablkcipher_type,
913 .cra_module = THIS_MODULE,
914 .cra_init = atmel_tdes_cra_init,
915 .cra_exit = atmel_tdes_cra_exit,
916 .cra_u.ablkcipher = {
917 .min_keysize = 2*DES_KEY_SIZE,
918 .max_keysize = 2*DES_KEY_SIZE,
919 .ivsize = DES_BLOCK_SIZE,
920 .setkey = atmel_tdes_setkey,
921 .encrypt = atmel_tdes_cfb8_encrypt,
922 .decrypt = atmel_tdes_cfb8_decrypt,
923 }
924},
925{
926 .cra_name = "cfb16(des3_ede)",
927 .cra_driver_name = "atmel-cfb16-tdes",
928 .cra_priority = 100,
929 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
930 .cra_blocksize = CFB16_BLOCK_SIZE,
931 .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
932 .cra_alignmask = 0,
933 .cra_type = &crypto_ablkcipher_type,
934 .cra_module = THIS_MODULE,
935 .cra_init = atmel_tdes_cra_init,
936 .cra_exit = atmel_tdes_cra_exit,
937 .cra_u.ablkcipher = {
938 .min_keysize = 2*DES_KEY_SIZE,
939 .max_keysize = 2*DES_KEY_SIZE,
940 .ivsize = DES_BLOCK_SIZE,
941 .setkey = atmel_tdes_setkey,
942 .encrypt = atmel_tdes_cfb16_encrypt,
943 .decrypt = atmel_tdes_cfb16_decrypt,
944 }
945},
946{
947 .cra_name = "cfb32(des3_ede)",
948 .cra_driver_name = "atmel-cfb32-tdes",
949 .cra_priority = 100,
950 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
951 .cra_blocksize = CFB32_BLOCK_SIZE,
952 .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
953 .cra_alignmask = 0,
954 .cra_type = &crypto_ablkcipher_type,
955 .cra_module = THIS_MODULE,
956 .cra_init = atmel_tdes_cra_init,
957 .cra_exit = atmel_tdes_cra_exit,
958 .cra_u.ablkcipher = {
959 .min_keysize = 2*DES_KEY_SIZE,
960 .max_keysize = 2*DES_KEY_SIZE,
961 .ivsize = DES_BLOCK_SIZE,
962 .setkey = atmel_tdes_setkey,
963 .encrypt = atmel_tdes_cfb32_encrypt,
964 .decrypt = atmel_tdes_cfb32_decrypt,
965 }
966},
967{
968 .cra_name = "ofb(des3_ede)",
969 .cra_driver_name = "atmel-ofb-tdes",
970 .cra_priority = 100,
971 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
972 .cra_blocksize = DES_BLOCK_SIZE,
973 .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
974 .cra_alignmask = 0,
975 .cra_type = &crypto_ablkcipher_type,
976 .cra_module = THIS_MODULE,
977 .cra_init = atmel_tdes_cra_init,
978 .cra_exit = atmel_tdes_cra_exit,
979 .cra_u.ablkcipher = {
980 .min_keysize = 2*DES_KEY_SIZE,
981 .max_keysize = 3*DES_KEY_SIZE,
982 .ivsize = DES_BLOCK_SIZE,
983 .setkey = atmel_tdes_setkey,
984 .encrypt = atmel_tdes_ofb_encrypt,
985 .decrypt = atmel_tdes_ofb_decrypt,
986 }
987},
988};
989
990static void atmel_tdes_queue_task(unsigned long data)
991{
992 struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;
993
994 atmel_tdes_handle_queue(dd, NULL);
995}
996
997static void atmel_tdes_done_task(unsigned long data)
998{
999 struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
1000 int err;
1001
1002 err = atmel_tdes_crypt_dma_stop(dd);
1003
1004 err = dd->err ? : err;
1005
1006 if (dd->total && !err) {
1007 err = atmel_tdes_crypt_dma_start(dd);
1008 if (!err)
1009 return;
1010 }
1011
1012 atmel_tdes_finish_req(dd, err);
1013 atmel_tdes_handle_queue(dd, NULL);
1014}
1015
1016static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
1017{
1018 struct atmel_tdes_dev *tdes_dd = dev_id;
1019 u32 reg;
1020
1021 reg = atmel_tdes_read(tdes_dd, TDES_ISR);
1022 if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) {
1023 atmel_tdes_write(tdes_dd, TDES_IDR, reg);
1024 if (TDES_FLAGS_BUSY & tdes_dd->flags)
1025 tasklet_schedule(&tdes_dd->done_task);
1026 else
1027			dev_warn(tdes_dd->dev, "TDES interrupt while no request is active\n");
1028 return IRQ_HANDLED;
1029 }
1030
1031 return IRQ_NONE;
1032}
1033
1034static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
1035{
1036 int i;
1037
1038 for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
1039 crypto_unregister_alg(&tdes_algs[i]);
1040}
1041
1042static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
1043{
1044 int err, i, j;
1045
1046 for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
1047 INIT_LIST_HEAD(&tdes_algs[i].cra_list);
1048 err = crypto_register_alg(&tdes_algs[i]);
1049 if (err)
1050 goto err_tdes_algs;
1051 }
1052
1053 return 0;
1054
1055err_tdes_algs:
1056 for (j = 0; j < i; j++)
1057 crypto_unregister_alg(&tdes_algs[j]);
1058
1059 return err;
1060}
1061
1062static int __devinit atmel_tdes_probe(struct platform_device *pdev)
1063{
1064 struct atmel_tdes_dev *tdes_dd;
1065 struct device *dev = &pdev->dev;
1066 struct resource *tdes_res;
1067 unsigned long tdes_phys_size;
1068 int err;
1069
1070 tdes_dd = kzalloc(sizeof(struct atmel_tdes_dev), GFP_KERNEL);
1071 if (tdes_dd == NULL) {
1072 dev_err(dev, "unable to alloc data struct.\n");
1073 err = -ENOMEM;
1074 goto tdes_dd_err;
1075 }
1076
1077 tdes_dd->dev = dev;
1078
1079 platform_set_drvdata(pdev, tdes_dd);
1080
1081 INIT_LIST_HEAD(&tdes_dd->list);
1082
1083 tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task,
1084 (unsigned long)tdes_dd);
1085 tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task,
1086 (unsigned long)tdes_dd);
1087
1088 crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);
1089
1090 tdes_dd->irq = -1;
1091
1092 /* Get the base address */
1093 tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1094 if (!tdes_res) {
1095 dev_err(dev, "no MEM resource info\n");
1096 err = -ENODEV;
1097 goto res_err;
1098 }
1099 tdes_dd->phys_base = tdes_res->start;
1100 tdes_phys_size = resource_size(tdes_res);
1101
1102 /* Get the IRQ */
1103 tdes_dd->irq = platform_get_irq(pdev, 0);
1104 if (tdes_dd->irq < 0) {
1105 dev_err(dev, "no IRQ resource info\n");
1106 err = tdes_dd->irq;
1107 goto res_err;
1108 }
1109
1110 err = request_irq(tdes_dd->irq, atmel_tdes_irq, IRQF_SHARED,
1111 "atmel-tdes", tdes_dd);
1112 if (err) {
1113 dev_err(dev, "unable to request tdes irq.\n");
1114 goto tdes_irq_err;
1115 }
1116
1117 /* Initializing the clock */
1118 tdes_dd->iclk = clk_get(&pdev->dev, NULL);
1119 if (IS_ERR(tdes_dd->iclk)) {
1120		dev_err(dev, "clock initialization failed.\n");
1121 err = PTR_ERR(tdes_dd->iclk);
1122 goto clk_err;
1123 }
1124
1125 tdes_dd->io_base = ioremap(tdes_dd->phys_base, tdes_phys_size);
1126 if (!tdes_dd->io_base) {
1127 dev_err(dev, "can't ioremap\n");
1128 err = -ENOMEM;
1129 goto tdes_io_err;
1130 }
1131
1132 err = atmel_tdes_dma_init(tdes_dd);
1133 if (err)
1134 goto err_tdes_dma;
1135
1136 spin_lock(&atmel_tdes.lock);
1137 list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
1138 spin_unlock(&atmel_tdes.lock);
1139
1140 err = atmel_tdes_register_algs(tdes_dd);
1141 if (err)
1142 goto err_algs;
1143
1144 dev_info(dev, "Atmel DES/TDES\n");
1145
1146 return 0;
1147
1148err_algs:
1149 spin_lock(&atmel_tdes.lock);
1150 list_del(&tdes_dd->list);
1151 spin_unlock(&atmel_tdes.lock);
1152 atmel_tdes_dma_cleanup(tdes_dd);
1153err_tdes_dma:
1154 iounmap(tdes_dd->io_base);
1155tdes_io_err:
1156 clk_put(tdes_dd->iclk);
1157clk_err:
1158 free_irq(tdes_dd->irq, tdes_dd);
1159tdes_irq_err:
1160res_err:
1161 tasklet_kill(&tdes_dd->done_task);
1162 tasklet_kill(&tdes_dd->queue_task);
1163 kfree(tdes_dd);
1164 tdes_dd = NULL;
1165tdes_dd_err:
1166 dev_err(dev, "initialization failed.\n");
1167
1168 return err;
1169}
1170
1171static int __devexit atmel_tdes_remove(struct platform_device *pdev)
1172{
1173	struct atmel_tdes_dev *tdes_dd;
1174
1175 tdes_dd = platform_get_drvdata(pdev);
1176 if (!tdes_dd)
1177 return -ENODEV;
1178 spin_lock(&atmel_tdes.lock);
1179 list_del(&tdes_dd->list);
1180 spin_unlock(&atmel_tdes.lock);
1181
1182 atmel_tdes_unregister_algs(tdes_dd);
1183
1184 tasklet_kill(&tdes_dd->done_task);
1185 tasklet_kill(&tdes_dd->queue_task);
1186
1187 atmel_tdes_dma_cleanup(tdes_dd);
1188
1189 iounmap(tdes_dd->io_base);
1190
1191 clk_put(tdes_dd->iclk);
1192
1193 if (tdes_dd->irq >= 0)
1194 free_irq(tdes_dd->irq, tdes_dd);
1195
1196 kfree(tdes_dd);
1197 tdes_dd = NULL;
1198
1199 return 0;
1200}
1201
1202static struct platform_driver atmel_tdes_driver = {
1203 .probe = atmel_tdes_probe,
1204 .remove = __devexit_p(atmel_tdes_remove),
1205 .driver = {
1206 .name = "atmel_tdes",
1207 .owner = THIS_MODULE,
1208 },
1209};
1210
1211module_platform_driver(atmel_tdes_driver);
1212
1213MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support.");
1214MODULE_LICENSE("GPL v2");
1215MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
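
For context, a rough sketch of how a kernel-side user would exercise one of the ablkcipher algorithms this file registers, for example cbc(des3_ede), through the era's ablkcipher API. It assumes <linux/crypto.h>, <linux/scatterlist.h> and <linux/completion.h>; the function and buffer names are placeholders and error handling is abbreviated:

	static void op_done(struct crypto_async_request *areq, int err)
	{
		if (err != -EINPROGRESS)
			complete(areq->data);	/* &done from the caller below */
	}

	static int tdes_cbc_example(const u8 *key, u8 *iv, u8 *buf, unsigned int len)
	{
		struct crypto_ablkcipher *tfm;
		struct ablkcipher_request *req;
		struct scatterlist sg;
		DECLARE_COMPLETION_ONSTACK(done);
		int ret;

		tfm = crypto_alloc_ablkcipher("cbc(des3_ede)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_ablkcipher_setkey(tfm, key, 3 * DES_KEY_SIZE);
		if (ret)
			goto out;

		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
						op_done, &done);
		sg_init_one(&sg, buf, len);	/* in place; len % DES_BLOCK_SIZE == 0 */
		ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

		ret = crypto_ablkcipher_encrypt(req);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			/* async completion arrives via the driver's done tasklet */
			wait_for_completion(&done);
			ret = 0;
		}
		ablkcipher_request_free(req);
	out:
		crypto_free_ablkcipher(tfm);
		return ret;
	}
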
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
new file mode 100644
index 000000000000..5398580b4313
--- /dev/null
+++ b/drivers/crypto/bfin_crc.c
@@ -0,0 +1,780 @@
1/*
2 * Cryptographic API.
3 *
4 * Support Blackfin CRC HW acceleration.
5 *
6 * Copyright 2012 Analog Devices Inc.
7 *
8 * Licensed under the GPL-2.
9 */
10
11#include <linux/err.h>
12#include <linux/device.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/errno.h>
16#include <linux/interrupt.h>
17#include <linux/kernel.h>
18#include <linux/irq.h>
19#include <linux/io.h>
20#include <linux/platform_device.h>
21#include <linux/scatterlist.h>
22#include <linux/dma-mapping.h>
23#include <linux/delay.h>
24#include <linux/unaligned/access_ok.h>
25#include <linux/crypto.h>
26#include <linux/cryptohash.h>
27#include <crypto/scatterwalk.h>
28#include <crypto/algapi.h>
29#include <crypto/hash.h>
30#include <crypto/internal/hash.h>
31
32#include <asm/blackfin.h>
33#include <asm/bfin_crc.h>
34#include <asm/dma.h>
35#include <asm/portmux.h>
36
37#define CRC_CCRYPTO_QUEUE_LENGTH 5
38
39#define DRIVER_NAME "bfin-hmac-crc"
40#define CHKSUM_DIGEST_SIZE 4
41#define CHKSUM_BLOCK_SIZE 1
42
43#define CRC_MAX_DMA_DESC 100
44
45#define CRC_CRYPTO_STATE_UPDATE 1
46#define CRC_CRYPTO_STATE_FINALUPDATE 2
47#define CRC_CRYPTO_STATE_FINISH 3
48
49struct bfin_crypto_crc {
50 struct list_head list;
51 struct device *dev;
52 spinlock_t lock;
53
54 int irq;
55 int dma_ch;
56 u32 poly;
57 volatile struct crc_register *regs;
58
59 struct ahash_request *req; /* current request in operation */
60 struct dma_desc_array *sg_cpu; /* virt addr of sg dma descriptors */
61 dma_addr_t sg_dma; /* phy addr of sg dma descriptors */
62 u8 *sg_mid_buf;
63
64 struct tasklet_struct done_task;
65 struct crypto_queue queue; /* waiting requests */
66
67 u8 busy:1; /* crc device in operation flag */
68};
69
70static struct bfin_crypto_crc_list {
71 struct list_head dev_list;
72 spinlock_t lock;
73} crc_list;
74
75struct bfin_crypto_crc_reqctx {
76 struct bfin_crypto_crc *crc;
77
78 unsigned int total; /* total request bytes */
79 size_t sg_buflen; /* bytes for this update */
80 unsigned int sg_nents;
81	struct scatterlist *sg; /* sg list head for this update */
82 struct scatterlist bufsl[2]; /* chained sg list */
83
84 size_t bufnext_len;
85 size_t buflast_len;
86	u8 bufnext[CHKSUM_DIGEST_SIZE]; /* extra bytes for next update */
87	u8 buflast[CHKSUM_DIGEST_SIZE]; /* extra bytes from last update */
88
89 u8 flag;
90};
91
92struct bfin_crypto_crc_ctx {
93 struct bfin_crypto_crc *crc;
94 u32 key;
95};
96
97
98/*
99 * derive number of elements in scatterlist
100 */
101static int sg_count(struct scatterlist *sg_list)
102{
103 struct scatterlist *sg = sg_list;
104 int sg_nents = 1;
105
106 if (sg_list == NULL)
107 return 0;
108
109 while (!sg_is_last(sg)) {
110 sg_nents++;
111 sg = scatterwalk_sg_next(sg);
112 }
113
114 return sg_nents;
115}
116
117/*
118 * get element in scatter list by given index
119 */
120static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nents,
121 unsigned int index)
122{
123 struct scatterlist *sg = NULL;
124 int i;
125
126 for_each_sg(sg_list, sg, nents, i)
127 if (i == index)
128 break;
129
130 return sg;
131}
132
133static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key)
134{
135 crc->regs->datacntrld = 0;
136 crc->regs->control = MODE_CALC_CRC << OPMODE_OFFSET;
137 crc->regs->curresult = key;
138
139 /* setup CRC interrupts */
140 crc->regs->status = CMPERRI | DCNTEXPI;
141 crc->regs->intrenset = CMPERRI | DCNTEXPI;
142 SSYNC();
143
144 return 0;
145}
146
147static int bfin_crypto_crc_init(struct ahash_request *req)
148{
149 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
150 struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
151 struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
152 struct bfin_crypto_crc *crc;
153
154	spin_lock_bh(&crc_list.lock);
155	list_for_each_entry(crc, &crc_list.dev_list, list) {
156		crc_ctx->crc = crc;
157		break;
158	}
159	spin_unlock_bh(&crc_list.lock);
160	dev_dbg(crc->dev, "crc_init\n");
161
162 if (sg_count(req->src) > CRC_MAX_DMA_DESC) {
163		dev_dbg(crc->dev, "init: sg list larger than %d dma descriptors\n",
164			CRC_MAX_DMA_DESC);
165 return -EINVAL;
166 }
167
168 ctx->crc = crc;
169 ctx->bufnext_len = 0;
170 ctx->buflast_len = 0;
171 ctx->sg_buflen = 0;
172 ctx->total = 0;
173 ctx->flag = 0;
174
175 /* init crc results */
176 put_unaligned_le32(crc_ctx->key, req->result);
177
178 dev_dbg(crc->dev, "init: digest size: %d\n",
179 crypto_ahash_digestsize(tfm));
180
181 return bfin_crypto_crc_init_hw(crc, crc_ctx->key);
182}
183
184static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
185{
186 struct scatterlist *sg;
187 struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(crc->req);
188 int i = 0, j = 0;
189 unsigned long dma_config;
190 unsigned int dma_count;
191 unsigned int dma_addr;
192 unsigned int mid_dma_count = 0;
193 int dma_mod;
194
195 dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE);
196
197 for_each_sg(ctx->sg, sg, ctx->sg_nents, j) {
198 dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32;
199 dma_addr = sg_dma_address(sg);
200		/* deduct the extra bytes in the last sg */
201 if (sg_is_last(sg))
202 dma_count = sg_dma_len(sg) - ctx->bufnext_len;
203 else
204 dma_count = sg_dma_len(sg);
205
206 if (mid_dma_count) {
207			/* Top up the last middle dma buffer to 4 bytes with the
208			   first bytes of the current sg buffer, then advance the
209			   sg address and shrink its length accordingly.
210			*/
211			memcpy(crc->sg_mid_buf + ((i - 1) << 2) + mid_dma_count,
212 (void *)dma_addr,
213 CHKSUM_DIGEST_SIZE - mid_dma_count);
214 dma_addr += CHKSUM_DIGEST_SIZE - mid_dma_count;
215 dma_count -= CHKSUM_DIGEST_SIZE - mid_dma_count;
216 }
217 /* chop current sg dma len to multiple of 32 bits */
218 mid_dma_count = dma_count % 4;
219 dma_count &= ~0x3;
220
221 if (dma_addr % 4 == 0) {
222 dma_config |= WDSIZE_32;
223 dma_count >>= 2;
224 dma_mod = 4;
225 } else if (dma_addr % 2 == 0) {
226 dma_config |= WDSIZE_16;
227 dma_count >>= 1;
228 dma_mod = 2;
229 } else {
230 dma_config |= WDSIZE_8;
231 dma_mod = 1;
232 }
233
234 crc->sg_cpu[i].start_addr = dma_addr;
235 crc->sg_cpu[i].cfg = dma_config;
236 crc->sg_cpu[i].x_count = dma_count;
237 crc->sg_cpu[i].x_modify = dma_mod;
238 dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
239 "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
240 i, crc->sg_cpu[i].start_addr,
241 crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
242 crc->sg_cpu[i].x_modify);
243 i++;
244
245 if (mid_dma_count) {
246 /* copy extra bytes to next middle dma buffer */
247 dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 |
248 DMAEN | PSIZE_32 | WDSIZE_32;
249 memcpy(crc->sg_mid_buf + (i << 2),
250 (void *)(dma_addr + (dma_count << 2)),
251 mid_dma_count);
252 /* setup new dma descriptor for next middle dma */
253 crc->sg_cpu[i].start_addr = dma_map_single(crc->dev,
254 crc->sg_mid_buf + (i << 2),
255 CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE);
256 crc->sg_cpu[i].cfg = dma_config;
257 crc->sg_cpu[i].x_count = 1;
258 crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
259 dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
260 "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
261 i, crc->sg_cpu[i].start_addr,
262 crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
263 crc->sg_cpu[i].x_modify);
264 i++;
265 }
266 }
267
268 dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32 | WDSIZE_32;
269	/* For final update req, append the buffer for next update as well */
270 if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
271 ctx->flag == CRC_CRYPTO_STATE_FINISH)) {
272 crc->sg_cpu[i].start_addr = dma_map_single(crc->dev, ctx->bufnext,
273 CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE);
274 crc->sg_cpu[i].cfg = dma_config;
275 crc->sg_cpu[i].x_count = 1;
276 crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
277 dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
278 "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
279 i, crc->sg_cpu[i].start_addr,
280 crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
281 crc->sg_cpu[i].x_modify);
282 i++;
283 }
284
285 if (i == 0)
286 return;
287
288 flush_dcache_range((unsigned int)crc->sg_cpu,
289 (unsigned int)crc->sg_cpu +
290 i * sizeof(struct dma_desc_array));
291
292 /* Set the last descriptor to stop mode */
293 crc->sg_cpu[i - 1].cfg &= ~(DMAFLOW | NDSIZE);
294 crc->sg_cpu[i - 1].cfg |= DI_EN;
295 set_dma_curr_desc_addr(crc->dma_ch, (unsigned long *)crc->sg_dma);
296 set_dma_x_count(crc->dma_ch, 0);
297 set_dma_x_modify(crc->dma_ch, 0);
298 SSYNC();
299 set_dma_config(crc->dma_ch, dma_config);
300}
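
The chopping arithmetic above is easier to see in isolation. A minimal sketch of the same computation (hypothetical helper, not in the driver):

	static unsigned int crc_chop32(unsigned int len, unsigned int *leftover)
	{
		*leftover = len % 4;	/* bytes parked in sg_mid_buf */
		return len & ~0x3;	/* bytes this descriptor DMAs directly */
	}

	/*
	 * e.g. len = 7: leftover = 3, return = 4; the 3 leftover bytes are
	 * copied into sg_mid_buf and topped up to a full 32-bit word with
	 * the first bytes of the next sg entry.
	 */
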
301
302static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
303 struct ahash_request *req)
304{
305 struct crypto_async_request *async_req, *backlog;
306 struct bfin_crypto_crc_reqctx *ctx;
307 struct scatterlist *sg;
308 int ret = 0;
309 int nsg, i, j;
310 unsigned int nextlen;
311 unsigned long flags;
312
313 spin_lock_irqsave(&crc->lock, flags);
314 if (req)
315 ret = ahash_enqueue_request(&crc->queue, req);
316 if (crc->busy) {
317 spin_unlock_irqrestore(&crc->lock, flags);
318 return ret;
319 }
320 backlog = crypto_get_backlog(&crc->queue);
321 async_req = crypto_dequeue_request(&crc->queue);
322 if (async_req)
323 crc->busy = 1;
324 spin_unlock_irqrestore(&crc->lock, flags);
325
326 if (!async_req)
327 return ret;
328
329 if (backlog)
330 backlog->complete(backlog, -EINPROGRESS);
331
332 req = ahash_request_cast(async_req);
333 crc->req = req;
334 ctx = ahash_request_ctx(req);
335 ctx->sg = NULL;
336 ctx->sg_buflen = 0;
337 ctx->sg_nents = 0;
338
339 dev_dbg(crc->dev, "handling new req, flag=%u, nbytes: %d\n",
340 ctx->flag, req->nbytes);
341
342 if (ctx->flag == CRC_CRYPTO_STATE_FINISH) {
343 if (ctx->bufnext_len == 0) {
344 crc->busy = 0;
345 return 0;
346 }
347
348		/* Zero-pad the last crc update buffer out to 32 bits */
349 memset(ctx->bufnext + ctx->bufnext_len, 0,
350 CHKSUM_DIGEST_SIZE - ctx->bufnext_len);
351 } else {
352		/* Stash data totalling less than 32 bits for the next update. */
353 if (ctx->bufnext_len + req->nbytes < CHKSUM_DIGEST_SIZE) {
354 memcpy(ctx->bufnext + ctx->bufnext_len,
355 sg_virt(req->src), req->nbytes);
356 ctx->bufnext_len += req->nbytes;
357 if (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE &&
358 ctx->bufnext_len) {
359 goto finish_update;
360 } else {
361 crc->busy = 0;
362 return 0;
363 }
364 }
365
366 if (ctx->bufnext_len) {
367 /* Chain in extra bytes of last update */
368 ctx->buflast_len = ctx->bufnext_len;
369 memcpy(ctx->buflast, ctx->bufnext, ctx->buflast_len);
370
371 nsg = ctx->sg_buflen ? 2 : 1;
372 sg_init_table(ctx->bufsl, nsg);
373 sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len);
374 if (nsg > 1)
375 scatterwalk_sg_chain(ctx->bufsl, nsg,
376 req->src);
377 ctx->sg = ctx->bufsl;
378 } else
379 ctx->sg = req->src;
380
381 /* Chop crc buffer size to multiple of 32 bit */
382 nsg = ctx->sg_nents = sg_count(ctx->sg);
383 ctx->sg_buflen = ctx->buflast_len + req->nbytes;
384 ctx->bufnext_len = ctx->sg_buflen % 4;
385 ctx->sg_buflen &= ~0x3;
386
387 if (ctx->bufnext_len) {
388 /* copy extra bytes to buffer for next update */
389 memset(ctx->bufnext, 0, CHKSUM_DIGEST_SIZE);
390 nextlen = ctx->bufnext_len;
391 for (i = nsg - 1; i >= 0; i--) {
392 sg = sg_get(ctx->sg, nsg, i);
393 j = min(nextlen, sg_dma_len(sg));
394 memcpy(ctx->bufnext + nextlen - j,
395 sg_virt(sg) + sg_dma_len(sg) - j, j);
396 if (j == sg_dma_len(sg))
397 ctx->sg_nents--;
398 nextlen -= j;
399 if (nextlen == 0)
400 break;
401 }
402 }
403 }
404
405finish_update:
406 if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
407 ctx->flag == CRC_CRYPTO_STATE_FINISH))
408 ctx->sg_buflen += CHKSUM_DIGEST_SIZE;
409
410 /* set CRC data count before start DMA */
411 crc->regs->datacnt = ctx->sg_buflen >> 2;
412
413 /* setup and enable CRC DMA */
414 bfin_crypto_crc_config_dma(crc);
415
416 /* finally kick off CRC operation */
417 crc->regs->control |= BLKEN;
418 SSYNC();
419
420 return -EINPROGRESS;
421}
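
A worked example of the bufnext/buflast carry implemented above, with illustrative byte counts:

	/*
	 * update(6 bytes):  sg_buflen = 0 + 6 -> bufnext_len = 6 % 4 = 2,
	 *                   sg_buflen &= ~3 -> 4 bytes go to the CRC DMA;
	 * update(5 bytes):  the 2 carried bytes are chained in front of
	 *                   req->src via bufsl, sg_buflen = 2 + 5 = 7 ->
	 *                   bufnext_len = 3, and again 4 bytes are DMA'd.
	 */
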
422
423static int bfin_crypto_crc_update(struct ahash_request *req)
424{
425 struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
426
427 if (!req->nbytes)
428 return 0;
429
430 dev_dbg(ctx->crc->dev, "crc_update\n");
431 ctx->total += req->nbytes;
432 ctx->flag = CRC_CRYPTO_STATE_UPDATE;
433
434 return bfin_crypto_crc_handle_queue(ctx->crc, req);
435}
436
437static int bfin_crypto_crc_final(struct ahash_request *req)
438{
439 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
440 struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
441 struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
442
443 dev_dbg(ctx->crc->dev, "crc_final\n");
444 ctx->flag = CRC_CRYPTO_STATE_FINISH;
445 crc_ctx->key = 0;
446
447 return bfin_crypto_crc_handle_queue(ctx->crc, req);
448}
449
450static int bfin_crypto_crc_finup(struct ahash_request *req)
451{
452 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
453 struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
454 struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
455
456 dev_dbg(ctx->crc->dev, "crc_finishupdate\n");
457 ctx->total += req->nbytes;
458 ctx->flag = CRC_CRYPTO_STATE_FINALUPDATE;
459 crc_ctx->key = 0;
460
461 return bfin_crypto_crc_handle_queue(ctx->crc, req);
462}
463
464static int bfin_crypto_crc_digest(struct ahash_request *req)
465{
466 int ret;
467
468 ret = bfin_crypto_crc_init(req);
469 if (ret)
470 return ret;
471
472 return bfin_crypto_crc_finup(req);
473}
474
475static int bfin_crypto_crc_setkey(struct crypto_ahash *tfm, const u8 *key,
476 unsigned int keylen)
477{
478 struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
479
480 dev_dbg(crc_ctx->crc->dev, "crc_setkey\n");
481 if (keylen != CHKSUM_DIGEST_SIZE) {
482 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
483 return -EINVAL;
484 }
485
486 crc_ctx->key = get_unaligned_le32(key);
487
488 return 0;
489}
490
491static int bfin_crypto_crc_cra_init(struct crypto_tfm *tfm)
492{
493 struct bfin_crypto_crc_ctx *crc_ctx = crypto_tfm_ctx(tfm);
494
495 crc_ctx->key = 0;
496 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
497 sizeof(struct bfin_crypto_crc_reqctx));
498
499 return 0;
500}
501
502static void bfin_crypto_crc_cra_exit(struct crypto_tfm *tfm)
503{
504}
505
506static struct ahash_alg algs = {
507 .init = bfin_crypto_crc_init,
508 .update = bfin_crypto_crc_update,
509 .final = bfin_crypto_crc_final,
510 .finup = bfin_crypto_crc_finup,
511 .digest = bfin_crypto_crc_digest,
512 .setkey = bfin_crypto_crc_setkey,
513 .halg.digestsize = CHKSUM_DIGEST_SIZE,
514 .halg.base = {
515 .cra_name = "hmac(crc32)",
516 .cra_driver_name = DRIVER_NAME,
517 .cra_priority = 100,
518 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
519 CRYPTO_ALG_ASYNC,
520 .cra_blocksize = CHKSUM_BLOCK_SIZE,
521 .cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx),
522 .cra_alignmask = 3,
523 .cra_module = THIS_MODULE,
524 .cra_init = bfin_crypto_crc_cra_init,
525 .cra_exit = bfin_crypto_crc_cra_exit,
526 }
527};
528
529static void bfin_crypto_crc_done_task(unsigned long data)
530{
531 struct bfin_crypto_crc *crc = (struct bfin_crypto_crc *)data;
532
533 bfin_crypto_crc_handle_queue(crc, NULL);
534}
535
536static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id)
537{
538 struct bfin_crypto_crc *crc = dev_id;
539
540 if (crc->regs->status & DCNTEXP) {
541 crc->regs->status = DCNTEXP;
542 SSYNC();
543
544 /* prepare results */
545 put_unaligned_le32(crc->regs->result, crc->req->result);
546
547 crc->regs->control &= ~BLKEN;
548 crc->busy = 0;
549
550 if (crc->req->base.complete)
551 crc->req->base.complete(&crc->req->base, 0);
552
553 tasklet_schedule(&crc->done_task);
554
555 return IRQ_HANDLED;
556 } else
557 return IRQ_NONE;
558}
559
560#ifdef CONFIG_PM
561/**
562 * bfin_crypto_crc_suspend - suspend crc device
563 * @pdev: device being suspended
564 * @state: requested suspend state
565 */
566static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t state)
567{
568 struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
569 int i = 100000;
570
571 while ((crc->regs->control & BLKEN) && --i)
572 cpu_relax();
573
574 if (i == 0)
575 return -EBUSY;
576
577 return 0;
578}
579#else
580# define bfin_crypto_crc_suspend NULL
581#endif
582
583#define bfin_crypto_crc_resume NULL
584
585/**
586 * bfin_crypto_crc_probe - Initialize crc device
587 *
588 */
589static int __devinit bfin_crypto_crc_probe(struct platform_device *pdev)
590{
591 struct device *dev = &pdev->dev;
592 struct resource *res;
593 struct bfin_crypto_crc *crc;
594 unsigned int timeout = 100000;
595 int ret;
596
597 crc = kzalloc(sizeof(*crc), GFP_KERNEL);
598 if (!crc) {
599		dev_err(&pdev->dev, "failed to allocate bfin_crypto_crc\n");
600 return -ENOMEM;
601 }
602
603 crc->dev = dev;
604
605 INIT_LIST_HEAD(&crc->list);
606 spin_lock_init(&crc->lock);
607 tasklet_init(&crc->done_task, bfin_crypto_crc_done_task, (unsigned long)crc);
608 crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH);
609
610 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
611 if (res == NULL) {
612 dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
613 ret = -ENOENT;
614 goto out_error_free_mem;
615 }
616
617 crc->regs = ioremap(res->start, resource_size(res));
618 if (!crc->regs) {
619 dev_err(&pdev->dev, "Cannot map CRC IO\n");
620 ret = -ENXIO;
621 goto out_error_free_mem;
622 }
623
624 crc->irq = platform_get_irq(pdev, 0);
625 if (crc->irq < 0) {
626 dev_err(&pdev->dev, "No CRC DCNTEXP IRQ specified\n");
627 ret = -ENOENT;
628 goto out_error_unmap;
629 }
630
631 ret = request_irq(crc->irq, bfin_crypto_crc_handler, IRQF_SHARED, dev_name(dev), crc);
632 if (ret) {
633 dev_err(&pdev->dev, "Unable to request blackfin crc irq\n");
634 goto out_error_unmap;
635 }
636
637 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
638 if (res == NULL) {
639 dev_err(&pdev->dev, "No CRC DMA channel specified\n");
640 ret = -ENOENT;
641 goto out_error_irq;
642 }
643 crc->dma_ch = res->start;
644
645 ret = request_dma(crc->dma_ch, dev_name(dev));
646 if (ret) {
647 dev_err(&pdev->dev, "Unable to attach Blackfin CRC DMA channel\n");
648 goto out_error_irq;
649 }
650
651 crc->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &crc->sg_dma, GFP_KERNEL);
652 if (crc->sg_cpu == NULL) {
653 ret = -ENOMEM;
654 goto out_error_dma;
655 }
656 /*
657 * need at most CRC_MAX_DMA_DESC sg + CRC_MAX_DMA_DESC middle +
658 * 1 last + 1 next dma descriptors
659 */
660 crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1));
661
662 crc->regs->control = 0;
663 SSYNC();
664 crc->regs->poly = crc->poly = (u32)pdev->dev.platform_data;
665 SSYNC();
666
667 while (!(crc->regs->status & LUTDONE) && (--timeout) > 0)
668 cpu_relax();
669
670 if (timeout == 0)
671		dev_info(&pdev->dev, "timeout initializing crc poly\n");
672
673 spin_lock(&crc_list.lock);
674 list_add(&crc->list, &crc_list.dev_list);
675 spin_unlock(&crc_list.lock);
676
677 platform_set_drvdata(pdev, crc);
678
679 ret = crypto_register_ahash(&algs);
680 if (ret) {
681 spin_lock(&crc_list.lock);
682 list_del(&crc->list);
683 spin_unlock(&crc_list.lock);
684		dev_err(&pdev->dev, "Can't register crypto ahash device\n");
685 goto out_error_dma;
686 }
687
688 dev_info(&pdev->dev, "initialized\n");
689
690 return 0;
691
692out_error_dma:
693 if (crc->sg_cpu)
694 dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma);
695 free_dma(crc->dma_ch);
696out_error_irq:
697 free_irq(crc->irq, crc->dev);
698out_error_unmap:
699 iounmap((void *)crc->regs);
700out_error_free_mem:
701 kfree(crc);
702
703 return ret;
704}
705
706/**
707 * bfin_crypto_crc_remove - Remove crc device
708 *
709 */
710static int __devexit bfin_crypto_crc_remove(struct platform_device *pdev)
711{
712 struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
713
714 if (!crc)
715 return -ENODEV;
716
717 spin_lock(&crc_list.lock);
718 list_del(&crc->list);
719 spin_unlock(&crc_list.lock);
720
721 crypto_unregister_ahash(&algs);
722 tasklet_kill(&crc->done_task);
723 iounmap((void *)crc->regs);
724 free_dma(crc->dma_ch);
725 if (crc->irq > 0)
726 free_irq(crc->irq, crc->dev);
727 kfree(crc);
728
729 return 0;
730}
731
732static struct platform_driver bfin_crypto_crc_driver = {
733 .probe = bfin_crypto_crc_probe,
734 .remove = __devexit_p(bfin_crypto_crc_remove),
735 .suspend = bfin_crypto_crc_suspend,
736 .resume = bfin_crypto_crc_resume,
737 .driver = {
738 .name = DRIVER_NAME,
739 .owner = THIS_MODULE,
740 },
741};
742
743/**
744 * bfin_crypto_crc_mod_init - Initialize module
745 *
746 * Checks the module params and registers the platform driver.
747 * Real work is in the platform probe function.
748 */
749static int __init bfin_crypto_crc_mod_init(void)
750{
751 int ret;
752
753 pr_info("Blackfin hardware CRC crypto driver\n");
754
755 INIT_LIST_HEAD(&crc_list.dev_list);
756 spin_lock_init(&crc_list.lock);
757
758 ret = platform_driver_register(&bfin_crypto_crc_driver);
759 if (ret) {
760		pr_err("unable to register driver\n");
761 return ret;
762 }
763
764 return 0;
765}
766
767/**
768 * bfin_crypto_crc_mod_exit - Deinitialize module
769 */
770static void __exit bfin_crypto_crc_mod_exit(void)
771{
772 platform_driver_unregister(&bfin_crypto_crc_driver);
773}
774
775module_init(bfin_crypto_crc_mod_init);
776module_exit(bfin_crypto_crc_mod_exit);
777
778MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
779MODULE_DESCRIPTION("Blackfin CRC hardware crypto driver");
780MODULE_LICENSE("GPL");
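
For reference, a rough sketch of driving the hmac(crc32) algorithm registered above through the ahash API. The 4-byte key is the CRC seed (see the setkey handler); data and len are placeholders, and async completion handling is elided the same way as for any ahash user:

	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	u8 seed[4] = { 0xff, 0xff, 0xff, 0xff };	/* CRC seed, must be 4 bytes */
	u8 out[4];
	int ret;

	tfm = crypto_alloc_ahash("hmac(crc32)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_ahash_setkey(tfm, seed, sizeof(seed));
	req = ahash_request_alloc(tfm, GFP_KERNEL);
	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, out, len);

	ret = crypto_ahash_digest(req);	/* -EINPROGRESS: wait on req->base.complete */
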
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 2d876bb98ff4..65c7668614ab 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -32,10 +32,13 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
32config CRYPTO_DEV_FSL_CAAM_INTC 32config CRYPTO_DEV_FSL_CAAM_INTC
33 bool "Job Ring interrupt coalescing" 33 bool "Job Ring interrupt coalescing"
34 depends on CRYPTO_DEV_FSL_CAAM 34 depends on CRYPTO_DEV_FSL_CAAM
35 default y 35 default n
36 help 36 help
37 Enable the Job Ring's interrupt coalescing feature. 37 Enable the Job Ring's interrupt coalescing feature.
38 38
39 Note: the driver already provides adequate
40 interrupt coalescing in software.
41
39config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD 42config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
40 int "Job Ring interrupt coalescing count threshold" 43 int "Job Ring interrupt coalescing count threshold"
41 depends on CRYPTO_DEV_FSL_CAAM_INTC 44 depends on CRYPTO_DEV_FSL_CAAM_INTC
@@ -70,3 +73,28 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
70 73
71 To compile this as a module, choose M here: the module 74 To compile this as a module, choose M here: the module
72 will be called caamalg. 75 will be called caamalg.
76
77config CRYPTO_DEV_FSL_CAAM_AHASH_API
78 tristate "Register hash algorithm implementations with Crypto API"
79 depends on CRYPTO_DEV_FSL_CAAM
80 default y
81 select CRYPTO_AHASH
82 help
83 Selecting this will offload ahash for users of the
84 scatterlist crypto API to the SEC4 via job ring.
85
86 To compile this as a module, choose M here: the module
87 will be called caamhash.
88
89config CRYPTO_DEV_FSL_CAAM_RNG_API
90 tristate "Register caam device for hwrng API"
91 depends on CRYPTO_DEV_FSL_CAAM
92 default y
93 select CRYPTO_RNG
94 select HW_RANDOM
95 help
96 Selecting this will register the SEC4 hardware rng to
97	  the hw_random API for supplying the kernel entropy pool.
98
99 To compile this as a module, choose M here: the module
100 will be called caamrng.
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index ef39011b4505..b1eb44838db5 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -4,5 +4,7 @@
4 4
5obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o 5obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
6obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o 6obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
7obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
8obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
7 9
8caam-objs := ctrl.o jr.o error.o 10caam-objs := ctrl.o jr.o error.o key_gen.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 4eec389184d3..0c1ea8492eff 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -37,9 +37,10 @@
37 * | ShareDesc Pointer | 37 * | ShareDesc Pointer |
38 * | SEQ_OUT_PTR | 38 * | SEQ_OUT_PTR |
39 * | (output buffer) | 39 * | (output buffer) |
40 * | (output length) |
40 * | SEQ_IN_PTR | 41 * | SEQ_IN_PTR |
41 * | (input buffer) | 42 * | (input buffer) |
42 * | LOAD (to DECO) | 43 * | (input length) |
43 * --------------------- 44 * ---------------------
44 */ 45 */
45 46
@@ -50,6 +51,8 @@
50#include "desc_constr.h" 51#include "desc_constr.h"
51#include "jr.h" 52#include "jr.h"
52#include "error.h" 53#include "error.h"
54#include "sg_sw_sec4.h"
55#include "key_gen.h"
53 56
54/* 57/*
55 * crypto alg 58 * crypto alg
@@ -62,7 +65,7 @@
62#define CAAM_MAX_IV_LENGTH 16 65#define CAAM_MAX_IV_LENGTH 16
63 66
64/* length of descriptors text */ 67/* length of descriptors text */
65#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3) 68#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
66 69
67#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) 70#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
68#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ) 71#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
@@ -143,11 +146,11 @@ static inline void aead_append_ld_iv(u32 *desc, int ivsize)
143 */ 146 */
144static inline void ablkcipher_append_src_dst(u32 *desc) 147static inline void ablkcipher_append_src_dst(u32 *desc)
145{ 148{
146 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); \ 149 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
147 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); \ 150 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
148 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | \ 151 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
149 KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); \ 152 KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
150 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); \ 153 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
151} 154}
152 155
153/* 156/*
@@ -452,121 +455,12 @@ static int aead_setauthsize(struct crypto_aead *authenc,
452 return 0; 455 return 0;
453} 456}
454 457
455struct split_key_result { 458static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
456 struct completion completion; 459 u32 authkeylen)
457 int err;
458};
459
460static void split_key_done(struct device *dev, u32 *desc, u32 err,
461 void *context)
462{ 460{
463 struct split_key_result *res = context; 461 return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
464 462 ctx->split_key_pad_len, key_in, authkeylen,
465#ifdef DEBUG 463 ctx->alg_op);
466 dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
467#endif
468
469 if (err) {
470 char tmp[CAAM_ERROR_STR_MAX];
471
472 dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
473 }
474
475 res->err = err;
476
477 complete(&res->completion);
478}
479
480/*
481get a split ipad/opad key
482
483Split key generation-----------------------------------------------
484
485[00] 0xb0810008 jobdesc: stidx=1 share=never len=8
486[01] 0x04000014 key: class2->keyreg len=20
487 @0xffe01000
488[03] 0x84410014 operation: cls2-op sha1 hmac init dec
489[04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm
490[05] 0xa4000001 jump: class2 local all ->1 [06]
491[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
492 @0xffe04000
493*/
494static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
495{
496 struct device *jrdev = ctx->jrdev;
497 u32 *desc;
498 struct split_key_result result;
499 dma_addr_t dma_addr_in, dma_addr_out;
500 int ret = 0;
501
502 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
503
504 init_job_desc(desc, 0);
505
506 dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen,
507 DMA_TO_DEVICE);
508 if (dma_mapping_error(jrdev, dma_addr_in)) {
509 dev_err(jrdev, "unable to map key input memory\n");
510 kfree(desc);
511 return -ENOMEM;
512 }
513 append_key(desc, dma_addr_in, authkeylen, CLASS_2 |
514 KEY_DEST_CLASS_REG);
515
516 /* Sets MDHA up into an HMAC-INIT */
517 append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT |
518 OP_ALG_AS_INIT);
519
520 /*
521 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
522 into both pads inside MDHA
523 */
524 append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
525 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
526
527 /*
528 * FIFO_STORE with the explicit split-key content store
529 * (0x26 output type)
530 */
531 dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
532 DMA_FROM_DEVICE);
533 if (dma_mapping_error(jrdev, dma_addr_out)) {
534 dev_err(jrdev, "unable to map key output memory\n");
535 kfree(desc);
536 return -ENOMEM;
537 }
538 append_fifo_store(desc, dma_addr_out, ctx->split_key_len,
539 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
540
541#ifdef DEBUG
542 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
543 DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1);
544 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
545 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
546#endif
547
548 result.err = 0;
549 init_completion(&result.completion);
550
551 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
552 if (!ret) {
553 /* in progress */
554 wait_for_completion_interruptible(&result.completion);
555 ret = result.err;
556#ifdef DEBUG
557 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
558 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
559 ctx->split_key_pad_len, 1);
560#endif
561 }
562
563 dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len,
564 DMA_FROM_DEVICE);
565 dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE);
566
567 kfree(desc);
568
569 return ret;
570} 464}
571 465
572static int aead_setkey(struct crypto_aead *aead, 466static int aead_setkey(struct crypto_aead *aead,
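
The new gen_split_aead_key() wrapper above delegates to a gen_split_key() that this patch moves into the shared key_gen.c/key_gen.h (not shown in this hunk). Judging from the call site alone, its prototype presumably looks like:

	u32 gen_split_key(struct device *jrdev, u8 *key_out,
			  int split_key_len, int split_key_pad_len,
			  const u8 *key_in, u32 authkeylen, u32 alg_op);
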
@@ -610,7 +504,7 @@ static int aead_setkey(struct crypto_aead *aead,
610 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 504 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
611#endif 505#endif
612 506
613 ret = gen_split_key(ctx, key, authkeylen); 507 ret = gen_split_aead_key(ctx, key, authkeylen);
614 if (ret) { 508 if (ret) {
615 goto badkey; 509 goto badkey;
616 } 510 }
@@ -757,72 +651,78 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
757 return ret; 651 return ret;
758} 652}
759 653
760struct link_tbl_entry {
761 u64 ptr;
762 u32 len;
763 u8 reserved;
764 u8 buf_pool_id;
765 u16 offset;
766};
767
768/* 654/*
769 * aead_edesc - s/w-extended aead descriptor 655 * aead_edesc - s/w-extended aead descriptor
770 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist 656 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
657 * @assoc_chained: if source is chained
771 * @src_nents: number of segments in input scatterlist 658 * @src_nents: number of segments in input scatterlist
659 * @src_chained: if source is chained
772 * @dst_nents: number of segments in output scatterlist 660 * @dst_nents: number of segments in output scatterlist
661 * @dst_chained: if destination is chained
773 * @iv_dma: dma address of iv for checking continuity and link table 662 * @iv_dma: dma address of iv for checking continuity and link table
774 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) 663 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
775 * @link_tbl_bytes: length of dma mapped link_tbl space 664 * @sec4_sg_bytes: length of dma mapped sec4_sg space
776 * @link_tbl_dma: bus physical mapped address of h/w link table 665 * @sec4_sg_dma: bus physical mapped address of h/w link table
777 * @hw_desc: the h/w job descriptor followed by any referenced link tables 666 * @hw_desc: the h/w job descriptor followed by any referenced link tables
778 */ 667 */
779struct aead_edesc { 668struct aead_edesc {
780 int assoc_nents; 669 int assoc_nents;
670 bool assoc_chained;
781 int src_nents; 671 int src_nents;
672 bool src_chained;
782 int dst_nents; 673 int dst_nents;
674 bool dst_chained;
783 dma_addr_t iv_dma; 675 dma_addr_t iv_dma;
784 int link_tbl_bytes; 676 int sec4_sg_bytes;
785 dma_addr_t link_tbl_dma; 677 dma_addr_t sec4_sg_dma;
786 struct link_tbl_entry *link_tbl; 678 struct sec4_sg_entry *sec4_sg;
787 u32 hw_desc[0]; 679 u32 hw_desc[0];
788}; 680};
789 681
790/* 682/*
791 * ablkcipher_edesc - s/w-extended ablkcipher descriptor 683 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
792 * @src_nents: number of segments in input scatterlist 684 * @src_nents: number of segments in input scatterlist
685 * @src_chained: if source is chained
793 * @dst_nents: number of segments in output scatterlist 686 * @dst_nents: number of segments in output scatterlist
687 * @dst_chained: if destination is chained
794 * @iv_dma: dma address of iv for checking continuity and link table 688 * @iv_dma: dma address of iv for checking continuity and link table
795 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) 689 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
796 * @link_tbl_bytes: length of dma mapped link_tbl space 690 * @sec4_sg_bytes: length of dma mapped sec4_sg space
797 * @link_tbl_dma: bus physical mapped address of h/w link table 691 * @sec4_sg_dma: bus physical mapped address of h/w link table
798 * @hw_desc: the h/w job descriptor followed by any referenced link tables 692 * @hw_desc: the h/w job descriptor followed by any referenced link tables
799 */ 693 */
800struct ablkcipher_edesc { 694struct ablkcipher_edesc {
801 int src_nents; 695 int src_nents;
696 bool src_chained;
802 int dst_nents; 697 int dst_nents;
698 bool dst_chained;
803 dma_addr_t iv_dma; 699 dma_addr_t iv_dma;
804 int link_tbl_bytes; 700 int sec4_sg_bytes;
805 dma_addr_t link_tbl_dma; 701 dma_addr_t sec4_sg_dma;
806 struct link_tbl_entry *link_tbl; 702 struct sec4_sg_entry *sec4_sg;
807 u32 hw_desc[0]; 703 u32 hw_desc[0];
808}; 704};
809 705
810static void caam_unmap(struct device *dev, struct scatterlist *src, 706static void caam_unmap(struct device *dev, struct scatterlist *src,
811 struct scatterlist *dst, int src_nents, int dst_nents, 707 struct scatterlist *dst, int src_nents,
812 dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma, 708 bool src_chained, int dst_nents, bool dst_chained,
813 int link_tbl_bytes) 709 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
710 int sec4_sg_bytes)
814{ 711{
815 if (unlikely(dst != src)) { 712 if (dst != src) {
816 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); 713 dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
817 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); 714 src_chained);
715 dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
716 dst_chained);
818 } else { 717 } else {
819 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); 718 dma_unmap_sg_chained(dev, src, src_nents ? : 1,
719 DMA_BIDIRECTIONAL, src_chained);
820 } 720 }
821 721
822 if (iv_dma) 722 if (iv_dma)
823 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); 723 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
824 if (link_tbl_bytes) 724 if (sec4_sg_bytes)
825 dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes, 725 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
826 DMA_TO_DEVICE); 726 DMA_TO_DEVICE);
827} 727}
828 728
@@ -833,12 +733,13 @@ static void aead_unmap(struct device *dev,
833 struct crypto_aead *aead = crypto_aead_reqtfm(req); 733 struct crypto_aead *aead = crypto_aead_reqtfm(req);
834 int ivsize = crypto_aead_ivsize(aead); 734 int ivsize = crypto_aead_ivsize(aead);
835 735
836 dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE); 736 dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
737 DMA_TO_DEVICE, edesc->assoc_chained);
837 738
838 caam_unmap(dev, req->src, req->dst, 739 caam_unmap(dev, req->src, req->dst,
839 edesc->src_nents, edesc->dst_nents, 740 edesc->src_nents, edesc->src_chained, edesc->dst_nents,
840 edesc->iv_dma, ivsize, edesc->link_tbl_dma, 741 edesc->dst_chained, edesc->iv_dma, ivsize,
841 edesc->link_tbl_bytes); 742 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
842} 743}
843 744
844static void ablkcipher_unmap(struct device *dev, 745static void ablkcipher_unmap(struct device *dev,
@@ -849,9 +750,9 @@ static void ablkcipher_unmap(struct device *dev,
849 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 750 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
850 751
851 caam_unmap(dev, req->src, req->dst, 752 caam_unmap(dev, req->src, req->dst,
852 edesc->src_nents, edesc->dst_nents, 753 edesc->src_nents, edesc->src_chained, edesc->dst_nents,
853 edesc->iv_dma, ivsize, edesc->link_tbl_dma, 754 edesc->dst_chained, edesc->iv_dma, ivsize,
854 edesc->link_tbl_bytes); 755 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
855} 756}
856 757
857static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, 758static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
@@ -942,7 +843,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
942 sizeof(struct iphdr) + req->assoclen + 843 sizeof(struct iphdr) + req->assoclen +
943 ((req->cryptlen > 1500) ? 1500 : req->cryptlen) + 844 ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
944 ctx->authsize + 36, 1); 845 ctx->authsize + 36, 1);
945 if (!err && edesc->link_tbl_bytes) { 846 if (!err && edesc->sec4_sg_bytes) {
946 struct scatterlist *sg = sg_last(req->src, edesc->src_nents); 847 struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
947 print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ", 848 print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
948 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), 849 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
@@ -1026,50 +927,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1026 ablkcipher_request_complete(req, err); 927 ablkcipher_request_complete(req, err);
1027} 928}
1028 929
1029static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr,
1030 dma_addr_t dma, u32 len, u32 offset)
1031{
1032 link_tbl_ptr->ptr = dma;
1033 link_tbl_ptr->len = len;
1034 link_tbl_ptr->reserved = 0;
1035 link_tbl_ptr->buf_pool_id = 0;
1036 link_tbl_ptr->offset = offset;
1037#ifdef DEBUG
1038 print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ",
1039 DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr,
1040 sizeof(struct link_tbl_entry), 1);
1041#endif
1042}
1043
1044/*
1045 * convert scatterlist to h/w link table format
1046 * but does not have final bit; instead, returns last entry
1047 */
1048static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg,
1049 int sg_count, struct link_tbl_entry
1050 *link_tbl_ptr, u32 offset)
1051{
1052 while (sg_count) {
1053 sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg),
1054 sg_dma_len(sg), offset);
1055 link_tbl_ptr++;
1056 sg = sg_next(sg);
1057 sg_count--;
1058 }
1059 return link_tbl_ptr - 1;
1060}
1061
1062/*
1063 * convert scatterlist to h/w link table format
1064 * scatterlist must have been previously dma mapped
1065 */
1066static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count,
1067 struct link_tbl_entry *link_tbl_ptr, u32 offset)
1068{
1069 link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset);
1070 link_tbl_ptr->len |= 0x40000000;
1071}
1072
1073/* 930/*
1074 * Fill in aead job descriptor 931 * Fill in aead job descriptor
1075 */ 932 */
@@ -1085,7 +942,7 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
1085 u32 *desc = edesc->hw_desc; 942 u32 *desc = edesc->hw_desc;
1086 u32 out_options = 0, in_options; 943 u32 out_options = 0, in_options;
1087 dma_addr_t dst_dma, src_dma; 944 dma_addr_t dst_dma, src_dma;
1088 int len, link_tbl_index = 0; 945 int len, sec4_sg_index = 0;
1089 946
1090#ifdef DEBUG 947#ifdef DEBUG
1091 debug("assoclen %d cryptlen %d authsize %d\n", 948 debug("assoclen %d cryptlen %d authsize %d\n",
@@ -1111,9 +968,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
1111 src_dma = sg_dma_address(req->assoc); 968 src_dma = sg_dma_address(req->assoc);
1112 in_options = 0; 969 in_options = 0;
1113 } else { 970 } else {
1114 src_dma = edesc->link_tbl_dma; 971 src_dma = edesc->sec4_sg_dma;
1115 link_tbl_index += (edesc->assoc_nents ? : 1) + 1 + 972 sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
1116 (edesc->src_nents ? : 1); 973 (edesc->src_nents ? : 1);
1117 in_options = LDST_SGF; 974 in_options = LDST_SGF;
1118 } 975 }
1119 if (encrypt) 976 if (encrypt)
@@ -1127,7 +984,7 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
1127 if (all_contig) { 984 if (all_contig) {
1128 dst_dma = sg_dma_address(req->src); 985 dst_dma = sg_dma_address(req->src);
1129 } else { 986 } else {
1130 dst_dma = src_dma + sizeof(struct link_tbl_entry) * 987 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
1131 ((edesc->assoc_nents ? : 1) + 1); 988 ((edesc->assoc_nents ? : 1) + 1);
1132 out_options = LDST_SGF; 989 out_options = LDST_SGF;
1133 } 990 }
@@ -1135,9 +992,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
1135 if (!edesc->dst_nents) { 992 if (!edesc->dst_nents) {
1136 dst_dma = sg_dma_address(req->dst); 993 dst_dma = sg_dma_address(req->dst);
1137 } else { 994 } else {
1138 dst_dma = edesc->link_tbl_dma + 995 dst_dma = edesc->sec4_sg_dma +
1139 link_tbl_index * 996 sec4_sg_index *
1140 sizeof(struct link_tbl_entry); 997 sizeof(struct sec4_sg_entry);
1141 out_options = LDST_SGF; 998 out_options = LDST_SGF;
1142 } 999 }
1143 } 1000 }
@@ -1163,7 +1020,7 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1163 u32 *desc = edesc->hw_desc; 1020 u32 *desc = edesc->hw_desc;
1164 u32 out_options = 0, in_options; 1021 u32 out_options = 0, in_options;
1165 dma_addr_t dst_dma, src_dma; 1022 dma_addr_t dst_dma, src_dma;
1166 int len, link_tbl_index = 0; 1023 int len, sec4_sg_index = 0;
1167 1024
1168#ifdef DEBUG 1025#ifdef DEBUG
1169 debug("assoclen %d cryptlen %d authsize %d\n", 1026 debug("assoclen %d cryptlen %d authsize %d\n",
@@ -1188,8 +1045,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1188 src_dma = sg_dma_address(req->assoc); 1045 src_dma = sg_dma_address(req->assoc);
1189 in_options = 0; 1046 in_options = 0;
1190 } else { 1047 } else {
1191 src_dma = edesc->link_tbl_dma; 1048 src_dma = edesc->sec4_sg_dma;
1192 link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents; 1049 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
1193 in_options = LDST_SGF; 1050 in_options = LDST_SGF;
1194 } 1051 }
1195 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + 1052 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
@@ -1199,13 +1056,13 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1199 dst_dma = edesc->iv_dma; 1056 dst_dma = edesc->iv_dma;
1200 } else { 1057 } else {
1201 if (likely(req->src == req->dst)) { 1058 if (likely(req->src == req->dst)) {
1202 dst_dma = src_dma + sizeof(struct link_tbl_entry) * 1059 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
1203 edesc->assoc_nents; 1060 edesc->assoc_nents;
1204 out_options = LDST_SGF; 1061 out_options = LDST_SGF;
1205 } else { 1062 } else {
1206 dst_dma = edesc->link_tbl_dma + 1063 dst_dma = edesc->sec4_sg_dma +
1207 link_tbl_index * 1064 sec4_sg_index *
1208 sizeof(struct link_tbl_entry); 1065 sizeof(struct sec4_sg_entry);
1209 out_options = LDST_SGF; 1066 out_options = LDST_SGF;
1210 } 1067 }
1211 } 1068 }
@@ -1226,7 +1083,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1226 u32 *desc = edesc->hw_desc; 1083 u32 *desc = edesc->hw_desc;
1227 u32 out_options = 0, in_options; 1084 u32 out_options = 0, in_options;
1228 dma_addr_t dst_dma, src_dma; 1085 dma_addr_t dst_dma, src_dma;
1229 int len, link_tbl_index = 0; 1086 int len, sec4_sg_index = 0;
1230 1087
1231#ifdef DEBUG 1088#ifdef DEBUG
1232 print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", 1089 print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
@@ -1244,8 +1101,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1244 src_dma = edesc->iv_dma; 1101 src_dma = edesc->iv_dma;
1245 in_options = 0; 1102 in_options = 0;
1246 } else { 1103 } else {
1247 src_dma = edesc->link_tbl_dma; 1104 src_dma = edesc->sec4_sg_dma;
1248 link_tbl_index += (iv_contig ? 0 : 1) + edesc->src_nents; 1105 sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
1249 in_options = LDST_SGF; 1106 in_options = LDST_SGF;
1250 } 1107 }
1251 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); 1108 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
@@ -1254,16 +1111,16 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1254 if (!edesc->src_nents && iv_contig) { 1111 if (!edesc->src_nents && iv_contig) {
1255 dst_dma = sg_dma_address(req->src); 1112 dst_dma = sg_dma_address(req->src);
1256 } else { 1113 } else {
1257 dst_dma = edesc->link_tbl_dma + 1114 dst_dma = edesc->sec4_sg_dma +
1258 sizeof(struct link_tbl_entry); 1115 sizeof(struct sec4_sg_entry);
1259 out_options = LDST_SGF; 1116 out_options = LDST_SGF;
1260 } 1117 }
1261 } else { 1118 } else {
1262 if (!edesc->dst_nents) { 1119 if (!edesc->dst_nents) {
1263 dst_dma = sg_dma_address(req->dst); 1120 dst_dma = sg_dma_address(req->dst);
1264 } else { 1121 } else {
1265 dst_dma = edesc->link_tbl_dma + 1122 dst_dma = edesc->sec4_sg_dma +
1266 link_tbl_index * sizeof(struct link_tbl_entry); 1123 sec4_sg_index * sizeof(struct sec4_sg_entry);
1267 out_options = LDST_SGF; 1124 out_options = LDST_SGF;
1268 } 1125 }
1269 } 1126 }
@@ -1271,28 +1128,6 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1271} 1128}
1272 1129
1273/* 1130/*
1274 * derive number of elements in scatterlist
1275 */
1276static int sg_count(struct scatterlist *sg_list, int nbytes)
1277{
1278 struct scatterlist *sg = sg_list;
1279 int sg_nents = 0;
1280
1281 while (nbytes > 0) {
1282 sg_nents++;
1283 nbytes -= sg->length;
1284 if (!sg_is_last(sg) && (sg + 1)->length == 0)
1285 BUG(); /* Not support chaining */
1286 sg = scatterwalk_sg_next(sg);
1287 }
1288
1289 if (likely(sg_nents == 1))
1290 return 0;
1291
1292 return sg_nents;
1293}
1294
1295/*
1296 * allocate and map the aead extended descriptor 1131 * allocate and map the aead extended descriptor
1297 */ 1132 */
1298static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, 1133static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
@@ -1308,25 +1143,26 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1308 dma_addr_t iv_dma = 0; 1143 dma_addr_t iv_dma = 0;
1309 int sgc; 1144 int sgc;
1310 bool all_contig = true; 1145 bool all_contig = true;
1146 bool assoc_chained = false, src_chained = false, dst_chained = false;
1311 int ivsize = crypto_aead_ivsize(aead); 1147 int ivsize = crypto_aead_ivsize(aead);
1312 int link_tbl_index, link_tbl_len = 0, link_tbl_bytes; 1148 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1313 1149
1314 assoc_nents = sg_count(req->assoc, req->assoclen); 1150 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1315 src_nents = sg_count(req->src, req->cryptlen); 1151 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1316 1152
1317 if (unlikely(req->dst != req->src)) 1153 if (unlikely(req->dst != req->src))
1318 dst_nents = sg_count(req->dst, req->cryptlen); 1154 dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
1319 1155
1320 sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1, 1156 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1321 DMA_BIDIRECTIONAL); 1157 DMA_BIDIRECTIONAL, assoc_chained);
1322 if (likely(req->src == req->dst)) { 1158 if (likely(req->src == req->dst)) {
1323 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1159 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1324 DMA_BIDIRECTIONAL); 1160 DMA_BIDIRECTIONAL, src_chained);
1325 } else { 1161 } else {
1326 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1162 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1327 DMA_TO_DEVICE); 1163 DMA_TO_DEVICE, src_chained);
1328 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, 1164 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1329 DMA_FROM_DEVICE); 1165 DMA_FROM_DEVICE, dst_chained);
1330 } 1166 }
1331 1167
1332 /* Check if data are contiguous */ 1168 /* Check if data are contiguous */
@@ -1337,50 +1173,53 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1337 all_contig = false; 1173 all_contig = false;
1338 assoc_nents = assoc_nents ? : 1; 1174 assoc_nents = assoc_nents ? : 1;
1339 src_nents = src_nents ? : 1; 1175 src_nents = src_nents ? : 1;
1340 link_tbl_len = assoc_nents + 1 + src_nents; 1176 sec4_sg_len = assoc_nents + 1 + src_nents;
1341 } 1177 }
1342 link_tbl_len += dst_nents; 1178 sec4_sg_len += dst_nents;
1343 1179
1344 link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry); 1180 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1345 1181
1346 /* allocate space for base edesc and hw desc commands, link tables */ 1182 /* allocate space for base edesc and hw desc commands, link tables */
1347 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + 1183 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1348 link_tbl_bytes, GFP_DMA | flags); 1184 sec4_sg_bytes, GFP_DMA | flags);
1349 if (!edesc) { 1185 if (!edesc) {
1350 dev_err(jrdev, "could not allocate extended descriptor\n"); 1186 dev_err(jrdev, "could not allocate extended descriptor\n");
1351 return ERR_PTR(-ENOMEM); 1187 return ERR_PTR(-ENOMEM);
1352 } 1188 }
1353 1189
1354 edesc->assoc_nents = assoc_nents; 1190 edesc->assoc_nents = assoc_nents;
1191 edesc->assoc_chained = assoc_chained;
1355 edesc->src_nents = src_nents; 1192 edesc->src_nents = src_nents;
1193 edesc->src_chained = src_chained;
1356 edesc->dst_nents = dst_nents; 1194 edesc->dst_nents = dst_nents;
1195 edesc->dst_chained = dst_chained;
1357 edesc->iv_dma = iv_dma; 1196 edesc->iv_dma = iv_dma;
1358 edesc->link_tbl_bytes = link_tbl_bytes; 1197 edesc->sec4_sg_bytes = sec4_sg_bytes;
1359 edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) + 1198 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1360 desc_bytes; 1199 desc_bytes;
1361 edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, 1200 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1362 link_tbl_bytes, DMA_TO_DEVICE); 1201 sec4_sg_bytes, DMA_TO_DEVICE);
1363 *all_contig_ptr = all_contig; 1202 *all_contig_ptr = all_contig;
1364 1203
1365 link_tbl_index = 0; 1204 sec4_sg_index = 0;
1366 if (!all_contig) { 1205 if (!all_contig) {
1367 sg_to_link_tbl(req->assoc, 1206 sg_to_sec4_sg(req->assoc,
1368 (assoc_nents ? : 1), 1207 (assoc_nents ? : 1),
1369 edesc->link_tbl + 1208 edesc->sec4_sg +
1370 link_tbl_index, 0); 1209 sec4_sg_index, 0);
1371 link_tbl_index += assoc_nents ? : 1; 1210 sec4_sg_index += assoc_nents ? : 1;
1372 sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, 1211 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1373 iv_dma, ivsize, 0); 1212 iv_dma, ivsize, 0);
1374 link_tbl_index += 1; 1213 sec4_sg_index += 1;
1375 sg_to_link_tbl_last(req->src, 1214 sg_to_sec4_sg_last(req->src,
1376 (src_nents ? : 1), 1215 (src_nents ? : 1),
1377 edesc->link_tbl + 1216 edesc->sec4_sg +
1378 link_tbl_index, 0); 1217 sec4_sg_index, 0);
1379 link_tbl_index += src_nents ? : 1; 1218 sec4_sg_index += src_nents ? : 1;
1380 } 1219 }
1381 if (dst_nents) { 1220 if (dst_nents) {
1382 sg_to_link_tbl_last(req->dst, dst_nents, 1221 sg_to_sec4_sg_last(req->dst, dst_nents,
1383 edesc->link_tbl + link_tbl_index, 0); 1222 edesc->sec4_sg + sec4_sg_index, 0);
1384 } 1223 }
1385 1224
1386 return edesc; 1225 return edesc;
@@ -1487,24 +1326,25 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1487 int sgc; 1326 int sgc;
1488 u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG; 1327 u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
1489 int ivsize = crypto_aead_ivsize(aead); 1328 int ivsize = crypto_aead_ivsize(aead);
1490 int link_tbl_index, link_tbl_len = 0, link_tbl_bytes; 1329 bool assoc_chained = false, src_chained = false, dst_chained = false;
1330 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1491 1331
1492 assoc_nents = sg_count(req->assoc, req->assoclen); 1332 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1493 src_nents = sg_count(req->src, req->cryptlen); 1333 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1494 1334
1495 if (unlikely(req->dst != req->src)) 1335 if (unlikely(req->dst != req->src))
1496 dst_nents = sg_count(req->dst, req->cryptlen); 1336 dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
1497 1337
1498 sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1, 1338 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1499 DMA_BIDIRECTIONAL); 1339 DMA_BIDIRECTIONAL, assoc_chained);
1500 if (likely(req->src == req->dst)) { 1340 if (likely(req->src == req->dst)) {
1501 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1341 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1502 DMA_BIDIRECTIONAL); 1342 DMA_BIDIRECTIONAL, src_chained);
1503 } else { 1343 } else {
1504 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1344 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1505 DMA_TO_DEVICE); 1345 DMA_TO_DEVICE, src_chained);
1506 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, 1346 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1507 DMA_FROM_DEVICE); 1347 DMA_FROM_DEVICE, dst_chained);
1508 } 1348 }
1509 1349
1510 /* Check if data are contiguous */ 1350 /* Check if data are contiguous */
@@ -1516,58 +1356,61 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1516 contig &= ~GIV_DST_CONTIG; 1356 contig &= ~GIV_DST_CONTIG;
1517 if (unlikely(req->src != req->dst)) { 1357 if (unlikely(req->src != req->dst)) {
1518 dst_nents = dst_nents ? : 1; 1358 dst_nents = dst_nents ? : 1;
1519 link_tbl_len += 1; 1359 sec4_sg_len += 1;
1520 } 1360 }
1521 if (!(contig & GIV_SRC_CONTIG)) { 1361 if (!(contig & GIV_SRC_CONTIG)) {
1522 assoc_nents = assoc_nents ? : 1; 1362 assoc_nents = assoc_nents ? : 1;
1523 src_nents = src_nents ? : 1; 1363 src_nents = src_nents ? : 1;
1524 link_tbl_len += assoc_nents + 1 + src_nents; 1364 sec4_sg_len += assoc_nents + 1 + src_nents;
1525 if (likely(req->src == req->dst)) 1365 if (likely(req->src == req->dst))
1526 contig &= ~GIV_DST_CONTIG; 1366 contig &= ~GIV_DST_CONTIG;
1527 } 1367 }
1528 link_tbl_len += dst_nents; 1368 sec4_sg_len += dst_nents;
1529 1369
1530 link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry); 1370 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1531 1371
1532 /* allocate space for base edesc and hw desc commands, link tables */ 1372 /* allocate space for base edesc and hw desc commands, link tables */
1533 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + 1373 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1534 link_tbl_bytes, GFP_DMA | flags); 1374 sec4_sg_bytes, GFP_DMA | flags);
1535 if (!edesc) { 1375 if (!edesc) {
1536 dev_err(jrdev, "could not allocate extended descriptor\n"); 1376 dev_err(jrdev, "could not allocate extended descriptor\n");
1537 return ERR_PTR(-ENOMEM); 1377 return ERR_PTR(-ENOMEM);
1538 } 1378 }
1539 1379
1540 edesc->assoc_nents = assoc_nents; 1380 edesc->assoc_nents = assoc_nents;
1381 edesc->assoc_chained = assoc_chained;
1541 edesc->src_nents = src_nents; 1382 edesc->src_nents = src_nents;
1383 edesc->src_chained = src_chained;
1542 edesc->dst_nents = dst_nents; 1384 edesc->dst_nents = dst_nents;
1385 edesc->dst_chained = dst_chained;
1543 edesc->iv_dma = iv_dma; 1386 edesc->iv_dma = iv_dma;
1544 edesc->link_tbl_bytes = link_tbl_bytes; 1387 edesc->sec4_sg_bytes = sec4_sg_bytes;
1545 edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) + 1388 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1546 desc_bytes; 1389 desc_bytes;
1547 edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, 1390 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1548 link_tbl_bytes, DMA_TO_DEVICE); 1391 sec4_sg_bytes, DMA_TO_DEVICE);
1549 *contig_ptr = contig; 1392 *contig_ptr = contig;
1550 1393
1551 link_tbl_index = 0; 1394 sec4_sg_index = 0;
1552 if (!(contig & GIV_SRC_CONTIG)) { 1395 if (!(contig & GIV_SRC_CONTIG)) {
1553 sg_to_link_tbl(req->assoc, assoc_nents, 1396 sg_to_sec4_sg(req->assoc, assoc_nents,
1554 edesc->link_tbl + 1397 edesc->sec4_sg +
1555 link_tbl_index, 0); 1398 sec4_sg_index, 0);
1556 link_tbl_index += assoc_nents; 1399 sec4_sg_index += assoc_nents;
1557 sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, 1400 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1558 iv_dma, ivsize, 0); 1401 iv_dma, ivsize, 0);
1559 link_tbl_index += 1; 1402 sec4_sg_index += 1;
1560 sg_to_link_tbl_last(req->src, src_nents, 1403 sg_to_sec4_sg_last(req->src, src_nents,
1561 edesc->link_tbl + 1404 edesc->sec4_sg +
1562 link_tbl_index, 0); 1405 sec4_sg_index, 0);
1563 link_tbl_index += src_nents; 1406 sec4_sg_index += src_nents;
1564 } 1407 }
1565 if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) { 1408 if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
1566 sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, 1409 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1567 iv_dma, ivsize, 0); 1410 iv_dma, ivsize, 0);
1568 link_tbl_index += 1; 1411 sec4_sg_index += 1;
1569 sg_to_link_tbl_last(req->dst, dst_nents, 1412 sg_to_sec4_sg_last(req->dst, dst_nents,
1570 edesc->link_tbl + link_tbl_index, 0); 1413 edesc->sec4_sg + sec4_sg_index, 0);
1571 } 1414 }
1572 1415
1573 return edesc; 1416 return edesc;
@@ -1633,27 +1476,28 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1633 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1476 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1634 CRYPTO_TFM_REQ_MAY_SLEEP)) ? 1477 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
1635 GFP_KERNEL : GFP_ATOMIC; 1478 GFP_KERNEL : GFP_ATOMIC;
1636 int src_nents, dst_nents = 0, link_tbl_bytes; 1479 int src_nents, dst_nents = 0, sec4_sg_bytes;
1637 struct ablkcipher_edesc *edesc; 1480 struct ablkcipher_edesc *edesc;
1638 dma_addr_t iv_dma = 0; 1481 dma_addr_t iv_dma = 0;
1639 bool iv_contig = false; 1482 bool iv_contig = false;
1640 int sgc; 1483 int sgc;
1641 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1484 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1642 int link_tbl_index; 1485 bool src_chained = false, dst_chained = false;
1486 int sec4_sg_index;
1643 1487
1644 src_nents = sg_count(req->src, req->nbytes); 1488 src_nents = sg_count(req->src, req->nbytes, &src_chained);
1645 1489
1646 if (unlikely(req->dst != req->src)) 1490 if (req->dst != req->src)
1647 dst_nents = sg_count(req->dst, req->nbytes); 1491 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
1648 1492
1649 if (likely(req->src == req->dst)) { 1493 if (likely(req->src == req->dst)) {
1650 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1494 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1651 DMA_BIDIRECTIONAL); 1495 DMA_BIDIRECTIONAL, src_chained);
1652 } else { 1496 } else {
1653 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1497 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1654 DMA_TO_DEVICE); 1498 DMA_TO_DEVICE, src_chained);
1655 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, 1499 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1656 DMA_FROM_DEVICE); 1500 DMA_FROM_DEVICE, dst_chained);
1657 } 1501 }
1658 1502
1659 /* 1503 /*
@@ -1665,44 +1509,46 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1665 iv_contig = true; 1509 iv_contig = true;
1666 else 1510 else
1667 src_nents = src_nents ? : 1; 1511 src_nents = src_nents ? : 1;
1668 link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) * 1512 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
1669 sizeof(struct link_tbl_entry); 1513 sizeof(struct sec4_sg_entry);
1670 1514
1671 /* allocate space for base edesc and hw desc commands, link tables */ 1515 /* allocate space for base edesc and hw desc commands, link tables */
1672 edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes + 1516 edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
1673 link_tbl_bytes, GFP_DMA | flags); 1517 sec4_sg_bytes, GFP_DMA | flags);
1674 if (!edesc) { 1518 if (!edesc) {
1675 dev_err(jrdev, "could not allocate extended descriptor\n"); 1519 dev_err(jrdev, "could not allocate extended descriptor\n");
1676 return ERR_PTR(-ENOMEM); 1520 return ERR_PTR(-ENOMEM);
1677 } 1521 }
1678 1522
1679 edesc->src_nents = src_nents; 1523 edesc->src_nents = src_nents;
1524 edesc->src_chained = src_chained;
1680 edesc->dst_nents = dst_nents; 1525 edesc->dst_nents = dst_nents;
1681 edesc->link_tbl_bytes = link_tbl_bytes; 1526 edesc->dst_chained = dst_chained;
1682 edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) + 1527 edesc->sec4_sg_bytes = sec4_sg_bytes;
1683 desc_bytes; 1528 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
1529 desc_bytes;
1684 1530
1685 link_tbl_index = 0; 1531 sec4_sg_index = 0;
1686 if (!iv_contig) { 1532 if (!iv_contig) {
1687 sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0); 1533 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1688 sg_to_link_tbl_last(req->src, src_nents, 1534 sg_to_sec4_sg_last(req->src, src_nents,
1689 edesc->link_tbl + 1, 0); 1535 edesc->sec4_sg + 1, 0);
1690 link_tbl_index += 1 + src_nents; 1536 sec4_sg_index += 1 + src_nents;
1691 } 1537 }
1692 1538
1693 if (unlikely(dst_nents)) { 1539 if (dst_nents) {
1694 sg_to_link_tbl_last(req->dst, dst_nents, 1540 sg_to_sec4_sg_last(req->dst, dst_nents,
1695 edesc->link_tbl + link_tbl_index, 0); 1541 edesc->sec4_sg + sec4_sg_index, 0);
1696 } 1542 }
1697 1543
1698 edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, 1544 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1699 link_tbl_bytes, DMA_TO_DEVICE); 1545 sec4_sg_bytes, DMA_TO_DEVICE);
1700 edesc->iv_dma = iv_dma; 1546 edesc->iv_dma = iv_dma;
1701 1547
1702#ifdef DEBUG 1548#ifdef DEBUG
1703 print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ", 1549 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ",
1704 DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl, 1550 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1705 link_tbl_bytes, 1); 1551 sec4_sg_bytes, 1);
1706#endif 1552#endif
1707 1553
1708 *iv_contig_out = iv_contig; 1554 *iv_contig_out = iv_contig;
@@ -2227,7 +2073,7 @@ static int caam_cra_init(struct crypto_tfm *tfm)
2227 * distribute tfms across job rings to ensure in-order 2073 * distribute tfms across job rings to ensure in-order
2228 * crypto request processing per tfm 2074 * crypto request processing per tfm
2229 */ 2075 */
2230 ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi]; 2076 ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];
2231 2077
2232 /* copy descriptor header template value */ 2078 /* copy descriptor header template value */
2233 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; 2079 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
@@ -2264,7 +2110,6 @@ static void __exit caam_algapi_exit(void)
2264 struct device *ctrldev; 2110 struct device *ctrldev;
2265 struct caam_drv_private *priv; 2111 struct caam_drv_private *priv;
2266 struct caam_crypto_alg *t_alg, *n; 2112 struct caam_crypto_alg *t_alg, *n;
2267 int i, err;
2268 2113
2269 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 2114 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2270 if (!dev_node) { 2115 if (!dev_node) {
@@ -2289,13 +2134,6 @@ static void __exit caam_algapi_exit(void)
2289 list_del(&t_alg->entry); 2134 list_del(&t_alg->entry);
2290 kfree(t_alg); 2135 kfree(t_alg);
2291 } 2136 }
2292
2293 for (i = 0; i < priv->total_jobrs; i++) {
2294 err = caam_jr_deregister(priv->algapi_jr[i]);
2295 if (err < 0)
2296 break;
2297 }
2298 kfree(priv->algapi_jr);
2299} 2137}
2300 2138
2301static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, 2139static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
@@ -2348,7 +2186,7 @@ static int __init caam_algapi_init(void)
2348{ 2186{
2349 struct device_node *dev_node; 2187 struct device_node *dev_node;
2350 struct platform_device *pdev; 2188 struct platform_device *pdev;
2351 struct device *ctrldev, **jrdev; 2189 struct device *ctrldev;
2352 struct caam_drv_private *priv; 2190 struct caam_drv_private *priv;
2353 int i = 0, err = 0; 2191 int i = 0, err = 0;
2354 2192
@@ -2369,24 +2207,6 @@ static int __init caam_algapi_init(void)
2369 2207
2370 INIT_LIST_HEAD(&priv->alg_list); 2208 INIT_LIST_HEAD(&priv->alg_list);
2371 2209
2372 jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL);
2373 if (!jrdev)
2374 return -ENOMEM;
2375
2376 for (i = 0; i < priv->total_jobrs; i++) {
2377 err = caam_jr_register(ctrldev, &jrdev[i]);
2378 if (err < 0)
2379 break;
2380 }
2381 if (err < 0 && i == 0) {
2382 dev_err(ctrldev, "algapi error in job ring registration: %d\n",
2383 err);
2384 kfree(jrdev);
2385 return err;
2386 }
2387
2388 priv->num_jrs_for_algapi = i;
2389 priv->algapi_jr = jrdev;
2390 atomic_set(&priv->tfm_count, -1); 2210 atomic_set(&priv->tfm_count, -1);
2391 2211
2392 /* register crypto algorithms the device supports */ 2212 /* register crypto algorithms the device supports */
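
The hunks above replace direct dma_map_sg()/dma_unmap_sg() calls with the chained-aware helpers from sg_sw_sec4.h, because dma_map_sg() cannot cross a scatterlist chain link in a single call. A minimal sketch of what such a helper can look like, assuming the walk uses scatterwalk_sg_next() as in the removed sg_count() above (the shipped implementation may differ in detail):

static inline int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
				     unsigned int nents,
				     enum dma_data_direction dir, bool chained)
{
	if (unlikely(chained)) {
		int i;

		/* chained lists have to be mapped one entry at a time */
		for (i = 0; i < nents; i++) {
			dma_map_sg(dev, sg, 1, dir);
			sg = scatterwalk_sg_next(sg);
		}
	} else {
		dma_map_sg(dev, sg, nents, dir);
	}
	return nents;
}

dma_unmap_sg_chained() mirrors the same shape around dma_unmap_sg().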
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
new file mode 100644
index 000000000000..895aaf2bca92
--- /dev/null
+++ b/drivers/crypto/caam/caamhash.c
@@ -0,0 +1,1878 @@
1/*
2 * caam - Freescale FSL CAAM support for ahash functions of crypto API
3 *
4 * Copyright 2011 Freescale Semiconductor, Inc.
5 *
6 * Based on caamalg.c crypto API driver.
7 *
8 * relationship of digest job descriptor or first job descriptor after init to
9 * shared descriptors:
10 *
11 * --------------- ---------------
12 * | JobDesc #1 |-------------------->| ShareDesc |
13 * | *(packet 1) | | (hashKey) |
14 * --------------- | (operation) |
15 * ---------------
16 *
17 * relationship of subsequent job descriptors to shared descriptors:
18 *
19 * --------------- ---------------
20 * | JobDesc #2 |-------------------->| ShareDesc |
21 * | *(packet 2) | |------------->| (hashKey) |
22 * --------------- | |-------->| (operation) |
23 * . | | | (load ctx2) |
24 * . | | ---------------
25 * --------------- | |
26 * | JobDesc #3 |------| |
27 * | *(packet 3) | |
28 * --------------- |
29 * . |
30 * . |
31 * --------------- |
32 * | JobDesc #4 |------------
33 * | *(packet 4) |
34 * ---------------
35 *
36 * The SharedDesc never changes for a connection unless rekeyed, but
37 * each packet will likely be in a different place. So all we need
38 * to know to process the packet is where the input is, where the
39 * output goes, and what context we want to process with. Context is
40 * in the SharedDesc, packet references in the JobDesc.
41 *
42 * So, a job desc looks like:
43 *
44 * ---------------------
45 * | Header |
46 * | ShareDesc Pointer |
47 * | SEQ_OUT_PTR |
48 * | (output buffer) |
49 * | (output length) |
50 * | SEQ_IN_PTR |
51 * | (input buffer) |
52 * | (input length) |
53 * ---------------------
54 */
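
In code, that layout is produced with the descriptor-construction helpers used throughout this file; an illustrative fragment (ptr, src_dma, dst_dma and in_len are placeholder names, the calls are the ones used below):

	u32 *desc = edesc->hw_desc;

	/* Header + ShareDesc pointer */
	init_job_desc_shared(desc, ptr, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);
	/* SEQ_IN_PTR: input buffer and length */
	append_seq_in_ptr(desc, src_dma, in_len, LDST_SGF);
	/* SEQ_OUT_PTR: output buffer and length */
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);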
55
56#include "compat.h"
57
58#include "regs.h"
59#include "intern.h"
60#include "desc_constr.h"
61#include "jr.h"
62#include "error.h"
63#include "sg_sw_sec4.h"
64#include "key_gen.h"
65
66#define CAAM_CRA_PRIORITY 3000
67
68/* max hash key is max split key size */
69#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
70
71#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
72#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
73
74/* length of descriptors text */
75#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
76
77#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
78#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
79#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
80#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
81#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
82#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
83
84#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
85 CAAM_MAX_HASH_KEY_SIZE)
86#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
87
 88/* caam context sizes for hashes: running digest + 8 bytes of message length */
89#define HASH_MSG_LEN 8
90#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
91
92#ifdef DEBUG
93/* for print_hex_dumps with line references */
94#define xstr(s) str(s)
95#define str(s) #s
96#define debug(format, arg...) printk(format, arg)
97#else
98#define debug(format, arg...)
99#endif
100
101/* ahash per-session context */
102struct caam_hash_ctx {
103 struct device *jrdev;
104 u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
105 u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
106 u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
107 u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
108 u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
109 dma_addr_t sh_desc_update_dma;
110 dma_addr_t sh_desc_update_first_dma;
111 dma_addr_t sh_desc_fin_dma;
112 dma_addr_t sh_desc_digest_dma;
113 dma_addr_t sh_desc_finup_dma;
114 u32 alg_type;
115 u32 alg_op;
116 u8 key[CAAM_MAX_HASH_KEY_SIZE];
117 dma_addr_t key_dma;
118 int ctx_len;
119 unsigned int split_key_len;
120 unsigned int split_key_pad_len;
121};
122
123/* ahash state */
124struct caam_hash_state {
125 dma_addr_t buf_dma;
126 dma_addr_t ctx_dma;
127 u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
128 int buflen_0;
129 u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
130 int buflen_1;
131 u8 caam_ctx[MAX_CTX_LEN];
132 int (*update)(struct ahash_request *req);
133 int (*final)(struct ahash_request *req);
134 int (*finup)(struct ahash_request *req);
135 int current_buf;
136};
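
buf_0/buf_1 form a ping-pong pair: one buffer feeds the sub-block residue into the current job while the other collects the residue for the next one. The selection idiom, as used by ahash_update_ctx() below:

	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;

	/* ... copy the new residue into next_buf, then swap roles ... */
	state->current_buf = !state->current_buf;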
137
138/* Common job descriptor seq in/out ptr routines */
139
140/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
141static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
142 struct caam_hash_state *state,
143 int ctx_len)
144{
145 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
146 ctx_len, DMA_FROM_DEVICE);
147 append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
148}
149
150/* Map req->result, and append seq_out_ptr command that points to it */
151static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
152 u8 *result, int digestsize)
153{
154 dma_addr_t dst_dma;
155
156 dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
157 append_seq_out_ptr(desc, dst_dma, digestsize, 0);
158
159 return dst_dma;
160}
161
162/* Map current buffer in state and put it in link table */
163static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
164 struct sec4_sg_entry *sec4_sg,
165 u8 *buf, int buflen)
166{
167 dma_addr_t buf_dma;
168
169 buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
170 dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
171
172 return buf_dma;
173}
174
175/* Map req->src and put it in link table */
176static inline void src_map_to_sec4_sg(struct device *jrdev,
177 struct scatterlist *src, int src_nents,
178 struct sec4_sg_entry *sec4_sg,
179 bool chained)
180{
181 dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
182 sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
183}
184
185/*
 186 * Only put the buffer in the link table if it contains data; the buffer
 187 * may have been used previously, in which case it still needs to be unmapped.
188 */
189static inline dma_addr_t
190try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
191 u8 *buf, dma_addr_t buf_dma, int buflen,
192 int last_buflen)
193{
194 if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
195 dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
196 if (buflen)
197 buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
198 else
199 buf_dma = 0;
200
201 return buf_dma;
202}
203
204/* Map state->caam_ctx, and add it to link table */
205static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
206 struct caam_hash_state *state,
207 int ctx_len,
208 struct sec4_sg_entry *sec4_sg,
209 u32 flag)
210{
211 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
212 dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
213}
214
215/* Common shared descriptor commands */
216static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
217{
218 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
219 ctx->split_key_len, CLASS_2 |
220 KEY_DEST_MDHA_SPLIT | KEY_ENC);
221}
222
223/* Append key if it has been set */
224static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
225{
226 u32 *key_jump_cmd;
227
228 init_sh_desc(desc, HDR_SHARE_WAIT);
229
230 if (ctx->split_key_len) {
231 /* Skip if already shared */
232 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
233 JUMP_COND_SHRD);
234
235 append_key_ahash(desc, ctx);
236
237 set_jump_tgt_here(desc, key_jump_cmd);
238 }
239
240 /* Propagate errors from shared to job descriptor */
241 append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
242}
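
Note the jump-patching idiom above: append_jump() emits a conditional jump whose target is not yet known, and set_jump_tgt_here() later patches it to the current end of the descriptor, so the key load is skipped whenever the descriptor state is already shared.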
243
244/*
 245 * For ahash, read data from seqin following state->caam_ctx,
 246 * and write the resulting class 2 context to seqout, which may be
 247 * state->caam_ctx or req->result
248 */
249static inline void ahash_append_load_str(u32 *desc, int digestsize)
250{
251 /* Calculate remaining bytes to read */
252 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
253
254 /* Read remaining bytes */
255 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
256 FIFOLD_TYPE_MSG | KEY_VLF);
257
258 /* Store class2 context bytes */
259 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
260 LDST_SRCDST_BYTE_CONTEXT);
261}
262
263/*
264 * For ahash update, final and finup, import context, read and write to seqout
265 */
266static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
267 int digestsize,
268 struct caam_hash_ctx *ctx)
269{
270 init_sh_desc_key_ahash(desc, ctx);
271
272 /* Import context from software */
273 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
274 LDST_CLASS_2_CCB | ctx->ctx_len);
275
276 /* Class 2 operation */
277 append_operation(desc, op | state | OP_ALG_ENCRYPT);
278
279 /*
280 * Load from buf and/or src and write to req->result or state->context
281 */
282 ahash_append_load_str(desc, digestsize);
283}
284
 285/* For ahash first (init) and digest, read and write to seqout */
286static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
287 int digestsize, struct caam_hash_ctx *ctx)
288{
289 init_sh_desc_key_ahash(desc, ctx);
290
291 /* Class 2 operation */
292 append_operation(desc, op | state | OP_ALG_ENCRYPT);
293
294 /*
295 * Load from buf and/or src and write to req->result or state->context
296 */
297 ahash_append_load_str(desc, digestsize);
298}
299
300static int ahash_set_sh_desc(struct crypto_ahash *ahash)
301{
302 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
303 int digestsize = crypto_ahash_digestsize(ahash);
304 struct device *jrdev = ctx->jrdev;
305 u32 have_key = 0;
306 u32 *desc;
307
308 if (ctx->split_key_len)
309 have_key = OP_ALG_AAI_HMAC_PRECOMP;
310
311 /* ahash_update shared descriptor */
312 desc = ctx->sh_desc_update;
313
314 init_sh_desc(desc, HDR_SHARE_WAIT);
315
316 /* Import context from software */
317 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
318 LDST_CLASS_2_CCB | ctx->ctx_len);
319
320 /* Class 2 operation */
321 append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
322 OP_ALG_ENCRYPT);
323
324 /* Load data and write to result or context */
325 ahash_append_load_str(desc, ctx->ctx_len);
326
327 ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
328 DMA_TO_DEVICE);
329 if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
330 dev_err(jrdev, "unable to map shared descriptor\n");
331 return -ENOMEM;
332 }
333#ifdef DEBUG
334 print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ",
335 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
336#endif
337
338 /* ahash_update_first shared descriptor */
339 desc = ctx->sh_desc_update_first;
340
341 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
342 ctx->ctx_len, ctx);
343
344 ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
345 desc_bytes(desc),
346 DMA_TO_DEVICE);
347 if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
348 dev_err(jrdev, "unable to map shared descriptor\n");
349 return -ENOMEM;
350 }
351#ifdef DEBUG
352 print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ",
353 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
354#endif
355
356 /* ahash_final shared descriptor */
357 desc = ctx->sh_desc_fin;
358
359 ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
360 OP_ALG_AS_FINALIZE, digestsize, ctx);
361
362 ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
363 DMA_TO_DEVICE);
364 if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
365 dev_err(jrdev, "unable to map shared descriptor\n");
366 return -ENOMEM;
367 }
368#ifdef DEBUG
369 print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ",
370 DUMP_PREFIX_ADDRESS, 16, 4, desc,
371 desc_bytes(desc), 1);
372#endif
373
374 /* ahash_finup shared descriptor */
375 desc = ctx->sh_desc_finup;
376
377 ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
378 OP_ALG_AS_FINALIZE, digestsize, ctx);
379
380 ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
381 DMA_TO_DEVICE);
382 if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
383 dev_err(jrdev, "unable to map shared descriptor\n");
384 return -ENOMEM;
385 }
386#ifdef DEBUG
387 print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ",
388 DUMP_PREFIX_ADDRESS, 16, 4, desc,
389 desc_bytes(desc), 1);
390#endif
391
392 /* ahash_digest shared descriptor */
393 desc = ctx->sh_desc_digest;
394
395 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
396 digestsize, ctx);
397
398 ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
399 desc_bytes(desc),
400 DMA_TO_DEVICE);
401 if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
402 dev_err(jrdev, "unable to map shared descriptor\n");
403 return -ENOMEM;
404 }
405#ifdef DEBUG
406 print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ",
407 DUMP_PREFIX_ADDRESS, 16, 4, desc,
408 desc_bytes(desc), 1);
409#endif
410
411 return 0;
412}
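
ahash_set_sh_desc() is re-run from ahash_setkey() below, so all five shared descriptors are rebuilt whenever the split key changes.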
413
414static u32 gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
415 u32 keylen)
416{
417 return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
418 ctx->split_key_pad_len, key_in, keylen,
419 ctx->alg_op);
420}
421
 422/* Digest the key down to digest size if it is longer than the block size */
423static u32 hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
424 u32 *keylen, u8 *key_out, u32 digestsize)
425{
426 struct device *jrdev = ctx->jrdev;
427 u32 *desc;
428 struct split_key_result result;
429 dma_addr_t src_dma, dst_dma;
430 int ret = 0;
431
 432 	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
 	if (!desc)
 		return -ENOMEM;
 433 
434 init_job_desc(desc, 0);
435
436 src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
437 DMA_TO_DEVICE);
438 if (dma_mapping_error(jrdev, src_dma)) {
439 dev_err(jrdev, "unable to map key input memory\n");
440 kfree(desc);
441 return -ENOMEM;
442 }
443 dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
444 DMA_FROM_DEVICE);
445 if (dma_mapping_error(jrdev, dst_dma)) {
446 dev_err(jrdev, "unable to map key output memory\n");
447 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
448 kfree(desc);
449 return -ENOMEM;
450 }
451
452 /* Job descriptor to perform unkeyed hash on key_in */
453 append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
454 OP_ALG_AS_INITFINAL);
455 append_seq_in_ptr(desc, src_dma, *keylen, 0);
456 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
457 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
458 append_seq_out_ptr(desc, dst_dma, digestsize, 0);
459 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
460 LDST_SRCDST_BYTE_CONTEXT);
461
462#ifdef DEBUG
463 print_hex_dump(KERN_ERR, "key_in@"xstr(__LINE__)": ",
464 DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
465 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
466 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
467#endif
468
469 result.err = 0;
470 init_completion(&result.completion);
471
472 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
473 if (!ret) {
474 /* in progress */
475 wait_for_completion_interruptible(&result.completion);
476 ret = result.err;
477#ifdef DEBUG
478 print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ",
479 DUMP_PREFIX_ADDRESS, 16, 4, key_in,
480 digestsize, 1);
481#endif
482 }
 483 	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
 484 	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
 485 
 486 	*keylen = digestsize;
487
488 kfree(desc);
489
490 return ret;
491}
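
hash_digest_key() turns the asynchronous job ring interface into a synchronous call by blocking on a completion. A minimal sketch of the callback it relies on (split_key_done(), declared in key_gen.h; the actual implementation may differ slightly):

static void split_key_done(struct device *dev, u32 *desc, u32 err,
			   void *context)
{
	struct split_key_result *res = context;

	/* stash the job status and wake the waiting submitter */
	res->err = err;
	complete(&res->completion);
}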
492
493static int ahash_setkey(struct crypto_ahash *ahash,
494 const u8 *key, unsigned int keylen)
495{
496 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
497 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
498 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
499 struct device *jrdev = ctx->jrdev;
500 int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
501 int digestsize = crypto_ahash_digestsize(ahash);
502 int ret = 0;
503 u8 *hashed_key = NULL;
504
505#ifdef DEBUG
506 printk(KERN_ERR "keylen %d\n", keylen);
507#endif
508
509 if (keylen > blocksize) {
510 hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
511 GFP_DMA);
512 if (!hashed_key)
513 return -ENOMEM;
514 ret = hash_digest_key(ctx, key, &keylen, hashed_key,
515 digestsize);
516 if (ret)
517 goto badkey;
518 key = hashed_key;
519 }
520
521 /* Pick class 2 key length from algorithm submask */
522 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
523 OP_ALG_ALGSEL_SHIFT] * 2;
524 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
525
526#ifdef DEBUG
527 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
528 ctx->split_key_len, ctx->split_key_pad_len);
529 print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
530 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
531#endif
532
533 ret = gen_split_hash_key(ctx, key, keylen);
534 if (ret)
535 goto badkey;
536
537 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
538 DMA_TO_DEVICE);
539 if (dma_mapping_error(jrdev, ctx->key_dma)) {
540 dev_err(jrdev, "unable to map key i/o memory\n");
541 return -ENOMEM;
542 }
543#ifdef DEBUG
544 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
545 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
546 ctx->split_key_pad_len, 1);
547#endif
548
549 ret = ahash_set_sh_desc(ahash);
550 if (ret) {
551 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
552 DMA_TO_DEVICE);
553 }
554
555 kfree(hashed_key);
556 return ret;
557badkey:
558 kfree(hashed_key);
559 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
560 return -EINVAL;
561}
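
Worked example of the sizing above: for HMAC-SHA-256 the submask selects mdpadlen[3] = 32, so split_key_len = 32 * 2 = 64 bytes (the precomputed inner and outer pad halves) and split_key_pad_len = ALIGN(64, 16) = 64.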
562
563/*
564 * ahash_edesc - s/w-extended ahash descriptor
565 * @dst_dma: physical mapped address of req->result
566 * @sec4_sg_dma: physical mapped address of h/w link table
567 * @chained: if source is chained
568 * @src_nents: number of segments in input scatterlist
569 * @sec4_sg_bytes: length of dma mapped sec4_sg space
570 * @sec4_sg: pointer to h/w link table
571 * @hw_desc: the h/w job descriptor followed by any referenced link tables
572 */
573struct ahash_edesc {
574 dma_addr_t dst_dma;
575 dma_addr_t sec4_sg_dma;
576 bool chained;
577 int src_nents;
578 int sec4_sg_bytes;
579 struct sec4_sg_entry *sec4_sg;
580 u32 hw_desc[0];
581};
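
The zero-length hw_desc[] member lets each request live in a single GFP_DMA allocation holding the edesc header, the job descriptor, and the link table; the allocators below carve it up like this:

	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	/* the link table sits right after the fixed-size descriptor area */
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;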
582
583static inline void ahash_unmap(struct device *dev,
584 struct ahash_edesc *edesc,
585 struct ahash_request *req, int dst_len)
586{
587 if (edesc->src_nents)
588 dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
589 DMA_TO_DEVICE, edesc->chained);
590 if (edesc->dst_dma)
591 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
592
593 if (edesc->sec4_sg_bytes)
594 dma_unmap_single(dev, edesc->sec4_sg_dma,
595 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
596}
597
598static inline void ahash_unmap_ctx(struct device *dev,
599 struct ahash_edesc *edesc,
600 struct ahash_request *req, int dst_len, u32 flag)
601{
602 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
603 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
604 struct caam_hash_state *state = ahash_request_ctx(req);
605
606 if (state->ctx_dma)
607 dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
608 ahash_unmap(dev, edesc, req, dst_len);
609}
610
611static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
612 void *context)
613{
614 struct ahash_request *req = context;
615 struct ahash_edesc *edesc;
616 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
617 int digestsize = crypto_ahash_digestsize(ahash);
618#ifdef DEBUG
619 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
620 struct caam_hash_state *state = ahash_request_ctx(req);
621
622 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
623#endif
624
625 edesc = (struct ahash_edesc *)((char *)desc -
626 offsetof(struct ahash_edesc, hw_desc));
627 if (err) {
628 char tmp[CAAM_ERROR_STR_MAX];
629
630 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
631 }
632
633 ahash_unmap(jrdev, edesc, req, digestsize);
634 kfree(edesc);
635
636#ifdef DEBUG
637 print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
638 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
639 ctx->ctx_len, 1);
640 if (req->result)
641 print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
642 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
643 digestsize, 1);
644#endif
645
646 req->base.complete(&req->base, err);
647}
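
The offsetof() subtraction above is an open-coded container_of(): the job ring hands back only the descriptor pointer, and stepping back over the hw_desc offset recovers the enclosing ahash_edesc along with everything needed to unmap it.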
648
649static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
650 void *context)
651{
652 struct ahash_request *req = context;
653 struct ahash_edesc *edesc;
654 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
655 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
656#ifdef DEBUG
657 struct caam_hash_state *state = ahash_request_ctx(req);
658 int digestsize = crypto_ahash_digestsize(ahash);
659
660 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
661#endif
662
663 edesc = (struct ahash_edesc *)((char *)desc -
664 offsetof(struct ahash_edesc, hw_desc));
665 if (err) {
666 char tmp[CAAM_ERROR_STR_MAX];
667
668 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
669 }
670
671 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
672 kfree(edesc);
673
674#ifdef DEBUG
675 print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
676 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
677 ctx->ctx_len, 1);
678 if (req->result)
679 print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
680 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
681 digestsize, 1);
682#endif
683
684 req->base.complete(&req->base, err);
685}
686
687static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
688 void *context)
689{
690 struct ahash_request *req = context;
691 struct ahash_edesc *edesc;
692 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
693 int digestsize = crypto_ahash_digestsize(ahash);
694#ifdef DEBUG
695 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
696 struct caam_hash_state *state = ahash_request_ctx(req);
697
698 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
699#endif
700
701 edesc = (struct ahash_edesc *)((char *)desc -
702 offsetof(struct ahash_edesc, hw_desc));
703 if (err) {
704 char tmp[CAAM_ERROR_STR_MAX];
705
706 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
707 }
708
709 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
710 kfree(edesc);
711
712#ifdef DEBUG
713 print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
714 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
715 ctx->ctx_len, 1);
716 if (req->result)
717 print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
718 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
719 digestsize, 1);
720#endif
721
722 req->base.complete(&req->base, err);
723}
724
725static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
726 void *context)
727{
728 struct ahash_request *req = context;
729 struct ahash_edesc *edesc;
730 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
731 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
732#ifdef DEBUG
733 struct caam_hash_state *state = ahash_request_ctx(req);
734 int digestsize = crypto_ahash_digestsize(ahash);
735
736 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
737#endif
738
739 edesc = (struct ahash_edesc *)((char *)desc -
740 offsetof(struct ahash_edesc, hw_desc));
741 if (err) {
742 char tmp[CAAM_ERROR_STR_MAX];
743
744 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
745 }
746
747 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
748 kfree(edesc);
749
750#ifdef DEBUG
751 print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
752 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
753 ctx->ctx_len, 1);
754 if (req->result)
755 print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
756 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
757 digestsize, 1);
758#endif
759
760 req->base.complete(&req->base, err);
761}
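
The four completion callbacks differ only in what they tear down: ahash_done() leaves the saved context alone, ahash_done_bi() unmaps it DMA_BIDIRECTIONAL, ahash_done_ctx_src() unmaps a digest-sized result together with a DMA_FROM_DEVICE context, and ahash_done_ctx_dst() a DMA_TO_DEVICE context.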
762
763/* submit update job descriptor */
764static int ahash_update_ctx(struct ahash_request *req)
765{
766 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
767 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
768 struct caam_hash_state *state = ahash_request_ctx(req);
769 struct device *jrdev = ctx->jrdev;
770 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
771 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
772 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
773 int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
774 u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
775 int *next_buflen = state->current_buf ? &state->buflen_0 :
776 &state->buflen_1, last_buflen;
777 int in_len = *buflen + req->nbytes, to_hash;
778 u32 *sh_desc = ctx->sh_desc_update, *desc;
779 dma_addr_t ptr = ctx->sh_desc_update_dma;
780 int src_nents, sec4_sg_bytes, sec4_sg_src_index;
781 struct ahash_edesc *edesc;
782 bool chained = false;
783 int ret = 0;
784 int sh_len;
785
786 last_buflen = *next_buflen;
787 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
788 to_hash = in_len - *next_buflen;
789
790 if (to_hash) {
791 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
792 &chained);
793 sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
794 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
795 sizeof(struct sec4_sg_entry);
796
797 /*
798 * allocate space for base edesc and hw desc commands,
799 * link tables
800 */
801 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
802 sec4_sg_bytes, GFP_DMA | flags);
803 if (!edesc) {
804 dev_err(jrdev,
805 "could not allocate extended descriptor\n");
806 return -ENOMEM;
807 }
808
809 edesc->src_nents = src_nents;
810 edesc->chained = chained;
811 edesc->sec4_sg_bytes = sec4_sg_bytes;
812 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
813 DESC_JOB_IO_LEN;
814 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
815 sec4_sg_bytes,
816 DMA_TO_DEVICE);
817
818 ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
819 edesc->sec4_sg, DMA_BIDIRECTIONAL);
820
821 state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
822 edesc->sec4_sg + 1,
823 buf, state->buf_dma,
824 *buflen, last_buflen);
825
826 if (src_nents) {
827 src_map_to_sec4_sg(jrdev, req->src, src_nents,
828 edesc->sec4_sg + sec4_sg_src_index,
829 chained);
830 if (*next_buflen) {
831 sg_copy_part(next_buf, req->src, to_hash -
832 *buflen, req->nbytes);
833 state->current_buf = !state->current_buf;
834 }
835 } else {
836 (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
837 SEC4_SG_LEN_FIN;
838 }
839
840 sh_len = desc_len(sh_desc);
841 desc = edesc->hw_desc;
842 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
843 HDR_REVERSE);
844
845 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
846 to_hash, LDST_SGF);
847
848 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
849
850#ifdef DEBUG
851 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
852 DUMP_PREFIX_ADDRESS, 16, 4, desc,
853 desc_bytes(desc), 1);
854#endif
855
856 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
857 if (!ret) {
858 ret = -EINPROGRESS;
859 } else {
860 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
861 DMA_BIDIRECTIONAL);
862 kfree(edesc);
863 }
864 } else if (*next_buflen) {
865 sg_copy(buf + *buflen, req->src, req->nbytes);
866 *buflen = *next_buflen;
867 *next_buflen = last_buflen;
868 }
869#ifdef DEBUG
870 print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
871 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
872 print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
873 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
874 *next_buflen, 1);
875#endif
876
877 return ret;
878}
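
Worked example, assuming a 64-byte block size: with 10 bytes already buffered and a 100-byte request, in_len = 110, *next_buflen = 110 & 63 = 46 and to_hash = 64, so exactly one block is hashed and the trailing 46 bytes are copied into next_buf for the following update.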
879
880static int ahash_final_ctx(struct ahash_request *req)
881{
882 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
883 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
884 struct caam_hash_state *state = ahash_request_ctx(req);
885 struct device *jrdev = ctx->jrdev;
886 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
887 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
888 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
889 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
890 int last_buflen = state->current_buf ? state->buflen_0 :
891 state->buflen_1;
892 u32 *sh_desc = ctx->sh_desc_fin, *desc;
893 dma_addr_t ptr = ctx->sh_desc_fin_dma;
894 int sec4_sg_bytes;
895 int digestsize = crypto_ahash_digestsize(ahash);
896 struct ahash_edesc *edesc;
897 int ret = 0;
898 int sh_len;
899
900 sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
901
902 /* allocate space for base edesc and hw desc commands, link tables */
903 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
904 sec4_sg_bytes, GFP_DMA | flags);
905 if (!edesc) {
906 dev_err(jrdev, "could not allocate extended descriptor\n");
907 return -ENOMEM;
908 }
909
910 sh_len = desc_len(sh_desc);
911 desc = edesc->hw_desc;
912 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
913
914 edesc->sec4_sg_bytes = sec4_sg_bytes;
915 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
916 DESC_JOB_IO_LEN;
917 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
918 sec4_sg_bytes, DMA_TO_DEVICE);
919 edesc->src_nents = 0;
920
921 ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
922 DMA_TO_DEVICE);
923
924 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
925 buf, state->buf_dma, buflen,
926 last_buflen);
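	/* mark the last link table entry as final (index in entries, not bytes) */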
 927	(edesc->sec4_sg + sec4_sg_bytes / sizeof(struct sec4_sg_entry) - 1)->len |= SEC4_SG_LEN_FIN;
928
929 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
930 LDST_SGF);
931
932 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
933 digestsize);
934
935#ifdef DEBUG
936 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
937 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
938#endif
939
940 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
941 if (!ret) {
942 ret = -EINPROGRESS;
943 } else {
944 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
945 kfree(edesc);
946 }
947
948 return ret;
949}
950
951static int ahash_finup_ctx(struct ahash_request *req)
952{
953 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
954 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
955 struct caam_hash_state *state = ahash_request_ctx(req);
956 struct device *jrdev = ctx->jrdev;
957 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
958 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
959 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
960 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
961 int last_buflen = state->current_buf ? state->buflen_0 :
962 state->buflen_1;
963 u32 *sh_desc = ctx->sh_desc_finup, *desc;
964 dma_addr_t ptr = ctx->sh_desc_finup_dma;
965 int sec4_sg_bytes, sec4_sg_src_index;
966 int src_nents;
967 int digestsize = crypto_ahash_digestsize(ahash);
968 struct ahash_edesc *edesc;
969 bool chained = false;
970 int ret = 0;
971 int sh_len;
972
973 src_nents = __sg_count(req->src, req->nbytes, &chained);
974 sec4_sg_src_index = 1 + (buflen ? 1 : 0);
975 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
976 sizeof(struct sec4_sg_entry);
977
978 /* allocate space for base edesc and hw desc commands, link tables */
979 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
980 sec4_sg_bytes, GFP_DMA | flags);
981 if (!edesc) {
982 dev_err(jrdev, "could not allocate extended descriptor\n");
983 return -ENOMEM;
984 }
985
986 sh_len = desc_len(sh_desc);
987 desc = edesc->hw_desc;
988 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
989
990 edesc->src_nents = src_nents;
991 edesc->chained = chained;
992 edesc->sec4_sg_bytes = sec4_sg_bytes;
993 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
994 DESC_JOB_IO_LEN;
995 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
996 sec4_sg_bytes, DMA_TO_DEVICE);
997
998 ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
999 DMA_TO_DEVICE);
1000
1001 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
1002 buf, state->buf_dma, buflen,
1003 last_buflen);
1004
1005 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
1006 sec4_sg_src_index, chained);
1007
1008 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
1009 buflen + req->nbytes, LDST_SGF);
1010
1011 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1012 digestsize);
1013
1014#ifdef DEBUG
1015 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1016 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1017#endif
1018
1019 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
1020 if (!ret) {
1021 ret = -EINPROGRESS;
1022 } else {
1023 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1024 kfree(edesc);
1025 }
1026
1027 return ret;
1028}
1029
1030static int ahash_digest(struct ahash_request *req)
1031{
1032 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1033 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1034 struct device *jrdev = ctx->jrdev;
1035 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1036 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1037 u32 *sh_desc = ctx->sh_desc_digest, *desc;
1038 dma_addr_t ptr = ctx->sh_desc_digest_dma;
1039 int digestsize = crypto_ahash_digestsize(ahash);
1040 int src_nents, sec4_sg_bytes;
1041 dma_addr_t src_dma;
1042 struct ahash_edesc *edesc;
1043 bool chained = false;
1044 int ret = 0;
1045 u32 options;
1046 int sh_len;
1047
1048 src_nents = sg_count(req->src, req->nbytes, &chained);
1049 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
1050 chained);
1051 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1052
1053 /* allocate space for base edesc and hw desc commands, link tables */
1054 edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
1055 DESC_JOB_IO_LEN, GFP_DMA | flags);
1056 if (!edesc) {
1057 dev_err(jrdev, "could not allocate extended descriptor\n");
1058 return -ENOMEM;
1059 }
1060 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1061 DESC_JOB_IO_LEN;
1062 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1063 sec4_sg_bytes, DMA_TO_DEVICE);
1064 edesc->src_nents = src_nents;
1065 edesc->chained = chained;
1066
1067 sh_len = desc_len(sh_desc);
1068 desc = edesc->hw_desc;
1069 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1070
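	/* a single contiguous segment is handed to the CAAM directly; multiple segments go through the sec4 link table */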
1071 if (src_nents) {
1072 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
1073 src_dma = edesc->sec4_sg_dma;
1074 options = LDST_SGF;
1075 } else {
1076 src_dma = sg_dma_address(req->src);
1077 options = 0;
1078 }
1079 append_seq_in_ptr(desc, src_dma, req->nbytes, options);
1080
1081 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1082 digestsize);
1083
1084#ifdef DEBUG
1085 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1086 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1087#endif
1088
1089 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1090 if (!ret) {
1091 ret = -EINPROGRESS;
1092 } else {
1093 ahash_unmap(jrdev, edesc, req, digestsize);
1094 kfree(edesc);
1095 }
1096
1097 return ret;
1098}
1099
 1100/* submit ahash final if it is the first job descriptor */
1101static int ahash_final_no_ctx(struct ahash_request *req)
1102{
1103 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1104 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1105 struct caam_hash_state *state = ahash_request_ctx(req);
1106 struct device *jrdev = ctx->jrdev;
1107 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1108 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1109 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1110 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1111 u32 *sh_desc = ctx->sh_desc_digest, *desc;
1112 dma_addr_t ptr = ctx->sh_desc_digest_dma;
1113 int digestsize = crypto_ahash_digestsize(ahash);
1114 struct ahash_edesc *edesc;
1115 int ret = 0;
1116 int sh_len;
1117
1118 /* allocate space for base edesc and hw desc commands, link tables */
1119 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
1120 GFP_DMA | flags);
1121 if (!edesc) {
1122 dev_err(jrdev, "could not allocate extended descriptor\n");
1123 return -ENOMEM;
1124 }
1125
1126 sh_len = desc_len(sh_desc);
1127 desc = edesc->hw_desc;
1128 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1129
1130 state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
1131
1132 append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1133
1134 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1135 digestsize);
1136 edesc->src_nents = 0;
1137
1138#ifdef DEBUG
1139 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1140 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1141#endif
1142
1143 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1144 if (!ret) {
1145 ret = -EINPROGRESS;
1146 } else {
1147 ahash_unmap(jrdev, edesc, req, digestsize);
1148 kfree(edesc);
1149 }
1150
1151 return ret;
1152}
1153
 1154/* submit ahash update if it is the first job descriptor after update */
1155static int ahash_update_no_ctx(struct ahash_request *req)
1156{
1157 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1158 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1159 struct caam_hash_state *state = ahash_request_ctx(req);
1160 struct device *jrdev = ctx->jrdev;
1161 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1162 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1163 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1164 int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
1165 u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
1166 int *next_buflen = state->current_buf ? &state->buflen_0 :
1167 &state->buflen_1;
1168 int in_len = *buflen + req->nbytes, to_hash;
1169 int sec4_sg_bytes, src_nents;
1170 struct ahash_edesc *edesc;
1171 u32 *desc, *sh_desc = ctx->sh_desc_update_first;
1172 dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1173 bool chained = false;
1174 int ret = 0;
1175 int sh_len;
1176
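	/* only whole blocks are hashed now; the remainder stays buffered for the next request */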
1177 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
1178 to_hash = in_len - *next_buflen;
1179
1180 if (to_hash) {
1181 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
1182 &chained);
1183 sec4_sg_bytes = (1 + src_nents) *
1184 sizeof(struct sec4_sg_entry);
1185
1186 /*
1187 * allocate space for base edesc and hw desc commands,
1188 * link tables
1189 */
1190 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1191 sec4_sg_bytes, GFP_DMA | flags);
1192 if (!edesc) {
1193 dev_err(jrdev,
1194 "could not allocate extended descriptor\n");
1195 return -ENOMEM;
1196 }
1197
1198 edesc->src_nents = src_nents;
1199 edesc->chained = chained;
1200 edesc->sec4_sg_bytes = sec4_sg_bytes;
1201 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1202 DESC_JOB_IO_LEN;
1203 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1204 sec4_sg_bytes,
1205 DMA_TO_DEVICE);
1206
1207 state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
1208 buf, *buflen);
1209 src_map_to_sec4_sg(jrdev, req->src, src_nents,
1210 edesc->sec4_sg + 1, chained);
1211 if (*next_buflen) {
1212 sg_copy_part(next_buf, req->src, to_hash - *buflen,
1213 req->nbytes);
1214 state->current_buf = !state->current_buf;
1215 }
1216
1217 sh_len = desc_len(sh_desc);
1218 desc = edesc->hw_desc;
1219 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1220 HDR_REVERSE);
1221
1222 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1223
1224 map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1225
1226#ifdef DEBUG
1227 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1228 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1229 desc_bytes(desc), 1);
1230#endif
1231
1232 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1233 if (!ret) {
1234 ret = -EINPROGRESS;
1235 state->update = ahash_update_ctx;
1236 state->finup = ahash_finup_ctx;
1237 state->final = ahash_final_ctx;
1238 } else {
1239 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1240 DMA_TO_DEVICE);
1241 kfree(edesc);
1242 }
1243 } else if (*next_buflen) {
1244 sg_copy(buf + *buflen, req->src, req->nbytes);
1245 *buflen = *next_buflen;
1246 *next_buflen = 0;
1247 }
1248#ifdef DEBUG
1249 print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
1250 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
1251 print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
1252 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1253 *next_buflen, 1);
1254#endif
1255
1256 return ret;
1257}
1258
 1259/* submit ahash finup if it is the first job descriptor after update */
1260static int ahash_finup_no_ctx(struct ahash_request *req)
1261{
1262 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1263 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1264 struct caam_hash_state *state = ahash_request_ctx(req);
1265 struct device *jrdev = ctx->jrdev;
1266 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1267 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1268 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1269 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1270 int last_buflen = state->current_buf ? state->buflen_0 :
1271 state->buflen_1;
1272 u32 *sh_desc = ctx->sh_desc_digest, *desc;
1273 dma_addr_t ptr = ctx->sh_desc_digest_dma;
1274 int sec4_sg_bytes, sec4_sg_src_index, src_nents;
1275 int digestsize = crypto_ahash_digestsize(ahash);
1276 struct ahash_edesc *edesc;
1277 bool chained = false;
1278 int sh_len;
1279 int ret = 0;
1280
1281 src_nents = __sg_count(req->src, req->nbytes, &chained);
1282 sec4_sg_src_index = 2;
1283 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
1284 sizeof(struct sec4_sg_entry);
1285
1286 /* allocate space for base edesc and hw desc commands, link tables */
1287 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1288 sec4_sg_bytes, GFP_DMA | flags);
1289 if (!edesc) {
1290 dev_err(jrdev, "could not allocate extended descriptor\n");
1291 return -ENOMEM;
1292 }
1293
1294 sh_len = desc_len(sh_desc);
1295 desc = edesc->hw_desc;
1296 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1297
1298 edesc->src_nents = src_nents;
1299 edesc->chained = chained;
1300 edesc->sec4_sg_bytes = sec4_sg_bytes;
1301 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1302 DESC_JOB_IO_LEN;
1303 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1304 sec4_sg_bytes, DMA_TO_DEVICE);
1305
1306 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
1307 state->buf_dma, buflen,
1308 last_buflen);
1309
1310 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
1311 chained);
1312
1313 append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
1314 req->nbytes, LDST_SGF);
1315
1316 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1317 digestsize);
1318
1319#ifdef DEBUG
1320 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1321 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1322#endif
1323
1324 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1325 if (!ret) {
1326 ret = -EINPROGRESS;
1327 } else {
1328 ahash_unmap(jrdev, edesc, req, digestsize);
1329 kfree(edesc);
1330 }
1331
1332 return ret;
1333}
1334
1335/* submit first update job descriptor after init */
1336static int ahash_update_first(struct ahash_request *req)
1337{
1338 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1339 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1340 struct caam_hash_state *state = ahash_request_ctx(req);
1341 struct device *jrdev = ctx->jrdev;
1342 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1343 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1344 u8 *next_buf = state->buf_0 + state->current_buf *
1345 CAAM_MAX_HASH_BLOCK_SIZE;
1346 int *next_buflen = &state->buflen_0 + state->current_buf;
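	/* assumes buf_1/buflen_1 immediately follow buf_0/buflen_0 in struct caam_hash_state */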
1347 int to_hash;
1348 u32 *sh_desc = ctx->sh_desc_update_first, *desc;
1349 dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1350 int sec4_sg_bytes, src_nents;
1351 dma_addr_t src_dma;
1352 u32 options;
1353 struct ahash_edesc *edesc;
1354 bool chained = false;
1355 int ret = 0;
1356 int sh_len;
1357
1358 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
1359 1);
1360 to_hash = req->nbytes - *next_buflen;
1361
1362 if (to_hash) {
1363 src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
1364 &chained);
1365 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1366 DMA_TO_DEVICE, chained);
1367 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1368
1369 /*
1370 * allocate space for base edesc and hw desc commands,
1371 * link tables
1372 */
1373 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1374 sec4_sg_bytes, GFP_DMA | flags);
1375 if (!edesc) {
1376 dev_err(jrdev,
1377 "could not allocate extended descriptor\n");
1378 return -ENOMEM;
1379 }
1380
1381 edesc->src_nents = src_nents;
1382 edesc->chained = chained;
1383 edesc->sec4_sg_bytes = sec4_sg_bytes;
1384 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1385 DESC_JOB_IO_LEN;
1386 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1387 sec4_sg_bytes,
1388 DMA_TO_DEVICE);
1389
1390 if (src_nents) {
1391 sg_to_sec4_sg_last(req->src, src_nents,
1392 edesc->sec4_sg, 0);
1393 src_dma = edesc->sec4_sg_dma;
1394 options = LDST_SGF;
1395 } else {
1396 src_dma = sg_dma_address(req->src);
1397 options = 0;
1398 }
1399
1400 if (*next_buflen)
1401 sg_copy_part(next_buf, req->src, to_hash, req->nbytes);
1402
1403 sh_len = desc_len(sh_desc);
1404 desc = edesc->hw_desc;
1405 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1406 HDR_REVERSE);
1407
1408 append_seq_in_ptr(desc, src_dma, to_hash, options);
1409
1410 map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1411
1412#ifdef DEBUG
1413 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1414 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1415 desc_bytes(desc), 1);
1416#endif
1417
1418 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
1419 req);
1420 if (!ret) {
1421 ret = -EINPROGRESS;
1422 state->update = ahash_update_ctx;
1423 state->finup = ahash_finup_ctx;
1424 state->final = ahash_final_ctx;
1425 } else {
1426 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1427 DMA_TO_DEVICE);
1428 kfree(edesc);
1429 }
1430 } else if (*next_buflen) {
1431 state->update = ahash_update_no_ctx;
1432 state->finup = ahash_finup_no_ctx;
1433 state->final = ahash_final_no_ctx;
1434 sg_copy(next_buf, req->src, req->nbytes);
1435 }
1436#ifdef DEBUG
1437 print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
1438 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1439 *next_buflen, 1);
1440#endif
1441
1442 return ret;
1443}
1444
1445static int ahash_finup_first(struct ahash_request *req)
1446{
1447 return ahash_digest(req);
1448}
1449
1450static int ahash_init(struct ahash_request *req)
1451{
1452 struct caam_hash_state *state = ahash_request_ctx(req);
1453
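	/* these hooks form a small state machine: the "first" variants re-point them to the _ctx handlers once a job has run */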
1454 state->update = ahash_update_first;
1455 state->finup = ahash_finup_first;
1456 state->final = ahash_final_no_ctx;
1457
1458 state->current_buf = 0;
1459
1460 return 0;
1461}
1462
1463static int ahash_update(struct ahash_request *req)
1464{
1465 struct caam_hash_state *state = ahash_request_ctx(req);
1466
1467 return state->update(req);
1468}
1469
1470static int ahash_finup(struct ahash_request *req)
1471{
1472 struct caam_hash_state *state = ahash_request_ctx(req);
1473
1474 return state->finup(req);
1475}
1476
1477static int ahash_final(struct ahash_request *req)
1478{
1479 struct caam_hash_state *state = ahash_request_ctx(req);
1480
1481 return state->final(req);
1482}
1483
1484static int ahash_export(struct ahash_request *req, void *out)
1485{
1486 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1487 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1488 struct caam_hash_state *state = ahash_request_ctx(req);
1489
1490 memcpy(out, ctx, sizeof(struct caam_hash_ctx));
1491 memcpy(out + sizeof(struct caam_hash_ctx), state,
1492 sizeof(struct caam_hash_state));
1493 return 0;
1494}
1495
1496static int ahash_import(struct ahash_request *req, const void *in)
1497{
1498 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1499 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1500 struct caam_hash_state *state = ahash_request_ctx(req);
1501
1502 memcpy(ctx, in, sizeof(struct caam_hash_ctx));
1503 memcpy(state, in + sizeof(struct caam_hash_ctx),
1504 sizeof(struct caam_hash_state));
1505 return 0;
1506}
1507
1508struct caam_hash_template {
1509 char name[CRYPTO_MAX_ALG_NAME];
1510 char driver_name[CRYPTO_MAX_ALG_NAME];
1511 char hmac_name[CRYPTO_MAX_ALG_NAME];
1512 char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1513 unsigned int blocksize;
1514 struct ahash_alg template_ahash;
1515 u32 alg_type;
1516 u32 alg_op;
1517};
1518
1519/* ahash descriptors */
1520static struct caam_hash_template driver_hash[] = {
1521 {
1522 .name = "sha1",
1523 .driver_name = "sha1-caam",
1524 .hmac_name = "hmac(sha1)",
1525 .hmac_driver_name = "hmac-sha1-caam",
1526 .blocksize = SHA1_BLOCK_SIZE,
1527 .template_ahash = {
1528 .init = ahash_init,
1529 .update = ahash_update,
1530 .final = ahash_final,
1531 .finup = ahash_finup,
1532 .digest = ahash_digest,
1533 .export = ahash_export,
1534 .import = ahash_import,
1535 .setkey = ahash_setkey,
1536 .halg = {
1537 .digestsize = SHA1_DIGEST_SIZE,
1538 },
1539 },
1540 .alg_type = OP_ALG_ALGSEL_SHA1,
1541 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1542 }, {
1543 .name = "sha224",
1544 .driver_name = "sha224-caam",
1545 .hmac_name = "hmac(sha224)",
1546 .hmac_driver_name = "hmac-sha224-caam",
1547 .blocksize = SHA224_BLOCK_SIZE,
1548 .template_ahash = {
1549 .init = ahash_init,
1550 .update = ahash_update,
1551 .final = ahash_final,
1552 .finup = ahash_finup,
1553 .digest = ahash_digest,
1554 .export = ahash_export,
1555 .import = ahash_import,
1556 .setkey = ahash_setkey,
1557 .halg = {
1558 .digestsize = SHA224_DIGEST_SIZE,
1559 },
1560 },
1561 .alg_type = OP_ALG_ALGSEL_SHA224,
1562 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1563 }, {
1564 .name = "sha256",
1565 .driver_name = "sha256-caam",
1566 .hmac_name = "hmac(sha256)",
1567 .hmac_driver_name = "hmac-sha256-caam",
1568 .blocksize = SHA256_BLOCK_SIZE,
1569 .template_ahash = {
1570 .init = ahash_init,
1571 .update = ahash_update,
1572 .final = ahash_final,
1573 .finup = ahash_finup,
1574 .digest = ahash_digest,
1575 .export = ahash_export,
1576 .import = ahash_import,
1577 .setkey = ahash_setkey,
1578 .halg = {
1579 .digestsize = SHA256_DIGEST_SIZE,
1580 },
1581 },
1582 .alg_type = OP_ALG_ALGSEL_SHA256,
1583 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1584 }, {
1585 .name = "sha384",
1586 .driver_name = "sha384-caam",
1587 .hmac_name = "hmac(sha384)",
1588 .hmac_driver_name = "hmac-sha384-caam",
1589 .blocksize = SHA384_BLOCK_SIZE,
1590 .template_ahash = {
1591 .init = ahash_init,
1592 .update = ahash_update,
1593 .final = ahash_final,
1594 .finup = ahash_finup,
1595 .digest = ahash_digest,
1596 .export = ahash_export,
1597 .import = ahash_import,
1598 .setkey = ahash_setkey,
1599 .halg = {
1600 .digestsize = SHA384_DIGEST_SIZE,
1601 },
1602 },
1603 .alg_type = OP_ALG_ALGSEL_SHA384,
1604 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1605 }, {
1606 .name = "sha512",
1607 .driver_name = "sha512-caam",
1608 .hmac_name = "hmac(sha512)",
1609 .hmac_driver_name = "hmac-sha512-caam",
1610 .blocksize = SHA512_BLOCK_SIZE,
1611 .template_ahash = {
1612 .init = ahash_init,
1613 .update = ahash_update,
1614 .final = ahash_final,
1615 .finup = ahash_finup,
1616 .digest = ahash_digest,
1617 .export = ahash_export,
1618 .import = ahash_import,
1619 .setkey = ahash_setkey,
1620 .halg = {
1621 .digestsize = SHA512_DIGEST_SIZE,
1622 },
1623 },
1624 .alg_type = OP_ALG_ALGSEL_SHA512,
1625 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1626 }, {
1627 .name = "md5",
1628 .driver_name = "md5-caam",
1629 .hmac_name = "hmac(md5)",
1630 .hmac_driver_name = "hmac-md5-caam",
1631 .blocksize = MD5_BLOCK_WORDS * 4,
1632 .template_ahash = {
1633 .init = ahash_init,
1634 .update = ahash_update,
1635 .final = ahash_final,
1636 .finup = ahash_finup,
1637 .digest = ahash_digest,
1638 .export = ahash_export,
1639 .import = ahash_import,
1640 .setkey = ahash_setkey,
1641 .halg = {
1642 .digestsize = MD5_DIGEST_SIZE,
1643 },
1644 },
1645 .alg_type = OP_ALG_ALGSEL_MD5,
1646 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1647 },
1648};
1649
1650struct caam_hash_alg {
1651 struct list_head entry;
1652 struct device *ctrldev;
1653 int alg_type;
1654 int alg_op;
1655 struct ahash_alg ahash_alg;
1656};
1657
1658static int caam_hash_cra_init(struct crypto_tfm *tfm)
1659{
1660 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1661 struct crypto_alg *base = tfm->__crt_alg;
1662 struct hash_alg_common *halg =
1663 container_of(base, struct hash_alg_common, base);
1664 struct ahash_alg *alg =
1665 container_of(halg, struct ahash_alg, halg);
1666 struct caam_hash_alg *caam_hash =
1667 container_of(alg, struct caam_hash_alg, ahash_alg);
1668 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1669 struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
1670 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
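	/* (the truncated SHA-224/384 variants still carry the full 32-/64-byte internal state) */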
1671 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1672 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1673 HASH_MSG_LEN + 32,
1674 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1675 HASH_MSG_LEN + 64,
1676 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1677 int tgt_jr = atomic_inc_return(&priv->tfm_count);
1678 int ret = 0;
1679
1680 /*
1681 * distribute tfms across job rings to ensure in-order
1682 * crypto request processing per tfm
1683 */
1684 ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs];
1685
1686 /* copy descriptor header template value */
1687 ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1688 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
1689
1690 ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1691 OP_ALG_ALGSEL_SHIFT];
1692
1693 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1694 sizeof(struct caam_hash_state));
1695
1696 ret = ahash_set_sh_desc(ahash);
1697
1698 return ret;
1699}
1700
1701static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1702{
1703 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1704
1705 if (ctx->sh_desc_update_dma &&
1706 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
1707 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
1708 desc_bytes(ctx->sh_desc_update),
1709 DMA_TO_DEVICE);
1710 if (ctx->sh_desc_update_first_dma &&
1711 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
1712 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
1713 desc_bytes(ctx->sh_desc_update_first),
1714 DMA_TO_DEVICE);
1715 if (ctx->sh_desc_fin_dma &&
1716 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
1717 dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
1718 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
1719 if (ctx->sh_desc_digest_dma &&
1720 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
1721 dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
1722 desc_bytes(ctx->sh_desc_digest),
1723 DMA_TO_DEVICE);
1724 if (ctx->sh_desc_finup_dma &&
1725 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
1726 dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
1727 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
1728}
1729
1730static void __exit caam_algapi_hash_exit(void)
1731{
1732 struct device_node *dev_node;
1733 struct platform_device *pdev;
1734 struct device *ctrldev;
1735 struct caam_drv_private *priv;
1736 struct caam_hash_alg *t_alg, *n;
1737
1738 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1739 if (!dev_node)
1740 return;
1741
1742 pdev = of_find_device_by_node(dev_node);
1743 if (!pdev)
1744 return;
1745
1746 ctrldev = &pdev->dev;
1747 of_node_put(dev_node);
1748 priv = dev_get_drvdata(ctrldev);
1749
1750 if (!priv->hash_list.next)
1751 return;
1752
1753 list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
1754 crypto_unregister_ahash(&t_alg->ahash_alg);
1755 list_del(&t_alg->entry);
1756 kfree(t_alg);
1757 }
1758}
1759
1760static struct caam_hash_alg *
1761caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
1762 bool keyed)
1763{
1764 struct caam_hash_alg *t_alg;
1765 struct ahash_alg *halg;
1766 struct crypto_alg *alg;
1767
1768 t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
1769 if (!t_alg) {
1770 dev_err(ctrldev, "failed to allocate t_alg\n");
1771 return ERR_PTR(-ENOMEM);
1772 }
1773
1774 t_alg->ahash_alg = template->template_ahash;
1775 halg = &t_alg->ahash_alg;
1776 alg = &halg->halg.base;
1777
1778 if (keyed) {
1779 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1780 template->hmac_name);
1781 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1782 template->hmac_driver_name);
1783 } else {
1784 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1785 template->name);
1786 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1787 template->driver_name);
1788 }
1789 alg->cra_module = THIS_MODULE;
1790 alg->cra_init = caam_hash_cra_init;
1791 alg->cra_exit = caam_hash_cra_exit;
1792 alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1793 alg->cra_priority = CAAM_CRA_PRIORITY;
1794 alg->cra_blocksize = template->blocksize;
1795 alg->cra_alignmask = 0;
1796 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
1797 alg->cra_type = &crypto_ahash_type;
1798
1799 t_alg->alg_type = template->alg_type;
1800 t_alg->alg_op = template->alg_op;
1801 t_alg->ctrldev = ctrldev;
1802
1803 return t_alg;
1804}
1805
1806static int __init caam_algapi_hash_init(void)
1807{
1808 struct device_node *dev_node;
1809 struct platform_device *pdev;
1810 struct device *ctrldev;
1811 struct caam_drv_private *priv;
1812 int i = 0, err = 0;
1813
1814 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1815 if (!dev_node)
1816 return -ENODEV;
1817
1818 pdev = of_find_device_by_node(dev_node);
1819 if (!pdev)
1820 return -ENODEV;
1821
1822 ctrldev = &pdev->dev;
1823 priv = dev_get_drvdata(ctrldev);
1824 of_node_put(dev_node);
1825
1826 INIT_LIST_HEAD(&priv->hash_list);
1827
1828 atomic_set(&priv->tfm_count, -1);
1829
1830 /* register crypto algorithms the device supports */
1831 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1832 /* TODO: check if h/w supports alg */
1833 struct caam_hash_alg *t_alg;
1834
1835 /* register hmac version */
1836 t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
1837 if (IS_ERR(t_alg)) {
1838 err = PTR_ERR(t_alg);
1839 dev_warn(ctrldev, "%s alg allocation failed\n",
1840 driver_hash[i].driver_name);
1841 continue;
1842 }
1843
1844 err = crypto_register_ahash(&t_alg->ahash_alg);
1845 if (err) {
1846 dev_warn(ctrldev, "%s alg registration failed\n",
1847 t_alg->ahash_alg.halg.base.cra_driver_name);
1848 kfree(t_alg);
1849 } else
1850 list_add_tail(&t_alg->entry, &priv->hash_list);
1851
1852 /* register unkeyed version */
1853 t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false);
1854 if (IS_ERR(t_alg)) {
1855 err = PTR_ERR(t_alg);
1856 dev_warn(ctrldev, "%s alg allocation failed\n",
1857 driver_hash[i].driver_name);
1858 continue;
1859 }
1860
1861 err = crypto_register_ahash(&t_alg->ahash_alg);
1862 if (err) {
1863 dev_warn(ctrldev, "%s alg registration failed\n",
1864 t_alg->ahash_alg.halg.base.cra_driver_name);
1865 kfree(t_alg);
1866 } else
1867 list_add_tail(&t_alg->entry, &priv->hash_list);
1868 }
1869
1870 return err;
1871}
1872
1873module_init(caam_algapi_hash_init);
1874module_exit(caam_algapi_hash_exit);
1875
1876MODULE_LICENSE("GPL");
1877MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
1878MODULE_AUTHOR("Freescale Semiconductor - NMG");
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
new file mode 100644
index 000000000000..e2bfe161dece
--- /dev/null
+++ b/drivers/crypto/caam/caamrng.c
@@ -0,0 +1,309 @@
1/*
2 * caam - Freescale FSL CAAM support for hw_random
3 *
4 * Copyright 2011 Freescale Semiconductor, Inc.
5 *
6 * Based on caamalg.c crypto API driver.
7 *
 8 * relationship between job descriptors and shared descriptors:
9 *
10 * --------------- --------------
11 * | JobDesc #0 |-------------------->| ShareDesc |
12 * | *(buffer 0) | |------------->| (generate) |
13 * --------------- | | (move) |
14 * | | (store) |
15 * --------------- | --------------
16 * | JobDesc #1 |------|
17 * | *(buffer 1) |
18 * ---------------
19 *
20 * A job desc looks like this:
21 *
22 * ---------------------
23 * | Header |
24 * | ShareDesc Pointer |
25 * | SEQ_OUT_PTR |
26 * | (output buffer) |
27 * ---------------------
28 *
29 * The SharedDesc never changes, and each job descriptor points to one of two
30 * buffers for each device, from which the data will be copied into the
31 * requested destination
32 */
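/*
 * Consumers reach this driver through the hw_random core (e.g. /dev/hwrng
 * or an entropy daemon); caam_read() below serves those reads from the two
 * ping-pong buffers.
 */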
33
34#include <linux/hw_random.h>
35#include <linux/completion.h>
36#include <linux/atomic.h>
37
38#include "compat.h"
39
40#include "regs.h"
41#include "intern.h"
42#include "desc_constr.h"
43#include "jr.h"
44#include "error.h"
45
46/*
47 * Maximum buffer size: maximum number of random, cache-aligned bytes that
48 * will be generated and moved to seq out ptr (extlen not allowed)
49 */
50#define RN_BUF_SIZE (0xffff / L1_CACHE_BYTES * \
51 L1_CACHE_BYTES)
52
53/* length of descriptors */
54#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
55#define DESC_RNG_LEN (10 * CAAM_CMD_SZ)
56
57/* Buffer, its dma address and lock */
58struct buf_data {
59 u8 buf[RN_BUF_SIZE];
60 dma_addr_t addr;
61 struct completion filled;
62 u32 hw_desc[DESC_JOB_O_LEN];
63#define BUF_NOT_EMPTY 0
64#define BUF_EMPTY 1
 65#define BUF_PENDING 2 /* Empty, but with job pending -- don't submit another */
66 atomic_t empty;
67};
68
69/* rng per-device context */
70struct caam_rng_ctx {
71 struct device *jrdev;
72 dma_addr_t sh_desc_dma;
73 u32 sh_desc[DESC_RNG_LEN];
74 unsigned int cur_buf_idx;
75 int current_buf;
76 struct buf_data bufs[2];
77};
78
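/* one statically-allocated context: this driver registers a single hwrng, backed by job ring 0 */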
79static struct caam_rng_ctx rng_ctx;
80
81static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
82{
83 if (bd->addr)
84 dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,
85 DMA_FROM_DEVICE);
86}
87
88static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
89{
90 struct device *jrdev = ctx->jrdev;
91
92 if (ctx->sh_desc_dma)
93 dma_unmap_single(jrdev, ctx->sh_desc_dma, DESC_RNG_LEN,
94 DMA_TO_DEVICE);
95 rng_unmap_buf(jrdev, &ctx->bufs[0]);
96 rng_unmap_buf(jrdev, &ctx->bufs[1]);
97}
98
99static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
100{
101 struct buf_data *bd;
102
103 bd = (struct buf_data *)((char *)desc -
104 offsetof(struct buf_data, hw_desc));
105
106 if (err) {
107 char tmp[CAAM_ERROR_STR_MAX];
108
109 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
110 }
111
112 atomic_set(&bd->empty, BUF_NOT_EMPTY);
113 complete(&bd->filled);
114#ifdef DEBUG
115 print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
116 DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
117#endif
118}
119
120static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
121{
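	/* to_current != 0 submits a generate job for the buffer currently being read; 0 targets the other one */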
122 struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)];
123 struct device *jrdev = ctx->jrdev;
124 u32 *desc = bd->hw_desc;
125 int err;
126
127 dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));
128 init_completion(&bd->filled);
129 err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);
130 if (err)
 131		complete(&bd->filled); /* don't wait on failed job */
132 else
133 atomic_inc(&bd->empty); /* note if pending */
134
135 return err;
136}
137
138static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
139{
140 struct caam_rng_ctx *ctx = &rng_ctx;
141 struct buf_data *bd = &ctx->bufs[ctx->current_buf];
142 int next_buf_idx, copied_idx;
143 int err;
144
145 if (atomic_read(&bd->empty)) {
146 /* try to submit job if there wasn't one */
147 if (atomic_read(&bd->empty) == BUF_EMPTY) {
148 err = submit_job(ctx, 1);
149 /* if can't submit job, can't even wait */
150 if (err)
151 return 0;
152 }
153 /* no immediate data, so exit if not waiting */
154 if (!wait)
155 return 0;
156
157 /* waiting for pending job */
158 if (atomic_read(&bd->empty))
159 wait_for_completion(&bd->filled);
160 }
161
162 next_buf_idx = ctx->cur_buf_idx + max;
163 dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n",
164 __func__, ctx->current_buf, ctx->cur_buf_idx);
165
166 /* if enough data in current buffer */
167 if (next_buf_idx < RN_BUF_SIZE) {
168 memcpy(data, bd->buf + ctx->cur_buf_idx, max);
169 ctx->cur_buf_idx = next_buf_idx;
170 return max;
171 }
172
173 /* else, copy what's left... */
174 copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx;
175 memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx);
176 ctx->cur_buf_idx = 0;
177 atomic_set(&bd->empty, BUF_EMPTY);
178
179 /* ...refill... */
180 submit_job(ctx, 1);
181
182 /* and use next buffer */
183 ctx->current_buf = !ctx->current_buf;
184 dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf);
185
186 /* since there already is some data read, don't wait */
187 return copied_idx + caam_read(rng, data + copied_idx,
188 max - copied_idx, false);
189}
190
191static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
192{
193 struct device *jrdev = ctx->jrdev;
194 u32 *desc = ctx->sh_desc;
195
196 init_sh_desc(desc, HDR_SHARE_WAIT);
197
198 /* Propagate errors from shared to job descriptor */
199 append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
200
201 /* Generate random bytes */
202 append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
203
204 /* Store bytes */
205 append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE);
206
207 ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
208 DMA_TO_DEVICE);
209#ifdef DEBUG
210 print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
211 desc, desc_bytes(desc), 1);
212#endif
213}
214
215static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
216{
217 struct device *jrdev = ctx->jrdev;
218 struct buf_data *bd = &ctx->bufs[buf_id];
219 u32 *desc = bd->hw_desc;
220 int sh_len = desc_len(ctx->sh_desc);
221
222 init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER |
223 HDR_REVERSE);
224
225 bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
226
227 append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
228#ifdef DEBUG
229 print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
230 desc, desc_bytes(desc), 1);
231#endif
232}
233
234static void caam_cleanup(struct hwrng *rng)
235{
236 int i;
237 struct buf_data *bd;
238
239 for (i = 0; i < 2; i++) {
240 bd = &rng_ctx.bufs[i];
241 if (atomic_read(&bd->empty) == BUF_PENDING)
242 wait_for_completion(&bd->filled);
243 }
244
245 rng_unmap_ctx(&rng_ctx);
246}
247
248static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
249{
250 struct buf_data *bd = &ctx->bufs[buf_id];
251
252 rng_create_job_desc(ctx, buf_id);
253 atomic_set(&bd->empty, BUF_EMPTY);
254 submit_job(ctx, buf_id == ctx->current_buf);
255 wait_for_completion(&bd->filled);
256}
257
258static void caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
259{
260 ctx->jrdev = jrdev;
261 rng_create_sh_desc(ctx);
262 ctx->current_buf = 0;
263 ctx->cur_buf_idx = 0;
264 caam_init_buf(ctx, 0);
265 caam_init_buf(ctx, 1);
266}
267
268static struct hwrng caam_rng = {
269 .name = "rng-caam",
270 .cleanup = caam_cleanup,
271 .read = caam_read,
272};
273
274static void __exit caam_rng_exit(void)
275{
276 hwrng_unregister(&caam_rng);
277}
278
279static int __init caam_rng_init(void)
280{
281 struct device_node *dev_node;
282 struct platform_device *pdev;
283 struct device *ctrldev;
284 struct caam_drv_private *priv;
285
286 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
287 if (!dev_node)
288 return -ENODEV;
289
290 pdev = of_find_device_by_node(dev_node);
291 if (!pdev)
292 return -ENODEV;
293
294 ctrldev = &pdev->dev;
295 priv = dev_get_drvdata(ctrldev);
296 of_node_put(dev_node);
297
298 caam_init_rng(&rng_ctx, priv->jrdev[0]);
299
300 dev_info(priv->jrdev[0], "registering rng-caam\n");
301 return hwrng_register(&caam_rng);
302}
303
304module_init(caam_rng_init);
305module_exit(caam_rng_exit);
306
307MODULE_LICENSE("GPL");
308MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
309MODULE_AUTHOR("Freescale Semiconductor - NMG");
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index a63bc65fae86..762aeff626ac 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -11,6 +11,7 @@
11#include <linux/device.h> 11#include <linux/device.h>
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/crypto.h> 13#include <linux/crypto.h>
14#include <linux/hash.h>
14#include <linux/hw_random.h> 15#include <linux/hw_random.h>
15#include <linux/of_platform.h> 16#include <linux/of_platform.h>
16#include <linux/dma-mapping.h> 17#include <linux/dma-mapping.h>
@@ -33,5 +34,6 @@
33#include <crypto/authenc.h> 34#include <crypto/authenc.h>
34#include <crypto/scatterwalk.h> 35#include <crypto/scatterwalk.h>
35#include <crypto/internal/skcipher.h> 36#include <crypto/internal/skcipher.h>
37#include <crypto/internal/hash.h>
36 38
37#endif /* !defined(CAAM_COMPAT_H) */ 39#endif /* !defined(CAAM_COMPAT_H) */
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 77557ebcd337..414ba20c05a1 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -2,13 +2,16 @@
2 * CAAM control-plane driver backend 2 * CAAM control-plane driver backend
3 * Controller-level driver, kernel property detection, initialization 3 * Controller-level driver, kernel property detection, initialization
4 * 4 *
5 * Copyright 2008-2011 Freescale Semiconductor, Inc. 5 * Copyright 2008-2012 Freescale Semiconductor, Inc.
6 */ 6 */
7 7
8#include "compat.h" 8#include "compat.h"
9#include "regs.h" 9#include "regs.h"
10#include "intern.h" 10#include "intern.h"
11#include "jr.h" 11#include "jr.h"
12#include "desc_constr.h"
13#include "error.h"
14#include "ctrl.h"
12 15
13static int caam_remove(struct platform_device *pdev) 16static int caam_remove(struct platform_device *pdev)
14{ 17{
@@ -43,10 +46,154 @@ static int caam_remove(struct platform_device *pdev)
43 return ret; 46 return ret;
44} 47}
45 48
49/*
50 * Descriptor to instantiate RNG State Handle 0 in normal mode and
51 * load the JDKEK, TDKEK and TDSK registers
52 */
53static void build_instantiation_desc(u32 *desc)
54{
55 u32 *jump_cmd;
56
57 init_job_desc(desc, 0);
58
59 /* INIT RNG in non-test mode */
60 append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
61 OP_ALG_AS_INIT);
62
63 /* wait for done */
64 jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
65 set_jump_tgt_here(desc, jump_cmd);
66
67 /*
68 * load 1 to clear written reg:
 69 * resets the done interrupt and returns the RNG to idle.
70 */
71 append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
72
73 /* generate secure keys (non-test) */
74 append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
75 OP_ALG_RNG4_SK);
76}
77
78struct instantiate_result {
79 struct completion completion;
80 int err;
81};
82
83static void rng4_init_done(struct device *dev, u32 *desc, u32 err,
84 void *context)
85{
86 struct instantiate_result *instantiation = context;
87
88 if (err) {
89 char tmp[CAAM_ERROR_STR_MAX];
90
91 dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
92 }
93
94 instantiation->err = err;
95 complete(&instantiation->completion);
96}
97
98static int instantiate_rng(struct device *jrdev)
99{
100 struct instantiate_result instantiation;
101
102 dma_addr_t desc_dma;
103 u32 *desc;
104 int ret;
105
106 desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA);
107 if (!desc) {
108 dev_err(jrdev, "cannot allocate RNG init descriptor memory\n");
109 return -ENOMEM;
110 }
111
112 build_instantiation_desc(desc);
113 desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE);
114 init_completion(&instantiation.completion);
115 ret = caam_jr_enqueue(jrdev, desc, rng4_init_done, &instantiation);
116 if (!ret) {
117 wait_for_completion_interruptible(&instantiation.completion);
118 ret = instantiation.err;
119 if (ret)
120 dev_err(jrdev, "unable to instantiate RNG\n");
121 }
122
123 dma_unmap_single(jrdev, desc_dma, desc_bytes(desc), DMA_TO_DEVICE);
124
125 kfree(desc);
126
127 return ret;
128}
129
130/*
131 * By default, the TRNG runs for 200 clocks per sample;
132 * 800 clocks per sample generates better entropy.
133 */
134static void kick_trng(struct platform_device *pdev)
135{
136 struct device *ctrldev = &pdev->dev;
137 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
138 struct caam_full __iomem *topregs;
139 struct rng4tst __iomem *r4tst;
140 u32 val;
141
142 topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
143 r4tst = &topregs->ctrl.r4tst[0];
144
145 /* put RNG4 into program mode */
146 setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
147 /* 800 clocks per sample */
148 val = rd_reg32(&r4tst->rtsdctl);
149 val = (val & ~RTSDCTL_ENT_DLY_MASK) | (800 << RTSDCTL_ENT_DLY_SHIFT);
150 wr_reg32(&r4tst->rtsdctl, val);
151 /* min. freq. count */
152 wr_reg32(&r4tst->rtfrqmin, 400);
153 /* max. freq. count */
154 wr_reg32(&r4tst->rtfrqmax, 6400);
155 /* put RNG4 into run mode */
156 clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
157}
158
159/**
160 * caam_get_era() - Return the ERA of the SEC on SoC, based
161 * on the SEC_VID register.
 162 * @caam_id - the value of the SEC_VID register
 163 * Returns the ERA number (1..4) or -ENOTSUPP if the ERA is unknown.
164 **/
165int caam_get_era(u64 caam_id)
166{
167 struct sec_vid *sec_vid = (struct sec_vid *)&caam_id;
168 static const struct {
169 u16 ip_id;
170 u8 maj_rev;
171 u8 era;
172 } caam_eras[] = {
173 {0x0A10, 1, 1},
174 {0x0A10, 2, 2},
175 {0x0A12, 1, 3},
176 {0x0A14, 1, 3},
177 {0x0A14, 2, 4},
178 {0x0A16, 1, 4},
179 {0x0A11, 1, 4}
180 };
181 int i;
182
183 for (i = 0; i < ARRAY_SIZE(caam_eras); i++)
184 if (caam_eras[i].ip_id == sec_vid->ip_id &&
185 caam_eras[i].maj_rev == sec_vid->maj_rev)
186 return caam_eras[i].era;
187
188 return -ENOTSUPP;
189}
190EXPORT_SYMBOL(caam_get_era);
191
46/* Probe routine for CAAM top (controller) level */ 192/* Probe routine for CAAM top (controller) level */
47static int caam_probe(struct platform_device *pdev) 193static int caam_probe(struct platform_device *pdev)
48{ 194{
49 int ring, rspec; 195 int ret, ring, rspec;
196 u64 caam_id;
50 struct device *dev; 197 struct device *dev;
51 struct device_node *nprop, *np; 198 struct device_node *nprop, *np;
52 struct caam_ctrl __iomem *ctrl; 199 struct caam_ctrl __iomem *ctrl;
@@ -82,13 +229,18 @@ static int caam_probe(struct platform_device *pdev)
82 229
83 /* 230 /*
84 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel, 231 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
85 * 36-bit pointers in master configuration register 232 * long pointers in master configuration register
86 */ 233 */
87 setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE | 234 setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
88 (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0)); 235 (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
89 236
90 if (sizeof(dma_addr_t) == sizeof(u64)) 237 if (sizeof(dma_addr_t) == sizeof(u64))
91 dma_set_mask(dev, DMA_BIT_MASK(36)); 238 if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
239 dma_set_mask(dev, DMA_BIT_MASK(40));
240 else
241 dma_set_mask(dev, DMA_BIT_MASK(36));
242 else
243 dma_set_mask(dev, DMA_BIT_MASK(32));
92 244
93 /* 245 /*
94 * Detect and enable JobRs 246 * Detect and enable JobRs
@@ -141,14 +293,29 @@ static int caam_probe(struct platform_device *pdev)
141 return -ENOMEM; 293 return -ENOMEM;
142 } 294 }
143 295
296 /*
297 * RNG4 based SECs (v5+) need special initialization prior
298 * to executing any descriptors
299 */
300 if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) {
301 kick_trng(pdev);
302 ret = instantiate_rng(ctrlpriv->jrdev[0]);
303 if (ret) {
304 caam_remove(pdev);
305 return ret;
306 }
307 }
308
144 /* NOTE: RTIC detection ought to go here, around Si time */ 309 /* NOTE: RTIC detection ought to go here, around Si time */
145 310
146 /* Initialize queue allocator lock */ 311 /* Initialize queue allocator lock */
147 spin_lock_init(&ctrlpriv->jr_alloc_lock); 312 spin_lock_init(&ctrlpriv->jr_alloc_lock);
148 313
314 caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id);
315
149 /* Report "alive" for developer to see */ 316 /* Report "alive" for developer to see */
150 dev_info(dev, "device ID = 0x%016llx\n", 317 dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
151 rd_reg64(&topregs->ctrl.perfmon.caam_id)); 318 caam_get_era(caam_id));
152 dev_info(dev, "job rings = %d, qi = %d\n", 319 dev_info(dev, "job rings = %d, qi = %d\n",
153 ctrlpriv->total_jobrs, ctrlpriv->qi_present); 320 ctrlpriv->total_jobrs, ctrlpriv->qi_present);
154 321
diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h
new file mode 100644
index 000000000000..980d44eaaf40
--- /dev/null
+++ b/drivers/crypto/caam/ctrl.h
@@ -0,0 +1,13 @@
1/*
2 * CAAM control-plane driver backend public-level include definitions
3 *
4 * Copyright 2012 Freescale Semiconductor, Inc.
5 */
6
7#ifndef CTRL_H
8#define CTRL_H
9
10/* Prototypes for backend-level services exposed to APIs */
11int caam_get_era(u64 caam_id);
12
13#endif /* CTRL_H */
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index a17c2958dab1..f7f833be8c67 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -8,6 +8,16 @@
8#ifndef DESC_H 8#ifndef DESC_H
9#define DESC_H 9#define DESC_H
10 10
11struct sec4_sg_entry {
12 u64 ptr;
13#define SEC4_SG_LEN_FIN 0x40000000
14#define SEC4_SG_LEN_EXT 0x80000000
15 u32 len;
16 u8 reserved;
17 u8 buf_pool_id;
18 u16 offset;
19};
20
11/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */ 21/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
12#define MAX_CAAM_DESCSIZE 64 22#define MAX_CAAM_DESCSIZE 64
13 23
@@ -1162,6 +1172,11 @@
1162#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT) 1172#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT)
1163#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT) 1173#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT)
1164 1174
1175/* RNG4 set */
1176#define OP_ALG_RNG4_SHIFT 4
1177#define OP_ALG_RNG4_MASK (0x1f3 << OP_ALG_RNG4_SHIFT)
1178
1179#define OP_ALG_RNG4_SK (0x100 << OP_ALG_RNG4_SHIFT)
1165 1180
1166#define OP_ALG_AS_SHIFT 2 1181#define OP_ALG_AS_SHIFT 2
1167#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT) 1182#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
@@ -1585,20 +1600,4 @@
1585#define NFIFOENTRY_PLEN_SHIFT 0 1600#define NFIFOENTRY_PLEN_SHIFT 0
1586#define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT) 1601#define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT)
1587 1602
1588/*
1589 * PDB internal definitions
1590 */
1591
1592/* IPSec ESP CBC Encap/Decap Options */
1593#define PDBOPTS_ESPCBC_ARSNONE 0x00 /* no antireplay window */
1594#define PDBOPTS_ESPCBC_ARS32 0x40 /* 32-entry antireplay window */
1595#define PDBOPTS_ESPCBC_ARS64 0xc0 /* 64-entry antireplay window */
1596#define PDBOPTS_ESPCBC_IVSRC 0x20 /* IV comes from internal random gen */
1597#define PDBOPTS_ESPCBC_ESN 0x10 /* extended sequence included */
1598#define PDBOPTS_ESPCBC_OUTFMT 0x08 /* output only decapsulation (decap) */
1599#define PDBOPTS_ESPCBC_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */
1600#define PDBOPTS_ESPCBC_INCIPHDR 0x04 /* Prepend IP header to output frame */
1601#define PDBOPTS_ESPCBC_IPVSN 0x02 /* process IPv6 header */
1602#define PDBOPTS_ESPCBC_TUNNEL 0x01 /* tunnel mode next-header byte */
1603
1604#endif /* DESC_H */ 1603#endif /* DESC_H */
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 348b882275f0..c85c1f058401 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * caam descriptor construction helper functions 2 * caam descriptor construction helper functions
3 * 3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc. 4 * Copyright 2008-2012 Freescale Semiconductor, Inc.
5 */ 5 */
6 6
7#include "desc.h" 7#include "desc.h"
@@ -51,7 +51,7 @@ static inline void *sh_desc_pdb(u32 *desc)
51 51
52static inline void init_desc(u32 *desc, u32 options) 52static inline void init_desc(u32 *desc, u32 options)
53{ 53{
54 *desc = options | HDR_ONE | 1; 54 *desc = (options | HDR_ONE) + 1;
55} 55}
56 56
57static inline void init_sh_desc(u32 *desc, u32 options) 57static inline void init_sh_desc(u32 *desc, u32 options)
@@ -62,9 +62,9 @@ static inline void init_sh_desc(u32 *desc, u32 options)
62 62
63static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes) 63static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
64{ 64{
65 u32 pdb_len = pdb_bytes / CAAM_CMD_SZ + 1; 65 u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
66 66
67 init_sh_desc(desc, ((pdb_len << HDR_START_IDX_SHIFT) + pdb_len) | 67 init_sh_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT) + pdb_len) |
68 options); 68 options);
69} 69}
70 70
@@ -117,6 +117,15 @@ static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
117 append_ptr(desc, ptr); 117 append_ptr(desc, ptr);
118} 118}
119 119
120/* Write length after pointer, rather than inside command */
121static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
122 unsigned int len, u32 command)
123{
124 append_cmd(desc, command);
125 append_ptr(desc, ptr);
126 append_cmd(desc, len);
127}
128
120static inline void append_cmd_data(u32 *desc, void *data, int len, 129static inline void append_cmd_data(u32 *desc, void *data, int len,
121 u32 command) 130 u32 command)
122{ 131{
@@ -166,13 +175,22 @@ static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
166 append_cmd_ptr(desc, ptr, len, CMD_##op | options); \ 175 append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
167} 176}
168APPEND_CMD_PTR(key, KEY) 177APPEND_CMD_PTR(key, KEY)
169APPEND_CMD_PTR(seq_in_ptr, SEQ_IN_PTR)
170APPEND_CMD_PTR(seq_out_ptr, SEQ_OUT_PTR)
171APPEND_CMD_PTR(load, LOAD) 178APPEND_CMD_PTR(load, LOAD)
172APPEND_CMD_PTR(store, STORE) 179APPEND_CMD_PTR(store, STORE)
173APPEND_CMD_PTR(fifo_load, FIFO_LOAD) 180APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
174APPEND_CMD_PTR(fifo_store, FIFO_STORE) 181APPEND_CMD_PTR(fifo_store, FIFO_STORE)
175 182
183#define APPEND_SEQ_PTR_INTLEN(cmd, op) \
184static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
185 unsigned int len, \
186 u32 options) \
187{ \
188 PRINT_POS; \
189 append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \
190}
191APPEND_SEQ_PTR_INTLEN(in, IN)
192APPEND_SEQ_PTR_INTLEN(out, OUT)
193
176#define APPEND_CMD_PTR_TO_IMM(cmd, op) \ 194#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
177static inline void append_##cmd##_as_imm(u32 *desc, void *data, \ 195static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
178 unsigned int len, u32 options) \ 196 unsigned int len, u32 options) \
@@ -183,6 +201,33 @@ static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
183APPEND_CMD_PTR_TO_IMM(load, LOAD); 201APPEND_CMD_PTR_TO_IMM(load, LOAD);
184APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD); 202APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
185 203
204#define APPEND_CMD_PTR_EXTLEN(cmd, op) \
205static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
206 unsigned int len, u32 options) \
207{ \
208 PRINT_POS; \
209 append_cmd_ptr_extlen(desc, ptr, len, CMD_##op | SQIN_EXT | options); \
210}
211APPEND_CMD_PTR_EXTLEN(seq_in_ptr, SEQ_IN_PTR)
212APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR)
213
214/*
215 * Determine whether to store length internally or externally depending on
216 * the size of its type
217 */
218#define APPEND_CMD_PTR_LEN(cmd, op, type) \
219static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
220 type len, u32 options) \
221{ \
222 PRINT_POS; \
223 if (sizeof(type) > sizeof(u16)) \
224 append_##cmd##_extlen(desc, ptr, len, options); \
225 else \
226 append_##cmd##_intlen(desc, ptr, len, options); \
227}
228APPEND_CMD_PTR_LEN(seq_in_ptr, SEQ_IN_PTR, u32)
229APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
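/* len is u32 here, so the size check above always selects the extlen form */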
230
186/* 231/*
187 * 2nd variant for commands whose specified immediate length differs 232 * 2nd variant for commands whose specified immediate length differs
188 * from length of immediate data provided, e.g., split keys 233 * from length of immediate data provided, e.g., split keys
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 7e2d54bffad6..9955ed9643e6 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -39,18 +39,20 @@ static void report_ccb_status(u32 status, char *outstr)
39 char *cha_id_list[] = { 39 char *cha_id_list[] = {
40 "", 40 "",
41 "AES", 41 "AES",
42 "DES, 3DES", 42 "DES",
43 "ARC4", 43 "ARC4",
44 "MD5, SHA-1, SH-224, SHA-256, SHA-384, SHA-512", 44 "MDHA",
45 "RNG", 45 "RNG",
46 "SNOW f8", 46 "SNOW f8",
47 "Kasumi f8, f9", 47 "Kasumi f8/9",
48 "All Public Key Algorithms", 48 "PKHA",
49 "CRC", 49 "CRCA",
50 "SNOW f9", 50 "SNOW f9",
51 "ZUCE",
52 "ZUCA",
51 }; 53 };
52 char *err_id_list[] = { 54 char *err_id_list[] = {
53 "None. No error.", 55 "No error.",
54 "Mode error.", 56 "Mode error.",
55 "Data size error.", 57 "Data size error.",
56 "Key size error.", 58 "Key size error.",
@@ -67,6 +69,20 @@ static void report_ccb_status(u32 status, char *outstr)
67 "Invalid CHA combination was selected", 69 "Invalid CHA combination was selected",
68 "Invalid CHA selected.", 70 "Invalid CHA selected.",
69 }; 71 };
72 char *rng_err_id_list[] = {
73 "",
74 "",
75 "",
76 "Instantiate",
77 "Not instantiated",
78 "Test instantiate",
79 "Prediction resistance",
80 "",
81 "Prediction resistance and test request",
82 "Uninstantiate",
83 "",
84 "Secure key generation",
85 };
70 u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >> 86 u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
71 JRSTA_CCBERR_CHAID_SHIFT; 87 JRSTA_CCBERR_CHAID_SHIFT;
72 u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; 88 u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
@@ -81,7 +97,13 @@ static void report_ccb_status(u32 status, char *outstr)
81 cha_id, sizeof("ff")); 97 cha_id, sizeof("ff"));
82 } 98 }
83 99
84 if (err_id < ARRAY_SIZE(err_id_list)) { 100 if ((cha_id << JRSTA_CCBERR_CHAID_SHIFT) == JRSTA_CCBERR_CHAID_RNG &&
101 err_id < ARRAY_SIZE(rng_err_id_list) &&
102 strlen(rng_err_id_list[err_id])) {
103 /* RNG-only error */
104 SPRINTFCAT(outstr, "%s", rng_err_id_list[err_id],
105 strlen(rng_err_id_list[err_id]));
106 } else if (err_id < ARRAY_SIZE(err_id_list)) {
85 SPRINTFCAT(outstr, "%s", err_id_list[err_id], 107 SPRINTFCAT(outstr, "%s", err_id_list[err_id],
86 strlen(err_id_list[err_id])); 108 strlen(err_id_list[err_id]));
87 } else { 109 } else {
@@ -101,10 +123,10 @@ static void report_deco_status(u32 status, char *outstr)
101 u8 value; 123 u8 value;
102 char *error_text; 124 char *error_text;
103 } desc_error_list[] = { 125 } desc_error_list[] = {
104 { 0x00, "None. No error." }, 126 { 0x00, "No error." },
105 { 0x01, "SGT Length Error. The descriptor is trying to read " 127 { 0x01, "SGT Length Error. The descriptor is trying to read "
106 "more data than is contained in the SGT table." }, 128 "more data than is contained in the SGT table." },
107 { 0x02, "Reserved." }, 129 { 0x02, "SGT Null Entry Error." },
108 { 0x03, "Job Ring Control Error. There is a bad value in the " 130 { 0x03, "Job Ring Control Error. There is a bad value in the "
109 "Job Ring Control register." }, 131 "Job Ring Control register." },
110 { 0x04, "Invalid Descriptor Command. The Descriptor Command " 132 { 0x04, "Invalid Descriptor Command. The Descriptor Command "
@@ -116,7 +138,7 @@ static void report_deco_status(u32 status, char *outstr)
116 { 0x09, "Invalid OPERATION Command" }, 138 { 0x09, "Invalid OPERATION Command" },
117 { 0x0A, "Invalid FIFO LOAD Command" }, 139 { 0x0A, "Invalid FIFO LOAD Command" },
118 { 0x0B, "Invalid FIFO STORE Command" }, 140 { 0x0B, "Invalid FIFO STORE Command" },
119 { 0x0C, "Invalid MOVE Command" }, 141 { 0x0C, "Invalid MOVE/MOVE_LEN Command" },
120 { 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is " 142 { 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is "
121 "invalid because the target is not a Job Header " 143 "invalid because the target is not a Job Header "
122 "Command, or the jump is from a Trusted Descriptor to " 144 "Command, or the jump is from a Trusted Descriptor to "
@@ -166,6 +188,8 @@ static void report_deco_status(u32 status, char *outstr)
166 "(input frame; block ciphers) and IPsec decap (output " 188 "(input frame; block ciphers) and IPsec decap (output "
167 "frame, when doing the next header byte update) and " 189 "frame, when doing the next header byte update) and "
168 "DCRC (output frame)." }, 190 "DCRC (output frame)." },
191 { 0x23, "Read Input Frame error" },
192 { 0x24, "JDKEK, TDKEK or TDSK not loaded error" },
169 { 0x80, "DNR (do not run) error" }, 193 { 0x80, "DNR (do not run) error" },
170 { 0x81, "undefined protocol command" }, 194 { 0x81, "undefined protocol command" },
171 { 0x82, "invalid setting in PDB" }, 195 { 0x82, "invalid setting in PDB" },
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index a34be01b0b29..5cd4c1b268a1 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -43,7 +43,7 @@ struct caam_drv_private_jr {
43 struct device *parentdev; /* points back to controller dev */ 43 struct device *parentdev; /* points back to controller dev */
44 int ridx; 44 int ridx;
45 struct caam_job_ring __iomem *rregs; /* JobR's register space */ 45 struct caam_job_ring __iomem *rregs; /* JobR's register space */
46 struct tasklet_struct irqtask[NR_CPUS]; 46 struct tasklet_struct irqtask;
47 int irq; /* One per queue */ 47 int irq; /* One per queue */
48 int assign; /* busy/free */ 48 int assign; /* busy/free */
49 49
@@ -86,10 +86,10 @@ struct caam_drv_private {
86 86
87 /* which jr allocated to scatterlist crypto */ 87 /* which jr allocated to scatterlist crypto */
88 atomic_t tfm_count ____cacheline_aligned; 88 atomic_t tfm_count ____cacheline_aligned;
89 int num_jrs_for_algapi;
90 struct device **algapi_jr;
91 /* list of registered crypto algorithms (mk generic context handle?) */ 89 /* list of registered crypto algorithms (mk generic context handle?) */
92 struct list_head alg_list; 90 struct list_head alg_list;
91 /* list of registered hash algorithms (mk generic context handle?) */
92 struct list_head hash_list;
93 93
94 /* 94 /*
95 * debugfs entries for developer view into driver/device 95 * debugfs entries for developer view into driver/device
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 340fa322c0f0..93d14070141a 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -2,7 +2,7 @@
2 * CAAM/SEC 4.x transport/backend driver 2 * CAAM/SEC 4.x transport/backend driver
3 * JobR backend functionality 3 * JobR backend functionality
4 * 4 *
5 * Copyright 2008-2011 Freescale Semiconductor, Inc. 5 * Copyright 2008-2012 Freescale Semiconductor, Inc.
6 */ 6 */
7 7
8#include "compat.h" 8#include "compat.h"
@@ -43,7 +43,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
43 wr_reg32(&jrp->rregs->jrintstatus, irqstate); 43 wr_reg32(&jrp->rregs->jrintstatus, irqstate);
44 44
45 preempt_disable(); 45 preempt_disable();
46 tasklet_schedule(&jrp->irqtask[smp_processor_id()]); 46 tasklet_schedule(&jrp->irqtask);
47 preempt_enable(); 47 preempt_enable();
48 48
49 return IRQ_HANDLED; 49 return IRQ_HANDLED;
@@ -58,17 +58,16 @@ static void caam_jr_dequeue(unsigned long devarg)
58 void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg); 58 void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
59 u32 *userdesc, userstatus; 59 u32 *userdesc, userstatus;
60 void *userarg; 60 void *userarg;
61 unsigned long flags;
62 61
63 spin_lock_irqsave(&jrp->outlock, flags); 62 while (rd_reg32(&jrp->rregs->outring_used)) {
64 63
65 head = ACCESS_ONCE(jrp->head); 64 head = ACCESS_ONCE(jrp->head);
66 sw_idx = tail = jrp->tail;
67 65
68 while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 && 66 spin_lock(&jrp->outlock);
69 rd_reg32(&jrp->rregs->outring_used)) {
70 67
68 sw_idx = tail = jrp->tail;
71 hw_idx = jrp->out_ring_read_index; 69 hw_idx = jrp->out_ring_read_index;
70
72 for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) { 71 for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
73 sw_idx = (tail + i) & (JOBR_DEPTH - 1); 72 sw_idx = (tail + i) & (JOBR_DEPTH - 1);
74 73
@@ -95,7 +94,8 @@ static void caam_jr_dequeue(unsigned long devarg)
95 userdesc = jrp->entinfo[sw_idx].desc_addr_virt; 94 userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
96 userstatus = jrp->outring[hw_idx].jrstatus; 95 userstatus = jrp->outring[hw_idx].jrstatus;
97 96
98 smp_mb(); 97 /* set done */
98 wr_reg32(&jrp->rregs->outring_rmvd, 1);
99 99
100 jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) & 100 jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
101 (JOBR_DEPTH - 1); 101 (JOBR_DEPTH - 1);
@@ -115,22 +115,12 @@ static void caam_jr_dequeue(unsigned long devarg)
115 jrp->tail = tail; 115 jrp->tail = tail;
116 } 116 }
117 117
118 /* set done */ 118 spin_unlock(&jrp->outlock);
119 wr_reg32(&jrp->rregs->outring_rmvd, 1);
120
121 spin_unlock_irqrestore(&jrp->outlock, flags);
122 119
123 /* Finally, execute user's callback */ 120 /* Finally, execute user's callback */
124 usercall(dev, userdesc, userstatus, userarg); 121 usercall(dev, userdesc, userstatus, userarg);
125
126 spin_lock_irqsave(&jrp->outlock, flags);
127
128 head = ACCESS_ONCE(jrp->head);
129 sw_idx = tail = jrp->tail;
130 } 122 }
131 123
132 spin_unlock_irqrestore(&jrp->outlock, flags);
133
134 /* reenable / unmask IRQs */ 124 /* reenable / unmask IRQs */
135 clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); 125 clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
136} 126}
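
In outline, the reworked dequeue loop narrows the outlock critical section (illustrative skeleton only; field names as in the driver): ring bookkeeping stays under the lock, the user callback runs unlocked, and the outer loop simply re-polls outring_used instead of re-reading head/tail under the lock as the old code did.

while (rd_reg32(&jrp->rregs->outring_used)) {
	spin_lock(&jrp->outlock);
	/* locate the software entry for the completed job, unmap the
	 * descriptor, write outring_rmvd, advance read index and tail */
	spin_unlock(&jrp->outlock);

	usercall(dev, userdesc, userstatus, userarg);	/* lock dropped */
}
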
@@ -148,23 +138,22 @@ int caam_jr_register(struct device *ctrldev, struct device **rdev)
148{ 138{
149 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); 139 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
150 struct caam_drv_private_jr *jrpriv = NULL; 140 struct caam_drv_private_jr *jrpriv = NULL;
151 unsigned long flags;
152 int ring; 141 int ring;
153 142
154 /* Lock, if free ring - assign, unlock */ 143 /* Lock, if free ring - assign, unlock */
155 spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags); 144 spin_lock(&ctrlpriv->jr_alloc_lock);
156 for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { 145 for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
157 jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]); 146 jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
158 if (jrpriv->assign == JOBR_UNASSIGNED) { 147 if (jrpriv->assign == JOBR_UNASSIGNED) {
159 jrpriv->assign = JOBR_ASSIGNED; 148 jrpriv->assign = JOBR_ASSIGNED;
160 *rdev = ctrlpriv->jrdev[ring]; 149 *rdev = ctrlpriv->jrdev[ring];
161 spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags); 150 spin_unlock(&ctrlpriv->jr_alloc_lock);
162 return ring; 151 return ring;
163 } 152 }
164 } 153 }
165 154
166 /* If assigned, write dev where caller needs it */ 155 /* If assigned, write dev where caller needs it */
167 spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags); 156 spin_unlock(&ctrlpriv->jr_alloc_lock);
168 *rdev = NULL; 157 *rdev = NULL;
169 158
170 return -ENODEV; 159 return -ENODEV;
@@ -182,7 +171,6 @@ int caam_jr_deregister(struct device *rdev)
182{ 171{
183 struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev); 172 struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
184 struct caam_drv_private *ctrlpriv; 173 struct caam_drv_private *ctrlpriv;
185 unsigned long flags;
186 174
187 /* Get the owning controller's private space */ 175 /* Get the owning controller's private space */
188 ctrlpriv = dev_get_drvdata(jrpriv->parentdev); 176 ctrlpriv = dev_get_drvdata(jrpriv->parentdev);
@@ -195,9 +183,9 @@ int caam_jr_deregister(struct device *rdev)
195 return -EBUSY; 183 return -EBUSY;
196 184
197 /* Release ring */ 185 /* Release ring */
198 spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags); 186 spin_lock(&ctrlpriv->jr_alloc_lock);
199 jrpriv->assign = JOBR_UNASSIGNED; 187 jrpriv->assign = JOBR_UNASSIGNED;
200 spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags); 188 spin_unlock(&ctrlpriv->jr_alloc_lock);
201 189
202 return 0; 190 return 0;
203} 191}
@@ -238,7 +226,6 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
238{ 226{
239 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); 227 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
240 struct caam_jrentry_info *head_entry; 228 struct caam_jrentry_info *head_entry;
241 unsigned long flags;
242 int head, tail, desc_size; 229 int head, tail, desc_size;
243 dma_addr_t desc_dma; 230 dma_addr_t desc_dma;
244 231
@@ -249,14 +236,14 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
249 return -EIO; 236 return -EIO;
250 } 237 }
251 238
252 spin_lock_irqsave(&jrp->inplock, flags); 239 spin_lock_bh(&jrp->inplock);
253 240
254 head = jrp->head; 241 head = jrp->head;
255 tail = ACCESS_ONCE(jrp->tail); 242 tail = ACCESS_ONCE(jrp->tail);
256 243
257 if (!rd_reg32(&jrp->rregs->inpring_avail) || 244 if (!rd_reg32(&jrp->rregs->inpring_avail) ||
258 CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { 245 CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
259 spin_unlock_irqrestore(&jrp->inplock, flags); 246 spin_unlock_bh(&jrp->inplock);
260 dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE); 247 dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
261 return -EBUSY; 248 return -EBUSY;
262 } 249 }
@@ -276,11 +263,9 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
276 (JOBR_DEPTH - 1); 263 (JOBR_DEPTH - 1);
277 jrp->head = (head + 1) & (JOBR_DEPTH - 1); 264 jrp->head = (head + 1) & (JOBR_DEPTH - 1);
278 265
279 wmb();
280
281 wr_reg32(&jrp->rregs->inpring_jobadd, 1); 266 wr_reg32(&jrp->rregs->inpring_jobadd, 1);
282 267
283 spin_unlock_irqrestore(&jrp->inplock, flags); 268 spin_unlock_bh(&jrp->inplock);
284 269
285 return 0; 270 return 0;
286} 271}
@@ -337,11 +322,9 @@ static int caam_jr_init(struct device *dev)
337 322
338 jrp = dev_get_drvdata(dev); 323 jrp = dev_get_drvdata(dev);
339 324
340 /* Connect job ring interrupt handler. */ 325 tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
341 for_each_possible_cpu(i)
342 tasklet_init(&jrp->irqtask[i], caam_jr_dequeue,
343 (unsigned long)dev);
344 326
327 /* Connect job ring interrupt handler. */
345 error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, 328 error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
346 "caam-jobr", dev); 329 "caam-jobr", dev);
347 if (error) { 330 if (error) {
@@ -356,10 +339,11 @@ static int caam_jr_init(struct device *dev)
356 if (error) 339 if (error)
357 return error; 340 return error;
358 341
359 jrp->inpring = kzalloc(sizeof(dma_addr_t) * JOBR_DEPTH, 342 jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
360 GFP_KERNEL | GFP_DMA); 343 &inpbusaddr, GFP_KERNEL);
361 jrp->outring = kzalloc(sizeof(struct jr_outentry) * 344
362 JOBR_DEPTH, GFP_KERNEL | GFP_DMA); 345 jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) *
346 JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
363 347
364 jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH, 348 jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
365 GFP_KERNEL); 349 GFP_KERNEL);
@@ -375,31 +359,6 @@ static int caam_jr_init(struct device *dev)
375 jrp->entinfo[i].desc_addr_dma = !0; 359 jrp->entinfo[i].desc_addr_dma = !0;
376 360
377 /* Setup rings */ 361 /* Setup rings */
378 inpbusaddr = dma_map_single(dev, jrp->inpring,
379 sizeof(u32 *) * JOBR_DEPTH,
380 DMA_BIDIRECTIONAL);
381 if (dma_mapping_error(dev, inpbusaddr)) {
382 dev_err(dev, "caam_jr_init(): can't map input ring\n");
383 kfree(jrp->inpring);
384 kfree(jrp->outring);
385 kfree(jrp->entinfo);
386 return -EIO;
387 }
388
389 outbusaddr = dma_map_single(dev, jrp->outring,
390 sizeof(struct jr_outentry) * JOBR_DEPTH,
391 DMA_BIDIRECTIONAL);
392 if (dma_mapping_error(dev, outbusaddr)) {
393 dev_err(dev, "caam_jr_init(): can't map output ring\n");
394 dma_unmap_single(dev, inpbusaddr,
395 sizeof(u32 *) * JOBR_DEPTH,
396 DMA_BIDIRECTIONAL);
397 kfree(jrp->inpring);
398 kfree(jrp->outring);
399 kfree(jrp->entinfo);
400 return -EIO;
401 }
402
403 jrp->inp_ring_write_index = 0; 362 jrp->inp_ring_write_index = 0;
404 jrp->out_ring_read_index = 0; 363 jrp->out_ring_read_index = 0;
405 jrp->head = 0; 364 jrp->head = 0;
@@ -431,12 +390,11 @@ int caam_jr_shutdown(struct device *dev)
431{ 390{
432 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); 391 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
433 dma_addr_t inpbusaddr, outbusaddr; 392 dma_addr_t inpbusaddr, outbusaddr;
434 int ret, i; 393 int ret;
435 394
436 ret = caam_reset_hw_jr(dev); 395 ret = caam_reset_hw_jr(dev);
437 396
438 for_each_possible_cpu(i) 397 tasklet_kill(&jrp->irqtask);
439 tasklet_kill(&jrp->irqtask[i]);
440 398
441 /* Release interrupt */ 399 /* Release interrupt */
442 free_irq(jrp->irq, dev); 400 free_irq(jrp->irq, dev);
@@ -444,13 +402,10 @@ int caam_jr_shutdown(struct device *dev)
444 /* Free rings */ 402 /* Free rings */
445 inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); 403 inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
446 outbusaddr = rd_reg64(&jrp->rregs->outring_base); 404 outbusaddr = rd_reg64(&jrp->rregs->outring_base);
447 dma_unmap_single(dev, outbusaddr, 405 dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
448 sizeof(struct jr_outentry) * JOBR_DEPTH, 406 jrp->inpring, inpbusaddr);
449 DMA_BIDIRECTIONAL); 407 dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
450 dma_unmap_single(dev, inpbusaddr, sizeof(u32 *) * JOBR_DEPTH, 408 jrp->outring, outbusaddr);
451 DMA_BIDIRECTIONAL);
452 kfree(jrp->outring);
453 kfree(jrp->inpring);
454 kfree(jrp->entinfo); 409 kfree(jrp->entinfo);
455 410
456 return ret; 411 return ret;
@@ -503,6 +458,14 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
503 dev_set_drvdata(jrdev, jrpriv); 458 dev_set_drvdata(jrdev, jrpriv);
504 ctrlpriv->jrdev[ring] = jrdev; 459 ctrlpriv->jrdev[ring] = jrdev;
505 460
461 if (sizeof(dma_addr_t) == sizeof(u64))
462 if (of_device_is_compatible(np, "fsl,sec-v5.0-job-ring"))
463 dma_set_mask(jrdev, DMA_BIT_MASK(40));
464 else
465 dma_set_mask(jrdev, DMA_BIT_MASK(36));
466 else
467 dma_set_mask(jrdev, DMA_BIT_MASK(32));
468
506 /* Identify the interrupt */ 469 /* Identify the interrupt */
507 jrpriv->irq = of_irq_to_resource(np, 0, NULL); 470 jrpriv->irq = of_irq_to_resource(np, 0, NULL);
508 471
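
Written out with braces (the hunk above relies on C's dangling-else binding, which does pair each else with the intended if), the DMA-mask policy is: 40-bit addressing for SEC v5.0 job rings when dma_addr_t is 64-bit, 36-bit for earlier SEC versions, and 32-bit otherwise. A braced restatement of the same logic:

if (sizeof(dma_addr_t) == sizeof(u64)) {
	if (of_device_is_compatible(np, "fsl,sec-v5.0-job-ring"))
		dma_set_mask(jrdev, DMA_BIT_MASK(40));
	else
		dma_set_mask(jrdev, DMA_BIT_MASK(36));
} else {
	dma_set_mask(jrdev, DMA_BIT_MASK(32));
}
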
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
new file mode 100644
index 000000000000..002888185f17
--- /dev/null
+++ b/drivers/crypto/caam/key_gen.c
@@ -0,0 +1,122 @@
1/*
2 * CAAM/SEC 4.x functions for handling key-generation jobs
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 *
6 */
7#include "compat.h"
8#include "jr.h"
9#include "error.h"
10#include "desc_constr.h"
11#include "key_gen.h"
12
13void split_key_done(struct device *dev, u32 *desc, u32 err,
14 void *context)
15{
16 struct split_key_result *res = context;
17
18#ifdef DEBUG
19 dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20#endif
21
22 if (err) {
23 char tmp[CAAM_ERROR_STR_MAX];
24
25 dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
26 }
27
28 res->err = err;
29
30 complete(&res->completion);
31}
32EXPORT_SYMBOL(split_key_done);
33/*
34get a split ipad/opad key
35
36Split key generation-----------------------------------------------
37
38[00] 0xb0810008 jobdesc: stidx=1 share=never len=8
39[01] 0x04000014 key: class2->keyreg len=20
40 @0xffe01000
41[03] 0x84410014 operation: cls2-op sha1 hmac init dec
42[04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm
43[05] 0xa4000001 jump: class2 local all ->1 [06]
44[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
45 @0xffe04000
46*/
47u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
48 int split_key_pad_len, const u8 *key_in, u32 keylen,
49 u32 alg_op)
50{
51 u32 *desc;
52 struct split_key_result result;
53 dma_addr_t dma_addr_in, dma_addr_out;
54 int ret = 0;
55
56 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
57
58 init_job_desc(desc, 0);
59
60 dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
61 DMA_TO_DEVICE);
62 if (dma_mapping_error(jrdev, dma_addr_in)) {
63 dev_err(jrdev, "unable to map key input memory\n");
64 kfree(desc);
65 return -ENOMEM;
66 }
67 append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
68
69 /* Sets MDHA up into an HMAC-INIT */
70 append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
71
72 /*
73 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
74 * into both pads inside MDHA
75 */
76 append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
77 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
78
79 /*
80 * FIFO_STORE with the explicit split-key content store
81 * (0x26 output type)
82 */
83 dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
84 DMA_FROM_DEVICE);
85 if (dma_mapping_error(jrdev, dma_addr_out)) {
86 dev_err(jrdev, "unable to map key output memory\n");
87 kfree(desc);
88 return -ENOMEM;
89 }
90 append_fifo_store(desc, dma_addr_out, split_key_len,
91 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
92
93#ifdef DEBUG
94 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
95 DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
96 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
97 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
98#endif
99
100 result.err = 0;
101 init_completion(&result.completion);
102
103 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
104 if (!ret) {
105 /* in progress */
106 wait_for_completion_interruptible(&result.completion);
107 ret = result.err;
108#ifdef DEBUG
109 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
110 DUMP_PREFIX_ADDRESS, 16, 4, key_out,
111 split_key_pad_len, 1);
112#endif
113 }
114
115 dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
116 DMA_FROM_DEVICE);
117 dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
118
119 kfree(desc);
120
121 return ret;
122}
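
A hedged usage sketch for the new helper: the caller supplies a DMA-capable output buffer of at least split_key_pad_len bytes and blocks until the job ring signals completion. The alg_op value below (HMAC-SHA1) and the buffer handling are assumptions for illustration, not values the helper mandates.

/* illustrative caller; buffer sizing and alg_op are assumptions */
u8 *key_out = kmalloc(split_key_pad_len, GFP_KERNEL | GFP_DMA);
int ret;

if (!key_out)
	return -ENOMEM;
ret = gen_split_key(jrdev, key_out, split_key_len, split_key_pad_len,
		    key_in, keylen, OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC);
if (ret)
	dev_err(jrdev, "split key generation failed: %d\n", ret);
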
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
new file mode 100644
index 000000000000..d95d290c6e8b
--- /dev/null
+++ b/drivers/crypto/caam/key_gen.h
@@ -0,0 +1,17 @@
1/*
2 * CAAM/SEC 4.x definitions for handling key-generation jobs
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 *
6 */
7
8struct split_key_result {
9 struct completion completion;
10 int err;
11};
12
13void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
14
15u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
16 int split_key_pad_len, const u8 *key_in, u32 keylen,
17 u32 alg_op);
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
new file mode 100644
index 000000000000..62950d22ac13
--- /dev/null
+++ b/drivers/crypto/caam/pdb.h
@@ -0,0 +1,401 @@
1/*
2 * CAAM Protocol Data Block (PDB) definition header file
3 *
4 * Copyright 2008-2012 Freescale Semiconductor, Inc.
5 *
6 */
7
8#ifndef CAAM_PDB_H
9#define CAAM_PDB_H
10
11/*
12 * PDB- IPSec ESP Header Modification Options
13 */
14#define PDBHMO_ESP_DECAP_SHIFT 12
15#define PDBHMO_ESP_ENCAP_SHIFT 4
16/*
17 * Encap and Decap - Decrement TTL (Hop Limit) - Based on the value of the
18 * Options Byte IP version (IPvsn) field:
19 * if IPv4, decrement the inner IP header TTL field (byte 8);
20 * if IPv6 decrement the inner IP header Hop Limit field (byte 7).
21*/
22#define PDBHMO_ESP_DECAP_DEC_TTL (0x02 << PDBHMO_ESP_DECAP_SHIFT)
23#define PDBHMO_ESP_ENCAP_DEC_TTL (0x02 << PDBHMO_ESP_ENCAP_SHIFT)
24/*
25 * Decap - DiffServ Copy - Copy the IPv4 TOS or IPv6 Traffic Class byte
26 * from the outer IP header to the inner IP header.
27 */
28#define PDBHMO_ESP_DIFFSERV (0x01 << PDBHMO_ESP_DECAP_SHIFT)
29/*
30 * Encap- Copy DF bit -if an IPv4 tunnel mode outer IP header is coming from
31 * the PDB, copy the DF bit from the inner IP header to the outer IP header.
32 */
33#define PDBHMO_ESP_DFBIT (0x04 << PDBHMO_ESP_ENCAP_SHIFT)
34
35/*
36 * PDB - IPSec ESP Encap/Decap Options
37 */
38#define PDBOPTS_ESP_ARSNONE 0x00 /* no antireplay window */
39#define PDBOPTS_ESP_ARS32 0x40 /* 32-entry antireplay window */
40#define PDBOPTS_ESP_ARS64 0xc0 /* 64-entry antireplay window */
41#define PDBOPTS_ESP_IVSRC 0x20 /* IV comes from internal random gen */
42#define PDBOPTS_ESP_ESN 0x10 /* extended sequence included */
43#define PDBOPTS_ESP_OUTFMT 0x08 /* output only decapsulation (decap) */
44#define PDBOPTS_ESP_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */
45#define PDBOPTS_ESP_INCIPHDR 0x04 /* Prepend IP header to output frame */
46#define PDBOPTS_ESP_IPVSN 0x02 /* process IPv6 header */
47#define PDBOPTS_ESP_TUNNEL 0x01 /* tunnel mode next-header byte */
48#define PDBOPTS_ESP_IPV6 0x02 /* ip header version is V6 */
49#define PDBOPTS_ESP_DIFFSERV 0x40 /* copy TOS/TC from inner iphdr */
50#define PDBOPTS_ESP_UPDATE_CSUM 0x80 /* encap-update ip header checksum */
51#define PDBOPTS_ESP_VERIFY_CSUM 0x20 /* decap-validate ip header checksum */
52
53/*
54 * General IPSec encap/decap PDB definitions
55 */
56struct ipsec_encap_cbc {
57 u32 iv[4];
58};
59
60struct ipsec_encap_ctr {
61 u32 ctr_nonce;
62 u32 ctr_initial;
63 u32 iv[2];
64};
65
66struct ipsec_encap_ccm {
67 u32 salt; /* lower 24 bits */
68 u8 b0_flags;
69 u8 ctr_flags;
70 u16 ctr_initial;
71 u32 iv[2];
72};
73
74struct ipsec_encap_gcm {
75 u32 salt; /* lower 24 bits */
76 u32 rsvd1;
77 u32 iv[2];
78};
79
80struct ipsec_encap_pdb {
81 u8 hmo_rsvd;
82 u8 ip_nh;
83 u8 ip_nh_offset;
84 u8 options;
85 u32 seq_num_ext_hi;
86 u32 seq_num;
87 union {
88 struct ipsec_encap_cbc cbc;
89 struct ipsec_encap_ctr ctr;
90 struct ipsec_encap_ccm ccm;
91 struct ipsec_encap_gcm gcm;
92 };
93 u32 spi;
94 u16 rsvd1;
95 u16 ip_hdr_len;
96 u32 ip_hdr[0]; /* optional IP Header content */
97};
98
99struct ipsec_decap_cbc {
100 u32 rsvd[2];
101};
102
103struct ipsec_decap_ctr {
104 u32 salt;
105 u32 ctr_initial;
106};
107
108struct ipsec_decap_ccm {
109 u32 salt;
110 u8 iv_flags;
111 u8 ctr_flags;
112 u16 ctr_initial;
113};
114
115struct ipsec_decap_gcm {
116 u32 salt;
117 u32 resvd;
118};
119
120struct ipsec_decap_pdb {
121 u16 hmo_ip_hdr_len;
122 u8 ip_nh_offset;
123 u8 options;
124 union {
125 struct ipsec_decap_cbc cbc;
126 struct ipsec_decap_ctr ctr;
127 struct ipsec_decap_ccm ccm;
128 struct ipsec_decap_gcm gcm;
129 };
130 u32 seq_num_ext_hi;
131 u32 seq_num;
132 u32 anti_replay[2];
133 u32 end_index[0];
134};
135
136/*
137 * IPSec ESP Datapath Protocol Override Register (DPOVRD)
138 */
139struct ipsec_deco_dpovrd {
140#define IPSEC_ENCAP_DECO_DPOVRD_USE 0x80
141 u8 ovrd_ecn;
142 u8 ip_hdr_len;
143 u8 nh_offset;
144 u8 next_header; /* reserved if decap */
145};
146
147/*
148 * IEEE 802.11i WiFi Protocol Data Block
149 */
150#define WIFI_PDBOPTS_FCS 0x01
151#define WIFI_PDBOPTS_AR 0x40
152
153struct wifi_encap_pdb {
154 u16 mac_hdr_len;
155 u8 rsvd;
156 u8 options;
157 u8 iv_flags;
158 u8 pri;
159 u16 pn1;
160 u32 pn2;
161 u16 frm_ctrl_mask;
162 u16 seq_ctrl_mask;
163 u8 rsvd1[2];
164 u8 cnst;
165 u8 key_id;
166 u8 ctr_flags;
167 u8 rsvd2;
168 u16 ctr_init;
169};
170
171struct wifi_decap_pdb {
172 u16 mac_hdr_len;
173 u8 rsvd;
174 u8 options;
175 u8 iv_flags;
176 u8 pri;
177 u16 pn1;
178 u32 pn2;
179 u16 frm_ctrl_mask;
180 u16 seq_ctrl_mask;
181 u8 rsvd1[4];
182 u8 ctr_flags;
183 u8 rsvd2;
184 u16 ctr_init;
185};
186
187/*
188 * IEEE 802.16 WiMAX Protocol Data Block
189 */
190#define WIMAX_PDBOPTS_FCS 0x01
191#define WIMAX_PDBOPTS_AR 0x40 /* decap only */
192
193struct wimax_encap_pdb {
194 u8 rsvd[3];
195 u8 options;
196 u32 nonce;
197 u8 b0_flags;
198 u8 ctr_flags;
199 u16 ctr_init;
200 /* begin DECO writeback region */
201 u32 pn;
202 /* end DECO writeback region */
203};
204
205struct wimax_decap_pdb {
206 u8 rsvd[3];
207 u8 options;
208 u32 nonce;
209 u8 iv_flags;
210 u8 ctr_flags;
211 u16 ctr_init;
212 /* begin DECO writeback region */
213 u32 pn;
214 u8 rsvd1[2];
215 u16 antireplay_len;
216 u64 antireplay_scorecard;
217 /* end DECO writeback region */
218};
219
220/*
221 * IEEE 801.AE MacSEC Protocol Data Block
222 */
223#define MACSEC_PDBOPTS_FCS 0x01
224#define MACSEC_PDBOPTS_AR 0x40 /* used in decap only */
225
226struct macsec_encap_pdb {
227 u16 aad_len;
228 u8 rsvd;
229 u8 options;
230 u64 sci;
231 u16 ethertype;
232 u8 tci_an;
233 u8 rsvd1;
234 /* begin DECO writeback region */
235 u32 pn;
236 /* end DECO writeback region */
237};
238
239struct macsec_decap_pdb {
240 u16 aad_len;
241 u8 rsvd;
242 u8 options;
243 u64 sci;
244 u8 rsvd1[3];
245 /* begin DECO writeback region */
246 u8 antireplay_len;
247 u32 pn;
248 u64 antireplay_scorecard;
249 /* end DECO writeback region */
250};
251
252/*
253 * SSL/TLS/DTLS Protocol Data Blocks
254 */
255
256#define TLS_PDBOPTS_ARS32 0x40
257#define TLS_PDBOPTS_ARS64 0xc0
258#define TLS_PDBOPTS_OUTFMT 0x08
259#define TLS_PDBOPTS_IV_WRTBK 0x02 /* 1.1/1.2/DTLS only */
260#define TLS_PDBOPTS_EXP_RND_IV 0x01 /* 1.1/1.2/DTLS only */
261
262struct tls_block_encap_pdb {
263 u8 type;
264 u8 version[2];
265 u8 options;
266 u64 seq_num;
267 u32 iv[4];
268};
269
270struct tls_stream_encap_pdb {
271 u8 type;
272 u8 version[2];
273 u8 options;
274 u64 seq_num;
275 u8 i;
276 u8 j;
277 u8 rsvd1[2];
278};
279
280struct dtls_block_encap_pdb {
281 u8 type;
282 u8 version[2];
283 u8 options;
284 u16 epoch;
285 u16 seq_num[3];
286 u32 iv[4];
287};
288
289struct tls_block_decap_pdb {
290 u8 rsvd[3];
291 u8 options;
292 u64 seq_num;
293 u32 iv[4];
294};
295
296struct tls_stream_decap_pdb {
297 u8 rsvd[3];
298 u8 options;
299 u64 seq_num;
300 u8 i;
301 u8 j;
302 u8 rsvd1[2];
303};
304
305struct dtls_block_decap_pdb {
306 u8 rsvd[3];
307 u8 options;
308 u16 epoch;
309 u16 seq_num[3];
310 u32 iv[4];
311 u64 antireplay_scorecard;
312};
313
314/*
315 * SRTP Protocol Data Blocks
316 */
317#define SRTP_PDBOPTS_MKI 0x08
318#define SRTP_PDBOPTS_AR 0x40
319
320struct srtp_encap_pdb {
321 u8 x_len;
322 u8 mki_len;
323 u8 n_tag;
324 u8 options;
325 u32 cnst0;
326 u8 rsvd[2];
327 u16 cnst1;
328 u16 salt[7];
329 u16 cnst2;
330 u32 rsvd1;
331 u32 roc;
332 u32 opt_mki;
333};
334
335struct srtp_decap_pdb {
336 u8 x_len;
337 u8 mki_len;
338 u8 n_tag;
339 u8 options;
340 u32 cnst0;
341 u8 rsvd[2];
342 u16 cnst1;
343 u16 salt[7];
344 u16 cnst2;
345 u16 rsvd1;
346 u16 seq_num;
347 u32 roc;
348 u64 antireplay_scorecard;
349};
350
351/*
352 * DSA/ECDSA Protocol Data Blocks
353 * Two of these exist: DSA-SIGN, and DSA-VERIFY. They are similar
354 * except for the treatment of "w" for verify, "s" for sign,
355 * and the placement of "a,b".
356 */
357#define DSA_PDB_SGF_SHIFT 24
358#define DSA_PDB_SGF_MASK (0xff << DSA_PDB_SGF_SHIFT)
359#define DSA_PDB_SGF_Q (0x80 << DSA_PDB_SGF_SHIFT)
360#define DSA_PDB_SGF_R (0x40 << DSA_PDB_SGF_SHIFT)
361#define DSA_PDB_SGF_G (0x20 << DSA_PDB_SGF_SHIFT)
362#define DSA_PDB_SGF_W (0x10 << DSA_PDB_SGF_SHIFT)
363#define DSA_PDB_SGF_S (0x10 << DSA_PDB_SGF_SHIFT)
364#define DSA_PDB_SGF_F (0x08 << DSA_PDB_SGF_SHIFT)
365#define DSA_PDB_SGF_C (0x04 << DSA_PDB_SGF_SHIFT)
366#define DSA_PDB_SGF_D (0x02 << DSA_PDB_SGF_SHIFT)
367#define DSA_PDB_SGF_AB_SIGN (0x02 << DSA_PDB_SGF_SHIFT)
368#define DSA_PDB_SGF_AB_VERIFY (0x01 << DSA_PDB_SGF_SHIFT)
369
370#define DSA_PDB_L_SHIFT 7
371#define DSA_PDB_L_MASK (0x3ff << DSA_PDB_L_SHIFT)
372
373#define DSA_PDB_N_MASK 0x7f
374
375struct dsa_sign_pdb {
 376 u32 sgf_ln; /* Use DSA_PDB_ definitions per above */
377 u8 *q;
378 u8 *r;
379 u8 *g; /* or Gx,y */
380 u8 *s;
381 u8 *f;
382 u8 *c;
383 u8 *d;
384 u8 *ab; /* ECC only */
385 u8 *u;
386};
387
388struct dsa_verify_pdb {
389 u32 sgf_ln;
390 u8 *q;
391 u8 *r;
392 u8 *g; /* or Gx,y */
393 u8 *w; /* or Wx,y */
394 u8 *f;
395 u8 *c;
396 u8 *d;
397 u8 *tmp; /* temporary data block */
398 u8 *ab; /* only used if ECC processing */
399};
400
401#endif
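
As a sketch of how these layouts are consumed, the fragment below fills the fixed head of an ipsec_encap_pdb for the CBC case; the option flags, SPI, and sequence number are placeholders chosen for illustration, not a working SA configuration.

struct ipsec_encap_pdb pdb;

memset(&pdb, 0, sizeof(pdb));
pdb.options = PDBOPTS_ESP_TUNNEL | PDBOPTS_ESP_INCIPHDR; /* placeholder */
pdb.spi = 0x12345678;				/* placeholder SPI */
pdb.seq_num = 1;
memcpy(pdb.cbc.iv, iv, sizeof(pdb.cbc.iv));	/* caller-provided IV */
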
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index e9f7a70cdd5e..3223fc6d647c 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -117,6 +117,12 @@ struct jr_outentry {
117#define CHA_NUM_DECONUM_SHIFT 56 117#define CHA_NUM_DECONUM_SHIFT 56
118#define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT) 118#define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT)
119 119
120struct sec_vid {
121 u16 ip_id;
122 u8 maj_rev;
123 u8 min_rev;
124};
125
120struct caam_perfmon { 126struct caam_perfmon {
121 /* Performance Monitor Registers f00-f9f */ 127 /* Performance Monitor Registers f00-f9f */
122 u64 req_dequeued; /* PC_REQ_DEQ - Dequeued Requests */ 128 u64 req_dequeued; /* PC_REQ_DEQ - Dequeued Requests */
@@ -167,7 +173,7 @@ struct partid {
167 u32 pidr; /* partition ID, DECO */ 173 u32 pidr; /* partition ID, DECO */
168}; 174};
169 175
170/* RNG test mode (replicated twice in some configurations) */ 176/* RNGB test mode (replicated twice in some configurations) */
171/* Padded out to 0x100 */ 177/* Padded out to 0x100 */
172struct rngtst { 178struct rngtst {
173 u32 mode; /* RTSTMODEx - Test mode */ 179 u32 mode; /* RTSTMODEx - Test mode */
@@ -200,6 +206,31 @@ struct rngtst {
200 u32 rsvd14[15]; 206 u32 rsvd14[15];
201}; 207};
202 208
209/* RNG4 TRNG test registers */
210struct rng4tst {
211#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
212 u32 rtmctl; /* misc. control register */
213 u32 rtscmisc; /* statistical check misc. register */
214 u32 rtpkrrng; /* poker range register */
215 union {
216 u32 rtpkrmax; /* PRGM=1: poker max. limit register */
217 u32 rtpkrsq; /* PRGM=0: poker square calc. result register */
218 };
219#define RTSDCTL_ENT_DLY_SHIFT 16
220#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
221 u32 rtsdctl; /* seed control register */
222 union {
223 u32 rtsblim; /* PRGM=1: sparse bit limit register */
224 u32 rttotsam; /* PRGM=0: total samples register */
225 };
226 u32 rtfrqmin; /* frequency count min. limit register */
227 union {
228 u32 rtfrqmax; /* PRGM=1: freq. count max. limit register */
229 u32 rtfrqcnt; /* PRGM=0: freq. count register */
230 };
231 u32 rsvd1[56];
232};
233
203/* 234/*
204 * caam_ctrl - basic core configuration 235 * caam_ctrl - basic core configuration
205 * starts base + 0x0000 padded out to 0x1000 236 * starts base + 0x0000 padded out to 0x1000
@@ -249,7 +280,10 @@ struct caam_ctrl {
249 280
250 /* RNG Test/Verification/Debug Access 600-7ff */ 281 /* RNG Test/Verification/Debug Access 600-7ff */
251 /* (Useful in Test/Debug modes only...) */ 282 /* (Useful in Test/Debug modes only...) */
252 struct rngtst rtst[2]; 283 union {
284 struct rngtst rtst[2];
285 struct rng4tst r4tst[2];
286 };
253 287
254 u32 rsvd9[448]; 288 u32 rsvd9[448];
255 289
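
The RTMCTL_PRGM bit selects which member of each union is live: limits are written in program mode, results read back in run mode. A minimal sketch of the intended access pattern, assuming an ioremapped struct rng4tst *r4tst and the wr_reg32/setbits32/clrbits32 helpers used elsewhere in the driver:

setbits32(&r4tst->rtmctl, RTMCTL_PRGM);	/* enter program mode */
wr_reg32(&r4tst->rtsdctl,		/* ent_delay is illustrative */
	 (ent_delay << RTSDCTL_ENT_DLY_SHIFT) & RTSDCTL_ENT_DLY_MASK);
clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);	/* back to run mode */
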
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
new file mode 100644
index 000000000000..e0037c8ee243
--- /dev/null
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -0,0 +1,156 @@
1/*
2 * CAAM/SEC 4.x functions for using scatterlists in caam driver
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 *
6 */
7
8struct sec4_sg_entry;
9
10/*
11 * convert single dma address to h/w link table format
12 */
13static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
14 dma_addr_t dma, u32 len, u32 offset)
15{
16 sec4_sg_ptr->ptr = dma;
17 sec4_sg_ptr->len = len;
18 sec4_sg_ptr->reserved = 0;
19 sec4_sg_ptr->buf_pool_id = 0;
20 sec4_sg_ptr->offset = offset;
21#ifdef DEBUG
22 print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
23 DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
24 sizeof(struct sec4_sg_entry), 1);
25#endif
26}
27
28/*
29 * convert scatterlist to h/w link table format
30 * but does not have final bit; instead, returns last entry
31 */
32static inline struct sec4_sg_entry *
33sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
34 struct sec4_sg_entry *sec4_sg_ptr, u32 offset)
35{
36 while (sg_count) {
37 dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
38 sg_dma_len(sg), offset);
39 sec4_sg_ptr++;
40 sg = scatterwalk_sg_next(sg);
41 sg_count--;
42 }
43 return sec4_sg_ptr - 1;
44}
45
46/*
47 * convert scatterlist to h/w link table format
48 * scatterlist must have been previously dma mapped
49 */
50static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
51 struct sec4_sg_entry *sec4_sg_ptr,
52 u32 offset)
53{
54 sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
55 sec4_sg_ptr->len |= SEC4_SG_LEN_FIN;
56}
57
58/* count number of elements in scatterlist */
59static inline int __sg_count(struct scatterlist *sg_list, int nbytes,
60 bool *chained)
61{
62 struct scatterlist *sg = sg_list;
63 int sg_nents = 0;
64
65 while (nbytes > 0) {
66 sg_nents++;
67 nbytes -= sg->length;
68 if (!sg_is_last(sg) && (sg + 1)->length == 0)
69 *chained = true;
70 sg = scatterwalk_sg_next(sg);
71 }
72
73 return sg_nents;
74}
75
76/* derive number of elements in scatterlist, but return 0 for 1 */
77static inline int sg_count(struct scatterlist *sg_list, int nbytes,
78 bool *chained)
79{
80 int sg_nents = __sg_count(sg_list, nbytes, chained);
81
82 if (likely(sg_nents == 1))
83 return 0;
84
85 return sg_nents;
86}
87
88static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
89 unsigned int nents, enum dma_data_direction dir,
90 bool chained)
91{
92 if (unlikely(chained)) {
93 int i;
94 for (i = 0; i < nents; i++) {
95 dma_map_sg(dev, sg, 1, dir);
96 sg = scatterwalk_sg_next(sg);
97 }
98 } else {
99 dma_map_sg(dev, sg, nents, dir);
100 }
101 return nents;
102}
103
104static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
105 unsigned int nents, enum dma_data_direction dir,
106 bool chained)
107{
108 if (unlikely(chained)) {
109 int i;
110 for (i = 0; i < nents; i++) {
111 dma_unmap_sg(dev, sg, 1, dir);
112 sg = scatterwalk_sg_next(sg);
113 }
114 } else {
115 dma_unmap_sg(dev, sg, nents, dir);
116 }
117 return nents;
118}
119
120/* Copy from len bytes of sg to dest, starting from beginning */
121static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
122{
123 struct scatterlist *current_sg = sg;
124 int cpy_index = 0, next_cpy_index = current_sg->length;
125
126 while (next_cpy_index < len) {
127 memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
128 current_sg->length);
129 current_sg = scatterwalk_sg_next(current_sg);
130 cpy_index = next_cpy_index;
131 next_cpy_index += current_sg->length;
132 }
133 if (cpy_index < len)
134 memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
135 len - cpy_index);
136}
137
138/* Copy sg data, from to_skip to end, to dest */
139static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
140 int to_skip, unsigned int end)
141{
142 struct scatterlist *current_sg = sg;
143 int sg_index, cpy_index;
144
145 sg_index = current_sg->length;
146 while (sg_index <= to_skip) {
147 current_sg = scatterwalk_sg_next(current_sg);
148 sg_index += current_sg->length;
149 }
150 cpy_index = sg_index - to_skip;
151 memcpy(dest, (u8 *) sg_virt(current_sg) +
152 current_sg->length - cpy_index, cpy_index);
153 current_sg = scatterwalk_sg_next(current_sg);
154 if (end - sg_index)
155 sg_copy(dest + cpy_index, current_sg, end - sg_index);
156}
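
Tying the helpers together, a typical caller (hedged sketch: req, jrdev, and the caller-allocated, DMA-able sec4_sg table are assumptions) counts entries with chain detection, maps them, then emits the hardware table with the FIN bit set on the last entry:

bool chained = false;
int src_nents = sg_count(req->src, req->nbytes, &chained);

if (src_nents) {	/* 0 means a single segment: no table needed */
	dma_map_sg_chained(jrdev, req->src, src_nents,
			   DMA_TO_DEVICE, chained);
	sg_to_sec4_sg_last(req->src, src_nents, sec4_sg, 0);
}
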
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index c9c4befb5a8d..df14358d7fa1 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -821,8 +821,8 @@ static int hifn_register_rng(struct hifn_device *dev)
821 /* 821 /*
822 * We must wait at least 256 Pk_clk cycles between two reads of the rng. 822 * We must wait at least 256 Pk_clk cycles between two reads of the rng.
823 */ 823 */
824 dev->rng_wait_time = DIV_ROUND_UP(NSEC_PER_SEC, dev->pk_clk_freq) * 824 dev->rng_wait_time = DIV_ROUND_UP_ULL(NSEC_PER_SEC,
825 256; 825 dev->pk_clk_freq) * 256;
826 826
827 dev->rng.name = dev->name; 827 dev->rng.name = dev->name;
828 dev->rng.data_present = hifn_rng_data_present, 828 dev->rng.data_present = hifn_rng_data_present,
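
The hifn change swaps in the 64-bit-safe helper: on 32-bit builds, plain DIV_ROUND_UP on a u64 operand expands to a 64-bit '/' that the compiler cannot emit without libgcc support, whereas DIV_ROUND_UP_ULL routes through do_div. The rounding itself is unchanged; for instance:

/* both helpers compute the ceiling; _ULL is safe for u64 operands on
 * 32-bit kernels (the 66 MHz example frequency is illustrative) */
u64 wait = DIV_ROUND_UP_ULL(NSEC_PER_SEC, 66000000UL) * 256; /* 16 * 256 */
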
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 1cc6b3f3e262..21c1a87032b7 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -24,6 +24,7 @@
24 24
25#define MV_CESA "MV-CESA:" 25#define MV_CESA "MV-CESA:"
26#define MAX_HW_HASH_SIZE 0xFFFF 26#define MAX_HW_HASH_SIZE 0xFFFF
27#define MV_CESA_EXPIRE 500 /* msec */
27 28
28/* 29/*
29 * STM: 30 * STM:
@@ -87,6 +88,7 @@ struct crypto_priv {
87 spinlock_t lock; 88 spinlock_t lock;
88 struct crypto_queue queue; 89 struct crypto_queue queue;
89 enum engine_status eng_st; 90 enum engine_status eng_st;
91 struct timer_list completion_timer;
90 struct crypto_async_request *cur_req; 92 struct crypto_async_request *cur_req;
91 struct req_progress p; 93 struct req_progress p;
92 int max_req_size; 94 int max_req_size;
@@ -138,6 +140,29 @@ struct mv_req_hash_ctx {
138 int count_add; 140 int count_add;
139}; 141};
140 142
143static void mv_completion_timer_callback(unsigned long unused)
144{
145 int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0;
146
147 printk(KERN_ERR MV_CESA
148 "completion timer expired (CESA %sactive), cleaning up.\n",
149 active ? "" : "in");
150
151 del_timer(&cpg->completion_timer);
152 writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD);
153 while(readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC)
154 printk(KERN_INFO MV_CESA "%s: waiting for engine finishing\n", __func__);
155 cpg->eng_st = ENGINE_W_DEQUEUE;
156 wake_up_process(cpg->queue_th);
157}
158
159static void mv_setup_timer(void)
160{
161 setup_timer(&cpg->completion_timer, &mv_completion_timer_callback, 0);
162 mod_timer(&cpg->completion_timer,
163 jiffies + msecs_to_jiffies(MV_CESA_EXPIRE));
164}
165
141static void compute_aes_dec_key(struct mv_ctx *ctx) 166static void compute_aes_dec_key(struct mv_ctx *ctx)
142{ 167{
143 struct crypto_aes_ctx gen_aes_key; 168 struct crypto_aes_ctx gen_aes_key;
@@ -273,12 +298,8 @@ static void mv_process_current_q(int first_block)
273 sizeof(struct sec_accel_config)); 298 sizeof(struct sec_accel_config));
274 299
275 /* GO */ 300 /* GO */
301 mv_setup_timer();
276 writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); 302 writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
277
278 /*
279 * XXX: add timer if the interrupt does not occur for some mystery
280 * reason
281 */
282} 303}
283 304
284static void mv_crypto_algo_completion(void) 305static void mv_crypto_algo_completion(void)
@@ -357,12 +378,8 @@ static void mv_process_hash_current(int first_block)
357 memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); 378 memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
358 379
359 /* GO */ 380 /* GO */
381 mv_setup_timer();
360 writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); 382 writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
361
362 /*
363 * XXX: add timer if the interrupt does not occur for some mystery
364 * reason
365 */
366} 383}
367 384
368static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx, 385static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
@@ -406,6 +423,15 @@ out:
406 return rc; 423 return rc;
407} 424}
408 425
426static void mv_save_digest_state(struct mv_req_hash_ctx *ctx)
427{
428 ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
429 ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
430 ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
431 ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
432 ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
433}
434
409static void mv_hash_algo_completion(void) 435static void mv_hash_algo_completion(void)
410{ 436{
411 struct ahash_request *req = ahash_request_cast(cpg->cur_req); 437 struct ahash_request *req = ahash_request_cast(cpg->cur_req);
@@ -420,14 +446,12 @@ static void mv_hash_algo_completion(void)
420 memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF, 446 memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
421 crypto_ahash_digestsize(crypto_ahash_reqtfm 447 crypto_ahash_digestsize(crypto_ahash_reqtfm
422 (req))); 448 (req)));
423 } else 449 } else {
450 mv_save_digest_state(ctx);
424 mv_hash_final_fallback(req); 451 mv_hash_final_fallback(req);
452 }
425 } else { 453 } else {
426 ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A); 454 mv_save_digest_state(ctx);
427 ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
428 ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
429 ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
430 ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
431 } 455 }
432} 456}
433 457
@@ -888,6 +912,10 @@ irqreturn_t crypto_int(int irq, void *priv)
888 if (!(val & SEC_INT_ACCEL0_DONE)) 912 if (!(val & SEC_INT_ACCEL0_DONE))
889 return IRQ_NONE; 913 return IRQ_NONE;
890 914
915 if (!del_timer(&cpg->completion_timer)) {
916 printk(KERN_WARNING MV_CESA
917 "got an interrupt but no pending timer?\n");
918 }
891 val &= ~SEC_INT_ACCEL0_DONE; 919 val &= ~SEC_INT_ACCEL0_DONE;
892 writel(val, cpg->reg + FPGA_INT_STATUS); 920 writel(val, cpg->reg + FPGA_INT_STATUS);
893 writel(val, cpg->reg + SEC_ACCEL_INT_STATUS); 921 writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
@@ -1061,6 +1089,7 @@ static int mv_probe(struct platform_device *pdev)
1061 if (!IS_ERR(cp->clk)) 1089 if (!IS_ERR(cp->clk))
1062 clk_prepare_enable(cp->clk); 1090 clk_prepare_enable(cp->clk);
1063 1091
1092 writel(0, cpg->reg + SEC_ACCEL_INT_STATUS);
1064 writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); 1093 writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
1065 writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); 1094 writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
1066 writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0); 1095 writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
@@ -1098,6 +1127,10 @@ err_unreg_ecb:
1098 crypto_unregister_alg(&mv_aes_alg_ecb); 1127 crypto_unregister_alg(&mv_aes_alg_ecb);
1099err_irq: 1128err_irq:
1100 free_irq(irq, cp); 1129 free_irq(irq, cp);
1130 if (!IS_ERR(cp->clk)) {
1131 clk_disable_unprepare(cp->clk);
1132 clk_put(cp->clk);
1133 }
1101err_thread: 1134err_thread:
1102 kthread_stop(cp->queue_th); 1135 kthread_stop(cp->queue_th);
1103err_unmap_sram: 1136err_unmap_sram:
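
The pattern added to mv_cesa is a completion watchdog: arm a timer immediately before the GO write, cancel it in the ISR, and treat expiry as a hung engine. A generic distillation (illustrative skeleton, not the driver's code, using the same era's setup_timer API):

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

static struct timer_list wd;

static void wd_expired(unsigned long data)
{
	/* hardware never signalled completion: reset engine, requeue */
}

static void start_job(void)
{
	setup_timer(&wd, wd_expired, 0);
	mod_timer(&wd, jiffies + msecs_to_jiffies(500));
	/* write the GO bit here */
}

static irqreturn_t job_done_irq(int irq, void *priv)
{
	if (!del_timer(&wd))
		pr_warn("interrupt raced with watchdog expiry\n");
	/* normal completion path */
	return IRQ_HANDLED;
}
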
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 67b97c5fd859..a8bd0310f8fe 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1610,8 +1610,7 @@ static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
1610 1610
1611 sprintf(p->irq_name, "%s-%d", irq_name, index); 1611 sprintf(p->irq_name, "%s-%d", irq_name, index);
1612 1612
1613 return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM, 1613 return request_irq(p->irq, handler, 0, p->irq_name, p);
1614 p->irq_name, p);
1615} 1614}
1616 1615
1617static struct kmem_cache *queue_cache[2]; 1616static struct kmem_cache *queue_cache[2];
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 921039e56f87..efff788d2f1d 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -53,117 +53,6 @@
53 53
54#include "talitos.h" 54#include "talitos.h"
55 55
56#define TALITOS_TIMEOUT 100000
57#define TALITOS_MAX_DATA_LEN 65535
58
59#define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
60#define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
61#define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)
62
63/* descriptor pointer entry */
64struct talitos_ptr {
65 __be16 len; /* length */
66 u8 j_extent; /* jump to sg link table and/or extent */
67 u8 eptr; /* extended address */
68 __be32 ptr; /* address */
69};
70
71static const struct talitos_ptr zero_entry = {
72 .len = 0,
73 .j_extent = 0,
74 .eptr = 0,
75 .ptr = 0
76};
77
78/* descriptor */
79struct talitos_desc {
80 __be32 hdr; /* header high bits */
81 __be32 hdr_lo; /* header low bits */
82 struct talitos_ptr ptr[7]; /* ptr/len pair array */
83};
84
85/**
86 * talitos_request - descriptor submission request
87 * @desc: descriptor pointer (kernel virtual)
88 * @dma_desc: descriptor's physical bus address
89 * @callback: whom to call when descriptor processing is done
90 * @context: caller context (optional)
91 */
92struct talitos_request {
93 struct talitos_desc *desc;
94 dma_addr_t dma_desc;
95 void (*callback) (struct device *dev, struct talitos_desc *desc,
96 void *context, int error);
97 void *context;
98};
99
100/* per-channel fifo management */
101struct talitos_channel {
102 void __iomem *reg;
103
104 /* request fifo */
105 struct talitos_request *fifo;
106
107 /* number of requests pending in channel h/w fifo */
108 atomic_t submit_count ____cacheline_aligned;
109
110 /* request submission (head) lock */
111 spinlock_t head_lock ____cacheline_aligned;
112 /* index to next free descriptor request */
113 int head;
114
115 /* request release (tail) lock */
116 spinlock_t tail_lock ____cacheline_aligned;
117 /* index to next in-progress/done descriptor request */
118 int tail;
119};
120
121struct talitos_private {
122 struct device *dev;
123 struct platform_device *ofdev;
124 void __iomem *reg;
125 int irq[2];
126
127 /* SEC global registers lock */
128 spinlock_t reg_lock ____cacheline_aligned;
129
130 /* SEC version geometry (from device tree node) */
131 unsigned int num_channels;
132 unsigned int chfifo_len;
133 unsigned int exec_units;
134 unsigned int desc_types;
135
136 /* SEC Compatibility info */
137 unsigned long features;
138
139 /*
140 * length of the request fifo
141 * fifo_len is chfifo_len rounded up to next power of 2
142 * so we can use bitwise ops to wrap
143 */
144 unsigned int fifo_len;
145
146 struct talitos_channel *chan;
147
148 /* next channel to be assigned next incoming descriptor */
149 atomic_t last_chan ____cacheline_aligned;
150
151 /* request callback tasklet */
152 struct tasklet_struct done_task[2];
153
154 /* list of registered algorithms */
155 struct list_head alg_list;
156
157 /* hwrng device */
158 struct hwrng rng;
159};
160
161/* .features flag */
162#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
163#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
164#define TALITOS_FTR_SHA224_HWINIT 0x00000004
165#define TALITOS_FTR_HMAC_OK 0x00000008
166
167static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) 56static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
168{ 57{
169 talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); 58 talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
@@ -303,11 +192,11 @@ static int init_device(struct device *dev)
303 * callback must check err and feedback in descriptor header 192 * callback must check err and feedback in descriptor header
304 * for device processing status. 193 * for device processing status.
305 */ 194 */
306static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, 195int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
307 void (*callback)(struct device *dev, 196 void (*callback)(struct device *dev,
308 struct talitos_desc *desc, 197 struct talitos_desc *desc,
309 void *context, int error), 198 void *context, int error),
310 void *context) 199 void *context)
311{ 200{
312 struct talitos_private *priv = dev_get_drvdata(dev); 201 struct talitos_private *priv = dev_get_drvdata(dev);
313 struct talitos_request *request; 202 struct talitos_request *request;
@@ -348,6 +237,7 @@ static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
348 237
349 return -EINPROGRESS; 238 return -EINPROGRESS;
350} 239}
240EXPORT_SYMBOL(talitos_submit);
351 241
352/* 242/*
353 * process what was done, notify callback of error if not 243 * process what was done, notify callback of error if not
@@ -733,7 +623,7 @@ static void talitos_unregister_rng(struct device *dev)
733 * crypto alg 623 * crypto alg
734 */ 624 */
735#define TALITOS_CRA_PRIORITY 3000 625#define TALITOS_CRA_PRIORITY 3000
736#define TALITOS_MAX_KEY_SIZE 64 626#define TALITOS_MAX_KEY_SIZE 96
737#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ 627#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
738 628
739#define MD5_BLOCK_SIZE 64 629#define MD5_BLOCK_SIZE 64
@@ -2066,6 +1956,59 @@ static struct talitos_alg_template driver_algs[] = {
2066 DESC_HDR_MODE1_MDEU_PAD | 1956 DESC_HDR_MODE1_MDEU_PAD |
2067 DESC_HDR_MODE1_MDEU_SHA1_HMAC, 1957 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2068 }, 1958 },
1959 { .type = CRYPTO_ALG_TYPE_AEAD,
1960 .alg.crypto = {
1961 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1962 .cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
1963 .cra_blocksize = AES_BLOCK_SIZE,
1964 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1965 .cra_type = &crypto_aead_type,
1966 .cra_aead = {
1967 .setkey = aead_setkey,
1968 .setauthsize = aead_setauthsize,
1969 .encrypt = aead_encrypt,
1970 .decrypt = aead_decrypt,
1971 .givencrypt = aead_givencrypt,
1972 .geniv = "<built-in>",
1973 .ivsize = AES_BLOCK_SIZE,
1974 .maxauthsize = SHA224_DIGEST_SIZE,
1975 }
1976 },
1977 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1978 DESC_HDR_SEL0_AESU |
1979 DESC_HDR_MODE0_AESU_CBC |
1980 DESC_HDR_SEL1_MDEUA |
1981 DESC_HDR_MODE1_MDEU_INIT |
1982 DESC_HDR_MODE1_MDEU_PAD |
1983 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
1984 },
1985 { .type = CRYPTO_ALG_TYPE_AEAD,
1986 .alg.crypto = {
1987 .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
1988 .cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
1989 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1990 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1991 .cra_type = &crypto_aead_type,
1992 .cra_aead = {
1993 .setkey = aead_setkey,
1994 .setauthsize = aead_setauthsize,
1995 .encrypt = aead_encrypt,
1996 .decrypt = aead_decrypt,
1997 .givencrypt = aead_givencrypt,
1998 .geniv = "<built-in>",
1999 .ivsize = DES3_EDE_BLOCK_SIZE,
2000 .maxauthsize = SHA224_DIGEST_SIZE,
2001 }
2002 },
2003 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2004 DESC_HDR_SEL0_DEU |
2005 DESC_HDR_MODE0_DEU_CBC |
2006 DESC_HDR_MODE0_DEU_3DES |
2007 DESC_HDR_SEL1_MDEUA |
2008 DESC_HDR_MODE1_MDEU_INIT |
2009 DESC_HDR_MODE1_MDEU_PAD |
2010 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2011 },
2069 { .type = CRYPTO_ALG_TYPE_AEAD, 2012 { .type = CRYPTO_ALG_TYPE_AEAD,
2070 .alg.crypto = { 2013 .alg.crypto = {
2071 .cra_name = "authenc(hmac(sha256),cbc(aes))", 2014 .cra_name = "authenc(hmac(sha256),cbc(aes))",
@@ -2121,6 +2064,112 @@ static struct talitos_alg_template driver_algs[] = {
2121 }, 2064 },
2122 { .type = CRYPTO_ALG_TYPE_AEAD, 2065 { .type = CRYPTO_ALG_TYPE_AEAD,
2123 .alg.crypto = { 2066 .alg.crypto = {
2067 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2068 .cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
2069 .cra_blocksize = AES_BLOCK_SIZE,
2070 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2071 .cra_type = &crypto_aead_type,
2072 .cra_aead = {
2073 .setkey = aead_setkey,
2074 .setauthsize = aead_setauthsize,
2075 .encrypt = aead_encrypt,
2076 .decrypt = aead_decrypt,
2077 .givencrypt = aead_givencrypt,
2078 .geniv = "<built-in>",
2079 .ivsize = AES_BLOCK_SIZE,
2080 .maxauthsize = SHA384_DIGEST_SIZE,
2081 }
2082 },
2083 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2084 DESC_HDR_SEL0_AESU |
2085 DESC_HDR_MODE0_AESU_CBC |
2086 DESC_HDR_SEL1_MDEUB |
2087 DESC_HDR_MODE1_MDEU_INIT |
2088 DESC_HDR_MODE1_MDEU_PAD |
2089 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2090 },
2091 { .type = CRYPTO_ALG_TYPE_AEAD,
2092 .alg.crypto = {
2093 .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
2094 .cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
2095 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2096 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2097 .cra_type = &crypto_aead_type,
2098 .cra_aead = {
2099 .setkey = aead_setkey,
2100 .setauthsize = aead_setauthsize,
2101 .encrypt = aead_encrypt,
2102 .decrypt = aead_decrypt,
2103 .givencrypt = aead_givencrypt,
2104 .geniv = "<built-in>",
2105 .ivsize = DES3_EDE_BLOCK_SIZE,
2106 .maxauthsize = SHA384_DIGEST_SIZE,
2107 }
2108 },
2109 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2110 DESC_HDR_SEL0_DEU |
2111 DESC_HDR_MODE0_DEU_CBC |
2112 DESC_HDR_MODE0_DEU_3DES |
2113 DESC_HDR_SEL1_MDEUB |
2114 DESC_HDR_MODE1_MDEU_INIT |
2115 DESC_HDR_MODE1_MDEU_PAD |
2116 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2117 },
2118 { .type = CRYPTO_ALG_TYPE_AEAD,
2119 .alg.crypto = {
2120 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2121 .cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
2122 .cra_blocksize = AES_BLOCK_SIZE,
2123 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2124 .cra_type = &crypto_aead_type,
2125 .cra_aead = {
2126 .setkey = aead_setkey,
2127 .setauthsize = aead_setauthsize,
2128 .encrypt = aead_encrypt,
2129 .decrypt = aead_decrypt,
2130 .givencrypt = aead_givencrypt,
2131 .geniv = "<built-in>",
2132 .ivsize = AES_BLOCK_SIZE,
2133 .maxauthsize = SHA512_DIGEST_SIZE,
2134 }
2135 },
2136 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2137 DESC_HDR_SEL0_AESU |
2138 DESC_HDR_MODE0_AESU_CBC |
2139 DESC_HDR_SEL1_MDEUB |
2140 DESC_HDR_MODE1_MDEU_INIT |
2141 DESC_HDR_MODE1_MDEU_PAD |
2142 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2143 },
2144 { .type = CRYPTO_ALG_TYPE_AEAD,
2145 .alg.crypto = {
2146 .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
2147 .cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
2148 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2149 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2150 .cra_type = &crypto_aead_type,
2151 .cra_aead = {
2152 .setkey = aead_setkey,
2153 .setauthsize = aead_setauthsize,
2154 .encrypt = aead_encrypt,
2155 .decrypt = aead_decrypt,
2156 .givencrypt = aead_givencrypt,
2157 .geniv = "<built-in>",
2158 .ivsize = DES3_EDE_BLOCK_SIZE,
2159 .maxauthsize = SHA512_DIGEST_SIZE,
2160 }
2161 },
2162 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2163 DESC_HDR_SEL0_DEU |
2164 DESC_HDR_MODE0_DEU_CBC |
2165 DESC_HDR_MODE0_DEU_3DES |
2166 DESC_HDR_SEL1_MDEUB |
2167 DESC_HDR_MODE1_MDEU_INIT |
2168 DESC_HDR_MODE1_MDEU_PAD |
2169 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2170 },
2171 { .type = CRYPTO_ALG_TYPE_AEAD,
2172 .alg.crypto = {
2124 .cra_name = "authenc(hmac(md5),cbc(aes))", 2173 .cra_name = "authenc(hmac(md5),cbc(aes))",
2125 .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos", 2174 .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
2126 .cra_blocksize = AES_BLOCK_SIZE, 2175 .cra_blocksize = AES_BLOCK_SIZE,
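
With talitos_submit() now exported, an out-of-file caller can enqueue descriptors directly; a hedged sketch (the channel index, completion-based callback, and error handling are illustrative, not a prescribed pattern):

static void my_done(struct device *dev, struct talitos_desc *desc,
		    void *context, int error)
{
	complete(context);		/* wake the submitter */
}

/* in the submit path: */
init_completion(&done);
ret = talitos_submit(dev, ch, desc, my_done, &done);
if (ret != -EINPROGRESS)
	return ret;		/* e.g. -EAGAIN: channel fifo full */
wait_for_completion(&done);
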
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 3c173954ef29..61a14054aa39 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -28,6 +28,123 @@
28 * 28 *
29 */ 29 */
30 30
31#define TALITOS_TIMEOUT 100000
32#define TALITOS_MAX_DATA_LEN 65535
33
34#define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
35#define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
36#define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)
37
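A quick hedged illustration of the three accessors above: they pull the descriptor type and the primary/secondary execution-unit selectors out of a big-endian header word. The header value below is made up for the example and is not a real template.

	/* Sketch: decode an example descriptor header (value is arbitrary). */
	__be32 hdr = cpu_to_be32(0x20031210);

	pr_debug("type %u, primary EU %u, secondary EU %u\n",
		 DESC_TYPE(hdr), PRIMARY_EU(hdr), SECONDARY_EU(hdr));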
38/* descriptor pointer entry */
39struct talitos_ptr {
40 __be16 len; /* length */
41 u8 j_extent; /* jump to sg link table and/or extent */
42 u8 eptr; /* extended address */
43 __be32 ptr; /* address */
44};
45
46static const struct talitos_ptr zero_entry = {
47 .len = 0,
48 .j_extent = 0,
49 .eptr = 0,
50 .ptr = 0
51};
52
53/* descriptor */
54struct talitos_desc {
55 __be32 hdr; /* header high bits */
56 __be32 hdr_lo; /* header low bits */
57 struct talitos_ptr ptr[7]; /* ptr/len pair array */
58};
59
60/**
61 * talitos_request - descriptor submission request
62 * @desc: descriptor pointer (kernel virtual)
63 * @dma_desc: descriptor's physical bus address
64 * @callback: whom to call when descriptor processing is done
65 * @context: caller context (optional)
66 */
67struct talitos_request {
68 struct talitos_desc *desc;
69 dma_addr_t dma_desc;
70 void (*callback) (struct device *dev, struct talitos_desc *desc,
71 void *context, int error);
72 void *context;
73};
74
75/* per-channel fifo management */
76struct talitos_channel {
77 void __iomem *reg;
78
79 /* request fifo */
80 struct talitos_request *fifo;
81
82 /* number of requests pending in channel h/w fifo */
83 atomic_t submit_count ____cacheline_aligned;
84
85 /* request submission (head) lock */
86 spinlock_t head_lock ____cacheline_aligned;
87 /* index to next free descriptor request */
88 int head;
89
90 /* request release (tail) lock */
91 spinlock_t tail_lock ____cacheline_aligned;
92 /* index to next in-progress/done descriptor request */
93 int tail;
94};
95
96struct talitos_private {
97 struct device *dev;
98 struct platform_device *ofdev;
99 void __iomem *reg;
100 int irq[2];
101
102 /* SEC global registers lock */
103 spinlock_t reg_lock ____cacheline_aligned;
104
105 /* SEC version geometry (from device tree node) */
106 unsigned int num_channels;
107 unsigned int chfifo_len;
108 unsigned int exec_units;
109 unsigned int desc_types;
110
111 /* SEC Compatibility info */
112 unsigned long features;
113
114 /*
115 * length of the request fifo
116 * fifo_len is chfifo_len rounded up to next power of 2
117 * so we can use bitwise ops to wrap
118 */
119 unsigned int fifo_len;
120
121 struct talitos_channel *chan;
122
 123	/* next channel to be assigned the next incoming descriptor */
124 atomic_t last_chan ____cacheline_aligned;
125
126 /* request callback tasklet */
127 struct tasklet_struct done_task[2];
128
129 /* list of registered algorithms */
130 struct list_head alg_list;
131
132 /* hwrng device */
133 struct hwrng rng;
134};
135
136extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
137 void (*callback)(struct device *dev,
138 struct talitos_desc *desc,
139 void *context, int error),
140 void *context);
141
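A minimal submission sketch against the prototype above, assuming the descriptor is already filled in and DMA-mapped. my_done() and done_completion are hypothetical names introduced for the example, and the -EAGAIN check assumes the driver's usual full-FIFO return code; treat it as illustrative rather than definitive.

	/* Hypothetical completion callback matching talitos_submit(). */
	static void my_done(struct device *dev, struct talitos_desc *desc,
			    void *context, int error)
	{
		struct completion *done = context;

		if (error)
			dev_err(dev, "descriptor completed with error %d\n", error);
		complete(done);
	}

	/* ... later, submit on channel 0 and wait for my_done() to fire: */
	err = talitos_submit(dev, 0, desc, my_done, &done_completion);
	if (err == -EAGAIN)
		;	/* channel FIFO full: back off and resubmit */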
142/* .features flag */
143#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
144#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
145#define TALITOS_FTR_SHA224_HWINIT 0x00000004
146#define TALITOS_FTR_HMAC_OK 0x00000008
147
31/* 148/*
32 * TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register 149 * TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register
33 */ 150 */
@@ -209,6 +326,12 @@
209 DESC_HDR_MODE1_MDEU_HMAC) 326 DESC_HDR_MODE1_MDEU_HMAC)
210#define DESC_HDR_MODE1_MDEU_SHA1_HMAC (DESC_HDR_MODE1_MDEU_SHA1 | \ 327#define DESC_HDR_MODE1_MDEU_SHA1_HMAC (DESC_HDR_MODE1_MDEU_SHA1 | \
211 DESC_HDR_MODE1_MDEU_HMAC) 328 DESC_HDR_MODE1_MDEU_HMAC)
329#define DESC_HDR_MODE1_MDEU_SHA224_HMAC (DESC_HDR_MODE1_MDEU_SHA224 | \
330 DESC_HDR_MODE1_MDEU_HMAC)
331#define DESC_HDR_MODE1_MDEUB_SHA384_HMAC (DESC_HDR_MODE1_MDEUB_SHA384 | \
332 DESC_HDR_MODE1_MDEU_HMAC)
333#define DESC_HDR_MODE1_MDEUB_SHA512_HMAC (DESC_HDR_MODE1_MDEUB_SHA512 | \
334 DESC_HDR_MODE1_MDEU_HMAC)
212 335
213/* direction of overall data flow (DIR) */ 336/* direction of overall data flow (DIR) */
214#define DESC_HDR_DIR_INBOUND cpu_to_be32(0x00000002) 337#define DESC_HDR_DIR_INBOUND cpu_to_be32(0x00000002)
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
index 422a9766c7c9..ac236f6724f4 100644
--- a/drivers/crypto/tegra-aes.c
+++ b/drivers/crypto/tegra-aes.c
@@ -572,7 +572,7 @@ static void aes_workqueue_handler(struct work_struct *work)
572 struct tegra_aes_dev *dd = aes_dev; 572 struct tegra_aes_dev *dd = aes_dev;
573 int ret; 573 int ret;
574 574
575 ret = clk_enable(dd->aes_clk); 575 ret = clk_prepare_enable(dd->aes_clk);
576 if (ret) 576 if (ret)
577 BUG_ON("clock enable failed"); 577 BUG_ON("clock enable failed");
578 578
@@ -581,7 +581,7 @@ static void aes_workqueue_handler(struct work_struct *work)
581 ret = tegra_aes_handle_req(dd); 581 ret = tegra_aes_handle_req(dd);
582 } while (!ret); 582 } while (!ret);
583 583
584 clk_disable(dd->aes_clk); 584 clk_disable_unprepare(dd->aes_clk);
585} 585}
586 586
587static irqreturn_t aes_irq(int irq, void *dev_id) 587static irqreturn_t aes_irq(int irq, void *dev_id)
@@ -673,7 +673,7 @@ static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
673 /* take mutex to access the aes hw */ 673 /* take mutex to access the aes hw */
674 mutex_lock(&aes_lock); 674 mutex_lock(&aes_lock);
675 675
676 ret = clk_enable(dd->aes_clk); 676 ret = clk_prepare_enable(dd->aes_clk);
677 if (ret) 677 if (ret)
678 return ret; 678 return ret;
679 679
@@ -700,7 +700,7 @@ static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
700 } 700 }
701 701
702out: 702out:
703 clk_disable(dd->aes_clk); 703 clk_disable_unprepare(dd->aes_clk);
704 mutex_unlock(&aes_lock); 704 mutex_unlock(&aes_lock);
705 705
706 dev_dbg(dd->dev, "%s: done\n", __func__); 706 dev_dbg(dd->dev, "%s: done\n", __func__);
@@ -758,7 +758,7 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
758 758
759 dd->flags = FLAGS_ENCRYPT | FLAGS_RNG; 759 dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
760 760
761 ret = clk_enable(dd->aes_clk); 761 ret = clk_prepare_enable(dd->aes_clk);
762 if (ret) 762 if (ret)
763 return ret; 763 return ret;
764 764
@@ -788,7 +788,7 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
788 memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ); 788 memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);
789 789
790out: 790out:
791 clk_disable(dd->aes_clk); 791 clk_disable_unprepare(dd->aes_clk);
792 mutex_unlock(&aes_lock); 792 mutex_unlock(&aes_lock);
793 793
794 dev_dbg(dd->dev, "%s: done\n", __func__); 794 dev_dbg(dd->dev, "%s: done\n", __func__);
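The tegra-aes changes above are the standard common clock framework migration: clk_prepare_enable() bundles clk_prepare() (which may sleep) with clk_enable(), and clk_disable_unprepare() undoes both in reverse order. A minimal sketch of the pairing, assuming clk was obtained elsewhere with clk_get():

	int ret;

	ret = clk_prepare_enable(clk);	/* prepare (may sleep) + enable */
	if (ret)
		return ret;

	/* ... access the clocked hardware ... */

	clk_disable_unprepare(clk);	/* disable + unprepare */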
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 7cac12793a4b..1c307e1b840c 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1661,27 +1661,26 @@ static void ux500_cryp_shutdown(struct platform_device *pdev)
1661 1661
1662} 1662}
1663 1663
1664static int ux500_cryp_suspend(struct platform_device *pdev, pm_message_t state) 1664static int ux500_cryp_suspend(struct device *dev)
1665{ 1665{
1666 int ret; 1666 int ret;
1667 struct platform_device *pdev = to_platform_device(dev);
1667 struct cryp_device_data *device_data; 1668 struct cryp_device_data *device_data;
1668 struct resource *res_irq; 1669 struct resource *res_irq;
1669 struct cryp_ctx *temp_ctx = NULL; 1670 struct cryp_ctx *temp_ctx = NULL;
1670 1671
1671 dev_dbg(&pdev->dev, "[%s]", __func__); 1672 dev_dbg(dev, "[%s]", __func__);
1672 1673
1673 /* Handle state? */ 1674 /* Handle state? */
1674 device_data = platform_get_drvdata(pdev); 1675 device_data = platform_get_drvdata(pdev);
1675 if (!device_data) { 1676 if (!device_data) {
1676 dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", 1677 dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__);
1677 __func__);
1678 return -ENOMEM; 1678 return -ENOMEM;
1679 } 1679 }
1680 1680
1681 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1681 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1682 if (!res_irq) 1682 if (!res_irq)
1683 dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable", 1683 dev_err(dev, "[%s]: IORESOURCE_IRQ, unavailable", __func__);
1684 __func__);
1685 else 1684 else
1686 disable_irq(res_irq->start); 1685 disable_irq(res_irq->start);
1687 1686
@@ -1692,32 +1691,32 @@ static int ux500_cryp_suspend(struct platform_device *pdev, pm_message_t state)
1692 1691
1693 if (device_data->current_ctx == ++temp_ctx) { 1692 if (device_data->current_ctx == ++temp_ctx) {
1694 if (down_interruptible(&driver_data.device_allocation)) 1693 if (down_interruptible(&driver_data.device_allocation))
1695 dev_dbg(&pdev->dev, "[%s]: down_interruptible() " 1694 dev_dbg(dev, "[%s]: down_interruptible() failed",
1696 "failed", __func__); 1695 __func__);
1697 ret = cryp_disable_power(&pdev->dev, device_data, false); 1696 ret = cryp_disable_power(dev, device_data, false);
1698 1697
1699 } else 1698 } else
1700 ret = cryp_disable_power(&pdev->dev, device_data, true); 1699 ret = cryp_disable_power(dev, device_data, true);
1701 1700
1702 if (ret) 1701 if (ret)
1703 dev_err(&pdev->dev, "[%s]: cryp_disable_power()", __func__); 1702 dev_err(dev, "[%s]: cryp_disable_power()", __func__);
1704 1703
1705 return ret; 1704 return ret;
1706} 1705}
1707 1706
1708static int ux500_cryp_resume(struct platform_device *pdev) 1707static int ux500_cryp_resume(struct device *dev)
1709{ 1708{
1710 int ret = 0; 1709 int ret = 0;
1710 struct platform_device *pdev = to_platform_device(dev);
1711 struct cryp_device_data *device_data; 1711 struct cryp_device_data *device_data;
1712 struct resource *res_irq; 1712 struct resource *res_irq;
1713 struct cryp_ctx *temp_ctx = NULL; 1713 struct cryp_ctx *temp_ctx = NULL;
1714 1714
1715 dev_dbg(&pdev->dev, "[%s]", __func__); 1715 dev_dbg(dev, "[%s]", __func__);
1716 1716
1717 device_data = platform_get_drvdata(pdev); 1717 device_data = platform_get_drvdata(pdev);
1718 if (!device_data) { 1718 if (!device_data) {
1719 dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", 1719 dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__);
1720 __func__);
1721 return -ENOMEM; 1720 return -ENOMEM;
1722 } 1721 }
1723 1722
@@ -1730,11 +1729,10 @@ static int ux500_cryp_resume(struct platform_device *pdev)
1730 if (!device_data->current_ctx) 1729 if (!device_data->current_ctx)
1731 up(&driver_data.device_allocation); 1730 up(&driver_data.device_allocation);
1732 else 1731 else
1733 ret = cryp_enable_power(&pdev->dev, device_data, true); 1732 ret = cryp_enable_power(dev, device_data, true);
1734 1733
1735 if (ret) 1734 if (ret)
1736 dev_err(&pdev->dev, "[%s]: cryp_enable_power() failed!", 1735 dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
1737 __func__);
1738 else { 1736 else {
1739 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1737 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1740 if (res_irq) 1738 if (res_irq)
@@ -1744,15 +1742,16 @@ static int ux500_cryp_resume(struct platform_device *pdev)
1744 return ret; 1742 return ret;
1745} 1743}
1746 1744
1745static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume);
1746
1747static struct platform_driver cryp_driver = { 1747static struct platform_driver cryp_driver = {
1748 .probe = ux500_cryp_probe, 1748 .probe = ux500_cryp_probe,
1749 .remove = ux500_cryp_remove, 1749 .remove = ux500_cryp_remove,
1750 .shutdown = ux500_cryp_shutdown, 1750 .shutdown = ux500_cryp_shutdown,
1751 .suspend = ux500_cryp_suspend,
1752 .resume = ux500_cryp_resume,
1753 .driver = { 1751 .driver = {
1754 .owner = THIS_MODULE, 1752 .owner = THIS_MODULE,
1755 .name = "cryp1" 1753 .name = "cryp1"
1754 .pm = &ux500_cryp_pm,
1756 } 1755 }
1757}; 1756};
1758 1757
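Both ux500 drivers in this series follow the same recipe for moving off the legacy platform_driver suspend/resume hooks: the callbacks are rewritten to take a struct device, SIMPLE_DEV_PM_OPS() packs them into a dev_pm_ops, and the driver exposes it via .driver.pm. A generic sketch of the resulting shape (driver and function names are placeholders):

	static int foo_suspend(struct device *dev)
	{
		/* quiesce the hardware */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* bring the hardware back up */
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct platform_driver foo_driver = {
		.driver = {
			.name	= "foo",
			.owner	= THIS_MODULE,
			.pm	= &foo_pm_ops,
		},
	};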
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 6dbb9ec709a3..08d5032cb564 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1894,19 +1894,17 @@ static void ux500_hash_shutdown(struct platform_device *pdev)
1894 1894
1895/** 1895/**
1896 * ux500_hash_suspend - Function that suspends the hash device. 1896 * ux500_hash_suspend - Function that suspends the hash device.
1897 * @pdev: The platform device. 1897 * @dev: Device to suspend.
1898 * @state: -
1899 */ 1898 */
1900static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state) 1899static int ux500_hash_suspend(struct device *dev)
1901{ 1900{
1902 int ret; 1901 int ret;
1903 struct hash_device_data *device_data; 1902 struct hash_device_data *device_data;
1904 struct hash_ctx *temp_ctx = NULL; 1903 struct hash_ctx *temp_ctx = NULL;
1905 1904
1906 device_data = platform_get_drvdata(pdev); 1905 device_data = dev_get_drvdata(dev);
1907 if (!device_data) { 1906 if (!device_data) {
1908 dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", 1907 dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__);
1909 __func__);
1910 return -ENOMEM; 1908 return -ENOMEM;
1911 } 1909 }
1912 1910
@@ -1917,33 +1915,32 @@ static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state)
1917 1915
1918 if (device_data->current_ctx == ++temp_ctx) { 1916 if (device_data->current_ctx == ++temp_ctx) {
1919 if (down_interruptible(&driver_data.device_allocation)) 1917 if (down_interruptible(&driver_data.device_allocation))
1920 dev_dbg(&pdev->dev, "[%s]: down_interruptible() " 1918 dev_dbg(dev, "[%s]: down_interruptible() failed",
1921 "failed", __func__); 1919 __func__);
1922 ret = hash_disable_power(device_data, false); 1920 ret = hash_disable_power(device_data, false);
1923 1921
1924 } else 1922 } else
1925 ret = hash_disable_power(device_data, true); 1923 ret = hash_disable_power(device_data, true);
1926 1924
1927 if (ret) 1925 if (ret)
1928 dev_err(&pdev->dev, "[%s]: hash_disable_power()", __func__); 1926 dev_err(dev, "[%s]: hash_disable_power()", __func__);
1929 1927
1930 return ret; 1928 return ret;
1931} 1929}
1932 1930
1933/** 1931/**
1934 * ux500_hash_resume - Function that resumes the hash device. 1932 * ux500_hash_resume - Function that resumes the hash device.
1935 * @pdev: The platform device. 1933 * @dev: Device to resume.
1936 */ 1934 */
1937static int ux500_hash_resume(struct platform_device *pdev) 1935static int ux500_hash_resume(struct device *dev)
1938{ 1936{
1939 int ret = 0; 1937 int ret = 0;
1940 struct hash_device_data *device_data; 1938 struct hash_device_data *device_data;
1941 struct hash_ctx *temp_ctx = NULL; 1939 struct hash_ctx *temp_ctx = NULL;
1942 1940
1943 device_data = platform_get_drvdata(pdev); 1941 device_data = dev_get_drvdata(dev);
1944 if (!device_data) { 1942 if (!device_data) {
1945 dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", 1943 dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__);
1946 __func__);
1947 return -ENOMEM; 1944 return -ENOMEM;
1948 } 1945 }
1949 1946
@@ -1958,21 +1955,21 @@ static int ux500_hash_resume(struct platform_device *pdev)
1958 ret = hash_enable_power(device_data, true); 1955 ret = hash_enable_power(device_data, true);
1959 1956
1960 if (ret) 1957 if (ret)
1961 dev_err(&pdev->dev, "[%s]: hash_enable_power() failed!", 1958 dev_err(dev, "[%s]: hash_enable_power() failed!", __func__);
1962 __func__);
1963 1959
1964 return ret; 1960 return ret;
1965} 1961}
1966 1962
1963static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
1964
1967static struct platform_driver hash_driver = { 1965static struct platform_driver hash_driver = {
1968 .probe = ux500_hash_probe, 1966 .probe = ux500_hash_probe,
1969 .remove = ux500_hash_remove, 1967 .remove = ux500_hash_remove,
1970 .shutdown = ux500_hash_shutdown, 1968 .shutdown = ux500_hash_shutdown,
1971 .suspend = ux500_hash_suspend,
1972 .resume = ux500_hash_resume,
1973 .driver = { 1969 .driver = {
1974 .owner = THIS_MODULE, 1970 .owner = THIS_MODULE,
1975 .name = "hash1", 1971 .name = "hash1",
1972 .pm = &ux500_hash_pm,
1976 } 1973 }
1977}; 1974};
1978 1975
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index aadeb5be9dba..d06ea2950dd9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -53,6 +53,7 @@ config AMBA_PL08X
53 bool "ARM PrimeCell PL080 or PL081 support" 53 bool "ARM PrimeCell PL080 or PL081 support"
54 depends on ARM_AMBA && EXPERIMENTAL 54 depends on ARM_AMBA && EXPERIMENTAL
55 select DMA_ENGINE 55 select DMA_ENGINE
56 select DMA_VIRTUAL_CHANNELS
56 help 57 help
57 Platform has a PL08x DMAC device 58 Platform has a PL08x DMAC device
58 which can provide DMA engine support 59 which can provide DMA engine support
@@ -148,6 +149,20 @@ config TXX9_DMAC
148 Support the TXx9 SoC internal DMA controller. This can be 149 Support the TXx9 SoC internal DMA controller. This can be
149 integrated in chips such as the Toshiba TX4927/38/39. 150 integrated in chips such as the Toshiba TX4927/38/39.
150 151
152config TEGRA20_APB_DMA
153 bool "NVIDIA Tegra20 APB DMA support"
154 depends on ARCH_TEGRA
155 select DMA_ENGINE
156 help
 157	  Support for the NVIDIA Tegra20 APB DMA controller driver. The
 158	  DMA controller has multiple DMA channels that can be configured
 159	  for different peripherals on the APB bus, such as audio, UART,
 160	  SPI and I2C.
161 This DMA controller transfers data from memory to peripheral fifo
162 or vice versa. It does not support memory to memory data transfer.
 163
151config SH_DMAE 166config SH_DMAE
152 tristate "Renesas SuperH DMAC support" 167 tristate "Renesas SuperH DMAC support"
153 depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE) 168 depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE)
@@ -237,7 +252,7 @@ config IMX_DMA
237 252
238config MXS_DMA 253config MXS_DMA
239 bool "MXS DMA support" 254 bool "MXS DMA support"
240 depends on SOC_IMX23 || SOC_IMX28 255 depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q
241 select STMP_DEVICE 256 select STMP_DEVICE
242 select DMA_ENGINE 257 select DMA_ENGINE
243 help 258 help
@@ -255,14 +270,34 @@ config DMA_SA11X0
255 tristate "SA-11x0 DMA support" 270 tristate "SA-11x0 DMA support"
256 depends on ARCH_SA1100 271 depends on ARCH_SA1100
257 select DMA_ENGINE 272 select DMA_ENGINE
273 select DMA_VIRTUAL_CHANNELS
258 help 274 help
259 Support the DMA engine found on Intel StrongARM SA-1100 and 275 Support the DMA engine found on Intel StrongARM SA-1100 and
260 SA-1110 SoCs. This DMA engine can only be used with on-chip 276 SA-1110 SoCs. This DMA engine can only be used with on-chip
261 devices. 277 devices.
262 278
279config MMP_TDMA
280 bool "MMP Two-Channel DMA support"
281 depends on ARCH_MMP
282 select DMA_ENGINE
283 help
284 Support the MMP Two-Channel DMA engine.
 285	  This engine is used for MMP Audio DMA and pxa910 SQU.
286
287 Say Y here if you enabled MMP ADMA, otherwise say N.
288
289config DMA_OMAP
290 tristate "OMAP DMA support"
291 depends on ARCH_OMAP
292 select DMA_ENGINE
293 select DMA_VIRTUAL_CHANNELS
294
263config DMA_ENGINE 295config DMA_ENGINE
264 bool 296 bool
265 297
298config DMA_VIRTUAL_CHANNELS
299 tristate
300
266comment "DMA Clients" 301comment "DMA Clients"
267 depends on DMA_ENGINE 302 depends on DMA_ENGINE
268 303
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 86b795baba98..4cf6b128ab9a 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -2,6 +2,7 @@ ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG
2ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG 2ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
3 3
4obj-$(CONFIG_DMA_ENGINE) += dmaengine.o 4obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
5obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
5obj-$(CONFIG_NET_DMA) += iovlock.o 6obj-$(CONFIG_NET_DMA) += iovlock.o
6obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o 7obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
7obj-$(CONFIG_DMATEST) += dmatest.o 8obj-$(CONFIG_DMATEST) += dmatest.o
@@ -14,7 +15,7 @@ obj-$(CONFIG_DW_DMAC) += dw_dmac.o
14obj-$(CONFIG_AT_HDMAC) += at_hdmac.o 15obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
15obj-$(CONFIG_MX3_IPU) += ipu/ 16obj-$(CONFIG_MX3_IPU) += ipu/
16obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o 17obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
17obj-$(CONFIG_SH_DMAE) += shdma.o 18obj-$(CONFIG_SH_DMAE) += sh/
18obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o 19obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
19obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ 20obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
20obj-$(CONFIG_IMX_SDMA) += imx-sdma.o 21obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
@@ -23,8 +24,11 @@ obj-$(CONFIG_MXS_DMA) += mxs-dma.o
23obj-$(CONFIG_TIMB_DMA) += timb_dma.o 24obj-$(CONFIG_TIMB_DMA) += timb_dma.o
24obj-$(CONFIG_SIRF_DMA) += sirf-dma.o 25obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
25obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o 26obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
27obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
26obj-$(CONFIG_PL330_DMA) += pl330.o 28obj-$(CONFIG_PL330_DMA) += pl330.o
27obj-$(CONFIG_PCH_DMA) += pch_dma.o 29obj-$(CONFIG_PCH_DMA) += pch_dma.o
28obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o 30obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
29obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o 31obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
30obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o 32obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
33obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
34obj-$(CONFIG_DMA_OMAP) += omap-dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 49ecbbb8932d..6fbeebb9486f 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -86,10 +86,12 @@
86#include <asm/hardware/pl080.h> 86#include <asm/hardware/pl080.h>
87 87
88#include "dmaengine.h" 88#include "dmaengine.h"
89#include "virt-dma.h"
89 90
90#define DRIVER_NAME "pl08xdmac" 91#define DRIVER_NAME "pl08xdmac"
91 92
92static struct amba_driver pl08x_amba_driver; 93static struct amba_driver pl08x_amba_driver;
94struct pl08x_driver_data;
93 95
94/** 96/**
95 * struct vendor_data - vendor-specific config parameters for PL08x derivatives 97 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
@@ -119,6 +121,123 @@ struct pl08x_lli {
119}; 121};
120 122
121/** 123/**
 124 * struct pl08x_bus_data - information about the source or destination
 125 * buses for a transfer
126 * @addr: current address
127 * @maxwidth: the maximum width of a transfer on this bus
128 * @buswidth: the width of this bus in bytes: 1, 2 or 4
129 */
130struct pl08x_bus_data {
131 dma_addr_t addr;
132 u8 maxwidth;
133 u8 buswidth;
134};
135
136/**
137 * struct pl08x_phy_chan - holder for the physical channels
138 * @id: physical index to this channel
139 * @lock: a lock to use when altering an instance of this struct
140 * @serving: the virtual channel currently being served by this physical
141 * channel
142 * @locked: channel unavailable for the system, e.g. dedicated to secure
143 * world
144 */
145struct pl08x_phy_chan {
146 unsigned int id;
147 void __iomem *base;
148 spinlock_t lock;
149 struct pl08x_dma_chan *serving;
150 bool locked;
151};
152
153/**
154 * struct pl08x_sg - structure containing data per sg
155 * @src_addr: src address of sg
156 * @dst_addr: dst address of sg
157 * @len: transfer len in bytes
158 * @node: node for txd's dsg_list
159 */
160struct pl08x_sg {
161 dma_addr_t src_addr;
162 dma_addr_t dst_addr;
163 size_t len;
164 struct list_head node;
165};
166
167/**
168 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
169 * @vd: virtual DMA descriptor
170 * @dsg_list: list of children sg's
171 * @llis_bus: DMA memory address (physical) start for the LLIs
172 * @llis_va: virtual memory address start for the LLIs
173 * @cctl: control reg values for current txd
174 * @ccfg: config reg values for current txd
175 * @done: this marks completed descriptors, which should not have their
176 * mux released.
177 */
178struct pl08x_txd {
179 struct virt_dma_desc vd;
180 struct list_head dsg_list;
181 dma_addr_t llis_bus;
182 struct pl08x_lli *llis_va;
183 /* Default cctl value for LLIs */
184 u32 cctl;
185 /*
186 * Settings to be put into the physical channel when we
187 * trigger this txd. Other registers are in llis_va[0].
188 */
189 u32 ccfg;
190 bool done;
191};
192
193/**
194 * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
195 * states
196 * @PL08X_CHAN_IDLE: the channel is idle
197 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
198 * channel and is running a transfer on it
199 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
200 * channel, but the transfer is currently paused
201 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
202 * channel to become available (only pertains to memcpy channels)
203 */
204enum pl08x_dma_chan_state {
205 PL08X_CHAN_IDLE,
206 PL08X_CHAN_RUNNING,
207 PL08X_CHAN_PAUSED,
208 PL08X_CHAN_WAITING,
209};
210
211/**
212 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 212 * @vc: wrapped virtual channel
214 * @phychan: the physical channel utilized by this channel, if there is one
215 * @name: name of channel
216 * @cd: channel platform data
 217 * @cfg: slave configuration set via dma_set_runtime_config()
 218 * @at: active transaction on this channel
220 * @host: a pointer to the host (internal use)
221 * @state: whether the channel is idle, paused, running etc
222 * @slave: whether this channel is a device (slave) or for memcpy
223 * @signal: the physical DMA request signal which this channel is using
224 * @mux_use: count of descriptors using this DMA request signal setting
225 */
226struct pl08x_dma_chan {
227 struct virt_dma_chan vc;
228 struct pl08x_phy_chan *phychan;
229 const char *name;
230 const struct pl08x_channel_data *cd;
231 struct dma_slave_config cfg;
232 struct pl08x_txd *at;
233 struct pl08x_driver_data *host;
234 enum pl08x_dma_chan_state state;
235 bool slave;
236 int signal;
237 unsigned mux_use;
238};
239
240/**
122 * struct pl08x_driver_data - the local state holder for the PL08x 241 * struct pl08x_driver_data - the local state holder for the PL08x
123 * @slave: slave engine for this instance 242 * @slave: slave engine for this instance
124 * @memcpy: memcpy engine for this instance 243 * @memcpy: memcpy engine for this instance
@@ -128,7 +247,6 @@ struct pl08x_lli {
128 * @pd: platform data passed in from the platform/machine 247 * @pd: platform data passed in from the platform/machine
129 * @phy_chans: array of data for the physical channels 248 * @phy_chans: array of data for the physical channels
130 * @pool: a pool for the LLI descriptors 249 * @pool: a pool for the LLI descriptors
131 * @pool_ctr: counter of LLIs in the pool
132 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI 250 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
133 * fetches 251 * fetches
134 * @mem_buses: set to indicate memory transfers on AHB2. 252 * @mem_buses: set to indicate memory transfers on AHB2.
@@ -143,10 +261,8 @@ struct pl08x_driver_data {
143 struct pl08x_platform_data *pd; 261 struct pl08x_platform_data *pd;
144 struct pl08x_phy_chan *phy_chans; 262 struct pl08x_phy_chan *phy_chans;
145 struct dma_pool *pool; 263 struct dma_pool *pool;
146 int pool_ctr;
147 u8 lli_buses; 264 u8 lli_buses;
148 u8 mem_buses; 265 u8 mem_buses;
149 spinlock_t lock;
150}; 266};
151 267
152/* 268/*
@@ -162,12 +278,51 @@ struct pl08x_driver_data {
162 278
163static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) 279static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
164{ 280{
165 return container_of(chan, struct pl08x_dma_chan, chan); 281 return container_of(chan, struct pl08x_dma_chan, vc.chan);
166} 282}
167 283
168static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx) 284static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
169{ 285{
170 return container_of(tx, struct pl08x_txd, tx); 286 return container_of(tx, struct pl08x_txd, vd.tx);
287}
288
289/*
290 * Mux handling.
291 *
292 * This gives us the DMA request input to the PL08x primecell which the
293 * peripheral described by the channel data will be routed to, possibly
294 * via a board/SoC specific external MUX. One important point to note
295 * here is that this does not depend on the physical channel.
296 */
297static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
298{
299 const struct pl08x_platform_data *pd = plchan->host->pd;
300 int ret;
301
302 if (plchan->mux_use++ == 0 && pd->get_signal) {
303 ret = pd->get_signal(plchan->cd);
304 if (ret < 0) {
305 plchan->mux_use = 0;
306 return ret;
307 }
308
309 plchan->signal = ret;
310 }
311 return 0;
312}
313
314static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
315{
316 const struct pl08x_platform_data *pd = plchan->host->pd;
317
318 if (plchan->signal >= 0) {
319 WARN_ON(plchan->mux_use == 0);
320
321 if (--plchan->mux_use == 0 && pd->put_signal) {
322 pd->put_signal(plchan->cd, plchan->signal);
323 plchan->signal = -1;
324 }
325 }
171} 326}
172 327
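The mux_use counter makes these two calls strictly reference counted: the platform get_signal() hook runs only on the first request and put_signal() only on the last release, so every descriptor that claims the signal must drop it exactly once. A hedged sketch of the pairing as the rest of this patch uses it (error handling trimmed):

	/* Prep path: claim the request signal, then route it. */
	if (pl08x_request_mux(plchan) < 0)
		return NULL;
	txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;

	/* Free path (pl08x_desc_free): balance the claim. */
	pl08x_release_mux(plchan);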
173/* 328/*
@@ -189,20 +344,25 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
189 * been set when the LLIs were constructed. Poke them into the hardware 344 * been set when the LLIs were constructed. Poke them into the hardware
190 * and start the transfer. 345 * and start the transfer.
191 */ 346 */
192static void pl08x_start_txd(struct pl08x_dma_chan *plchan, 347static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
193 struct pl08x_txd *txd)
194{ 348{
195 struct pl08x_driver_data *pl08x = plchan->host; 349 struct pl08x_driver_data *pl08x = plchan->host;
196 struct pl08x_phy_chan *phychan = plchan->phychan; 350 struct pl08x_phy_chan *phychan = plchan->phychan;
197 struct pl08x_lli *lli = &txd->llis_va[0]; 351 struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
352 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
353 struct pl08x_lli *lli;
198 u32 val; 354 u32 val;
199 355
356 list_del(&txd->vd.node);
357
200 plchan->at = txd; 358 plchan->at = txd;
201 359
202 /* Wait for channel inactive */ 360 /* Wait for channel inactive */
203 while (pl08x_phy_channel_busy(phychan)) 361 while (pl08x_phy_channel_busy(phychan))
204 cpu_relax(); 362 cpu_relax();
205 363
364 lli = &txd->llis_va[0];
365
206 dev_vdbg(&pl08x->adev->dev, 366 dev_vdbg(&pl08x->adev->dev,
207 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " 367 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
208 "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n", 368 "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
@@ -311,10 +471,8 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
311{ 471{
312 struct pl08x_phy_chan *ch; 472 struct pl08x_phy_chan *ch;
313 struct pl08x_txd *txd; 473 struct pl08x_txd *txd;
314 unsigned long flags;
315 size_t bytes = 0; 474 size_t bytes = 0;
316 475
317 spin_lock_irqsave(&plchan->lock, flags);
318 ch = plchan->phychan; 476 ch = plchan->phychan;
319 txd = plchan->at; 477 txd = plchan->at;
320 478
@@ -354,18 +512,6 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
354 } 512 }
355 } 513 }
356 514
357 /* Sum up all queued transactions */
358 if (!list_empty(&plchan->pend_list)) {
359 struct pl08x_txd *txdi;
360 list_for_each_entry(txdi, &plchan->pend_list, node) {
361 struct pl08x_sg *dsg;
362 list_for_each_entry(dsg, &txd->dsg_list, node)
363 bytes += dsg->len;
364 }
365 }
366
367 spin_unlock_irqrestore(&plchan->lock, flags);
368
369 return bytes; 515 return bytes;
370} 516}
371 517
@@ -391,7 +537,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
391 537
392 if (!ch->locked && !ch->serving) { 538 if (!ch->locked && !ch->serving) {
393 ch->serving = virt_chan; 539 ch->serving = virt_chan;
394 ch->signal = -1;
395 spin_unlock_irqrestore(&ch->lock, flags); 540 spin_unlock_irqrestore(&ch->lock, flags);
396 break; 541 break;
397 } 542 }
@@ -404,25 +549,114 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
404 return NULL; 549 return NULL;
405 } 550 }
406 551
407 pm_runtime_get_sync(&pl08x->adev->dev);
408 return ch; 552 return ch;
409} 553}
410 554
555/* Mark the physical channel as free. Note, this write is atomic. */
411static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, 556static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
412 struct pl08x_phy_chan *ch) 557 struct pl08x_phy_chan *ch)
413{ 558{
414 unsigned long flags; 559 ch->serving = NULL;
560}
561
562/*
563 * Try to allocate a physical channel. When successful, assign it to
564 * this virtual channel, and initiate the next descriptor. The
565 * virtual channel lock must be held at this point.
566 */
567static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
568{
569 struct pl08x_driver_data *pl08x = plchan->host;
570 struct pl08x_phy_chan *ch;
415 571
416 spin_lock_irqsave(&ch->lock, flags); 572 ch = pl08x_get_phy_channel(pl08x, plchan);
573 if (!ch) {
574 dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
575 plchan->state = PL08X_CHAN_WAITING;
576 return;
577 }
417 578
418 /* Stop the channel and clear its interrupts */ 579 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
419 pl08x_terminate_phy_chan(pl08x, ch); 580 ch->id, plchan->name);
420 581
421 pm_runtime_put(&pl08x->adev->dev); 582 plchan->phychan = ch;
583 plchan->state = PL08X_CHAN_RUNNING;
584 pl08x_start_next_txd(plchan);
585}
422 586
423 /* Mark it as free */ 587static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
424 ch->serving = NULL; 588 struct pl08x_dma_chan *plchan)
425 spin_unlock_irqrestore(&ch->lock, flags); 589{
590 struct pl08x_driver_data *pl08x = plchan->host;
591
592 dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
593 ch->id, plchan->name);
594
595 /*
596 * We do this without taking the lock; we're really only concerned
597 * about whether this pointer is NULL or not, and we're guaranteed
598 * that this will only be called when it _already_ is non-NULL.
599 */
600 ch->serving = plchan;
601 plchan->phychan = ch;
602 plchan->state = PL08X_CHAN_RUNNING;
603 pl08x_start_next_txd(plchan);
604}
605
606/*
607 * Free a physical DMA channel, potentially reallocating it to another
608 * virtual channel if we have any pending.
609 */
610static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
611{
612 struct pl08x_driver_data *pl08x = plchan->host;
613 struct pl08x_dma_chan *p, *next;
614
615 retry:
616 next = NULL;
617
618 /* Find a waiting virtual channel for the next transfer. */
619 list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
620 if (p->state == PL08X_CHAN_WAITING) {
621 next = p;
622 break;
623 }
624
625 if (!next) {
626 list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
627 if (p->state == PL08X_CHAN_WAITING) {
628 next = p;
629 break;
630 }
631 }
632
633 /* Ensure that the physical channel is stopped */
634 pl08x_terminate_phy_chan(pl08x, plchan->phychan);
635
636 if (next) {
637 bool success;
638
639 /*
640 * Eww. We know this isn't going to deadlock
641 * but lockdep probably doesn't.
642 */
643 spin_lock(&next->vc.lock);
644 /* Re-check the state now that we have the lock */
645 success = next->state == PL08X_CHAN_WAITING;
646 if (success)
647 pl08x_phy_reassign_start(plchan->phychan, next);
648 spin_unlock(&next->vc.lock);
649
650 /* If the state changed, try to find another channel */
651 if (!success)
652 goto retry;
653 } else {
654 /* No more jobs, so free up the physical channel */
655 pl08x_put_phy_channel(pl08x, plchan->phychan);
656 }
657
658 plchan->phychan = NULL;
659 plchan->state = PL08X_CHAN_IDLE;
426} 660}
427 661
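The retry loop just above scans for a waiter without holding that waiter's lock, then re-validates the state once the lock is taken; losing the race simply means rescanning. The same optimistic double-check pattern in isolation (a generic sketch with hypothetical names, not driver code):

	/* Scan found 'candidate' locklessly; confirm before handing off. */
	spin_lock(&candidate->lock);
	if (candidate->state == WAITING)
		hand_off(phychan, candidate);	/* still waiting: safe */
	else
		rescan = true;			/* raced: look again */
	spin_unlock(&candidate->lock);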
428/* 662/*
@@ -585,8 +819,6 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
585 return 0; 819 return 0;
586 } 820 }
587 821
588 pl08x->pool_ctr++;
589
590 bd.txd = txd; 822 bd.txd = txd;
591 bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0; 823 bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
592 cctl = txd->cctl; 824 cctl = txd->cctl;
@@ -802,18 +1034,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
802 return num_llis; 1034 return num_llis;
803} 1035}
804 1036
805/* You should call this with the struct pl08x lock held */
806static void pl08x_free_txd(struct pl08x_driver_data *pl08x, 1037static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
807 struct pl08x_txd *txd) 1038 struct pl08x_txd *txd)
808{ 1039{
809 struct pl08x_sg *dsg, *_dsg; 1040 struct pl08x_sg *dsg, *_dsg;
810 1041
811 /* Free the LLI */
812 if (txd->llis_va) 1042 if (txd->llis_va)
813 dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); 1043 dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
814 1044
815 pl08x->pool_ctr--;
816
817 list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) { 1045 list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
818 list_del(&dsg->node); 1046 list_del(&dsg->node);
819 kfree(dsg); 1047 kfree(dsg);
@@ -822,133 +1050,75 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
822 kfree(txd); 1050 kfree(txd);
823} 1051}
824 1052
825static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, 1053static void pl08x_unmap_buffers(struct pl08x_txd *txd)
826 struct pl08x_dma_chan *plchan)
827{ 1054{
828 struct pl08x_txd *txdi = NULL; 1055 struct device *dev = txd->vd.tx.chan->device->dev;
829 struct pl08x_txd *next; 1056 struct pl08x_sg *dsg;
830 1057
831 if (!list_empty(&plchan->pend_list)) { 1058 if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
832 list_for_each_entry_safe(txdi, 1059 if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
833 next, &plchan->pend_list, node) { 1060 list_for_each_entry(dsg, &txd->dsg_list, node)
834 list_del(&txdi->node); 1061 dma_unmap_single(dev, dsg->src_addr, dsg->len,
835 pl08x_free_txd(pl08x, txdi); 1062 DMA_TO_DEVICE);
1063 else {
1064 list_for_each_entry(dsg, &txd->dsg_list, node)
1065 dma_unmap_page(dev, dsg->src_addr, dsg->len,
1066 DMA_TO_DEVICE);
836 } 1067 }
837 } 1068 }
1069 if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
1070 if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
1071 list_for_each_entry(dsg, &txd->dsg_list, node)
1072 dma_unmap_single(dev, dsg->dst_addr, dsg->len,
1073 DMA_FROM_DEVICE);
1074 else
1075 list_for_each_entry(dsg, &txd->dsg_list, node)
1076 dma_unmap_page(dev, dsg->dst_addr, dsg->len,
1077 DMA_FROM_DEVICE);
1078 }
838} 1079}
839 1080
840/* 1081static void pl08x_desc_free(struct virt_dma_desc *vd)
841 * The DMA ENGINE API
842 */
843static int pl08x_alloc_chan_resources(struct dma_chan *chan)
844{ 1082{
845 return 0; 1083 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
846} 1084 struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
847 1085
848static void pl08x_free_chan_resources(struct dma_chan *chan) 1086 if (!plchan->slave)
849{ 1087 pl08x_unmap_buffers(txd);
1088
1089 if (!txd->done)
1090 pl08x_release_mux(plchan);
1091
1092 pl08x_free_txd(plchan->host, txd);
850} 1093}
851 1094
852/* 1095static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
853 * This should be called with the channel plchan->lock held 1096 struct pl08x_dma_chan *plchan)
854 */
855static int prep_phy_channel(struct pl08x_dma_chan *plchan,
856 struct pl08x_txd *txd)
857{ 1097{
858 struct pl08x_driver_data *pl08x = plchan->host; 1098 LIST_HEAD(head);
859 struct pl08x_phy_chan *ch; 1099 struct pl08x_txd *txd;
860 int ret;
861
862 /* Check if we already have a channel */
863 if (plchan->phychan) {
864 ch = plchan->phychan;
865 goto got_channel;
866 }
867 1100
868 ch = pl08x_get_phy_channel(pl08x, plchan); 1101 vchan_get_all_descriptors(&plchan->vc, &head);
869 if (!ch) {
870 /* No physical channel available, cope with it */
871 dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
872 return -EBUSY;
873 }
874 1102
875 /* 1103 while (!list_empty(&head)) {
876 * OK we have a physical channel: for memcpy() this is all we 1104 txd = list_first_entry(&head, struct pl08x_txd, vd.node);
877 * need, but for slaves the physical signals may be muxed! 1105 list_del(&txd->vd.node);
878 * Can the platform allow us to use this channel? 1106 pl08x_desc_free(&txd->vd);
879 */
880 if (plchan->slave && pl08x->pd->get_signal) {
881 ret = pl08x->pd->get_signal(plchan);
882 if (ret < 0) {
883 dev_dbg(&pl08x->adev->dev,
884 "unable to use physical channel %d for transfer on %s due to platform restrictions\n",
885 ch->id, plchan->name);
886 /* Release physical channel & return */
887 pl08x_put_phy_channel(pl08x, ch);
888 return -EBUSY;
889 }
890 ch->signal = ret;
891 } 1107 }
892
893 plchan->phychan = ch;
894 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
895 ch->id,
896 ch->signal,
897 plchan->name);
898
899got_channel:
900 /* Assign the flow control signal to this channel */
901 if (txd->direction == DMA_MEM_TO_DEV)
902 txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
903 else if (txd->direction == DMA_DEV_TO_MEM)
904 txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
905
906 plchan->phychan_hold++;
907
908 return 0;
909} 1108}
910 1109
911static void release_phy_channel(struct pl08x_dma_chan *plchan) 1110/*
1111 * The DMA ENGINE API
1112 */
1113static int pl08x_alloc_chan_resources(struct dma_chan *chan)
912{ 1114{
913 struct pl08x_driver_data *pl08x = plchan->host; 1115 return 0;
914
915 if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
916 pl08x->pd->put_signal(plchan);
917 plchan->phychan->signal = -1;
918 }
919 pl08x_put_phy_channel(pl08x, plchan->phychan);
920 plchan->phychan = NULL;
921} 1116}
922 1117
923static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) 1118static void pl08x_free_chan_resources(struct dma_chan *chan)
924{ 1119{
925 struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); 1120 /* Ensure all queued descriptors are freed */
926 struct pl08x_txd *txd = to_pl08x_txd(tx); 1121 vchan_free_chan_resources(to_virt_chan(chan));
927 unsigned long flags;
928 dma_cookie_t cookie;
929
930 spin_lock_irqsave(&plchan->lock, flags);
931 cookie = dma_cookie_assign(tx);
932
933 /* Put this onto the pending list */
934 list_add_tail(&txd->node, &plchan->pend_list);
935
936 /*
937 * If there was no physical channel available for this memcpy,
938 * stack the request up and indicate that the channel is waiting
939 * for a free physical channel.
940 */
941 if (!plchan->slave && !plchan->phychan) {
942 /* Do this memcpy whenever there is a channel ready */
943 plchan->state = PL08X_CHAN_WAITING;
944 plchan->waiting = txd;
945 } else {
946 plchan->phychan_hold--;
947 }
948
949 spin_unlock_irqrestore(&plchan->lock, flags);
950
951 return cookie;
952} 1122}
953 1123
954static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( 1124static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
@@ -968,23 +1138,53 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
968 dma_cookie_t cookie, struct dma_tx_state *txstate) 1138 dma_cookie_t cookie, struct dma_tx_state *txstate)
969{ 1139{
970 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1140 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1141 struct virt_dma_desc *vd;
1142 unsigned long flags;
971 enum dma_status ret; 1143 enum dma_status ret;
1144 size_t bytes = 0;
972 1145
973 ret = dma_cookie_status(chan, cookie, txstate); 1146 ret = dma_cookie_status(chan, cookie, txstate);
974 if (ret == DMA_SUCCESS) 1147 if (ret == DMA_SUCCESS)
975 return ret; 1148 return ret;
976 1149
977 /* 1150 /*
1151 * There's no point calculating the residue if there's
1152 * no txstate to store the value.
1153 */
1154 if (!txstate) {
1155 if (plchan->state == PL08X_CHAN_PAUSED)
1156 ret = DMA_PAUSED;
1157 return ret;
1158 }
1159
1160 spin_lock_irqsave(&plchan->vc.lock, flags);
1161 ret = dma_cookie_status(chan, cookie, txstate);
1162 if (ret != DMA_SUCCESS) {
1163 vd = vchan_find_desc(&plchan->vc, cookie);
1164 if (vd) {
1165 /* On the issued list, so hasn't been processed yet */
1166 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
1167 struct pl08x_sg *dsg;
1168
1169 list_for_each_entry(dsg, &txd->dsg_list, node)
1170 bytes += dsg->len;
1171 } else {
1172 bytes = pl08x_getbytes_chan(plchan);
1173 }
1174 }
1175 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1176
1177 /*
 978 * This cookie is not complete yet 1178 * This cookie is not complete yet
 979 * Get the number of bytes left in the active transactions and queue 1179 * Get the number of bytes left in the active transactions and queue
980 */ 1180 */
981 dma_set_residue(txstate, pl08x_getbytes_chan(plchan)); 1181 dma_set_residue(txstate, bytes);
982 1182
983 if (plchan->state == PL08X_CHAN_PAUSED) 1183 if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
984 return DMA_PAUSED; 1184 ret = DMA_PAUSED;
985 1185
986 /* Whether waiting or running, we're in progress */ 1186 /* Whether waiting or running, we're in progress */
987 return DMA_IN_PROGRESS; 1187 return ret;
988} 1188}
989 1189
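For reference, the residue reporting above reduces to three cases; a condensed restatement using the same names as the hunk (a sketch, not additional code):

	/* Condensed logic of the residue path above:
	 *  - dma_cookie_status() reports DMA_SUCCESS -> done, residue is 0;
	 *  - vchan_find_desc() finds the cookie -> still queued, residue is
	 *    the sum of its scatterlist entry lengths;
	 *  - otherwise it is the active descriptor -> read progress from the
	 *    hardware via pl08x_getbytes_chan().
	 */
	dma_set_residue(txstate, bytes);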
990/* PrimeCell DMA extension */ 1190/* PrimeCell DMA extension */
@@ -1080,38 +1280,14 @@ static u32 pl08x_burst(u32 maxburst)
1080 return burst_sizes[i].reg; 1280 return burst_sizes[i].reg;
1081} 1281}
1082 1282
1083static int dma_set_runtime_config(struct dma_chan *chan, 1283static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
1084 struct dma_slave_config *config) 1284 enum dma_slave_buswidth addr_width, u32 maxburst)
1085{ 1285{
1086 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1286 u32 width, burst, cctl = 0;
1087 struct pl08x_driver_data *pl08x = plchan->host;
1088 enum dma_slave_buswidth addr_width;
1089 u32 width, burst, maxburst;
1090 u32 cctl = 0;
1091
1092 if (!plchan->slave)
1093 return -EINVAL;
1094
1095 /* Transfer direction */
1096 plchan->runtime_direction = config->direction;
1097 if (config->direction == DMA_MEM_TO_DEV) {
1098 addr_width = config->dst_addr_width;
1099 maxburst = config->dst_maxburst;
1100 } else if (config->direction == DMA_DEV_TO_MEM) {
1101 addr_width = config->src_addr_width;
1102 maxburst = config->src_maxburst;
1103 } else {
1104 dev_err(&pl08x->adev->dev,
1105 "bad runtime_config: alien transfer direction\n");
1106 return -EINVAL;
1107 }
1108 1287
1109 width = pl08x_width(addr_width); 1288 width = pl08x_width(addr_width);
1110 if (width == ~0) { 1289 if (width == ~0)
1111 dev_err(&pl08x->adev->dev, 1290 return ~0;
1112 "bad runtime_config: alien address width\n");
1113 return -EINVAL;
1114 }
1115 1291
1116 cctl |= width << PL080_CONTROL_SWIDTH_SHIFT; 1292 cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
1117 cctl |= width << PL080_CONTROL_DWIDTH_SHIFT; 1293 cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
@@ -1128,28 +1304,23 @@ static int dma_set_runtime_config(struct dma_chan *chan,
1128 cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; 1304 cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
1129 cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; 1305 cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
1130 1306
1131 plchan->device_fc = config->device_fc; 1307 return pl08x_cctl(cctl);
1308}
1132 1309
1133 if (plchan->runtime_direction == DMA_DEV_TO_MEM) { 1310static int dma_set_runtime_config(struct dma_chan *chan,
1134 plchan->src_addr = config->src_addr; 1311 struct dma_slave_config *config)
1135 plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR | 1312{
1136 pl08x_select_bus(plchan->cd->periph_buses, 1313 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1137 pl08x->mem_buses);
1138 } else {
1139 plchan->dst_addr = config->dst_addr;
1140 plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
1141 pl08x_select_bus(pl08x->mem_buses,
1142 plchan->cd->periph_buses);
1143 }
1144 1314
1145 dev_dbg(&pl08x->adev->dev, 1315 if (!plchan->slave)
1146 "configured channel %s (%s) for %s, data width %d, " 1316 return -EINVAL;
1147 "maxburst %d words, LE, CCTL=0x%08x\n", 1317
1148 dma_chan_name(chan), plchan->name, 1318 /* Reject definitely invalid configurations */
1149 (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX", 1319 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
1150 addr_width, 1320 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
1151 maxburst, 1321 return -EINVAL;
1152 cctl); 1322
1323 plchan->cfg = *config;
1153 1324
1154 return 0; 1325 return 0;
1155} 1326}
@@ -1163,95 +1334,19 @@ static void pl08x_issue_pending(struct dma_chan *chan)
1163 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1334 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1164 unsigned long flags; 1335 unsigned long flags;
1165 1336
1166 spin_lock_irqsave(&plchan->lock, flags); 1337 spin_lock_irqsave(&plchan->vc.lock, flags);
1167 /* Something is already active, or we're waiting for a channel... */ 1338 if (vchan_issue_pending(&plchan->vc)) {
1168 if (plchan->at || plchan->state == PL08X_CHAN_WAITING) { 1339 if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
1169 spin_unlock_irqrestore(&plchan->lock, flags); 1340 pl08x_phy_alloc_and_start(plchan);
1170 return;
1171 }
1172
1173 /* Take the first element in the queue and execute it */
1174 if (!list_empty(&plchan->pend_list)) {
1175 struct pl08x_txd *next;
1176
1177 next = list_first_entry(&plchan->pend_list,
1178 struct pl08x_txd,
1179 node);
1180 list_del(&next->node);
1181 plchan->state = PL08X_CHAN_RUNNING;
1182
1183 pl08x_start_txd(plchan, next);
1184 } 1341 }
1185 1342 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1186 spin_unlock_irqrestore(&plchan->lock, flags);
1187}
1188
1189static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
1190 struct pl08x_txd *txd)
1191{
1192 struct pl08x_driver_data *pl08x = plchan->host;
1193 unsigned long flags;
1194 int num_llis, ret;
1195
1196 num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
1197 if (!num_llis) {
1198 spin_lock_irqsave(&plchan->lock, flags);
1199 pl08x_free_txd(pl08x, txd);
1200 spin_unlock_irqrestore(&plchan->lock, flags);
1201 return -EINVAL;
1202 }
1203
1204 spin_lock_irqsave(&plchan->lock, flags);
1205
1206 /*
1207 * See if we already have a physical channel allocated,
1208 * else this is the time to try to get one.
1209 */
1210 ret = prep_phy_channel(plchan, txd);
1211 if (ret) {
1212 /*
1213 * No physical channel was available.
1214 *
1215 * memcpy transfers can be sorted out at submission time.
1216 *
1217 * Slave transfers may have been denied due to platform
1218 * channel muxing restrictions. Since there is no guarantee
1219 * that this will ever be resolved, and the signal must be
1220 * acquired AFTER acquiring the physical channel, we will let
1221 * them be NACK:ed with -EBUSY here. The drivers can retry
1222 * the prep() call if they are eager on doing this using DMA.
1223 */
1224 if (plchan->slave) {
1225 pl08x_free_txd_list(pl08x, plchan);
1226 pl08x_free_txd(pl08x, txd);
1227 spin_unlock_irqrestore(&plchan->lock, flags);
1228 return -EBUSY;
1229 }
1230 } else
1231 /*
1232 * Else we're all set, paused and ready to roll, status
1233 * will switch to PL08X_CHAN_RUNNING when we call
1234 * issue_pending(). If there is something running on the
1235 * channel already we don't change its state.
1236 */
1237 if (plchan->state == PL08X_CHAN_IDLE)
1238 plchan->state = PL08X_CHAN_PAUSED;
1239
1240 spin_unlock_irqrestore(&plchan->lock, flags);
1241
1242 return 0;
1243} 1343}
1244 1344
1245static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, 1345static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
1246 unsigned long flags)
1247{ 1346{
1248 struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 1347 struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
1249 1348
1250 if (txd) { 1349 if (txd) {
1251 dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
1252 txd->tx.flags = flags;
1253 txd->tx.tx_submit = pl08x_tx_submit;
1254 INIT_LIST_HEAD(&txd->node);
1255 INIT_LIST_HEAD(&txd->dsg_list); 1350 INIT_LIST_HEAD(&txd->dsg_list);
1256 1351
1257 /* Always enable error and terminal interrupts */ 1352 /* Always enable error and terminal interrupts */
@@ -1274,7 +1369,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1274 struct pl08x_sg *dsg; 1369 struct pl08x_sg *dsg;
1275 int ret; 1370 int ret;
1276 1371
1277 txd = pl08x_get_txd(plchan, flags); 1372 txd = pl08x_get_txd(plchan);
1278 if (!txd) { 1373 if (!txd) {
1279 dev_err(&pl08x->adev->dev, 1374 dev_err(&pl08x->adev->dev,
1280 "%s no memory for descriptor\n", __func__); 1375 "%s no memory for descriptor\n", __func__);
@@ -1290,14 +1385,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1290 } 1385 }
1291 list_add_tail(&dsg->node, &txd->dsg_list); 1386 list_add_tail(&dsg->node, &txd->dsg_list);
1292 1387
1293 txd->direction = DMA_NONE;
1294 dsg->src_addr = src; 1388 dsg->src_addr = src;
1295 dsg->dst_addr = dest; 1389 dsg->dst_addr = dest;
1296 dsg->len = len; 1390 dsg->len = len;
1297 1391
1298 /* Set platform data for m2m */ 1392 /* Set platform data for m2m */
1299 txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1393 txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1300 txd->cctl = pl08x->pd->memcpy_channel.cctl & 1394 txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
1301 ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); 1395 ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
1302 1396
1303 /* Both to be incremented or the code will break */ 1397 /* Both to be incremented or the code will break */
@@ -1307,11 +1401,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1307 txd->cctl |= pl08x_select_bus(pl08x->mem_buses, 1401 txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
1308 pl08x->mem_buses); 1402 pl08x->mem_buses);
1309 1403
1310 ret = pl08x_prep_channel_resources(plchan, txd); 1404 ret = pl08x_fill_llis_for_desc(plchan->host, txd);
1311 if (ret) 1405 if (!ret) {
1406 pl08x_free_txd(pl08x, txd);
1312 return NULL; 1407 return NULL;
1408 }
1313 1409
1314 return &txd->tx; 1410 return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
1315} 1411}
1316 1412
1317static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( 1413static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
@@ -1324,36 +1420,40 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1324 struct pl08x_txd *txd; 1420 struct pl08x_txd *txd;
1325 struct pl08x_sg *dsg; 1421 struct pl08x_sg *dsg;
1326 struct scatterlist *sg; 1422 struct scatterlist *sg;
1423 enum dma_slave_buswidth addr_width;
1327 dma_addr_t slave_addr; 1424 dma_addr_t slave_addr;
1328 int ret, tmp; 1425 int ret, tmp;
1426 u8 src_buses, dst_buses;
1427 u32 maxburst, cctl;
1329 1428
1330 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", 1429 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1331 __func__, sg_dma_len(sgl), plchan->name); 1430 __func__, sg_dma_len(sgl), plchan->name);
1332 1431
1333 txd = pl08x_get_txd(plchan, flags); 1432 txd = pl08x_get_txd(plchan);
1334 if (!txd) { 1433 if (!txd) {
1335 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); 1434 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
1336 return NULL; 1435 return NULL;
1337 } 1436 }
1338 1437
1339 if (direction != plchan->runtime_direction)
1340 dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
1341 "the direction configured for the PrimeCell\n",
1342 __func__);
1343
1344 /* 1438 /*
1345 * Set up addresses, the PrimeCell configured address 1439 * Set up addresses, the PrimeCell configured address
1346 * will take precedence since this may configure the 1440 * will take precedence since this may configure the
1347 * channel target address dynamically at runtime. 1441 * channel target address dynamically at runtime.
1348 */ 1442 */
1349 txd->direction = direction;
1350
1351 if (direction == DMA_MEM_TO_DEV) { 1443 if (direction == DMA_MEM_TO_DEV) {
1352 txd->cctl = plchan->dst_cctl; 1444 cctl = PL080_CONTROL_SRC_INCR;
1353 slave_addr = plchan->dst_addr; 1445 slave_addr = plchan->cfg.dst_addr;
1446 addr_width = plchan->cfg.dst_addr_width;
1447 maxburst = plchan->cfg.dst_maxburst;
1448 src_buses = pl08x->mem_buses;
1449 dst_buses = plchan->cd->periph_buses;
1354 } else if (direction == DMA_DEV_TO_MEM) { 1450 } else if (direction == DMA_DEV_TO_MEM) {
1355 txd->cctl = plchan->src_cctl; 1451 cctl = PL080_CONTROL_DST_INCR;
1356 slave_addr = plchan->src_addr; 1452 slave_addr = plchan->cfg.src_addr;
1453 addr_width = plchan->cfg.src_addr_width;
1454 maxburst = plchan->cfg.src_maxburst;
1455 src_buses = plchan->cd->periph_buses;
1456 dst_buses = pl08x->mem_buses;
1357 } else { 1457 } else {
1358 pl08x_free_txd(pl08x, txd); 1458 pl08x_free_txd(pl08x, txd);
1359 dev_err(&pl08x->adev->dev, 1459 dev_err(&pl08x->adev->dev,
@@ -1361,7 +1461,17 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1361 return NULL; 1461 return NULL;
1362 } 1462 }
1363 1463
1364 if (plchan->device_fc) 1464 cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
1465 if (cctl == ~0) {
1466 pl08x_free_txd(pl08x, txd);
1467 dev_err(&pl08x->adev->dev,
1468 "DMA slave configuration botched?\n");
1469 return NULL;
1470 }
1471
1472 txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);
1473
1474 if (plchan->cfg.device_fc)
1365 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : 1475 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
1366 PL080_FLOW_PER2MEM_PER; 1476 PL080_FLOW_PER2MEM_PER;
1367 else 1477 else
@@ -1370,9 +1480,28 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1370 1480
1371 txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1481 txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1372 1482
1483 ret = pl08x_request_mux(plchan);
1484 if (ret < 0) {
1485 pl08x_free_txd(pl08x, txd);
1486 dev_dbg(&pl08x->adev->dev,
1487 "unable to mux for transfer on %s due to platform restrictions\n",
1488 plchan->name);
1489 return NULL;
1490 }
1491
1492 dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
1493 plchan->signal, plchan->name);
1494
1495 /* Assign the flow control signal to this channel */
1496 if (direction == DMA_MEM_TO_DEV)
1497 txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
1498 else
1499 txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
1500
1373 for_each_sg(sgl, sg, sg_len, tmp) { 1501 for_each_sg(sgl, sg, sg_len, tmp) {
1374 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); 1502 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
1375 if (!dsg) { 1503 if (!dsg) {
1504 pl08x_release_mux(plchan);
1376 pl08x_free_txd(pl08x, txd); 1505 pl08x_free_txd(pl08x, txd);
1377 dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n", 1506 dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
1378 __func__); 1507 __func__);
@@ -1390,11 +1519,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1390 } 1519 }
1391 } 1520 }
1392 1521
1393 ret = pl08x_prep_channel_resources(plchan, txd); 1522 ret = pl08x_fill_llis_for_desc(plchan->host, txd);
1394 if (ret) 1523 if (!ret) {
1524 pl08x_release_mux(plchan);
1525 pl08x_free_txd(pl08x, txd);
1395 return NULL; 1526 return NULL;
1527 }
1396 1528
1397 return &txd->tx; 1529 return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
1398} 1530}
1399 1531
1400static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1532static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
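Reviewer's note on the two prep routines above: the error paths now unwind in strict reverse order of acquisition. A mux claimed by pl08x_request_mux() must be dropped with pl08x_release_mux() before the txd is freed, and pl08x_fill_llis_for_desc() signals failure by returning 0, not a negative errno. A minimal sketch of that ordering, with hypothetical helper names standing in for the driver's own:

	ret = request_mux(plchan);		/* claim a request signal */
	if (ret < 0)
		goto err_free_txd;

	ret = fill_llis(host, txd);		/* returns 0 on failure */
	if (!ret)
		goto err_release_mux;

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);

err_release_mux:
	release_mux(plchan);			/* reverse of request_mux() */
err_free_txd:
	free_txd(host, txd);
	return NULL;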
@@ -1415,9 +1547,9 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1415 * Anything succeeds on channels with no physical allocation and 1547 * Anything succeeds on channels with no physical allocation and
1416 * no queued transfers. 1548 * no queued transfers.
1417 */ 1549 */
1418 spin_lock_irqsave(&plchan->lock, flags); 1550 spin_lock_irqsave(&plchan->vc.lock, flags);
1419 if (!plchan->phychan && !plchan->at) { 1551 if (!plchan->phychan && !plchan->at) {
1420 spin_unlock_irqrestore(&plchan->lock, flags); 1552 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1421 return 0; 1553 return 0;
1422 } 1554 }
1423 1555
@@ -1426,18 +1558,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1426 plchan->state = PL08X_CHAN_IDLE; 1558 plchan->state = PL08X_CHAN_IDLE;
1427 1559
1428 if (plchan->phychan) { 1560 if (plchan->phychan) {
1429 pl08x_terminate_phy_chan(pl08x, plchan->phychan);
1430
1431 /* 1561 /*
1432 * Mark physical channel as free and free any slave 1562 * Mark physical channel as free and free any slave
1433 * signal 1563 * signal
1434 */ 1564 */
1435 release_phy_channel(plchan); 1565 pl08x_phy_free(plchan);
1436 plchan->phychan_hold = 0;
1437 } 1566 }
1438 /* Dequeue jobs and free LLIs */ 1567 /* Dequeue jobs and free LLIs */
1439 if (plchan->at) { 1568 if (plchan->at) {
1440 pl08x_free_txd(pl08x, plchan->at); 1569 pl08x_desc_free(&plchan->at->vd);
1441 plchan->at = NULL; 1570 plchan->at = NULL;
1442 } 1571 }
1443 /* Dequeue jobs not yet fired as well */ 1572 /* Dequeue jobs not yet fired as well */
@@ -1457,7 +1586,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1457 break; 1586 break;
1458 } 1587 }
1459 1588
1460 spin_unlock_irqrestore(&plchan->lock, flags); 1589 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1461 1590
1462 return ret; 1591 return ret;
1463} 1592}
@@ -1494,123 +1623,6 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1494 writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); 1623 writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
1495} 1624}
1496 1625
1497static void pl08x_unmap_buffers(struct pl08x_txd *txd)
1498{
1499 struct device *dev = txd->tx.chan->device->dev;
1500 struct pl08x_sg *dsg;
1501
1502 if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
1503 if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
1504 list_for_each_entry(dsg, &txd->dsg_list, node)
1505 dma_unmap_single(dev, dsg->src_addr, dsg->len,
1506 DMA_TO_DEVICE);
1507 else {
1508 list_for_each_entry(dsg, &txd->dsg_list, node)
1509 dma_unmap_page(dev, dsg->src_addr, dsg->len,
1510 DMA_TO_DEVICE);
1511 }
1512 }
1513 if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
1514 if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
1515 list_for_each_entry(dsg, &txd->dsg_list, node)
1516 dma_unmap_single(dev, dsg->dst_addr, dsg->len,
1517 DMA_FROM_DEVICE);
1518 else
1519 list_for_each_entry(dsg, &txd->dsg_list, node)
1520 dma_unmap_page(dev, dsg->dst_addr, dsg->len,
1521 DMA_FROM_DEVICE);
1522 }
1523}
1524
1525static void pl08x_tasklet(unsigned long data)
1526{
1527 struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
1528 struct pl08x_driver_data *pl08x = plchan->host;
1529 struct pl08x_txd *txd;
1530 unsigned long flags;
1531
1532 spin_lock_irqsave(&plchan->lock, flags);
1533
1534 txd = plchan->at;
1535 plchan->at = NULL;
1536
1537 if (txd) {
1538 /* Update last completed */
1539 dma_cookie_complete(&txd->tx);
1540 }
1541
1542 /* If a new descriptor is queued, set it up plchan->at is NULL here */
1543 if (!list_empty(&plchan->pend_list)) {
1544 struct pl08x_txd *next;
1545
1546 next = list_first_entry(&plchan->pend_list,
1547 struct pl08x_txd,
1548 node);
1549 list_del(&next->node);
1550
1551 pl08x_start_txd(plchan, next);
1552 } else if (plchan->phychan_hold) {
1553 /*
1554 * This channel is still in use - we have a new txd being
1555 * prepared and will soon be queued. Don't give up the
1556 * physical channel.
1557 */
1558 } else {
1559 struct pl08x_dma_chan *waiting = NULL;
1560
1561 /*
1562 * No more jobs, so free up the physical channel
1563 * Free any allocated signal on slave transfers too
1564 */
1565 release_phy_channel(plchan);
1566 plchan->state = PL08X_CHAN_IDLE;
1567
1568 /*
1569 * And NOW before anyone else can grab that free:d up
1570 * physical channel, see if there is some memcpy pending
1571 * that seriously needs to start because of being stacked
1572 * up while we were choking the physical channels with data.
1573 */
1574 list_for_each_entry(waiting, &pl08x->memcpy.channels,
1575 chan.device_node) {
1576 if (waiting->state == PL08X_CHAN_WAITING &&
1577 waiting->waiting != NULL) {
1578 int ret;
1579
1580 /* This should REALLY not fail now */
1581 ret = prep_phy_channel(waiting,
1582 waiting->waiting);
1583 BUG_ON(ret);
1584 waiting->phychan_hold--;
1585 waiting->state = PL08X_CHAN_RUNNING;
1586 waiting->waiting = NULL;
1587 pl08x_issue_pending(&waiting->chan);
1588 break;
1589 }
1590 }
1591 }
1592
1593 spin_unlock_irqrestore(&plchan->lock, flags);
1594
1595 if (txd) {
1596 dma_async_tx_callback callback = txd->tx.callback;
1597 void *callback_param = txd->tx.callback_param;
1598
1599 /* Don't try to unmap buffers on slave channels */
1600 if (!plchan->slave)
1601 pl08x_unmap_buffers(txd);
1602
1603 /* Free the descriptor */
1604 spin_lock_irqsave(&plchan->lock, flags);
1605 pl08x_free_txd(pl08x, txd);
1606 spin_unlock_irqrestore(&plchan->lock, flags);
1607
1608 /* Callback to signal completion */
1609 if (callback)
1610 callback(callback_param);
1611 }
1612}
1613
1614static irqreturn_t pl08x_irq(int irq, void *dev) 1626static irqreturn_t pl08x_irq(int irq, void *dev)
1615{ 1627{
1616 struct pl08x_driver_data *pl08x = dev; 1628 struct pl08x_driver_data *pl08x = dev;
@@ -1635,6 +1647,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
1635 /* Locate physical channel */ 1647 /* Locate physical channel */
1636 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; 1648 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
1637 struct pl08x_dma_chan *plchan = phychan->serving; 1649 struct pl08x_dma_chan *plchan = phychan->serving;
1650 struct pl08x_txd *tx;
1638 1651
1639 if (!plchan) { 1652 if (!plchan) {
1640 dev_err(&pl08x->adev->dev, 1653 dev_err(&pl08x->adev->dev,
@@ -1643,8 +1656,29 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
1643 continue; 1656 continue;
1644 } 1657 }
1645 1658
1646 /* Schedule tasklet on this channel */ 1659 spin_lock(&plchan->vc.lock);
1647 tasklet_schedule(&plchan->tasklet); 1660 tx = plchan->at;
1661 if (tx) {
1662 plchan->at = NULL;
1663 /*
1664 * This descriptor is done, release its mux
1665 * reservation.
1666 */
1667 pl08x_release_mux(plchan);
1668 tx->done = true;
1669 vchan_cookie_complete(&tx->vd);
1670
1671 /*
1672 * And start the next descriptor (if any),
1673 * otherwise free this channel.
1674 */
1675 if (vchan_next_desc(&plchan->vc))
1676 pl08x_start_next_txd(plchan);
1677 else
1678 pl08x_phy_free(plchan);
1679 }
1680 spin_unlock(&plchan->vc.lock);
1681
1648 mask |= (1 << i); 1682 mask |= (1 << i);
1649 } 1683 }
1650 } 1684 }
@@ -1654,16 +1688,10 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
1654 1688
1655static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) 1689static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
1656{ 1690{
1657 u32 cctl = pl08x_cctl(chan->cd->cctl);
1658
1659 chan->slave = true; 1691 chan->slave = true;
1660 chan->name = chan->cd->bus_id; 1692 chan->name = chan->cd->bus_id;
1661 chan->src_addr = chan->cd->addr; 1693 chan->cfg.src_addr = chan->cd->addr;
1662 chan->dst_addr = chan->cd->addr; 1694 chan->cfg.dst_addr = chan->cd->addr;
1663 chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
1664 pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
1665 chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
1666 pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses);
1667} 1695}
1668 1696
1669/* 1697/*
@@ -1693,6 +1721,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1693 1721
1694 chan->host = pl08x; 1722 chan->host = pl08x;
1695 chan->state = PL08X_CHAN_IDLE; 1723 chan->state = PL08X_CHAN_IDLE;
1724 chan->signal = -1;
1696 1725
1697 if (slave) { 1726 if (slave) {
1698 chan->cd = &pl08x->pd->slave_channels[i]; 1727 chan->cd = &pl08x->pd->slave_channels[i];
@@ -1705,26 +1734,12 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1705 return -ENOMEM; 1734 return -ENOMEM;
1706 } 1735 }
1707 } 1736 }
1708 if (chan->cd->circular_buffer) {
1709 dev_err(&pl08x->adev->dev,
1710 "channel %s: circular buffers not supported\n",
1711 chan->name);
1712 kfree(chan);
1713 continue;
1714 }
1715 dev_dbg(&pl08x->adev->dev, 1737 dev_dbg(&pl08x->adev->dev,
1716 "initialize virtual channel \"%s\"\n", 1738 "initialize virtual channel \"%s\"\n",
1717 chan->name); 1739 chan->name);
1718 1740
1719 chan->chan.device = dmadev; 1741 chan->vc.desc_free = pl08x_desc_free;
1720 dma_cookie_init(&chan->chan); 1742 vchan_init(&chan->vc, dmadev);
1721
1722 spin_lock_init(&chan->lock);
1723 INIT_LIST_HEAD(&chan->pend_list);
1724 tasklet_init(&chan->tasklet, pl08x_tasklet,
1725 (unsigned long) chan);
1726
1727 list_add_tail(&chan->chan.device_node, &dmadev->channels);
1728 } 1743 }
1729 dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", 1744 dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
1730 i, slave ? "slave" : "memcpy"); 1745 i, slave ? "slave" : "memcpy");
@@ -1737,8 +1752,8 @@ static void pl08x_free_virtual_channels(struct dma_device *dmadev)
1737 struct pl08x_dma_chan *next; 1752 struct pl08x_dma_chan *next;
1738 1753
1739 list_for_each_entry_safe(chan, 1754 list_for_each_entry_safe(chan,
1740 next, &dmadev->channels, chan.device_node) { 1755 next, &dmadev->channels, vc.chan.device_node) {
1741 list_del(&chan->chan.device_node); 1756 list_del(&chan->vc.chan.device_node);
1742 kfree(chan); 1757 kfree(chan);
1743 } 1758 }
1744} 1759}
@@ -1791,7 +1806,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
1791 seq_printf(s, "\nPL08x virtual memcpy channels:\n"); 1806 seq_printf(s, "\nPL08x virtual memcpy channels:\n");
1792 seq_printf(s, "CHANNEL:\tSTATE:\n"); 1807 seq_printf(s, "CHANNEL:\tSTATE:\n");
1793 seq_printf(s, "--------\t------\n"); 1808 seq_printf(s, "--------\t------\n");
1794 list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { 1809 list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) {
1795 seq_printf(s, "%s\t\t%s\n", chan->name, 1810 seq_printf(s, "%s\t\t%s\n", chan->name,
1796 pl08x_state_str(chan->state)); 1811 pl08x_state_str(chan->state));
1797 } 1812 }
@@ -1799,7 +1814,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
1799 seq_printf(s, "\nPL08x virtual slave channels:\n"); 1814 seq_printf(s, "\nPL08x virtual slave channels:\n");
1800 seq_printf(s, "CHANNEL:\tSTATE:\n"); 1815 seq_printf(s, "CHANNEL:\tSTATE:\n");
1801 seq_printf(s, "--------\t------\n"); 1816 seq_printf(s, "--------\t------\n");
1802 list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { 1817 list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
1803 seq_printf(s, "%s\t\t%s\n", chan->name, 1818 seq_printf(s, "%s\t\t%s\n", chan->name,
1804 pl08x_state_str(chan->state)); 1819 pl08x_state_str(chan->state));
1805 } 1820 }
@@ -1851,9 +1866,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1851 goto out_no_pl08x; 1866 goto out_no_pl08x;
1852 } 1867 }
1853 1868
1854 pm_runtime_set_active(&adev->dev);
1855 pm_runtime_enable(&adev->dev);
1856
1857 /* Initialize memcpy engine */ 1869 /* Initialize memcpy engine */
1858 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); 1870 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
1859 pl08x->memcpy.dev = &adev->dev; 1871 pl08x->memcpy.dev = &adev->dev;
@@ -1903,8 +1915,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1903 goto out_no_lli_pool; 1915 goto out_no_lli_pool;
1904 } 1916 }
1905 1917
1906 spin_lock_init(&pl08x->lock);
1907
1908 pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); 1918 pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
1909 if (!pl08x->base) { 1919 if (!pl08x->base) {
1910 ret = -ENOMEM; 1920 ret = -ENOMEM;
@@ -1942,7 +1952,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1942 ch->id = i; 1952 ch->id = i;
1943 ch->base = pl08x->base + PL080_Cx_BASE(i); 1953 ch->base = pl08x->base + PL080_Cx_BASE(i);
1944 spin_lock_init(&ch->lock); 1954 spin_lock_init(&ch->lock);
1945 ch->signal = -1;
1946 1955
1947 /* 1956 /*
1948 * Nomadik variants can have channels that are locked 1957 * Nomadik variants can have channels that are locked
@@ -2007,7 +2016,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2007 amba_part(adev), amba_rev(adev), 2016 amba_part(adev), amba_rev(adev),
2008 (unsigned long long)adev->res.start, adev->irq[0]); 2017 (unsigned long long)adev->res.start, adev->irq[0]);
2009 2018
2010 pm_runtime_put(&adev->dev);
2011 return 0; 2019 return 0;
2012 2020
2013out_no_slave_reg: 2021out_no_slave_reg:
@@ -2026,9 +2034,6 @@ out_no_ioremap:
2026 dma_pool_destroy(pl08x->pool); 2034 dma_pool_destroy(pl08x->pool);
2027out_no_lli_pool: 2035out_no_lli_pool:
2028out_no_platdata: 2036out_no_platdata:
2029 pm_runtime_put(&adev->dev);
2030 pm_runtime_disable(&adev->dev);
2031
2032 kfree(pl08x); 2037 kfree(pl08x);
2033out_no_pl08x: 2038out_no_pl08x:
2034 amba_release_regions(adev); 2039 amba_release_regions(adev);
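Taken together, the pl08x hunks above convert the driver to the virt-dma helpers in drivers/dma/virt-dma.h: the per-channel spinlock, pend_list and completion tasklet disappear because a struct virt_dma_chan (the ->vc member) now supplies the lock, the descriptor lists and the callback tasklet, and each descriptor embeds a struct virt_dma_desc (->vd). A condensed sketch of the pattern, using only calls that appear in the diff:

	/* channel setup: the vchan owns lock, lists and tasklet */
	chan->vc.desc_free = pl08x_desc_free;	/* invoked when a desc retires */
	vchan_init(&chan->vc, dmadev);

	/* prep: hand the descriptor to the vchan instead of open-coding
	 * cookie assignment on &txd->tx */
	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);

	/* interrupt: complete under vc.lock; the vchan tasklet then runs
	 * the client callback */
	spin_lock(&plchan->vc.lock);
	vchan_cookie_complete(&tx->vd);
	if (vchan_next_desc(&plchan->vc))
		pl08x_start_next_txd(plchan);
	else
		pl08x_phy_free(plchan);
	spin_unlock(&plchan->vc.lock);

This is also why pl08x_tasklet() could be deleted wholesale: completion dispatch moves into the generic vchan tasklet, with per-descriptor cleanup routed through the ->desc_free hook.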
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7292aa87b2dd..3934fcc4e00b 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -9,10 +9,9 @@
9 * (at your option) any later version. 9 * (at your option) any later version.
10 * 10 *
11 * 11 *
12 * This supports the Atmel AHB DMA Controller, 12 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
13 * 13 * The only Atmel DMA Controller that is not covered by this driver is the one
14 * The driver has currently been tested with the Atmel AT91SAM9RL 14 * found on AT91SAM9263.
15 * and AT91SAM9G45 series.
16 */ 15 */
17 16
18#include <linux/clk.h> 17#include <linux/clk.h>
@@ -1217,7 +1216,7 @@ static const struct platform_device_id atdma_devtypes[] = {
1217 } 1216 }
1218}; 1217};
1219 1218
1220static inline struct at_dma_platform_data * __init at_dma_get_driver_data( 1219static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
1221 struct platform_device *pdev) 1220 struct platform_device *pdev)
1222{ 1221{
1223 if (pdev->dev.of_node) { 1222 if (pdev->dev.of_node) {
@@ -1255,7 +1254,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
1255 int irq; 1254 int irq;
1256 int err; 1255 int err;
1257 int i; 1256 int i;
1258 struct at_dma_platform_data *plat_dat; 1257 const struct at_dma_platform_data *plat_dat;
1259 1258
1260 /* setup platform data for each SoC */ 1259 /* setup platform data for each SoC */
1261 dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask); 1260 dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
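The at_hdmac change is small but worth spelling out: at_dma_get_driver_data() now returns a const pointer, which lets the per-SoC configuration tables live in rodata while the probe path stays unchanged. A sketch of the resulting shape (the field value shown is illustrative, not taken from the patch):

	static const struct at_dma_platform_data at91sam9rl_config = {
		.nr_channels = 2,	/* illustrative value */
	};

	const struct at_dma_platform_data *plat_dat;

	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;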
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index e67b4e06a918..aa384e53b7ac 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1438,34 +1438,32 @@ static int __init coh901318_probe(struct platform_device *pdev)
1438 1438
1439 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1439 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1440 if (!io) 1440 if (!io)
1441 goto err_get_resource; 1441 return -ENODEV;
1442 1442
1443 /* Map DMA controller registers to virtual memory */ 1443 /* Map DMA controller registers to virtual memory */
1444 if (request_mem_region(io->start, 1444 if (devm_request_mem_region(&pdev->dev,
1445 resource_size(io), 1445 io->start,
1446 pdev->dev.driver->name) == NULL) { 1446 resource_size(io),
1447 err = -EBUSY; 1447 pdev->dev.driver->name) == NULL)
1448 goto err_request_mem; 1448 return -ENOMEM;
1449 }
1450 1449
1451 pdata = pdev->dev.platform_data; 1450 pdata = pdev->dev.platform_data;
1452 if (!pdata) 1451 if (!pdata)
1453 goto err_no_platformdata; 1452 return -ENODEV;
1454 1453
1455 base = kmalloc(ALIGN(sizeof(struct coh901318_base), 4) + 1454 base = devm_kzalloc(&pdev->dev,
1456 pdata->max_channels * 1455 ALIGN(sizeof(struct coh901318_base), 4) +
1457 sizeof(struct coh901318_chan), 1456 pdata->max_channels *
1458 GFP_KERNEL); 1457 sizeof(struct coh901318_chan),
1458 GFP_KERNEL);
1459 if (!base) 1459 if (!base)
1460 goto err_alloc_coh_dma_channels; 1460 return -ENOMEM;
1461 1461
1462 base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4); 1462 base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4);
1463 1463
1464 base->virtbase = ioremap(io->start, resource_size(io)); 1464 base->virtbase = devm_ioremap(&pdev->dev, io->start, resource_size(io));
1465 if (!base->virtbase) { 1465 if (!base->virtbase)
1466 err = -ENOMEM; 1466 return -ENOMEM;
1467 goto err_no_ioremap;
1468 }
1469 1467
1470 base->dev = &pdev->dev; 1468 base->dev = &pdev->dev;
1471 base->platform = pdata; 1469 base->platform = pdata;
@@ -1474,25 +1472,20 @@ static int __init coh901318_probe(struct platform_device *pdev)
1474 1472
1475 COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base); 1473 COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base);
1476 1474
1477 platform_set_drvdata(pdev, base);
1478
1479 irq = platform_get_irq(pdev, 0); 1475 irq = platform_get_irq(pdev, 0);
1480 if (irq < 0) 1476 if (irq < 0)
1481 goto err_no_irq; 1477 return irq;
1482 1478
1483 err = request_irq(irq, dma_irq_handler, IRQF_DISABLED, 1479 err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED,
1484 "coh901318", base); 1480 "coh901318", base);
1485 if (err) { 1481 if (err)
1486 dev_crit(&pdev->dev, 1482 return err;
1487 "Cannot allocate IRQ for DMA controller!\n");
1488 goto err_request_irq;
1489 }
1490 1483
1491 err = coh901318_pool_create(&base->pool, &pdev->dev, 1484 err = coh901318_pool_create(&base->pool, &pdev->dev,
1492 sizeof(struct coh901318_lli), 1485 sizeof(struct coh901318_lli),
1493 32); 1486 32);
1494 if (err) 1487 if (err)
1495 goto err_pool_create; 1488 return err;
1496 1489
1497 /* init channels for device transfers */ 1490 /* init channels for device transfers */
1498 coh901318_base_init(&base->dma_slave, base->platform->chans_slave, 1491 coh901318_base_init(&base->dma_slave, base->platform->chans_slave,
@@ -1538,6 +1531,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
1538 if (err) 1531 if (err)
1539 goto err_register_memcpy; 1532 goto err_register_memcpy;
1540 1533
1534 platform_set_drvdata(pdev, base);
1541 dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n", 1535 dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
1542 (u32) base->virtbase); 1536 (u32) base->virtbase);
1543 1537
@@ -1547,19 +1541,6 @@ static int __init coh901318_probe(struct platform_device *pdev)
1547 dma_async_device_unregister(&base->dma_slave); 1541 dma_async_device_unregister(&base->dma_slave);
1548 err_register_slave: 1542 err_register_slave:
1549 coh901318_pool_destroy(&base->pool); 1543 coh901318_pool_destroy(&base->pool);
1550 err_pool_create:
1551 free_irq(platform_get_irq(pdev, 0), base);
1552 err_request_irq:
1553 err_no_irq:
1554 iounmap(base->virtbase);
1555 err_no_ioremap:
1556 kfree(base);
1557 err_alloc_coh_dma_channels:
1558 err_no_platformdata:
1559 release_mem_region(pdev->resource->start,
1560 resource_size(pdev->resource));
1561 err_request_mem:
1562 err_get_resource:
1563 return err; 1544 return err;
1564} 1545}
1565 1546
@@ -1570,11 +1551,6 @@ static int __exit coh901318_remove(struct platform_device *pdev)
1570 dma_async_device_unregister(&base->dma_memcpy); 1551 dma_async_device_unregister(&base->dma_memcpy);
1571 dma_async_device_unregister(&base->dma_slave); 1552 dma_async_device_unregister(&base->dma_slave);
1572 coh901318_pool_destroy(&base->pool); 1553 coh901318_pool_destroy(&base->pool);
1573 free_irq(platform_get_irq(pdev, 0), base);
1574 iounmap(base->virtbase);
1575 kfree(base);
1576 release_mem_region(pdev->resource->start,
1577 resource_size(pdev->resource));
1578 return 0; 1554 return 0;
1579} 1555}
1580 1556
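The coh901318 rewrite above is a straight conversion to device-managed resources: devm_request_mem_region(), devm_kzalloc(), devm_ioremap() and devm_request_irq() are all released automatically by the driver core when probe fails or the device detaches, which is what lets the goto ladder and the matching teardown in coh901318_remove() be deleted. Note the allocation also changes from kmalloc() to a zeroing devm_kzalloc(). A minimal sketch of the idiom:

	base = devm_kzalloc(&pdev->dev, sizeof(*base), GFP_KERNEL);
	if (!base)
		return -ENOMEM;	/* nothing to unwind */

	base->virtbase = devm_ioremap(&pdev->dev, io->start,
				      resource_size(io));
	if (!base->virtbase)
		return -ENOMEM;	/* devm frees 'base' for us */

	err = devm_request_irq(&pdev->dev, irq, dma_irq_handler,
			       IRQF_DISABLED, "coh901318", base);
	if (err)
		return err;	/* mapping and allocation unwound by devm */

One behavioural detail shown in the diff: platform_set_drvdata() moves to the end of probe, after the last step that can fail, so drvdata is only published once the device is fully initialized.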
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 2397f6f451b1..3491654cdf7b 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -45,6 +45,8 @@
45 * See Documentation/dmaengine.txt for more details 45 * See Documentation/dmaengine.txt for more details
46 */ 46 */
47 47
48#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
49
48#include <linux/dma-mapping.h> 50#include <linux/dma-mapping.h>
49#include <linux/init.h> 51#include <linux/init.h>
50#include <linux/module.h> 52#include <linux/module.h>
@@ -261,7 +263,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
261 do { 263 do {
262 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 264 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
263 if (time_after_eq(jiffies, dma_sync_wait_timeout)) { 265 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
264 printk(KERN_ERR "dma_sync_wait_timeout!\n"); 266 pr_err("%s: timeout!\n", __func__);
265 return DMA_ERROR; 267 return DMA_ERROR;
266 } 268 }
267 } while (status == DMA_IN_PROGRESS); 269 } while (status == DMA_IN_PROGRESS);
@@ -312,7 +314,7 @@ static int __init dma_channel_table_init(void)
312 } 314 }
313 315
314 if (err) { 316 if (err) {
315 pr_err("dmaengine: initialization failure\n"); 317 pr_err("initialization failure\n");
316 for_each_dma_cap_mask(cap, dma_cap_mask_all) 318 for_each_dma_cap_mask(cap, dma_cap_mask_all)
317 if (channel_table[cap]) 319 if (channel_table[cap])
318 free_percpu(channel_table[cap]); 320 free_percpu(channel_table[cap]);
@@ -520,12 +522,12 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
520 err = dma_chan_get(chan); 522 err = dma_chan_get(chan);
521 523
522 if (err == -ENODEV) { 524 if (err == -ENODEV) {
523 pr_debug("%s: %s module removed\n", __func__, 525 pr_debug("%s: %s module removed\n",
524 dma_chan_name(chan)); 526 __func__, dma_chan_name(chan));
525 list_del_rcu(&device->global_node); 527 list_del_rcu(&device->global_node);
526 } else if (err) 528 } else if (err)
527 pr_debug("%s: failed to get %s: (%d)\n", 529 pr_debug("%s: failed to get %s: (%d)\n",
528 __func__, dma_chan_name(chan), err); 530 __func__, dma_chan_name(chan), err);
529 else 531 else
530 break; 532 break;
531 if (--device->privatecnt == 0) 533 if (--device->privatecnt == 0)
@@ -535,7 +537,9 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
535 } 537 }
536 mutex_unlock(&dma_list_mutex); 538 mutex_unlock(&dma_list_mutex);
537 539
538 pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail", 540 pr_debug("%s: %s (%s)\n",
541 __func__,
542 chan ? "success" : "fail",
539 chan ? dma_chan_name(chan) : NULL); 543 chan ? dma_chan_name(chan) : NULL);
540 544
541 return chan; 545 return chan;
@@ -579,7 +583,7 @@ void dmaengine_get(void)
579 break; 583 break;
580 } else if (err) 584 } else if (err)
581 pr_err("%s: failed to get %s: (%d)\n", 585 pr_err("%s: failed to get %s: (%d)\n",
582 __func__, dma_chan_name(chan), err); 586 __func__, dma_chan_name(chan), err);
583 } 587 }
584 } 588 }
585 589
@@ -1015,7 +1019,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1015 while (tx->cookie == -EBUSY) { 1019 while (tx->cookie == -EBUSY) {
1016 if (time_after_eq(jiffies, dma_sync_wait_timeout)) { 1020 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1017 pr_err("%s timeout waiting for descriptor submission\n", 1021 pr_err("%s timeout waiting for descriptor submission\n",
1018 __func__); 1022 __func__);
1019 return DMA_ERROR; 1023 return DMA_ERROR;
1020 } 1024 }
1021 cpu_relax(); 1025 cpu_relax();
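The dmaengine.c changes are purely about message hygiene: defining pr_fmt() before the first include makes every pr_err()/pr_debug() in the file carry the module name automatically, which is why the hand-written "dmaengine: " prefixes are dropped and the messages switch to the "%s: ..." / __func__ convention. The mechanism in miniature:

	/* must be defined before any header that pulls in printk.h */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>

	pr_err("%s: timeout!\n", __func__);
	/* prints: dmaengine: dma_sync_wait: timeout! */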
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index e23dc82d43ac..d3c5a5a88f1e 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -105,13 +105,13 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
105 105
106 spin_lock_irqsave(&dwc->lock, flags); 106 spin_lock_irqsave(&dwc->lock, flags);
107 list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { 107 list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
108 i++;
108 if (async_tx_test_ack(&desc->txd)) { 109 if (async_tx_test_ack(&desc->txd)) {
109 list_del(&desc->desc_node); 110 list_del(&desc->desc_node);
110 ret = desc; 111 ret = desc;
111 break; 112 break;
112 } 113 }
113 dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); 114 dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
114 i++;
115 } 115 }
116 spin_unlock_irqrestore(&dwc->lock, flags); 116 spin_unlock_irqrestore(&dwc->lock, flags);
117 117
@@ -191,6 +191,42 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
191 191
192/*----------------------------------------------------------------------*/ 192/*----------------------------------------------------------------------*/
193 193
194static inline unsigned int dwc_fast_fls(unsigned long long v)
195{
196 /*
197 * We can be a lot more clever here, but this should take care
198 * of the most common optimization.
199 */
200 if (!(v & 7))
201 return 3;
202 else if (!(v & 3))
203 return 2;
204 else if (!(v & 1))
205 return 1;
206 return 0;
207}
208
209static void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
210{
211 dev_err(chan2dev(&dwc->chan),
212 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
213 channel_readl(dwc, SAR),
214 channel_readl(dwc, DAR),
215 channel_readl(dwc, LLP),
216 channel_readl(dwc, CTL_HI),
217 channel_readl(dwc, CTL_LO));
218}
219
220
221static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
222{
223 channel_clear_bit(dw, CH_EN, dwc->mask);
224 while (dma_readl(dw, CH_EN) & dwc->mask)
225 cpu_relax();
226}
227
228/*----------------------------------------------------------------------*/
229
194/* Called with dwc->lock held and bh disabled */ 230/* Called with dwc->lock held and bh disabled */
195static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) 231static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
196{ 232{
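dwc_fast_fls(), despite the name, is a trailing-zeros count capped at 3: it returns the largest n <= 3 such that 2^n divides its argument, and the callers feed it src | dest | len so the result is a transfer width that every address and the length can honour. Worked example (values chosen for illustration): src = 0x1000, dest = 0x2008, len = 0x100 gives src | dest | len = 0x3108, whose low three bits are all clear, so the helper returns 3 and the transfer runs at 2^3 = 8 bytes (64 bits) per beat. For nonzero v it is conceptually

	width = min_t(unsigned int, __ffs64(v), 3);

though the open-coded ladder sidesteps any question of __ffs64(0) being undefined.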
@@ -200,13 +236,7 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
200 if (dma_readl(dw, CH_EN) & dwc->mask) { 236 if (dma_readl(dw, CH_EN) & dwc->mask) {
201 dev_err(chan2dev(&dwc->chan), 237 dev_err(chan2dev(&dwc->chan),
202 "BUG: Attempted to start non-idle channel\n"); 238 "BUG: Attempted to start non-idle channel\n");
203 dev_err(chan2dev(&dwc->chan), 239 dwc_dump_chan_regs(dwc);
204 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
205 channel_readl(dwc, SAR),
206 channel_readl(dwc, DAR),
207 channel_readl(dwc, LLP),
208 channel_readl(dwc, CTL_HI),
209 channel_readl(dwc, CTL_LO));
210 240
211 /* The tasklet will hopefully advance the queue... */ 241 /* The tasklet will hopefully advance the queue... */
212 return; 242 return;
@@ -290,9 +320,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
290 "BUG: XFER bit set, but channel not idle!\n"); 320 "BUG: XFER bit set, but channel not idle!\n");
291 321
292 /* Try to continue after resetting the channel... */ 322 /* Try to continue after resetting the channel... */
293 channel_clear_bit(dw, CH_EN, dwc->mask); 323 dwc_chan_disable(dw, dwc);
294 while (dma_readl(dw, CH_EN) & dwc->mask)
295 cpu_relax();
296 } 324 }
297 325
298 /* 326 /*
@@ -337,7 +365,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
337 return; 365 return;
338 } 366 }
339 367
340 dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); 368 dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
369 (unsigned long long)llp);
341 370
342 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { 371 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
343 /* check first descriptors addr */ 372 /* check first descriptors addr */
@@ -373,9 +402,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
373 "BUG: All descriptors done, but channel not idle!\n"); 402 "BUG: All descriptors done, but channel not idle!\n");
374 403
375 /* Try to continue after resetting the channel... */ 404 /* Try to continue after resetting the channel... */
376 channel_clear_bit(dw, CH_EN, dwc->mask); 405 dwc_chan_disable(dw, dwc);
377 while (dma_readl(dw, CH_EN) & dwc->mask)
378 cpu_relax();
379 406
380 if (!list_empty(&dwc->queue)) { 407 if (!list_empty(&dwc->queue)) {
381 list_move(dwc->queue.next, &dwc->active_list); 408 list_move(dwc->queue.next, &dwc->active_list);
@@ -384,12 +411,11 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
384 spin_unlock_irqrestore(&dwc->lock, flags); 411 spin_unlock_irqrestore(&dwc->lock, flags);
385} 412}
386 413
387static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) 414static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
388{ 415{
389 dev_printk(KERN_CRIT, chan2dev(&dwc->chan), 416 dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
390 " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", 417 " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
391 lli->sar, lli->dar, lli->llp, 418 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
392 lli->ctlhi, lli->ctllo);
393} 419}
394 420
395static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) 421static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -487,17 +513,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
487 513
488 spin_lock_irqsave(&dwc->lock, flags); 514 spin_lock_irqsave(&dwc->lock, flags);
489 515
490 dev_err(chan2dev(&dwc->chan), 516 dwc_dump_chan_regs(dwc);
491 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
492 channel_readl(dwc, SAR),
493 channel_readl(dwc, DAR),
494 channel_readl(dwc, LLP),
495 channel_readl(dwc, CTL_HI),
496 channel_readl(dwc, CTL_LO));
497 517
498 channel_clear_bit(dw, CH_EN, dwc->mask); 518 dwc_chan_disable(dw, dwc);
499 while (dma_readl(dw, CH_EN) & dwc->mask)
500 cpu_relax();
501 519
502 /* make sure DMA does not restart by loading a new list */ 520 /* make sure DMA does not restart by loading a new list */
503 channel_writel(dwc, LLP, 0); 521 channel_writel(dwc, LLP, 0);
@@ -527,7 +545,7 @@ static void dw_dma_tasklet(unsigned long data)
527 status_xfer = dma_readl(dw, RAW.XFER); 545 status_xfer = dma_readl(dw, RAW.XFER);
528 status_err = dma_readl(dw, RAW.ERROR); 546 status_err = dma_readl(dw, RAW.ERROR);
529 547
530 dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err); 548 dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
531 549
532 for (i = 0; i < dw->dma.chancnt; i++) { 550 for (i = 0; i < dw->dma.chancnt; i++) {
533 dwc = &dw->chan[i]; 551 dwc = &dw->chan[i];
@@ -551,7 +569,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
551 struct dw_dma *dw = dev_id; 569 struct dw_dma *dw = dev_id;
552 u32 status; 570 u32 status;
553 571
554 dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n", 572 dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
555 dma_readl(dw, STATUS_INT)); 573 dma_readl(dw, STATUS_INT));
556 574
557 /* 575 /*
@@ -597,12 +615,12 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
597 * for DMA. But this is hard to do in a race-free manner. 615 * for DMA. But this is hard to do in a race-free manner.
598 */ 616 */
599 if (list_empty(&dwc->active_list)) { 617 if (list_empty(&dwc->active_list)) {
600 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", 618 dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
601 desc->txd.cookie); 619 desc->txd.cookie);
602 list_add_tail(&desc->desc_node, &dwc->active_list); 620 list_add_tail(&desc->desc_node, &dwc->active_list);
603 dwc_dostart(dwc, dwc_first_active(dwc)); 621 dwc_dostart(dwc, dwc_first_active(dwc));
604 } else { 622 } else {
605 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", 623 dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
606 desc->txd.cookie); 624 desc->txd.cookie);
607 625
608 list_add_tail(&desc->desc_node, &dwc->queue); 626 list_add_tail(&desc->desc_node, &dwc->queue);
@@ -627,26 +645,17 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
627 unsigned int dst_width; 645 unsigned int dst_width;
628 u32 ctllo; 646 u32 ctllo;
629 647
630 dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n", 648 dev_vdbg(chan2dev(chan),
631 dest, src, len, flags); 649 "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
650 (unsigned long long)dest, (unsigned long long)src,
651 len, flags);
632 652
633 if (unlikely(!len)) { 653 if (unlikely(!len)) {
634 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); 654 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
635 return NULL; 655 return NULL;
636 } 656 }
637 657
638 /* 658 src_width = dst_width = dwc_fast_fls(src | dest | len);
639 * We can be a lot more clever here, but this should take care
640 * of the most common optimization.
641 */
642 if (!((src | dest | len) & 7))
643 src_width = dst_width = 3;
644 else if (!((src | dest | len) & 3))
645 src_width = dst_width = 2;
646 else if (!((src | dest | len) & 1))
647 src_width = dst_width = 1;
648 else
649 src_width = dst_width = 0;
650 659
651 ctllo = DWC_DEFAULT_CTLLO(chan) 660 ctllo = DWC_DEFAULT_CTLLO(chan)
652 | DWC_CTLL_DST_WIDTH(dst_width) 661 | DWC_CTLL_DST_WIDTH(dst_width)
@@ -720,7 +729,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
720 struct scatterlist *sg; 729 struct scatterlist *sg;
721 size_t total_len = 0; 730 size_t total_len = 0;
722 731
723 dev_vdbg(chan2dev(chan), "prep_dma_slave\n"); 732 dev_vdbg(chan2dev(chan), "%s\n", __func__);
724 733
725 if (unlikely(!dws || !sg_len)) 734 if (unlikely(!dws || !sg_len))
726 return NULL; 735 return NULL;
@@ -746,14 +755,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
746 mem = sg_dma_address(sg); 755 mem = sg_dma_address(sg);
747 len = sg_dma_len(sg); 756 len = sg_dma_len(sg);
748 757
749 if (!((mem | len) & 7)) 758 mem_width = dwc_fast_fls(mem | len);
750 mem_width = 3;
751 else if (!((mem | len) & 3))
752 mem_width = 2;
753 else if (!((mem | len) & 1))
754 mem_width = 1;
755 else
756 mem_width = 0;
757 759
758slave_sg_todev_fill_desc: 760slave_sg_todev_fill_desc:
759 desc = dwc_desc_get(dwc); 761 desc = dwc_desc_get(dwc);
@@ -813,14 +815,7 @@ slave_sg_todev_fill_desc:
813 mem = sg_dma_address(sg); 815 mem = sg_dma_address(sg);
814 len = sg_dma_len(sg); 816 len = sg_dma_len(sg);
815 817
816 if (!((mem | len) & 7)) 818 mem_width = dwc_fast_fls(mem | len);
817 mem_width = 3;
818 else if (!((mem | len) & 3))
819 mem_width = 2;
820 else if (!((mem | len) & 1))
821 mem_width = 1;
822 else
823 mem_width = 0;
824 819
825slave_sg_fromdev_fill_desc: 820slave_sg_fromdev_fill_desc:
826 desc = dwc_desc_get(dwc); 821 desc = dwc_desc_get(dwc);
@@ -950,9 +945,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
950 } else if (cmd == DMA_TERMINATE_ALL) { 945 } else if (cmd == DMA_TERMINATE_ALL) {
951 spin_lock_irqsave(&dwc->lock, flags); 946 spin_lock_irqsave(&dwc->lock, flags);
952 947
953 channel_clear_bit(dw, CH_EN, dwc->mask); 948 dwc_chan_disable(dw, dwc);
954 while (dma_readl(dw, CH_EN) & dwc->mask)
955 cpu_relax();
956 949
957 dwc->paused = false; 950 dwc->paused = false;
958 951
@@ -1014,7 +1007,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
1014 int i; 1007 int i;
1015 unsigned long flags; 1008 unsigned long flags;
1016 1009
1017 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); 1010 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1018 1011
1019 /* ASSERT: channel is idle */ 1012 /* ASSERT: channel is idle */
1020 if (dma_readl(dw, CH_EN) & dwc->mask) { 1013 if (dma_readl(dw, CH_EN) & dwc->mask) {
@@ -1057,8 +1050,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
1057 1050
1058 spin_unlock_irqrestore(&dwc->lock, flags); 1051 spin_unlock_irqrestore(&dwc->lock, flags);
1059 1052
1060 dev_dbg(chan2dev(chan), 1053 dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
1061 "alloc_chan_resources allocated %d descriptors\n", i);
1062 1054
1063 return i; 1055 return i;
1064} 1056}
@@ -1071,7 +1063,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
1071 unsigned long flags; 1063 unsigned long flags;
1072 LIST_HEAD(list); 1064 LIST_HEAD(list);
1073 1065
1074 dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", 1066 dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
1075 dwc->descs_allocated); 1067 dwc->descs_allocated);
1076 1068
1077 /* ASSERT: channel is idle */ 1069 /* ASSERT: channel is idle */
@@ -1097,7 +1089,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
1097 kfree(desc); 1089 kfree(desc);
1098 } 1090 }
1099 1091
1100 dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); 1092 dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
1101} 1093}
1102 1094
1103/* --------------------- Cyclic DMA API extensions -------------------- */ 1095/* --------------------- Cyclic DMA API extensions -------------------- */
@@ -1126,13 +1118,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
1126 if (dma_readl(dw, CH_EN) & dwc->mask) { 1118 if (dma_readl(dw, CH_EN) & dwc->mask) {
1127 dev_err(chan2dev(&dwc->chan), 1119 dev_err(chan2dev(&dwc->chan),
1128 "BUG: Attempted to start non-idle channel\n"); 1120 "BUG: Attempted to start non-idle channel\n");
1129 dev_err(chan2dev(&dwc->chan), 1121 dwc_dump_chan_regs(dwc);
1130 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
1131 channel_readl(dwc, SAR),
1132 channel_readl(dwc, DAR),
1133 channel_readl(dwc, LLP),
1134 channel_readl(dwc, CTL_HI),
1135 channel_readl(dwc, CTL_LO));
1136 spin_unlock_irqrestore(&dwc->lock, flags); 1122 spin_unlock_irqrestore(&dwc->lock, flags);
1137 return -EBUSY; 1123 return -EBUSY;
1138 } 1124 }
@@ -1167,9 +1153,7 @@ void dw_dma_cyclic_stop(struct dma_chan *chan)
1167 1153
1168 spin_lock_irqsave(&dwc->lock, flags); 1154 spin_lock_irqsave(&dwc->lock, flags);
1169 1155
1170 channel_clear_bit(dw, CH_EN, dwc->mask); 1156 dwc_chan_disable(dw, dwc);
1171 while (dma_readl(dw, CH_EN) & dwc->mask)
1172 cpu_relax();
1173 1157
1174 spin_unlock_irqrestore(&dwc->lock, flags); 1158 spin_unlock_irqrestore(&dwc->lock, flags);
1175} 1159}
@@ -1308,9 +1292,9 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1308 dma_sync_single_for_device(chan2parent(chan), last->txd.phys, 1292 dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
1309 sizeof(last->lli), DMA_TO_DEVICE); 1293 sizeof(last->lli), DMA_TO_DEVICE);
1310 1294
1311 dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu " 1295 dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
1312 "period %zu periods %d\n", buf_addr, buf_len, 1296 "period %zu periods %d\n", (unsigned long long)buf_addr,
1313 period_len, periods); 1297 buf_len, period_len, periods);
1314 1298
1315 cdesc->periods = periods; 1299 cdesc->periods = periods;
1316 dwc->cdesc = cdesc; 1300 dwc->cdesc = cdesc;
@@ -1340,16 +1324,14 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
1340 int i; 1324 int i;
1341 unsigned long flags; 1325 unsigned long flags;
1342 1326
1343 dev_dbg(chan2dev(&dwc->chan), "cyclic free\n"); 1327 dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
1344 1328
1345 if (!cdesc) 1329 if (!cdesc)
1346 return; 1330 return;
1347 1331
1348 spin_lock_irqsave(&dwc->lock, flags); 1332 spin_lock_irqsave(&dwc->lock, flags);
1349 1333
1350 channel_clear_bit(dw, CH_EN, dwc->mask); 1334 dwc_chan_disable(dw, dwc);
1351 while (dma_readl(dw, CH_EN) & dwc->mask)
1352 cpu_relax();
1353 1335
1354 dma_writel(dw, CLEAR.ERROR, dwc->mask); 1336 dma_writel(dw, CLEAR.ERROR, dwc->mask);
1355 dma_writel(dw, CLEAR.XFER, dwc->mask); 1337 dma_writel(dw, CLEAR.XFER, dwc->mask);
@@ -1386,7 +1368,7 @@ static void dw_dma_off(struct dw_dma *dw)
1386 dw->chan[i].initialized = false; 1368 dw->chan[i].initialized = false;
1387} 1369}
1388 1370
1389static int __init dw_probe(struct platform_device *pdev) 1371static int __devinit dw_probe(struct platform_device *pdev)
1390{ 1372{
1391 struct dw_dma_platform_data *pdata; 1373 struct dw_dma_platform_data *pdata;
1392 struct resource *io; 1374 struct resource *io;
@@ -1432,9 +1414,15 @@ static int __init dw_probe(struct platform_device *pdev)
1432 } 1414 }
1433 clk_prepare_enable(dw->clk); 1415 clk_prepare_enable(dw->clk);
1434 1416
1417 /* Calculate all channel mask before DMA setup */
1418 dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
1419
1435 /* force dma off, just in case */ 1420 /* force dma off, just in case */
1436 dw_dma_off(dw); 1421 dw_dma_off(dw);
1437 1422
1423 /* disable BLOCK interrupts as well */
1424 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1425
1438 err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw); 1426 err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
1439 if (err) 1427 if (err)
1440 goto err_irq; 1428 goto err_irq;
@@ -1443,8 +1431,6 @@ static int __init dw_probe(struct platform_device *pdev)
1443 1431
1444 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); 1432 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
1445 1433
1446 dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
1447
1448 INIT_LIST_HEAD(&dw->dma.channels); 1434 INIT_LIST_HEAD(&dw->dma.channels);
1449 for (i = 0; i < pdata->nr_channels; i++) { 1435 for (i = 0; i < pdata->nr_channels; i++) {
1450 struct dw_dma_chan *dwc = &dw->chan[i]; 1436 struct dw_dma_chan *dwc = &dw->chan[i];
@@ -1474,17 +1460,13 @@ static int __init dw_probe(struct platform_device *pdev)
1474 channel_clear_bit(dw, CH_EN, dwc->mask); 1460 channel_clear_bit(dw, CH_EN, dwc->mask);
1475 } 1461 }
1476 1462
1477 /* Clear/disable all interrupts on all channels. */ 1463 /* Clear all interrupts on all channels. */
1478 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); 1464 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
1465 dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
1479 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); 1466 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
1480 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); 1467 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
1481 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); 1468 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
1482 1469
1483 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
1484 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1485 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1486 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1487
1488 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); 1470 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1489 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); 1471 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1490 if (pdata->is_private) 1472 if (pdata->is_private)
@@ -1523,7 +1505,7 @@ err_kfree:
1523 return err; 1505 return err;
1524} 1506}
1525 1507
1526static int __exit dw_remove(struct platform_device *pdev) 1508static int __devexit dw_remove(struct platform_device *pdev)
1527{ 1509{
1528 struct dw_dma *dw = platform_get_drvdata(pdev); 1510 struct dw_dma *dw = platform_get_drvdata(pdev);
1529 struct dw_dma_chan *dwc, *_dwc; 1511 struct dw_dma_chan *dwc, *_dwc;
@@ -1602,7 +1584,7 @@ MODULE_DEVICE_TABLE(of, dw_dma_id_table);
1602#endif 1584#endif
1603 1585
1604static struct platform_driver dw_driver = { 1586static struct platform_driver dw_driver = {
1605 .remove = __exit_p(dw_remove), 1587 .remove = __devexit_p(dw_remove),
1606 .shutdown = dw_shutdown, 1588 .shutdown = dw_shutdown,
1607 .driver = { 1589 .driver = {
1608 .name = "dw_dmac", 1590 .name = "dw_dmac",
@@ -1626,4 +1608,4 @@ module_exit(dw_exit);
1626MODULE_LICENSE("GPL v2"); 1608MODULE_LICENSE("GPL v2");
1627MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); 1609MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
1628MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 1610MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1629MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 1611MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
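The annotation changes in dw_dmac.c fix a latent section bug: probe was marked __init, so its code was discarded after boot, yet a platform device could in principle bind later and jump into freed memory. The era-correct annotations are __devinit/__devexit, with __devexit_p() supplying NULL when removal support is configured out:

	static int __devinit dw_probe(struct platform_device *pdev);
	static int __devexit dw_remove(struct platform_device *pdev);

	static struct platform_driver dw_driver = {
		/* __devexit_p(fn) is 'fn' with hotplug, NULL without,
		 * matching the discarded .devexit.text section */
		.remove		= __devexit_p(dw_remove),
		.shutdown	= dw_shutdown,
		.driver		= {
			.name	= "dw_dmac",
		},
	};

The probe reordering matters too: all_chan_mask is now computed before dw_dma_off() and the new MASK.BLOCK clear, since both operate on that mask.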
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index f298f69ecbf9..50830bee087a 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -82,7 +82,7 @@ struct dw_dma_regs {
82 DW_REG(ID); 82 DW_REG(ID);
83 DW_REG(TEST); 83 DW_REG(TEST);
84 84
85 /* optional encoded params, 0x3c8..0x3 */ 85 /* optional encoded params, 0x3c8..0x3f7 */
86}; 86};
87 87
88/* Bitfields in CTL_LO */ 88/* Bitfields in CTL_LO */
@@ -219,9 +219,9 @@ static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
219/* LLI == Linked List Item; a.k.a. DMA block descriptor */ 219/* LLI == Linked List Item; a.k.a. DMA block descriptor */
220struct dw_lli { 220struct dw_lli {
221 /* values that are not changed by hardware */ 221 /* values that are not changed by hardware */
222 dma_addr_t sar; 222 u32 sar;
223 dma_addr_t dar; 223 u32 dar;
224 dma_addr_t llp; /* chain to next lli */ 224 u32 llp; /* chain to next lli */
225 u32 ctllo; 225 u32 ctllo;
226 /* values that may get written back: */ 226 /* values that may get written back: */
227 u32 ctlhi; 227 u32 ctlhi;
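Changing the dw_lli address fields from dma_addr_t to u32 is a layout fix, not a cleanup: the LLI is fetched by the controller as a block of 32-bit words, so on configurations where dma_addr_t is 64 bits wide the old declaration silently doubled the first three fields and corrupted the hardware-defined layout. An illustrative compile-time guard one might add (not part of the patch):

	BUILD_BUG_ON(sizeof(((struct dw_lli *)0)->sar) != 4);
	BUILD_BUG_ON(sizeof(((struct dw_lli *)0)->llp) != 4);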
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index fcfeb3cd8d31..5084975d793c 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -172,7 +172,8 @@ struct imxdma_engine {
172 struct device_dma_parameters dma_parms; 172 struct device_dma_parameters dma_parms;
173 struct dma_device dma_device; 173 struct dma_device dma_device;
174 void __iomem *base; 174 void __iomem *base;
175 struct clk *dma_clk; 175 struct clk *dma_ahb;
176 struct clk *dma_ipg;
176 spinlock_t lock; 177 spinlock_t lock;
177 struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS]; 178 struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS];
178 struct imxdma_channel channel[IMX_DMA_CHANNELS]; 179 struct imxdma_channel channel[IMX_DMA_CHANNELS];
@@ -976,10 +977,20 @@ static int __init imxdma_probe(struct platform_device *pdev)
976 return 0; 977 return 0;
977 } 978 }
978 979
979 imxdma->dma_clk = clk_get(NULL, "dma"); 980 imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
980 if (IS_ERR(imxdma->dma_clk)) 981 if (IS_ERR(imxdma->dma_ipg)) {
981 return PTR_ERR(imxdma->dma_clk); 982 ret = PTR_ERR(imxdma->dma_ipg);
982 clk_enable(imxdma->dma_clk); 983 goto err_clk;
984 }
985
986 imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
987 if (IS_ERR(imxdma->dma_ahb)) {
988 ret = PTR_ERR(imxdma->dma_ahb);
989 goto err_clk;
990 }
991
992 clk_prepare_enable(imxdma->dma_ipg);
993 clk_prepare_enable(imxdma->dma_ahb);
983 994
984 /* reset DMA module */ 995 /* reset DMA module */
985 imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR); 996 imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
@@ -988,16 +999,14 @@ static int __init imxdma_probe(struct platform_device *pdev)
988 ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma); 999 ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
989 if (ret) { 1000 if (ret) {
990 dev_warn(imxdma->dev, "Can't register IRQ for DMA\n"); 1001 dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
991 kfree(imxdma); 1002 goto err_enable;
992 return ret;
993 } 1003 }
994 1004
995 ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma); 1005 ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
996 if (ret) { 1006 if (ret) {
997 dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n"); 1007 dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
998 free_irq(MX1_DMA_INT, NULL); 1008 free_irq(MX1_DMA_INT, NULL);
999 kfree(imxdma); 1009 goto err_enable;
1000 return ret;
1001 } 1010 }
1002 } 1011 }
1003 1012
@@ -1094,7 +1103,10 @@ err_init:
1094 free_irq(MX1_DMA_INT, NULL); 1103 free_irq(MX1_DMA_INT, NULL);
1095 free_irq(MX1_DMA_ERR, NULL); 1104 free_irq(MX1_DMA_ERR, NULL);
1096 } 1105 }
1097 1106err_enable:
1107 clk_disable_unprepare(imxdma->dma_ipg);
1108 clk_disable_unprepare(imxdma->dma_ahb);
1109err_clk:
1098 kfree(imxdma); 1110 kfree(imxdma);
1099 return ret; 1111 return ret;
1100} 1112}
@@ -1114,7 +1126,9 @@ static int __exit imxdma_remove(struct platform_device *pdev)
1114 free_irq(MX1_DMA_ERR, NULL); 1126 free_irq(MX1_DMA_ERR, NULL);
1115 } 1127 }
1116 1128
1117 kfree(imxdma); 1129 clk_disable_unprepare(imxdma->dma_ipg);
1130 clk_disable_unprepare(imxdma->dma_ahb);
1131 kfree(imxdma);
1118 1132
1119 return 0; 1133 return 0;
1120} 1134}
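The imx-dma clock changes track two conventions at once: i.MX peripherals expose separate "ipg" (register/bus) and "ahb" (data path) clocks, and the common clock framework requires clk_prepare() before clk_enable(). Hence the single clk_get(NULL, "dma") becomes two devm_clk_get() lookups with clk_prepare_enable(), unwound in reverse with clk_disable_unprepare(). The paired shape in miniature:

	/* probe */
	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);	/* devm: no clk_put() */
	clk_prepare_enable(imxdma->dma_ipg);

	/* remove and error paths */
	clk_disable_unprepare(imxdma->dma_ipg);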
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index fb4f4990f5eb..1dc2a4ad0026 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -815,8 +815,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac)
815 815
816 init_completion(&sdmac->done); 816 init_completion(&sdmac->done);
817 817
818 sdmac->buf_tail = 0;
819
820 return 0; 818 return 0;
821out: 819out:
822 820
@@ -927,6 +925,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
927 925
928 sdmac->flags = 0; 926 sdmac->flags = 0;
929 927
928 sdmac->buf_tail = 0;
929
930 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", 930 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
931 sg_len, channel); 931 sg_len, channel);
932 932
@@ -1027,6 +1027,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1027 1027
1028 sdmac->status = DMA_IN_PROGRESS; 1028 sdmac->status = DMA_IN_PROGRESS;
1029 1029
1030 sdmac->buf_tail = 0;
1031
1030 sdmac->flags |= IMX_DMA_SG_LOOP; 1032 sdmac->flags |= IMX_DMA_SG_LOOP;
1031 sdmac->direction = direction; 1033 sdmac->direction = direction;
1032 ret = sdma_load_context(sdmac); 1034 ret = sdma_load_context(sdmac);
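The imx-sdma hunks move the buf_tail reset from sdma_request_channel() into both prep routines. The old placement only zeroed the ring index once per channel allocation, so a channel reused for a second transfer resumed scanning buffer descriptors from wherever the previous transfer stopped. Resetting at every prepare keeps the producer in step with the completion side, which (paraphrasing the driver's loop handler) advances the index modulo the descriptor count:

	/* every prep: start the BD walk from the beginning */
	sdmac->buf_tail = 0;

	/* completion side, paraphrased: consume one BD per interrupt */
	sdmac->buf_tail++;
	sdmac->buf_tail %= sdmac->num_bd;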
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 5ec72044ea4c..c7573e50aa14 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1663,7 +1663,6 @@ static void __exit ipu_idmac_exit(struct ipu *ipu)
1663 1663
1664static int __init ipu_probe(struct platform_device *pdev) 1664static int __init ipu_probe(struct platform_device *pdev)
1665{ 1665{
1666 struct ipu_platform_data *pdata = pdev->dev.platform_data;
1667 struct resource *mem_ipu, *mem_ic; 1666 struct resource *mem_ipu, *mem_ic;
1668 int ret; 1667 int ret;
1669 1668
@@ -1671,7 +1670,7 @@ static int __init ipu_probe(struct platform_device *pdev)
1671 1670
1672 mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1671 mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1673 mem_ic = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1672 mem_ic = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1674 if (!pdata || !mem_ipu || !mem_ic) 1673 if (!mem_ipu || !mem_ic)
1675 return -EINVAL; 1674 return -EINVAL;
1676 1675
1677 ipu_data.dev = &pdev->dev; 1676 ipu_data.dev = &pdev->dev;
@@ -1688,10 +1687,9 @@ static int __init ipu_probe(struct platform_device *pdev)
1688 goto err_noirq; 1687 goto err_noirq;
1689 1688
1690 ipu_data.irq_err = ret; 1689 ipu_data.irq_err = ret;
1691 ipu_data.irq_base = pdata->irq_base;
1692 1690
1693 dev_dbg(&pdev->dev, "fn irq %u, err irq %u, irq-base %u\n", 1691 dev_dbg(&pdev->dev, "fn irq %u, err irq %u\n",
1694 ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base); 1692 ipu_data.irq_fn, ipu_data.irq_err);
1695 1693
1696 /* Remap IPU common registers */ 1694 /* Remap IPU common registers */
1697 ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu)); 1695 ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu));
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index a71f55e72be9..fa95bcc3de1f 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -14,6 +14,7 @@
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/irq.h> 15#include <linux/irq.h>
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/module.h>
17 18
18#include <mach/ipu.h> 19#include <mach/ipu.h>
19 20
@@ -354,10 +355,12 @@ static struct irq_chip ipu_irq_chip = {
354/* Install the IRQ handler */ 355/* Install the IRQ handler */
355int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) 356int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
356{ 357{
357 struct ipu_platform_data *pdata = dev->dev.platform_data; 358 unsigned int irq, i;
358 unsigned int irq, irq_base, i; 359 int irq_base = irq_alloc_descs(-1, 0, CONFIG_MX3_IPU_IRQS,
360 numa_node_id());
359 361
360 irq_base = pdata->irq_base; 362 if (irq_base < 0)
363 return irq_base;
361 364
362 for (i = 0; i < IPU_IRQ_NR_BANKS; i++) 365 for (i = 0; i < IPU_IRQ_NR_BANKS; i++)
363 irq_bank[i].ipu = ipu; 366 irq_bank[i].ipu = ipu;
@@ -387,15 +390,16 @@ int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
387 irq_set_handler_data(ipu->irq_err, ipu); 390 irq_set_handler_data(ipu->irq_err, ipu);
388 irq_set_chained_handler(ipu->irq_err, ipu_irq_err); 391 irq_set_chained_handler(ipu->irq_err, ipu_irq_err);
389 392
393 ipu->irq_base = irq_base;
394
390 return 0; 395 return 0;
391} 396}
392 397
393void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev) 398void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev)
394{ 399{
395 struct ipu_platform_data *pdata = dev->dev.platform_data;
396 unsigned int irq, irq_base; 400 unsigned int irq, irq_base;
397 401
398 irq_base = pdata->irq_base; 402 irq_base = ipu->irq_base;
399 403
400 irq_set_chained_handler(ipu->irq_fn, NULL); 404 irq_set_chained_handler(ipu->irq_fn, NULL);
401 irq_set_handler_data(ipu->irq_fn, NULL); 405 irq_set_handler_data(ipu->irq_fn, NULL);
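Together, the ipu_idmac.c and ipu_irq.c hunks remove the last use of board-supplied platform data: rather than reading a fixed irq_base out of struct ipu_platform_data, the driver now allocates its interrupt range at attach time. irq_alloc_descs(-1, 0, cnt, node) hands back the first IRQ number of a freshly allocated linear range, or a negative errno, and the base is stored in the ipu struct so detach can find it again:

	int irq_base = irq_alloc_descs(-1, 0, CONFIG_MX3_IPU_IRQS,
				       numa_node_id());
	if (irq_base < 0)
		return irq_base;	/* negative errno on failure */

	ipu->irq_base = irq_base;	/* consumed by ipu_irq_detach_irq() */

This is also why ipu_probe() no longer needs to fail when platform_data is absent.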
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
new file mode 100644
index 000000000000..8a15cf2163dc
--- /dev/null
+++ b/drivers/dma/mmp_tdma.c
@@ -0,0 +1,610 @@
1/*
2 * Driver For Marvell Two-channel DMA Engine
3 *
4 * Copyright: Marvell International Ltd.
5 *
6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations:
9 *
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/types.h>
15#include <linux/interrupt.h>
16#include <linux/dma-mapping.h>
17#include <linux/slab.h>
18#include <linux/dmaengine.h>
19#include <linux/platform_device.h>
20#include <linux/device.h>
21#include <mach/regs-icu.h>
22#include <mach/sram.h>
23
24#include "dmaengine.h"
25
26/*
27 * Two-Channel DMA registers
28 */
29#define TDBCR 0x00 /* Byte Count */
30#define TDSAR 0x10 /* Src Addr */
31#define TDDAR 0x20 /* Dst Addr */
32#define TDNDPR 0x30 /* Next Desc */
33#define TDCR 0x40 /* Control */
 34#define TDCP 0x60 /* Priority */
35#define TDCDPR 0x70 /* Current Desc */
36#define TDIMR 0x80 /* Int Mask */
37#define TDISR 0xa0 /* Int Status */
38
39/* Two-Channel DMA Control Register */
40#define TDCR_SSZ_8_BITS (0x0 << 22) /* Sample Size */
41#define TDCR_SSZ_12_BITS (0x1 << 22)
42#define TDCR_SSZ_16_BITS (0x2 << 22)
43#define TDCR_SSZ_20_BITS (0x3 << 22)
44#define TDCR_SSZ_24_BITS (0x4 << 22)
45#define TDCR_SSZ_32_BITS (0x5 << 22)
46#define TDCR_SSZ_SHIFT (0x1 << 22)
47#define TDCR_SSZ_MASK (0x7 << 22)
48#define TDCR_SSPMOD (0x1 << 21) /* SSP MOD */
49#define TDCR_ABR (0x1 << 20) /* Channel Abort */
50#define TDCR_CDE (0x1 << 17) /* Close Desc Enable */
51#define TDCR_PACKMOD (0x1 << 16) /* Pack Mode (ADMA Only) */
52#define TDCR_CHANACT (0x1 << 14) /* Channel Active */
53#define TDCR_FETCHND (0x1 << 13) /* Fetch Next Desc */
54#define TDCR_CHANEN (0x1 << 12) /* Channel Enable */
55#define TDCR_INTMODE (0x1 << 10) /* Interrupt Mode */
56#define TDCR_CHAINMOD (0x1 << 9) /* Chain Mode */
57#define TDCR_BURSTSZ_MSK (0x7 << 6) /* Burst Size */
58#define TDCR_BURSTSZ_4B (0x0 << 6)
59#define TDCR_BURSTSZ_8B (0x1 << 6)
60#define TDCR_BURSTSZ_16B (0x3 << 6)
61#define TDCR_BURSTSZ_32B (0x6 << 6)
62#define TDCR_BURSTSZ_64B (0x7 << 6)
63#define TDCR_BURSTSZ_SQU_32B (0x7 << 6)
64#define TDCR_BURSTSZ_128B (0x5 << 6)
65#define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */
66#define TDCR_DSTDIR_ADDR_HOLD (0x2 << 4) /* Dst Addr Hold */
67#define TDCR_DSTDIR_ADDR_INC (0x0 << 4) /* Dst Addr Increment */
68#define TDCR_SRCDIR_MSK (0x3 << 2) /* Src Direction */
69#define TDCR_SRCDIR_ADDR_HOLD (0x2 << 2) /* Src Addr Hold */
70#define TDCR_SRCDIR_ADDR_INC (0x0 << 2) /* Src Addr Increment */
71#define TDCR_DSTDESCCONT (0x1 << 1)
72#define TDCR_SRCDESTCONT (0x1 << 0)
73
74/* Two-Channel DMA Int Mask Register */
75#define TDIMR_COMP (0x1 << 0)
76
77/* Two-Channel DMA Int Status Register */
78#define TDISR_COMP (0x1 << 0)
79
80/*
81 * Two-Channel DMA Descriptor Struct
82 * NOTE: desc's buf must be aligned to 16 bytes.
83 */
84struct mmp_tdma_desc {
85 u32 byte_cnt;
86 u32 src_addr;
87 u32 dst_addr;
88 u32 nxt_desc;
89};
90
91enum mmp_tdma_type {
92 MMP_AUD_TDMA = 0,
93 PXA910_SQU,
94};
95
96#define TDMA_ALIGNMENT 3
97#define TDMA_MAX_XFER_BYTES SZ_64K
98
99struct mmp_tdma_chan {
100 struct device *dev;
101 struct dma_chan chan;
102 struct dma_async_tx_descriptor desc;
103 struct tasklet_struct tasklet;
104
105 struct mmp_tdma_desc *desc_arr;
106 phys_addr_t desc_arr_phys;
107 int desc_num;
108 enum dma_transfer_direction dir;
109 dma_addr_t dev_addr;
110 u32 burst_sz;
111 enum dma_slave_buswidth buswidth;
112 enum dma_status status;
113
114 int idx;
115 enum mmp_tdma_type type;
116 int irq;
117 unsigned long reg_base;
118
119 size_t buf_len;
120 size_t period_len;
121 size_t pos;
122};
123
124#define TDMA_CHANNEL_NUM 2
125struct mmp_tdma_device {
126 struct device *dev;
127 void __iomem *base;
128 struct dma_device device;
129 struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM];
130 int irq;
131};
132
133#define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan)
134
135static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys)
136{
137 writel(phys, tdmac->reg_base + TDNDPR);
138 writel(readl(tdmac->reg_base + TDCR) | TDCR_FETCHND,
139 tdmac->reg_base + TDCR);
140}
141
142static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
143{
144 /* enable irq */
145 writel(TDIMR_COMP, tdmac->reg_base + TDIMR);
146 /* enable dma chan */
147 writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
148 tdmac->reg_base + TDCR);
149 tdmac->status = DMA_IN_PROGRESS;
150}
151
152static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
153{
154 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
155 tdmac->reg_base + TDCR);
156 tdmac->status = DMA_SUCCESS;
157}
158
159static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac)
160{
161 writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
162 tdmac->reg_base + TDCR);
163 tdmac->status = DMA_IN_PROGRESS;
164}
165
166static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac)
167{
168 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
169 tdmac->reg_base + TDCR);
170 tdmac->status = DMA_PAUSED;
171}
172
173static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
174{
 175	unsigned int tdcr = 0;
176
177 mmp_tdma_disable_chan(tdmac);
178
179 if (tdmac->dir == DMA_MEM_TO_DEV)
180 tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC;
181 else if (tdmac->dir == DMA_DEV_TO_MEM)
182 tdcr = TDCR_SRCDIR_ADDR_HOLD | TDCR_DSTDIR_ADDR_INC;
183
184 if (tdmac->type == MMP_AUD_TDMA) {
185 tdcr |= TDCR_PACKMOD;
186
187 switch (tdmac->burst_sz) {
188 case 4:
189 tdcr |= TDCR_BURSTSZ_4B;
190 break;
191 case 8:
192 tdcr |= TDCR_BURSTSZ_8B;
193 break;
194 case 16:
195 tdcr |= TDCR_BURSTSZ_16B;
196 break;
197 case 32:
198 tdcr |= TDCR_BURSTSZ_32B;
199 break;
200 case 64:
201 tdcr |= TDCR_BURSTSZ_64B;
202 break;
203 case 128:
204 tdcr |= TDCR_BURSTSZ_128B;
205 break;
206 default:
207 dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
208 return -EINVAL;
209 }
210
211 switch (tdmac->buswidth) {
212 case DMA_SLAVE_BUSWIDTH_1_BYTE:
213 tdcr |= TDCR_SSZ_8_BITS;
214 break;
215 case DMA_SLAVE_BUSWIDTH_2_BYTES:
216 tdcr |= TDCR_SSZ_16_BITS;
217 break;
218 case DMA_SLAVE_BUSWIDTH_4_BYTES:
219 tdcr |= TDCR_SSZ_32_BITS;
220 break;
221 default:
222 dev_err(tdmac->dev, "mmp_tdma: unknown bus size.\n");
223 return -EINVAL;
224 }
225 } else if (tdmac->type == PXA910_SQU) {
226 tdcr |= TDCR_BURSTSZ_SQU_32B;
227 tdcr |= TDCR_SSPMOD;
228 }
229
230 writel(tdcr, tdmac->reg_base + TDCR);
231 return 0;
232}
233
234static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac)
235{
236 u32 reg = readl(tdmac->reg_base + TDISR);
237
238 if (reg & TDISR_COMP) {
239 /* clear irq */
240 reg &= ~TDISR_COMP;
241 writel(reg, tdmac->reg_base + TDISR);
242
243 return 0;
244 }
245 return -EAGAIN;
246}
247
248static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id)
249{
250 struct mmp_tdma_chan *tdmac = dev_id;
251
252 if (mmp_tdma_clear_chan_irq(tdmac) == 0) {
253 tdmac->pos = (tdmac->pos + tdmac->period_len) % tdmac->buf_len;
254 tasklet_schedule(&tdmac->tasklet);
255 return IRQ_HANDLED;
256 } else
257 return IRQ_NONE;
258}
259
260static irqreturn_t mmp_tdma_int_handler(int irq, void *dev_id)
261{
262 struct mmp_tdma_device *tdev = dev_id;
263 int i, ret;
264 int irq_num = 0;
265
266 for (i = 0; i < TDMA_CHANNEL_NUM; i++) {
267 struct mmp_tdma_chan *tdmac = tdev->tdmac[i];
268
269 ret = mmp_tdma_chan_handler(irq, tdmac);
270 if (ret == IRQ_HANDLED)
271 irq_num++;
272 }
273
274 if (irq_num)
275 return IRQ_HANDLED;
276 else
277 return IRQ_NONE;
278}
279
280static void dma_do_tasklet(unsigned long data)
281{
282 struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data;
283
284 if (tdmac->desc.callback)
285 tdmac->desc.callback(tdmac->desc.callback_param);
286
287}
288
289static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
290{
291 struct gen_pool *gpool;
292 int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
293
294 gpool = sram_get_gpool("asram");
295 if (tdmac->desc_arr)
296 gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
297 size);
298 tdmac->desc_arr = NULL;
299
300 return;
301}
302
303static dma_cookie_t mmp_tdma_tx_submit(struct dma_async_tx_descriptor *tx)
304{
305 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(tx->chan);
306
307 mmp_tdma_chan_set_desc(tdmac, tdmac->desc_arr_phys);
308
309 return 0;
310}
311
312static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan)
313{
314 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
315 int ret;
316
317 dma_async_tx_descriptor_init(&tdmac->desc, chan);
318 tdmac->desc.tx_submit = mmp_tdma_tx_submit;
319
320 if (tdmac->irq) {
321 ret = devm_request_irq(tdmac->dev, tdmac->irq,
322 mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac);
323 if (ret)
324 return ret;
325 }
326 return 1;
327}
328
329static void mmp_tdma_free_chan_resources(struct dma_chan *chan)
330{
331 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
332
333 if (tdmac->irq)
334 devm_free_irq(tdmac->dev, tdmac->irq, tdmac);
335 mmp_tdma_free_descriptor(tdmac);
336 return;
337}
338
339struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
340{
341 struct gen_pool *gpool;
342 int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
343
344 gpool = sram_get_gpool("asram");
345 if (!gpool)
346 return NULL;
347
348 tdmac->desc_arr = (void *)gen_pool_alloc(gpool, size);
349 if (!tdmac->desc_arr)
350 return NULL;
351
352 tdmac->desc_arr_phys = gen_pool_virt_to_phys(gpool,
353 (unsigned long)tdmac->desc_arr);
354
355 return tdmac->desc_arr;
356}
357
358static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
359 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
360 size_t period_len, enum dma_transfer_direction direction,
361 void *context)
362{
363 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
364 struct mmp_tdma_desc *desc;
365 int num_periods = buf_len / period_len;
366 int i = 0, buf = 0;
367
368 if (tdmac->status != DMA_SUCCESS)
369 return NULL;
370
371 if (period_len > TDMA_MAX_XFER_BYTES) {
372 dev_err(tdmac->dev,
373 "maximum period size exceeded: %d > %d\n",
374 period_len, TDMA_MAX_XFER_BYTES);
375 goto err_out;
376 }
377
378 tdmac->status = DMA_IN_PROGRESS;
379 tdmac->desc_num = num_periods;
380 desc = mmp_tdma_alloc_descriptor(tdmac);
381 if (!desc)
382 goto err_out;
383
384 while (buf < buf_len) {
385 desc = &tdmac->desc_arr[i];
386
387 if (i + 1 == num_periods)
388 desc->nxt_desc = tdmac->desc_arr_phys;
389 else
390 desc->nxt_desc = tdmac->desc_arr_phys +
391 sizeof(*desc) * (i + 1);
392
393 if (direction == DMA_MEM_TO_DEV) {
394 desc->src_addr = dma_addr;
395 desc->dst_addr = tdmac->dev_addr;
396 } else {
397 desc->src_addr = tdmac->dev_addr;
398 desc->dst_addr = dma_addr;
399 }
400 desc->byte_cnt = period_len;
401 dma_addr += period_len;
402 buf += period_len;
403 i++;
404 }
405
406 tdmac->buf_len = buf_len;
407 tdmac->period_len = period_len;
408 tdmac->pos = 0;
409
410 return &tdmac->desc;
411
412err_out:
413 tdmac->status = DMA_ERROR;
414 return NULL;
415}
416
417static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
418 unsigned long arg)
419{
420 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
421 struct dma_slave_config *dmaengine_cfg = (void *)arg;
422 int ret = 0;
423
424 switch (cmd) {
425 case DMA_TERMINATE_ALL:
426 mmp_tdma_disable_chan(tdmac);
427 break;
428 case DMA_PAUSE:
429 mmp_tdma_pause_chan(tdmac);
430 break;
431 case DMA_RESUME:
432 mmp_tdma_resume_chan(tdmac);
433 break;
434 case DMA_SLAVE_CONFIG:
435 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
436 tdmac->dev_addr = dmaengine_cfg->src_addr;
437 tdmac->burst_sz = dmaengine_cfg->src_maxburst;
438 tdmac->buswidth = dmaengine_cfg->src_addr_width;
439 } else {
440 tdmac->dev_addr = dmaengine_cfg->dst_addr;
441 tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
442 tdmac->buswidth = dmaengine_cfg->dst_addr_width;
443 }
444 tdmac->dir = dmaengine_cfg->direction;
445 return mmp_tdma_config_chan(tdmac);
446 default:
447 ret = -ENOSYS;
448 }
449
450 return ret;
451}
452
453static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
454 dma_cookie_t cookie, struct dma_tx_state *txstate)
455{
456 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
457
458 dma_set_residue(txstate, tdmac->buf_len - tdmac->pos);
459
460 return tdmac->status;
461}
462
463static void mmp_tdma_issue_pending(struct dma_chan *chan)
464{
465 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
466
467 mmp_tdma_enable_chan(tdmac);
468}
469
470static int __devexit mmp_tdma_remove(struct platform_device *pdev)
471{
472 struct mmp_tdma_device *tdev = platform_get_drvdata(pdev);
473
474 dma_async_device_unregister(&tdev->device);
475 return 0;
476}
477
478static int __devinit mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
479 int idx, int irq, int type)
480{
481 struct mmp_tdma_chan *tdmac;
482
483 if (idx >= TDMA_CHANNEL_NUM) {
484 dev_err(tdev->dev, "too many channels for device!\n");
485 return -EINVAL;
486 }
487
488 /* alloc channel */
489 tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL);
490 if (!tdmac) {
491 dev_err(tdev->dev, "no free memory for DMA channels!\n");
492 return -ENOMEM;
493 }
494 if (irq)
495 tdmac->irq = irq + idx;
496 tdmac->dev = tdev->dev;
497 tdmac->chan.device = &tdev->device;
498 tdmac->idx = idx;
499 tdmac->type = type;
500 tdmac->reg_base = (unsigned long)tdev->base + idx * 4;
501 tdmac->status = DMA_SUCCESS;
502 tdev->tdmac[tdmac->idx] = tdmac;
503 tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
504
505 /* add the channel to tdma_chan list */
506 list_add_tail(&tdmac->chan.device_node,
507 &tdev->device.channels);
508
509 return 0;
510}
511
512static int __devinit mmp_tdma_probe(struct platform_device *pdev)
513{
514 const struct platform_device_id *id = platform_get_device_id(pdev);
515 enum mmp_tdma_type type = id->driver_data;
516 struct mmp_tdma_device *tdev;
517 struct resource *iores;
518 int i, ret;
519 int irq = 0;
520 int chan_num = TDMA_CHANNEL_NUM;
521
 522	/* always have a couple of channels */
523 tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
524 if (!tdev)
525 return -ENOMEM;
526
527 tdev->dev = &pdev->dev;
528 iores = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
529 if (!iores)
530 return -EINVAL;
531
532 if (resource_size(iores) != chan_num)
533 tdev->irq = iores->start;
534 else
535 irq = iores->start;
536
537 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
538 if (!iores)
539 return -EINVAL;
540
541 tdev->base = devm_request_and_ioremap(&pdev->dev, iores);
542 if (!tdev->base)
543 return -EADDRNOTAVAIL;
544
545 if (tdev->irq) {
546 ret = devm_request_irq(&pdev->dev, tdev->irq,
547 mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev);
548 if (ret)
549 return ret;
550 }
551
552 dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
553 dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
554
555 INIT_LIST_HEAD(&tdev->device.channels);
556
557 /* initialize channel parameters */
558 for (i = 0; i < chan_num; i++) {
559 ret = mmp_tdma_chan_init(tdev, i, irq, type);
560 if (ret)
561 return ret;
562 }
563
564 tdev->device.dev = &pdev->dev;
565 tdev->device.device_alloc_chan_resources =
566 mmp_tdma_alloc_chan_resources;
567 tdev->device.device_free_chan_resources =
568 mmp_tdma_free_chan_resources;
569 tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
570 tdev->device.device_tx_status = mmp_tdma_tx_status;
571 tdev->device.device_issue_pending = mmp_tdma_issue_pending;
572 tdev->device.device_control = mmp_tdma_control;
573 tdev->device.copy_align = TDMA_ALIGNMENT;
574
575 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
576 platform_set_drvdata(pdev, tdev);
577
578 ret = dma_async_device_register(&tdev->device);
579 if (ret) {
580 dev_err(tdev->device.dev, "unable to register\n");
581 return ret;
582 }
583
584 dev_info(tdev->device.dev, "initialized\n");
585 return 0;
586}
587
588static const struct platform_device_id mmp_tdma_id_table[] = {
589 { "mmp-adma", MMP_AUD_TDMA },
590 { "pxa910-squ", PXA910_SQU },
591 { },
592};
593
594static struct platform_driver mmp_tdma_driver = {
595 .driver = {
596 .name = "mmp-tdma",
597 .owner = THIS_MODULE,
598 },
599 .id_table = mmp_tdma_id_table,
600 .probe = mmp_tdma_probe,
601 .remove = __devexit_p(mmp_tdma_remove),
602};
603
604module_platform_driver(mmp_tdma_driver);
605
606MODULE_LICENSE("GPL");
607MODULE_DESCRIPTION("MMP Two-Channel DMA Driver");
608MODULE_ALIAS("platform:mmp-tdma");
609MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
610MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>");
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index c96ab15319f2..7f41b25805fa 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -29,7 +29,6 @@
29#include <linux/of_device.h> 29#include <linux/of_device.h>
30 30
31#include <asm/irq.h> 31#include <asm/irq.h>
32#include <mach/mxs.h>
33 32
34#include "dmaengine.h" 33#include "dmaengine.h"
35 34
@@ -201,6 +200,7 @@ int mxs_dma_is_apbh(struct dma_chan *chan)
201 200
202 return dma_is_apbh(mxs_dma); 201 return dma_is_apbh(mxs_dma);
203} 202}
203EXPORT_SYMBOL_GPL(mxs_dma_is_apbh);
204 204
205int mxs_dma_is_apbx(struct dma_chan *chan) 205int mxs_dma_is_apbx(struct dma_chan *chan)
206{ 206{
@@ -209,6 +209,7 @@ int mxs_dma_is_apbx(struct dma_chan *chan)
209 209
210 return !dma_is_apbh(mxs_dma); 210 return !dma_is_apbh(mxs_dma);
211} 211}
212EXPORT_SYMBOL_GPL(mxs_dma_is_apbx);
212 213
213static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) 214static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
214{ 215{
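The mxs-dma hunks add no new logic: the <mach/mxs.h> include goes away and the two existing predicates are exported, presumably so code built as a module can call them. The export idiom itself, in a toy example:

#include <linux/module.h>

int example_helper(void)
{
	return 42;
}
/* Makes the symbol resolvable by GPL-compatible modules at load time;
 * without an export, only built-in code can link against it. */
EXPORT_SYMBOL_GPL(example_helper);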
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
new file mode 100644
index 000000000000..ae0561826137
--- /dev/null
+++ b/drivers/dma/omap-dma.c
@@ -0,0 +1,669 @@
1/*
2 * OMAP DMAengine support
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <linux/dmaengine.h>
9#include <linux/dma-mapping.h>
10#include <linux/err.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/list.h>
14#include <linux/module.h>
15#include <linux/omap-dma.h>
16#include <linux/platform_device.h>
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19
20#include "virt-dma.h"
21#include <plat/dma.h>
22
23struct omap_dmadev {
24 struct dma_device ddev;
25 spinlock_t lock;
26 struct tasklet_struct task;
27 struct list_head pending;
28};
29
30struct omap_chan {
31 struct virt_dma_chan vc;
32 struct list_head node;
33
34 struct dma_slave_config cfg;
35 unsigned dma_sig;
36 bool cyclic;
37
38 int dma_ch;
39 struct omap_desc *desc;
40 unsigned sgidx;
41};
42
43struct omap_sg {
44 dma_addr_t addr;
45 uint32_t en; /* number of elements (24-bit) */
46 uint32_t fn; /* number of frames (16-bit) */
47};
48
49struct omap_desc {
50 struct virt_dma_desc vd;
51 enum dma_transfer_direction dir;
52 dma_addr_t dev_addr;
53
54 int16_t fi; /* for OMAP_DMA_SYNC_PACKET */
55 uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */
56 uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */
57 uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */
58 uint8_t periph_port; /* Peripheral port */
59
60 unsigned sglen;
61 struct omap_sg sg[0];
62};
63
64static const unsigned es_bytes[] = {
65 [OMAP_DMA_DATA_TYPE_S8] = 1,
66 [OMAP_DMA_DATA_TYPE_S16] = 2,
67 [OMAP_DMA_DATA_TYPE_S32] = 4,
68};
69
70static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
71{
72 return container_of(d, struct omap_dmadev, ddev);
73}
74
75static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
76{
77 return container_of(c, struct omap_chan, vc.chan);
78}
79
80static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
81{
82 return container_of(t, struct omap_desc, vd.tx);
83}
84
85static void omap_dma_desc_free(struct virt_dma_desc *vd)
86{
87 kfree(container_of(vd, struct omap_desc, vd));
88}
89
90static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
91 unsigned idx)
92{
93 struct omap_sg *sg = d->sg + idx;
94
95 if (d->dir == DMA_DEV_TO_MEM)
96 omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
97 OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
98 else
99 omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
100 OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
101
102 omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
103 d->sync_mode, c->dma_sig, d->sync_type);
104
105 omap_start_dma(c->dma_ch);
106}
107
108static void omap_dma_start_desc(struct omap_chan *c)
109{
110 struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
111 struct omap_desc *d;
112
113 if (!vd) {
114 c->desc = NULL;
115 return;
116 }
117
118 list_del(&vd->node);
119
120 c->desc = d = to_omap_dma_desc(&vd->tx);
121 c->sgidx = 0;
122
123 if (d->dir == DMA_DEV_TO_MEM)
124 omap_set_dma_src_params(c->dma_ch, d->periph_port,
125 OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
126 else
127 omap_set_dma_dest_params(c->dma_ch, d->periph_port,
128 OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
129
130 omap_dma_start_sg(c, d, 0);
131}
132
133static void omap_dma_callback(int ch, u16 status, void *data)
134{
135 struct omap_chan *c = data;
136 struct omap_desc *d;
137 unsigned long flags;
138
139 spin_lock_irqsave(&c->vc.lock, flags);
140 d = c->desc;
141 if (d) {
142 if (!c->cyclic) {
143 if (++c->sgidx < d->sglen) {
144 omap_dma_start_sg(c, d, c->sgidx);
145 } else {
146 omap_dma_start_desc(c);
147 vchan_cookie_complete(&d->vd);
148 }
149 } else {
150 vchan_cyclic_callback(&d->vd);
151 }
152 }
153 spin_unlock_irqrestore(&c->vc.lock, flags);
154}
155
156/*
157 * This callback schedules all pending channels. We could be more
158 * clever here by postponing allocation of the real DMA channels to
159 * this point, and freeing them when our virtual channel becomes idle.
160 *
161 * We would then need to deal with 'all channels in-use'
162 */
163static void omap_dma_sched(unsigned long data)
164{
165 struct omap_dmadev *d = (struct omap_dmadev *)data;
166 LIST_HEAD(head);
167
168 spin_lock_irq(&d->lock);
169 list_splice_tail_init(&d->pending, &head);
170 spin_unlock_irq(&d->lock);
171
172 while (!list_empty(&head)) {
173 struct omap_chan *c = list_first_entry(&head,
174 struct omap_chan, node);
175
176 spin_lock_irq(&c->vc.lock);
177 list_del_init(&c->node);
178 omap_dma_start_desc(c);
179 spin_unlock_irq(&c->vc.lock);
180 }
181}
182
183static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
184{
185 struct omap_chan *c = to_omap_dma_chan(chan);
186
187 dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
188
189 return omap_request_dma(c->dma_sig, "DMA engine",
190 omap_dma_callback, c, &c->dma_ch);
191}
192
193static void omap_dma_free_chan_resources(struct dma_chan *chan)
194{
195 struct omap_chan *c = to_omap_dma_chan(chan);
196
197 vchan_free_chan_resources(&c->vc);
198 omap_free_dma(c->dma_ch);
199
200 dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
201}
202
203static size_t omap_dma_sg_size(struct omap_sg *sg)
204{
205 return sg->en * sg->fn;
206}
207
208static size_t omap_dma_desc_size(struct omap_desc *d)
209{
210 unsigned i;
211 size_t size;
212
213 for (size = i = 0; i < d->sglen; i++)
214 size += omap_dma_sg_size(&d->sg[i]);
215
216 return size * es_bytes[d->es];
217}
218
219static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
220{
221 unsigned i;
222 size_t size, es_size = es_bytes[d->es];
223
224 for (size = i = 0; i < d->sglen; i++) {
225 size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;
226
227 if (size)
228 size += this_size;
229 else if (addr >= d->sg[i].addr &&
230 addr < d->sg[i].addr + this_size)
231 size += d->sg[i].addr + this_size - addr;
232 }
233 return size;
234}
235
236static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
237 dma_cookie_t cookie, struct dma_tx_state *txstate)
238{
239 struct omap_chan *c = to_omap_dma_chan(chan);
240 struct virt_dma_desc *vd;
241 enum dma_status ret;
242 unsigned long flags;
243
244 ret = dma_cookie_status(chan, cookie, txstate);
245 if (ret == DMA_SUCCESS || !txstate)
246 return ret;
247
248 spin_lock_irqsave(&c->vc.lock, flags);
249 vd = vchan_find_desc(&c->vc, cookie);
250 if (vd) {
251 txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
252 } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
253 struct omap_desc *d = c->desc;
254 dma_addr_t pos;
255
256 if (d->dir == DMA_MEM_TO_DEV)
257 pos = omap_get_dma_src_pos(c->dma_ch);
258 else if (d->dir == DMA_DEV_TO_MEM)
259 pos = omap_get_dma_dst_pos(c->dma_ch);
260 else
261 pos = 0;
262
263 txstate->residue = omap_dma_desc_size_pos(d, pos);
264 } else {
265 txstate->residue = 0;
266 }
267 spin_unlock_irqrestore(&c->vc.lock, flags);
268
269 return ret;
270}
271
272static void omap_dma_issue_pending(struct dma_chan *chan)
273{
274 struct omap_chan *c = to_omap_dma_chan(chan);
275 unsigned long flags;
276
277 spin_lock_irqsave(&c->vc.lock, flags);
278 if (vchan_issue_pending(&c->vc) && !c->desc) {
279 struct omap_dmadev *d = to_omap_dma_dev(chan->device);
280 spin_lock(&d->lock);
281 if (list_empty(&c->node))
282 list_add_tail(&c->node, &d->pending);
283 spin_unlock(&d->lock);
284 tasklet_schedule(&d->task);
285 }
286 spin_unlock_irqrestore(&c->vc.lock, flags);
287}
288
289static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
290 struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
291 enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
292{
293 struct omap_chan *c = to_omap_dma_chan(chan);
294 enum dma_slave_buswidth dev_width;
295 struct scatterlist *sgent;
296 struct omap_desc *d;
297 dma_addr_t dev_addr;
298 unsigned i, j = 0, es, en, frame_bytes, sync_type;
299 u32 burst;
300
301 if (dir == DMA_DEV_TO_MEM) {
302 dev_addr = c->cfg.src_addr;
303 dev_width = c->cfg.src_addr_width;
304 burst = c->cfg.src_maxburst;
305 sync_type = OMAP_DMA_SRC_SYNC;
306 } else if (dir == DMA_MEM_TO_DEV) {
307 dev_addr = c->cfg.dst_addr;
308 dev_width = c->cfg.dst_addr_width;
309 burst = c->cfg.dst_maxburst;
310 sync_type = OMAP_DMA_DST_SYNC;
311 } else {
312 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
313 return NULL;
314 }
315
316 /* Bus width translates to the element size (ES) */
317 switch (dev_width) {
318 case DMA_SLAVE_BUSWIDTH_1_BYTE:
319 es = OMAP_DMA_DATA_TYPE_S8;
320 break;
321 case DMA_SLAVE_BUSWIDTH_2_BYTES:
322 es = OMAP_DMA_DATA_TYPE_S16;
323 break;
324 case DMA_SLAVE_BUSWIDTH_4_BYTES:
325 es = OMAP_DMA_DATA_TYPE_S32;
326 break;
327 default: /* not reached */
328 return NULL;
329 }
330
331 /* Now allocate and setup the descriptor. */
332 d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
333 if (!d)
334 return NULL;
335
336 d->dir = dir;
337 d->dev_addr = dev_addr;
338 d->es = es;
339 d->sync_mode = OMAP_DMA_SYNC_FRAME;
340 d->sync_type = sync_type;
341 d->periph_port = OMAP_DMA_PORT_TIPB;
342
343 /*
344 * Build our scatterlist entries: each contains the address,
345 * the number of elements (EN) in each frame, and the number of
346 * frames (FN). Number of bytes for this entry = ES * EN * FN.
347 *
348 * Burst size translates to number of elements with frame sync.
349 * Note: DMA engine defines burst to be the number of dev-width
350 * transfers.
351 */
352 en = burst;
353 frame_bytes = es_bytes[es] * en;
354 for_each_sg(sgl, sgent, sglen, i) {
355 d->sg[j].addr = sg_dma_address(sgent);
356 d->sg[j].en = en;
357 d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
358 j++;
359 }
360
361 d->sglen = j;
362
363 return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
364}
365
366static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
367 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
368 size_t period_len, enum dma_transfer_direction dir, void *context)
369{
370 struct omap_chan *c = to_omap_dma_chan(chan);
371 enum dma_slave_buswidth dev_width;
372 struct omap_desc *d;
373 dma_addr_t dev_addr;
374 unsigned es, sync_type;
375 u32 burst;
376
377 if (dir == DMA_DEV_TO_MEM) {
378 dev_addr = c->cfg.src_addr;
379 dev_width = c->cfg.src_addr_width;
380 burst = c->cfg.src_maxburst;
381 sync_type = OMAP_DMA_SRC_SYNC;
382 } else if (dir == DMA_MEM_TO_DEV) {
383 dev_addr = c->cfg.dst_addr;
384 dev_width = c->cfg.dst_addr_width;
385 burst = c->cfg.dst_maxburst;
386 sync_type = OMAP_DMA_DST_SYNC;
387 } else {
388 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
389 return NULL;
390 }
391
392 /* Bus width translates to the element size (ES) */
393 switch (dev_width) {
394 case DMA_SLAVE_BUSWIDTH_1_BYTE:
395 es = OMAP_DMA_DATA_TYPE_S8;
396 break;
397 case DMA_SLAVE_BUSWIDTH_2_BYTES:
398 es = OMAP_DMA_DATA_TYPE_S16;
399 break;
400 case DMA_SLAVE_BUSWIDTH_4_BYTES:
401 es = OMAP_DMA_DATA_TYPE_S32;
402 break;
403 default: /* not reached */
404 return NULL;
405 }
406
407 /* Now allocate and setup the descriptor. */
408 d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
409 if (!d)
410 return NULL;
411
412 d->dir = dir;
413 d->dev_addr = dev_addr;
414 d->fi = burst;
415 d->es = es;
416 d->sync_mode = OMAP_DMA_SYNC_PACKET;
417 d->sync_type = sync_type;
418 d->periph_port = OMAP_DMA_PORT_MPUI;
419 d->sg[0].addr = buf_addr;
420 d->sg[0].en = period_len / es_bytes[es];
421 d->sg[0].fn = buf_len / period_len;
422 d->sglen = 1;
423
424 if (!c->cyclic) {
425 c->cyclic = true;
426 omap_dma_link_lch(c->dma_ch, c->dma_ch);
427 omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
428 omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
429 }
430
431 if (!cpu_class_is_omap1()) {
432 omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
433 omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
434 }
435
436 return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
437}
438
439static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
440{
441 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
442 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
443 return -EINVAL;
444
445 memcpy(&c->cfg, cfg, sizeof(c->cfg));
446
447 return 0;
448}
449
450static int omap_dma_terminate_all(struct omap_chan *c)
451{
452 struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
453 unsigned long flags;
454 LIST_HEAD(head);
455
456 spin_lock_irqsave(&c->vc.lock, flags);
457
458 /* Prevent this channel being scheduled */
459 spin_lock(&d->lock);
460 list_del_init(&c->node);
461 spin_unlock(&d->lock);
462
463 /*
464 * Stop DMA activity: we assume the callback will not be called
465 * after omap_stop_dma() returns (even if it does, it will see
466 * c->desc is NULL and exit.)
467 */
468 if (c->desc) {
469 c->desc = NULL;
470 omap_stop_dma(c->dma_ch);
471 }
472
473 if (c->cyclic) {
474 c->cyclic = false;
475 omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
476 }
477
478 vchan_get_all_descriptors(&c->vc, &head);
479 spin_unlock_irqrestore(&c->vc.lock, flags);
480 vchan_dma_desc_free_list(&c->vc, &head);
481
482 return 0;
483}
484
485static int omap_dma_pause(struct omap_chan *c)
486{
487 /* FIXME: not supported by platform private API */
488 return -EINVAL;
489}
490
491static int omap_dma_resume(struct omap_chan *c)
492{
493 /* FIXME: not supported by platform private API */
494 return -EINVAL;
495}
496
497static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
498 unsigned long arg)
499{
500 struct omap_chan *c = to_omap_dma_chan(chan);
501 int ret;
502
503 switch (cmd) {
504 case DMA_SLAVE_CONFIG:
505 ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
506 break;
507
508 case DMA_TERMINATE_ALL:
509 ret = omap_dma_terminate_all(c);
510 break;
511
512 case DMA_PAUSE:
513 ret = omap_dma_pause(c);
514 break;
515
516 case DMA_RESUME:
517 ret = omap_dma_resume(c);
518 break;
519
520 default:
521 ret = -ENXIO;
522 break;
523 }
524
525 return ret;
526}
527
528static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
529{
530 struct omap_chan *c;
531
532 c = kzalloc(sizeof(*c), GFP_KERNEL);
533 if (!c)
534 return -ENOMEM;
535
536 c->dma_sig = dma_sig;
537 c->vc.desc_free = omap_dma_desc_free;
538 vchan_init(&c->vc, &od->ddev);
539 INIT_LIST_HEAD(&c->node);
540
541 od->ddev.chancnt++;
542
543 return 0;
544}
545
546static void omap_dma_free(struct omap_dmadev *od)
547{
548 tasklet_kill(&od->task);
549 while (!list_empty(&od->ddev.channels)) {
550 struct omap_chan *c = list_first_entry(&od->ddev.channels,
551 struct omap_chan, vc.chan.device_node);
552
553 list_del(&c->vc.chan.device_node);
554 tasklet_kill(&c->vc.task);
555 kfree(c);
556 }
557 kfree(od);
558}
559
560static int omap_dma_probe(struct platform_device *pdev)
561{
562 struct omap_dmadev *od;
563 int rc, i;
564
565 od = kzalloc(sizeof(*od), GFP_KERNEL);
566 if (!od)
567 return -ENOMEM;
568
569 dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
570 dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
571 od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
572 od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
573 od->ddev.device_tx_status = omap_dma_tx_status;
574 od->ddev.device_issue_pending = omap_dma_issue_pending;
575 od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
576 od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
577 od->ddev.device_control = omap_dma_control;
578 od->ddev.dev = &pdev->dev;
579 INIT_LIST_HEAD(&od->ddev.channels);
580 INIT_LIST_HEAD(&od->pending);
581 spin_lock_init(&od->lock);
582
583 tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);
584
585 for (i = 0; i < 127; i++) {
586 rc = omap_dma_chan_init(od, i);
587 if (rc) {
588 omap_dma_free(od);
589 return rc;
590 }
591 }
592
593 rc = dma_async_device_register(&od->ddev);
594 if (rc) {
595 pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
596 rc);
597 omap_dma_free(od);
598 } else {
599 platform_set_drvdata(pdev, od);
600 }
601
602 dev_info(&pdev->dev, "OMAP DMA engine driver\n");
603
604 return rc;
605}
606
607static int omap_dma_remove(struct platform_device *pdev)
608{
609 struct omap_dmadev *od = platform_get_drvdata(pdev);
610
611 dma_async_device_unregister(&od->ddev);
612 omap_dma_free(od);
613
614 return 0;
615}
616
617static struct platform_driver omap_dma_driver = {
618 .probe = omap_dma_probe,
619 .remove = omap_dma_remove,
620 .driver = {
621 .name = "omap-dma-engine",
622 .owner = THIS_MODULE,
623 },
624};
625
626bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
627{
628 if (chan->device->dev->driver == &omap_dma_driver.driver) {
629 struct omap_chan *c = to_omap_dma_chan(chan);
630 unsigned req = *(unsigned *)param;
631
632 return req == c->dma_sig;
633 }
634 return false;
635}
636EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
637
638static struct platform_device *pdev;
639
640static const struct platform_device_info omap_dma_dev_info = {
641 .name = "omap-dma-engine",
642 .id = -1,
643 .dma_mask = DMA_BIT_MASK(32),
644};
645
646static int omap_dma_init(void)
647{
648 int rc = platform_driver_register(&omap_dma_driver);
649
650 if (rc == 0) {
651 pdev = platform_device_register_full(&omap_dma_dev_info);
652 if (IS_ERR(pdev)) {
653 platform_driver_unregister(&omap_dma_driver);
654 rc = PTR_ERR(pdev);
655 }
656 }
657 return rc;
658}
659subsys_initcall(omap_dma_init);
660
661static void __exit omap_dma_exit(void)
662{
663 platform_device_unregister(pdev);
664 platform_driver_unregister(&omap_dma_driver);
665}
666module_exit(omap_dma_exit);
667
668MODULE_AUTHOR("Russell King");
669MODULE_LICENSE("GPL");
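omap_dma_filter_fn() is exported so peripheral drivers can claim a channel for a specific DMA request line through the generic dmaengine allocator; the filter simply rejects any candidate channel whose dma_sig differs from the requested line. A hedged sketch of client-side usage (the request number 7 is an arbitrary placeholder):

#include <linux/dmaengine.h>

extern bool omap_dma_filter_fn(struct dma_chan *chan, void *param);

static struct dma_chan *example_claim_channel(void)
{
	dma_cap_mask_t mask;
	unsigned int req = 7;	/* placeholder request line number */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The filter runs synchronously, so &req on the stack is safe. */
	return dma_request_channel(mask, omap_dma_filter_fn, &req);
}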
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index cbcc28e79be6..e4feba6b03c0 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -392,6 +392,8 @@ struct pl330_req {
392 struct pl330_reqcfg *cfg; 392 struct pl330_reqcfg *cfg;
393 /* Pointer to first xfer in the request. */ 393 /* Pointer to first xfer in the request. */
394 struct pl330_xfer *x; 394 struct pl330_xfer *x;
395 /* Hook to attach to DMAC's list of reqs with due callback */
396 struct list_head rqd;
395}; 397};
396 398
397/* 399/*
@@ -461,8 +463,6 @@ struct _pl330_req {
461 /* Number of bytes taken to setup MC for the req */ 463 /* Number of bytes taken to setup MC for the req */
462 u32 mc_len; 464 u32 mc_len;
463 struct pl330_req *r; 465 struct pl330_req *r;
464 /* Hook to attach to DMAC's list of reqs with due callback */
465 struct list_head rqd;
466}; 466};
467 467
468/* ToBeDone for tasklet */ 468/* ToBeDone for tasklet */
@@ -1683,7 +1683,7 @@ static void pl330_dotask(unsigned long data)
1683/* Returns 1 if state was updated, 0 otherwise */ 1683/* Returns 1 if state was updated, 0 otherwise */
1684static int pl330_update(const struct pl330_info *pi) 1684static int pl330_update(const struct pl330_info *pi)
1685{ 1685{
1686 struct _pl330_req *rqdone; 1686 struct pl330_req *rqdone, *tmp;
1687 struct pl330_dmac *pl330; 1687 struct pl330_dmac *pl330;
1688 unsigned long flags; 1688 unsigned long flags;
1689 void __iomem *regs; 1689 void __iomem *regs;
@@ -1750,7 +1750,10 @@ static int pl330_update(const struct pl330_info *pi)
1750 if (active == -1) /* Aborted */ 1750 if (active == -1) /* Aborted */
1751 continue; 1751 continue;
1752 1752
1753 rqdone = &thrd->req[active]; 1753 /* Detach the req */
1754 rqdone = thrd->req[active].r;
1755 thrd->req[active].r = NULL;
1756
1754 mark_free(thrd, active); 1757 mark_free(thrd, active);
1755 1758
1756 /* Get going again ASAP */ 1759 /* Get going again ASAP */
@@ -1762,20 +1765,11 @@ static int pl330_update(const struct pl330_info *pi)
1762 } 1765 }
1763 1766
1764 /* Now that we are in no hurry, do the callbacks */ 1767 /* Now that we are in no hurry, do the callbacks */
1765 while (!list_empty(&pl330->req_done)) { 1768 list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
1766 struct pl330_req *r; 1769 list_del(&rqdone->rqd);
1767
1768 rqdone = container_of(pl330->req_done.next,
1769 struct _pl330_req, rqd);
1770
1771 list_del_init(&rqdone->rqd);
1772
1773 /* Detach the req */
1774 r = rqdone->r;
1775 rqdone->r = NULL;
1776 1770
1777 spin_unlock_irqrestore(&pl330->lock, flags); 1771 spin_unlock_irqrestore(&pl330->lock, flags);
1778 _callback(r, PL330_ERR_NONE); 1772 _callback(rqdone, PL330_ERR_NONE);
1779 spin_lock_irqsave(&pl330->lock, flags); 1773 spin_lock_irqsave(&pl330->lock, flags);
1780 } 1774 }
1781 1775
@@ -2321,7 +2315,7 @@ static void pl330_tasklet(unsigned long data)
2321 /* Pick up ripe tomatoes */ 2315 /* Pick up ripe tomatoes */
2322 list_for_each_entry_safe(desc, _dt, &pch->work_list, node) 2316 list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
2323 if (desc->status == DONE) { 2317 if (desc->status == DONE) {
2324 if (pch->cyclic) 2318 if (!pch->cyclic)
2325 dma_cookie_complete(&desc->txd); 2319 dma_cookie_complete(&desc->txd);
2326 list_move_tail(&desc->node, &list); 2320 list_move_tail(&desc->node, &list);
2327 } 2321 }
@@ -2539,7 +2533,7 @@ static inline void _init_desc(struct dma_pl330_desc *desc)
2539} 2533}
2540 2534
2541/* Returns the number of descriptors added to the DMAC pool */ 2535/* Returns the number of descriptors added to the DMAC pool */
2542int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) 2536static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
2543{ 2537{
2544 struct dma_pl330_desc *desc; 2538 struct dma_pl330_desc *desc;
2545 unsigned long flags; 2539 unsigned long flags;
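The pl330 rework moves the rqd list hook from the internal _pl330_req into pl330_req, so completed requests can be queued on pl330->req_done and drained with list_for_each_entry_safe(). Dropping the lock around each callback lets the callback submit new work; the safe iterator tolerates the deletion because every entry is unlinked before the lock is released. Distilled into a generic helper (it assumes, as pl330_update() does, that no other path edits the list while it is being drained):

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_req {
	struct list_head rqd;
};

static void example_drain(struct list_head *done, spinlock_t *lock,
			  void (*cb)(struct example_req *))
{
	struct example_req *req, *tmp;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(req, tmp, done, rqd) {
		list_del(&req->rqd);	/* detach before unlocking */

		spin_unlock_irqrestore(lock, flags);
		cb(req);		/* runs unlocked; may queue new work */
		spin_lock_irqsave(lock, flags);
	}
	spin_unlock_irqrestore(lock, flags);
}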
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index ec78ccef9132..f5a73606217e 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -21,6 +21,8 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/spinlock.h> 22#include <linux/spinlock.h>
23 23
24#include "virt-dma.h"
25
24#define NR_PHY_CHAN 6 26#define NR_PHY_CHAN 6
25#define DMA_ALIGN 3 27#define DMA_ALIGN 3
26#define DMA_MAX_SIZE 0x1fff 28#define DMA_MAX_SIZE 0x1fff
@@ -72,12 +74,13 @@ struct sa11x0_dma_sg {
72}; 74};
73 75
74struct sa11x0_dma_desc { 76struct sa11x0_dma_desc {
75 struct dma_async_tx_descriptor tx; 77 struct virt_dma_desc vd;
78
76 u32 ddar; 79 u32 ddar;
77 size_t size; 80 size_t size;
81 unsigned period;
82 bool cyclic;
78 83
79 /* maybe protected by c->lock */
80 struct list_head node;
81 unsigned sglen; 84 unsigned sglen;
82 struct sa11x0_dma_sg sg[0]; 85 struct sa11x0_dma_sg sg[0];
83}; 86};
@@ -85,15 +88,11 @@ struct sa11x0_dma_desc {
85struct sa11x0_dma_phy; 88struct sa11x0_dma_phy;
86 89
87struct sa11x0_dma_chan { 90struct sa11x0_dma_chan {
88 struct dma_chan chan; 91 struct virt_dma_chan vc;
89 spinlock_t lock;
90 dma_cookie_t lc;
91 92
92 /* protected by c->lock */ 93 /* protected by c->vc.lock */
93 struct sa11x0_dma_phy *phy; 94 struct sa11x0_dma_phy *phy;
94 enum dma_status status; 95 enum dma_status status;
95 struct list_head desc_submitted;
96 struct list_head desc_issued;
97 96
98 /* protected by d->lock */ 97 /* protected by d->lock */
99 struct list_head node; 98 struct list_head node;
@@ -109,7 +108,7 @@ struct sa11x0_dma_phy {
109 108
110 struct sa11x0_dma_chan *vchan; 109 struct sa11x0_dma_chan *vchan;
111 110
112 /* Protected by c->lock */ 111 /* Protected by c->vc.lock */
113 unsigned sg_load; 112 unsigned sg_load;
114 struct sa11x0_dma_desc *txd_load; 113 struct sa11x0_dma_desc *txd_load;
115 unsigned sg_done; 114 unsigned sg_done;
@@ -127,13 +126,12 @@ struct sa11x0_dma_dev {
127 spinlock_t lock; 126 spinlock_t lock;
128 struct tasklet_struct task; 127 struct tasklet_struct task;
129 struct list_head chan_pending; 128 struct list_head chan_pending;
130 struct list_head desc_complete;
131 struct sa11x0_dma_phy phy[NR_PHY_CHAN]; 129 struct sa11x0_dma_phy phy[NR_PHY_CHAN];
132}; 130};
133 131
134static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan) 132static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
135{ 133{
136 return container_of(chan, struct sa11x0_dma_chan, chan); 134 return container_of(chan, struct sa11x0_dma_chan, vc.chan);
137} 135}
138 136
139static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) 137static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
@@ -141,27 +139,26 @@ static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
141 return container_of(dmadev, struct sa11x0_dma_dev, slave); 139 return container_of(dmadev, struct sa11x0_dma_dev, slave);
142} 140}
143 141
144static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx) 142static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
145{ 143{
146 return container_of(tx, struct sa11x0_dma_desc, tx); 144 struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
145
146 return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
147} 147}
148 148
149static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) 149static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
150{ 150{
151 if (list_empty(&c->desc_issued)) 151 kfree(container_of(vd, struct sa11x0_dma_desc, vd));
152 return NULL;
153
154 return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node);
155} 152}
156 153
157static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd) 154static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
158{ 155{
159 list_del(&txd->node); 156 list_del(&txd->vd.node);
160 p->txd_load = txd; 157 p->txd_load = txd;
161 p->sg_load = 0; 158 p->sg_load = 0;
162 159
163 dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n", 160 dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
164 p->num, txd, txd->tx.cookie, txd->ddar); 161 p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
165} 162}
166 163
167static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, 164static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
@@ -183,19 +180,24 @@ static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
183 return; 180 return;
184 181
185 if (p->sg_load == txd->sglen) { 182 if (p->sg_load == txd->sglen) {
186 struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); 183 if (!txd->cyclic) {
184 struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
187 185
188 /* 186 /*
189 * We have reached the end of the current descriptor. 187 * We have reached the end of the current descriptor.
190 * Peek at the next descriptor, and if compatible with 188 * Peek at the next descriptor, and if compatible with
191 * the current, start processing it. 189 * the current, start processing it.
192 */ 190 */
193 if (txn && txn->ddar == txd->ddar) { 191 if (txn && txn->ddar == txd->ddar) {
194 txd = txn; 192 txd = txn;
195 sa11x0_dma_start_desc(p, txn); 193 sa11x0_dma_start_desc(p, txn);
194 } else {
195 p->txd_load = NULL;
196 return;
197 }
196 } else { 198 } else {
197 p->txd_load = NULL; 199 /* Cyclic: reset back to beginning */
198 return; 200 p->sg_load = 0;
199 } 201 }
200 } 202 }
201 203
@@ -229,21 +231,21 @@ static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
229 struct sa11x0_dma_desc *txd = p->txd_done; 231 struct sa11x0_dma_desc *txd = p->txd_done;
230 232
231 if (++p->sg_done == txd->sglen) { 233 if (++p->sg_done == txd->sglen) {
232 struct sa11x0_dma_dev *d = p->dev; 234 if (!txd->cyclic) {
233 235 vchan_cookie_complete(&txd->vd);
234 dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n",
235 p->num, p->txd_done, p->txd_done->tx.cookie);
236
237 c->lc = txd->tx.cookie;
238 236
239 spin_lock(&d->lock); 237 p->sg_done = 0;
240 list_add_tail(&txd->node, &d->desc_complete); 238 p->txd_done = p->txd_load;
241 spin_unlock(&d->lock);
242 239
243 p->sg_done = 0; 240 if (!p->txd_done)
244 p->txd_done = p->txd_load; 241 tasklet_schedule(&p->dev->task);
242 } else {
243 if ((p->sg_done % txd->period) == 0)
244 vchan_cyclic_callback(&txd->vd);
245 245
246 tasklet_schedule(&d->task); 246 /* Cyclic: reset back to beginning */
247 p->sg_done = 0;
248 }
247 } 249 }
248 250
249 sa11x0_dma_start_sg(p, c); 251 sa11x0_dma_start_sg(p, c);
@@ -280,7 +282,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
280 if (c) { 282 if (c) {
281 unsigned long flags; 283 unsigned long flags;
282 284
283 spin_lock_irqsave(&c->lock, flags); 285 spin_lock_irqsave(&c->vc.lock, flags);
284 /* 286 /*
285 * Now that we're holding the lock, check that the vchan 287 * Now that we're holding the lock, check that the vchan
286 * really is associated with this pchan before touching the 288 * really is associated with this pchan before touching the
@@ -294,7 +296,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
294 if (dcsr & DCSR_DONEB) 296 if (dcsr & DCSR_DONEB)
295 sa11x0_dma_complete(p, c); 297 sa11x0_dma_complete(p, c);
296 } 298 }
297 spin_unlock_irqrestore(&c->lock, flags); 299 spin_unlock_irqrestore(&c->vc.lock, flags);
298 } 300 }
299 301
300 return IRQ_HANDLED; 302 return IRQ_HANDLED;
@@ -332,28 +334,15 @@ static void sa11x0_dma_tasklet(unsigned long arg)
332 struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg; 334 struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
333 struct sa11x0_dma_phy *p; 335 struct sa11x0_dma_phy *p;
334 struct sa11x0_dma_chan *c; 336 struct sa11x0_dma_chan *c;
335 struct sa11x0_dma_desc *txd, *txn;
336 LIST_HEAD(head);
337 unsigned pch, pch_alloc = 0; 337 unsigned pch, pch_alloc = 0;
338 338
339 dev_dbg(d->slave.dev, "tasklet enter\n"); 339 dev_dbg(d->slave.dev, "tasklet enter\n");
340 340
341 /* Get the completed tx descriptors */ 341 list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
342 spin_lock_irq(&d->lock); 342 spin_lock_irq(&c->vc.lock);
343 list_splice_init(&d->desc_complete, &head);
344 spin_unlock_irq(&d->lock);
345
346 list_for_each_entry(txd, &head, node) {
347 c = to_sa11x0_dma_chan(txd->tx.chan);
348
349 dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n",
350 c, txd, txd->tx.cookie);
351
352 spin_lock_irq(&c->lock);
353 p = c->phy; 343 p = c->phy;
354 if (p) { 344 if (p && !p->txd_done) {
355 if (!p->txd_done) 345 sa11x0_dma_start_txd(c);
356 sa11x0_dma_start_txd(c);
357 if (!p->txd_done) { 346 if (!p->txd_done) {
358 /* No current txd associated with this channel */ 347 /* No current txd associated with this channel */
359 dev_dbg(d->slave.dev, "pchan %u: free\n", p->num); 348 dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);
@@ -363,7 +352,7 @@ static void sa11x0_dma_tasklet(unsigned long arg)
363 p->vchan = NULL; 352 p->vchan = NULL;
364 } 353 }
365 } 354 }
366 spin_unlock_irq(&c->lock); 355 spin_unlock_irq(&c->vc.lock);
367 } 356 }
368 357
369 spin_lock_irq(&d->lock); 358 spin_lock_irq(&d->lock);
@@ -380,7 +369,7 @@ static void sa11x0_dma_tasklet(unsigned long arg)
380 /* Mark this channel allocated */ 369 /* Mark this channel allocated */
381 p->vchan = c; 370 p->vchan = c;
382 371
383 dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c); 372 dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
384 } 373 }
385 } 374 }
386 spin_unlock_irq(&d->lock); 375 spin_unlock_irq(&d->lock);
@@ -390,42 +379,18 @@ static void sa11x0_dma_tasklet(unsigned long arg)
390 p = &d->phy[pch]; 379 p = &d->phy[pch];
391 c = p->vchan; 380 c = p->vchan;
392 381
393 spin_lock_irq(&c->lock); 382 spin_lock_irq(&c->vc.lock);
394 c->phy = p; 383 c->phy = p;
395 384
396 sa11x0_dma_start_txd(c); 385 sa11x0_dma_start_txd(c);
397 spin_unlock_irq(&c->lock); 386 spin_unlock_irq(&c->vc.lock);
398 } 387 }
399 } 388 }
400 389
401 /* Now free the completed tx descriptor, and call their callbacks */
402 list_for_each_entry_safe(txd, txn, &head, node) {
403 dma_async_tx_callback callback = txd->tx.callback;
404 void *callback_param = txd->tx.callback_param;
405
406 dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n",
407 txd, txd->tx.cookie);
408
409 kfree(txd);
410
411 if (callback)
412 callback(callback_param);
413 }
414
415 dev_dbg(d->slave.dev, "tasklet exit\n"); 390 dev_dbg(d->slave.dev, "tasklet exit\n");
416} 391}
417 392
418 393
419static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head)
420{
421 struct sa11x0_dma_desc *txd, *txn;
422
423 list_for_each_entry_safe(txd, txn, head, node) {
424 dev_dbg(d->slave.dev, "txd %p: freeing\n", txd);
425 kfree(txd);
426 }
427}
428
429static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan) 394static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
430{ 395{
431 return 0; 396 return 0;
@@ -436,18 +401,12 @@ static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
436 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 401 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
437 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); 402 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
438 unsigned long flags; 403 unsigned long flags;
439 LIST_HEAD(head);
440 404
441 spin_lock_irqsave(&c->lock, flags); 405 spin_lock_irqsave(&d->lock, flags);
442 spin_lock(&d->lock);
443 list_del_init(&c->node); 406 list_del_init(&c->node);
444 spin_unlock(&d->lock); 407 spin_unlock_irqrestore(&d->lock, flags);
445
446 list_splice_tail_init(&c->desc_submitted, &head);
447 list_splice_tail_init(&c->desc_issued, &head);
448 spin_unlock_irqrestore(&c->lock, flags);
449 408
450 sa11x0_dma_desc_free(d, &head); 409 vchan_free_chan_resources(&c->vc);
451} 410}
452 411
453static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p) 412static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
@@ -472,33 +431,47 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
472 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 431 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
473 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); 432 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
474 struct sa11x0_dma_phy *p; 433 struct sa11x0_dma_phy *p;
475 struct sa11x0_dma_desc *txd; 434 struct virt_dma_desc *vd;
476 dma_cookie_t last_used, last_complete;
477 unsigned long flags; 435 unsigned long flags;
478 enum dma_status ret; 436 enum dma_status ret;
479 size_t bytes = 0;
480
481 last_used = c->chan.cookie;
482 last_complete = c->lc;
483 437
484 ret = dma_async_is_complete(cookie, last_complete, last_used); 438 ret = dma_cookie_status(&c->vc.chan, cookie, state);
485 if (ret == DMA_SUCCESS) { 439 if (ret == DMA_SUCCESS)
486 dma_set_tx_state(state, last_complete, last_used, 0);
487 return ret; 440 return ret;
488 }
489 441
490 spin_lock_irqsave(&c->lock, flags); 442 if (!state)
443 return c->status;
444
445 spin_lock_irqsave(&c->vc.lock, flags);
491 p = c->phy; 446 p = c->phy;
492 ret = c->status;
493 if (p) {
494 dma_addr_t addr = sa11x0_dma_pos(p);
495 447
496 dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); 448 /*
449 * If the cookie is on our issue queue, then the residue is
450 * its total size.
451 */
452 vd = vchan_find_desc(&c->vc, cookie);
453 if (vd) {
454 state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
455 } else if (!p) {
456 state->residue = 0;
457 } else {
458 struct sa11x0_dma_desc *txd;
459 size_t bytes = 0;
497 460
498 txd = p->txd_done; 461 if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
462 txd = p->txd_done;
463 else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
464 txd = p->txd_load;
465 else
466 txd = NULL;
467
468 ret = c->status;
499 if (txd) { 469 if (txd) {
470 dma_addr_t addr = sa11x0_dma_pos(p);
500 unsigned i; 471 unsigned i;
501 472
473 dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
474
502 for (i = 0; i < txd->sglen; i++) { 475 for (i = 0; i < txd->sglen; i++) {
503 dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n", 476 dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
504 i, txd->sg[i].addr, txd->sg[i].len); 477 i, txd->sg[i].addr, txd->sg[i].len);
@@ -521,17 +494,11 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
521 bytes += txd->sg[i].len; 494 bytes += txd->sg[i].len;
522 } 495 }
523 } 496 }
524 if (txd != p->txd_load && p->txd_load) 497 state->residue = bytes;
525 bytes += p->txd_load->size;
526 }
527 list_for_each_entry(txd, &c->desc_issued, node) {
528 bytes += txd->size;
529 } 498 }
530 spin_unlock_irqrestore(&c->lock, flags); 499 spin_unlock_irqrestore(&c->vc.lock, flags);
531
532 dma_set_tx_state(state, last_complete, last_used, bytes);
533 500
534 dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes); 501 dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue);
535 502
536 return ret; 503 return ret;
537} 504}
@@ -547,40 +514,20 @@ static void sa11x0_dma_issue_pending(struct dma_chan *chan)
547 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); 514 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
548 unsigned long flags; 515 unsigned long flags;
549 516
550 spin_lock_irqsave(&c->lock, flags); 517 spin_lock_irqsave(&c->vc.lock, flags);
551 list_splice_tail_init(&c->desc_submitted, &c->desc_issued); 518 if (vchan_issue_pending(&c->vc)) {
552 if (!list_empty(&c->desc_issued)) { 519 if (!c->phy) {
553 spin_lock(&d->lock); 520 spin_lock(&d->lock);
554 if (!c->phy && list_empty(&c->node)) { 521 if (list_empty(&c->node)) {
555 list_add_tail(&c->node, &d->chan_pending); 522 list_add_tail(&c->node, &d->chan_pending);
556 tasklet_schedule(&d->task); 523 tasklet_schedule(&d->task);
557 dev_dbg(d->slave.dev, "vchan %p: issued\n", c); 524 dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
525 }
526 spin_unlock(&d->lock);
558 } 527 }
559 spin_unlock(&d->lock);
560 } else 528 } else
561 dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c); 529 dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
562 spin_unlock_irqrestore(&c->lock, flags); 530 spin_unlock_irqrestore(&c->vc.lock, flags);
563}
564
565static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx)
566{
567 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan);
568 struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx);
569 unsigned long flags;
570
571 spin_lock_irqsave(&c->lock, flags);
572 c->chan.cookie += 1;
573 if (c->chan.cookie < 0)
574 c->chan.cookie = 1;
575 txd->tx.cookie = c->chan.cookie;
576
577 list_add_tail(&txd->node, &c->desc_submitted);
578 spin_unlock_irqrestore(&c->lock, flags);
579
580 dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n",
581 c, txd, txd->tx.cookie);
582
583 return txd->tx.cookie;
584} 531}
585 532
586static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( 533static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
@@ -596,7 +543,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
596 /* SA11x0 channels can only operate in their native direction */ 543 /* SA11x0 channels can only operate in their native direction */
597 if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { 544 if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
598 dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", 545 dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
599 c, c->ddar, dir); 546 &c->vc, c->ddar, dir);
600 return NULL; 547 return NULL;
601 } 548 }
602 549
@@ -612,14 +559,14 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
612 j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1; 559 j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
613 if (addr & DMA_ALIGN) { 560 if (addr & DMA_ALIGN) {
614 dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n", 561 dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
615 c, addr); 562 &c->vc, addr);
616 return NULL; 563 return NULL;
617 } 564 }
618 } 565 }
619 566
620 txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC); 567 txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
621 if (!txd) { 568 if (!txd) {
622 dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c); 569 dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
623 return NULL; 570 return NULL;
624 } 571 }
625 572
@@ -655,17 +602,73 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
655 } while (len); 602 } while (len);
656 } 603 }
657 604
658 dma_async_tx_descriptor_init(&txd->tx, &c->chan);
659 txd->tx.flags = flags;
660 txd->tx.tx_submit = sa11x0_dma_tx_submit;
661 txd->ddar = c->ddar; 605 txd->ddar = c->ddar;
662 txd->size = size; 606 txd->size = size;
663 txd->sglen = j; 607 txd->sglen = j;
664 608
665 dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n", 609 dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
666 c, txd, txd->size, txd->sglen); 610 &c->vc, &txd->vd, txd->size, txd->sglen);
667 611
668 return &txd->tx; 612 return vchan_tx_prep(&c->vc, &txd->vd, flags);
613}
614
615static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
616 struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
617 enum dma_transfer_direction dir, void *context)
618{
619 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
620 struct sa11x0_dma_desc *txd;
621 unsigned i, j, k, sglen, sgperiod;
622
623 /* SA11x0 channels can only operate in their native direction */
624 if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
625 dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
626 &c->vc, c->ddar, dir);
627 return NULL;
628 }
629
630 sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
631 sglen = size * sgperiod / period;
632
633 /* Do not allow zero-sized txds */
634 if (sglen == 0)
635 return NULL;
636
637 txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC);
638 if (!txd) {
639 dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
640 return NULL;
641 }
642
643 for (i = k = 0; i < size / period; i++) {
644 size_t tlen, len = period;
645
646 for (j = 0; j < sgperiod; j++, k++) {
647 tlen = len;
648
649 if (tlen > DMA_MAX_SIZE) {
650 unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
651 tlen = (tlen / mult) & ~DMA_ALIGN;
652 }
653
654 txd->sg[k].addr = addr;
655 txd->sg[k].len = tlen;
656 addr += tlen;
657 len -= tlen;
658 }
659
660 WARN_ON(len != 0);
661 }
662
663 WARN_ON(k != sglen);
664
665 txd->ddar = c->ddar;
666 txd->size = size;
667 txd->sglen = sglen;
668 txd->cyclic = 1;
669 txd->period = sgperiod;
670
671 return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
669} 672}
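To make the splitting arithmetic concrete with illustrative numbers: if DMA_MAX_SIZE & ~DMA_ALIGN worked out to 8192 bytes, a 16 KiB period would need sgperiod = DIV_ROUND_UP(16384, 8192) = 2 list entries, and a four-period buffer (size = 65536) would get sglen = 65536 * 2 / 16384 = 8 entries, two per period, with the inner loop trimming each entry to an aligned length no larger than DMA_MAX_SIZE.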
670 673
671static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg) 674static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
@@ -695,8 +698,8 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c
695 if (maxburst == 8) 698 if (maxburst == 8)
696 ddar |= DDAR_BS; 699 ddar |= DDAR_BS;
697 700
698 dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n", 701 dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
699 c, addr, width, maxburst); 702 &c->vc, addr, width, maxburst);
700 703
701 c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6; 704 c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
702 705
@@ -718,16 +721,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
718 return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg); 721 return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);
719 722
720 case DMA_TERMINATE_ALL: 723 case DMA_TERMINATE_ALL:
721 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c); 724 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
722 /* Clear the tx descriptor lists */ 725 /* Clear the tx descriptor lists */
723 spin_lock_irqsave(&c->lock, flags); 726 spin_lock_irqsave(&c->vc.lock, flags);
724 list_splice_tail_init(&c->desc_submitted, &head); 727 vchan_get_all_descriptors(&c->vc, &head);
725 list_splice_tail_init(&c->desc_issued, &head);
726 728
727 p = c->phy; 729 p = c->phy;
728 if (p) { 730 if (p) {
729 struct sa11x0_dma_desc *txd, *txn;
730
731 dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num); 731 dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
732 /* vchan is assigned to a pchan - stop the channel */ 732 /* vchan is assigned to a pchan - stop the channel */
733 writel(DCSR_RUN | DCSR_IE | 733 writel(DCSR_RUN | DCSR_IE |
@@ -735,17 +735,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
735 DCSR_STRTB | DCSR_DONEB, 735 DCSR_STRTB | DCSR_DONEB,
736 p->base + DMA_DCSR_C); 736 p->base + DMA_DCSR_C);
737 737
738 list_for_each_entry_safe(txd, txn, &d->desc_complete, node)
739 if (txd->tx.chan == &c->chan)
740 list_move(&txd->node, &head);
741
742 if (p->txd_load) { 738 if (p->txd_load) {
743 if (p->txd_load != p->txd_done) 739 if (p->txd_load != p->txd_done)
744 list_add_tail(&p->txd_load->node, &head); 740 list_add_tail(&p->txd_load->vd.node, &head);
745 p->txd_load = NULL; 741 p->txd_load = NULL;
746 } 742 }
747 if (p->txd_done) { 743 if (p->txd_done) {
748 list_add_tail(&p->txd_done->node, &head); 744 list_add_tail(&p->txd_done->vd.node, &head);
749 p->txd_done = NULL; 745 p->txd_done = NULL;
750 } 746 }
751 c->phy = NULL; 747 c->phy = NULL;
@@ -754,14 +750,14 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
754 spin_unlock(&d->lock); 750 spin_unlock(&d->lock);
755 tasklet_schedule(&d->task); 751 tasklet_schedule(&d->task);
756 } 752 }
757 spin_unlock_irqrestore(&c->lock, flags); 753 spin_unlock_irqrestore(&c->vc.lock, flags);
758 sa11x0_dma_desc_free(d, &head); 754 vchan_dma_desc_free_list(&c->vc, &head);
759 ret = 0; 755 ret = 0;
760 break; 756 break;
761 757
762 case DMA_PAUSE: 758 case DMA_PAUSE:
763 dev_dbg(d->slave.dev, "vchan %p: pause\n", c); 759 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
764 spin_lock_irqsave(&c->lock, flags); 760 spin_lock_irqsave(&c->vc.lock, flags);
765 if (c->status == DMA_IN_PROGRESS) { 761 if (c->status == DMA_IN_PROGRESS) {
766 c->status = DMA_PAUSED; 762 c->status = DMA_PAUSED;
767 763
@@ -774,26 +770,26 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
774 spin_unlock(&d->lock); 770 spin_unlock(&d->lock);
775 } 771 }
776 } 772 }
777 spin_unlock_irqrestore(&c->lock, flags); 773 spin_unlock_irqrestore(&c->vc.lock, flags);
778 ret = 0; 774 ret = 0;
779 break; 775 break;
780 776
781 case DMA_RESUME: 777 case DMA_RESUME:
782 dev_dbg(d->slave.dev, "vchan %p: resume\n", c); 778 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
783 spin_lock_irqsave(&c->lock, flags); 779 spin_lock_irqsave(&c->vc.lock, flags);
784 if (c->status == DMA_PAUSED) { 780 if (c->status == DMA_PAUSED) {
785 c->status = DMA_IN_PROGRESS; 781 c->status = DMA_IN_PROGRESS;
786 782
787 p = c->phy; 783 p = c->phy;
788 if (p) { 784 if (p) {
789 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S); 785 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
790 } else if (!list_empty(&c->desc_issued)) { 786 } else if (!list_empty(&c->vc.desc_issued)) {
791 spin_lock(&d->lock); 787 spin_lock(&d->lock);
792 list_add_tail(&c->node, &d->chan_pending); 788 list_add_tail(&c->node, &d->chan_pending);
793 spin_unlock(&d->lock); 789 spin_unlock(&d->lock);
794 } 790 }
795 } 791 }
796 spin_unlock_irqrestore(&c->lock, flags); 792 spin_unlock_irqrestore(&c->vc.lock, flags);
797 ret = 0; 793 ret = 0;
798 break; 794 break;
799 795
@@ -853,15 +849,13 @@ static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
853 return -ENOMEM; 849 return -ENOMEM;
854 } 850 }
855 851
856 c->chan.device = dmadev;
857 c->status = DMA_IN_PROGRESS; 852 c->status = DMA_IN_PROGRESS;
858 c->ddar = chan_desc[i].ddar; 853 c->ddar = chan_desc[i].ddar;
859 c->name = chan_desc[i].name; 854 c->name = chan_desc[i].name;
860 spin_lock_init(&c->lock);
861 INIT_LIST_HEAD(&c->desc_submitted);
862 INIT_LIST_HEAD(&c->desc_issued);
863 INIT_LIST_HEAD(&c->node); 855 INIT_LIST_HEAD(&c->node);
864 list_add_tail(&c->chan.device_node, &dmadev->channels); 856
857 c->vc.desc_free = sa11x0_dma_free_desc;
858 vchan_init(&c->vc, dmadev);
865 } 859 }
866 860
867 return dma_async_device_register(dmadev); 861 return dma_async_device_register(dmadev);
@@ -890,8 +884,9 @@ static void sa11x0_dma_free_channels(struct dma_device *dmadev)
890{ 884{
891 struct sa11x0_dma_chan *c, *cn; 885 struct sa11x0_dma_chan *c, *cn;
892 886
893 list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) { 887 list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
894 list_del(&c->chan.device_node); 888 list_del(&c->vc.chan.device_node);
889 tasklet_kill(&c->vc.task);
895 kfree(c); 890 kfree(c);
896 } 891 }
897} 892}
@@ -915,7 +910,6 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
915 910
916 spin_lock_init(&d->lock); 911 spin_lock_init(&d->lock);
917 INIT_LIST_HEAD(&d->chan_pending); 912 INIT_LIST_HEAD(&d->chan_pending);
918 INIT_LIST_HEAD(&d->desc_complete);
919 913
920 d->base = ioremap(res->start, resource_size(res)); 914 d->base = ioremap(res->start, resource_size(res));
921 if (!d->base) { 915 if (!d->base) {
@@ -947,7 +941,9 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
947 } 941 }
948 942
949 dma_cap_set(DMA_SLAVE, d->slave.cap_mask); 943 dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
944 dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
950 d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg; 945 d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
946 d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
951 ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev); 947 ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
952 if (ret) { 948 if (ret) {
953 dev_warn(d->slave.dev, "failed to register slave async device: %d\n", 949 dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
new file mode 100644
index 000000000000..54ae9572b0ac
--- /dev/null
+++ b/drivers/dma/sh/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_SH_DMAE) += shdma-base.o
2obj-$(CONFIG_SH_DMAE) += shdma.o
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
new file mode 100644
index 000000000000..f4cd946d259d
--- /dev/null
+++ b/drivers/dma/sh/shdma-base.c
@@ -0,0 +1,943 @@
1/*
2 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
3 *
4 * extracted from shdma.c
5 *
6 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
7 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
8 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
9 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
10 *
11 * This is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/delay.h>
17#include <linux/shdma-base.h>
18#include <linux/dmaengine.h>
19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/module.h>
22#include <linux/pm_runtime.h>
23#include <linux/slab.h>
24#include <linux/spinlock.h>
25
26#include "../dmaengine.h"
27
28/* DMA descriptor control */
29enum shdma_desc_status {
30 DESC_IDLE,
31 DESC_PREPARED,
32 DESC_SUBMITTED,
33 DESC_COMPLETED, /* completed, have to call callback */
34 DESC_WAITING, /* callback called, waiting for ack / re-submit */
35};
36
37#define NR_DESCS_PER_CHANNEL 32
38
39#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
40#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)
41
42/*
43 * For slave DMA we assume that there is a finite number of DMA slaves in the
44 * system, and that each such slave can only use a finite number of channels.
45 * We use slave channel IDs to make sure that no such slave channel ID is
46 * allocated more than once.
47 */
48static unsigned int slave_num = 256;
49module_param(slave_num, uint, 0444);
50
51/* A bitmask with slave_num bits */
52static unsigned long *shdma_slave_used;
53
54 /* Called under spin_lock_irq(&schan->chan_lock) */
55static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
56{
57 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
58 const struct shdma_ops *ops = sdev->ops;
59 struct shdma_desc *sdesc;
60
61 /* DMA work check */
62 if (ops->channel_busy(schan))
63 return;
64
65 /* Find the first not transferred descriptor */
66 list_for_each_entry(sdesc, &schan->ld_queue, node)
67 if (sdesc->mark == DESC_SUBMITTED) {
68 ops->start_xfer(schan, sdesc);
69 break;
70 }
71}
72
73static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
74{
75 struct shdma_desc *chunk, *c, *desc =
76 container_of(tx, struct shdma_desc, async_tx),
77 *last = desc;
78 struct shdma_chan *schan = to_shdma_chan(tx->chan);
79 dma_async_tx_callback callback = tx->callback;
80 dma_cookie_t cookie;
81 bool power_up;
82
83 spin_lock_irq(&schan->chan_lock);
84
85 power_up = list_empty(&schan->ld_queue);
86
87 cookie = dma_cookie_assign(tx);
88
89 /* Mark all chunks of this descriptor as submitted, move to the queue */
90 list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
91 /*
92 * All chunks are on the global ld_free, so we have to find
93 * the end of the chain ourselves
94 */
95 if (chunk != desc && (chunk->mark == DESC_IDLE ||
96 chunk->async_tx.cookie > 0 ||
97 chunk->async_tx.cookie == -EBUSY ||
98 &chunk->node == &schan->ld_free))
99 break;
100 chunk->mark = DESC_SUBMITTED;
101 /* Callback goes to the last chunk */
102 chunk->async_tx.callback = NULL;
103 chunk->cookie = cookie;
104 list_move_tail(&chunk->node, &schan->ld_queue);
105 last = chunk;
106
107 dev_dbg(schan->dev, "submit #%d@%p on %d\n",
108 tx->cookie, &last->async_tx, schan->id);
109 }
110
111 last->async_tx.callback = callback;
112 last->async_tx.callback_param = tx->callback_param;
113
114 if (power_up) {
115 int ret;
116 schan->pm_state = SHDMA_PM_BUSY;
117
118 ret = pm_runtime_get(schan->dev);
119
120 spin_unlock_irq(&schan->chan_lock);
121 if (ret < 0)
122 dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
123
124 pm_runtime_barrier(schan->dev);
125
126 spin_lock_irq(&schan->chan_lock);
127
128 /* Have we been reset while waiting? */
129 if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
130 struct shdma_dev *sdev =
131 to_shdma_dev(schan->dma_chan.device);
132 const struct shdma_ops *ops = sdev->ops;
133 dev_dbg(schan->dev, "Bring up channel %d\n",
134 schan->id);
135 /*
136 * TODO: .setup_xfer() might fail on some platforms.
137 * Make it int then, on error remove chunks from the
138 * queue again
139 */
140 ops->setup_xfer(schan, schan->slave_id);
141
142 if (schan->pm_state == SHDMA_PM_PENDING)
143 shdma_chan_xfer_ld_queue(schan);
144 schan->pm_state = SHDMA_PM_ESTABLISHED;
145 }
146 } else {
147 /*
148 * Tell .device_issue_pending() not to run the queue, interrupts
149 * will do it anyway
150 */
151 schan->pm_state = SHDMA_PM_PENDING;
152 }
153
154 spin_unlock_irq(&schan->chan_lock);
155
156 return cookie;
157}
158
159/* Called with schan->chan_lock held */
160static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
161{
162 struct shdma_desc *sdesc;
163
164 list_for_each_entry(sdesc, &schan->ld_free, node)
165 if (sdesc->mark != DESC_PREPARED) {
166 BUG_ON(sdesc->mark != DESC_IDLE);
167 list_del(&sdesc->node);
168 return sdesc;
169 }
170
171 return NULL;
172}
173
174static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
175{
176 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
177 const struct shdma_ops *ops = sdev->ops;
178 int ret;
179
180 if (slave_id < 0 || slave_id >= slave_num)
181 return -EINVAL;
182
183 if (test_and_set_bit(slave_id, shdma_slave_used))
184 return -EBUSY;
185
186 ret = ops->set_slave(schan, slave_id, false);
187 if (ret < 0) {
188 clear_bit(slave_id, shdma_slave_used);
189 return ret;
190 }
191
192 schan->slave_id = slave_id;
193
194 return 0;
195}
196
197/*
198 * This is the standard shdma filter function to be used as a replacement for the
199 * "old" method, which uses the .private pointer. If for some reason you allocate a
200 * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter
201 * parameter. If this filter is used, the slave driver, after calling
202 * dma_request_channel(), will also have to call dmaengine_slave_config() with
203 * .slave_id, .direction, and either .src_addr or .dst_addr set.
204 * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
205 * capability! If this becomes a requirement, hardware glue drivers using this
206 * service would have to provide their own filters, which would first check
207 * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
208 * this, and only then, in case of a match, call this common filter.
209 */
210bool shdma_chan_filter(struct dma_chan *chan, void *arg)
211{
212 struct shdma_chan *schan = to_shdma_chan(chan);
213 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
214 const struct shdma_ops *ops = sdev->ops;
215 int slave_id = (int)arg;
216 int ret;
217
218 if (slave_id < 0)
219 /* No slave requested - arbitrary channel */
220 return true;
221
222 if (slave_id >= slave_num)
223 return false;
224
225 ret = ops->set_slave(schan, slave_id, true);
226 if (ret < 0)
227 return false;
228
229 return true;
230}
231EXPORT_SYMBOL(shdma_chan_filter);
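On the consumer side, the flow the comment above prescribes would look roughly like this (a hedged sketch; MY_SLAVE_ID and fifo_phys_addr are illustrative placeholders, the rest is the standard dmaengine API):

    /* Illustrative slave-channel request using shdma_chan_filter(). */
    dma_cap_mask_t mask;
    struct dma_chan *chan;
    struct dma_slave_config cfg = {
            .slave_id  = MY_SLAVE_ID,       /* hypothetical slave ID */
            .direction = DMA_MEM_TO_DEV,
            .dst_addr  = fifo_phys_addr,    /* hypothetical device FIFO */
    };

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);
    chan = dma_request_channel(mask, shdma_chan_filter, (void *)MY_SLAVE_ID);
    if (chan && dmaengine_slave_config(chan, &cfg) < 0)
            dma_release_channel(chan);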
232
233static int shdma_alloc_chan_resources(struct dma_chan *chan)
234{
235 struct shdma_chan *schan = to_shdma_chan(chan);
236 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
237 const struct shdma_ops *ops = sdev->ops;
238 struct shdma_desc *desc;
239 struct shdma_slave *slave = chan->private;
240 int ret, i;
241
242 /*
243 * This relies on the guarantee from dmaengine that alloc_chan_resources
244 * never runs concurrently with itself or free_chan_resources.
245 */
246 if (slave) {
247 /* Legacy mode: .private is set in filter */
248 ret = shdma_setup_slave(schan, slave->slave_id);
249 if (ret < 0)
250 goto esetslave;
251 } else {
252 schan->slave_id = -EINVAL;
253 }
254
255 schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
256 sdev->desc_size, GFP_KERNEL);
257 if (!schan->desc) {
258 ret = -ENOMEM;
259 goto edescalloc;
260 }
261 schan->desc_num = NR_DESCS_PER_CHANNEL;
262
263 for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
264 desc = ops->embedded_desc(schan->desc, i);
265 dma_async_tx_descriptor_init(&desc->async_tx,
266 &schan->dma_chan);
267 desc->async_tx.tx_submit = shdma_tx_submit;
268 desc->mark = DESC_IDLE;
269
270 list_add(&desc->node, &schan->ld_free);
271 }
272
273 return NR_DESCS_PER_CHANNEL;
274
275edescalloc:
276 if (slave)
277esetslave:
278 clear_bit(slave->slave_id, shdma_slave_used);
279 chan->private = NULL;
280 return ret;
281}
282
283static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
284{
285 struct shdma_desc *desc, *_desc;
286 /* Is the "exposed" head of a chain acked? */
287 bool head_acked = false;
288 dma_cookie_t cookie = 0;
289 dma_async_tx_callback callback = NULL;
290 void *param = NULL;
291 unsigned long flags;
292
293 spin_lock_irqsave(&schan->chan_lock, flags);
294 list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
295 struct dma_async_tx_descriptor *tx = &desc->async_tx;
296
297 BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
298 BUG_ON(desc->mark != DESC_SUBMITTED &&
299 desc->mark != DESC_COMPLETED &&
300 desc->mark != DESC_WAITING);
301
302 /*
303 * queue is ordered, and we use this loop to (1) clean up all
304 * completed descriptors, and to (2) update descriptor flags of
305 * any chunks in a (partially) completed chain
306 */
307 if (!all && desc->mark == DESC_SUBMITTED &&
308 desc->cookie != cookie)
309 break;
310
311 if (tx->cookie > 0)
312 cookie = tx->cookie;
313
314 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
315 if (schan->dma_chan.completed_cookie != desc->cookie - 1)
316 dev_dbg(schan->dev,
317 "Completing cookie %d, expected %d\n",
318 desc->cookie,
319 schan->dma_chan.completed_cookie + 1);
320 schan->dma_chan.completed_cookie = desc->cookie;
321 }
322
323 /* Call callback on the last chunk */
324 if (desc->mark == DESC_COMPLETED && tx->callback) {
325 desc->mark = DESC_WAITING;
326 callback = tx->callback;
327 param = tx->callback_param;
328 dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
329 tx->cookie, tx, schan->id);
330 BUG_ON(desc->chunks != 1);
331 break;
332 }
333
334 if (tx->cookie > 0 || tx->cookie == -EBUSY) {
335 if (desc->mark == DESC_COMPLETED) {
336 BUG_ON(tx->cookie < 0);
337 desc->mark = DESC_WAITING;
338 }
339 head_acked = async_tx_test_ack(tx);
340 } else {
341 switch (desc->mark) {
342 case DESC_COMPLETED:
343 desc->mark = DESC_WAITING;
344 /* Fall through */
345 case DESC_WAITING:
346 if (head_acked)
347 async_tx_ack(&desc->async_tx);
348 }
349 }
350
351 dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
352 tx, tx->cookie);
353
354 if (((desc->mark == DESC_COMPLETED ||
355 desc->mark == DESC_WAITING) &&
356 async_tx_test_ack(&desc->async_tx)) || all) {
357 /* Remove from ld_queue list */
358 desc->mark = DESC_IDLE;
359
360 list_move(&desc->node, &schan->ld_free);
361
362 if (list_empty(&schan->ld_queue)) {
363 dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
364 pm_runtime_put(schan->dev);
365 schan->pm_state = SHDMA_PM_ESTABLISHED;
366 }
367 }
368 }
369
370 if (all && !callback)
371 /*
372 * Terminating and the loop completed normally: forgive
373 * uncompleted cookies
374 */
375 schan->dma_chan.completed_cookie = schan->dma_chan.cookie;
376
377 spin_unlock_irqrestore(&schan->chan_lock, flags);
378
379 if (callback)
380 callback(param);
381
382 return callback;
383}
384
385/*
386 * shdma_chan_ld_cleanup - Clean up link descriptors
387 *
388 * Clean up the ld_queue of a DMA channel.
389 */
390static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
391{
392 while (__ld_cleanup(schan, all))
393 ;
394}
395
396/*
397 * shdma_free_chan_resources - Free all resources of the channel.
398 */
399static void shdma_free_chan_resources(struct dma_chan *chan)
400{
401 struct shdma_chan *schan = to_shdma_chan(chan);
402 struct shdma_dev *sdev = to_shdma_dev(chan->device);
403 const struct shdma_ops *ops = sdev->ops;
404 LIST_HEAD(list);
405
406 /* Protect against ISR */
407 spin_lock_irq(&schan->chan_lock);
408 ops->halt_channel(schan);
409 spin_unlock_irq(&schan->chan_lock);
410
411 /* Now no new interrupts will occur */
412
413 /* Prepared and not submitted descriptors can still be on the queue */
414 if (!list_empty(&schan->ld_queue))
415 shdma_chan_ld_cleanup(schan, true);
416
417 if (schan->slave_id >= 0) {
418 /* The caller is holding dma_list_mutex */
419 clear_bit(schan->slave_id, shdma_slave_used);
420 chan->private = NULL;
421 }
422
423 spin_lock_irq(&schan->chan_lock);
424
425 list_splice_init(&schan->ld_free, &list);
426 schan->desc_num = 0;
427
428 spin_unlock_irq(&schan->chan_lock);
429
430 kfree(schan->desc);
431}
432
433/**
434 * shdma_add_desc - get, set up and return one transfer descriptor
435 * @schan: DMA channel
436 * @flags: DMA transfer flags
437 * @dst: destination DMA address, incremented when direction equals
438 * DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
439 * @src: source DMA address, incremented when direction equals
440 * DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
441 * @len: DMA transfer length
442 * @first: if *first is NULL, it is set to this descriptor and its cookie to -EBUSY
443 * @direction: needed for slave DMA to decide which address to keep constant,
444 * equals DMA_MEM_TO_MEM for MEMCPY
445 * Returns: the new descriptor on success, or NULL on error
446 * Locks: called with schan->chan_lock held
447 */
448static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
449 unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
450 struct shdma_desc **first, enum dma_transfer_direction direction)
451{
452 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
453 const struct shdma_ops *ops = sdev->ops;
454 struct shdma_desc *new;
455 size_t copy_size = *len;
456
457 if (!copy_size)
458 return NULL;
459
460 /* Allocate the link descriptor from the free list */
461 new = shdma_get_desc(schan);
462 if (!new) {
463 dev_err(schan->dev, "No free link descriptor available\n");
464 return NULL;
465 }
466
467 ops->desc_setup(schan, new, *src, *dst, &copy_size);
468
469 if (!*first) {
470 /* First desc */
471 new->async_tx.cookie = -EBUSY;
472 *first = new;
473 } else {
474 /* Other desc - invisible to the user */
475 new->async_tx.cookie = -EINVAL;
476 }
477
478 dev_dbg(schan->dev,
479 "chaining (%u/%u)@%x -> %x with %p, cookie %d\n",
480 copy_size, *len, *src, *dst, &new->async_tx,
481 new->async_tx.cookie);
482
483 new->mark = DESC_PREPARED;
484 new->async_tx.flags = flags;
485 new->direction = direction;
486 new->partial = 0;
487
488 *len -= copy_size;
489 if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
490 *src += copy_size;
491 if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
492 *dst += copy_size;
493
494 return new;
495}
496
497/*
498 * shdma_prep_sg - prepare transfer descriptors from an SG list
499 *
500 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
501 * converted to scatter-gather to guarantee consistent locking and correct
502 * list manipulation. For slave DMA, direction carries the usual meaning, and,
503 * logically, the SG list is RAM and the addr variable contains the slave
504 * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
505 * DMA_MEM_TO_MEM and the SG list contains one element pointing at the source buffer.
506 */
507static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
508 struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
509 enum dma_transfer_direction direction, unsigned long flags)
510{
511 struct scatterlist *sg;
512 struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
513 LIST_HEAD(tx_list);
514 int chunks = 0;
515 unsigned long irq_flags;
516 int i;
517
518 for_each_sg(sgl, sg, sg_len, i)
519 chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);
520
521 /* Have to lock the whole loop to protect against concurrent release */
522 spin_lock_irqsave(&schan->chan_lock, irq_flags);
523
524 /*
525 * Chaining:
526 * the first descriptor is what the user deals with in all API calls; its
527 * cookie is at first set to -EBUSY, and at tx-submit to a positive
528 * number;
529 * if more than one chunk is needed, further chunks have cookie = -EINVAL;
530 * the last chunk, if not equal to the first, has cookie = -ENOSPC;
531 * all chunks are linked onto the tx_list head with their .node heads
532 * only during this function, then they are immediately spliced
533 * back onto the free list in the form of a chain
534 */
535 for_each_sg(sgl, sg, sg_len, i) {
536 dma_addr_t sg_addr = sg_dma_address(sg);
537 size_t len = sg_dma_len(sg);
538
539 if (!len)
540 goto err_get_desc;
541
542 do {
543 dev_dbg(schan->dev, "Add SG #%d@%p[%d], dma %llx\n",
544 i, sg, len, (unsigned long long)sg_addr);
545
546 if (direction == DMA_DEV_TO_MEM)
547 new = shdma_add_desc(schan, flags,
548 &sg_addr, addr, &len, &first,
549 direction);
550 else
551 new = shdma_add_desc(schan, flags,
552 addr, &sg_addr, &len, &first,
553 direction);
554 if (!new)
555 goto err_get_desc;
556
557 new->chunks = chunks--;
558 list_add_tail(&new->node, &tx_list);
559 } while (len);
560 }
561
562 if (new != first)
563 new->async_tx.cookie = -ENOSPC;
564
565 /* Put them back on the free list, so they don't get lost */
566 list_splice_tail(&tx_list, &schan->ld_free);
567
568 spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
569
570 return &first->async_tx;
571
572err_get_desc:
573 list_for_each_entry(new, &tx_list, node)
574 new->mark = DESC_IDLE;
575 list_splice(&tx_list, &schan->ld_free);
576
577 spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
578
579 return NULL;
580}
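As a concrete illustration of the chaining rules above: a transfer that needs three chunks leaves them on ld_free as chunk0 (cookie -EBUSY, the descriptor handed back to the user), chunk1 (cookie -EINVAL) and chunk2 (cookie -ENOSPC); at tx_submit() time chunk0's cookie becomes a positive value, all three are marked DESC_SUBMITTED, and they move as a group onto ld_queue.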
581
582static struct dma_async_tx_descriptor *shdma_prep_memcpy(
583 struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
584 size_t len, unsigned long flags)
585{
586 struct shdma_chan *schan = to_shdma_chan(chan);
587 struct scatterlist sg;
588
589 if (!chan || !len)
590 return NULL;
591
592 BUG_ON(!schan->desc_num);
593
594 sg_init_table(&sg, 1);
595 sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
596 offset_in_page(dma_src));
597 sg_dma_address(&sg) = dma_src;
598 sg_dma_len(&sg) = len;
599
600 return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags);
601}
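A client drives this memcpy path through the standard dmaengine calls, roughly as follows (a hedged sketch; error handling is elided and dst_dma/src_dma/len are assumed to be valid DMA mappings):

    /* Illustrative memcpy submission through the dmaengine API. */
    struct dma_async_tx_descriptor *tx;
    dma_cookie_t cookie;

    tx = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma,
                                              len, DMA_CTRL_ACK);
    if (tx) {
            cookie = dmaengine_submit(tx);
            dma_async_issue_pending(chan);
    }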
602
603static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
604 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
605 enum dma_transfer_direction direction, unsigned long flags, void *context)
606{
607 struct shdma_chan *schan = to_shdma_chan(chan);
608 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
609 const struct shdma_ops *ops = sdev->ops;
610 int slave_id = schan->slave_id;
611 dma_addr_t slave_addr;
612
613 if (!chan)
614 return NULL;
615
616 BUG_ON(!schan->desc_num);
617
618 /* Someone calling slave DMA on a generic channel? */
619 if (slave_id < 0 || !sg_len) {
620 dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
621 __func__, sg_len, slave_id);
622 return NULL;
623 }
624
625 slave_addr = ops->slave_addr(schan);
626
627 return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
628 direction, flags);
629}
630
631static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
632 unsigned long arg)
633{
634 struct shdma_chan *schan = to_shdma_chan(chan);
635 struct shdma_dev *sdev = to_shdma_dev(chan->device);
636 const struct shdma_ops *ops = sdev->ops;
637 struct dma_slave_config *config;
638 unsigned long flags;
639 int ret;
640
641 if (!chan)
642 return -EINVAL;
643
644 switch (cmd) {
645 case DMA_TERMINATE_ALL:
646 spin_lock_irqsave(&schan->chan_lock, flags);
647 ops->halt_channel(schan);
648
649 if (ops->get_partial && !list_empty(&schan->ld_queue)) {
650 /* Record partial transfer */
651 struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
652 struct shdma_desc, node);
653 desc->partial = ops->get_partial(schan, desc);
654 }
655
656 spin_unlock_irqrestore(&schan->chan_lock, flags);
657
658 shdma_chan_ld_cleanup(schan, true);
659 break;
660 case DMA_SLAVE_CONFIG:
661 /*
662 * So far only .slave_id is used, but the slave drivers are
663 * encouraged to also set a transfer direction and an address.
664 */
665 if (!arg)
666 return -EINVAL;
667 /*
668 * We could lock this, but you shouldn't be configuring the
669 * channel while using it...
670 */
671 config = (struct dma_slave_config *)arg;
672 ret = shdma_setup_slave(schan, config->slave_id);
673 if (ret < 0)
674 return ret;
675 break;
676 default:
677 return -ENXIO;
678 }
679
680 return 0;
681}
682
683static void shdma_issue_pending(struct dma_chan *chan)
684{
685 struct shdma_chan *schan = to_shdma_chan(chan);
686
687 spin_lock_irq(&schan->chan_lock);
688 if (schan->pm_state == SHDMA_PM_ESTABLISHED)
689 shdma_chan_xfer_ld_queue(schan);
690 else
691 schan->pm_state = SHDMA_PM_PENDING;
692 spin_unlock_irq(&schan->chan_lock);
693}
694
695static enum dma_status shdma_tx_status(struct dma_chan *chan,
696 dma_cookie_t cookie,
697 struct dma_tx_state *txstate)
698{
699 struct shdma_chan *schan = to_shdma_chan(chan);
700 enum dma_status status;
701 unsigned long flags;
702
703 shdma_chan_ld_cleanup(schan, false);
704
705 spin_lock_irqsave(&schan->chan_lock, flags);
706
707 status = dma_cookie_status(chan, cookie, txstate);
708
709 /*
710 * If we don't find the cookie on the queue, it has been aborted and we
711 * have to report an error
712 */
713 if (status != DMA_SUCCESS) {
714 struct shdma_desc *sdesc;
715 status = DMA_ERROR;
716 list_for_each_entry(sdesc, &schan->ld_queue, node)
717 if (sdesc->cookie == cookie) {
718 status = DMA_IN_PROGRESS;
719 break;
720 }
721 }
722
723 spin_unlock_irqrestore(&schan->chan_lock, flags);
724
725 return status;
726}
727
728/* Called from error IRQ or NMI */
729bool shdma_reset(struct shdma_dev *sdev)
730{
731 const struct shdma_ops *ops = sdev->ops;
732 struct shdma_chan *schan;
733 unsigned int handled = 0;
734 int i;
735
736 /* Reset all channels */
737 shdma_for_each_chan(schan, sdev, i) {
738 struct shdma_desc *sdesc;
739 LIST_HEAD(dl);
740
741 if (!schan)
742 continue;
743
744 spin_lock(&schan->chan_lock);
745
746 /* Stop the channel */
747 ops->halt_channel(schan);
748
749 list_splice_init(&schan->ld_queue, &dl);
750
751 if (!list_empty(&dl)) {
752 dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
753 pm_runtime_put(schan->dev);
754 }
755 schan->pm_state = SHDMA_PM_ESTABLISHED;
756
757 spin_unlock(&schan->chan_lock);
758
759 /* Complete all */
760 list_for_each_entry(sdesc, &dl, node) {
761 struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
762 sdesc->mark = DESC_IDLE;
763 if (tx->callback)
764 tx->callback(tx->callback_param);
765 }
766
767 spin_lock(&schan->chan_lock);
768 list_splice(&dl, &schan->ld_free);
769 spin_unlock(&schan->chan_lock);
770
771 handled++;
772 }
773
774 return !!handled;
775}
776EXPORT_SYMBOL(shdma_reset);
777
778static irqreturn_t chan_irq(int irq, void *dev)
779{
780 struct shdma_chan *schan = dev;
781 const struct shdma_ops *ops =
782 to_shdma_dev(schan->dma_chan.device)->ops;
783 irqreturn_t ret;
784
785 spin_lock(&schan->chan_lock);
786
787 ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;
788
789 spin_unlock(&schan->chan_lock);
790
791 return ret;
792}
793
794static irqreturn_t chan_irqt(int irq, void *dev)
795{
796 struct shdma_chan *schan = dev;
797 const struct shdma_ops *ops =
798 to_shdma_dev(schan->dma_chan.device)->ops;
799 struct shdma_desc *sdesc;
800
801 spin_lock_irq(&schan->chan_lock);
802 list_for_each_entry(sdesc, &schan->ld_queue, node) {
803 if (sdesc->mark == DESC_SUBMITTED &&
804 ops->desc_completed(schan, sdesc)) {
805 dev_dbg(schan->dev, "done #%d@%p\n",
806 sdesc->async_tx.cookie, &sdesc->async_tx);
807 sdesc->mark = DESC_COMPLETED;
808 break;
809 }
810 }
811 /* Next desc */
812 shdma_chan_xfer_ld_queue(schan);
813 spin_unlock_irq(&schan->chan_lock);
814
815 shdma_chan_ld_cleanup(schan, false);
816
817 return IRQ_HANDLED;
818}
819
820int shdma_request_irq(struct shdma_chan *schan, int irq,
821 unsigned long flags, const char *name)
822{
823 int ret = request_threaded_irq(irq, chan_irq, chan_irqt,
824 flags, name, schan);
825
826 schan->irq = ret < 0 ? ret : irq;
827
828 return ret;
829}
830EXPORT_SYMBOL(shdma_request_irq);
831
832void shdma_free_irq(struct shdma_chan *schan)
833{
834 if (schan->irq >= 0)
835 free_irq(schan->irq, schan);
836}
837EXPORT_SYMBOL(shdma_free_irq);
838
839void shdma_chan_probe(struct shdma_dev *sdev,
840 struct shdma_chan *schan, int id)
841{
842 schan->pm_state = SHDMA_PM_ESTABLISHED;
843
844 /* reference struct dma_device */
845 schan->dma_chan.device = &sdev->dma_dev;
846 dma_cookie_init(&schan->dma_chan);
847
848 schan->dev = sdev->dma_dev.dev;
849 schan->id = id;
850
851 if (!schan->max_xfer_len)
852 schan->max_xfer_len = PAGE_SIZE;
853
854 spin_lock_init(&schan->chan_lock);
855
856 /* Init descriptor management lists */
857 INIT_LIST_HEAD(&schan->ld_queue);
858 INIT_LIST_HEAD(&schan->ld_free);
859
860 /* Add the channel to DMA device channel list */
861 list_add_tail(&schan->dma_chan.device_node,
862 &sdev->dma_dev.channels);
863 sdev->schan[sdev->dma_dev.chancnt++] = schan;
864}
865EXPORT_SYMBOL(shdma_chan_probe);
866
867void shdma_chan_remove(struct shdma_chan *schan)
868{
869 list_del(&schan->dma_chan.device_node);
870}
871EXPORT_SYMBOL(shdma_chan_remove);
872
873int shdma_init(struct device *dev, struct shdma_dev *sdev,
874 int chan_num)
875{
876 struct dma_device *dma_dev = &sdev->dma_dev;
877
878 /*
879 * Require all call-backs for now, they can trivially be made optional
880 * later as required
881 */
882 if (!sdev->ops ||
883 !sdev->desc_size ||
884 !sdev->ops->embedded_desc ||
885 !sdev->ops->start_xfer ||
886 !sdev->ops->setup_xfer ||
887 !sdev->ops->set_slave ||
888 !sdev->ops->desc_setup ||
889 !sdev->ops->slave_addr ||
890 !sdev->ops->channel_busy ||
891 !sdev->ops->halt_channel ||
892 !sdev->ops->desc_completed)
893 return -EINVAL;
894
895 sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
896 if (!sdev->schan)
897 return -ENOMEM;
898
899 INIT_LIST_HEAD(&dma_dev->channels);
900
901 /* Common and MEMCPY operations */
902 dma_dev->device_alloc_chan_resources
903 = shdma_alloc_chan_resources;
904 dma_dev->device_free_chan_resources = shdma_free_chan_resources;
905 dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
906 dma_dev->device_tx_status = shdma_tx_status;
907 dma_dev->device_issue_pending = shdma_issue_pending;
908
909 /* Fields compulsory for DMA_SLAVE */
910 dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
911 dma_dev->device_control = shdma_control;
912
913 dma_dev->dev = dev;
914
915 return 0;
916}
917EXPORT_SYMBOL(shdma_init);
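Taken together with shdma_chan_probe() and shdma_request_irq() above, a hardware glue driver's probe path reduces to roughly the following (a condensed sketch; the sh_dmae driver later in this patch is the real example, and my_shdma_ops, struct my_desc, my_chans and irqs are placeholders):

    /* Condensed glue-driver probe flow (illustrative). */
    sdev->ops = &my_shdma_ops;                /* hypothetical ops table */
    sdev->desc_size = sizeof(struct my_desc); /* embeds struct shdma_desc */
    err = shdma_init(&pdev->dev, sdev, nr_channels);
    if (err < 0)
            return err;
    for (id = 0; id < nr_channels; id++) {
            shdma_chan_probe(sdev, &my_chans[id]->shdma_chan, id);
            err = shdma_request_irq(&my_chans[id]->shdma_chan, irqs[id],
                                    IRQF_SHARED, "my-dma");
    }
    err = dma_async_device_register(&sdev->dma_dev);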
918
919void shdma_cleanup(struct shdma_dev *sdev)
920{
921 kfree(sdev->schan);
922}
923EXPORT_SYMBOL(shdma_cleanup);
924
925static int __init shdma_enter(void)
926{
927 shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) *
928 sizeof(long), GFP_KERNEL);
929 if (!shdma_slave_used)
930 return -ENOMEM;
931 return 0;
932}
933module_init(shdma_enter);
934
935static void __exit shdma_exit(void)
936{
937 kfree(shdma_slave_used);
938}
939module_exit(shdma_exit);
940
941MODULE_LICENSE("GPL v2");
942MODULE_DESCRIPTION("SH-DMA driver base library");
943MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
new file mode 100644
index 000000000000..f41bcc5267fd
--- /dev/null
+++ b/drivers/dma/sh/shdma.c
@@ -0,0 +1,955 @@
1/*
2 * Renesas SuperH DMA Engine support
3 *
4 * base is drivers/dma/fsldma.c
5 *
6 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
7 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
8 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
9 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
10 *
11 * This is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * - DMA of SuperH does not have Hardware DMA chain mode.
17 * - MAX DMA size is 16MB.
18 *
19 */
20
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/slab.h>
24#include <linux/interrupt.h>
25#include <linux/dmaengine.h>
26#include <linux/delay.h>
27#include <linux/platform_device.h>
28#include <linux/pm_runtime.h>
29#include <linux/sh_dma.h>
30#include <linux/notifier.h>
31#include <linux/kdebug.h>
32#include <linux/spinlock.h>
33#include <linux/rculist.h>
34
35#include "../dmaengine.h"
36#include "shdma.h"
37
38#define SH_DMAE_DRV_NAME "sh-dma-engine"
39
40/* Default MEMCPY transfer size = 2^2 = 4 bytes */
41#define LOG2_DEFAULT_XFER_SIZE 2
42#define SH_DMA_SLAVE_NUMBER 256
43#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)
44
45/*
46 * Used for write-side mutual exclusion for the global device list,
47 * read-side synchronization by way of RCU, and per-controller data.
48 */
49static DEFINE_SPINLOCK(sh_dmae_lock);
50static LIST_HEAD(sh_dmae_devices);
51
52static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
53{
54 struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
55
56 __raw_writel(data, shdev->chan_reg +
57 shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset);
58}
59
60static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
61{
62 __raw_writel(data, sh_dc->base + reg / sizeof(u32));
63}
64
65static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
66{
67 return __raw_readl(sh_dc->base + reg / sizeof(u32));
68}
69
70static u16 dmaor_read(struct sh_dmae_device *shdev)
71{
72 u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
73
74 if (shdev->pdata->dmaor_is_32bit)
75 return __raw_readl(addr);
76 else
77 return __raw_readw(addr);
78}
79
80static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
81{
82 u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
83
84 if (shdev->pdata->dmaor_is_32bit)
85 __raw_writel(data, addr);
86 else
87 __raw_writew(data, addr);
88}
89
90static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
91{
92 struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
93
94 __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
95}
96
97static u32 chcr_read(struct sh_dmae_chan *sh_dc)
98{
99 struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
100
101 return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
102}
103
104/*
105 * Reset DMA controller
106 *
107 * SH7780 has two DMAOR registers
108 */
109static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
110{
111 unsigned short dmaor;
112 unsigned long flags;
113
114 spin_lock_irqsave(&sh_dmae_lock, flags);
115
116 dmaor = dmaor_read(shdev);
117 dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
118
119 spin_unlock_irqrestore(&sh_dmae_lock, flags);
120}
121
122static int sh_dmae_rst(struct sh_dmae_device *shdev)
123{
124 unsigned short dmaor;
125 unsigned long flags;
126
127 spin_lock_irqsave(&sh_dmae_lock, flags);
128
129 dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
130
131 if (shdev->pdata->chclr_present) {
132 int i;
133 for (i = 0; i < shdev->pdata->channel_num; i++) {
134 struct sh_dmae_chan *sh_chan = shdev->chan[i];
135 if (sh_chan)
136 chclr_write(sh_chan, 0);
137 }
138 }
139
140 dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
141
142 dmaor = dmaor_read(shdev);
143
144 spin_unlock_irqrestore(&sh_dmae_lock, flags);
145
146 if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
147 dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
148 return -EIO;
149 }
150 if (shdev->pdata->dmaor_init & ~dmaor)
151 dev_warn(shdev->shdma_dev.dma_dev.dev,
152 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
153 dmaor, shdev->pdata->dmaor_init);
154 return 0;
155}
156
157static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
158{
159 u32 chcr = chcr_read(sh_chan);
160
161 if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
162 return true; /* working */
163
164 return false; /* waiting */
165}
166
167static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
168{
169 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
170 struct sh_dmae_pdata *pdata = shdev->pdata;
171 int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
172 ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
173
174 if (cnt >= pdata->ts_shift_num)
175 cnt = 0;
176
177 return pdata->ts_shift[cnt];
178}
179
180static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
181{
182 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
183 struct sh_dmae_pdata *pdata = shdev->pdata;
184 int i;
185
186 for (i = 0; i < pdata->ts_shift_num; i++)
187 if (pdata->ts_shift[i] == l2size)
188 break;
189
190 if (i == pdata->ts_shift_num)
191 i = 0;
192
193 return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
194 ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
195}
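The two helpers are inverses over pdata->ts_shift[]: with an illustrative table of {0, 1, 2, 3, 4, 5}, log2size_to_chcr(sh_chan, 2) encodes index 2 into the CHCR TS low/high fields, and calc_xmit_shift() on the resulting CHCR recovers 2, i.e. 4-byte transfer units, matching LOG2_DEFAULT_XFER_SIZE; unknown values fall back to index 0 in both directions.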
196
197static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
198{
199 sh_dmae_writel(sh_chan, hw->sar, SAR);
200 sh_dmae_writel(sh_chan, hw->dar, DAR);
201 sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
202}
203
204static void dmae_start(struct sh_dmae_chan *sh_chan)
205{
206 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
207 u32 chcr = chcr_read(sh_chan);
208
209 if (shdev->pdata->needs_tend_set)
210 sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
211
212 chcr |= CHCR_DE | shdev->chcr_ie_bit;
213 chcr_write(sh_chan, chcr & ~CHCR_TE);
214}
215
216static void dmae_init(struct sh_dmae_chan *sh_chan)
217{
218 /*
219 * Default configuration for dual address memory-memory transfer.
220 * 0x400 represents auto-request.
221 */
222 u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
223 LOG2_DEFAULT_XFER_SIZE);
224 sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
225 chcr_write(sh_chan, chcr);
226}
227
228static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
229{
230 /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
231 if (dmae_is_busy(sh_chan))
232 return -EBUSY;
233
234 sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
235 chcr_write(sh_chan, val);
236
237 return 0;
238}
239
240static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
241{
242 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
243 struct sh_dmae_pdata *pdata = shdev->pdata;
244 const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
245 u16 __iomem *addr = shdev->dmars;
246 unsigned int shift = chan_pdata->dmars_bit;
247
248 if (dmae_is_busy(sh_chan))
249 return -EBUSY;
250
251 if (pdata->no_dmars)
252 return 0;
253
254 /* in the case of a missing DMARS resource use the first memory window */
255 if (!addr)
256 addr = (u16 __iomem *)shdev->chan_reg;
257 addr += chan_pdata->dmars / sizeof(u16);
258
259 __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
260 addr);
261
262 return 0;
263}
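The read-modify-write at the end packs the 8-bit MID/RID value into one byte of a 16-bit DMARS register: with dmars_bit = 8, the mask 0xff00 >> 8 = 0x00ff preserves the low byte while val << 8 lands in the high byte; with dmars_bit = 0 the roles are reversed.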
264
265static void sh_dmae_start_xfer(struct shdma_chan *schan,
266 struct shdma_desc *sdesc)
267{
268 struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
269 shdma_chan);
270 struct sh_dmae_desc *sh_desc = container_of(sdesc,
271 struct sh_dmae_desc, shdma_desc);
272 dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
273 sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
274 sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
275 /* Get the ld start address from ld_queue */
276 dmae_set_reg(sh_chan, &sh_desc->hw);
277 dmae_start(sh_chan);
278}
279
280static bool sh_dmae_channel_busy(struct shdma_chan *schan)
281{
282 struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
283 shdma_chan);
284 return dmae_is_busy(sh_chan);
285}
286
287static void sh_dmae_setup_xfer(struct shdma_chan *schan,
288 int slave_id)
289{
290 struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
291 shdma_chan);
292
293 if (slave_id >= 0) {
294 const struct sh_dmae_slave_config *cfg =
295 sh_chan->config;
296
297 dmae_set_dmars(sh_chan, cfg->mid_rid);
298 dmae_set_chcr(sh_chan, cfg->chcr);
299 } else {
300 dmae_init(sh_chan);
301 }
302}
303
304static const struct sh_dmae_slave_config *dmae_find_slave(
305 struct sh_dmae_chan *sh_chan, int slave_id)
306{
307 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
308 struct sh_dmae_pdata *pdata = shdev->pdata;
309 const struct sh_dmae_slave_config *cfg;
310 int i;
311
312 if (slave_id >= SH_DMA_SLAVE_NUMBER)
313 return NULL;
314
315 for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
316 if (cfg->slave_id == slave_id)
317 return cfg;
318
319 return NULL;
320}
321
322static int sh_dmae_set_slave(struct shdma_chan *schan,
323 int slave_id, bool try)
324{
325 struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
326 shdma_chan);
327 const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
328 if (!cfg)
329 return -ENODEV;
330
331 if (!try)
332 sh_chan->config = cfg;
333
334 return 0;
335}
336
337static void dmae_halt(struct sh_dmae_chan *sh_chan)
338{
339 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
340 u32 chcr = chcr_read(sh_chan);
341
342 chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
343 chcr_write(sh_chan, chcr);
344}
345
346static int sh_dmae_desc_setup(struct shdma_chan *schan,
347 struct shdma_desc *sdesc,
348 dma_addr_t src, dma_addr_t dst, size_t *len)
349{
350 struct sh_dmae_desc *sh_desc = container_of(sdesc,
351 struct sh_dmae_desc, shdma_desc);
352
353 if (*len > schan->max_xfer_len)
354 *len = schan->max_xfer_len;
355
356 sh_desc->hw.sar = src;
357 sh_desc->hw.dar = dst;
358 sh_desc->hw.tcr = *len;
359
360 return 0;
361}
362
363static void sh_dmae_halt(struct shdma_chan *schan)
364{
365 struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
366 shdma_chan);
367 dmae_halt(sh_chan);
368}
369
370static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
371{
372 struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
373 shdma_chan);
374
375 if (!(chcr_read(sh_chan) & CHCR_TE))
376 return false;
377
378 /* DMA stop */
379 dmae_halt(sh_chan);
380
381 return true;
382}
383
384static size_t sh_dmae_get_partial(struct shdma_chan *schan,
385 struct shdma_desc *sdesc)
386{
387 struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
388 shdma_chan);
389 struct sh_dmae_desc *sh_desc = container_of(sdesc,
390 struct sh_dmae_desc, shdma_desc);
391 return sh_desc->hw.tcr -
392 (sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
393}
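For example, a 64 KiB chunk transferred in 4-byte units (xmit_shift = 2) loads TCR with 0x4000; reading back TCR = 0x1000 mid-transfer means 0x1000 << 2 = 16 KiB are still outstanding, so the recorded partial transfer is 64 KiB - 16 KiB = 48 KiB.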
394
395/* Called from error IRQ or NMI */
396static bool sh_dmae_reset(struct sh_dmae_device *shdev)
397{
398 bool ret;
399
400 /* halt the dma controller */
401 sh_dmae_ctl_stop(shdev);
402
403 /* We cannot detect which channel caused the error, so we have to reset all */
404 ret = shdma_reset(&shdev->shdma_dev);
405
406 sh_dmae_rst(shdev);
407
408 return ret;
409}
410
411static irqreturn_t sh_dmae_err(int irq, void *data)
412{
413 struct sh_dmae_device *shdev = data;
414
415 if (!(dmaor_read(shdev) & DMAOR_AE))
416 return IRQ_NONE;
417
418 sh_dmae_reset(shdev);
419 return IRQ_HANDLED;
420}
421
422static bool sh_dmae_desc_completed(struct shdma_chan *schan,
423 struct shdma_desc *sdesc)
424{
425 struct sh_dmae_chan *sh_chan = container_of(schan,
426 struct sh_dmae_chan, shdma_chan);
427 struct sh_dmae_desc *sh_desc = container_of(sdesc,
428 struct sh_dmae_desc, shdma_desc);
429 u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
430 u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
431
432 return (sdesc->direction == DMA_DEV_TO_MEM &&
433 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
434 (sdesc->direction != DMA_DEV_TO_MEM &&
435 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
436}
437
438static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
439{
440 /* Fast path out if NMIF is not asserted for this controller */
441 if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
442 return false;
443
444 return sh_dmae_reset(shdev);
445}
446
447static int sh_dmae_nmi_handler(struct notifier_block *self,
448 unsigned long cmd, void *data)
449{
450 struct sh_dmae_device *shdev;
451 int ret = NOTIFY_DONE;
452 bool triggered;
453
454 /*
455 * Only concern ourselves with NMI events.
456 *
457 * Normally we would check the die chain value, but as this needs
458 * to be architecture independent, check for NMI context instead.
459 */
460 if (!in_nmi())
461 return NOTIFY_DONE;
462
463 rcu_read_lock();
464 list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
465 /*
466 * Only stop if one of the controllers has NMIF asserted,
467 * we do not want to interfere with regular address error
468 * handling or NMI events that don't concern the DMACs.
469 */
470 triggered = sh_dmae_nmi_notify(shdev);
471 if (triggered)
472 ret = NOTIFY_OK;
473 }
474 rcu_read_unlock();
475
476 return ret;
477}
478
479static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
480 .notifier_call = sh_dmae_nmi_handler,
481
482 /* Run before NMI debug handler and KGDB */
483 .priority = 1,
484};
485
486static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
487 int irq, unsigned long flags)
488{
489 const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
490 struct shdma_dev *sdev = &shdev->shdma_dev;
491 struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
492 struct sh_dmae_chan *sh_chan;
493 struct shdma_chan *schan;
494 int err;
495
496 sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
497 if (!sh_chan) {
498 dev_err(sdev->dma_dev.dev,
499 "No free memory for allocating dma channels!\n");
500 return -ENOMEM;
501 }
502
503 schan = &sh_chan->shdma_chan;
504 schan->max_xfer_len = SH_DMA_TCR_MAX + 1;
505
506 shdma_chan_probe(sdev, schan, id);
507
508 sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
509
510 /* set up channel irq */
511 if (pdev->id >= 0)
512 snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
513 "sh-dmae%d.%d", pdev->id, id);
514 else
515 snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
516 "sh-dma%d", id);
517
518 err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
519 if (err) {
520 dev_err(sdev->dma_dev.dev,
521 "DMA channel %d request_irq error %d\n",
522 id, err);
523 goto err_no_irq;
524 }
525
526 shdev->chan[id] = sh_chan;
527 return 0;
528
529err_no_irq:
530 /* remove from dmaengine device node */
531 shdma_chan_remove(schan);
532 kfree(sh_chan);
533 return err;
534}
535
536static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
537{
538 struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
539 struct shdma_chan *schan;
540 int i;
541
542 shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
543 struct sh_dmae_chan *sh_chan = container_of(schan,
544 struct sh_dmae_chan, shdma_chan);
545 BUG_ON(!schan);
546
547 shdma_free_irq(&sh_chan->shdma_chan);
548
549 shdma_chan_remove(schan);
550 kfree(sh_chan);
551 }
552 dma_dev->chancnt = 0;
553}
554
555static void sh_dmae_shutdown(struct platform_device *pdev)
556{
557 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
558 sh_dmae_ctl_stop(shdev);
559}
560
561static int sh_dmae_runtime_suspend(struct device *dev)
562{
563 return 0;
564}
565
566static int sh_dmae_runtime_resume(struct device *dev)
567{
568 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
569
570 return sh_dmae_rst(shdev);
571}
572
573#ifdef CONFIG_PM
574static int sh_dmae_suspend(struct device *dev)
575{
576 return 0;
577}
578
579static int sh_dmae_resume(struct device *dev)
580{
581 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
582 int i, ret;
583
584 ret = sh_dmae_rst(shdev);
585 if (ret < 0)
586 dev_err(dev, "Failed to reset!\n");
587
588 for (i = 0; i < shdev->pdata->channel_num; i++) {
589 struct sh_dmae_chan *sh_chan = shdev->chan[i];
590
591 if (!sh_chan->shdma_chan.desc_num)
592 continue;
593
594 if (sh_chan->shdma_chan.slave_id >= 0) {
595 const struct sh_dmae_slave_config *cfg = sh_chan->config;
596 dmae_set_dmars(sh_chan, cfg->mid_rid);
597 dmae_set_chcr(sh_chan, cfg->chcr);
598 } else {
599 dmae_init(sh_chan);
600 }
601 }
602
603 return 0;
604}
605#else
606#define sh_dmae_suspend NULL
607#define sh_dmae_resume NULL
608#endif
609
610const struct dev_pm_ops sh_dmae_pm = {
611 .suspend = sh_dmae_suspend,
612 .resume = sh_dmae_resume,
613 .runtime_suspend = sh_dmae_runtime_suspend,
614 .runtime_resume = sh_dmae_runtime_resume,
615};
616
617static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
618{
619 struct sh_dmae_chan *sh_chan = container_of(schan,
620 struct sh_dmae_chan, shdma_chan);
621
622 /*
623 * Implicit BUG_ON(!sh_chan->config)
624 * This is an exclusive slave DMA operation; it may only be called after a
625 * successful slave configuration.
626 */
627 return sh_chan->config->addr;
628}
629
630static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
631{
632 return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
633}
634
635static const struct shdma_ops sh_dmae_shdma_ops = {
636 .desc_completed = sh_dmae_desc_completed,
637 .halt_channel = sh_dmae_halt,
638 .channel_busy = sh_dmae_channel_busy,
639 .slave_addr = sh_dmae_slave_addr,
640 .desc_setup = sh_dmae_desc_setup,
641 .set_slave = sh_dmae_set_slave,
642 .setup_xfer = sh_dmae_setup_xfer,
643 .start_xfer = sh_dmae_start_xfer,
644 .embedded_desc = sh_dmae_embedded_desc,
645 .chan_irq = sh_dmae_chan_irq,
646 .get_partial = sh_dmae_get_partial,
647};
648
649static int __devinit sh_dmae_probe(struct platform_device *pdev)
650{
651 struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
652 unsigned long irqflags = IRQF_DISABLED,
653 chan_flag[SH_DMAE_MAX_CHANNELS] = {};
654 int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
655 int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
656 struct sh_dmae_device *shdev;
657 struct dma_device *dma_dev;
658 struct resource *chan, *dmars, *errirq_res, *chanirq_res;
659
660 /* get platform data */
661 if (!pdata || !pdata->channel_num)
662 return -ENODEV;
663
664 chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
665 /* DMARS area is optional */
666 dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
667 /*
668 * IRQ resources:
 669 * 1. there must always be at least one IRQ IO-resource. On SH4 it is
670 * the error IRQ, in which case it is the only IRQ in this resource:
671 * start == end. If it is the only IRQ resource, all channels also
672 * use the same IRQ.
673 * 2. DMA channel IRQ resources can be specified one per resource or in
674 * ranges (start != end)
675 * 3. iff all events (channels and, optionally, error) on this
676 * controller use the same IRQ, only one IRQ resource can be
677 * specified, otherwise there must be one IRQ per channel, even if
678 * some of them are equal
679 * 4. if all IRQs on this controller are equal or if some specific IRQs
680 * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
681 * requested with the IRQF_SHARED flag
682 */
683 errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
684 if (!chan || !errirq_res)
685 return -ENODEV;
686
687 if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
688 dev_err(&pdev->dev, "DMAC register region already claimed\n");
689 return -EBUSY;
690 }
691
692 if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
693 dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
694 err = -EBUSY;
695 goto ermrdmars;
696 }
697
698 err = -ENOMEM;
699 shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
700 if (!shdev) {
701 dev_err(&pdev->dev, "Not enough memory\n");
702 goto ealloc;
703 }
704
705 dma_dev = &shdev->shdma_dev.dma_dev;
706
707 shdev->chan_reg = ioremap(chan->start, resource_size(chan));
708 if (!shdev->chan_reg)
709 goto emapchan;
710 if (dmars) {
711 shdev->dmars = ioremap(dmars->start, resource_size(dmars));
712 if (!shdev->dmars)
713 goto emapdmars;
714 }
715
716 if (!pdata->slave_only)
717 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
718 if (pdata->slave && pdata->slave_num)
719 dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
720
721 /* Default transfer size of 32 bytes requires 32-byte alignment */
722 dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
723
724 shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
725 shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
726 err = shdma_init(&pdev->dev, &shdev->shdma_dev,
727 pdata->channel_num);
728 if (err < 0)
729 goto eshdma;
730
731 /* platform data */
732 shdev->pdata = pdev->dev.platform_data;
733
734 if (pdata->chcr_offset)
735 shdev->chcr_offset = pdata->chcr_offset;
736 else
737 shdev->chcr_offset = CHCR;
738
739 if (pdata->chcr_ie_bit)
740 shdev->chcr_ie_bit = pdata->chcr_ie_bit;
741 else
742 shdev->chcr_ie_bit = CHCR_IE;
743
744 platform_set_drvdata(pdev, shdev);
745
746 pm_runtime_enable(&pdev->dev);
747 err = pm_runtime_get_sync(&pdev->dev);
748 if (err < 0)
749 dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
750
751 spin_lock_irq(&sh_dmae_lock);
752 list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
753 spin_unlock_irq(&sh_dmae_lock);
754
755 /* reset dma controller - only needed as a test */
756 err = sh_dmae_rst(shdev);
757 if (err)
758 goto rst_err;
759
760#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
761 chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
762
763 if (!chanirq_res)
764 chanirq_res = errirq_res;
765 else
766 irqres++;
767
768 if (chanirq_res == errirq_res ||
769 (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
770 irqflags = IRQF_SHARED;
771
772 errirq = errirq_res->start;
773
774 err = request_irq(errirq, sh_dmae_err, irqflags,
775 "DMAC Address Error", shdev);
776 if (err) {
777 dev_err(&pdev->dev,
778 "DMA failed requesting irq #%d, error %d\n",
779 errirq, err);
780 goto eirq_err;
781 }
782
783#else
784 chanirq_res = errirq_res;
785#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
786
787 if (chanirq_res->start == chanirq_res->end &&
788 !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
789 /* Special case - all multiplexed */
790 for (; irq_cnt < pdata->channel_num; irq_cnt++) {
791 if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
792 chan_irq[irq_cnt] = chanirq_res->start;
793 chan_flag[irq_cnt] = IRQF_SHARED;
794 } else {
795 irq_cap = 1;
796 break;
797 }
798 }
799 } else {
800 do {
801 for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
802 if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
803 irq_cap = 1;
804 break;
805 }
806
807 if ((errirq_res->flags & IORESOURCE_BITS) ==
808 IORESOURCE_IRQ_SHAREABLE)
809 chan_flag[irq_cnt] = IRQF_SHARED;
810 else
811 chan_flag[irq_cnt] = IRQF_DISABLED;
812 dev_dbg(&pdev->dev,
813 "Found IRQ %d for channel %d\n",
814 i, irq_cnt);
815 chan_irq[irq_cnt++] = i;
816 }
817
818 if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
819 break;
820
821 chanirq_res = platform_get_resource(pdev,
822 IORESOURCE_IRQ, ++irqres);
823 } while (irq_cnt < pdata->channel_num && chanirq_res);
824 }
825
826 /* Create DMA Channel */
827 for (i = 0; i < irq_cnt; i++) {
828 err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
829 if (err)
830 goto chan_probe_err;
831 }
832
833 if (irq_cap)
834 dev_notice(&pdev->dev, "Attempting to register %d DMA "
835 "channels when a maximum of %d are supported.\n",
836 pdata->channel_num, SH_DMAE_MAX_CHANNELS);
837
838 pm_runtime_put(&pdev->dev);
839
840 err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
841 if (err < 0)
842 goto edmadevreg;
843
844 return err;
845
846edmadevreg:
847 pm_runtime_get(&pdev->dev);
848
849chan_probe_err:
850 sh_dmae_chan_remove(shdev);
851
852#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
853 free_irq(errirq, shdev);
854eirq_err:
855#endif
856rst_err:
857 spin_lock_irq(&sh_dmae_lock);
858 list_del_rcu(&shdev->node);
859 spin_unlock_irq(&sh_dmae_lock);
860
861 pm_runtime_put(&pdev->dev);
862 pm_runtime_disable(&pdev->dev);
863
864 platform_set_drvdata(pdev, NULL);
865 shdma_cleanup(&shdev->shdma_dev);
866eshdma:
867 if (dmars)
868 iounmap(shdev->dmars);
869emapdmars:
870 iounmap(shdev->chan_reg);
871 synchronize_rcu();
872emapchan:
873 kfree(shdev);
874ealloc:
875 if (dmars)
876 release_mem_region(dmars->start, resource_size(dmars));
877ermrdmars:
878 release_mem_region(chan->start, resource_size(chan));
879
880 return err;
881}
882
883static int __devexit sh_dmae_remove(struct platform_device *pdev)
884{
885 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
886 struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
887 struct resource *res;
888 int errirq = platform_get_irq(pdev, 0);
889
890 dma_async_device_unregister(dma_dev);
891
892 if (errirq > 0)
893 free_irq(errirq, shdev);
894
895 spin_lock_irq(&sh_dmae_lock);
896 list_del_rcu(&shdev->node);
897 spin_unlock_irq(&sh_dmae_lock);
898
899 pm_runtime_disable(&pdev->dev);
900
901 sh_dmae_chan_remove(shdev);
902 shdma_cleanup(&shdev->shdma_dev);
903
904 if (shdev->dmars)
905 iounmap(shdev->dmars);
906 iounmap(shdev->chan_reg);
907
908 platform_set_drvdata(pdev, NULL);
909
910 synchronize_rcu();
911 kfree(shdev);
912
913 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
914 if (res)
915 release_mem_region(res->start, resource_size(res));
916 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
917 if (res)
918 release_mem_region(res->start, resource_size(res));
919
920 return 0;
921}
922
923static struct platform_driver sh_dmae_driver = {
924 .driver = {
925 .owner = THIS_MODULE,
926 .pm = &sh_dmae_pm,
927 .name = SH_DMAE_DRV_NAME,
928 },
929 .remove = __devexit_p(sh_dmae_remove),
930 .shutdown = sh_dmae_shutdown,
931};
932
933static int __init sh_dmae_init(void)
934{
935 /* Wire up NMI handling */
936 int err = register_die_notifier(&sh_dmae_nmi_notifier);
937 if (err)
938 return err;
939
940 return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
941}
942module_init(sh_dmae_init);
943
944static void __exit sh_dmae_exit(void)
945{
946 platform_driver_unregister(&sh_dmae_driver);
947
948 unregister_die_notifier(&sh_dmae_nmi_notifier);
949}
950module_exit(sh_dmae_exit);
951
952MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
953MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
954MODULE_LICENSE("GPL");
955MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);
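The IRQ-resource rules documented in the sh_dmae_probe() comment above are easier to see against a concrete platform definition. The sketch below is illustrative only: the name my_dmac_resources, the addresses and the IRQ numbers are invented for this annotation, not taken from any real board file.

static struct resource my_dmac_resources[] = {
	{
		/* register window: IORESOURCE_MEM 0, mandatory */
		.start	= 0xfe008020,
		.end	= 0xfe00808f,
		.flags	= IORESOURCE_MEM,
	}, {
		/* DMARS window: IORESOURCE_MEM 1, optional */
		.start	= 0xfe009000,
		.end	= 0xfe00900b,
		.flags	= IORESOURCE_MEM,
	}, {
		/* rule 1: the first IRQ resource is the error IRQ, start == end */
		.start	= 34,
		.end	= 34,
		.flags	= IORESOURCE_IRQ,
	}, {
		/* rule 2: six channel IRQs expressed as a single range */
		.start	= 48,
		.end	= 53,
		.flags	= IORESOURCE_IRQ,
	},
};

If the two channel entries were dropped so that IRQ 34 were the only IRQ resource, rule 3 would make every channel reuse it and rule 4 would cause it to be requested with IRQF_SHARED; marking individual entries IORESOURCE_IRQ_SHAREABLE has the same per-IRQ effect.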
diff --git a/drivers/dma/shdma.h b/drivers/dma/sh/shdma.h
index 0b1d2c105f02..9314e93225db 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/sh/shdma.h
@@ -13,42 +13,29 @@
13#ifndef __DMA_SHDMA_H 13#ifndef __DMA_SHDMA_H
14#define __DMA_SHDMA_H 14#define __DMA_SHDMA_H
15 15
16#include <linux/sh_dma.h>
17#include <linux/shdma-base.h>
16#include <linux/dmaengine.h> 18#include <linux/dmaengine.h>
17#include <linux/interrupt.h> 19#include <linux/interrupt.h>
18#include <linux/list.h> 20#include <linux/list.h>
19 21
20#define SH_DMAC_MAX_CHANNELS 20 22#define SH_DMAE_MAX_CHANNELS 20
21#define SH_DMA_SLAVE_NUMBER 256 23#define SH_DMAE_TCR_MAX 0x00FFFFFF /* 16MB */
22#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */
23 24
24struct device; 25struct device;
25 26
26enum dmae_pm_state {
27 DMAE_PM_ESTABLISHED,
28 DMAE_PM_BUSY,
29 DMAE_PM_PENDING,
30};
31
32struct sh_dmae_chan { 27struct sh_dmae_chan {
33 spinlock_t desc_lock; /* Descriptor operation lock */ 28 struct shdma_chan shdma_chan;
34 struct list_head ld_queue; /* Link descriptors queue */ 29 const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
35 struct list_head ld_free; /* Link descriptors free */
36 struct dma_chan common; /* DMA common channel */
37 struct device *dev; /* Channel device */
38 struct tasklet_struct tasklet; /* Tasklet */
39 int descs_allocated; /* desc count */
40 int xmit_shift; /* log_2(bytes_per_xfer) */ 30 int xmit_shift; /* log_2(bytes_per_xfer) */
41 int irq;
42 int id; /* Raw id of this channel */
43 u32 __iomem *base; 31 u32 __iomem *base;
44 char dev_id[16]; /* unique name per DMAC of channel */ 32 char dev_id[16]; /* unique name per DMAC of channel */
45 int pm_error; 33 int pm_error;
46 enum dmae_pm_state pm_state;
47}; 34};
48 35
49struct sh_dmae_device { 36struct sh_dmae_device {
50 struct dma_device common; 37 struct shdma_dev shdma_dev;
51 struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS]; 38 struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS];
52 struct sh_dmae_pdata *pdata; 39 struct sh_dmae_pdata *pdata;
53 struct list_head node; 40 struct list_head node;
54 u32 __iomem *chan_reg; 41 u32 __iomem *chan_reg;
@@ -57,10 +44,21 @@ struct sh_dmae_device {
57 u32 chcr_ie_bit; 44 u32 chcr_ie_bit;
58}; 45};
59 46
60#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) 47struct sh_dmae_regs {
48 u32 sar; /* SAR / source address */
49 u32 dar; /* DAR / destination address */
50 u32 tcr; /* TCR / transfer count */
51};
52
53struct sh_dmae_desc {
54 struct sh_dmae_regs hw;
55 struct shdma_desc shdma_desc;
56};
57
58#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, shdma_chan)
61#define to_sh_desc(lh) container_of(lh, struct sh_desc, node) 59#define to_sh_desc(lh) container_of(lh, struct sh_desc, node)
62#define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx) 60#define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
63#define to_sh_dev(chan) container_of(chan->common.device,\ 61#define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\
64 struct sh_dmae_device, common) 62 struct sh_dmae_device, shdma_dev.dma_dev)
65 63
66#endif /* __DMA_SHDMA_H */ 64#endif /* __DMA_SHDMA_H */
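The slimmed-down header works because struct shdma_chan is now embedded in struct sh_dmae_chan, and the generic struct dma_chan is in turn embedded in struct shdma_chan; the rewritten to_sh_chan() and to_sh_dev() macros recover the driver-private containers purely via container_of(). A minimal sketch of that navigation follows; the helper name chan_to_sh_chan is hypothetical, not part of the patch:

static struct sh_dmae_chan *chan_to_sh_chan(struct dma_chan *chan)
{
	/* step out of the generic dma_chan into the shdma-base channel */
	struct shdma_chan *schan =
		container_of(chan, struct shdma_chan, dma_chan);

	/* ... and from there into this driver's private channel */
	return container_of(schan, struct sh_dmae_chan, shdma_chan);
}

Nothing has to be stored to make this work: container_of() only subtracts compile-time field offsets, which is what lets the shared shdma-base library own the list, tasklet and descriptor state that this header used to declare.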
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
deleted file mode 100644
index 19d7a8d3975d..000000000000
--- a/drivers/dma/shdma.c
+++ /dev/null
@@ -1,1524 +0,0 @@
1/*
2 * Renesas SuperH DMA Engine support
3 *
4 * base is drivers/dma/fsldma.c
5 *
6 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
7 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
8 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
9 *
10 * This is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * - The SuperH DMAC does not have a hardware DMA chain mode.
16 * - MAX DMA size is 16MB.
17 *
18 */
19
20#include <linux/init.h>
21#include <linux/module.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/dmaengine.h>
25#include <linux/delay.h>
26#include <linux/platform_device.h>
27#include <linux/pm_runtime.h>
28#include <linux/sh_dma.h>
29#include <linux/notifier.h>
30#include <linux/kdebug.h>
31#include <linux/spinlock.h>
32#include <linux/rculist.h>
33
34#include "dmaengine.h"
35#include "shdma.h"
36
37/* DMA descriptor control */
38enum sh_dmae_desc_status {
39 DESC_IDLE,
40 DESC_PREPARED,
41 DESC_SUBMITTED,
42 DESC_COMPLETED, /* completed, have to call callback */
43 DESC_WAITING, /* callback called, waiting for ack / re-submit */
44};
45
46#define NR_DESCS_PER_CHANNEL 32
47/* Default MEMCPY transfer size = 2^2 = 4 bytes */
48#define LOG2_DEFAULT_XFER_SIZE 2
49
50/*
51 * Used for write-side mutual exclusion for the global device list,
52 * read-side synchronization by way of RCU, and per-controller data.
53 */
54static DEFINE_SPINLOCK(sh_dmae_lock);
55static LIST_HEAD(sh_dmae_devices);
56
57/* A bitmask with enough bits for enum sh_dmae_slave_chan_id */
58static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
59
60static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
61static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
62
63static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
64{
65 struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
66
67 __raw_writel(data, shdev->chan_reg +
68 shdev->pdata->channel[sh_dc->id].chclr_offset);
69}
70
71static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
72{
73 __raw_writel(data, sh_dc->base + reg / sizeof(u32));
74}
75
76static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
77{
78 return __raw_readl(sh_dc->base + reg / sizeof(u32));
79}
80
81static u16 dmaor_read(struct sh_dmae_device *shdev)
82{
83 u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
84
85 if (shdev->pdata->dmaor_is_32bit)
86 return __raw_readl(addr);
87 else
88 return __raw_readw(addr);
89}
90
91static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
92{
93 u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
94
95 if (shdev->pdata->dmaor_is_32bit)
96 __raw_writel(data, addr);
97 else
98 __raw_writew(data, addr);
99}
100
101static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
102{
103 struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
104
105 __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
106}
107
108static u32 chcr_read(struct sh_dmae_chan *sh_dc)
109{
110 struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
111
112 return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
113}
114
115/*
116 * Reset DMA controller
117 *
118 * SH7780 has two DMAOR registers
119 */
120static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
121{
122 unsigned short dmaor;
123 unsigned long flags;
124
125 spin_lock_irqsave(&sh_dmae_lock, flags);
126
127 dmaor = dmaor_read(shdev);
128 dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
129
130 spin_unlock_irqrestore(&sh_dmae_lock, flags);
131}
132
133static int sh_dmae_rst(struct sh_dmae_device *shdev)
134{
135 unsigned short dmaor;
136 unsigned long flags;
137
138 spin_lock_irqsave(&sh_dmae_lock, flags);
139
140 dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
141
142 if (shdev->pdata->chclr_present) {
143 int i;
144 for (i = 0; i < shdev->pdata->channel_num; i++) {
145 struct sh_dmae_chan *sh_chan = shdev->chan[i];
146 if (sh_chan)
147 chclr_write(sh_chan, 0);
148 }
149 }
150
151 dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
152
153 dmaor = dmaor_read(shdev);
154
155 spin_unlock_irqrestore(&sh_dmae_lock, flags);
156
157 if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
158 dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
159 return -EIO;
160 }
161 if (shdev->pdata->dmaor_init & ~dmaor)
162 dev_warn(shdev->common.dev,
163 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
164 dmaor, shdev->pdata->dmaor_init);
165 return 0;
166}
167
168static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
169{
170 u32 chcr = chcr_read(sh_chan);
171
172 if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
173 return true; /* working */
174
175 return false; /* waiting */
176}
177
178static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
179{
180 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
181 struct sh_dmae_pdata *pdata = shdev->pdata;
182 int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
183 ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
184
185 if (cnt >= pdata->ts_shift_num)
186 cnt = 0;
187
188 return pdata->ts_shift[cnt];
189}
190
191static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
192{
193 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
194 struct sh_dmae_pdata *pdata = shdev->pdata;
195 int i;
196
197 for (i = 0; i < pdata->ts_shift_num; i++)
198 if (pdata->ts_shift[i] == l2size)
199 break;
200
201 if (i == pdata->ts_shift_num)
202 i = 0;
203
204 return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
205 ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
206}
207
208static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
209{
210 sh_dmae_writel(sh_chan, hw->sar, SAR);
211 sh_dmae_writel(sh_chan, hw->dar, DAR);
212 sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
213}
214
215static void dmae_start(struct sh_dmae_chan *sh_chan)
216{
217 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
218 u32 chcr = chcr_read(sh_chan);
219
220 if (shdev->pdata->needs_tend_set)
221 sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
222
223 chcr |= CHCR_DE | shdev->chcr_ie_bit;
224 chcr_write(sh_chan, chcr & ~CHCR_TE);
225}
226
227static void dmae_halt(struct sh_dmae_chan *sh_chan)
228{
229 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
230 u32 chcr = chcr_read(sh_chan);
231
232 chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
233 chcr_write(sh_chan, chcr);
234}
235
236static void dmae_init(struct sh_dmae_chan *sh_chan)
237{
238 /*
239 * Default configuration for dual address memory-memory transfer.
240 * 0x400 represents auto-request.
241 */
242 u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
243 LOG2_DEFAULT_XFER_SIZE);
244 sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
245 chcr_write(sh_chan, chcr);
246}
247
248static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
249{
250 /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
251 if (dmae_is_busy(sh_chan))
252 return -EBUSY;
253
254 sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
255 chcr_write(sh_chan, val);
256
257 return 0;
258}
259
260static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
261{
262 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
263 struct sh_dmae_pdata *pdata = shdev->pdata;
264 const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
265 u16 __iomem *addr = shdev->dmars;
266 unsigned int shift = chan_pdata->dmars_bit;
267
268 if (dmae_is_busy(sh_chan))
269 return -EBUSY;
270
271 if (pdata->no_dmars)
272 return 0;
273
274 /* in the case of a missing DMARS resource, use the first memory window */
275 if (!addr)
276 addr = (u16 __iomem *)shdev->chan_reg;
277 addr += chan_pdata->dmars / sizeof(u16);
278
279 __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
280 addr);
281
282 return 0;
283}
284
285static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
286{
287 struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
288 struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
289 struct sh_dmae_slave *param = tx->chan->private;
290 dma_async_tx_callback callback = tx->callback;
291 dma_cookie_t cookie;
292 bool power_up;
293
294 spin_lock_irq(&sh_chan->desc_lock);
295
296 if (list_empty(&sh_chan->ld_queue))
297 power_up = true;
298 else
299 power_up = false;
300
301 cookie = dma_cookie_assign(tx);
302
303 /* Mark all chunks of this descriptor as submitted, move to the queue */
304 list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
305 /*
306 * All chunks are on the global ld_free, so, we have to find
307 * the end of the chain ourselves
308 */
309 if (chunk != desc && (chunk->mark == DESC_IDLE ||
310 chunk->async_tx.cookie > 0 ||
311 chunk->async_tx.cookie == -EBUSY ||
312 &chunk->node == &sh_chan->ld_free))
313 break;
314 chunk->mark = DESC_SUBMITTED;
315 /* Callback goes to the last chunk */
316 chunk->async_tx.callback = NULL;
317 chunk->cookie = cookie;
318 list_move_tail(&chunk->node, &sh_chan->ld_queue);
319 last = chunk;
320 }
321
322 last->async_tx.callback = callback;
323 last->async_tx.callback_param = tx->callback_param;
324
325 dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
326 tx->cookie, &last->async_tx, sh_chan->id,
327 desc->hw.sar, desc->hw.tcr, desc->hw.dar);
328
329 if (power_up) {
330 sh_chan->pm_state = DMAE_PM_BUSY;
331
332 pm_runtime_get(sh_chan->dev);
333
334 spin_unlock_irq(&sh_chan->desc_lock);
335
336 pm_runtime_barrier(sh_chan->dev);
337
338 spin_lock_irq(&sh_chan->desc_lock);
339
340 /* Have we been reset, while waiting? */
341 if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) {
342 dev_dbg(sh_chan->dev, "Bring up channel %d\n",
343 sh_chan->id);
344 if (param) {
345 const struct sh_dmae_slave_config *cfg =
346 param->config;
347
348 dmae_set_dmars(sh_chan, cfg->mid_rid);
349 dmae_set_chcr(sh_chan, cfg->chcr);
350 } else {
351 dmae_init(sh_chan);
352 }
353
354 if (sh_chan->pm_state == DMAE_PM_PENDING)
355 sh_chan_xfer_ld_queue(sh_chan);
356 sh_chan->pm_state = DMAE_PM_ESTABLISHED;
357 }
358 } else {
359 sh_chan->pm_state = DMAE_PM_PENDING;
360 }
361
362 spin_unlock_irq(&sh_chan->desc_lock);
363
364 return cookie;
365}
366
367/* Called with desc_lock held */
368static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
369{
370 struct sh_desc *desc;
371
372 list_for_each_entry(desc, &sh_chan->ld_free, node)
373 if (desc->mark != DESC_PREPARED) {
374 BUG_ON(desc->mark != DESC_IDLE);
375 list_del(&desc->node);
376 return desc;
377 }
378
379 return NULL;
380}
381
382static const struct sh_dmae_slave_config *sh_dmae_find_slave(
383 struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
384{
385 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
386 struct sh_dmae_pdata *pdata = shdev->pdata;
387 int i;
388
389 if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
390 return NULL;
391
392 for (i = 0; i < pdata->slave_num; i++)
393 if (pdata->slave[i].slave_id == param->slave_id)
394 return pdata->slave + i;
395
396 return NULL;
397}
398
399static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
400{
401 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
402 struct sh_desc *desc;
403 struct sh_dmae_slave *param = chan->private;
404 int ret;
405
406 /*
407 * This relies on the guarantee from dmaengine that alloc_chan_resources
408 * never runs concurrently with itself or free_chan_resources.
409 */
410 if (param) {
411 const struct sh_dmae_slave_config *cfg;
412
413 cfg = sh_dmae_find_slave(sh_chan, param);
414 if (!cfg) {
415 ret = -EINVAL;
416 goto efindslave;
417 }
418
419 if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
420 ret = -EBUSY;
421 goto etestused;
422 }
423
424 param->config = cfg;
425 }
426
427 while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
428 desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
429 if (!desc)
430 break;
431 dma_async_tx_descriptor_init(&desc->async_tx,
432 &sh_chan->common);
433 desc->async_tx.tx_submit = sh_dmae_tx_submit;
434 desc->mark = DESC_IDLE;
435
436 list_add(&desc->node, &sh_chan->ld_free);
437 sh_chan->descs_allocated++;
438 }
439
440 if (!sh_chan->descs_allocated) {
441 ret = -ENOMEM;
442 goto edescalloc;
443 }
444
445 return sh_chan->descs_allocated;
446
447edescalloc:
448 if (param)
449 clear_bit(param->slave_id, sh_dmae_slave_used);
450etestused:
451efindslave:
452 chan->private = NULL;
453 return ret;
454}
455
456/*
457 * sh_dmae_free_chan_resources - Free all resources of the channel.
458 */
459static void sh_dmae_free_chan_resources(struct dma_chan *chan)
460{
461 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
462 struct sh_desc *desc, *_desc;
463 LIST_HEAD(list);
464
465 /* Protect against ISR */
466 spin_lock_irq(&sh_chan->desc_lock);
467 dmae_halt(sh_chan);
468 spin_unlock_irq(&sh_chan->desc_lock);
469
470 /* Now no new interrupts will occur */
471
472 /* Prepared and not submitted descriptors can still be on the queue */
473 if (!list_empty(&sh_chan->ld_queue))
474 sh_dmae_chan_ld_cleanup(sh_chan, true);
475
476 if (chan->private) {
477 /* The caller is holding dma_list_mutex */
478 struct sh_dmae_slave *param = chan->private;
479 clear_bit(param->slave_id, sh_dmae_slave_used);
480 chan->private = NULL;
481 }
482
483 spin_lock_irq(&sh_chan->desc_lock);
484
485 list_splice_init(&sh_chan->ld_free, &list);
486 sh_chan->descs_allocated = 0;
487
488 spin_unlock_irq(&sh_chan->desc_lock);
489
490 list_for_each_entry_safe(desc, _desc, &list, node)
491 kfree(desc);
492}
493
494/**
495 * sh_dmae_add_desc - get, set up and return one transfer descriptor
496 * @sh_chan: DMA channel
497 * @flags: DMA transfer flags
498 * @dest: destination DMA address, incremented when direction equals
499 * DMA_DEV_TO_MEM
500 * @src: source DMA address, incremented when direction equals
501 * DMA_MEM_TO_DEV
502 * @len: DMA transfer length
503 * @first: if NULL, set to the current descriptor and cookie set to -EBUSY
504 * @direction: needed for slave DMA to decide which address to keep constant,
505 * equals DMA_MEM_TO_MEM for MEMCPY
506 * Returns the new descriptor on success or NULL on error
507 * Locks: called with desc_lock held
508 */
509static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
510 unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
511 struct sh_desc **first, enum dma_transfer_direction direction)
512{
513 struct sh_desc *new;
514 size_t copy_size;
515
516 if (!*len)
517 return NULL;
518
519 /* Allocate the link descriptor from the free list */
520 new = sh_dmae_get_desc(sh_chan);
521 if (!new) {
522 dev_err(sh_chan->dev, "No free link descriptor available\n");
523 return NULL;
524 }
525
526 copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);
527
528 new->hw.sar = *src;
529 new->hw.dar = *dest;
530 new->hw.tcr = copy_size;
531
532 if (!*first) {
533 /* First desc */
534 new->async_tx.cookie = -EBUSY;
535 *first = new;
536 } else {
537 /* Other desc - invisible to the user */
538 new->async_tx.cookie = -EINVAL;
539 }
540
541 dev_dbg(sh_chan->dev,
542 "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
543 copy_size, *len, *src, *dest, &new->async_tx,
544 new->async_tx.cookie, sh_chan->xmit_shift);
545
546 new->mark = DESC_PREPARED;
547 new->async_tx.flags = flags;
548 new->direction = direction;
549
550 *len -= copy_size;
551 if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
552 *src += copy_size;
553 if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
554 *dest += copy_size;
555
556 return new;
557}
558
559/*
560 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
561 *
562 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
563 * converted to scatter-gather to guarantee consistent locking and correct
564 * list manipulation. For slave DMA, direction carries the usual meaning and,
565 * logically, the SG list is RAM and the addr variable contains the slave
566 * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
567 * DMA_MEM_TO_MEM and the SG list contains a single element pointing at the source buffer.
568 */
569static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
570 struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
571 enum dma_transfer_direction direction, unsigned long flags)
572{
573 struct scatterlist *sg;
574 struct sh_desc *first = NULL, *new = NULL /* compiler... */;
575 LIST_HEAD(tx_list);
576 int chunks = 0;
577 unsigned long irq_flags;
578 int i;
579
580 if (!sg_len)
581 return NULL;
582
583 for_each_sg(sgl, sg, sg_len, i)
584 chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
585 (SH_DMA_TCR_MAX + 1);
586
587 /* Have to lock the whole loop to protect against concurrent release */
588 spin_lock_irqsave(&sh_chan->desc_lock, irq_flags);
589
590 /*
591 * Chaining:
592 * the first descriptor is what the user deals with in all API calls; its
593 * cookie is initially set to -EBUSY and becomes a positive number at
594 * tx-submit time
595 * if more than one chunk is needed, further chunks have cookie = -EINVAL
596 * the last chunk, if not equal to the first, has cookie = -ENOSPC
597 * all chunks are linked onto the tx_list head with their .node heads
598 * only during this function, then they are immediately spliced
599 * back onto the free list in the form of a chain
600 */
601 for_each_sg(sgl, sg, sg_len, i) {
602 dma_addr_t sg_addr = sg_dma_address(sg);
603 size_t len = sg_dma_len(sg);
604
605 if (!len)
606 goto err_get_desc;
607
608 do {
609 dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
610 i, sg, len, (unsigned long long)sg_addr);
611
612 if (direction == DMA_DEV_TO_MEM)
613 new = sh_dmae_add_desc(sh_chan, flags,
614 &sg_addr, addr, &len, &first,
615 direction);
616 else
617 new = sh_dmae_add_desc(sh_chan, flags,
618 addr, &sg_addr, &len, &first,
619 direction);
620 if (!new)
621 goto err_get_desc;
622
623 new->chunks = chunks--;
624 list_add_tail(&new->node, &tx_list);
625 } while (len);
626 }
627
628 if (new != first)
629 new->async_tx.cookie = -ENOSPC;
630
631 /* Put them back on the free list, so, they don't get lost */
632 list_splice_tail(&tx_list, &sh_chan->ld_free);
633
634 spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
635
636 return &first->async_tx;
637
638err_get_desc:
639 list_for_each_entry(new, &tx_list, node)
640 new->mark = DESC_IDLE;
641 list_splice(&tx_list, &sh_chan->ld_free);
642
643 spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
644
645 return NULL;
646}
647
648static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
649 struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
650 size_t len, unsigned long flags)
651{
652 struct sh_dmae_chan *sh_chan;
653 struct scatterlist sg;
654
655 if (!chan || !len)
656 return NULL;
657
658 sh_chan = to_sh_chan(chan);
659
660 sg_init_table(&sg, 1);
661 sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
662 offset_in_page(dma_src));
663 sg_dma_address(&sg) = dma_src;
664 sg_dma_len(&sg) = len;
665
666 return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
667 flags);
668}
669
670static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
671 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
672 enum dma_transfer_direction direction, unsigned long flags,
673 void *context)
674{
675 struct sh_dmae_slave *param;
676 struct sh_dmae_chan *sh_chan;
677 dma_addr_t slave_addr;
678
679 if (!chan)
680 return NULL;
681
682 sh_chan = to_sh_chan(chan);
683 param = chan->private;
684
685 /* Someone calling slave DMA on a public channel? */
686 if (!param || !sg_len) {
687 dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
688 __func__, param, sg_len, param ? param->slave_id : -1);
689 return NULL;
690 }
691
692 slave_addr = param->config->addr;
693
694 /*
695 * if (param != NULL), this is a successfully requested slave channel,
696 * therefore param->config != NULL too.
697 */
698 return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
699 direction, flags);
700}
701
702static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
703 unsigned long arg)
704{
705 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
706 unsigned long flags;
707
708 /* Only supports DMA_TERMINATE_ALL */
709 if (cmd != DMA_TERMINATE_ALL)
710 return -ENXIO;
711
712 if (!chan)
713 return -EINVAL;
714
715 spin_lock_irqsave(&sh_chan->desc_lock, flags);
716 dmae_halt(sh_chan);
717
718 if (!list_empty(&sh_chan->ld_queue)) {
719 /* Record partial transfer */
720 struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
721 struct sh_desc, node);
722 desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
723 sh_chan->xmit_shift;
724 }
725 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
726
727 sh_dmae_chan_ld_cleanup(sh_chan, true);
728
729 return 0;
730}
731
732static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
733{
734 struct sh_desc *desc, *_desc;
735 /* Is the "exposed" head of a chain acked? */
736 bool head_acked = false;
737 dma_cookie_t cookie = 0;
738 dma_async_tx_callback callback = NULL;
739 void *param = NULL;
740 unsigned long flags;
741
742 spin_lock_irqsave(&sh_chan->desc_lock, flags);
743 list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
744 struct dma_async_tx_descriptor *tx = &desc->async_tx;
745
746 BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
747 BUG_ON(desc->mark != DESC_SUBMITTED &&
748 desc->mark != DESC_COMPLETED &&
749 desc->mark != DESC_WAITING);
750
751 /*
752 * queue is ordered, and we use this loop to (1) clean up all
753 * completed descriptors, and to (2) update descriptor flags of
754 * any chunks in a (partially) completed chain
755 */
756 if (!all && desc->mark == DESC_SUBMITTED &&
757 desc->cookie != cookie)
758 break;
759
760 if (tx->cookie > 0)
761 cookie = tx->cookie;
762
763 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
764 if (sh_chan->common.completed_cookie != desc->cookie - 1)
765 dev_dbg(sh_chan->dev,
766 "Completing cookie %d, expected %d\n",
767 desc->cookie,
768 sh_chan->common.completed_cookie + 1);
769 sh_chan->common.completed_cookie = desc->cookie;
770 }
771
772 /* Call callback on the last chunk */
773 if (desc->mark == DESC_COMPLETED && tx->callback) {
774 desc->mark = DESC_WAITING;
775 callback = tx->callback;
776 param = tx->callback_param;
777 dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
778 tx->cookie, tx, sh_chan->id);
779 BUG_ON(desc->chunks != 1);
780 break;
781 }
782
783 if (tx->cookie > 0 || tx->cookie == -EBUSY) {
784 if (desc->mark == DESC_COMPLETED) {
785 BUG_ON(tx->cookie < 0);
786 desc->mark = DESC_WAITING;
787 }
788 head_acked = async_tx_test_ack(tx);
789 } else {
790 switch (desc->mark) {
791 case DESC_COMPLETED:
792 desc->mark = DESC_WAITING;
793 /* Fall through */
794 case DESC_WAITING:
795 if (head_acked)
796 async_tx_ack(&desc->async_tx);
797 }
798 }
799
800 dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
801 tx, tx->cookie);
802
803 if (((desc->mark == DESC_COMPLETED ||
804 desc->mark == DESC_WAITING) &&
805 async_tx_test_ack(&desc->async_tx)) || all) {
806 /* Remove from ld_queue list */
807 desc->mark = DESC_IDLE;
808
809 list_move(&desc->node, &sh_chan->ld_free);
810
811 if (list_empty(&sh_chan->ld_queue)) {
812 dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
813 pm_runtime_put(sh_chan->dev);
814 }
815 }
816 }
817
818 if (all && !callback)
819 /*
820 * Terminating and the loop completed normally: forgive
821 * uncompleted cookies
822 */
823 sh_chan->common.completed_cookie = sh_chan->common.cookie;
824
825 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
826
827 if (callback)
828 callback(param);
829
830 return callback;
831}
832
833/*
834 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
835 *
836 * This function cleans up the ld_queue of a DMA channel.
837 */
838static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
839{
840 while (__ld_cleanup(sh_chan, all))
841 ;
842}
843
844/* Called under spin_lock_irq(&sh_chan->desc_lock) */
845static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
846{
847 struct sh_desc *desc;
848
849 /* DMA work check */
850 if (dmae_is_busy(sh_chan))
851 return;
852
853 /* Find the first descriptor not yet transferred */
854 list_for_each_entry(desc, &sh_chan->ld_queue, node)
855 if (desc->mark == DESC_SUBMITTED) {
856 dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
857 desc->async_tx.cookie, sh_chan->id,
858 desc->hw.tcr, desc->hw.sar, desc->hw.dar);
859 /* Get the ld start address from ld_queue */
860 dmae_set_reg(sh_chan, &desc->hw);
861 dmae_start(sh_chan);
862 break;
863 }
864}
865
866static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
867{
868 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
869
870 spin_lock_irq(&sh_chan->desc_lock);
871 if (sh_chan->pm_state == DMAE_PM_ESTABLISHED)
872 sh_chan_xfer_ld_queue(sh_chan);
873 else
874 sh_chan->pm_state = DMAE_PM_PENDING;
875 spin_unlock_irq(&sh_chan->desc_lock);
876}
877
878static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
879 dma_cookie_t cookie,
880 struct dma_tx_state *txstate)
881{
882 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
883 enum dma_status status;
884 unsigned long flags;
885
886 sh_dmae_chan_ld_cleanup(sh_chan, false);
887
888 spin_lock_irqsave(&sh_chan->desc_lock, flags);
889
890 status = dma_cookie_status(chan, cookie, txstate);
891
892 /*
893 * If we don't find cookie on the queue, it has been aborted and we have
894 * to report error
895 */
896 if (status != DMA_SUCCESS) {
897 struct sh_desc *desc;
898 status = DMA_ERROR;
899 list_for_each_entry(desc, &sh_chan->ld_queue, node)
900 if (desc->cookie == cookie) {
901 status = DMA_IN_PROGRESS;
902 break;
903 }
904 }
905
906 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
907
908 return status;
909}
910
911static irqreturn_t sh_dmae_interrupt(int irq, void *data)
912{
913 irqreturn_t ret = IRQ_NONE;
914 struct sh_dmae_chan *sh_chan = data;
915 u32 chcr;
916
917 spin_lock(&sh_chan->desc_lock);
918
919 chcr = chcr_read(sh_chan);
920
921 if (chcr & CHCR_TE) {
922 /* DMA stop */
923 dmae_halt(sh_chan);
924
925 ret = IRQ_HANDLED;
926 tasklet_schedule(&sh_chan->tasklet);
927 }
928
929 spin_unlock(&sh_chan->desc_lock);
930
931 return ret;
932}
933
934/* Called from error IRQ or NMI */
935static bool sh_dmae_reset(struct sh_dmae_device *shdev)
936{
937 unsigned int handled = 0;
938 int i;
939
940 /* halt the dma controller */
941 sh_dmae_ctl_stop(shdev);
942
943 /* We cannot detect which channel caused the error, so we have to reset all */
944 for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
945 struct sh_dmae_chan *sh_chan = shdev->chan[i];
946 struct sh_desc *desc;
947 LIST_HEAD(dl);
948
949 if (!sh_chan)
950 continue;
951
952 spin_lock(&sh_chan->desc_lock);
953
954 /* Stop the channel */
955 dmae_halt(sh_chan);
956
957 list_splice_init(&sh_chan->ld_queue, &dl);
958
959 if (!list_empty(&dl)) {
960 dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
961 pm_runtime_put(sh_chan->dev);
962 }
963 sh_chan->pm_state = DMAE_PM_ESTABLISHED;
964
965 spin_unlock(&sh_chan->desc_lock);
966
967 /* Complete all */
968 list_for_each_entry(desc, &dl, node) {
969 struct dma_async_tx_descriptor *tx = &desc->async_tx;
970 desc->mark = DESC_IDLE;
971 if (tx->callback)
972 tx->callback(tx->callback_param);
973 }
974
975 spin_lock(&sh_chan->desc_lock);
976 list_splice(&dl, &sh_chan->ld_free);
977 spin_unlock(&sh_chan->desc_lock);
978
979 handled++;
980 }
981
982 sh_dmae_rst(shdev);
983
984 return !!handled;
985}
986
987static irqreturn_t sh_dmae_err(int irq, void *data)
988{
989 struct sh_dmae_device *shdev = data;
990
991 if (!(dmaor_read(shdev) & DMAOR_AE))
992 return IRQ_NONE;
993
994 sh_dmae_reset(data);
995 return IRQ_HANDLED;
996}
997
998static void dmae_do_tasklet(unsigned long data)
999{
1000 struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
1001 struct sh_desc *desc;
1002 u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
1003 u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
1004
1005 spin_lock_irq(&sh_chan->desc_lock);
1006 list_for_each_entry(desc, &sh_chan->ld_queue, node) {
1007 if (desc->mark == DESC_SUBMITTED &&
1008 ((desc->direction == DMA_DEV_TO_MEM &&
1009 (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
1010 (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
1011 dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
1012 desc->async_tx.cookie, &desc->async_tx,
1013 desc->hw.dar);
1014 desc->mark = DESC_COMPLETED;
1015 break;
1016 }
1017 }
1018 /* Next desc */
1019 sh_chan_xfer_ld_queue(sh_chan);
1020 spin_unlock_irq(&sh_chan->desc_lock);
1021
1022 sh_dmae_chan_ld_cleanup(sh_chan, false);
1023}
1024
1025static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
1026{
1027 /* Fast path out if NMIF is not asserted for this controller */
1028 if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
1029 return false;
1030
1031 return sh_dmae_reset(shdev);
1032}
1033
1034static int sh_dmae_nmi_handler(struct notifier_block *self,
1035 unsigned long cmd, void *data)
1036{
1037 struct sh_dmae_device *shdev;
1038 int ret = NOTIFY_DONE;
1039 bool triggered;
1040
1041 /*
1042 * Only concern ourselves with NMI events.
1043 *
1044 * Normally we would check the die chain value, but as this needs
1045 * to be architecture independent, check for NMI context instead.
1046 */
1047 if (!in_nmi())
1048 return NOTIFY_DONE;
1049
1050 rcu_read_lock();
1051 list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
1052 /*
1053 * Only stop if one of the controllers has NMIF asserted,
1054 * we do not want to interfere with regular address error
1055 * handling or NMI events that don't concern the DMACs.
1056 */
1057 triggered = sh_dmae_nmi_notify(shdev);
1058 if (triggered == true)
1059 ret = NOTIFY_OK;
1060 }
1061 rcu_read_unlock();
1062
1063 return ret;
1064}
1065
1066static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
1067 .notifier_call = sh_dmae_nmi_handler,
1068
1069 /* Run before NMI debug handler and KGDB */
1070 .priority = 1,
1071};
1072
1073static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
1074 int irq, unsigned long flags)
1075{
1076 int err;
1077 const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
1078 struct platform_device *pdev = to_platform_device(shdev->common.dev);
1079 struct sh_dmae_chan *new_sh_chan;
1080
1081 /* alloc channel */
1082 new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
1083 if (!new_sh_chan) {
1084 dev_err(shdev->common.dev,
1085 "No free memory for allocating dma channels!\n");
1086 return -ENOMEM;
1087 }
1088
1089 new_sh_chan->pm_state = DMAE_PM_ESTABLISHED;
1090
1091 /* reference struct dma_device */
1092 new_sh_chan->common.device = &shdev->common;
1093 dma_cookie_init(&new_sh_chan->common);
1094
1095 new_sh_chan->dev = shdev->common.dev;
1096 new_sh_chan->id = id;
1097 new_sh_chan->irq = irq;
1098 new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
1099
1100 /* Init DMA tasklet */
1101 tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
1102 (unsigned long)new_sh_chan);
1103
1104 spin_lock_init(&new_sh_chan->desc_lock);
1105
1106 /* Init descriptor management lists */
1107 INIT_LIST_HEAD(&new_sh_chan->ld_queue);
1108 INIT_LIST_HEAD(&new_sh_chan->ld_free);
1109
1110 /* Add the channel to DMA device channel list */
1111 list_add_tail(&new_sh_chan->common.device_node,
1112 &shdev->common.channels);
1113 shdev->common.chancnt++;
1114
1115 if (pdev->id >= 0)
1116 snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
1117 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
1118 else
1119 snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
1120 "sh-dma%d", new_sh_chan->id);
1121
1122 /* set up channel irq */
1123 err = request_irq(irq, &sh_dmae_interrupt, flags,
1124 new_sh_chan->dev_id, new_sh_chan);
1125 if (err) {
1126 dev_err(shdev->common.dev, "DMA channel %d request_irq error "
1127 "with return %d\n", id, err);
1128 goto err_no_irq;
1129 }
1130
1131 shdev->chan[id] = new_sh_chan;
1132 return 0;
1133
1134err_no_irq:
1135 /* remove from dmaengine device node */
1136 list_del(&new_sh_chan->common.device_node);
1137 kfree(new_sh_chan);
1138 return err;
1139}
1140
1141static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
1142{
1143 int i;
1144
1145 for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
1146 if (shdev->chan[i]) {
1147 struct sh_dmae_chan *sh_chan = shdev->chan[i];
1148
1149 free_irq(sh_chan->irq, sh_chan);
1150
1151 list_del(&sh_chan->common.device_node);
1152 kfree(sh_chan);
1153 shdev->chan[i] = NULL;
1154 }
1155 }
1156 shdev->common.chancnt = 0;
1157}
1158
1159static int __init sh_dmae_probe(struct platform_device *pdev)
1160{
1161 struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
1162 unsigned long irqflags = IRQF_DISABLED,
1163 chan_flag[SH_DMAC_MAX_CHANNELS] = {};
1164 int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
1165 int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
1166 struct sh_dmae_device *shdev;
1167 struct resource *chan, *dmars, *errirq_res, *chanirq_res;
1168
1169 /* get platform data */
1170 if (!pdata || !pdata->channel_num)
1171 return -ENODEV;
1172
1173 chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1174 /* DMARS area is optional */
1175 dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1176 /*
1177 * IRQ resources:
1178 * 1. there must always be at least one IRQ IO-resource. On SH4 it is
1179 * the error IRQ, in which case it is the only IRQ in this resource:
1180 * start == end. If it is the only IRQ resource, all channels also
1181 * use the same IRQ.
1182 * 2. DMA channel IRQ resources can be specified one per resource or in
1183 * ranges (start != end)
1184 * 3. iff all events (channels and, optionally, error) on this
1185 * controller use the same IRQ, only one IRQ resource can be
1186 * specified, otherwise there must be one IRQ per channel, even if
1187 * some of them are equal
1188 * 4. if all IRQs on this controller are equal or if some specific IRQs
1189 * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
1190 * requested with the IRQF_SHARED flag
1191 */
1192 errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1193 if (!chan || !errirq_res)
1194 return -ENODEV;
1195
1196 if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
1197 dev_err(&pdev->dev, "DMAC register region already claimed\n");
1198 return -EBUSY;
1199 }
1200
1201 if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
1202 dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
1203 err = -EBUSY;
1204 goto ermrdmars;
1205 }
1206
1207 err = -ENOMEM;
1208 shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
1209 if (!shdev) {
1210 dev_err(&pdev->dev, "Not enough memory\n");
1211 goto ealloc;
1212 }
1213
1214 shdev->chan_reg = ioremap(chan->start, resource_size(chan));
1215 if (!shdev->chan_reg)
1216 goto emapchan;
1217 if (dmars) {
1218 shdev->dmars = ioremap(dmars->start, resource_size(dmars));
1219 if (!shdev->dmars)
1220 goto emapdmars;
1221 }
1222
1223 /* platform data */
1224 shdev->pdata = pdata;
1225
1226 if (pdata->chcr_offset)
1227 shdev->chcr_offset = pdata->chcr_offset;
1228 else
1229 shdev->chcr_offset = CHCR;
1230
1231 if (pdata->chcr_ie_bit)
1232 shdev->chcr_ie_bit = pdata->chcr_ie_bit;
1233 else
1234 shdev->chcr_ie_bit = CHCR_IE;
1235
1236 platform_set_drvdata(pdev, shdev);
1237
1238 shdev->common.dev = &pdev->dev;
1239
1240 pm_runtime_enable(&pdev->dev);
1241 pm_runtime_get_sync(&pdev->dev);
1242
1243 spin_lock_irq(&sh_dmae_lock);
1244 list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
1245 spin_unlock_irq(&sh_dmae_lock);
1246
1247 /* reset dma controller - only needed as a test */
1248 err = sh_dmae_rst(shdev);
1249 if (err)
1250 goto rst_err;
1251
1252 INIT_LIST_HEAD(&shdev->common.channels);
1253
1254 if (!pdata->slave_only)
1255 dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
1256 if (pdata->slave && pdata->slave_num)
1257 dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
1258
1259 shdev->common.device_alloc_chan_resources
1260 = sh_dmae_alloc_chan_resources;
1261 shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
1262 shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
1263 shdev->common.device_tx_status = sh_dmae_tx_status;
1264 shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
1265
1266 /* Compulsory for DMA_SLAVE fields */
1267 shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
1268 shdev->common.device_control = sh_dmae_control;
1269
1270 /* Default transfer size of 32 bytes requires 32-byte alignment */
1271 shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
1272
1273#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
1274 chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1275
1276 if (!chanirq_res)
1277 chanirq_res = errirq_res;
1278 else
1279 irqres++;
1280
1281 if (chanirq_res == errirq_res ||
1282 (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
1283 irqflags = IRQF_SHARED;
1284
1285 errirq = errirq_res->start;
1286
1287 err = request_irq(errirq, sh_dmae_err, irqflags,
1288 "DMAC Address Error", shdev);
1289 if (err) {
1290 dev_err(&pdev->dev,
1291 "DMA failed requesting irq #%d, error %d\n",
1292 errirq, err);
1293 goto eirq_err;
1294 }
1295
1296#else
1297 chanirq_res = errirq_res;
1298#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
1299
1300 if (chanirq_res->start == chanirq_res->end &&
1301 !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
1302 /* Special case - all multiplexed */
1303 for (; irq_cnt < pdata->channel_num; irq_cnt++) {
1304 if (irq_cnt < SH_DMAC_MAX_CHANNELS) {
1305 chan_irq[irq_cnt] = chanirq_res->start;
1306 chan_flag[irq_cnt] = IRQF_SHARED;
1307 } else {
1308 irq_cap = 1;
1309 break;
1310 }
1311 }
1312 } else {
1313 do {
1314 for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
1315 if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
1316 irq_cap = 1;
1317 break;
1318 }
1319
1320 if ((errirq_res->flags & IORESOURCE_BITS) ==
1321 IORESOURCE_IRQ_SHAREABLE)
1322 chan_flag[irq_cnt] = IRQF_SHARED;
1323 else
1324 chan_flag[irq_cnt] = IRQF_DISABLED;
1325 dev_dbg(&pdev->dev,
1326 "Found IRQ %d for channel %d\n",
1327 i, irq_cnt);
1328 chan_irq[irq_cnt++] = i;
1329 }
1330
1331 if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
1332 break;
1333
1334 chanirq_res = platform_get_resource(pdev,
1335 IORESOURCE_IRQ, ++irqres);
1336 } while (irq_cnt < pdata->channel_num && chanirq_res);
1337 }
1338
1339 /* Create DMA Channel */
1340 for (i = 0; i < irq_cnt; i++) {
1341 err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
1342 if (err)
1343 goto chan_probe_err;
1344 }
1345
1346 if (irq_cap)
1347 dev_notice(&pdev->dev, "Attempting to register %d DMA "
1348 "channels when a maximum of %d are supported.\n",
1349 pdata->channel_num, SH_DMAC_MAX_CHANNELS);
1350
1351 pm_runtime_put(&pdev->dev);
1352
1353 dma_async_device_register(&shdev->common);
1354
1355 return err;
1356
1357chan_probe_err:
1358 sh_dmae_chan_remove(shdev);
1359
1360#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
1361 free_irq(errirq, shdev);
1362eirq_err:
1363#endif
1364rst_err:
1365 spin_lock_irq(&sh_dmae_lock);
1366 list_del_rcu(&shdev->node);
1367 spin_unlock_irq(&sh_dmae_lock);
1368
1369 pm_runtime_put(&pdev->dev);
1370 pm_runtime_disable(&pdev->dev);
1371
1372 if (dmars)
1373 iounmap(shdev->dmars);
1374
1375 platform_set_drvdata(pdev, NULL);
1376emapdmars:
1377 iounmap(shdev->chan_reg);
1378 synchronize_rcu();
1379emapchan:
1380 kfree(shdev);
1381ealloc:
1382 if (dmars)
1383 release_mem_region(dmars->start, resource_size(dmars));
1384ermrdmars:
1385 release_mem_region(chan->start, resource_size(chan));
1386
1387 return err;
1388}
1389
1390static int __exit sh_dmae_remove(struct platform_device *pdev)
1391{
1392 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
1393 struct resource *res;
1394 int errirq = platform_get_irq(pdev, 0);
1395
1396 dma_async_device_unregister(&shdev->common);
1397
1398 if (errirq > 0)
1399 free_irq(errirq, shdev);
1400
1401 spin_lock_irq(&sh_dmae_lock);
1402 list_del_rcu(&shdev->node);
1403 spin_unlock_irq(&sh_dmae_lock);
1404
1405 /* remove channel data */
1406 sh_dmae_chan_remove(shdev);
1407
1408 pm_runtime_disable(&pdev->dev);
1409
1410 if (shdev->dmars)
1411 iounmap(shdev->dmars);
1412 iounmap(shdev->chan_reg);
1413
1414 platform_set_drvdata(pdev, NULL);
1415
1416 synchronize_rcu();
1417 kfree(shdev);
1418
1419 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1420 if (res)
1421 release_mem_region(res->start, resource_size(res));
1422 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1423 if (res)
1424 release_mem_region(res->start, resource_size(res));
1425
1426 return 0;
1427}
1428
1429static void sh_dmae_shutdown(struct platform_device *pdev)
1430{
1431 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
1432 sh_dmae_ctl_stop(shdev);
1433}
1434
1435static int sh_dmae_runtime_suspend(struct device *dev)
1436{
1437 return 0;
1438}
1439
1440static int sh_dmae_runtime_resume(struct device *dev)
1441{
1442 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1443
1444 return sh_dmae_rst(shdev);
1445}
1446
1447#ifdef CONFIG_PM
1448static int sh_dmae_suspend(struct device *dev)
1449{
1450 return 0;
1451}
1452
1453static int sh_dmae_resume(struct device *dev)
1454{
1455 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1456 int i, ret;
1457
1458 ret = sh_dmae_rst(shdev);
1459 if (ret < 0)
1460 dev_err(dev, "Failed to reset!\n");
1461
1462 for (i = 0; i < shdev->pdata->channel_num; i++) {
1463 struct sh_dmae_chan *sh_chan = shdev->chan[i];
1464 struct sh_dmae_slave *param = sh_chan->common.private;
1465
1466 if (!sh_chan->descs_allocated)
1467 continue;
1468
1469 if (param) {
1470 const struct sh_dmae_slave_config *cfg = param->config;
1471 dmae_set_dmars(sh_chan, cfg->mid_rid);
1472 dmae_set_chcr(sh_chan, cfg->chcr);
1473 } else {
1474 dmae_init(sh_chan);
1475 }
1476 }
1477
1478 return 0;
1479}
1480#else
1481#define sh_dmae_suspend NULL
1482#define sh_dmae_resume NULL
1483#endif
1484
1485const struct dev_pm_ops sh_dmae_pm = {
1486 .suspend = sh_dmae_suspend,
1487 .resume = sh_dmae_resume,
1488 .runtime_suspend = sh_dmae_runtime_suspend,
1489 .runtime_resume = sh_dmae_runtime_resume,
1490};
1491
1492static struct platform_driver sh_dmae_driver = {
1493 .remove = __exit_p(sh_dmae_remove),
1494 .shutdown = sh_dmae_shutdown,
1495 .driver = {
1496 .owner = THIS_MODULE,
1497 .name = "sh-dma-engine",
1498 .pm = &sh_dmae_pm,
1499 },
1500};
1501
1502static int __init sh_dmae_init(void)
1503{
1504 /* Wire up NMI handling */
1505 int err = register_die_notifier(&sh_dmae_nmi_notifier);
1506 if (err)
1507 return err;
1508
1509 return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
1510}
1511module_init(sh_dmae_init);
1512
1513static void __exit sh_dmae_exit(void)
1514{
1515 platform_driver_unregister(&sh_dmae_driver);
1516
1517 unregister_die_notifier(&sh_dmae_nmi_notifier);
1518}
1519module_exit(sh_dmae_exit);
1520
1521MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
1522MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
1523MODULE_LICENSE("GPL");
1524MODULE_ALIAS("platform:sh-dma-engine");
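Both the deleted file above and its replacement under drivers/dma/sh/ guard the global controller list exactly as the comment at the sh_dmae_lock definition describes: writers serialize on the spinlock, while the NMI-path reader takes no lock at all, only an RCU read-side section. Condensed from the code above for reference (no new API, just the two sides shown next to each other):

	/* writer side, as in sh_dmae_probe() and sh_dmae_remove() */
	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);	/* or list_del_rcu() */
	spin_unlock_irq(&sh_dmae_lock);

	/* reader side, as in sh_dmae_nmi_handler(): NMI context, lock-free */
	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node)
		triggered = sh_dmae_nmi_notify(shdev);
	rcu_read_unlock();

The synchronize_rcu() calls that precede kfree(shdev) in the error and removal paths are the matching grace-period waits: once a controller has been list_del_rcu()'d, its memory is not freed until any reader still walking the list has finished.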
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
new file mode 100644
index 000000000000..24acd711e032
--- /dev/null
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -0,0 +1,1431 @@
1/*
2 * DMA driver for Nvidia's Tegra20 APB DMA controller.
3 *
4 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/bitops.h>
20#include <linux/clk.h>
21#include <linux/delay.h>
22#include <linux/dmaengine.h>
23#include <linux/dma-mapping.h>
24#include <linux/init.h>
25#include <linux/interrupt.h>
26#include <linux/io.h>
27#include <linux/mm.h>
28#include <linux/module.h>
29#include <linux/of.h>
30#include <linux/of_device.h>
31#include <linux/platform_device.h>
32#include <linux/pm_runtime.h>
33#include <linux/slab.h>
34
35#include <mach/clk.h>
36#include "dmaengine.h"
37
38#define TEGRA_APBDMA_GENERAL 0x0
39#define TEGRA_APBDMA_GENERAL_ENABLE BIT(31)
40
41#define TEGRA_APBDMA_CONTROL 0x010
42#define TEGRA_APBDMA_IRQ_MASK 0x01c
43#define TEGRA_APBDMA_IRQ_MASK_SET 0x020
44
45/* CSR register */
46#define TEGRA_APBDMA_CHAN_CSR 0x00
47#define TEGRA_APBDMA_CSR_ENB BIT(31)
48#define TEGRA_APBDMA_CSR_IE_EOC BIT(30)
49#define TEGRA_APBDMA_CSR_HOLD BIT(29)
50#define TEGRA_APBDMA_CSR_DIR BIT(28)
51#define TEGRA_APBDMA_CSR_ONCE BIT(27)
52#define TEGRA_APBDMA_CSR_FLOW BIT(21)
53#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT 16
54#define TEGRA_APBDMA_CSR_WCOUNT_MASK 0xFFFC
55
56/* STATUS register */
57#define TEGRA_APBDMA_CHAN_STATUS 0x004
58#define TEGRA_APBDMA_STATUS_BUSY BIT(31)
59#define TEGRA_APBDMA_STATUS_ISE_EOC BIT(30)
60#define TEGRA_APBDMA_STATUS_HALT BIT(29)
61#define TEGRA_APBDMA_STATUS_PING_PONG BIT(28)
62#define TEGRA_APBDMA_STATUS_COUNT_SHIFT 2
63#define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC
64
65/* AHB memory address */
66#define TEGRA_APBDMA_CHAN_AHBPTR 0x010
67
68/* AHB sequence register */
69#define TEGRA_APBDMA_CHAN_AHBSEQ 0x14
70#define TEGRA_APBDMA_AHBSEQ_INTR_ENB BIT(31)
71#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8 (0 << 28)
72#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16 (1 << 28)
73#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32 (2 << 28)
74#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64 (3 << 28)
75#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128 (4 << 28)
76#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP BIT(27)
77#define TEGRA_APBDMA_AHBSEQ_BURST_1 (4 << 24)
78#define TEGRA_APBDMA_AHBSEQ_BURST_4 (5 << 24)
79#define TEGRA_APBDMA_AHBSEQ_BURST_8 (6 << 24)
80#define TEGRA_APBDMA_AHBSEQ_DBL_BUF BIT(19)
81#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT 16
82#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE 0
83
84/* APB address */
85#define TEGRA_APBDMA_CHAN_APBPTR 0x018
86
87/* APB sequence register */
88#define TEGRA_APBDMA_CHAN_APBSEQ 0x01c
89#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8 (0 << 28)
90#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16 (1 << 28)
91#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32 (2 << 28)
92#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64 (3 << 28)
93#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128 (4 << 28)
94#define TEGRA_APBDMA_APBSEQ_DATA_SWAP BIT(27)
95#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1 (1 << 16)
96
 97/*
 98 * If a burst is in flight when the DMA is paused, this is the time needed
 99 * for the in-flight burst to complete and the DMA status register to update.
100 */
101#define TEGRA_APBDMA_BURST_COMPLETE_TIME 20
102
103/* Channel base address offset from APBDMA base address */
104#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000
105
106/* DMA channel register space size */
107#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE 0x20
108
109struct tegra_dma;
110
111/*
112 * tegra_dma_chip_data: Tegra chip-specific DMA data
113 * @nr_channels: Number of channels available in the controller.
114 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
115 */
116struct tegra_dma_chip_data {
117 int nr_channels;
118 int max_dma_count;
119};
120
121/* DMA channel registers */
122struct tegra_dma_channel_regs {
123 unsigned long csr;
124 unsigned long ahb_ptr;
125 unsigned long apb_ptr;
126 unsigned long ahb_seq;
127 unsigned long apb_seq;
128};
129
130/*
131 * tegra_dma_sg_req: DMA request details used to program the hardware.
132 * This contains the details of one sub-transfer used to configure the
133 * DMA hardware. A client's data transfer request may be broken into
134 * multiple sub-transfers, depending on the requester details and
135 * hardware support. Each sub-transfer is added to the transfer list
136 * and points to the Tegra DMA descriptor that manages the transfer.
137 */
138struct tegra_dma_sg_req {
139 struct tegra_dma_channel_regs ch_regs;
140 int req_len;
141 bool configured;
142 bool last_sg;
143 bool half_done;
144 struct list_head node;
145 struct tegra_dma_desc *dma_desc;
146};
147
148/*
149 * tegra_dma_desc: Tegra DMA descriptor that manages a client request.
150 * This descriptor keeps track of the transfer status, callbacks,
151 * request counts and so on.
152 */
153struct tegra_dma_desc {
154 struct dma_async_tx_descriptor txd;
155 int bytes_requested;
156 int bytes_transferred;
157 enum dma_status dma_status;
158 struct list_head node;
159 struct list_head tx_list;
160 struct list_head cb_node;
161 int cb_count;
162};
163
164struct tegra_dma_channel;
165
166typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
167 bool to_terminate);
168
169/* tegra_dma_channel: Channel specific information */
170struct tegra_dma_channel {
171 struct dma_chan dma_chan;
172 bool config_init;
173 int id;
174 int irq;
175 unsigned long chan_base_offset;
176 spinlock_t lock;
177 bool busy;
178 struct tegra_dma *tdma;
179 bool cyclic;
180
181 /* Different lists for managing the requests */
182 struct list_head free_sg_req;
183 struct list_head pending_sg_req;
184 struct list_head free_dma_desc;
185 struct list_head cb_desc;
186
187	/* ISR handler and tasklet for the bottom half of ISR handling */
188 dma_isr_handler isr_handler;
189 struct tasklet_struct tasklet;
190 dma_async_tx_callback callback;
191 void *callback_param;
192
193 /* Channel-slave specific configuration */
194 struct dma_slave_config dma_sconfig;
195};
196
197/* tegra_dma: Tegra DMA specific information */
198struct tegra_dma {
199 struct dma_device dma_dev;
200 struct device *dev;
201 struct clk *dma_clk;
202 spinlock_t global_lock;
203 void __iomem *base_addr;
204 struct tegra_dma_chip_data *chip_data;
205
206	/* Some registers need to be cached before suspend */
207 u32 reg_gen;
208
209 /* Last member of the structure */
210 struct tegra_dma_channel channels[0];
211};
212
213static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
214{
215 writel(val, tdma->base_addr + reg);
216}
217
218static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
219{
220 return readl(tdma->base_addr + reg);
221}
222
223static inline void tdc_write(struct tegra_dma_channel *tdc,
224 u32 reg, u32 val)
225{
226 writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
227}
228
229static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
230{
231 return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
232}
233
234static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
235{
236 return container_of(dc, struct tegra_dma_channel, dma_chan);
237}
238
239static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
240 struct dma_async_tx_descriptor *td)
241{
242 return container_of(td, struct tegra_dma_desc, txd);
243}
244
245static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
246{
247 return &tdc->dma_chan.dev->device;
248}
249
250static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
251static int tegra_dma_runtime_suspend(struct device *dev);
252static int tegra_dma_runtime_resume(struct device *dev);
253
254/* Get a DMA desc from the free list; if none is available, allocate one. */
255static struct tegra_dma_desc *tegra_dma_desc_get(
256 struct tegra_dma_channel *tdc)
257{
258 struct tegra_dma_desc *dma_desc;
259 unsigned long flags;
260
261 spin_lock_irqsave(&tdc->lock, flags);
262
263	/* Do not reuse a descriptor that is still waiting for its ack */
264 list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
265 if (async_tx_test_ack(&dma_desc->txd)) {
266 list_del(&dma_desc->node);
267 spin_unlock_irqrestore(&tdc->lock, flags);
268 return dma_desc;
269 }
270 }
271
272 spin_unlock_irqrestore(&tdc->lock, flags);
273
274 /* Allocate DMA desc */
275 dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC);
276 if (!dma_desc) {
277 dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
278 return NULL;
279 }
280
281 dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
282 dma_desc->txd.tx_submit = tegra_dma_tx_submit;
283 dma_desc->txd.flags = 0;
284 return dma_desc;
285}
286
287static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
288 struct tegra_dma_desc *dma_desc)
289{
290 unsigned long flags;
291
292 spin_lock_irqsave(&tdc->lock, flags);
293 if (!list_empty(&dma_desc->tx_list))
294 list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
295 list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
296 spin_unlock_irqrestore(&tdc->lock, flags);
297}
298
299static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
300 struct tegra_dma_channel *tdc)
301{
302 struct tegra_dma_sg_req *sg_req = NULL;
303 unsigned long flags;
304
305 spin_lock_irqsave(&tdc->lock, flags);
306 if (!list_empty(&tdc->free_sg_req)) {
307 sg_req = list_first_entry(&tdc->free_sg_req,
308 typeof(*sg_req), node);
309 list_del(&sg_req->node);
310 spin_unlock_irqrestore(&tdc->lock, flags);
311 return sg_req;
312 }
313 spin_unlock_irqrestore(&tdc->lock, flags);
314
315 sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC);
316 if (!sg_req)
317 dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
318 return sg_req;
319}
320
321static int tegra_dma_slave_config(struct dma_chan *dc,
322 struct dma_slave_config *sconfig)
323{
324 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
325
326 if (!list_empty(&tdc->pending_sg_req)) {
327 dev_err(tdc2dev(tdc), "Configuration not allowed\n");
328 return -EBUSY;
329 }
330
331 memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
332 tdc->config_init = true;
333 return 0;
334}
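/*
 * Editor's sketch (not part of the driver): a client would typically fill
 * a dma_slave_config and hand it to the channel before preparing any
 * transfer. APB_FIFO_PHYS and req_sel below are hypothetical placeholders.
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr	= APB_FIFO_PHYS,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *		.slave_id	= req_sel,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */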
335
336static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
337 bool wait_for_burst_complete)
338{
339 struct tegra_dma *tdma = tdc->tdma;
340
341 spin_lock(&tdma->global_lock);
342 tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
343 if (wait_for_burst_complete)
344 udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
345}
346
347static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
348{
349 struct tegra_dma *tdma = tdc->tdma;
350
351 tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
352 spin_unlock(&tdma->global_lock);
353}
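/*
 * Editor's note: pause/resume form a matched critical section. The global
 * lock is acquired in tegra_dma_global_pause() and released in
 * tegra_dma_global_resume(), so every pause must be paired with a resume
 * on all paths:
 *
 *	tegra_dma_global_pause(tdc, true);
 *	... safely touch channel registers ...
 *	tegra_dma_global_resume(tdc);
 */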
354
355static void tegra_dma_stop(struct tegra_dma_channel *tdc)
356{
357 u32 csr;
358 u32 status;
359
360 /* Disable interrupts */
361 csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
362 csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
363 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
364
365 /* Disable DMA */
366 csr &= ~TEGRA_APBDMA_CSR_ENB;
367 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
368
369 /* Clear interrupt status if it is there */
370 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
371 if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
372 dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
373 tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
374 }
375 tdc->busy = false;
376}
377
378static void tegra_dma_start(struct tegra_dma_channel *tdc,
379 struct tegra_dma_sg_req *sg_req)
380{
381 struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;
382
383 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
384 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
385 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
386 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
387 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
388
389 /* Start DMA */
390 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
391 ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
392}
393
394static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
395 struct tegra_dma_sg_req *nsg_req)
396{
397 unsigned long status;
398
399	/*
400	 * The DMA controller reloads the new configuration for the next
401	 * transfer after the last burst of the current transfer completes.
402	 * If the EOC status is not set, this check ensures that the last
403	 * burst has not completed yet. The last burst may still be in
404	 * flight and complete later, but because the DMA is paused it
405	 * will neither raise an interrupt nor reload the new
406	 * configuration.
407	 * If the EOC status is already set, the interrupt handler must
408	 * load the new configuration instead.
409	 */
410 tegra_dma_global_pause(tdc, false);
411 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
412
413	/*
414	 * If an interrupt is pending, do nothing, as the ISR will handle
415	 * the programming of the new request.
416	 */
417 if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
418 dev_err(tdc2dev(tdc),
419 "Skipping new configuration as interrupt is pending\n");
420 tegra_dma_global_resume(tdc);
421 return;
422 }
423
424 /* Safe to program new configuration */
425 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
426 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
427 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
428 nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
429 nsg_req->configured = true;
430
431 tegra_dma_global_resume(tdc);
432}
433
434static void tdc_start_head_req(struct tegra_dma_channel *tdc)
435{
436 struct tegra_dma_sg_req *sg_req;
437
438 if (list_empty(&tdc->pending_sg_req))
439 return;
440
441 sg_req = list_first_entry(&tdc->pending_sg_req,
442 typeof(*sg_req), node);
443 tegra_dma_start(tdc, sg_req);
444 sg_req->configured = true;
445 tdc->busy = true;
446}
447
448static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
449{
450 struct tegra_dma_sg_req *hsgreq;
451 struct tegra_dma_sg_req *hnsgreq;
452
453 if (list_empty(&tdc->pending_sg_req))
454 return;
455
456 hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
457 if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
458 hnsgreq = list_first_entry(&hsgreq->node,
459 typeof(*hnsgreq), node);
460 tegra_dma_configure_for_next(tdc, hnsgreq);
461 }
462}
463
464static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
465 struct tegra_dma_sg_req *sg_req, unsigned long status)
466{
467 return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
468}
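/*
 * Editor's note: the "- 4" mirrors the (len - 4) word count programmed
 * into CSR later in this file. Worked example (illustrative values): for
 * a 64-byte request, CSR is programmed with a count of 60; if STATUS
 * still reports 32 bytes outstanding, then 64 - 32 - 4 = 28 bytes have
 * been transferred.
 */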
469
470static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
471{
472 struct tegra_dma_sg_req *sgreq;
473 struct tegra_dma_desc *dma_desc;
474
475 while (!list_empty(&tdc->pending_sg_req)) {
476 sgreq = list_first_entry(&tdc->pending_sg_req,
477 typeof(*sgreq), node);
478 list_del(&sgreq->node);
479 list_add_tail(&sgreq->node, &tdc->free_sg_req);
480 if (sgreq->last_sg) {
481 dma_desc = sgreq->dma_desc;
482 dma_desc->dma_status = DMA_ERROR;
483 list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
484
485 /* Add in cb list if it is not there. */
486 if (!dma_desc->cb_count)
487 list_add_tail(&dma_desc->cb_node,
488 &tdc->cb_desc);
489 dma_desc->cb_count++;
490 }
491 }
492 tdc->isr_handler = NULL;
493}
494
495static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
496 struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
497{
498 struct tegra_dma_sg_req *hsgreq = NULL;
499
500 if (list_empty(&tdc->pending_sg_req)) {
501		dev_err(tdc2dev(tdc), "DMA is running without any request\n");
502 tegra_dma_stop(tdc);
503 return false;
504 }
505
506	/*
507	 * Check that the head request on the list is in flight.
508	 * If it is not in flight, abort the transfer, as the
509	 * transfer loop cannot continue.
510	 */
511 hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
512 if (!hsgreq->configured) {
513 tegra_dma_stop(tdc);
514 dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
515 tegra_dma_abort_all(tdc);
516 return false;
517 }
518
519 /* Configure next request */
520 if (!to_terminate)
521 tdc_configure_next_head_desc(tdc);
522 return true;
523}
524
525static void handle_once_dma_done(struct tegra_dma_channel *tdc,
526 bool to_terminate)
527{
528 struct tegra_dma_sg_req *sgreq;
529 struct tegra_dma_desc *dma_desc;
530
531 tdc->busy = false;
532 sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
533 dma_desc = sgreq->dma_desc;
534 dma_desc->bytes_transferred += sgreq->req_len;
535
536 list_del(&sgreq->node);
537 if (sgreq->last_sg) {
538 dma_desc->dma_status = DMA_SUCCESS;
539 dma_cookie_complete(&dma_desc->txd);
540 if (!dma_desc->cb_count)
541 list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
542 dma_desc->cb_count++;
543 list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
544 }
545 list_add_tail(&sgreq->node, &tdc->free_sg_req);
546
547	/* Do not start the DMA if it is about to be terminated */
548 if (to_terminate || list_empty(&tdc->pending_sg_req))
549 return;
550
551 tdc_start_head_req(tdc);
552 return;
553}
554
555static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
556 bool to_terminate)
557{
558 struct tegra_dma_sg_req *sgreq;
559 struct tegra_dma_desc *dma_desc;
560 bool st;
561
562 sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
563 dma_desc = sgreq->dma_desc;
564 dma_desc->bytes_transferred += sgreq->req_len;
565
566	/* The callback needs to be called */
567 if (!dma_desc->cb_count)
568 list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
569 dma_desc->cb_count++;
570
571	/* If this is not the last request, move it to the end of the pending list */
572 if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
573 list_del(&sgreq->node);
574 list_add_tail(&sgreq->node, &tdc->pending_sg_req);
575 sgreq->configured = false;
576 st = handle_continuous_head_request(tdc, sgreq, to_terminate);
577 if (!st)
578 dma_desc->dma_status = DMA_ERROR;
579 }
580 return;
581}
582
583static void tegra_dma_tasklet(unsigned long data)
584{
585 struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
586 dma_async_tx_callback callback = NULL;
587 void *callback_param = NULL;
588 struct tegra_dma_desc *dma_desc;
589 unsigned long flags;
590 int cb_count;
591
592 spin_lock_irqsave(&tdc->lock, flags);
593 while (!list_empty(&tdc->cb_desc)) {
594 dma_desc = list_first_entry(&tdc->cb_desc,
595 typeof(*dma_desc), cb_node);
596 list_del(&dma_desc->cb_node);
597 callback = dma_desc->txd.callback;
598 callback_param = dma_desc->txd.callback_param;
599 cb_count = dma_desc->cb_count;
600 dma_desc->cb_count = 0;
601 spin_unlock_irqrestore(&tdc->lock, flags);
602 while (cb_count-- && callback)
603 callback(callback_param);
604 spin_lock_irqsave(&tdc->lock, flags);
605 }
606 spin_unlock_irqrestore(&tdc->lock, flags);
607}
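/*
 * Editor's note: the channel lock is dropped around each callback
 * invocation above so that a callback may safely queue new descriptors
 * on the same channel without deadlocking against tdc->lock.
 */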
608
609static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
610{
611 struct tegra_dma_channel *tdc = dev_id;
612 unsigned long status;
613 unsigned long flags;
614
615 spin_lock_irqsave(&tdc->lock, flags);
616
617 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
618 if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
619 tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
620 tdc->isr_handler(tdc, false);
621 tasklet_schedule(&tdc->tasklet);
622 spin_unlock_irqrestore(&tdc->lock, flags);
623 return IRQ_HANDLED;
624 }
625
626 spin_unlock_irqrestore(&tdc->lock, flags);
627 dev_info(tdc2dev(tdc),
628 "Interrupt already served status 0x%08lx\n", status);
629 return IRQ_NONE;
630}
631
632static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
633{
634 struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
635 struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
636 unsigned long flags;
637 dma_cookie_t cookie;
638
639 spin_lock_irqsave(&tdc->lock, flags);
640 dma_desc->dma_status = DMA_IN_PROGRESS;
641 cookie = dma_cookie_assign(&dma_desc->txd);
642 list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
643 spin_unlock_irqrestore(&tdc->lock, flags);
644 return cookie;
645}
646
647static void tegra_dma_issue_pending(struct dma_chan *dc)
648{
649 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
650 unsigned long flags;
651
652 spin_lock_irqsave(&tdc->lock, flags);
653 if (list_empty(&tdc->pending_sg_req)) {
654 dev_err(tdc2dev(tdc), "No DMA request\n");
655 goto end;
656 }
657 if (!tdc->busy) {
658 tdc_start_head_req(tdc);
659
660 /* Continuous single mode: Configure next req */
661 if (tdc->cyclic) {
662			/*
663			 * Wait for one burst time to configure the DMA
664			 * for the next transfer.
665			 */
666 udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
667 tdc_configure_next_head_desc(tdc);
668 }
669 }
670end:
671 spin_unlock_irqrestore(&tdc->lock, flags);
672 return;
673}
674
675static void tegra_dma_terminate_all(struct dma_chan *dc)
676{
677 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
678 struct tegra_dma_sg_req *sgreq;
679 struct tegra_dma_desc *dma_desc;
680 unsigned long flags;
681 unsigned long status;
682 bool was_busy;
683
684 spin_lock_irqsave(&tdc->lock, flags);
685 if (list_empty(&tdc->pending_sg_req)) {
686 spin_unlock_irqrestore(&tdc->lock, flags);
687 return;
688 }
689
690 if (!tdc->busy)
691 goto skip_dma_stop;
692
693 /* Pause DMA before checking the queue status */
694 tegra_dma_global_pause(tdc, true);
695
696 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
697 if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
698 dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
699 tdc->isr_handler(tdc, true);
700 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
701 }
702
703 was_busy = tdc->busy;
704 tegra_dma_stop(tdc);
705
706 if (!list_empty(&tdc->pending_sg_req) && was_busy) {
707 sgreq = list_first_entry(&tdc->pending_sg_req,
708 typeof(*sgreq), node);
709 sgreq->dma_desc->bytes_transferred +=
710 get_current_xferred_count(tdc, sgreq, status);
711 }
712 tegra_dma_global_resume(tdc);
713
714skip_dma_stop:
715 tegra_dma_abort_all(tdc);
716
717 while (!list_empty(&tdc->cb_desc)) {
718 dma_desc = list_first_entry(&tdc->cb_desc,
719 typeof(*dma_desc), cb_node);
720 list_del(&dma_desc->cb_node);
721 dma_desc->cb_count = 0;
722 }
723 spin_unlock_irqrestore(&tdc->lock, flags);
724}
725
726static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
727 dma_cookie_t cookie, struct dma_tx_state *txstate)
728{
729 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
730 struct tegra_dma_desc *dma_desc;
731 struct tegra_dma_sg_req *sg_req;
732 enum dma_status ret;
733 unsigned long flags;
734 unsigned int residual;
735
736 spin_lock_irqsave(&tdc->lock, flags);
737
738 ret = dma_cookie_status(dc, cookie, txstate);
739 if (ret == DMA_SUCCESS) {
740 dma_set_residue(txstate, 0);
741 spin_unlock_irqrestore(&tdc->lock, flags);
742 return ret;
743 }
744
745 /* Check on wait_ack desc status */
746 list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
747 if (dma_desc->txd.cookie == cookie) {
748 residual = dma_desc->bytes_requested -
749 (dma_desc->bytes_transferred %
750 dma_desc->bytes_requested);
751 dma_set_residue(txstate, residual);
752 ret = dma_desc->dma_status;
753 spin_unlock_irqrestore(&tdc->lock, flags);
754 return ret;
755 }
756 }
757
758 /* Check in pending list */
759 list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
760 dma_desc = sg_req->dma_desc;
761 if (dma_desc->txd.cookie == cookie) {
762 residual = dma_desc->bytes_requested -
763 (dma_desc->bytes_transferred %
764 dma_desc->bytes_requested);
765 dma_set_residue(txstate, residual);
766 ret = dma_desc->dma_status;
767 spin_unlock_irqrestore(&tdc->lock, flags);
768 return ret;
769 }
770 }
771
772	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
773 spin_unlock_irqrestore(&tdc->lock, flags);
774 return ret;
775}
776
777static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd,
778 unsigned long arg)
779{
780 switch (cmd) {
781 case DMA_SLAVE_CONFIG:
782 return tegra_dma_slave_config(dc,
783 (struct dma_slave_config *)arg);
784
785 case DMA_TERMINATE_ALL:
786 tegra_dma_terminate_all(dc);
787 return 0;
788
789 default:
790 break;
791 }
792
793 return -ENXIO;
794}
795
796static inline int get_bus_width(struct tegra_dma_channel *tdc,
797 enum dma_slave_buswidth slave_bw)
798{
799 switch (slave_bw) {
800 case DMA_SLAVE_BUSWIDTH_1_BYTE:
801 return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
802 case DMA_SLAVE_BUSWIDTH_2_BYTES:
803 return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
804 case DMA_SLAVE_BUSWIDTH_4_BYTES:
805 return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
806 case DMA_SLAVE_BUSWIDTH_8_BYTES:
807 return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
808 default:
809 dev_warn(tdc2dev(tdc),
810 "slave bw is not supported, using 32bits\n");
811 return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
812 }
813}
814
815static inline int get_burst_size(struct tegra_dma_channel *tdc,
816 u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
817{
818 int burst_byte;
819 int burst_ahb_width;
820
821	/*
822	 * The burst_size from the client is in units of the bus width.
823	 * Convert it into the AHB memory width, which is 4 bytes.
824	 */
825 burst_byte = burst_size * slave_bw;
826 burst_ahb_width = burst_byte / 4;
827
828 /* If burst size is 0 then calculate the burst size based on length */
829 if (!burst_ahb_width) {
830 if (len & 0xF)
831 return TEGRA_APBDMA_AHBSEQ_BURST_1;
832 else if ((len >> 4) & 0x1)
833 return TEGRA_APBDMA_AHBSEQ_BURST_4;
834 else
835 return TEGRA_APBDMA_AHBSEQ_BURST_8;
836 }
837 if (burst_ahb_width < 4)
838 return TEGRA_APBDMA_AHBSEQ_BURST_1;
839 else if (burst_ahb_width < 8)
840 return TEGRA_APBDMA_AHBSEQ_BURST_4;
841 else
842 return TEGRA_APBDMA_AHBSEQ_BURST_8;
843}
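/*
 * Editor's worked example (illustrative values): burst_size = 8 with a
 * 2-byte slave bus width gives burst_byte = 16 and burst_ahb_width = 4,
 * which selects TEGRA_APBDMA_AHBSEQ_BURST_4. With burst_size = 0 and
 * len = 0x100 (low nibble clear, bit 4 clear), BURST_8 is selected.
 */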
844
845static int get_transfer_param(struct tegra_dma_channel *tdc,
846 enum dma_transfer_direction direction, unsigned long *apb_addr,
847 unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
848 enum dma_slave_buswidth *slave_bw)
849{
850
851 switch (direction) {
852 case DMA_MEM_TO_DEV:
853 *apb_addr = tdc->dma_sconfig.dst_addr;
854 *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
855 *burst_size = tdc->dma_sconfig.dst_maxburst;
856 *slave_bw = tdc->dma_sconfig.dst_addr_width;
857 *csr = TEGRA_APBDMA_CSR_DIR;
858 return 0;
859
860 case DMA_DEV_TO_MEM:
861 *apb_addr = tdc->dma_sconfig.src_addr;
862 *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
863 *burst_size = tdc->dma_sconfig.src_maxburst;
864 *slave_bw = tdc->dma_sconfig.src_addr_width;
865 *csr = 0;
866 return 0;
867
868 default:
869 dev_err(tdc2dev(tdc), "Dma direction is not supported\n");
870 return -EINVAL;
871 }
872 return -EINVAL;
873}
874
875static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
876 struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
877 enum dma_transfer_direction direction, unsigned long flags,
878 void *context)
879{
880 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
881 struct tegra_dma_desc *dma_desc;
882 unsigned int i;
883 struct scatterlist *sg;
884 unsigned long csr, ahb_seq, apb_ptr, apb_seq;
885 struct list_head req_list;
886 struct tegra_dma_sg_req *sg_req = NULL;
887 u32 burst_size;
888 enum dma_slave_buswidth slave_bw;
889 int ret;
890
891 if (!tdc->config_init) {
892 dev_err(tdc2dev(tdc), "dma channel is not configured\n");
893 return NULL;
894 }
895 if (sg_len < 1) {
896 dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
897 return NULL;
898 }
899
900 ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
901 &burst_size, &slave_bw);
902 if (ret < 0)
903 return NULL;
904
905 INIT_LIST_HEAD(&req_list);
906
907 ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
908 ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
909 TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
910 ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
911
912 csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
913 csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
914 if (flags & DMA_PREP_INTERRUPT)
915 csr |= TEGRA_APBDMA_CSR_IE_EOC;
916
917 apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
918
919 dma_desc = tegra_dma_desc_get(tdc);
920 if (!dma_desc) {
921 dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
922 return NULL;
923 }
924 INIT_LIST_HEAD(&dma_desc->tx_list);
925 INIT_LIST_HEAD(&dma_desc->cb_node);
926 dma_desc->cb_count = 0;
927 dma_desc->bytes_requested = 0;
928 dma_desc->bytes_transferred = 0;
929 dma_desc->dma_status = DMA_IN_PROGRESS;
930
931 /* Make transfer requests */
932 for_each_sg(sgl, sg, sg_len, i) {
933 u32 len, mem;
934
935 mem = sg_dma_address(sg);
936 len = sg_dma_len(sg);
937
938 if ((len & 3) || (mem & 3) ||
939 (len > tdc->tdma->chip_data->max_dma_count)) {
940 dev_err(tdc2dev(tdc),
941 "Dma length/memory address is not supported\n");
942 tegra_dma_desc_put(tdc, dma_desc);
943 return NULL;
944 }
945
946 sg_req = tegra_dma_sg_req_get(tdc);
947 if (!sg_req) {
948 dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
949 tegra_dma_desc_put(tdc, dma_desc);
950 return NULL;
951 }
952
953 ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
954 dma_desc->bytes_requested += len;
955
956 sg_req->ch_regs.apb_ptr = apb_ptr;
957 sg_req->ch_regs.ahb_ptr = mem;
958 sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
959 sg_req->ch_regs.apb_seq = apb_seq;
960 sg_req->ch_regs.ahb_seq = ahb_seq;
961 sg_req->configured = false;
962 sg_req->last_sg = false;
963 sg_req->dma_desc = dma_desc;
964 sg_req->req_len = len;
965
966 list_add_tail(&sg_req->node, &dma_desc->tx_list);
967 }
968 sg_req->last_sg = true;
969 if (flags & DMA_CTRL_ACK)
970 dma_desc->txd.flags = DMA_CTRL_ACK;
971
972	/*
973	 * Make sure this mode does not conflict with the currently
974	 * configured mode.
975	 */
976 if (!tdc->isr_handler) {
977 tdc->isr_handler = handle_once_dma_done;
978 tdc->cyclic = false;
979 } else {
980 if (tdc->cyclic) {
981 dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
982 tegra_dma_desc_put(tdc, dma_desc);
983 return NULL;
984 }
985 }
986
987 return &dma_desc->txd;
988}
989
990static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
991 struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
992 size_t period_len, enum dma_transfer_direction direction,
993 void *context)
994{
995 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
996 struct tegra_dma_desc *dma_desc = NULL;
997 struct tegra_dma_sg_req *sg_req = NULL;
998 unsigned long csr, ahb_seq, apb_ptr, apb_seq;
999 int len;
1000 size_t remain_len;
1001 dma_addr_t mem = buf_addr;
1002 u32 burst_size;
1003 enum dma_slave_buswidth slave_bw;
1004 int ret;
1005
1006 if (!buf_len || !period_len) {
1007 dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
1008 return NULL;
1009 }
1010
1011 if (!tdc->config_init) {
1012 dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
1013 return NULL;
1014 }
1015
1016	/*
1017	 * We allow more requests to be queued as long as the DMA has
1018	 * not been started; the driver will loop over all of them.
1019	 * Once the DMA has started, new requests can be queued only
1020	 * after terminating the DMA.
1021	 */
1022 if (tdc->busy) {
1023 dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
1024 return NULL;
1025 }
1026
1027	/*
1028	 * We only support cyclic transfers when buf_len is a multiple
1029	 * of period_len.
1030	 */
1031 if (buf_len % period_len) {
1032 dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
1033 return NULL;
1034 }
1035
1036 len = period_len;
1037 if ((len & 3) || (buf_addr & 3) ||
1038 (len > tdc->tdma->chip_data->max_dma_count)) {
1039 dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
1040 return NULL;
1041 }
1042
1043 ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
1044 &burst_size, &slave_bw);
1045 if (ret < 0)
1046 return NULL;
1047
1048
1049 ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
1050 ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
1051 TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
1052 ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
1053
1054 csr |= TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC;
1055 csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
1056
1057 apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
1058
1059 dma_desc = tegra_dma_desc_get(tdc);
1060 if (!dma_desc) {
1061 dev_err(tdc2dev(tdc), "not enough descriptors available\n");
1062 return NULL;
1063 }
1064
1065 INIT_LIST_HEAD(&dma_desc->tx_list);
1066 INIT_LIST_HEAD(&dma_desc->cb_node);
1067 dma_desc->cb_count = 0;
1068
1069 dma_desc->bytes_transferred = 0;
1070 dma_desc->bytes_requested = buf_len;
1071 remain_len = buf_len;
1072
1073	/* Split the transfer into period-sized sub-transfers */
1074 while (remain_len) {
1075 sg_req = tegra_dma_sg_req_get(tdc);
1076 if (!sg_req) {
1077 dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
1078 tegra_dma_desc_put(tdc, dma_desc);
1079 return NULL;
1080 }
1081
1082 ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
1083 sg_req->ch_regs.apb_ptr = apb_ptr;
1084 sg_req->ch_regs.ahb_ptr = mem;
1085 sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
1086 sg_req->ch_regs.apb_seq = apb_seq;
1087 sg_req->ch_regs.ahb_seq = ahb_seq;
1088 sg_req->configured = false;
1089 sg_req->half_done = false;
1090 sg_req->last_sg = false;
1091 sg_req->dma_desc = dma_desc;
1092 sg_req->req_len = len;
1093
1094 list_add_tail(&sg_req->node, &dma_desc->tx_list);
1095 remain_len -= len;
1096 mem += len;
1097 }
1098 sg_req->last_sg = true;
1099 dma_desc->txd.flags = 0;
1100
1101	/*
1102	 * Make sure this mode does not conflict with the currently
1103	 * configured mode.
1104	 */
1105 if (!tdc->isr_handler) {
1106 tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
1107 tdc->cyclic = true;
1108 } else {
1109 if (!tdc->cyclic) {
1110 dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
1111 tegra_dma_desc_put(tdc, dma_desc);
1112 return NULL;
1113 }
1114 }
1115
1116 return &dma_desc->txd;
1117}
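/*
 * Editor's sketch (hypothetical buffer/period sizes, assuming the
 * dmaengine_prep_dma_cyclic() wrapper is available): a sound driver would
 * typically request one callback per period of a ring buffer:
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_phys, 4 * 1024,
 *					1024, DMA_MEM_TO_DEV);
 *	txd->callback = period_elapsed;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */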
1118
1119static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
1120{
1121 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1122 struct tegra_dma *tdma = tdc->tdma;
1123 int ret;
1124
1125 dma_cookie_init(&tdc->dma_chan);
1126 tdc->config_init = false;
1127 ret = clk_prepare_enable(tdma->dma_clk);
1128 if (ret < 0)
1129 dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret);
1130 return ret;
1131}
1132
1133static void tegra_dma_free_chan_resources(struct dma_chan *dc)
1134{
1135 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1136 struct tegra_dma *tdma = tdc->tdma;
1137
1138 struct tegra_dma_desc *dma_desc;
1139 struct tegra_dma_sg_req *sg_req;
1140 struct list_head dma_desc_list;
1141 struct list_head sg_req_list;
1142 unsigned long flags;
1143
1144 INIT_LIST_HEAD(&dma_desc_list);
1145 INIT_LIST_HEAD(&sg_req_list);
1146
1147 dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
1148
1149 if (tdc->busy)
1150 tegra_dma_terminate_all(dc);
1151
1152 spin_lock_irqsave(&tdc->lock, flags);
1153 list_splice_init(&tdc->pending_sg_req, &sg_req_list);
1154 list_splice_init(&tdc->free_sg_req, &sg_req_list);
1155 list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
1156 INIT_LIST_HEAD(&tdc->cb_desc);
1157 tdc->config_init = false;
1158 spin_unlock_irqrestore(&tdc->lock, flags);
1159
1160 while (!list_empty(&dma_desc_list)) {
1161 dma_desc = list_first_entry(&dma_desc_list,
1162 typeof(*dma_desc), node);
1163 list_del(&dma_desc->node);
1164 kfree(dma_desc);
1165 }
1166
1167 while (!list_empty(&sg_req_list)) {
1168 sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
1169 list_del(&sg_req->node);
1170 kfree(sg_req);
1171 }
1172 clk_disable_unprepare(tdma->dma_clk);
1173}
1174
1175/* Tegra20 specific DMA controller information */
1176static struct tegra_dma_chip_data tegra20_dma_chip_data = {
1177 .nr_channels = 16,
1178 .max_dma_count = 1024UL * 64,
1179};
1180
1181#if defined(CONFIG_OF)
1182/* Tegra30 specific DMA controller information */
1183static struct tegra_dma_chip_data tegra30_dma_chip_data = {
1184 .nr_channels = 32,
1185 .max_dma_count = 1024UL * 64,
1186};
1187
1188static const struct of_device_id tegra_dma_of_match[] __devinitconst = {
1189 {
1190 .compatible = "nvidia,tegra30-apbdma",
1191 .data = &tegra30_dma_chip_data,
1192 }, {
1193 .compatible = "nvidia,tegra20-apbdma",
1194 .data = &tegra20_dma_chip_data,
1195 }, {
1196 },
1197};
1198MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
1199#endif
1200
1201static int __devinit tegra_dma_probe(struct platform_device *pdev)
1202{
1203 struct resource *res;
1204 struct tegra_dma *tdma;
1205 int ret;
1206 int i;
1207 struct tegra_dma_chip_data *cdata = NULL;
1208
1209 if (pdev->dev.of_node) {
1210 const struct of_device_id *match;
1211 match = of_match_device(of_match_ptr(tegra_dma_of_match),
1212 &pdev->dev);
1213 if (!match) {
1214 dev_err(&pdev->dev, "Error: No device match found\n");
1215 return -ENODEV;
1216 }
1217 cdata = match->data;
1218 } else {
1219		/* Without a device tree, fall back to the Tegra20 data */
1220 cdata = &tegra20_dma_chip_data;
1221 }
1222
1223 tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
1224 sizeof(struct tegra_dma_channel), GFP_KERNEL);
1225 if (!tdma) {
1226 dev_err(&pdev->dev, "Error: memory allocation failed\n");
1227 return -ENOMEM;
1228 }
1229
1230 tdma->dev = &pdev->dev;
1231 tdma->chip_data = cdata;
1232 platform_set_drvdata(pdev, tdma);
1233
1234 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1235 if (!res) {
1236 dev_err(&pdev->dev, "No mem resource for DMA\n");
1237 return -EINVAL;
1238 }
1239
1240 tdma->base_addr = devm_request_and_ioremap(&pdev->dev, res);
1241 if (!tdma->base_addr) {
1242 dev_err(&pdev->dev,
1243 "Cannot request memregion/iomap dma address\n");
1244 return -EADDRNOTAVAIL;
1245 }
1246
1247 tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
1248 if (IS_ERR(tdma->dma_clk)) {
1249 dev_err(&pdev->dev, "Error: Missing controller clock\n");
1250 return PTR_ERR(tdma->dma_clk);
1251 }
1252
1253 spin_lock_init(&tdma->global_lock);
1254
1255 pm_runtime_enable(&pdev->dev);
1256 if (!pm_runtime_enabled(&pdev->dev)) {
1257 ret = tegra_dma_runtime_resume(&pdev->dev);
1258 if (ret) {
1259 dev_err(&pdev->dev, "dma_runtime_resume failed %d\n",
1260 ret);
1261 goto err_pm_disable;
1262 }
1263 }
1264
1265 /* Enable clock before accessing registers */
1266 ret = clk_prepare_enable(tdma->dma_clk);
1267 if (ret < 0) {
1268 dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
1269 goto err_pm_disable;
1270 }
1271
1272 /* Reset DMA controller */
1273 tegra_periph_reset_assert(tdma->dma_clk);
1274 udelay(2);
1275 tegra_periph_reset_deassert(tdma->dma_clk);
1276
1277 /* Enable global DMA registers */
1278 tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
1279 tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
1280 tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
1281
1282 clk_disable_unprepare(tdma->dma_clk);
1283
1284 INIT_LIST_HEAD(&tdma->dma_dev.channels);
1285 for (i = 0; i < cdata->nr_channels; i++) {
1286 struct tegra_dma_channel *tdc = &tdma->channels[i];
1287 char irq_name[30];
1288
1289 tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
1290 i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;
1291
1292 res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
1293 if (!res) {
1294 ret = -EINVAL;
1295 dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
1296 goto err_irq;
1297 }
1298 tdc->irq = res->start;
1299 snprintf(irq_name, sizeof(irq_name), "apbdma.%d", i);
1300 ret = devm_request_irq(&pdev->dev, tdc->irq,
1301 tegra_dma_isr, 0, irq_name, tdc);
1302 if (ret) {
1303 dev_err(&pdev->dev,
1304				"request_irq failed with err %d channel %d\n",
1305				ret, i);
1306 goto err_irq;
1307 }
1308
1309 tdc->dma_chan.device = &tdma->dma_dev;
1310 dma_cookie_init(&tdc->dma_chan);
1311 list_add_tail(&tdc->dma_chan.device_node,
1312 &tdma->dma_dev.channels);
1313 tdc->tdma = tdma;
1314 tdc->id = i;
1315
1316 tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
1317 (unsigned long)tdc);
1318 spin_lock_init(&tdc->lock);
1319
1320 INIT_LIST_HEAD(&tdc->pending_sg_req);
1321 INIT_LIST_HEAD(&tdc->free_sg_req);
1322 INIT_LIST_HEAD(&tdc->free_dma_desc);
1323 INIT_LIST_HEAD(&tdc->cb_desc);
1324 }
1325
1326 dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
1327 dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
1328 dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
1329
1330 tdma->dma_dev.dev = &pdev->dev;
1331 tdma->dma_dev.device_alloc_chan_resources =
1332 tegra_dma_alloc_chan_resources;
1333 tdma->dma_dev.device_free_chan_resources =
1334 tegra_dma_free_chan_resources;
1335 tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
1336 tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
1337 tdma->dma_dev.device_control = tegra_dma_device_control;
1338 tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
1339 tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
1340
1341 ret = dma_async_device_register(&tdma->dma_dev);
1342 if (ret < 0) {
1343 dev_err(&pdev->dev,
1344 "Tegra20 APB DMA driver registration failed %d\n", ret);
1345 goto err_irq;
1346 }
1347
1348	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
1349 cdata->nr_channels);
1350 return 0;
1351
1352err_irq:
1353 while (--i >= 0) {
1354 struct tegra_dma_channel *tdc = &tdma->channels[i];
1355 tasklet_kill(&tdc->tasklet);
1356 }
1357
1358err_pm_disable:
1359 pm_runtime_disable(&pdev->dev);
1360 if (!pm_runtime_status_suspended(&pdev->dev))
1361 tegra_dma_runtime_suspend(&pdev->dev);
1362 return ret;
1363}
1364
1365static int __devexit tegra_dma_remove(struct platform_device *pdev)
1366{
1367 struct tegra_dma *tdma = platform_get_drvdata(pdev);
1368 int i;
1369 struct tegra_dma_channel *tdc;
1370
1371 dma_async_device_unregister(&tdma->dma_dev);
1372
1373 for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
1374 tdc = &tdma->channels[i];
1375 tasklet_kill(&tdc->tasklet);
1376 }
1377
1378 pm_runtime_disable(&pdev->dev);
1379 if (!pm_runtime_status_suspended(&pdev->dev))
1380 tegra_dma_runtime_suspend(&pdev->dev);
1381
1382 return 0;
1383}
1384
1385static int tegra_dma_runtime_suspend(struct device *dev)
1386{
1387 struct platform_device *pdev = to_platform_device(dev);
1388 struct tegra_dma *tdma = platform_get_drvdata(pdev);
1389
1390 clk_disable_unprepare(tdma->dma_clk);
1391 return 0;
1392}
1393
1394static int tegra_dma_runtime_resume(struct device *dev)
1395{
1396 struct platform_device *pdev = to_platform_device(dev);
1397 struct tegra_dma *tdma = platform_get_drvdata(pdev);
1398 int ret;
1399
1400 ret = clk_prepare_enable(tdma->dma_clk);
1401 if (ret < 0) {
1402 dev_err(dev, "clk_enable failed: %d\n", ret);
1403 return ret;
1404 }
1405 return 0;
1406}
1407
1408static const struct dev_pm_ops tegra_dma_dev_pm_ops __devinitconst = {
1409#ifdef CONFIG_PM_RUNTIME
1410 .runtime_suspend = tegra_dma_runtime_suspend,
1411 .runtime_resume = tegra_dma_runtime_resume,
1412#endif
1413};
1414
1415static struct platform_driver tegra_dmac_driver = {
1416 .driver = {
1417 .name = "tegra-apbdma",
1418 .owner = THIS_MODULE,
1419 .pm = &tegra_dma_dev_pm_ops,
1420 .of_match_table = of_match_ptr(tegra_dma_of_match),
1421 },
1422 .probe = tegra_dma_probe,
1423 .remove = __devexit_p(tegra_dma_remove),
1424};
1425
1426module_platform_driver(tegra_dmac_driver);
1427
1428MODULE_ALIAS("platform:tegra20-apbdma");
1429MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
1430MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
1431MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
new file mode 100644
index 000000000000..6f80432a3f0a
--- /dev/null
+++ b/drivers/dma/virt-dma.c
@@ -0,0 +1,123 @@
1/*
2 * Virtual DMA channel support for DMAengine
3 *
4 * Copyright (C) 2012 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/device.h>
11#include <linux/dmaengine.h>
12#include <linux/module.h>
13#include <linux/spinlock.h>
14
15#include "virt-dma.h"
16
17static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
18{
19 return container_of(tx, struct virt_dma_desc, tx);
20}
21
22dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
23{
24 struct virt_dma_chan *vc = to_virt_chan(tx->chan);
25 struct virt_dma_desc *vd = to_virt_desc(tx);
26 unsigned long flags;
27 dma_cookie_t cookie;
28
29 spin_lock_irqsave(&vc->lock, flags);
30 cookie = dma_cookie_assign(tx);
31
32 list_add_tail(&vd->node, &vc->desc_submitted);
33 spin_unlock_irqrestore(&vc->lock, flags);
34
35 dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
36 vc, vd, cookie);
37
38 return cookie;
39}
40EXPORT_SYMBOL_GPL(vchan_tx_submit);
41
42struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
43 dma_cookie_t cookie)
44{
45 struct virt_dma_desc *vd;
46
47 list_for_each_entry(vd, &vc->desc_issued, node)
48 if (vd->tx.cookie == cookie)
49 return vd;
50
51 return NULL;
52}
53EXPORT_SYMBOL_GPL(vchan_find_desc);
54
55/*
56 * This tasklet handles the completion of a DMA descriptor by
57 * calling its callback and freeing it.
58 */
59static void vchan_complete(unsigned long arg)
60{
61 struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
62 struct virt_dma_desc *vd;
63 dma_async_tx_callback cb = NULL;
64 void *cb_data = NULL;
65 LIST_HEAD(head);
66
67 spin_lock_irq(&vc->lock);
68 list_splice_tail_init(&vc->desc_completed, &head);
69 vd = vc->cyclic;
70 if (vd) {
71 vc->cyclic = NULL;
72 cb = vd->tx.callback;
73 cb_data = vd->tx.callback_param;
74 }
75 spin_unlock_irq(&vc->lock);
76
77 if (cb)
78 cb(cb_data);
79
80 while (!list_empty(&head)) {
81 vd = list_first_entry(&head, struct virt_dma_desc, node);
82 cb = vd->tx.callback;
83 cb_data = vd->tx.callback_param;
84
85 list_del(&vd->node);
86
87 vc->desc_free(vd);
88
89 if (cb)
90 cb(cb_data);
91 }
92}
93
94void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
95{
96 while (!list_empty(head)) {
97 struct virt_dma_desc *vd = list_first_entry(head,
98 struct virt_dma_desc, node);
99 list_del(&vd->node);
100 dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
101 vc->desc_free(vd);
102 }
103}
104EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
105
106void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
107{
108 dma_cookie_init(&vc->chan);
109
110 spin_lock_init(&vc->lock);
111 INIT_LIST_HEAD(&vc->desc_submitted);
112 INIT_LIST_HEAD(&vc->desc_issued);
113 INIT_LIST_HEAD(&vc->desc_completed);
114
115 tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);
116
117 vc->chan.device = dmadev;
118 list_add_tail(&vc->chan.device_node, &dmadev->channels);
119}
120EXPORT_SYMBOL_GPL(vchan_init);
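/*
 * Editor's sketch (hypothetical foo_* names): a driver wires the vchan
 * into its own channel structure at probe time:
 *
 *	fc->vc.desc_free = foo_desc_free;
 *	vchan_init(&fc->vc, &fd->ddev);
 */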
121
122MODULE_AUTHOR("Russell King");
123MODULE_LICENSE("GPL");
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
new file mode 100644
index 000000000000..85c19d63f9fb
--- /dev/null
+++ b/drivers/dma/virt-dma.h
@@ -0,0 +1,152 @@
1/*
2 * Virtual DMA channel support for DMAengine
3 *
4 * Copyright (C) 2012 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef VIRT_DMA_H
11#define VIRT_DMA_H
12
13#include <linux/dmaengine.h>
14#include <linux/interrupt.h>
15
16#include "dmaengine.h"
17
18struct virt_dma_desc {
19 struct dma_async_tx_descriptor tx;
20 /* protected by vc.lock */
21 struct list_head node;
22};
23
24struct virt_dma_chan {
25 struct dma_chan chan;
26 struct tasklet_struct task;
27 void (*desc_free)(struct virt_dma_desc *);
28
29 spinlock_t lock;
30
31 /* protected by vc.lock */
32 struct list_head desc_submitted;
33 struct list_head desc_issued;
34 struct list_head desc_completed;
35
36 struct virt_dma_desc *cyclic;
37};
38
39static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
40{
41 return container_of(chan, struct virt_dma_chan, chan);
42}
43
44void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
45void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
46struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
47
48/**
49 * vchan_tx_prep - prepare a descriptor
50 * vc: virtual channel allocating this descriptor
51 * vd: virtual descriptor to prepare
52 * tx_flags: flags argument passed in to prepare function
53 */
54static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
55 struct virt_dma_desc *vd, unsigned long tx_flags)
56{
57 extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
58
59 dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
60 vd->tx.flags = tx_flags;
61 vd->tx.tx_submit = vchan_tx_submit;
62
63 return &vd->tx;
64}
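/*
 * Editor's sketch (hypothetical struct foo_desc): drivers embed
 * virt_dma_desc in their own descriptor and return the prepared tx:
 *
 *	struct foo_desc {
 *		struct virt_dma_desc vd;
 *		... hardware-specific fields ...
 *	};
 *
 *	d = kzalloc(sizeof(*d), GFP_ATOMIC);
 *	if (!d)
 *		return NULL;
 *	return vchan_tx_prep(&fc->vc, &d->vd, tx_flags);
 */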
65
66/**
67 * vchan_issue_pending - move submitted descriptors to issued list
68 * vc: virtual channel to update
69 *
70 * vc.lock must be held by caller
71 */
72static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
73{
74 list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
75 return !list_empty(&vc->desc_issued);
76}
77
78/**
79 * vchan_cookie_complete - report completion of a descriptor
80 * vd: virtual descriptor to update
81 *
82 * vc.lock must be held by caller
83 */
84static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
85{
86 struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
87
88 dma_cookie_complete(&vd->tx);
89 dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
90 vd, vd->tx.cookie);
91 list_add_tail(&vd->node, &vc->desc_completed);
92
93 tasklet_schedule(&vc->task);
94}
95
96/**
97 * vchan_cyclic_callback - report the completion of a period
98 * vd: virtual descriptor
99 */
100static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
101{
102 struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
103
104 vc->cyclic = vd;
105 tasklet_schedule(&vc->task);
106}
107
108/**
109 * vchan_next_desc - peek at the next descriptor to be processed
110 * vc: virtual channel to obtain descriptor from
111 *
112 * vc.lock must be held by caller
113 */
114static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
115{
116 if (list_empty(&vc->desc_issued))
117 return NULL;
118
119 return list_first_entry(&vc->desc_issued, struct virt_dma_desc, node);
120}
121
122/**
123 * vchan_get_all_descriptors - obtain all submitted, issued and completed descriptors
124 * vc: virtual channel to get descriptors from
125 * head: list of descriptors found
126 *
127 * vc.lock must be held by caller
128 *
129 * Removes all submitted, issued and completed descriptors from the
130 * internal lists, and provides a list of all descriptors found
131 */
132static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
133 struct list_head *head)
134{
135 list_splice_tail_init(&vc->desc_submitted, head);
136 list_splice_tail_init(&vc->desc_issued, head);
137 list_splice_tail_init(&vc->desc_completed, head);
138}
139
140static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
141{
142 unsigned long flags;
143 LIST_HEAD(head);
144
145 spin_lock_irqsave(&vc->lock, flags);
146 vchan_get_all_descriptors(vc, &head);
147 spin_unlock_irqrestore(&vc->lock, flags);
148
149 vchan_dma_desc_free_list(vc, &head);
150}
151
152#endif
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index fdffa1beca17..409b92b8d346 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -7,7 +7,7 @@
7menuconfig EDAC 7menuconfig EDAC
8 bool "EDAC (Error Detection And Correction) reporting" 8 bool "EDAC (Error Detection And Correction) reporting"
9 depends on HAS_IOMEM 9 depends on HAS_IOMEM
10 depends on X86 || PPC || TILE 10 depends on X86 || PPC || TILE || ARM
11 help 11 help
12 EDAC is designed to report errors in the core system. 12 EDAC is designed to report errors in the core system.
13 These are low-level errors that are reported in the CPU or 13 These are low-level errors that are reported in the CPU or
@@ -31,6 +31,14 @@ if EDAC
31 31
32comment "Reporting subsystems" 32comment "Reporting subsystems"
33 33
34config EDAC_LEGACY_SYSFS
35 bool "EDAC legacy sysfs"
36 default y
37 help
38 Enable the compatibility sysfs nodes.
39 Use 'Y' if your edac utilities aren't ported to work with the newer
40 structures.
41
34config EDAC_DEBUG 42config EDAC_DEBUG
35 bool "Debugging" 43 bool "Debugging"
36 help 44 help
@@ -294,4 +302,18 @@ config EDAC_TILE
294 Support for error detection and correction on the 302 Support for error detection and correction on the
295 Tilera memory controller. 303 Tilera memory controller.
296 304
305config EDAC_HIGHBANK_MC
306 tristate "Highbank Memory Controller"
307 depends on EDAC_MM_EDAC && ARCH_HIGHBANK
308 help
309 Support for error detection and correction on the
310 Calxeda Highbank memory controller.
311
312config EDAC_HIGHBANK_L2
313 tristate "Highbank L2 Cache"
314 depends on EDAC_MM_EDAC && ARCH_HIGHBANK
315 help
316 Support for error detection and correction on the
317	  Calxeda Highbank L2 cache.
318
297endif # EDAC 319endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 196a63dd37c5..7e5129a733f8 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -55,3 +55,6 @@ obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o
55obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o 55obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o
56 56
57obj-$(CONFIG_EDAC_TILE) += tile_edac.o 57obj-$(CONFIG_EDAC_TILE) += tile_edac.o
58
59obj-$(CONFIG_EDAC_HIGHBANK_MC) += highbank_mc_edac.o
60obj-$(CONFIG_EDAC_HIGHBANK_L2) += highbank_l2_edac.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 7be9b7288e90..5a297a26211d 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -321,8 +321,8 @@ found:
321 return edac_mc_find((int)node_id); 321 return edac_mc_find((int)node_id);
322 322
323err_no_match: 323err_no_match:
324 debugf2("sys_addr 0x%lx doesn't match any node\n", 324 edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
325 (unsigned long)sys_addr); 325 (unsigned long)sys_addr);
326 326
327 return NULL; 327 return NULL;
328} 328}
@@ -393,15 +393,15 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
393 mask = ~mask; 393 mask = ~mask;
394 394
395 if ((input_addr & mask) == (base & mask)) { 395 if ((input_addr & mask) == (base & mask)) {
396 debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n", 396 edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
397 (unsigned long)input_addr, csrow, 397 (unsigned long)input_addr, csrow,
398 pvt->mc_node_id); 398 pvt->mc_node_id);
399 399
400 return csrow; 400 return csrow;
401 } 401 }
402 } 402 }
403 debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n", 403 edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
404 (unsigned long)input_addr, pvt->mc_node_id); 404 (unsigned long)input_addr, pvt->mc_node_id);
405 405
406 return -1; 406 return -1;
407} 407}
@@ -430,20 +430,20 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
430 430
431 /* only revE and later have the DRAM Hole Address Register */ 431 /* only revE and later have the DRAM Hole Address Register */
432 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) { 432 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
433 debugf1(" revision %d for node %d does not support DHAR\n", 433 edac_dbg(1, " revision %d for node %d does not support DHAR\n",
434 pvt->ext_model, pvt->mc_node_id); 434 pvt->ext_model, pvt->mc_node_id);
435 return 1; 435 return 1;
436 } 436 }
437 437
438 /* valid for Fam10h and above */ 438 /* valid for Fam10h and above */
439 if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) { 439 if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
440 debugf1(" Dram Memory Hoisting is DISABLED on this system\n"); 440 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
441 return 1; 441 return 1;
442 } 442 }
443 443
444 if (!dhar_valid(pvt)) { 444 if (!dhar_valid(pvt)) {
445 debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n", 445 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
446 pvt->mc_node_id); 446 pvt->mc_node_id);
447 return 1; 447 return 1;
448 } 448 }
449 449
@@ -475,9 +475,9 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
475 else 475 else
476 *hole_offset = k8_dhar_offset(pvt); 476 *hole_offset = k8_dhar_offset(pvt);
477 477
478 debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", 478 edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
479 pvt->mc_node_id, (unsigned long)*hole_base, 479 pvt->mc_node_id, (unsigned long)*hole_base,
480 (unsigned long)*hole_offset, (unsigned long)*hole_size); 480 (unsigned long)*hole_offset, (unsigned long)*hole_size);
481 481
482 return 0; 482 return 0;
483} 483}
@@ -528,10 +528,9 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
528 /* use DHAR to translate SysAddr to DramAddr */ 528 /* use DHAR to translate SysAddr to DramAddr */
529 dram_addr = sys_addr - hole_offset; 529 dram_addr = sys_addr - hole_offset;
530 530
531 debugf2("using DHAR to translate SysAddr 0x%lx to " 531 edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
532 "DramAddr 0x%lx\n", 532 (unsigned long)sys_addr,
533 (unsigned long)sys_addr, 533 (unsigned long)dram_addr);
534 (unsigned long)dram_addr);
535 534
536 return dram_addr; 535 return dram_addr;
537 } 536 }
@@ -548,9 +547,8 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
548 */ 547 */
549 dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base; 548 dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
550 549
551 debugf2("using DRAM Base register to translate SysAddr 0x%lx to " 550 edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
552 "DramAddr 0x%lx\n", (unsigned long)sys_addr, 551 (unsigned long)sys_addr, (unsigned long)dram_addr);
553 (unsigned long)dram_addr);
554 return dram_addr; 552 return dram_addr;
555} 553}
556 554
@@ -586,9 +584,9 @@ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
586 input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) + 584 input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
587 (dram_addr & 0xfff); 585 (dram_addr & 0xfff);
588 586
589 debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n", 587 edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
590 intlv_shift, (unsigned long)dram_addr, 588 intlv_shift, (unsigned long)dram_addr,
591 (unsigned long)input_addr); 589 (unsigned long)input_addr);
592 590
593 return input_addr; 591 return input_addr;
594} 592}
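The input_addr computation above squeezes the node-interleave select bits out of the address: the right shift pulls everything from bit 12+intlv_shift down to bit 12, the mask discards the now-scrambled low bits, and the 4 KiB page offset is added back untouched. A runnable sketch with invented values, using a local lo/hi mask macro matching the driver's convention:

#include <stdio.h>
#include <stdint.h>

/* driver-local GENMASK of this era takes (lo, hi) */
#define GENMASK_LO_HI(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

int main(void)
{
        int intlv_shift = 1;                /* two-node interleave: one select bit */
        uint64_t dram_addr = 0x123456ULL;   /* hypothetical */

        uint64_t input_addr = ((dram_addr >> intlv_shift) & GENMASK_LO_HI(12, 35))
                              + (dram_addr & 0xfff);
        printf("DramAddr 0x%llx -> InputAddr 0x%llx\n",
               (unsigned long long)dram_addr,
               (unsigned long long)input_addr);   /* 0x123456 -> 0x91456 */
        return 0;
}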
@@ -604,8 +602,8 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
604 input_addr = 602 input_addr =
605 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr)); 603 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
606 604
607 debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n", 605 edac_dbg(2, "SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
608 (unsigned long)sys_addr, (unsigned long)input_addr); 606 (unsigned long)sys_addr, (unsigned long)input_addr);
609 607
610 return input_addr; 608 return input_addr;
611} 609}
@@ -637,8 +635,8 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
637 635
638 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0)); 636 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
639 if (intlv_shift == 0) { 637 if (intlv_shift == 0) {
640 debugf1(" InputAddr 0x%lx translates to DramAddr of " 638 edac_dbg(1, " InputAddr 0x%lx translates to DramAddr of same value\n",
641 "same value\n", (unsigned long)input_addr); 639 (unsigned long)input_addr);
642 640
643 return input_addr; 641 return input_addr;
644 } 642 }
@@ -649,9 +647,9 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
649 intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1); 647 intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
650 dram_addr = bits + (intlv_sel << 12); 648 dram_addr = bits + (intlv_sel << 12);
651 649
652 debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx " 650 edac_dbg(1, "InputAddr 0x%lx translates to DramAddr 0x%lx (%d node interleave bits)\n",
653 "(%d node interleave bits)\n", (unsigned long)input_addr, 651 (unsigned long)input_addr,
654 (unsigned long)dram_addr, intlv_shift); 652 (unsigned long)dram_addr, intlv_shift);
655 653
656 return dram_addr; 654 return dram_addr;
657} 655}
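input_addr_to_dram_addr() above is the exact inverse of the sketch before it: shift the upper bits back up and re-insert this node's interleave selector at bit 12. Continuing the invented example (DramAddr 0x123456 with one interleave bit gave InputAddr 0x91456, and its bit 12 was 1):

#include <stdio.h>
#include <stdint.h>

#define GENMASK_LO_HI(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

int main(void)
{
        int intlv_shift = 1;
        uint64_t input_addr = 0x91456ULL;   /* result of the forward sketch */
        uint64_t intlv_sel  = 1;            /* this node served the odd 4 KiB blocks */

        uint64_t bits = ((input_addr & GENMASK_LO_HI(12, 35)) << intlv_shift)
                        + (input_addr & 0xfff);
        printf("DramAddr = 0x%llx\n",       /* recovers 0x123456 */
               (unsigned long long)(bits + (intlv_sel << 12)));
        return 0;
}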
@@ -673,9 +671,9 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
673 (dram_addr < (hole_base + hole_size))) { 671 (dram_addr < (hole_base + hole_size))) {
674 sys_addr = dram_addr + hole_offset; 672 sys_addr = dram_addr + hole_offset;
675 673
676 debugf1("using DHAR to translate DramAddr 0x%lx to " 674 edac_dbg(1, "using DHAR to translate DramAddr 0x%lx to SysAddr 0x%lx\n",
677 "SysAddr 0x%lx\n", (unsigned long)dram_addr, 675 (unsigned long)dram_addr,
678 (unsigned long)sys_addr); 676 (unsigned long)sys_addr);
679 677
680 return sys_addr; 678 return sys_addr;
681 } 679 }
@@ -697,9 +695,9 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
697 */ 695 */
698 sys_addr |= ~((sys_addr & (1ull << 39)) - 1); 696 sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
699 697
700 debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n", 698 edac_dbg(1, " Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
701 pvt->mc_node_id, (unsigned long)dram_addr, 699 pvt->mc_node_id, (unsigned long)dram_addr,
702 (unsigned long)sys_addr); 700 (unsigned long)sys_addr);
703 701
704 return sys_addr; 702 return sys_addr;
705} 703}
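The sys_addr |= ~((sys_addr & (1ull << 39)) - 1) line above sign-extends bit 39: when bit 39 is set, the decrement yields a mask of bits 0..38 whose complement sets bits 39..63; when it is clear, the complement is 0 and the address is untouched. A quick standalone check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t hi = 0x8000000000ULL, lo = 0x1000ULL;

        hi |= ~((hi & (1ULL << 39)) - 1);   /* -> 0xffffff8000000000 */
        lo |= ~((lo & (1ULL << 39)) - 1);   /* -> 0x0000000000001000, unchanged */
        printf("0x%016llx 0x%016llx\n",
               (unsigned long long)hi, (unsigned long long)lo);
        return 0;
}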
@@ -768,49 +766,48 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
768 766
769static void amd64_dump_dramcfg_low(u32 dclr, int chan) 767static void amd64_dump_dramcfg_low(u32 dclr, int chan)
770{ 768{
771 debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr); 769 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
772 770
773 debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n", 771 edac_dbg(1, " DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
774 (dclr & BIT(16)) ? "un" : "", 772 (dclr & BIT(16)) ? "un" : "",
775 (dclr & BIT(19)) ? "yes" : "no"); 773 (dclr & BIT(19)) ? "yes" : "no");
776 774
777 debugf1(" PAR/ERR parity: %s\n", 775 edac_dbg(1, " PAR/ERR parity: %s\n",
778 (dclr & BIT(8)) ? "enabled" : "disabled"); 776 (dclr & BIT(8)) ? "enabled" : "disabled");
779 777
780 if (boot_cpu_data.x86 == 0x10) 778 if (boot_cpu_data.x86 == 0x10)
781 debugf1(" DCT 128bit mode width: %s\n", 779 edac_dbg(1, " DCT 128bit mode width: %s\n",
782 (dclr & BIT(11)) ? "128b" : "64b"); 780 (dclr & BIT(11)) ? "128b" : "64b");
783 781
784 debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n", 782 edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
785 (dclr & BIT(12)) ? "yes" : "no", 783 (dclr & BIT(12)) ? "yes" : "no",
786 (dclr & BIT(13)) ? "yes" : "no", 784 (dclr & BIT(13)) ? "yes" : "no",
787 (dclr & BIT(14)) ? "yes" : "no", 785 (dclr & BIT(14)) ? "yes" : "no",
788 (dclr & BIT(15)) ? "yes" : "no"); 786 (dclr & BIT(15)) ? "yes" : "no");
789} 787}
790 788
791/* Display and decode various NB registers for debug purposes. */ 789/* Display and decode various NB registers for debug purposes. */
792static void dump_misc_regs(struct amd64_pvt *pvt) 790static void dump_misc_regs(struct amd64_pvt *pvt)
793{ 791{
794 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); 792 edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
795 793
796 debugf1(" NB two channel DRAM capable: %s\n", 794 edac_dbg(1, " NB two channel DRAM capable: %s\n",
797 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no"); 795 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
798 796
799 debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n", 797 edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
800 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no", 798 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
801 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no"); 799 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
802 800
803 amd64_dump_dramcfg_low(pvt->dclr0, 0); 801 amd64_dump_dramcfg_low(pvt->dclr0, 0);
804 802
805 debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare); 803 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
806 804
807 debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, " 805 edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
808 "offset: 0x%08x\n", 806 pvt->dhar, dhar_base(pvt),
809 pvt->dhar, dhar_base(pvt), 807 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
810 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt) 808 : f10_dhar_offset(pvt));
811 : f10_dhar_offset(pvt));
812 809
813 debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no"); 810 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
814 811
815 amd64_debug_display_dimm_sizes(pvt, 0); 812 amd64_debug_display_dimm_sizes(pvt, 0);
816 813
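amd64_dump_dramcfg_low() above is straight bit picking on the F2x[1,0]90 register value, so the same tests run unchanged in userspace. Sketch with an invented readout:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

int main(void)
{
        uint32_t dclr = 0x00090100;     /* hypothetical F2x090 value */

        printf("DIMM type: %sbuffered\n", (dclr & BIT(16)) ? "un" : "");
        printf("all DIMMs ECC: %s\n",     (dclr & BIT(19)) ? "yes" : "no");
        printf("PAR/ERR parity: %s\n",    (dclr & BIT(8))  ? "enabled" : "disabled");
        printf("x4 DIMM on L0: %s\n",     (dclr & BIT(12)) ? "yes" : "no");
        return 0;
}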
@@ -857,15 +854,15 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
857 u32 *base1 = &pvt->csels[1].csbases[cs]; 854 u32 *base1 = &pvt->csels[1].csbases[cs];
858 855
859 if (!amd64_read_dct_pci_cfg(pvt, reg0, base0)) 856 if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
860 debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n", 857 edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
861 cs, *base0, reg0); 858 cs, *base0, reg0);
862 859
863 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) 860 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
864 continue; 861 continue;
865 862
866 if (!amd64_read_dct_pci_cfg(pvt, reg1, base1)) 863 if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
867 debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n", 864 edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
868 cs, *base1, reg1); 865 cs, *base1, reg1);
869 } 866 }
870 867
871 for_each_chip_select_mask(cs, 0, pvt) { 868 for_each_chip_select_mask(cs, 0, pvt) {
@@ -875,15 +872,15 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
875 u32 *mask1 = &pvt->csels[1].csmasks[cs]; 872 u32 *mask1 = &pvt->csels[1].csmasks[cs];
876 873
877 if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0)) 874 if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
878 debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n", 875 edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
879 cs, *mask0, reg0); 876 cs, *mask0, reg0);
880 877
881 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) 878 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
882 continue; 879 continue;
883 880
884 if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1)) 881 if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
885 debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n", 882 edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
886 cs, *mask1, reg1); 883 cs, *mask1, reg1);
887 } 884 }
888} 885}
889 886
@@ -1049,24 +1046,22 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1049 if (!src_mci) { 1046 if (!src_mci) {
1050 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n", 1047 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1051 (unsigned long)sys_addr); 1048 (unsigned long)sys_addr);
1052 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1049 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
1053 page, offset, syndrome, 1050 page, offset, syndrome,
1054 -1, -1, -1, 1051 -1, -1, -1,
1055 EDAC_MOD_STR,
1056 "failed to map error addr to a node", 1052 "failed to map error addr to a node",
1057 NULL); 1053 "");
1058 return; 1054 return;
1059 } 1055 }
1060 1056
1061 /* Now map the sys_addr to a CSROW */ 1057 /* Now map the sys_addr to a CSROW */
1062 csrow = sys_addr_to_csrow(src_mci, sys_addr); 1058 csrow = sys_addr_to_csrow(src_mci, sys_addr);
1063 if (csrow < 0) { 1059 if (csrow < 0) {
1064 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1060 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
1065 page, offset, syndrome, 1061 page, offset, syndrome,
1066 -1, -1, -1, 1062 -1, -1, -1,
1067 EDAC_MOD_STR,
1068 "failed to map error addr to a csrow", 1063 "failed to map error addr to a csrow",
1069 NULL); 1064 "");
1070 return; 1065 return;
1071 } 1066 }
1072 1067
@@ -1082,12 +1077,11 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1082 amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - " 1077 amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - "
1083 "possible error reporting race\n", 1078 "possible error reporting race\n",
1084 syndrome); 1079 syndrome);
1085 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1080 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
1086 page, offset, syndrome, 1081 page, offset, syndrome,
1087 csrow, -1, -1, 1082 csrow, -1, -1,
1088 EDAC_MOD_STR,
1089 "unknown syndrome - possible error reporting race", 1083 "unknown syndrome - possible error reporting race",
1090 NULL); 1084 "");
1091 return; 1085 return;
1092 } 1086 }
1093 } else { 1087 } else {
@@ -1102,10 +1096,10 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1102 channel = ((sys_addr & BIT(3)) != 0); 1096 channel = ((sys_addr & BIT(3)) != 0);
1103 } 1097 }
1104 1098
1105 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci, 1099 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci, 1,
1106 page, offset, syndrome, 1100 page, offset, syndrome,
1107 csrow, channel, -1, 1101 csrow, channel, -1,
1108 EDAC_MOD_STR, "", NULL); 1102 "", "");
1109} 1103}
1110 1104
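The repeated edac_mc_handle_error() rewrites in these hunks track one core API change: a new count argument directly after mci, empty strings instead of NULL for the trailing message/detail pair, and EDAC_MOD_STR dropped from the per-call arguments. Pieced together from the converted call sites (the authoritative prototype lives in the matching edac_core.h; the argument names below are mine), the new call shape is roughly:

/* illustrative only; kernel context assumed, names approximate */
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
                     1,                       /* count of errors being reported */
                     page, offset, syndrome,  /* page frame, page offset, syndrome */
                     csrow, channel, -1,      /* location layers, -1 when unknown */
                     "message",               /* short reason, "" if none */
                     "other detail");         /* free-form detail, "" replaces NULL */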
1111static int ddr2_cs_size(unsigned i, bool dct_width) 1105static int ddr2_cs_size(unsigned i, bool dct_width)
@@ -1193,7 +1187,7 @@ static int f1x_early_channel_count(struct amd64_pvt *pvt)
1193 * Need to check DCT0[0] and DCT1[0] to see if only one of them has 1187 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1194 * their CSEnable bit on. If so, then SINGLE DIMM case. 1188 * their CSEnable bit on. If so, then SINGLE DIMM case.
1195 */ 1189 */
1196 debugf0("Data width is not 128 bits - need more decoding\n"); 1190 edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1197 1191
1198 /* 1192 /*
1199 * Check DRAM Bank Address Mapping values for each DIMM to see if there 1193 * Check DRAM Bank Address Mapping values for each DIMM to see if there
@@ -1272,25 +1266,24 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt)
1272 return; 1266 return;
1273 1267
1274 if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) { 1268 if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1275 debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n", 1269 edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1276 pvt->dct_sel_lo, dct_sel_baseaddr(pvt)); 1270 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1277 1271
1278 debugf0(" DCTs operate in %s mode.\n", 1272 edac_dbg(0, " DCTs operate in %s mode\n",
1279 (dct_ganging_enabled(pvt) ? "ganged" : "unganged")); 1273 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1280 1274
1281 if (!dct_ganging_enabled(pvt)) 1275 if (!dct_ganging_enabled(pvt))
1282 debugf0(" Address range split per DCT: %s\n", 1276 edac_dbg(0, " Address range split per DCT: %s\n",
1283 (dct_high_range_enabled(pvt) ? "yes" : "no")); 1277 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1284 1278
1285 debugf0(" data interleave for ECC: %s, " 1279 edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1286 "DRAM cleared since last warm reset: %s\n", 1280 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1287 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), 1281 (dct_memory_cleared(pvt) ? "yes" : "no"));
1288 (dct_memory_cleared(pvt) ? "yes" : "no"));
1289 1282
1290 debugf0(" channel interleave: %s, " 1283 edac_dbg(0, " channel interleave: %s, "
1291 "interleave bits selector: 0x%x\n", 1284 "interleave bits selector: 0x%x\n",
1292 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), 1285 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1293 dct_sel_interleave_addr(pvt)); 1286 dct_sel_interleave_addr(pvt));
1294 } 1287 }
1295 1288
1296 amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi); 1289 amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
@@ -1428,7 +1421,7 @@ static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
1428 1421
1429 pvt = mci->pvt_info; 1422 pvt = mci->pvt_info;
1430 1423
1431 debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct); 1424 edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1432 1425
1433 for_each_chip_select(csrow, dct, pvt) { 1426 for_each_chip_select(csrow, dct, pvt) {
1434 if (!csrow_enabled(csrow, dct, pvt)) 1427 if (!csrow_enabled(csrow, dct, pvt))
@@ -1436,19 +1429,18 @@ static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
1436 1429
1437 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask); 1430 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1438 1431
1439 debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n", 1432 edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1440 csrow, cs_base, cs_mask); 1433 csrow, cs_base, cs_mask);
1441 1434
1442 cs_mask = ~cs_mask; 1435 cs_mask = ~cs_mask;
1443 1436
1444 debugf1(" (InputAddr & ~CSMask)=0x%llx " 1437 edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1445 "(CSBase & ~CSMask)=0x%llx\n", 1438 (in_addr & cs_mask), (cs_base & cs_mask));
1446 (in_addr & cs_mask), (cs_base & cs_mask));
1447 1439
1448 if ((in_addr & cs_mask) == (cs_base & cs_mask)) { 1440 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1449 cs_found = f10_process_possible_spare(pvt, dct, csrow); 1441 cs_found = f10_process_possible_spare(pvt, dct, csrow);
1450 1442
1451 debugf1(" MATCH csrow=%d\n", cs_found); 1443 edac_dbg(1, " MATCH csrow=%d\n", cs_found);
1452 break; 1444 break;
1453 } 1445 }
1454 } 1446 }
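The comparison in f1x_lookup_addr_in_dct() above is a classic base/mask match: bits set in CSMask are don't-cares, so both the input address and the chip-select base are stripped of them before comparing (the hunk inverts cs_mask first and then ANDs, which is the same thing). In miniature, with invented values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t cs_base = 0x240000000ULL;
        uint64_t cs_mask = 0xfffffULL;        /* low 20 bits are don't-care */
        uint64_t in_addr = 0x2400abcdeULL;
        uint64_t care    = ~cs_mask;

        if ((in_addr & care) == (cs_base & care))
                printf("InputAddr 0x%llx matches this csrow\n",
                       (unsigned long long)in_addr);
        return 0;
}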
@@ -1505,8 +1497,8 @@ static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1505 u8 intlv_en = dram_intlv_en(pvt, range); 1497 u8 intlv_en = dram_intlv_en(pvt, range);
1506 u32 intlv_sel = dram_intlv_sel(pvt, range); 1498 u32 intlv_sel = dram_intlv_sel(pvt, range);
1507 1499
1508 debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n", 1500 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1509 range, sys_addr, get_dram_limit(pvt, range)); 1501 range, sys_addr, get_dram_limit(pvt, range));
1510 1502
1511 if (dhar_valid(pvt) && 1503 if (dhar_valid(pvt) &&
1512 dhar_base(pvt) <= sys_addr && 1504 dhar_base(pvt) <= sys_addr &&
@@ -1562,7 +1554,7 @@ static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1562 (chan_addr & 0xfff); 1554 (chan_addr & 0xfff);
1563 } 1555 }
1564 1556
1565 debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr); 1557 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
1566 1558
1567 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel); 1559 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1568 1560
@@ -1616,12 +1608,11 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1616 csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); 1608 csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1617 1609
1618 if (csrow < 0) { 1610 if (csrow < 0) {
1619 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1611 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
1620 page, offset, syndrome, 1612 page, offset, syndrome,
1621 -1, -1, -1, 1613 -1, -1, -1,
1622 EDAC_MOD_STR,
1623 "failed to map error addr to a csrow", 1614 "failed to map error addr to a csrow",
1624 NULL); 1615 "");
1625 return; 1616 return;
1626 } 1617 }
1627 1618
@@ -1633,10 +1624,10 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1633 if (dct_ganging_enabled(pvt)) 1624 if (dct_ganging_enabled(pvt))
1634 chan = get_channel_from_ecc_syndrome(mci, syndrome); 1625 chan = get_channel_from_ecc_syndrome(mci, syndrome);
1635 1626
1636 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1627 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
1637 page, offset, syndrome, 1628 page, offset, syndrome,
1638 csrow, chan, -1, 1629 csrow, chan, -1,
1639 EDAC_MOD_STR, "", NULL); 1630 "", "");
1640} 1631}
1641 1632
1642/* 1633/*
@@ -1664,7 +1655,8 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1664 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases 1655 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
1665 : pvt->csels[0].csbases; 1656 : pvt->csels[0].csbases;
1666 1657
1667 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam); 1658 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1659 ctrl, dbam);
1668 1660
1669 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); 1661 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1670 1662
@@ -1840,7 +1832,7 @@ static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
1840 } 1832 }
1841 } 1833 }
1842 1834
1843 debugf0("syndrome(%x) not found\n", syndrome); 1835 edac_dbg(0, "syndrome(%x) not found\n", syndrome);
1844 return -1; 1836 return -1;
1845} 1837}
1846 1838
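decode_syndrome(), whose failure tail appears above, identifies the failing symbol by testing whether the syndrome can be cancelled by XOR-combining that symbol's error vectors. A deliberately tiny toy of the underlying idea, not the driver's actual table layout or search order:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t vecs[2]  = { 0x0f0f, 0x00ff };  /* one symbol's basis vectors */
        uint16_t syndrome = 0x0f0f ^ 0x00ff;     /* built so a match exists */

        for (int subset = 1; subset < 4; subset++) {
                uint16_t s = syndrome;
                if (subset & 1) s ^= vecs[0];
                if (subset & 2) s ^= vecs[1];
                if (!s) {
                        printf("syndrome 0x%04x matches this symbol\n", syndrome);
                        return 0;
                }
        }
        printf("syndrome 0x%04x not found\n", syndrome);
        return 0;
}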
@@ -1917,12 +1909,11 @@ static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
1917 /* Ensure that the Error Address is VALID */ 1909 /* Ensure that the Error Address is VALID */
1918 if (!(m->status & MCI_STATUS_ADDRV)) { 1910 if (!(m->status & MCI_STATUS_ADDRV)) {
1919 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); 1911 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1920 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1912 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
1921 0, 0, 0, 1913 0, 0, 0,
1922 -1, -1, -1, 1914 -1, -1, -1,
1923 EDAC_MOD_STR,
1924 "HW has no ERROR_ADDRESS available", 1915 "HW has no ERROR_ADDRESS available",
1925 NULL); 1916 "");
1926 return; 1917 return;
1927 } 1918 }
1928 1919
@@ -1946,12 +1937,11 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
1946 1937
1947 if (!(m->status & MCI_STATUS_ADDRV)) { 1938 if (!(m->status & MCI_STATUS_ADDRV)) {
1948 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); 1939 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1949 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1940 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
1950 0, 0, 0, 1941 0, 0, 0,
1951 -1, -1, -1, 1942 -1, -1, -1,
1952 EDAC_MOD_STR,
1953 "HW has no ERROR_ADDRESS available", 1943 "HW has no ERROR_ADDRESS available",
1954 NULL); 1944 "");
1955 return; 1945 return;
1956 } 1946 }
1957 1947
@@ -1966,11 +1956,11 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
1966 if (!src_mci) { 1956 if (!src_mci) {
1967 amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n", 1957 amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
1968 (unsigned long)sys_addr); 1958 (unsigned long)sys_addr);
1969 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1959 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
1970 page, offset, 0, 1960 page, offset, 0,
1971 -1, -1, -1, 1961 -1, -1, -1,
1972 EDAC_MOD_STR, 1962 "ERROR ADDRESS NOT mapped to a MC",
1973 "ERROR ADDRESS NOT mapped to a MC", NULL); 1963 "");
1974 return; 1964 return;
1975 } 1965 }
1976 1966
@@ -1980,17 +1970,16 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
1980 if (csrow < 0) { 1970 if (csrow < 0) {
1981 amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n", 1971 amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
1982 (unsigned long)sys_addr); 1972 (unsigned long)sys_addr);
1983 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1973 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
1984 page, offset, 0, 1974 page, offset, 0,
1985 -1, -1, -1, 1975 -1, -1, -1,
1986 EDAC_MOD_STR,
1987 "ERROR ADDRESS NOT mapped to CS", 1976 "ERROR ADDRESS NOT mapped to CS",
1988 NULL); 1977 "");
1989 } else { 1978 } else {
1990 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1979 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
1991 page, offset, 0, 1980 page, offset, 0,
1992 csrow, -1, -1, 1981 csrow, -1, -1,
1993 EDAC_MOD_STR, "", NULL); 1982 "", "");
1994 } 1983 }
1995} 1984}
1996 1985
@@ -2047,9 +2036,9 @@ static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
2047 2036
2048 return -ENODEV; 2037 return -ENODEV;
2049 } 2038 }
2050 debugf1("F1: %s\n", pci_name(pvt->F1)); 2039 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2051 debugf1("F2: %s\n", pci_name(pvt->F2)); 2040 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2052 debugf1("F3: %s\n", pci_name(pvt->F3)); 2041 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2053 2042
2054 return 0; 2043 return 0;
2055} 2044}
@@ -2076,15 +2065,15 @@ static void read_mc_regs(struct amd64_pvt *pvt)
2076 * those are Read-As-Zero 2065 * those are Read-As-Zero
2077 */ 2066 */
2078 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem); 2067 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2079 debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem); 2068 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
2080 2069
2081 /* check first whether TOP_MEM2 is enabled */ 2070 /* check first whether TOP_MEM2 is enabled */
2082 rdmsrl(MSR_K8_SYSCFG, msr_val); 2071 rdmsrl(MSR_K8_SYSCFG, msr_val);
2083 if (msr_val & (1U << 21)) { 2072 if (msr_val & (1U << 21)) {
2084 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2); 2073 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2085 debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2); 2074 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2086 } else 2075 } else
2087 debugf0(" TOP_MEM2 disabled.\n"); 2076 edac_dbg(0, " TOP_MEM2 disabled\n");
2088 2077
2089 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap); 2078 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2090 2079
@@ -2100,17 +2089,17 @@ static void read_mc_regs(struct amd64_pvt *pvt)
2100 if (!rw) 2089 if (!rw)
2101 continue; 2090 continue;
2102 2091
2103 debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n", 2092 edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2104 range, 2093 range,
2105 get_dram_base(pvt, range), 2094 get_dram_base(pvt, range),
2106 get_dram_limit(pvt, range)); 2095 get_dram_limit(pvt, range));
2107 2096
2108 debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n", 2097 edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2109 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled", 2098 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2110 (rw & 0x1) ? "R" : "-", 2099 (rw & 0x1) ? "R" : "-",
2111 (rw & 0x2) ? "W" : "-", 2100 (rw & 0x2) ? "W" : "-",
2112 dram_intlv_sel(pvt, range), 2101 dram_intlv_sel(pvt, range),
2113 dram_dst_node(pvt, range)); 2102 dram_dst_node(pvt, range));
2114 } 2103 }
2115 2104
2116 read_dct_base_mask(pvt); 2105 read_dct_base_mask(pvt);
@@ -2191,9 +2180,9 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2191 2180
2192 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT); 2181 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
2193 2182
2194 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode); 2183 edac_dbg(0, " (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
2195 debugf0(" nr_pages/channel= %u channel-count = %d\n", 2184 edac_dbg(0, " nr_pages/channel= %u channel-count = %d\n",
2196 nr_pages, pvt->channel_count); 2185 nr_pages, pvt->channel_count);
2197 2186
2198 return nr_pages; 2187 return nr_pages;
2199} 2188}
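The << (20 - PAGE_SHIFT) in amd64_csrow_nr_pages() converts the chip-select size from megabytes (what dbam_to_cs() returns) straight to pages: multiply by 1 MiB and divide by the page size in a single shift. With the usual 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned cs_size_mb = 2048;     /* hypothetical dbam_to_cs() result */
        unsigned nr_pages   = cs_size_mb << (20 - PAGE_SHIFT);

        printf("%u MB = %u pages\n", cs_size_mb, nr_pages);  /* 2048 MB = 524288 pages */
        return 0;
}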
@@ -2205,6 +2194,7 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2205static int init_csrows(struct mem_ctl_info *mci) 2194static int init_csrows(struct mem_ctl_info *mci)
2206{ 2195{
2207 struct csrow_info *csrow; 2196 struct csrow_info *csrow;
2197 struct dimm_info *dimm;
2208 struct amd64_pvt *pvt = mci->pvt_info; 2198 struct amd64_pvt *pvt = mci->pvt_info;
2209 u64 base, mask; 2199 u64 base, mask;
2210 u32 val; 2200 u32 val;
@@ -2217,22 +2207,19 @@ static int init_csrows(struct mem_ctl_info *mci)
2217 2207
2218 pvt->nbcfg = val; 2208 pvt->nbcfg = val;
2219 2209
2220 debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", 2210 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2221 pvt->mc_node_id, val, 2211 pvt->mc_node_id, val,
2222 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE)); 2212 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2223 2213
2224 for_each_chip_select(i, 0, pvt) { 2214 for_each_chip_select(i, 0, pvt) {
2225 csrow = &mci->csrows[i]; 2215 csrow = mci->csrows[i];
2226 2216
2227 if (!csrow_enabled(i, 0, pvt) && !csrow_enabled(i, 1, pvt)) { 2217 if (!csrow_enabled(i, 0, pvt) && !csrow_enabled(i, 1, pvt)) {
2228 debugf1("----CSROW %d EMPTY for node %d\n", i, 2218 edac_dbg(1, "----CSROW %d VALID for MC node %d\n",
2229 pvt->mc_node_id); 2219 i, pvt->mc_node_id);
2230 continue; 2220 continue;
2231 } 2221 }
2232 2222
2233 debugf1("----CSROW %d VALID for MC node %d\n",
2234 i, pvt->mc_node_id);
2235
2236 empty = 0; 2223 empty = 0;
2237 if (csrow_enabled(i, 0, pvt)) 2224 if (csrow_enabled(i, 0, pvt))
2238 nr_pages = amd64_csrow_nr_pages(pvt, 0, i); 2225 nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
@@ -2244,8 +2231,9 @@ static int init_csrows(struct mem_ctl_info *mci)
2244 2231
2245 mtype = amd64_determine_memory_type(pvt, i); 2232 mtype = amd64_determine_memory_type(pvt, i);
2246 2233
2247 debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i); 2234 edac_dbg(1, " for MC node %d csrow %d:\n", pvt->mc_node_id, i);
2248 debugf1(" nr_pages: %u\n", nr_pages * pvt->channel_count); 2235 edac_dbg(1, " nr_pages: %u\n",
2236 nr_pages * pvt->channel_count);
2249 2237
2250 /* 2238 /*
2251 * determine whether CHIPKILL or JUST ECC or NO ECC is operating 2239 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
@@ -2257,9 +2245,10 @@ static int init_csrows(struct mem_ctl_info *mci)
2257 edac_mode = EDAC_NONE; 2245 edac_mode = EDAC_NONE;
2258 2246
2259 for (j = 0; j < pvt->channel_count; j++) { 2247 for (j = 0; j < pvt->channel_count; j++) {
2260 csrow->channels[j].dimm->mtype = mtype; 2248 dimm = csrow->channels[j]->dimm;
2261 csrow->channels[j].dimm->edac_mode = edac_mode; 2249 dimm->mtype = mtype;
2262 csrow->channels[j].dimm->nr_pages = nr_pages; 2250 dimm->edac_mode = edac_mode;
2251 dimm->nr_pages = nr_pages;
2263 } 2252 }
2264 } 2253 }
2265 2254
@@ -2296,9 +2285,9 @@ static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
2296 struct msr *reg = per_cpu_ptr(msrs, cpu); 2285 struct msr *reg = per_cpu_ptr(msrs, cpu);
2297 nbe = reg->l & MSR_MCGCTL_NBE; 2286 nbe = reg->l & MSR_MCGCTL_NBE;
2298 2287
2299 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", 2288 edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2300 cpu, reg->q, 2289 cpu, reg->q,
2301 (nbe ? "enabled" : "disabled")); 2290 (nbe ? "enabled" : "disabled"));
2302 2291
2303 if (!nbe) 2292 if (!nbe)
2304 goto out; 2293 goto out;
@@ -2369,8 +2358,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2369 2358
2370 amd64_read_pci_cfg(F3, NBCFG, &value); 2359 amd64_read_pci_cfg(F3, NBCFG, &value);
2371 2360
2372 debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", 2361 edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2373 nid, value, !!(value & NBCFG_ECC_ENABLE)); 2362 nid, value, !!(value & NBCFG_ECC_ENABLE));
2374 2363
2375 if (!(value & NBCFG_ECC_ENABLE)) { 2364 if (!(value & NBCFG_ECC_ENABLE)) {
2376 amd64_warn("DRAM ECC disabled on this node, enabling...\n"); 2365 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
@@ -2394,8 +2383,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2394 s->flags.nb_ecc_prev = 1; 2383 s->flags.nb_ecc_prev = 1;
2395 } 2384 }
2396 2385
2397 debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", 2386 edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2398 nid, value, !!(value & NBCFG_ECC_ENABLE)); 2387 nid, value, !!(value & NBCFG_ECC_ENABLE));
2399 2388
2400 return ret; 2389 return ret;
2401} 2390}
@@ -2463,26 +2452,29 @@ static bool ecc_enabled(struct pci_dev *F3, u8 nid)
2463 return true; 2452 return true;
2464} 2453}
2465 2454
2466struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) + 2455static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
2467 ARRAY_SIZE(amd64_inj_attrs) +
2468 1];
2469
2470struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };
2471
2472static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
2473{ 2456{
2474 unsigned int i = 0, j = 0; 2457 int rc;
2475 2458
2476 for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++) 2459 rc = amd64_create_sysfs_dbg_files(mci);
2477 sysfs_attrs[i] = amd64_dbg_attrs[i]; 2460 if (rc < 0)
2461 return rc;
2478 2462
2479 if (boot_cpu_data.x86 >= 0x10) 2463 if (boot_cpu_data.x86 >= 0x10) {
2480 for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++) 2464 rc = amd64_create_sysfs_inject_files(mci);
2481 sysfs_attrs[i] = amd64_inj_attrs[j]; 2465 if (rc < 0)
2466 return rc;
2467 }
2468
2469 return 0;
2470}
2482 2471
2483 sysfs_attrs[i] = terminator; 2472static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
2473{
2474 amd64_remove_sysfs_dbg_files(mci);
2484 2475
2485 mci->mc_driver_sysfs_attributes = sysfs_attrs; 2476 if (boot_cpu_data.x86 >= 0x10)
2477 amd64_remove_sysfs_inject_files(mci);
2486} 2478}
2487 2479
2488static void setup_mci_misc_attrs(struct mem_ctl_info *mci, 2480static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
@@ -2601,20 +2593,22 @@ static int amd64_init_one_instance(struct pci_dev *F2)
2601 goto err_siblings; 2593 goto err_siblings;
2602 2594
2603 mci->pvt_info = pvt; 2595 mci->pvt_info = pvt;
2604 mci->dev = &pvt->F2->dev; 2596 mci->pdev = &pvt->F2->dev;
2605 2597
2606 setup_mci_misc_attrs(mci, fam_type); 2598 setup_mci_misc_attrs(mci, fam_type);
2607 2599
2608 if (init_csrows(mci)) 2600 if (init_csrows(mci))
2609 mci->edac_cap = EDAC_FLAG_NONE; 2601 mci->edac_cap = EDAC_FLAG_NONE;
2610 2602
2611 set_mc_sysfs_attrs(mci);
2612
2613 ret = -ENODEV; 2603 ret = -ENODEV;
2614 if (edac_mc_add_mc(mci)) { 2604 if (edac_mc_add_mc(mci)) {
2615 debugf1("failed edac_mc_add_mc()\n"); 2605 edac_dbg(1, "failed edac_mc_add_mc()\n");
2616 goto err_add_mc; 2606 goto err_add_mc;
2617 } 2607 }
2608 if (set_mc_sysfs_attrs(mci)) {
2609 edac_dbg(1, "failed edac_mc_add_mc()\n");
2610 goto err_add_sysfs;
2611 }
2618 2612
2619 /* register stuff with EDAC MCE */ 2613 /* register stuff with EDAC MCE */
2620 if (report_gart_errors) 2614 if (report_gart_errors)
@@ -2628,6 +2622,8 @@ static int amd64_init_one_instance(struct pci_dev *F2)
2628 2622
2629 return 0; 2623 return 0;
2630 2624
2625err_add_sysfs:
2626 edac_mc_del_mc(mci->pdev);
2631err_add_mc: 2627err_add_mc:
2632 edac_mc_free(mci); 2628 edac_mc_free(mci);
2633 2629
@@ -2651,7 +2647,7 @@ static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
2651 2647
2652 ret = pci_enable_device(pdev); 2648 ret = pci_enable_device(pdev);
2653 if (ret < 0) { 2649 if (ret < 0) {
2654 debugf0("ret=%d\n", ret); 2650 edac_dbg(0, "ret=%d\n", ret);
2655 return -EIO; 2651 return -EIO;
2656 } 2652 }
2657 2653
@@ -2698,6 +2694,8 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
2698 struct pci_dev *F3 = node_to_amd_nb(nid)->misc; 2694 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2699 struct ecc_settings *s = ecc_stngs[nid]; 2695 struct ecc_settings *s = ecc_stngs[nid];
2700 2696
2697 mci = find_mci_by_dev(&pdev->dev);
2698 del_mc_sysfs_attrs(mci);
2701 /* Remove from EDAC CORE tracking list */ 2699 /* Remove from EDAC CORE tracking list */
2702 mci = edac_mc_del_mc(&pdev->dev); 2700 mci = edac_mc_del_mc(&pdev->dev);
2703 if (!mci) 2701 if (!mci)
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 9a666cb985b2..8d4804732bac 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -413,20 +413,33 @@ struct ecc_settings {
413}; 413};
414 414
415#ifdef CONFIG_EDAC_DEBUG 415#ifdef CONFIG_EDAC_DEBUG
416#define NUM_DBG_ATTRS 5 416int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci);
417void amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci);
418
417#else 419#else
418#define NUM_DBG_ATTRS 0 420static inline int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci)
421{
422 return 0;
423}
424static inline void amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci)
425{
426}
419#endif 427#endif
420 428
421#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION 429#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
422#define NUM_INJ_ATTRS 5 430int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci);
431void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci);
432
423#else 433#else
424#define NUM_INJ_ATTRS 0 434static inline int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci)
435{
436 return 0;
437}
438static inline void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci)
439{
440}
425#endif 441#endif
426 442
427extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
428 amd64_inj_attrs[NUM_INJ_ATTRS];
429
430/* 443/*
431 * Each of the PCI Device IDs types have their own set of hardware accessor 444 * Each of the PCI Device IDs types have their own set of hardware accessor
432 * functions and per device encoding/decoding logic. 445 * functions and per device encoding/decoding logic.
@@ -460,3 +473,5 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
460 473
461int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, 474int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
462 u64 *hole_offset, u64 *hole_size); 475 u64 *hole_offset, u64 *hole_size);
476
477#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
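The to_mci() helper closing this header is the standard container_of() trick: given a pointer to the embedded struct device, subtract the member offset to recover the enclosing mem_ctl_info. A simplified userspace rendition with stand-in types (the kernel macro adds type checking omitted here):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_device { int dummy; };
struct fake_mci { int node; struct fake_device dev; };

#define to_mci(k) container_of(k, struct fake_mci, dev)

int main(void)
{
        struct fake_mci mci = { .node = 3 };
        struct fake_device *dev = &mci.dev;        /* what a sysfs show() receives */

        printf("node = %d\n", to_mci(dev)->node);  /* recovers the enclosing struct */
        return 0;
}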
diff --git a/drivers/edac/amd64_edac_dbg.c b/drivers/edac/amd64_edac_dbg.c
index e3562288f4ce..2c1bbf740605 100644
--- a/drivers/edac/amd64_edac_dbg.c
+++ b/drivers/edac/amd64_edac_dbg.c
@@ -1,8 +1,11 @@
1#include "amd64_edac.h" 1#include "amd64_edac.h"
2 2
3#define EDAC_DCT_ATTR_SHOW(reg) \ 3#define EDAC_DCT_ATTR_SHOW(reg) \
4static ssize_t amd64_##reg##_show(struct mem_ctl_info *mci, char *data) \ 4static ssize_t amd64_##reg##_show(struct device *dev, \
5 struct device_attribute *mattr, \
6 char *data) \
5{ \ 7{ \
8 struct mem_ctl_info *mci = to_mci(dev); \
6 struct amd64_pvt *pvt = mci->pvt_info; \ 9 struct amd64_pvt *pvt = mci->pvt_info; \
7 return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \ 10 return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \
8} 11}
@@ -12,8 +15,12 @@ EDAC_DCT_ATTR_SHOW(dbam0);
12EDAC_DCT_ATTR_SHOW(top_mem); 15EDAC_DCT_ATTR_SHOW(top_mem);
13EDAC_DCT_ATTR_SHOW(top_mem2); 16EDAC_DCT_ATTR_SHOW(top_mem2);
14 17
15static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data) 18static ssize_t amd64_hole_show(struct device *dev,
19 struct device_attribute *mattr,
20 char *data)
16{ 21{
22 struct mem_ctl_info *mci = to_mci(dev);
23
17 u64 hole_base = 0; 24 u64 hole_base = 0;
18 u64 hole_offset = 0; 25 u64 hole_offset = 0;
19 u64 hole_size = 0; 26 u64 hole_size = 0;
@@ -27,46 +34,40 @@ static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data)
27/* 34/*
28 * update NUM_DBG_ATTRS in case you add new members 35 * update NUM_DBG_ATTRS in case you add new members
29 */ 36 */
30struct mcidev_sysfs_attribute amd64_dbg_attrs[] = { 37static DEVICE_ATTR(dhar, S_IRUGO, amd64_dhar_show, NULL);
38static DEVICE_ATTR(dbam, S_IRUGO, amd64_dbam0_show, NULL);
39static DEVICE_ATTR(topmem, S_IRUGO, amd64_top_mem_show, NULL);
40static DEVICE_ATTR(topmem2, S_IRUGO, amd64_top_mem2_show, NULL);
41static DEVICE_ATTR(dram_hole, S_IRUGO, amd64_hole_show, NULL);
42
43int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci)
44{
45 int rc;
46
47 rc = device_create_file(&mci->dev, &dev_attr_dhar);
48 if (rc < 0)
49 return rc;
50 rc = device_create_file(&mci->dev, &dev_attr_dbam);
51 if (rc < 0)
52 return rc;
53 rc = device_create_file(&mci->dev, &dev_attr_topmem);
54 if (rc < 0)
55 return rc;
56 rc = device_create_file(&mci->dev, &dev_attr_topmem2);
57 if (rc < 0)
58 return rc;
59 rc = device_create_file(&mci->dev, &dev_attr_dram_hole);
60 if (rc < 0)
61 return rc;
31 62
32 { 63 return 0;
33 .attr = { 64}
34 .name = "dhar", 65
35 .mode = (S_IRUGO) 66void amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci)
36 }, 67{
37 .show = amd64_dhar_show, 68 device_remove_file(&mci->dev, &dev_attr_dhar);
38 .store = NULL, 69 device_remove_file(&mci->dev, &dev_attr_dbam);
39 }, 70 device_remove_file(&mci->dev, &dev_attr_topmem);
40 { 71 device_remove_file(&mci->dev, &dev_attr_topmem2);
41 .attr = { 72 device_remove_file(&mci->dev, &dev_attr_dram_hole);
42 .name = "dbam", 73}
43 .mode = (S_IRUGO)
44 },
45 .show = amd64_dbam0_show,
46 .store = NULL,
47 },
48 {
49 .attr = {
50 .name = "topmem",
51 .mode = (S_IRUGO)
52 },
53 .show = amd64_top_mem_show,
54 .store = NULL,
55 },
56 {
57 .attr = {
58 .name = "topmem2",
59 .mode = (S_IRUGO)
60 },
61 .show = amd64_top_mem2_show,
62 .store = NULL,
63 },
64 {
65 .attr = {
66 .name = "dram_hole",
67 .mode = (S_IRUGO)
68 },
69 .show = amd64_hole_show,
70 .store = NULL,
71 },
72};
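Taken together, the rewrite of this file shows the pattern the whole series moves to: each attribute becomes a DEVICE_ATTR() whose show/store callbacks receive a struct device, and registration happens explicitly against &mci->dev. A condensed template of that pattern, with a hypothetical attribute name and error handling trimmed (kernel context assumed):

/* sketch only: DEVICE_ATTR(foo, ...) generates dev_attr_foo */
static ssize_t foo_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct mem_ctl_info *mci = to_mci(dev);   /* device -> enclosing mci */
        struct amd64_pvt *pvt = mci->pvt_info;

        return sprintf(buf, "0x%016llx\n", (u64)pvt->dhar);
}
static DEVICE_ATTR(foo, S_IRUGO, foo_show, NULL); /* read-only sysfs node "foo" */

/* create on setup, drop on teardown:
 *      device_create_file(&mci->dev, &dev_attr_foo);
 *      device_remove_file(&mci->dev, &dev_attr_foo);
 */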
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
index 303f10e03dda..53d972e00dfb 100644
--- a/drivers/edac/amd64_edac_inj.c
+++ b/drivers/edac/amd64_edac_inj.c
@@ -1,7 +1,10 @@
1#include "amd64_edac.h" 1#include "amd64_edac.h"
2 2
3static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf) 3static ssize_t amd64_inject_section_show(struct device *dev,
4 struct device_attribute *mattr,
5 char *buf)
4{ 6{
7 struct mem_ctl_info *mci = to_mci(dev);
5 struct amd64_pvt *pvt = mci->pvt_info; 8 struct amd64_pvt *pvt = mci->pvt_info;
6 return sprintf(buf, "0x%x\n", pvt->injection.section); 9 return sprintf(buf, "0x%x\n", pvt->injection.section);
7} 10}
@@ -12,9 +15,11 @@ static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf)
12 * 15 *
13 * range: 0..3 16 * range: 0..3
14 */ 17 */
15static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci, 18static ssize_t amd64_inject_section_store(struct device *dev,
19 struct device_attribute *mattr,
16 const char *data, size_t count) 20 const char *data, size_t count)
17{ 21{
22 struct mem_ctl_info *mci = to_mci(dev);
18 struct amd64_pvt *pvt = mci->pvt_info; 23 struct amd64_pvt *pvt = mci->pvt_info;
19 unsigned long value; 24 unsigned long value;
20 int ret = 0; 25 int ret = 0;
@@ -33,8 +38,11 @@ static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
33 return ret; 38 return ret;
34} 39}
35 40
36static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf) 41static ssize_t amd64_inject_word_show(struct device *dev,
42 struct device_attribute *mattr,
43 char *buf)
37{ 44{
45 struct mem_ctl_info *mci = to_mci(dev);
38 struct amd64_pvt *pvt = mci->pvt_info; 46 struct amd64_pvt *pvt = mci->pvt_info;
39 return sprintf(buf, "0x%x\n", pvt->injection.word); 47 return sprintf(buf, "0x%x\n", pvt->injection.word);
40} 48}
@@ -45,9 +53,11 @@ static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf)
45 * 53 *
46 * range: 0..8 54 * range: 0..8
47 */ 55 */
48static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci, 56static ssize_t amd64_inject_word_store(struct device *dev,
49 const char *data, size_t count) 57 struct device_attribute *mattr,
58 const char *data, size_t count)
50{ 59{
60 struct mem_ctl_info *mci = to_mci(dev);
51 struct amd64_pvt *pvt = mci->pvt_info; 61 struct amd64_pvt *pvt = mci->pvt_info;
52 unsigned long value; 62 unsigned long value;
53 int ret = 0; 63 int ret = 0;
@@ -66,8 +76,11 @@ static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
66 return ret; 76 return ret;
67} 77}
68 78
69static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf) 79static ssize_t amd64_inject_ecc_vector_show(struct device *dev,
80 struct device_attribute *mattr,
81 char *buf)
70{ 82{
83 struct mem_ctl_info *mci = to_mci(dev);
71 struct amd64_pvt *pvt = mci->pvt_info; 84 struct amd64_pvt *pvt = mci->pvt_info;
72 return sprintf(buf, "0x%x\n", pvt->injection.bit_map); 85 return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
73} 86}
@@ -77,9 +90,11 @@ static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf)
77 * corresponding bit within the error injection word above. When used during a 90 * corresponding bit within the error injection word above. When used during a
78 * DRAM ECC read, it holds the contents of the DRAM ECC bits. 91 * DRAM ECC read, it holds the contents of the DRAM ECC bits.
79 */ 92 */
80static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci, 93static ssize_t amd64_inject_ecc_vector_store(struct device *dev,
81 const char *data, size_t count) 94 struct device_attribute *mattr,
95 const char *data, size_t count)
82{ 96{
97 struct mem_ctl_info *mci = to_mci(dev);
83 struct amd64_pvt *pvt = mci->pvt_info; 98 struct amd64_pvt *pvt = mci->pvt_info;
84 unsigned long value; 99 unsigned long value;
85 int ret = 0; 100 int ret = 0;
@@ -103,9 +118,11 @@ static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
103 * Do a DRAM ECC read. Assemble staged values in the pvt area, format into 118 * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
104 * fields needed by the injection registers and read the NB Array Data Port. 119 * fields needed by the injection registers and read the NB Array Data Port.
105 */ 120 */
106static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci, 121static ssize_t amd64_inject_read_store(struct device *dev,
107 const char *data, size_t count) 122 struct device_attribute *mattr,
123 const char *data, size_t count)
108{ 124{
125 struct mem_ctl_info *mci = to_mci(dev);
109 struct amd64_pvt *pvt = mci->pvt_info; 126 struct amd64_pvt *pvt = mci->pvt_info;
110 unsigned long value; 127 unsigned long value;
111 u32 section, word_bits; 128 u32 section, word_bits;
@@ -125,7 +142,8 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
125 /* Issue 'word' and 'bit' along with the READ request */ 142 /* Issue 'word' and 'bit' along with the READ request */
126 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); 143 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
127 144
128 debugf0("section=0x%x word_bits=0x%x\n", section, word_bits); 145 edac_dbg(0, "section=0x%x word_bits=0x%x\n",
146 section, word_bits);
129 147
130 return count; 148 return count;
131 } 149 }
@@ -136,9 +154,11 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
136 * Do a DRAM ECC write. Assemble staged values in the pvt area and format into 154 * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
137 * fields needed by the injection registers. 155 * fields needed by the injection registers.
138 */ 156 */
139static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci, 157static ssize_t amd64_inject_write_store(struct device *dev,
158 struct device_attribute *mattr,
140 const char *data, size_t count) 159 const char *data, size_t count)
141{ 160{
161 struct mem_ctl_info *mci = to_mci(dev);
142 struct amd64_pvt *pvt = mci->pvt_info; 162 struct amd64_pvt *pvt = mci->pvt_info;
143 unsigned long value; 163 unsigned long value;
144 u32 section, word_bits; 164 u32 section, word_bits;
@@ -158,7 +178,8 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
158 /* Issue 'word' and 'bit' along with the READ request */ 178 /* Issue 'word' and 'bit' along with the READ request */
159 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); 179 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
160 180
161 debugf0("section=0x%x word_bits=0x%x\n", section, word_bits); 181 edac_dbg(0, "section=0x%x word_bits=0x%x\n",
182 section, word_bits);
162 183
163 return count; 184 return count;
164 } 185 }
@@ -168,46 +189,47 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
168/* 189/*
169 * update NUM_INJ_ATTRS in case you add new members 190 * update NUM_INJ_ATTRS in case you add new members
170 */ 191 */
171struct mcidev_sysfs_attribute amd64_inj_attrs[] = { 192
172 193static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR,
173 { 194 amd64_inject_section_show, amd64_inject_section_store);
174 .attr = { 195static DEVICE_ATTR(inject_word, S_IRUGO | S_IWUSR,
175 .name = "inject_section", 196 amd64_inject_word_show, amd64_inject_word_store);
176 .mode = (S_IRUGO | S_IWUSR) 197static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR,
177 }, 198 amd64_inject_ecc_vector_show, amd64_inject_ecc_vector_store);
178 .show = amd64_inject_section_show, 199static DEVICE_ATTR(inject_write, S_IRUGO | S_IWUSR,
179 .store = amd64_inject_section_store, 200 NULL, amd64_inject_write_store);
180 }, 201static DEVICE_ATTR(inject_read, S_IRUGO | S_IWUSR,
181 { 202 NULL, amd64_inject_read_store);
182 .attr = { 203
183 .name = "inject_word", 204
184 .mode = (S_IRUGO | S_IWUSR) 205int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci)
185 }, 206{
186 .show = amd64_inject_word_show, 207 int rc;
187 .store = amd64_inject_word_store, 208
188 }, 209 rc = device_create_file(&mci->dev, &dev_attr_inject_section);
189 { 210 if (rc < 0)
190 .attr = { 211 return rc;
191 .name = "inject_ecc_vector", 212 rc = device_create_file(&mci->dev, &dev_attr_inject_word);
192 .mode = (S_IRUGO | S_IWUSR) 213 if (rc < 0)
193 }, 214 return rc;
194 .show = amd64_inject_ecc_vector_show, 215 rc = device_create_file(&mci->dev, &dev_attr_inject_ecc_vector);
195 .store = amd64_inject_ecc_vector_store, 216 if (rc < 0)
196 }, 217 return rc;
197 { 218 rc = device_create_file(&mci->dev, &dev_attr_inject_write);
198 .attr = { 219 if (rc < 0)
199 .name = "inject_write", 220 return rc;
200 .mode = (S_IRUGO | S_IWUSR) 221 rc = device_create_file(&mci->dev, &dev_attr_inject_read);
201 }, 222 if (rc < 0)
202 .show = NULL, 223 return rc;
203 .store = amd64_inject_write_store, 224
204 }, 225 return 0;
205 { 226}
206 .attr = { 227
207 .name = "inject_read", 228void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci)
208 .mode = (S_IRUGO | S_IWUSR) 229{
209 }, 230 device_remove_file(&mci->dev, &dev_attr_inject_section);
210 .show = NULL, 231 device_remove_file(&mci->dev, &dev_attr_inject_word);
211 .store = amd64_inject_read_store, 232 device_remove_file(&mci->dev, &dev_attr_inject_ecc_vector);
212 }, 233 device_remove_file(&mci->dev, &dev_attr_inject_write);
213}; 234 device_remove_file(&mci->dev, &dev_attr_inject_read);
235}
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index 9774d443fa57..29eeb68a200c 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -105,7 +105,7 @@ static void amd76x_get_error_info(struct mem_ctl_info *mci,
105{ 105{
106 struct pci_dev *pdev; 106 struct pci_dev *pdev;
107 107
108 pdev = to_pci_dev(mci->dev); 108 pdev = to_pci_dev(mci->pdev);
109 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, 109 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS,
110 &info->ecc_mode_status); 110 &info->ecc_mode_status);
111 111
@@ -145,10 +145,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
145 145
146 if (handle_errors) { 146 if (handle_errors) {
147 row = (info->ecc_mode_status >> 4) & 0xf; 147 row = (info->ecc_mode_status >> 4) & 0xf;
148 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 148 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
149 mci->csrows[row].first_page, 0, 0, 149 mci->csrows[row]->first_page, 0, 0,
150 row, 0, -1, 150 row, 0, -1,
151 mci->ctl_name, "", NULL); 151 mci->ctl_name, "");
152 } 152 }
153 } 153 }
154 154
@@ -160,10 +160,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
160 160
161 if (handle_errors) { 161 if (handle_errors) {
162 row = info->ecc_mode_status & 0xf; 162 row = info->ecc_mode_status & 0xf;
163 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 163 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
164 mci->csrows[row].first_page, 0, 0, 164 mci->csrows[row]->first_page, 0, 0,
165 row, 0, -1, 165 row, 0, -1,
166 mci->ctl_name, "", NULL); 166 mci->ctl_name, "");
167 } 167 }
168 } 168 }
169 169
@@ -180,7 +180,7 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
180static void amd76x_check(struct mem_ctl_info *mci) 180static void amd76x_check(struct mem_ctl_info *mci)
181{ 181{
182 struct amd76x_error_info info; 182 struct amd76x_error_info info;
183 debugf3("%s()\n", __func__); 183 edac_dbg(3, "\n");
184 amd76x_get_error_info(mci, &info); 184 amd76x_get_error_info(mci, &info);
185 amd76x_process_error_info(mci, &info, 1); 185 amd76x_process_error_info(mci, &info, 1);
186} 186}
@@ -194,8 +194,8 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
194 int index; 194 int index;
195 195
196 for (index = 0; index < mci->nr_csrows; index++) { 196 for (index = 0; index < mci->nr_csrows; index++) {
197 csrow = &mci->csrows[index]; 197 csrow = mci->csrows[index];
198 dimm = csrow->channels[0].dimm; 198 dimm = csrow->channels[0]->dimm;
199 199
200 /* find the DRAM Chip Select Base address and mask */ 200 /* find the DRAM Chip Select Base address and mask */
201 pci_read_config_dword(pdev, 201 pci_read_config_dword(pdev,
@@ -241,7 +241,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
241 u32 ems_mode; 241 u32 ems_mode;
242 struct amd76x_error_info discard; 242 struct amd76x_error_info discard;
243 243
244 debugf0("%s()\n", __func__); 244 edac_dbg(0, "\n");
245 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems); 245 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
246 ems_mode = (ems >> 10) & 0x3; 246 ems_mode = (ems >> 10) & 0x3;
247 247
@@ -256,8 +256,8 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
256 if (mci == NULL) 256 if (mci == NULL)
257 return -ENOMEM; 257 return -ENOMEM;
258 258
259 debugf0("%s(): mci = %p\n", __func__, mci); 259 edac_dbg(0, "mci = %p\n", mci);
260 mci->dev = &pdev->dev; 260 mci->pdev = &pdev->dev;
261 mci->mtype_cap = MEM_FLAG_RDDR; 261 mci->mtype_cap = MEM_FLAG_RDDR;
262 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 262 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
263 mci->edac_cap = ems_mode ? 263 mci->edac_cap = ems_mode ?
@@ -276,7 +276,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
276 * type of memory controller. The ID is therefore hardcoded to 0. 276 * type of memory controller. The ID is therefore hardcoded to 0.
277 */ 277 */
278 if (edac_mc_add_mc(mci)) { 278 if (edac_mc_add_mc(mci)) {
279 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 279 edac_dbg(3, "failed edac_mc_add_mc()\n");
280 goto fail; 280 goto fail;
281 } 281 }
282 282
@@ -292,7 +292,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
292 } 292 }
293 293
294 /* get this far and it's successful */ 294 /* get this far and it's successful */
295 debugf3("%s(): success\n", __func__); 295 edac_dbg(3, "success\n");
296 return 0; 296 return 0;
297 297
298fail: 298fail:
@@ -304,7 +304,7 @@ fail:
304static int __devinit amd76x_init_one(struct pci_dev *pdev, 304static int __devinit amd76x_init_one(struct pci_dev *pdev,
305 const struct pci_device_id *ent) 305 const struct pci_device_id *ent)
306{ 306{
307 debugf0("%s()\n", __func__); 307 edac_dbg(0, "\n");
308 308
309 /* don't need to call pci_enable_device() */ 309 /* don't need to call pci_enable_device() */
310 return amd76x_probe1(pdev, ent->driver_data); 310 return amd76x_probe1(pdev, ent->driver_data);
@@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
322{ 322{
323 struct mem_ctl_info *mci; 323 struct mem_ctl_info *mci;
324 324
325 debugf0("%s()\n", __func__); 325 edac_dbg(0, "\n");
326 326
327 if (amd76x_pci) 327 if (amd76x_pci)
328 edac_pci_release_generic_ctl(amd76x_pci); 328 edac_pci_release_generic_ctl(amd76x_pci);
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index 69ee6aab5c71..a1bbd8edd257 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -33,10 +33,10 @@ struct cell_edac_priv
33static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar) 33static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
34{ 34{
35 struct cell_edac_priv *priv = mci->pvt_info; 35 struct cell_edac_priv *priv = mci->pvt_info;
36 struct csrow_info *csrow = &mci->csrows[0]; 36 struct csrow_info *csrow = mci->csrows[0];
37 unsigned long address, pfn, offset, syndrome; 37 unsigned long address, pfn, offset, syndrome;
38 38
39 dev_dbg(mci->dev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n", 39 dev_dbg(mci->pdev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n",
40 priv->node, chan, ar); 40 priv->node, chan, ar);
41 41
42 /* Address decoding is likely a bit bogus, to dbl check */ 42 /* Address decoding is likely a bit bogus, to dbl check */
@@ -48,18 +48,18 @@ static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
48 syndrome = (ar & 0x000000001fe00000ul) >> 21; 48 syndrome = (ar & 0x000000001fe00000ul) >> 21;
49 49
50 /* TODO: Decoding of the error address */ 50 /* TODO: Decoding of the error address */
51 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 51 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
52 csrow->first_page + pfn, offset, syndrome, 52 csrow->first_page + pfn, offset, syndrome,
53 0, chan, -1, "", "", NULL); 53 0, chan, -1, "", "");
54} 54}
55 55
56static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar) 56static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
57{ 57{
58 struct cell_edac_priv *priv = mci->pvt_info; 58 struct cell_edac_priv *priv = mci->pvt_info;
59 struct csrow_info *csrow = &mci->csrows[0]; 59 struct csrow_info *csrow = mci->csrows[0];
60 unsigned long address, pfn, offset; 60 unsigned long address, pfn, offset;
61 61
62 dev_dbg(mci->dev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n", 62 dev_dbg(mci->pdev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n",
63 priv->node, chan, ar); 63 priv->node, chan, ar);
64 64
65 /* Address decoding is likely a bit bogus, to dbl check */ 65 /* Address decoding is likely a bit bogus, to dbl check */
@@ -70,9 +70,9 @@ static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
70 offset = address & ~PAGE_MASK; 70 offset = address & ~PAGE_MASK;
71 71
72 /* TODO: Decoding of the error address */ 72 /* TODO: Decoding of the error address */
73 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 73 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
74 csrow->first_page + pfn, offset, 0, 74 csrow->first_page + pfn, offset, 0,
75 0, chan, -1, "", "", NULL); 75 0, chan, -1, "", "");
76} 76}
77 77
78static void cell_edac_check(struct mem_ctl_info *mci) 78static void cell_edac_check(struct mem_ctl_info *mci)
@@ -83,7 +83,7 @@ static void cell_edac_check(struct mem_ctl_info *mci)
83 fir = in_be64(&priv->regs->mic_fir); 83 fir = in_be64(&priv->regs->mic_fir);
84#ifdef DEBUG 84#ifdef DEBUG
85 if (fir != priv->prev_fir) { 85 if (fir != priv->prev_fir) {
86 dev_dbg(mci->dev, "fir change : 0x%016lx\n", fir); 86 dev_dbg(mci->pdev, "fir change : 0x%016lx\n", fir);
87 priv->prev_fir = fir; 87 priv->prev_fir = fir;
88 } 88 }
89#endif 89#endif
@@ -119,14 +119,14 @@ static void cell_edac_check(struct mem_ctl_info *mci)
119 mb(); /* sync up */ 119 mb(); /* sync up */
120#ifdef DEBUG 120#ifdef DEBUG
121 fir = in_be64(&priv->regs->mic_fir); 121 fir = in_be64(&priv->regs->mic_fir);
122 dev_dbg(mci->dev, "fir clear : 0x%016lx\n", fir); 122 dev_dbg(mci->pdev, "fir clear : 0x%016lx\n", fir);
123#endif 123#endif
124 } 124 }
125} 125}
126 126
127static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci) 127static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
128{ 128{
129 struct csrow_info *csrow = &mci->csrows[0]; 129 struct csrow_info *csrow = mci->csrows[0];
130 struct dimm_info *dimm; 130 struct dimm_info *dimm;
131 struct cell_edac_priv *priv = mci->pvt_info; 131 struct cell_edac_priv *priv = mci->pvt_info;
132 struct device_node *np; 132 struct device_node *np;
@@ -150,12 +150,12 @@ static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
150 csrow->last_page = csrow->first_page + nr_pages - 1; 150 csrow->last_page = csrow->first_page + nr_pages - 1;
151 151
152 for (j = 0; j < csrow->nr_channels; j++) { 152 for (j = 0; j < csrow->nr_channels; j++) {
153 dimm = csrow->channels[j].dimm; 153 dimm = csrow->channels[j]->dimm;
154 dimm->mtype = MEM_XDR; 154 dimm->mtype = MEM_XDR;
155 dimm->edac_mode = EDAC_SECDED; 155 dimm->edac_mode = EDAC_SECDED;
156 dimm->nr_pages = nr_pages / csrow->nr_channels; 156 dimm->nr_pages = nr_pages / csrow->nr_channels;
157 } 157 }
158 dev_dbg(mci->dev, 158 dev_dbg(mci->pdev,
159 "Initialized on node %d, chanmask=0x%x," 159 "Initialized on node %d, chanmask=0x%x,"
160 " first_page=0x%lx, nr_pages=0x%x\n", 160 " first_page=0x%lx, nr_pages=0x%x\n",
161 priv->node, priv->chanmask, 161 priv->node, priv->chanmask,
@@ -212,7 +212,7 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
212 priv->regs = regs; 212 priv->regs = regs;
213 priv->node = pdev->id; 213 priv->node = pdev->id;
214 priv->chanmask = chanmask; 214 priv->chanmask = chanmask;
215 mci->dev = &pdev->dev; 215 mci->pdev = &pdev->dev;
216 mci->mtype_cap = MEM_FLAG_XDR; 216 mci->mtype_cap = MEM_FLAG_XDR;
217 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 217 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
218 mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED; 218 mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED;
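The recurring change from "&mci->csrows[0]" to "mci->csrows[0]" above (and, in later hunks, from csrow->channels[j].dimm to csrow->channels[j]->dimm) comes from struct mem_ctl_info switching its csrow and channel members from embedded arrays to arrays of pointers, each element now allocated on its own. A minimal stand-alone sketch of why the accessor changes shape; the structure names here are invented for illustration and are not the kernel's:

#include <stdio.h>
#include <stdlib.h>

struct row { int first_page; };

struct mci_old { struct row csrows[2]; };	/* rows embedded in place */
struct mci_new { struct row *csrows[2]; };	/* rows allocated separately */

int main(void)
{
	struct mci_old old_mci = { .csrows = { { 0x100 }, { 0x200 } } };
	struct mci_new new_mci;
	struct row *r;
	int i;

	for (i = 0; i < 2; i++) {
		new_mci.csrows[i] = malloc(sizeof(*new_mci.csrows[i]));
		new_mci.csrows[i]->first_page = old_mci.csrows[i].first_page;
	}

	r = &old_mci.csrows[0];		/* embedded object: take its address */
	printf("old: 0x%x\n", r->first_page);

	r = new_mci.csrows[0];		/* already a pointer: use it as-is */
	printf("new: 0x%x\n", r->first_page);

	for (i = 0; i < 2; i++)
		free(new_mci.csrows[i]);
	return 0;
}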
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index e22030a9de66..c2ef13495873 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -316,13 +316,12 @@ static void get_total_mem(struct cpc925_mc_pdata *pdata)
316 reg += aw; 316 reg += aw;
317 size = of_read_number(reg, sw); 317 size = of_read_number(reg, sw);
318 reg += sw; 318 reg += sw;
319 debugf1("%s: start 0x%lx, size 0x%lx\n", __func__, 319 edac_dbg(1, "start 0x%lx, size 0x%lx\n", start, size);
320 start, size);
321 pdata->total_mem += size; 320 pdata->total_mem += size;
322 } while (reg < reg_end); 321 } while (reg < reg_end);
323 322
324 of_node_put(np); 323 of_node_put(np);
325 debugf0("%s: total_mem 0x%lx\n", __func__, pdata->total_mem); 324 edac_dbg(0, "total_mem 0x%lx\n", pdata->total_mem);
326} 325}
327 326
328static void cpc925_init_csrows(struct mem_ctl_info *mci) 327static void cpc925_init_csrows(struct mem_ctl_info *mci)
@@ -330,8 +329,9 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
330 struct cpc925_mc_pdata *pdata = mci->pvt_info; 329 struct cpc925_mc_pdata *pdata = mci->pvt_info;
331 struct csrow_info *csrow; 330 struct csrow_info *csrow;
332 struct dimm_info *dimm; 331 struct dimm_info *dimm;
332 enum dev_type dtype;
333 int index, j; 333 int index, j;
334 u32 mbmr, mbbar, bba; 334 u32 mbmr, mbbar, bba, grain;
335 unsigned long row_size, nr_pages, last_nr_pages = 0; 335 unsigned long row_size, nr_pages, last_nr_pages = 0;
336 336
337 get_total_mem(pdata); 337 get_total_mem(pdata);
@@ -347,7 +347,7 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
347 if (bba == 0) 347 if (bba == 0)
348 continue; /* not populated */ 348 continue; /* not populated */
349 349
350 csrow = &mci->csrows[index]; 350 csrow = mci->csrows[index];
351 351
352 row_size = bba * (1UL << 28); /* 256M */ 352 row_size = bba * (1UL << 28); /* 256M */
353 csrow->first_page = last_nr_pages; 353 csrow->first_page = last_nr_pages;
@@ -355,37 +355,36 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
355 csrow->last_page = csrow->first_page + nr_pages - 1; 355 csrow->last_page = csrow->first_page + nr_pages - 1;
356 last_nr_pages = csrow->last_page + 1; 356 last_nr_pages = csrow->last_page + 1;
357 357
358 switch (csrow->nr_channels) {
359 case 1: /* Single channel */
360 grain = 32; /* four-beat burst of 32 bytes */
361 break;
362 case 2: /* Dual channel */
363 default:
364 grain = 64; /* four-beat burst of 64 bytes */
365 break;
366 }
367 switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
368 case 6: /* 0110, no way to differentiate X8 VS X16 */
369 case 5: /* 0101 */
370 case 8: /* 1000 */
371 dtype = DEV_X16;
372 break;
373 case 7: /* 0111 */
374 case 9: /* 1001 */
375 dtype = DEV_X8;
376 break;
377 default:
378 dtype = DEV_UNKNOWN;
379 break;
380 }
358 for (j = 0; j < csrow->nr_channels; j++) { 381 for (j = 0; j < csrow->nr_channels; j++) {
359 dimm = csrow->channels[j].dimm; 382 dimm = csrow->channels[j]->dimm;
360
361 dimm->nr_pages = nr_pages / csrow->nr_channels; 383 dimm->nr_pages = nr_pages / csrow->nr_channels;
362 dimm->mtype = MEM_RDDR; 384 dimm->mtype = MEM_RDDR;
363 dimm->edac_mode = EDAC_SECDED; 385 dimm->edac_mode = EDAC_SECDED;
364 386 dimm->grain = grain;
365 switch (csrow->nr_channels) { 387 dimm->dtype = dtype;
366 case 1: /* Single channel */
367 dimm->grain = 32; /* four-beat burst of 32 bytes */
368 break;
369 case 2: /* Dual channel */
370 default:
371 dimm->grain = 64; /* four-beat burst of 64 bytes */
372 break;
373 }
374
375 switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
376 case 6: /* 0110, no way to differentiate X8 VS X16 */
377 case 5: /* 0101 */
378 case 8: /* 1000 */
379 dimm->dtype = DEV_X16;
380 break;
381 case 7: /* 0111 */
382 case 9: /* 1001 */
383 dimm->dtype = DEV_X8;
384 break;
385 default:
386 dimm->dtype = DEV_UNKNOWN;
387 break;
388 }
389 } 388 }
390 } 389 }
391} 390}
@@ -463,7 +462,7 @@ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
463 *csrow = rank; 462 *csrow = rank;
464 463
465#ifdef CONFIG_EDAC_DEBUG 464#ifdef CONFIG_EDAC_DEBUG
466 if (mci->csrows[rank].first_page == 0) { 465 if (mci->csrows[rank]->first_page == 0) {
467 cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a " 466 cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a "
468 "non-populated csrow, broken hardware?\n"); 467 "non-populated csrow, broken hardware?\n");
469 return; 468 return;
@@ -471,7 +470,7 @@ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
471#endif 470#endif
472 471
473 /* Revert csrow number */ 472 /* Revert csrow number */
474 pa = mci->csrows[rank].first_page << PAGE_SHIFT; 473 pa = mci->csrows[rank]->first_page << PAGE_SHIFT;
475 474
476 /* Revert column address */ 475 /* Revert column address */
477 col += bcnt; 476 col += bcnt;
@@ -512,7 +511,7 @@ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
512 *offset = pa & (PAGE_SIZE - 1); 511 *offset = pa & (PAGE_SIZE - 1);
513 *pfn = pa >> PAGE_SHIFT; 512 *pfn = pa >> PAGE_SHIFT;
514 513
515 debugf0("%s: ECC physical address 0x%lx\n", __func__, pa); 514 edac_dbg(0, "ECC physical address 0x%lx\n", pa);
516} 515}
517 516
518static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome) 517static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome)
@@ -555,18 +554,18 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
555 if (apiexcp & CECC_EXCP_DETECTED) { 554 if (apiexcp & CECC_EXCP_DETECTED) {
556 cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n"); 555 cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
557 channel = cpc925_mc_find_channel(mci, syndrome); 556 channel = cpc925_mc_find_channel(mci, syndrome);
558 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 557 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
559 pfn, offset, syndrome, 558 pfn, offset, syndrome,
560 csrow, channel, -1, 559 csrow, channel, -1,
561 mci->ctl_name, "", NULL); 560 mci->ctl_name, "");
562 } 561 }
563 562
564 if (apiexcp & UECC_EXCP_DETECTED) { 563 if (apiexcp & UECC_EXCP_DETECTED) {
565 cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n"); 564 cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
566 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 565 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
567 pfn, offset, 0, 566 pfn, offset, 0,
568 csrow, -1, -1, 567 csrow, -1, -1,
569 mci->ctl_name, "", NULL); 568 mci->ctl_name, "");
570 } 569 }
571 570
572 cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n"); 571 cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
@@ -852,8 +851,8 @@ static void cpc925_add_edac_devices(void __iomem *vbase)
852 goto err2; 851 goto err2;
853 } 852 }
854 853
855 debugf0("%s: Successfully added edac device for %s\n", 854 edac_dbg(0, "Successfully added edac device for %s\n",
856 __func__, dev_info->ctl_name); 855 dev_info->ctl_name);
857 856
858 continue; 857 continue;
859 858
@@ -884,8 +883,8 @@ static void cpc925_del_edac_devices(void)
884 if (dev_info->exit) 883 if (dev_info->exit)
885 dev_info->exit(dev_info); 884 dev_info->exit(dev_info);
886 885
887 debugf0("%s: Successfully deleted edac device for %s\n", 886 edac_dbg(0, "Successfully deleted edac device for %s\n",
888 __func__, dev_info->ctl_name); 887 dev_info->ctl_name);
889 } 888 }
890} 889}
891 890
@@ -900,7 +899,7 @@ static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci)
900 mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET); 899 mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET);
901 si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT; 900 si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT;
902 901
903 debugf0("%s, Mem Scrub Ctrl Register 0x%x\n", __func__, mscr); 902 edac_dbg(0, "Mem Scrub Ctrl Register 0x%x\n", mscr);
904 903
905 if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) || 904 if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) ||
906 (si == 0)) { 905 (si == 0)) {
@@ -928,8 +927,7 @@ static int cpc925_mc_get_channels(void __iomem *vbase)
928 ((mbcr & MBCR_64BITBUS_MASK) == 0)) 927 ((mbcr & MBCR_64BITBUS_MASK) == 0))
929 dual = 1; 928 dual = 1;
930 929
931 debugf0("%s: %s channel\n", __func__, 930 edac_dbg(0, "%s channel\n", (dual > 0) ? "Dual" : "Single");
932 (dual > 0) ? "Dual" : "Single");
933 931
934 return dual; 932 return dual;
935} 933}
@@ -944,7 +942,7 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
944 struct resource *r; 942 struct resource *r;
945 int res = 0, nr_channels; 943 int res = 0, nr_channels;
946 944
947 debugf0("%s: %s platform device found!\n", __func__, pdev->name); 945 edac_dbg(0, "%s platform device found!\n", pdev->name);
948 946
949 if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) { 947 if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) {
950 res = -ENOMEM; 948 res = -ENOMEM;
@@ -995,7 +993,7 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
995 pdata->edac_idx = edac_mc_idx++; 993 pdata->edac_idx = edac_mc_idx++;
996 pdata->name = pdev->name; 994 pdata->name = pdev->name;
997 995
998 mci->dev = &pdev->dev; 996 mci->pdev = &pdev->dev;
999 platform_set_drvdata(pdev, mci); 997 platform_set_drvdata(pdev, mci);
1000 mci->dev_name = dev_name(&pdev->dev); 998 mci->dev_name = dev_name(&pdev->dev);
1001 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; 999 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
@@ -1026,7 +1024,7 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
1026 cpc925_add_edac_devices(vbase); 1024 cpc925_add_edac_devices(vbase);
1027 1025
1028 /* get this far and it's successful */ 1026 /* get this far and it's successful */
1029 debugf0("%s: success\n", __func__); 1027 edac_dbg(0, "success\n");
1030 1028
1031 res = 0; 1029 res = 0;
1032 goto out; 1030 goto out;
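Besides the logging conversion, the cpc925_init_csrows() hunk above hoists the grain and dtype switch statements out of the per-channel loop: both values depend only on the csrow, so they are computed once and merely assigned per channel. (One thing the conversion preserves rather than fixes: the UECC branch in cpc925_mc_check() still passes HW_EVENT_ERR_CORRECTED, which looks like a pre-existing quirk of this driver.) A stand-alone sketch of the hoisting pattern, with invented values:

#include <stdio.h>

int main(void)
{
	int nr_channels = 2, grain, j;

	/* hoisted: grain depends on the row, not on the channel */
	switch (nr_channels) {
	case 1:			/* single channel */
		grain = 32;
		break;
	case 2:			/* dual channel */
	default:
		grain = 64;
		break;
	}

	for (j = 0; j < nr_channels; j++)	/* loop body only assigns */
		printf("channel %d: grain %d\n", j, grain);

	return 0;
}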
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 3186512c9739..a5ed6b795fd4 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -309,7 +309,7 @@ static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
309 u32 remap; 309 u32 remap;
310 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; 310 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
311 311
312 debugf3("%s()\n", __func__); 312 edac_dbg(3, "\n");
313 313
314 if (page < pvt->tolm) 314 if (page < pvt->tolm)
315 return page; 315 return page;
@@ -335,7 +335,7 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
335 int i; 335 int i;
336 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; 336 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
337 337
338 debugf3("%s()\n", __func__); 338 edac_dbg(3, "\n");
339 339
340 /* convert the addr to 4k page */ 340 /* convert the addr to 4k page */
341 page = sec1_add >> (PAGE_SHIFT - 4); 341 page = sec1_add >> (PAGE_SHIFT - 4);
@@ -371,10 +371,10 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
371 channel = !(error_one & 1); 371 channel = !(error_one & 1);
372 372
373 /* e752x mc reads 34:6 of the DRAM linear address */ 373 /* e752x mc reads 34:6 of the DRAM linear address */
374 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 374 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
375 page, offset_in_page(sec1_add << 4), sec1_syndrome, 375 page, offset_in_page(sec1_add << 4), sec1_syndrome,
376 row, channel, -1, 376 row, channel, -1,
377 "e752x CE", "", NULL); 377 "e752x CE", "");
378} 378}
379 379
380static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, 380static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
@@ -394,7 +394,7 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
394 int row; 394 int row;
395 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; 395 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
396 396
397 debugf3("%s()\n", __func__); 397 edac_dbg(3, "\n");
398 398
399 if (error_one & 0x0202) { 399 if (error_one & 0x0202) {
400 error_2b = ded_add; 400 error_2b = ded_add;
@@ -408,11 +408,11 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
408 edac_mc_find_csrow_by_page(mci, block_page); 408 edac_mc_find_csrow_by_page(mci, block_page);
409 409
410 /* e752x mc reads 34:6 of the DRAM linear address */ 410 /* e752x mc reads 34:6 of the DRAM linear address */
411 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 411 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
412 block_page, 412 block_page,
413 offset_in_page(error_2b << 4), 0, 413 offset_in_page(error_2b << 4), 0,
414 row, -1, -1, 414 row, -1, -1,
415 "e752x UE from Read", "", NULL); 415 "e752x UE from Read", "");
416 416
417 } 417 }
418 if (error_one & 0x0404) { 418 if (error_one & 0x0404) {
@@ -427,11 +427,11 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
427 edac_mc_find_csrow_by_page(mci, block_page); 427 edac_mc_find_csrow_by_page(mci, block_page);
428 428
429 /* e752x mc reads 34:6 of the DRAM linear address */ 429 /* e752x mc reads 34:6 of the DRAM linear address */
430 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 430 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
431 block_page, 431 block_page,
432 offset_in_page(error_2b << 4), 0, 432 offset_in_page(error_2b << 4), 0,
433 row, -1, -1, 433 row, -1, -1,
434 "e752x UE from Scruber", "", NULL); 434 "e752x UE from Scruber", "");
435 } 435 }
436} 436}
437 437
@@ -453,10 +453,10 @@ static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
453 if (!handle_error) 453 if (!handle_error)
454 return; 454 return;
455 455
456 debugf3("%s()\n", __func__); 456 edac_dbg(3, "\n");
457 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 457 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
458 -1, -1, -1, 458 -1, -1, -1,
459 "e752x UE log memory write", "", NULL); 459 "e752x UE log memory write", "");
460} 460}
461 461
462static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error, 462static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
@@ -982,7 +982,7 @@ static void e752x_check(struct mem_ctl_info *mci)
982{ 982{
983 struct e752x_error_info info; 983 struct e752x_error_info info;
984 984
985 debugf3("%s()\n", __func__); 985 edac_dbg(3, "\n");
986 e752x_get_error_info(mci, &info); 986 e752x_get_error_info(mci, &info);
987 e752x_process_error_info(mci, &info, 1); 987 e752x_process_error_info(mci, &info, 1);
988} 988}
@@ -1069,6 +1069,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
1069 u16 ddrcsr) 1069 u16 ddrcsr)
1070{ 1070{
1071 struct csrow_info *csrow; 1071 struct csrow_info *csrow;
1072 enum edac_type edac_mode;
1072 unsigned long last_cumul_size; 1073 unsigned long last_cumul_size;
1073 int index, mem_dev, drc_chan; 1074 int index, mem_dev, drc_chan;
1074 int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */ 1075 int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
@@ -1095,14 +1096,13 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
1095 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { 1096 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
1096 /* mem_dev 0=x8, 1=x4 */ 1097 /* mem_dev 0=x8, 1=x4 */
1097 mem_dev = (dra >> (index * 4 + 2)) & 0x3; 1098 mem_dev = (dra >> (index * 4 + 2)) & 0x3;
1098 csrow = &mci->csrows[remap_csrow_index(mci, index)]; 1099 csrow = mci->csrows[remap_csrow_index(mci, index)];
1099 1100
1100 mem_dev = (mem_dev == 2); 1101 mem_dev = (mem_dev == 2);
1101 pci_read_config_byte(pdev, E752X_DRB + index, &value); 1102 pci_read_config_byte(pdev, E752X_DRB + index, &value);
1102 /* convert a 128 or 64 MiB DRB to a page size. */ 1103 /* convert a 128 or 64 MiB DRB to a page size. */
1103 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); 1104 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
1104 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 1105 edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
1105 cumul_size);
1106 if (cumul_size == last_cumul_size) 1106 if (cumul_size == last_cumul_size)
1107 continue; /* not populated */ 1107 continue; /* not populated */
1108 1108
@@ -1111,29 +1111,29 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
1111 nr_pages = cumul_size - last_cumul_size; 1111 nr_pages = cumul_size - last_cumul_size;
1112 last_cumul_size = cumul_size; 1112 last_cumul_size = cumul_size;
1113 1113
1114 /*
1115 * if single channel or x8 devices then SECDED
1116 * if dual channel and x4 then S4ECD4ED
1117 */
1118 if (drc_ddim) {
1119 if (drc_chan && mem_dev) {
1120 edac_mode = EDAC_S4ECD4ED;
1121 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
1122 } else {
1123 edac_mode = EDAC_SECDED;
1124 mci->edac_cap |= EDAC_FLAG_SECDED;
1125 }
1126 } else
1127 edac_mode = EDAC_NONE;
1114 for (i = 0; i < csrow->nr_channels; i++) { 1128 for (i = 0; i < csrow->nr_channels; i++) {
1115 struct dimm_info *dimm = csrow->channels[i].dimm; 1129 struct dimm_info *dimm = csrow->channels[i]->dimm;
1116 1130
1117 debugf3("Initializing rank at (%i,%i)\n", index, i); 1131 edac_dbg(3, "Initializing rank at (%i,%i)\n", index, i);
1118 dimm->nr_pages = nr_pages / csrow->nr_channels; 1132 dimm->nr_pages = nr_pages / csrow->nr_channels;
1119 dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */ 1133 dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
1120 dimm->mtype = MEM_RDDR; /* only one type supported */ 1134 dimm->mtype = MEM_RDDR; /* only one type supported */
1121 dimm->dtype = mem_dev ? DEV_X4 : DEV_X8; 1135 dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
1122 1136 dimm->edac_mode = edac_mode;
1123 /*
1124 * if single channel or x8 devices then SECDED
1125 * if dual channel and x4 then S4ECD4ED
1126 */
1127 if (drc_ddim) {
1128 if (drc_chan && mem_dev) {
1129 dimm->edac_mode = EDAC_S4ECD4ED;
1130 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
1131 } else {
1132 dimm->edac_mode = EDAC_SECDED;
1133 mci->edac_cap |= EDAC_FLAG_SECDED;
1134 }
1135 } else
1136 dimm->edac_mode = EDAC_NONE;
1137 } 1137 }
1138 } 1138 }
1139} 1139}
@@ -1269,8 +1269,8 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1269 int drc_chan; /* Number of channels 0=1chan,1=2chan */ 1269 int drc_chan; /* Number of channels 0=1chan,1=2chan */
1270 struct e752x_error_info discard; 1270 struct e752x_error_info discard;
1271 1271
1272 debugf0("%s(): mci\n", __func__); 1272 edac_dbg(0, "mci\n");
1273 debugf0("Starting Probe1\n"); 1273 edac_dbg(0, "Starting Probe1\n");
1274 1274
1275 /* check to see if device 0 function 1 is enabled; if it isn't, we 1275 /* check to see if device 0 function 1 is enabled; if it isn't, we
1276 * assume the BIOS has reserved it for a reason and is expecting 1276 * assume the BIOS has reserved it for a reason and is expecting
@@ -1300,7 +1300,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1300 if (mci == NULL) 1300 if (mci == NULL)
1301 return -ENOMEM; 1301 return -ENOMEM;
1302 1302
1303 debugf3("%s(): init mci\n", __func__); 1303 edac_dbg(3, "init mci\n");
1304 mci->mtype_cap = MEM_FLAG_RDDR; 1304 mci->mtype_cap = MEM_FLAG_RDDR;
1305 /* 3100 IMCH supports SECDEC only */ 1305 /* 3100 IMCH supports SECDEC only */
1306 mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED : 1306 mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
@@ -1308,9 +1308,9 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1308 /* FIXME - what if different memory types are in different csrows? */ 1308 /* FIXME - what if different memory types are in different csrows? */
1309 mci->mod_name = EDAC_MOD_STR; 1309 mci->mod_name = EDAC_MOD_STR;
1310 mci->mod_ver = E752X_REVISION; 1310 mci->mod_ver = E752X_REVISION;
1311 mci->dev = &pdev->dev; 1311 mci->pdev = &pdev->dev;
1312 1312
1313 debugf3("%s(): init pvt\n", __func__); 1313 edac_dbg(3, "init pvt\n");
1314 pvt = (struct e752x_pvt *)mci->pvt_info; 1314 pvt = (struct e752x_pvt *)mci->pvt_info;
1315 pvt->dev_info = &e752x_devs[dev_idx]; 1315 pvt->dev_info = &e752x_devs[dev_idx];
1316 pvt->mc_symmetric = ((ddrcsr & 0x10) != 0); 1316 pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
@@ -1320,7 +1320,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1320 return -ENODEV; 1320 return -ENODEV;
1321 } 1321 }
1322 1322
1323 debugf3("%s(): more mci init\n", __func__); 1323 edac_dbg(3, "more mci init\n");
1324 mci->ctl_name = pvt->dev_info->ctl_name; 1324 mci->ctl_name = pvt->dev_info->ctl_name;
1325 mci->dev_name = pci_name(pdev); 1325 mci->dev_name = pci_name(pdev);
1326 mci->edac_check = e752x_check; 1326 mci->edac_check = e752x_check;
@@ -1342,7 +1342,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1342 mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */ 1342 mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
1343 else 1343 else
1344 mci->edac_cap |= EDAC_FLAG_NONE; 1344 mci->edac_cap |= EDAC_FLAG_NONE;
1345 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); 1345 edac_dbg(3, "tolm, remapbase, remaplimit\n");
1346 1346
1347 /* load the top of low memory, remap base, and remap limit vars */ 1347 /* load the top of low memory, remap base, and remap limit vars */
1348 pci_read_config_word(pdev, E752X_TOLM, &pci_data); 1348 pci_read_config_word(pdev, E752X_TOLM, &pci_data);
@@ -1359,7 +1359,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1359 * type of memory controller. The ID is therefore hardcoded to 0. 1359 * type of memory controller. The ID is therefore hardcoded to 0.
1360 */ 1360 */
1361 if (edac_mc_add_mc(mci)) { 1361 if (edac_mc_add_mc(mci)) {
1362 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 1362 edac_dbg(3, "failed edac_mc_add_mc()\n");
1363 goto fail; 1363 goto fail;
1364 } 1364 }
1365 1365
@@ -1377,7 +1377,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1377 } 1377 }
1378 1378
1379 /* get this far and it's successful */ 1379 /* get this far and it's successful */
1380 debugf3("%s(): success\n", __func__); 1380 edac_dbg(3, "success\n");
1381 return 0; 1381 return 0;
1382 1382
1383fail: 1383fail:
@@ -1393,7 +1393,7 @@ fail:
1393static int __devinit e752x_init_one(struct pci_dev *pdev, 1393static int __devinit e752x_init_one(struct pci_dev *pdev,
1394 const struct pci_device_id *ent) 1394 const struct pci_device_id *ent)
1395{ 1395{
1396 debugf0("%s()\n", __func__); 1396 edac_dbg(0, "\n");
1397 1397
1398 /* wake up and enable device */ 1398 /* wake up and enable device */
1399 if (pci_enable_device(pdev) < 0) 1399 if (pci_enable_device(pdev) < 0)
@@ -1407,7 +1407,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
1407 struct mem_ctl_info *mci; 1407 struct mem_ctl_info *mci;
1408 struct e752x_pvt *pvt; 1408 struct e752x_pvt *pvt;
1409 1409
1410 debugf0("%s()\n", __func__); 1410 edac_dbg(0, "\n");
1411 1411
1412 if (e752x_pci) 1412 if (e752x_pci)
1413 edac_pci_release_generic_ctl(e752x_pci); 1413 edac_pci_release_generic_ctl(e752x_pci);
@@ -1453,7 +1453,7 @@ static int __init e752x_init(void)
1453{ 1453{
1454 int pci_rc; 1454 int pci_rc;
1455 1455
1456 debugf3("%s()\n", __func__); 1456 edac_dbg(3, "\n");
1457 1457
1458 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1458 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1459 opstate_init(); 1459 opstate_init();
@@ -1464,7 +1464,7 @@ static int __init e752x_init(void)
1464 1464
1465static void __exit e752x_exit(void) 1465static void __exit e752x_exit(void)
1466{ 1466{
1467 debugf3("%s()\n", __func__); 1467 edac_dbg(3, "\n");
1468 pci_unregister_driver(&e752x_driver); 1468 pci_unregister_driver(&e752x_driver);
1469} 1469}
1470 1470
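Every edac_mc_handle_error() call in these drivers gains a "1" right after mci and drops the trailing NULL: the core API (see the edac_core.h hunk further down) now takes an explicit error count and no longer threads an opaque mcelog pointer through. The sketch below is a compilable stand-in, not kernel code — the stub body just prints and the values are placeholders; only the parameter order mirrors the new prototype:

#include <stdio.h>

enum hw_event_mc_err_type { HW_EVENT_ERR_CORRECTED, HW_EVENT_ERR_UNCORRECTED };

static void edac_mc_handle_error(enum hw_event_mc_err_type type, void *mci,
				 unsigned short error_count,
				 unsigned long pfn, unsigned long offset,
				 unsigned long syndrome,
				 int top_layer, int mid_layer, int low_layer,
				 const char *msg, const char *other_detail)
{
	printf("%s x%u page=0x%lx+0x%lx synd=0x%lx layers=%d/%d/%d: %s%s\n",
	       type == HW_EVENT_ERR_CORRECTED ? "CE" : "UE",
	       (unsigned int)error_count, pfn, offset, syndrome,
	       top_layer, mid_layer, low_layer, msg, other_detail);
}

int main(void)
{
	void *mci = NULL;

	/* converted shape: count right after mci, no trailing NULL */
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
			     0x1234, 0x40, 0x5a,
			     0, 1, -1,	/* row 0, channel 1, low layer unused */
			     "sketch CE", "");
	return 0;
}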
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 9a9c1a546797..9ff57f361a43 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -166,7 +166,7 @@ static const struct e7xxx_dev_info e7xxx_devs[] = {
166/* FIXME - is this valid for both SECDED and S4ECD4ED? */ 166/* FIXME - is this valid for both SECDED and S4ECD4ED? */
167static inline int e7xxx_find_channel(u16 syndrome) 167static inline int e7xxx_find_channel(u16 syndrome)
168{ 168{
169 debugf3("%s()\n", __func__); 169 edac_dbg(3, "\n");
170 170
171 if ((syndrome & 0xff00) == 0) 171 if ((syndrome & 0xff00) == 0)
172 return 0; 172 return 0;
@@ -186,7 +186,7 @@ static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
186 u32 remap; 186 u32 remap;
187 struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info; 187 struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info;
188 188
189 debugf3("%s()\n", __func__); 189 edac_dbg(3, "\n");
190 190
191 if ((page < pvt->tolm) || 191 if ((page < pvt->tolm) ||
192 ((page >= 0x100000) && (page < pvt->remapbase))) 192 ((page >= 0x100000) && (page < pvt->remapbase)))
@@ -208,7 +208,7 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
208 int row; 208 int row;
209 int channel; 209 int channel;
210 210
211 debugf3("%s()\n", __func__); 211 edac_dbg(3, "\n");
212 /* read the error address */ 212 /* read the error address */
213 error_1b = info->dram_celog_add; 213 error_1b = info->dram_celog_add;
214 /* FIXME - should use PAGE_SHIFT */ 214 /* FIXME - should use PAGE_SHIFT */
@@ -219,15 +219,15 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
219 row = edac_mc_find_csrow_by_page(mci, page); 219 row = edac_mc_find_csrow_by_page(mci, page);
220 /* convert syndrome to channel */ 220 /* convert syndrome to channel */
221 channel = e7xxx_find_channel(syndrome); 221 channel = e7xxx_find_channel(syndrome);
222 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, page, 0, syndrome, 222 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, page, 0, syndrome,
223 row, channel, -1, "e7xxx CE", "", NULL); 223 row, channel, -1, "e7xxx CE", "");
224} 224}
225 225
226static void process_ce_no_info(struct mem_ctl_info *mci) 226static void process_ce_no_info(struct mem_ctl_info *mci)
227{ 227{
228 debugf3("%s()\n", __func__); 228 edac_dbg(3, "\n");
229 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0, -1, -1, -1, 229 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
230 "e7xxx CE log register overflow", "", NULL); 230 "e7xxx CE log register overflow", "");
231} 231}
232 232
233static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info) 233static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
@@ -235,23 +235,23 @@ static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
235 u32 error_2b, block_page; 235 u32 error_2b, block_page;
236 int row; 236 int row;
237 237
238 debugf3("%s()\n", __func__); 238 edac_dbg(3, "\n");
239 /* read the error address */ 239 /* read the error address */
240 error_2b = info->dram_uelog_add; 240 error_2b = info->dram_uelog_add;
241 /* FIXME - should use PAGE_SHIFT */ 241 /* FIXME - should use PAGE_SHIFT */
242 block_page = error_2b >> 6; /* convert to 4k address */ 242 block_page = error_2b >> 6; /* convert to 4k address */
243 row = edac_mc_find_csrow_by_page(mci, block_page); 243 row = edac_mc_find_csrow_by_page(mci, block_page);
244 244
245 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, block_page, 0, 0, 245 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, block_page, 0, 0,
246 row, -1, -1, "e7xxx UE", "", NULL); 246 row, -1, -1, "e7xxx UE", "");
247} 247}
248 248
249static void process_ue_no_info(struct mem_ctl_info *mci) 249static void process_ue_no_info(struct mem_ctl_info *mci)
250{ 250{
251 debugf3("%s()\n", __func__); 251 edac_dbg(3, "\n");
252 252
253 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1, 253 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
254 "e7xxx UE log register overflow", "", NULL); 254 "e7xxx UE log register overflow", "");
255} 255}
256 256
257static void e7xxx_get_error_info(struct mem_ctl_info *mci, 257static void e7xxx_get_error_info(struct mem_ctl_info *mci,
@@ -334,7 +334,7 @@ static void e7xxx_check(struct mem_ctl_info *mci)
334{ 334{
335 struct e7xxx_error_info info; 335 struct e7xxx_error_info info;
336 336
337 debugf3("%s()\n", __func__); 337 edac_dbg(3, "\n");
338 e7xxx_get_error_info(mci, &info); 338 e7xxx_get_error_info(mci, &info);
339 e7xxx_process_error_info(mci, &info, 1); 339 e7xxx_process_error_info(mci, &info, 1);
340} 340}
@@ -362,6 +362,7 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
362 int drc_chan, drc_drbg, drc_ddim, mem_dev; 362 int drc_chan, drc_drbg, drc_ddim, mem_dev;
363 struct csrow_info *csrow; 363 struct csrow_info *csrow;
364 struct dimm_info *dimm; 364 struct dimm_info *dimm;
365 enum edac_type edac_mode;
365 366
366 pci_read_config_dword(pdev, E7XXX_DRA, &dra); 367 pci_read_config_dword(pdev, E7XXX_DRA, &dra);
367 drc_chan = dual_channel_active(drc, dev_idx); 368 drc_chan = dual_channel_active(drc, dev_idx);
@@ -377,13 +378,12 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
377 for (index = 0; index < mci->nr_csrows; index++) { 378 for (index = 0; index < mci->nr_csrows; index++) {
378 /* mem_dev 0=x8, 1=x4 */ 379 /* mem_dev 0=x8, 1=x4 */
379 mem_dev = (dra >> (index * 4 + 3)) & 0x1; 380 mem_dev = (dra >> (index * 4 + 3)) & 0x1;
380 csrow = &mci->csrows[index]; 381 csrow = mci->csrows[index];
381 382
382 pci_read_config_byte(pdev, E7XXX_DRB + index, &value); 383 pci_read_config_byte(pdev, E7XXX_DRB + index, &value);
383 /* convert a 64 or 32 MiB DRB to a page size. */ 384 /* convert a 64 or 32 MiB DRB to a page size. */
384 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); 385 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
385 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 386 edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
386 cumul_size);
387 if (cumul_size == last_cumul_size) 387 if (cumul_size == last_cumul_size)
388 continue; /* not populated */ 388 continue; /* not populated */
389 389
@@ -392,28 +392,29 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
392 nr_pages = cumul_size - last_cumul_size; 392 nr_pages = cumul_size - last_cumul_size;
393 last_cumul_size = cumul_size; 393 last_cumul_size = cumul_size;
394 394
395 /*
396 * if single channel or x8 devices then SECDED
397 * if dual channel and x4 then S4ECD4ED
398 */
399 if (drc_ddim) {
400 if (drc_chan && mem_dev) {
401 edac_mode = EDAC_S4ECD4ED;
402 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
403 } else {
404 edac_mode = EDAC_SECDED;
405 mci->edac_cap |= EDAC_FLAG_SECDED;
406 }
407 } else
408 edac_mode = EDAC_NONE;
409
395 for (j = 0; j < drc_chan + 1; j++) { 410 for (j = 0; j < drc_chan + 1; j++) {
396 dimm = csrow->channels[j].dimm; 411 dimm = csrow->channels[j]->dimm;
397 412
398 dimm->nr_pages = nr_pages / (drc_chan + 1); 413 dimm->nr_pages = nr_pages / (drc_chan + 1);
399 dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */ 414 dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
400 dimm->mtype = MEM_RDDR; /* only one type supported */ 415 dimm->mtype = MEM_RDDR; /* only one type supported */
401 dimm->dtype = mem_dev ? DEV_X4 : DEV_X8; 416 dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
402 417 dimm->edac_mode = edac_mode;
403 /*
404 * if single channel or x8 devices then SECDED
405 * if dual channel and x4 then S4ECD4ED
406 */
407 if (drc_ddim) {
408 if (drc_chan && mem_dev) {
409 dimm->edac_mode = EDAC_S4ECD4ED;
410 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
411 } else {
412 dimm->edac_mode = EDAC_SECDED;
413 mci->edac_cap |= EDAC_FLAG_SECDED;
414 }
415 } else
416 dimm->edac_mode = EDAC_NONE;
417 } 418 }
418 } 419 }
419} 420}
@@ -428,7 +429,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
428 int drc_chan; 429 int drc_chan;
429 struct e7xxx_error_info discard; 430 struct e7xxx_error_info discard;
430 431
431 debugf0("%s(): mci\n", __func__); 432 edac_dbg(0, "mci\n");
432 433
433 pci_read_config_dword(pdev, E7XXX_DRC, &drc); 434 pci_read_config_dword(pdev, E7XXX_DRC, &drc);
434 435
@@ -451,15 +452,15 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
451 if (mci == NULL) 452 if (mci == NULL)
452 return -ENOMEM; 453 return -ENOMEM;
453 454
454 debugf3("%s(): init mci\n", __func__); 455 edac_dbg(3, "init mci\n");
455 mci->mtype_cap = MEM_FLAG_RDDR; 456 mci->mtype_cap = MEM_FLAG_RDDR;
456 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | 457 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
457 EDAC_FLAG_S4ECD4ED; 458 EDAC_FLAG_S4ECD4ED;
458 /* FIXME - what if different memory types are in different csrows? */ 459 /* FIXME - what if different memory types are in different csrows? */
459 mci->mod_name = EDAC_MOD_STR; 460 mci->mod_name = EDAC_MOD_STR;
460 mci->mod_ver = E7XXX_REVISION; 461 mci->mod_ver = E7XXX_REVISION;
461 mci->dev = &pdev->dev; 462 mci->pdev = &pdev->dev;
462 debugf3("%s(): init pvt\n", __func__); 463 edac_dbg(3, "init pvt\n");
463 pvt = (struct e7xxx_pvt *)mci->pvt_info; 464 pvt = (struct e7xxx_pvt *)mci->pvt_info;
464 pvt->dev_info = &e7xxx_devs[dev_idx]; 465 pvt->dev_info = &e7xxx_devs[dev_idx];
465 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, 466 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
@@ -472,14 +473,14 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
472 goto fail0; 473 goto fail0;
473 } 474 }
474 475
475 debugf3("%s(): more mci init\n", __func__); 476 edac_dbg(3, "more mci init\n");
476 mci->ctl_name = pvt->dev_info->ctl_name; 477 mci->ctl_name = pvt->dev_info->ctl_name;
477 mci->dev_name = pci_name(pdev); 478 mci->dev_name = pci_name(pdev);
478 mci->edac_check = e7xxx_check; 479 mci->edac_check = e7xxx_check;
479 mci->ctl_page_to_phys = ctl_page_to_phys; 480 mci->ctl_page_to_phys = ctl_page_to_phys;
480 e7xxx_init_csrows(mci, pdev, dev_idx, drc); 481 e7xxx_init_csrows(mci, pdev, dev_idx, drc);
481 mci->edac_cap |= EDAC_FLAG_NONE; 482 mci->edac_cap |= EDAC_FLAG_NONE;
482 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); 483 edac_dbg(3, "tolm, remapbase, remaplimit\n");
483 /* load the top of low memory, remap base, and remap limit vars */ 484 /* load the top of low memory, remap base, and remap limit vars */
484 pci_read_config_word(pdev, E7XXX_TOLM, &pci_data); 485 pci_read_config_word(pdev, E7XXX_TOLM, &pci_data);
485 pvt->tolm = ((u32) pci_data) << 4; 486 pvt->tolm = ((u32) pci_data) << 4;
@@ -498,7 +499,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
498 * type of memory controller. The ID is therefore hardcoded to 0. 499 * type of memory controller. The ID is therefore hardcoded to 0.
499 */ 500 */
500 if (edac_mc_add_mc(mci)) { 501 if (edac_mc_add_mc(mci)) {
501 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 502 edac_dbg(3, "failed edac_mc_add_mc()\n");
502 goto fail1; 503 goto fail1;
503 } 504 }
504 505
@@ -514,7 +515,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
514 } 515 }
515 516
516 /* get this far and it's successful */ 517 /* get this far and it's successful */
517 debugf3("%s(): success\n", __func__); 518 edac_dbg(3, "success\n");
518 return 0; 519 return 0;
519 520
520fail1: 521fail1:
@@ -530,7 +531,7 @@ fail0:
530static int __devinit e7xxx_init_one(struct pci_dev *pdev, 531static int __devinit e7xxx_init_one(struct pci_dev *pdev,
531 const struct pci_device_id *ent) 532 const struct pci_device_id *ent)
532{ 533{
533 debugf0("%s()\n", __func__); 534 edac_dbg(0, "\n");
534 535
535 /* wake up and enable device */ 536 /* wake up and enable device */
536 return pci_enable_device(pdev) ? 537 return pci_enable_device(pdev) ?
@@ -542,7 +543,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
542 struct mem_ctl_info *mci; 543 struct mem_ctl_info *mci;
543 struct e7xxx_pvt *pvt; 544 struct e7xxx_pvt *pvt;
544 545
545 debugf0("%s()\n", __func__); 546 edac_dbg(0, "\n");
546 547
547 if (e7xxx_pci) 548 if (e7xxx_pci)
548 edac_pci_release_generic_ctl(e7xxx_pci); 549 edac_pci_release_generic_ctl(e7xxx_pci);
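The call-site pattern repeated across all four drivers so far: debugfN("%s: ...", __func__, ...) becomes edac_dbg(N, "...", ...), because the new macro — defined in the edac_core.h hunk that follows — prepends "%s: " and __func__ itself. A stand-alone model of that behavior, assuming only what the header hunk shows and using printf in place of edac_printk():

#include <stdio.h>

static int edac_debug_level = 2;

#define edac_dbg(level, fmt, ...)					\
do {									\
	if ((level) <= edac_debug_level)				\
		printf("EDAC DEBUG: %s: " fmt, __func__,		\
		       ##__VA_ARGS__);					\
} while (0)

static void my_probe(void)
{
	/* old style: debugf0("%s(): success\n", __func__); */
	edac_dbg(0, "success\n");  /* -> "EDAC DEBUG: my_probe: success" */
}

int main(void)
{
	my_probe();
	edac_dbg(3, "not printed: level 3 > %d\n", edac_debug_level);
	return 0;
}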
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 117490d4f835..23bb99fa44f1 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -71,26 +71,21 @@ extern const char *edac_mem_types[];
71#ifdef CONFIG_EDAC_DEBUG 71#ifdef CONFIG_EDAC_DEBUG
72extern int edac_debug_level; 72extern int edac_debug_level;
73 73
74#define edac_debug_printk(level, fmt, arg...) \ 74#define edac_dbg(level, fmt, ...) \
75 do { \ 75do { \
76 if (level <= edac_debug_level) \ 76 if (level <= edac_debug_level) \
77 edac_printk(KERN_DEBUG, EDAC_DEBUG, \ 77 edac_printk(KERN_DEBUG, EDAC_DEBUG, \
78 "%s: " fmt, __func__, ##arg); \ 78 "%s: " fmt, __func__, ##__VA_ARGS__); \
79 } while (0) 79} while (0)
80
81#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
82#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
83#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ )
84#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ )
85#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ )
86 80
87#else /* !CONFIG_EDAC_DEBUG */ 81#else /* !CONFIG_EDAC_DEBUG */
88 82
89#define debugf0( ... ) 83#define edac_dbg(level, fmt, ...) \
90#define debugf1( ... ) 84do { \
91#define debugf2( ... ) 85 if (0) \
92#define debugf3( ... ) 86 edac_printk(KERN_DEBUG, EDAC_DEBUG, \
93#define debugf4( ... ) 87 "%s: " fmt, __func__, ##__VA_ARGS__); \
88} while (0)
94 89
95#endif /* !CONFIG_EDAC_DEBUG */ 90#endif /* !CONFIG_EDAC_DEBUG */
96 91
@@ -460,15 +455,15 @@ extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
460 unsigned long page); 455 unsigned long page);
461void edac_mc_handle_error(const enum hw_event_mc_err_type type, 456void edac_mc_handle_error(const enum hw_event_mc_err_type type,
462 struct mem_ctl_info *mci, 457 struct mem_ctl_info *mci,
458 const u16 error_count,
463 const unsigned long page_frame_number, 459 const unsigned long page_frame_number,
464 const unsigned long offset_in_page, 460 const unsigned long offset_in_page,
465 const unsigned long syndrome, 461 const unsigned long syndrome,
466 const int layer0, 462 const int top_layer,
467 const int layer1, 463 const int mid_layer,
468 const int layer2, 464 const int low_layer,
469 const char *msg, 465 const char *msg,
470 const char *other_detail, 466 const char *other_detail);
471 const void *mcelog);
472 467
473/* 468/*
474 * edac_device APIs 469 * edac_device APIs
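One detail of the header change worth calling out: the old debugfN stubs for !CONFIG_EDAC_DEBUG expanded to nothing, so a broken debug-only format string compiled cleanly until someone turned debugging on. The replacement keeps the call under "if (0)", letting the compiler type-check the format and arguments and then discard the dead branch. A small stand-alone illustration of the difference; dbg_off and dbg_gone are invented names:

#include <stdio.h>

/* new-style stub: the call survives to type-checking as dead code */
#define dbg_off(fmt, ...)					\
do {								\
	if (0)							\
		printf(fmt, ##__VA_ARGS__);			\
} while (0)

/* old-style stub: the arguments vanish before the compiler sees them */
#define dbg_gone(fmt, ...)

int main(void)
{
	int err = -12;

	dbg_off("err=%d\n", err);	  /* prints nothing, but a bad
					   * "%s" here would still warn */
	dbg_gone("bad format %s\n", err); /* accepted silently: never checked */
	return 0;
}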
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index ee3f1f810c1e..211021dfec73 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -40,12 +40,13 @@ static LIST_HEAD(edac_device_list);
40#ifdef CONFIG_EDAC_DEBUG 40#ifdef CONFIG_EDAC_DEBUG
41static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev) 41static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
42{ 42{
43 debugf3("\tedac_dev = %p dev_idx=%d \n", edac_dev, edac_dev->dev_idx); 43 edac_dbg(3, "\tedac_dev = %p dev_idx=%d\n",
44 debugf4("\tedac_dev->edac_check = %p\n", edac_dev->edac_check); 44 edac_dev, edac_dev->dev_idx);
45 debugf3("\tdev = %p\n", edac_dev->dev); 45 edac_dbg(4, "\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
46 debugf3("\tmod_name:ctl_name = %s:%s\n", 46 edac_dbg(3, "\tdev = %p\n", edac_dev->dev);
47 edac_dev->mod_name, edac_dev->ctl_name); 47 edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
48 debugf3("\tpvt_info = %p\n\n", edac_dev->pvt_info); 48 edac_dev->mod_name, edac_dev->ctl_name);
49 edac_dbg(3, "\tpvt_info = %p\n\n", edac_dev->pvt_info);
49} 50}
50#endif /* CONFIG_EDAC_DEBUG */ 51#endif /* CONFIG_EDAC_DEBUG */
51 52
@@ -82,8 +83,7 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
82 void *pvt, *p; 83 void *pvt, *p;
83 int err; 84 int err;
84 85
85 debugf4("%s() instances=%d blocks=%d\n", 86 edac_dbg(4, "instances=%d blocks=%d\n", nr_instances, nr_blocks);
86 __func__, nr_instances, nr_blocks);
87 87
88 /* Calculate the size of memory we need to allocate AND 88 /* Calculate the size of memory we need to allocate AND
89 * determine the offsets of the various item arrays 89 * determine the offsets of the various item arrays
@@ -156,8 +156,8 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
156 /* Name of this edac device */ 156 /* Name of this edac device */
157 snprintf(dev_ctl->name,sizeof(dev_ctl->name),"%s",edac_device_name); 157 snprintf(dev_ctl->name,sizeof(dev_ctl->name),"%s",edac_device_name);
158 158
159 debugf4("%s() edac_dev=%p next after end=%p\n", 159 edac_dbg(4, "edac_dev=%p next after end=%p\n",
160 __func__, dev_ctl, pvt + sz_private ); 160 dev_ctl, pvt + sz_private);
161 161
162 /* Initialize every Instance */ 162 /* Initialize every Instance */
163 for (instance = 0; instance < nr_instances; instance++) { 163 for (instance = 0; instance < nr_instances; instance++) {
@@ -178,10 +178,8 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
178 snprintf(blk->name, sizeof(blk->name), 178 snprintf(blk->name, sizeof(blk->name),
179 "%s%d", edac_block_name, block+offset_value); 179 "%s%d", edac_block_name, block+offset_value);
180 180
181 debugf4("%s() instance=%d inst_p=%p block=#%d " 181 edac_dbg(4, "instance=%d inst_p=%p block=#%d block_p=%p name='%s'\n",
182 "block_p=%p name='%s'\n", 182 instance, inst, block, blk, blk->name);
183 __func__, instance, inst, block,
184 blk, blk->name);
185 183
186 /* if there are NO attributes OR no attribute pointer 184 /* if there are NO attributes OR no attribute pointer
187 * then continue on to next block iteration 185 * then continue on to next block iteration
@@ -194,8 +192,8 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
194 attrib_p = &dev_attrib[block*nr_instances*nr_attrib]; 192 attrib_p = &dev_attrib[block*nr_instances*nr_attrib];
195 blk->block_attributes = attrib_p; 193 blk->block_attributes = attrib_p;
196 194
197 debugf4("%s() THIS BLOCK_ATTRIB=%p\n", 195 edac_dbg(4, "THIS BLOCK_ATTRIB=%p\n",
198 __func__, blk->block_attributes); 196 blk->block_attributes);
199 197
200 /* Initialize every user specified attribute in this 198 /* Initialize every user specified attribute in this
201 * block with the data the caller passed in 199 * block with the data the caller passed in
@@ -214,11 +212,10 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
214 212
215 attrib->block = blk; /* up link */ 213 attrib->block = blk; /* up link */
216 214
217 debugf4("%s() alloc-attrib=%p attrib_name='%s' " 215 edac_dbg(4, "alloc-attrib=%p attrib_name='%s' attrib-spec=%p spec-name=%s\n",
218 "attrib-spec=%p spec-name=%s\n", 216 attrib, attrib->attr.name,
219 __func__, attrib, attrib->attr.name, 217 &attrib_spec[attr],
220 &attrib_spec[attr], 218 attrib_spec[attr].attr.name
221 attrib_spec[attr].attr.name
222 ); 219 );
223 } 220 }
224 } 221 }
@@ -273,7 +270,7 @@ static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
273 struct edac_device_ctl_info *edac_dev; 270 struct edac_device_ctl_info *edac_dev;
274 struct list_head *item; 271 struct list_head *item;
275 272
276 debugf0("%s()\n", __func__); 273 edac_dbg(0, "\n");
277 274
278 list_for_each(item, &edac_device_list) { 275 list_for_each(item, &edac_device_list) {
279 edac_dev = list_entry(item, struct edac_device_ctl_info, link); 276 edac_dev = list_entry(item, struct edac_device_ctl_info, link);
@@ -408,7 +405,7 @@ static void edac_device_workq_function(struct work_struct *work_req)
408void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, 405void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
409 unsigned msec) 406 unsigned msec)
410{ 407{
411 debugf0("%s()\n", __func__); 408 edac_dbg(0, "\n");
412 409
413 /* take the arg 'msec' and set it into the control structure 410 /* take the arg 'msec' and set it into the control structure
414 * to used in the time period calculation 411 * to used in the time period calculation
@@ -496,7 +493,7 @@ EXPORT_SYMBOL_GPL(edac_device_alloc_index);
496 */ 493 */
497int edac_device_add_device(struct edac_device_ctl_info *edac_dev) 494int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
498{ 495{
499 debugf0("%s()\n", __func__); 496 edac_dbg(0, "\n");
500 497
501#ifdef CONFIG_EDAC_DEBUG 498#ifdef CONFIG_EDAC_DEBUG
502 if (edac_debug_level >= 3) 499 if (edac_debug_level >= 3)
@@ -570,7 +567,7 @@ struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
570{ 567{
571 struct edac_device_ctl_info *edac_dev; 568 struct edac_device_ctl_info *edac_dev;
572 569
573 debugf0("%s()\n", __func__); 570 edac_dbg(0, "\n");
574 571
575 mutex_lock(&device_ctls_mutex); 572 mutex_lock(&device_ctls_mutex);
576 573
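A side effect of no longer passing __func__ by hand is that several previously wrapped format strings now fit on one line, as in the instance/block messages in edac_device_alloc_ctl_info() above. Keeping the literal unsplit is common kernel practice: someone grepping the source for a logged message should find it in one piece. A trivial stand-alone illustration with placeholder values:

#include <stdio.h>

int main(void)
{
	/* wrapped: grepping the source for "block=#%d block_p"
	 * cannot match across the split literal */
	printf("instance=%d inst_p=%p block=#%d "
	       "block_p=%p name='%s'\n", 0, (void *)0, 2, (void *)0, "blk2");

	/* unwrapped: the whole message is one searchable string */
	printf("instance=%d inst_p=%p block=#%d block_p=%p name='%s'\n",
	       0, (void *)0, 2, (void *)0, "blk2");
	return 0;
}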
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index b4ea185ccebf..fb68a06ad683 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -202,7 +202,7 @@ static void edac_device_ctrl_master_release(struct kobject *kobj)
202{ 202{
203 struct edac_device_ctl_info *edac_dev = to_edacdev(kobj); 203 struct edac_device_ctl_info *edac_dev = to_edacdev(kobj);
204 204
205 debugf4("%s() control index=%d\n", __func__, edac_dev->dev_idx); 205 edac_dbg(4, "control index=%d\n", edac_dev->dev_idx);
206 206
207 /* decrement the EDAC CORE module ref count */ 207 /* decrement the EDAC CORE module ref count */
208 module_put(edac_dev->owner); 208 module_put(edac_dev->owner);
@@ -233,12 +233,12 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
233 struct bus_type *edac_subsys; 233 struct bus_type *edac_subsys;
234 int err; 234 int err;
235 235
236 debugf1("%s()\n", __func__); 236 edac_dbg(1, "\n");
237 237
238 /* get the /sys/devices/system/edac reference */ 238 /* get the /sys/devices/system/edac reference */
239 edac_subsys = edac_get_sysfs_subsys(); 239 edac_subsys = edac_get_sysfs_subsys();
240 if (edac_subsys == NULL) { 240 if (edac_subsys == NULL) {
241 debugf1("%s() no edac_subsys error\n", __func__); 241 edac_dbg(1, "no edac_subsys error\n");
242 err = -ENODEV; 242 err = -ENODEV;
243 goto err_out; 243 goto err_out;
244 } 244 }
@@ -264,8 +264,8 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
264 &edac_subsys->dev_root->kobj, 264 &edac_subsys->dev_root->kobj,
265 "%s", edac_dev->name); 265 "%s", edac_dev->name);
266 if (err) { 266 if (err) {
267 debugf1("%s()Failed to register '.../edac/%s'\n", 267 edac_dbg(1, "Failed to register '.../edac/%s'\n",
268 __func__, edac_dev->name); 268 edac_dev->name);
269 goto err_kobj_reg; 269 goto err_kobj_reg;
270 } 270 }
271 kobject_uevent(&edac_dev->kobj, KOBJ_ADD); 271 kobject_uevent(&edac_dev->kobj, KOBJ_ADD);
@@ -274,8 +274,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
274 * edac_device_unregister_sysfs_main_kobj() must be used 274 * edac_device_unregister_sysfs_main_kobj() must be used
275 */ 275 */
276 276
277 debugf4("%s() Registered '.../edac/%s' kobject\n", 277 edac_dbg(4, "Registered '.../edac/%s' kobject\n", edac_dev->name);
278 __func__, edac_dev->name);
279 278
280 return 0; 279 return 0;
281 280
@@ -296,9 +295,8 @@ err_out:
296 */ 295 */
297void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev) 296void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev)
298{ 297{
299 debugf0("%s()\n", __func__); 298 edac_dbg(0, "\n");
300 debugf4("%s() name of kobject is: %s\n", 299 edac_dbg(4, "name of kobject is: %s\n", kobject_name(&dev->kobj));
301 __func__, kobject_name(&dev->kobj));
302 300
303 /* 301 /*
304 * Unregister the edac device's kobject and 302 * Unregister the edac device's kobject and
@@ -336,7 +334,7 @@ static void edac_device_ctrl_instance_release(struct kobject *kobj)
336{ 334{
337 struct edac_device_instance *instance; 335 struct edac_device_instance *instance;
338 336
339 debugf1("%s()\n", __func__); 337 edac_dbg(1, "\n");
340 338
341 /* map from this kobj to the main control struct 339 /* map from this kobj to the main control struct
342 * and then dec the main kobj count 340 * and then dec the main kobj count
@@ -442,7 +440,7 @@ static void edac_device_ctrl_block_release(struct kobject *kobj)
442{ 440{
443 struct edac_device_block *block; 441 struct edac_device_block *block;
444 442
445 debugf1("%s()\n", __func__); 443 edac_dbg(1, "\n");
446 444
447 /* get the container of the kobj */ 445 /* get the container of the kobj */
448 block = to_block(kobj); 446 block = to_block(kobj);
@@ -524,10 +522,10 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
524 struct edac_dev_sysfs_block_attribute *sysfs_attrib; 522 struct edac_dev_sysfs_block_attribute *sysfs_attrib;
525 struct kobject *main_kobj; 523 struct kobject *main_kobj;
526 524
527 debugf4("%s() Instance '%s' inst_p=%p block '%s' block_p=%p\n", 525 edac_dbg(4, "Instance '%s' inst_p=%p block '%s' block_p=%p\n",
528 __func__, instance->name, instance, block->name, block); 526 instance->name, instance, block->name, block);
529 debugf4("%s() block kobj=%p block kobj->parent=%p\n", 527 edac_dbg(4, "block kobj=%p block kobj->parent=%p\n",
530 __func__, &block->kobj, &block->kobj.parent); 528 &block->kobj, &block->kobj.parent);
531 529
532 /* init this block's kobject */ 530 /* init this block's kobject */
533 memset(&block->kobj, 0, sizeof(struct kobject)); 531 memset(&block->kobj, 0, sizeof(struct kobject));
@@ -546,8 +544,7 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
546 &instance->kobj, 544 &instance->kobj,
547 "%s", block->name); 545 "%s", block->name);
548 if (err) { 546 if (err) {
549 debugf1("%s() Failed to register instance '%s'\n", 547 edac_dbg(1, "Failed to register instance '%s'\n", block->name);
550 __func__, block->name);
551 kobject_put(main_kobj); 548 kobject_put(main_kobj);
552 err = -ENODEV; 549 err = -ENODEV;
553 goto err_out; 550 goto err_out;
@@ -560,11 +557,9 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
560 if (sysfs_attrib && block->nr_attribs) { 557 if (sysfs_attrib && block->nr_attribs) {
561 for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) { 558 for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {
562 559
563 debugf4("%s() creating block attrib='%s' " 560 edac_dbg(4, "creating block attrib='%s' attrib->%p to kobj=%p\n",
564 "attrib->%p to kobj=%p\n", 561 sysfs_attrib->attr.name,
565 __func__, 562 sysfs_attrib, &block->kobj);
566 sysfs_attrib->attr.name,
567 sysfs_attrib, &block->kobj);
568 563
569 /* Create each block_attribute file */ 564 /* Create each block_attribute file */
570 err = sysfs_create_file(&block->kobj, 565 err = sysfs_create_file(&block->kobj,
@@ -647,14 +642,14 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
647 err = kobject_init_and_add(&instance->kobj, &ktype_instance_ctrl, 642 err = kobject_init_and_add(&instance->kobj, &ktype_instance_ctrl,
648 &edac_dev->kobj, "%s", instance->name); 643 &edac_dev->kobj, "%s", instance->name);
649 if (err != 0) { 644 if (err != 0) {
650 debugf2("%s() Failed to register instance '%s'\n", 645 edac_dbg(2, "Failed to register instance '%s'\n",
651 __func__, instance->name); 646 instance->name);
652 kobject_put(main_kobj); 647 kobject_put(main_kobj);
653 goto err_out; 648 goto err_out;
654 } 649 }
655 650
656 debugf4("%s() now register '%d' blocks for instance %d\n", 651 edac_dbg(4, "now register '%d' blocks for instance %d\n",
657 __func__, instance->nr_blocks, idx); 652 instance->nr_blocks, idx);
658 653
659 /* register all blocks of this instance */ 654 /* register all blocks of this instance */
660 for (i = 0; i < instance->nr_blocks; i++) { 655 for (i = 0; i < instance->nr_blocks; i++) {
@@ -670,8 +665,8 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
670 } 665 }
671 kobject_uevent(&instance->kobj, KOBJ_ADD); 666 kobject_uevent(&instance->kobj, KOBJ_ADD);
672 667
673 debugf4("%s() Registered instance %d '%s' kobject\n", 668 edac_dbg(4, "Registered instance %d '%s' kobject\n",
674 __func__, idx, instance->name); 669 idx, instance->name);
675 670
676 return 0; 671 return 0;
677 672
@@ -715,7 +710,7 @@ static int edac_device_create_instances(struct edac_device_ctl_info *edac_dev)
715 int i, j; 710 int i, j;
716 int err; 711 int err;
717 712
718 debugf0("%s()\n", __func__); 713 edac_dbg(0, "\n");
719 714
720 /* iterate over creation of the instances */ 715 /* iterate over creation of the instances */
721 for (i = 0; i < edac_dev->nr_instances; i++) { 716 for (i = 0; i < edac_dev->nr_instances; i++) {
@@ -817,12 +812,12 @@ int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev)
817 int err; 812 int err;
818 struct kobject *edac_kobj = &edac_dev->kobj; 813 struct kobject *edac_kobj = &edac_dev->kobj;
819 814
820 debugf0("%s() idx=%d\n", __func__, edac_dev->dev_idx); 815 edac_dbg(0, "idx=%d\n", edac_dev->dev_idx);
821 816
822 /* go create any main attributes callers wants */ 817 /* go create any main attributes callers wants */
823 err = edac_device_add_main_sysfs_attributes(edac_dev); 818 err = edac_device_add_main_sysfs_attributes(edac_dev);
824 if (err) { 819 if (err) {
825 debugf0("%s() failed to add sysfs attribs\n", __func__); 820 edac_dbg(0, "failed to add sysfs attribs\n");
826 goto err_out; 821 goto err_out;
827 } 822 }
828 823
@@ -832,8 +827,7 @@ int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev)
832 err = sysfs_create_link(edac_kobj, 827 err = sysfs_create_link(edac_kobj,
833 &edac_dev->dev->kobj, EDAC_DEVICE_SYMLINK); 828 &edac_dev->dev->kobj, EDAC_DEVICE_SYMLINK);
834 if (err) { 829 if (err) {
835 debugf0("%s() sysfs_create_link() returned err= %d\n", 830 edac_dbg(0, "sysfs_create_link() returned err= %d\n", err);
836 __func__, err);
837 goto err_remove_main_attribs; 831 goto err_remove_main_attribs;
838 } 832 }
839 833
@@ -843,14 +837,13 @@ int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev)
843 */ 837 */
844 err = edac_device_create_instances(edac_dev); 838 err = edac_device_create_instances(edac_dev);
845 if (err) { 839 if (err) {
846 debugf0("%s() edac_device_create_instances() " 840 edac_dbg(0, "edac_device_create_instances() returned err= %d\n",
847 "returned err= %d\n", __func__, err); 841 err);
848 goto err_remove_link; 842 goto err_remove_link;
849 } 843 }
850 844
851 845
852 debugf4("%s() create-instances done, idx=%d\n", 846 edac_dbg(4, "create-instances done, idx=%d\n", edac_dev->dev_idx);
853 __func__, edac_dev->dev_idx);
854 847
855 return 0; 848 return 0;
856 849
@@ -873,7 +866,7 @@ err_out:
873 */ 866 */
874void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev) 867void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev)
875{ 868{
876 debugf0("%s()\n", __func__); 869 edac_dbg(0, "\n");
877 870
878 /* remove any main attributes for this device */ 871 /* remove any main attributes for this device */
879 edac_device_remove_main_sysfs_attributes(edac_dev); 872 edac_device_remove_main_sysfs_attributes(edac_dev);
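The edac_mc.c hunk below introduces edac_dimm_info_location(), which the rewritten dump helpers use: it walks the controller's n_layers and appends "<layer name> <index> " per layer into the caller's buffer, returning the character count. A stand-alone model of that accumulation loop, with a fixed two-layer list standing in for mci->layers[]; note that, like the kernel helper, it treats snprintf()'s return value as the emitted length and so assumes the buffer is big enough:

#include <stdio.h>

int main(void)
{
	const char *layer_name[] = { "csrow", "channel" };
	int location[] = { 3, 1 };
	char buf[80];
	char *p = buf;
	unsigned int len = sizeof(buf);
	int i, n, count = 0;

	for (i = 0; i < 2; i++) {
		n = snprintf(p, len, "%s %d ", layer_name[i], location[i]);
		p += n;
		len -= n;
		count += n;
		if (!len)
			break;		/* buffer exhausted */
	}

	printf("location: \"%s\" (%d chars)\n", buf, count);
	return 0;
}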
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 10f375032e96..616d90bcb3a4 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -27,70 +27,95 @@
27#include <linux/list.h> 27#include <linux/list.h>
28#include <linux/ctype.h> 28#include <linux/ctype.h>
29#include <linux/edac.h> 29#include <linux/edac.h>
30#include <linux/bitops.h>
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
31#include <asm/page.h> 32#include <asm/page.h>
32#include <asm/edac.h> 33#include <asm/edac.h>
33#include "edac_core.h" 34#include "edac_core.h"
34#include "edac_module.h" 35#include "edac_module.h"
35 36
37#define CREATE_TRACE_POINTS
38#define TRACE_INCLUDE_PATH ../../include/ras
39#include <ras/ras_event.h>
40
36/* lock to memory controller's control array */ 41/* lock to memory controller's control array */
37static DEFINE_MUTEX(mem_ctls_mutex); 42static DEFINE_MUTEX(mem_ctls_mutex);
38static LIST_HEAD(mc_devices); 43static LIST_HEAD(mc_devices);
39 44
45unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
46 unsigned len)
47{
48 struct mem_ctl_info *mci = dimm->mci;
49 int i, n, count = 0;
50 char *p = buf;
51
52 for (i = 0; i < mci->n_layers; i++) {
53 n = snprintf(p, len, "%s %d ",
54 edac_layer_name[mci->layers[i].type],
55 dimm->location[i]);
56 p += n;
57 len -= n;
58 count += n;
59 if (!len)
60 break;
61 }
62
63 return count;
64}
65
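
The new edac_dimm_info_location() helper renders a DIMM's per-layer coordinates into a caller-supplied buffer and returns the number of bytes written. A minimal usage sketch, mirroring how the debug code below calls it (the buffer size is an illustrative assumption):

        char location[80];

        edac_dimm_info_location(dimm, location, sizeof(location));
        edac_dbg(4, "DIMM sits at: %s\n", location);
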
40#ifdef CONFIG_EDAC_DEBUG 66#ifdef CONFIG_EDAC_DEBUG
41 67
42static void edac_mc_dump_channel(struct rank_info *chan) 68static void edac_mc_dump_channel(struct rank_info *chan)
43{ 69{
44 debugf4("\tchannel = %p\n", chan); 70 edac_dbg(4, " channel->chan_idx = %d\n", chan->chan_idx);
45 debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx); 71 edac_dbg(4, " channel = %p\n", chan);
46 debugf4("\tchannel->csrow = %p\n\n", chan->csrow); 72 edac_dbg(4, " channel->csrow = %p\n", chan->csrow);
47 debugf4("\tchannel->dimm = %p\n", chan->dimm); 73 edac_dbg(4, " channel->dimm = %p\n", chan->dimm);
48} 74}
49 75
50static void edac_mc_dump_dimm(struct dimm_info *dimm) 76static void edac_mc_dump_dimm(struct dimm_info *dimm, int number)
51{ 77{
52 int i; 78 char location[80];
53 79
54 debugf4("\tdimm = %p\n", dimm); 80 edac_dimm_info_location(dimm, location, sizeof(location));
55 debugf4("\tdimm->label = '%s'\n", dimm->label); 81
56 debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages); 82 edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
57 debugf4("\tdimm location "); 83 dimm->mci->mem_is_per_rank ? "rank" : "dimm",
58 for (i = 0; i < dimm->mci->n_layers; i++) { 84 number, location, dimm->csrow, dimm->cschannel);
59 printk(KERN_CONT "%d", dimm->location[i]); 85 edac_dbg(4, " dimm = %p\n", dimm);
60 if (i < dimm->mci->n_layers - 1) 86 edac_dbg(4, " dimm->label = '%s'\n", dimm->label);
61 printk(KERN_CONT "."); 87 edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
62 } 88 edac_dbg(4, " dimm->grain = %d\n", dimm->grain);
63 printk(KERN_CONT "\n"); 89 edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
64 debugf4("\tdimm->grain = %d\n", dimm->grain);
65 debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
66} 90}
67 91
68static void edac_mc_dump_csrow(struct csrow_info *csrow) 92static void edac_mc_dump_csrow(struct csrow_info *csrow)
69{ 93{
70 debugf4("\tcsrow = %p\n", csrow); 94 edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx);
71 debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx); 95 edac_dbg(4, " csrow = %p\n", csrow);
72 debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page); 96 edac_dbg(4, " csrow->first_page = 0x%lx\n", csrow->first_page);
73 debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page); 97 edac_dbg(4, " csrow->last_page = 0x%lx\n", csrow->last_page);
74 debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask); 98 edac_dbg(4, " csrow->page_mask = 0x%lx\n", csrow->page_mask);
75 debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels); 99 edac_dbg(4, " csrow->nr_channels = %d\n", csrow->nr_channels);
76 debugf4("\tcsrow->channels = %p\n", csrow->channels); 100 edac_dbg(4, " csrow->channels = %p\n", csrow->channels);
77 debugf4("\tcsrow->mci = %p\n\n", csrow->mci); 101 edac_dbg(4, " csrow->mci = %p\n", csrow->mci);
78} 102}
79 103
80static void edac_mc_dump_mci(struct mem_ctl_info *mci) 104static void edac_mc_dump_mci(struct mem_ctl_info *mci)
81{ 105{
82 debugf3("\tmci = %p\n", mci); 106 edac_dbg(3, "\tmci = %p\n", mci);
83 debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap); 107 edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
84 debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap); 108 edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
85 debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap); 109 edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
86 debugf4("\tmci->edac_check = %p\n", mci->edac_check); 110 edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
87 debugf3("\tmci->nr_csrows = %d, csrows = %p\n", 111 edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
88 mci->nr_csrows, mci->csrows); 112 mci->nr_csrows, mci->csrows);
89 debugf3("\tmci->nr_dimms = %d, dimms = %p\n", 113 edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
90 mci->tot_dimms, mci->dimms); 114 mci->tot_dimms, mci->dimms);
91 debugf3("\tdev = %p\n", mci->dev); 115 edac_dbg(3, "\tdev = %p\n", mci->pdev);
92 debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name); 116 edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
93 debugf3("\tpvt_info = %p\n\n", mci->pvt_info); 117 mci->mod_name, mci->ctl_name);
118 edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
94} 119}
95 120
96#endif /* CONFIG_EDAC_DEBUG */ 121#endif /* CONFIG_EDAC_DEBUG */
@@ -164,7 +189,7 @@ void *edac_align_ptr(void **p, unsigned size, int n_elems)
164 else 189 else
165 return (char *)ptr; 190 return (char *)ptr;
166 191
167 r = size % align; 192 r = (unsigned long)p % align;
168 193
169 if (r == 0) 194 if (r == 0)
170 return (char *)ptr; 195 return (char *)ptr;
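
The one-line fix above changes what the remainder is taken from: the old code computed size % align, which says nothing about where the running pointer currently sits, while the new code takes the remainder of the pointer value itself, which is what alignment padding must be derived from. A worked example of the arithmetic (values made up for illustration):

        /* Illustrative only: pad a running offset up to a 4-byte boundary */
        unsigned long offset = 0x16;            /* current position */
        unsigned int align = 4;                 /* natural alignment */
        unsigned int r = offset % align;        /* r = 2 */

        if (r != 0)
                offset += align - r;            /* offset becomes 0x18 */
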
@@ -205,15 +230,15 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
205{ 230{
206 struct mem_ctl_info *mci; 231 struct mem_ctl_info *mci;
207 struct edac_mc_layer *layer; 232 struct edac_mc_layer *layer;
208 struct csrow_info *csi, *csr; 233 struct csrow_info *csr;
209 struct rank_info *chi, *chp, *chan; 234 struct rank_info *chan;
210 struct dimm_info *dimm; 235 struct dimm_info *dimm;
211 u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS]; 236 u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
212 unsigned pos[EDAC_MAX_LAYERS]; 237 unsigned pos[EDAC_MAX_LAYERS];
213 unsigned size, tot_dimms = 1, count = 1; 238 unsigned size, tot_dimms = 1, count = 1;
214 unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0; 239 unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
215 void *pvt, *p, *ptr = NULL; 240 void *pvt, *p, *ptr = NULL;
216 int i, j, err, row, chn, n, len; 241 int i, j, row, chn, n, len, off;
217 bool per_rank = false; 242 bool per_rank = false;
218 243
219 BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0); 244 BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
@@ -239,26 +264,24 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
239 */ 264 */
240 mci = edac_align_ptr(&ptr, sizeof(*mci), 1); 265 mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
241 layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers); 266 layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
242 csi = edac_align_ptr(&ptr, sizeof(*csi), tot_csrows);
243 chi = edac_align_ptr(&ptr, sizeof(*chi), tot_csrows * tot_channels);
244 dimm = edac_align_ptr(&ptr, sizeof(*dimm), tot_dimms);
245 for (i = 0; i < n_layers; i++) { 267 for (i = 0; i < n_layers; i++) {
246 count *= layers[i].size; 268 count *= layers[i].size;
247 debugf4("%s: errcount layer %d size %d\n", __func__, i, count); 269 edac_dbg(4, "errcount layer %d size %d\n", i, count);
248 ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); 270 ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
249 ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); 271 ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
250 tot_errcount += 2 * count; 272 tot_errcount += 2 * count;
251 } 273 }
252 274
253 debugf4("%s: allocating %d error counters\n", __func__, tot_errcount); 275 edac_dbg(4, "allocating %d error counters\n", tot_errcount);
254 pvt = edac_align_ptr(&ptr, sz_pvt, 1); 276 pvt = edac_align_ptr(&ptr, sz_pvt, 1);
255 size = ((unsigned long)pvt) + sz_pvt; 277 size = ((unsigned long)pvt) + sz_pvt;
256 278
257 debugf1("%s(): allocating %u bytes for mci data (%d %s, %d csrows/channels)\n", 279 edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
258 __func__, size, 280 size,
259 tot_dimms, 281 tot_dimms,
260 per_rank ? "ranks" : "dimms", 282 per_rank ? "ranks" : "dimms",
261 tot_csrows * tot_channels); 283 tot_csrows * tot_channels);
284
262 mci = kzalloc(size, GFP_KERNEL); 285 mci = kzalloc(size, GFP_KERNEL);
263 if (mci == NULL) 286 if (mci == NULL)
264 return NULL; 287 return NULL;
@@ -267,9 +290,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
267 * rather than an imaginary chunk of memory located at address 0. 290 * rather than an imaginary chunk of memory located at address 0.
268 */ 291 */
269 layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer)); 292 layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
270 csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
271 chi = (struct rank_info *)(((char *)mci) + ((unsigned long)chi));
272 dimm = (struct dimm_info *)(((char *)mci) + ((unsigned long)dimm));
273 for (i = 0; i < n_layers; i++) { 293 for (i = 0; i < n_layers; i++) {
274 mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i])); 294 mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
275 mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i])); 295 mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
@@ -278,8 +298,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
278 298
279 /* setup index and various internal pointers */ 299 /* setup index and various internal pointers */
280 mci->mc_idx = mc_num; 300 mci->mc_idx = mc_num;
281 mci->csrows = csi;
282 mci->dimms = dimm;
283 mci->tot_dimms = tot_dimms; 301 mci->tot_dimms = tot_dimms;
284 mci->pvt_info = pvt; 302 mci->pvt_info = pvt;
285 mci->n_layers = n_layers; 303 mci->n_layers = n_layers;
@@ -290,40 +308,57 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
290 mci->mem_is_per_rank = per_rank; 308 mci->mem_is_per_rank = per_rank;
291 309
292 /* 310 /*
 293 * Fill the csrow struct 311 * Allocate and fill the csrow/channels structs
294 */ 312 */
313 mci->csrows = kcalloc(sizeof(*mci->csrows), tot_csrows, GFP_KERNEL);
314 if (!mci->csrows)
315 goto error;
295 for (row = 0; row < tot_csrows; row++) { 316 for (row = 0; row < tot_csrows; row++) {
296 csr = &csi[row]; 317 csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
318 if (!csr)
319 goto error;
320 mci->csrows[row] = csr;
297 csr->csrow_idx = row; 321 csr->csrow_idx = row;
298 csr->mci = mci; 322 csr->mci = mci;
299 csr->nr_channels = tot_channels; 323 csr->nr_channels = tot_channels;
300 chp = &chi[row * tot_channels]; 324 csr->channels = kcalloc(sizeof(*csr->channels), tot_channels,
301 csr->channels = chp; 325 GFP_KERNEL);
326 if (!csr->channels)
327 goto error;
302 328
303 for (chn = 0; chn < tot_channels; chn++) { 329 for (chn = 0; chn < tot_channels; chn++) {
304 chan = &chp[chn]; 330 chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
331 if (!chan)
332 goto error;
333 csr->channels[chn] = chan;
305 chan->chan_idx = chn; 334 chan->chan_idx = chn;
306 chan->csrow = csr; 335 chan->csrow = csr;
307 } 336 }
308 } 337 }
309 338
310 /* 339 /*
311 * Fill the dimm struct 340 * Allocate and fill the dimm structs
312 */ 341 */
342 mci->dimms = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL);
343 if (!mci->dimms)
344 goto error;
345
313 memset(&pos, 0, sizeof(pos)); 346 memset(&pos, 0, sizeof(pos));
314 row = 0; 347 row = 0;
315 chn = 0; 348 chn = 0;
316 debugf4("%s: initializing %d %s\n", __func__, tot_dimms,
317 per_rank ? "ranks" : "dimms");
318 for (i = 0; i < tot_dimms; i++) { 349 for (i = 0; i < tot_dimms; i++) {
319 chan = &csi[row].channels[chn]; 350 chan = mci->csrows[row]->channels[chn];
320 dimm = EDAC_DIMM_PTR(layer, mci->dimms, n_layers, 351 off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]);
321 pos[0], pos[1], pos[2]); 352 if (off < 0 || off >= tot_dimms) {
322 dimm->mci = mci; 353 edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n");
354 goto error;
355 }
323 356
324 debugf2("%s: %d: %s%zd (%d:%d:%d): row %d, chan %d\n", __func__, 357 dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
325 i, per_rank ? "rank" : "dimm", (dimm - mci->dimms), 358 if (!dimm)
326 pos[0], pos[1], pos[2], row, chn); 359 goto error;
360 mci->dimms[off] = dimm;
361 dimm->mci = mci;
327 362
328 /* 363 /*
329 * Copy DIMM location and initialize it. 364 * Copy DIMM location and initialize it.
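
The hunks above replace the old scheme, where csrows, channels and dimms were carved out of one big kzalloc'ed blob, with individually allocated objects reached through arrays of pointers. Stripped of the EDAC specifics, the pattern is as follows (a sketch, not the patch itself; note that kcalloc()'s idiomatic argument order is (count, size), while the patch passes them swapped, which still works because the two are only multiplied):

        mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
        if (!mci->csrows)
                goto error;

        for (row = 0; row < tot_csrows; row++) {
                mci->csrows[row] = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
                if (!mci->csrows[row])
                        goto error;     /* partially built tree torn down at error: */
        }
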
@@ -367,16 +402,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
367 } 402 }
368 403
369 mci->op_state = OP_ALLOC; 404 mci->op_state = OP_ALLOC;
370 INIT_LIST_HEAD(&mci->grp_kobj_list);
371
372 /*
373 * Initialize the 'root' kobj for the edac_mc controller
374 */
375 err = edac_mc_register_sysfs_main_kobj(mci);
376 if (err) {
377 kfree(mci);
378 return NULL;
379 }
380 405
381 /* at this point, the root kobj is valid, and in order to 406 /* at this point, the root kobj is valid, and in order to
382 * 'free' the object, then the function: 407 * 'free' the object, then the function:
@@ -384,7 +409,30 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
384 * which will perform kobj unregistration and the actual free 409 * which will perform kobj unregistration and the actual free
385 * will occur during the kobject callback operation 410 * will occur during the kobject callback operation
386 */ 411 */
412
387 return mci; 413 return mci;
414
415error:
416 if (mci->dimms) {
417 for (i = 0; i < tot_dimms; i++)
418 kfree(mci->dimms[i]);
419 kfree(mci->dimms);
420 }
421 if (mci->csrows) {
422 for (chn = 0; chn < tot_channels; chn++) {
423 csr = mci->csrows[chn];
424 if (csr) {
425 for (chn = 0; chn < tot_channels; chn++)
426 kfree(csr->channels[chn]);
427 kfree(csr);
428 }
429 kfree(mci->csrows[i]);
430 }
431 kfree(mci->csrows);
432 }
433 kfree(mci);
434
435 return NULL;
388} 436}
389EXPORT_SYMBOL_GPL(edac_mc_alloc); 437EXPORT_SYMBOL_GPL(edac_mc_alloc);
390 438
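
One caveat in the error path as committed: the csrow loop iterates with chn over tot_channels and then reuses chn for the inner channel loop, and the stray kfree(mci->csrows[i]) uses an index left over from an earlier loop. An unwind that mirrors the allocation structure would look roughly like this (a sketch under the same variable names):

        if (mci->csrows) {
                for (row = 0; row < tot_csrows; row++) {
                        csr = mci->csrows[row];
                        if (!csr)
                                continue;
                        if (csr->channels) {
                                for (chn = 0; chn < tot_channels; chn++)
                                        kfree(csr->channels[chn]);
                                kfree(csr->channels);
                        }
                        kfree(csr);
                }
                kfree(mci->csrows);
        }
        kfree(mci);
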
@@ -395,12 +443,10 @@ EXPORT_SYMBOL_GPL(edac_mc_alloc);
395 */ 443 */
396void edac_mc_free(struct mem_ctl_info *mci) 444void edac_mc_free(struct mem_ctl_info *mci)
397{ 445{
398 debugf1("%s()\n", __func__); 446 edac_dbg(1, "\n");
399 447
400 edac_mc_unregister_sysfs_main_kobj(mci); 448 /* the mci instance is freed here, when the sysfs object is dropped */
401 449 edac_unregister_sysfs(mci);
402 /* free the mci instance memory here */
403 kfree(mci);
404} 450}
405EXPORT_SYMBOL_GPL(edac_mc_free); 451EXPORT_SYMBOL_GPL(edac_mc_free);
406 452
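
edac_mc_free() no longer calls kfree() directly: once the controller is represented by struct device objects, the memory must live until the last sysfs reference is gone, so freeing moves into the device release callbacks added later in this patch (csrow_attr_release(), dimm_attr_release()). The shape of such a callback:

        static void example_release(struct device *dev)
        {
                struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);

                kfree(csrow);   /* runs only when the last reference is dropped */
        }
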
@@ -417,12 +463,12 @@ struct mem_ctl_info *find_mci_by_dev(struct device *dev)
417 struct mem_ctl_info *mci; 463 struct mem_ctl_info *mci;
418 struct list_head *item; 464 struct list_head *item;
419 465
420 debugf3("%s()\n", __func__); 466 edac_dbg(3, "\n");
421 467
422 list_for_each(item, &mc_devices) { 468 list_for_each(item, &mc_devices) {
423 mci = list_entry(item, struct mem_ctl_info, link); 469 mci = list_entry(item, struct mem_ctl_info, link);
424 470
425 if (mci->dev == dev) 471 if (mci->pdev == dev)
426 return mci; 472 return mci;
427 } 473 }
428 474
@@ -485,7 +531,7 @@ static void edac_mc_workq_function(struct work_struct *work_req)
485 */ 531 */
486static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec) 532static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
487{ 533{
488 debugf0("%s()\n", __func__); 534 edac_dbg(0, "\n");
489 535
490 /* if this instance is not in the POLL state, then simply return */ 536 /* if this instance is not in the POLL state, then simply return */
491 if (mci->op_state != OP_RUNNING_POLL) 537 if (mci->op_state != OP_RUNNING_POLL)
@@ -512,8 +558,7 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
512 558
513 status = cancel_delayed_work(&mci->work); 559 status = cancel_delayed_work(&mci->work);
514 if (status == 0) { 560 if (status == 0) {
515 debugf0("%s() not canceled, flush the queue\n", 561 edac_dbg(0, "not canceled, flush the queue\n");
516 __func__);
517 562
518 /* workq instance might be running, wait for it */ 563 /* workq instance might be running, wait for it */
519 flush_workqueue(edac_workqueue); 564 flush_workqueue(edac_workqueue);
@@ -574,7 +619,7 @@ static int add_mc_to_global_list(struct mem_ctl_info *mci)
574 619
575 insert_before = &mc_devices; 620 insert_before = &mc_devices;
576 621
577 p = find_mci_by_dev(mci->dev); 622 p = find_mci_by_dev(mci->pdev);
578 if (unlikely(p != NULL)) 623 if (unlikely(p != NULL))
579 goto fail0; 624 goto fail0;
580 625
@@ -596,7 +641,7 @@ static int add_mc_to_global_list(struct mem_ctl_info *mci)
596 641
597fail0: 642fail0:
598 edac_printk(KERN_WARNING, EDAC_MC, 643 edac_printk(KERN_WARNING, EDAC_MC,
599 "%s (%s) %s %s already assigned %d\n", dev_name(p->dev), 644 "%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
600 edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx); 645 edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
601 return 1; 646 return 1;
602 647
@@ -660,7 +705,7 @@ EXPORT_SYMBOL(edac_mc_find);
660/* FIXME - should a warning be printed if no error detection? correction? */ 705/* FIXME - should a warning be printed if no error detection? correction? */
661int edac_mc_add_mc(struct mem_ctl_info *mci) 706int edac_mc_add_mc(struct mem_ctl_info *mci)
662{ 707{
663 debugf0("%s()\n", __func__); 708 edac_dbg(0, "\n");
664 709
665#ifdef CONFIG_EDAC_DEBUG 710#ifdef CONFIG_EDAC_DEBUG
666 if (edac_debug_level >= 3) 711 if (edac_debug_level >= 3)
@@ -670,15 +715,22 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
670 int i; 715 int i;
671 716
672 for (i = 0; i < mci->nr_csrows; i++) { 717 for (i = 0; i < mci->nr_csrows; i++) {
718 struct csrow_info *csrow = mci->csrows[i];
719 u32 nr_pages = 0;
673 int j; 720 int j;
674 721
675 edac_mc_dump_csrow(&mci->csrows[i]); 722 for (j = 0; j < csrow->nr_channels; j++)
676 for (j = 0; j < mci->csrows[i].nr_channels; j++) 723 nr_pages += csrow->channels[j]->dimm->nr_pages;
677 edac_mc_dump_channel(&mci->csrows[i]. 724 if (!nr_pages)
678 channels[j]); 725 continue;
726 edac_mc_dump_csrow(csrow);
727 for (j = 0; j < csrow->nr_channels; j++)
728 if (csrow->channels[j]->dimm->nr_pages)
729 edac_mc_dump_channel(csrow->channels[j]);
679 } 730 }
680 for (i = 0; i < mci->tot_dimms; i++) 731 for (i = 0; i < mci->tot_dimms; i++)
681 edac_mc_dump_dimm(&mci->dimms[i]); 732 if (mci->dimms[i]->nr_pages)
733 edac_mc_dump_dimm(mci->dimms[i], i);
682 } 734 }
683#endif 735#endif
684 mutex_lock(&mem_ctls_mutex); 736 mutex_lock(&mem_ctls_mutex);
@@ -732,7 +784,7 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
732{ 784{
733 struct mem_ctl_info *mci; 785 struct mem_ctl_info *mci;
734 786
735 debugf0("%s()\n", __func__); 787 edac_dbg(0, "\n");
736 788
737 mutex_lock(&mem_ctls_mutex); 789 mutex_lock(&mem_ctls_mutex);
738 790
@@ -770,7 +822,7 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
770 void *virt_addr; 822 void *virt_addr;
771 unsigned long flags = 0; 823 unsigned long flags = 0;
772 824
773 debugf3("%s()\n", __func__); 825 edac_dbg(3, "\n");
774 826
775 /* ECC error page was not in our memory. Ignore it. */ 827 /* ECC error page was not in our memory. Ignore it. */
776 if (!pfn_valid(page)) 828 if (!pfn_valid(page))
@@ -797,26 +849,26 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
797/* FIXME - should return -1 */ 849/* FIXME - should return -1 */
798int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) 850int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
799{ 851{
800 struct csrow_info *csrows = mci->csrows; 852 struct csrow_info **csrows = mci->csrows;
801 int row, i, j, n; 853 int row, i, j, n;
802 854
803 debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page); 855 edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
804 row = -1; 856 row = -1;
805 857
806 for (i = 0; i < mci->nr_csrows; i++) { 858 for (i = 0; i < mci->nr_csrows; i++) {
807 struct csrow_info *csrow = &csrows[i]; 859 struct csrow_info *csrow = csrows[i];
808 n = 0; 860 n = 0;
809 for (j = 0; j < csrow->nr_channels; j++) { 861 for (j = 0; j < csrow->nr_channels; j++) {
810 struct dimm_info *dimm = csrow->channels[j].dimm; 862 struct dimm_info *dimm = csrow->channels[j]->dimm;
811 n += dimm->nr_pages; 863 n += dimm->nr_pages;
812 } 864 }
813 if (n == 0) 865 if (n == 0)
814 continue; 866 continue;
815 867
816 debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) " 868 edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n",
817 "mask(0x%lx)\n", mci->mc_idx, __func__, 869 mci->mc_idx,
818 csrow->first_page, page, csrow->last_page, 870 csrow->first_page, page, csrow->last_page,
819 csrow->page_mask); 871 csrow->page_mask);
820 872
821 if ((page >= csrow->first_page) && 873 if ((page >= csrow->first_page) &&
822 (page <= csrow->last_page) && 874 (page <= csrow->last_page) &&
@@ -845,15 +897,16 @@ const char *edac_layer_name[] = {
845EXPORT_SYMBOL_GPL(edac_layer_name); 897EXPORT_SYMBOL_GPL(edac_layer_name);
846 898
847static void edac_inc_ce_error(struct mem_ctl_info *mci, 899static void edac_inc_ce_error(struct mem_ctl_info *mci,
848 bool enable_per_layer_report, 900 bool enable_per_layer_report,
849 const int pos[EDAC_MAX_LAYERS]) 901 const int pos[EDAC_MAX_LAYERS],
902 const u16 count)
850{ 903{
851 int i, index = 0; 904 int i, index = 0;
852 905
853 mci->ce_mc++; 906 mci->ce_mc += count;
854 907
855 if (!enable_per_layer_report) { 908 if (!enable_per_layer_report) {
856 mci->ce_noinfo_count++; 909 mci->ce_noinfo_count += count;
857 return; 910 return;
858 } 911 }
859 912
@@ -861,7 +914,7 @@ static void edac_inc_ce_error(struct mem_ctl_info *mci,
861 if (pos[i] < 0) 914 if (pos[i] < 0)
862 break; 915 break;
863 index += pos[i]; 916 index += pos[i];
864 mci->ce_per_layer[i][index]++; 917 mci->ce_per_layer[i][index] += count;
865 918
866 if (i < mci->n_layers - 1) 919 if (i < mci->n_layers - 1)
867 index *= mci->layers[i + 1].size; 920 index *= mci->layers[i + 1].size;
@@ -870,14 +923,15 @@ static void edac_inc_ce_error(struct mem_ctl_info *mci,
870 923
871static void edac_inc_ue_error(struct mem_ctl_info *mci, 924static void edac_inc_ue_error(struct mem_ctl_info *mci,
872 bool enable_per_layer_report, 925 bool enable_per_layer_report,
873 const int pos[EDAC_MAX_LAYERS]) 926 const int pos[EDAC_MAX_LAYERS],
927 const u16 count)
874{ 928{
875 int i, index = 0; 929 int i, index = 0;
876 930
877 mci->ue_mc++; 931 mci->ue_mc += count;
878 932
879 if (!enable_per_layer_report) { 933 if (!enable_per_layer_report) {
880 mci->ce_noinfo_count++; 934 mci->ce_noinfo_count += count;
881 return; 935 return;
882 } 936 }
883 937
@@ -885,7 +939,7 @@ static void edac_inc_ue_error(struct mem_ctl_info *mci,
885 if (pos[i] < 0) 939 if (pos[i] < 0)
886 break; 940 break;
887 index += pos[i]; 941 index += pos[i];
888 mci->ue_per_layer[i][index]++; 942 mci->ue_per_layer[i][index] += count;
889 943
890 if (i < mci->n_layers - 1) 944 if (i < mci->n_layers - 1)
891 index *= mci->layers[i + 1].size; 945 index *= mci->layers[i + 1].size;
@@ -893,6 +947,7 @@ static void edac_inc_ue_error(struct mem_ctl_info *mci,
893} 947}
894 948
895static void edac_ce_error(struct mem_ctl_info *mci, 949static void edac_ce_error(struct mem_ctl_info *mci,
950 const u16 error_count,
896 const int pos[EDAC_MAX_LAYERS], 951 const int pos[EDAC_MAX_LAYERS],
897 const char *msg, 952 const char *msg,
898 const char *location, 953 const char *location,
@@ -902,23 +957,25 @@ static void edac_ce_error(struct mem_ctl_info *mci,
902 const bool enable_per_layer_report, 957 const bool enable_per_layer_report,
903 const unsigned long page_frame_number, 958 const unsigned long page_frame_number,
904 const unsigned long offset_in_page, 959 const unsigned long offset_in_page,
905 u32 grain) 960 long grain)
906{ 961{
907 unsigned long remapped_page; 962 unsigned long remapped_page;
908 963
909 if (edac_mc_get_log_ce()) { 964 if (edac_mc_get_log_ce()) {
910 if (other_detail && *other_detail) 965 if (other_detail && *other_detail)
911 edac_mc_printk(mci, KERN_WARNING, 966 edac_mc_printk(mci, KERN_WARNING,
912 "CE %s on %s (%s%s - %s)\n", 967 "%d CE %s on %s (%s %s - %s)\n",
968 error_count,
913 msg, label, location, 969 msg, label, location,
914 detail, other_detail); 970 detail, other_detail);
915 else 971 else
916 edac_mc_printk(mci, KERN_WARNING, 972 edac_mc_printk(mci, KERN_WARNING,
917 "CE %s on %s (%s%s)\n", 973 "%d CE %s on %s (%s %s)\n",
974 error_count,
918 msg, label, location, 975 msg, label, location,
919 detail); 976 detail);
920 } 977 }
921 edac_inc_ce_error(mci, enable_per_layer_report, pos); 978 edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count);
922 979
923 if (mci->scrub_mode & SCRUB_SW_SRC) { 980 if (mci->scrub_mode & SCRUB_SW_SRC) {
924 /* 981 /*
@@ -942,6 +999,7 @@ static void edac_ce_error(struct mem_ctl_info *mci,
942} 999}
943 1000
944static void edac_ue_error(struct mem_ctl_info *mci, 1001static void edac_ue_error(struct mem_ctl_info *mci,
1002 const u16 error_count,
945 const int pos[EDAC_MAX_LAYERS], 1003 const int pos[EDAC_MAX_LAYERS],
946 const char *msg, 1004 const char *msg,
947 const char *location, 1005 const char *location,
@@ -953,12 +1011,14 @@ static void edac_ue_error(struct mem_ctl_info *mci,
953 if (edac_mc_get_log_ue()) { 1011 if (edac_mc_get_log_ue()) {
954 if (other_detail && *other_detail) 1012 if (other_detail && *other_detail)
955 edac_mc_printk(mci, KERN_WARNING, 1013 edac_mc_printk(mci, KERN_WARNING,
956 "UE %s on %s (%s%s - %s)\n", 1014 "%d UE %s on %s (%s %s - %s)\n",
1015 error_count,
957 msg, label, location, detail, 1016 msg, label, location, detail,
958 other_detail); 1017 other_detail);
959 else 1018 else
960 edac_mc_printk(mci, KERN_WARNING, 1019 edac_mc_printk(mci, KERN_WARNING,
961 "UE %s on %s (%s%s)\n", 1020 "%d UE %s on %s (%s %s)\n",
1021 error_count,
962 msg, label, location, detail); 1022 msg, label, location, detail);
963 } 1023 }
964 1024
@@ -971,33 +1031,53 @@ static void edac_ue_error(struct mem_ctl_info *mci,
971 msg, label, location, detail); 1031 msg, label, location, detail);
972 } 1032 }
973 1033
974 edac_inc_ue_error(mci, enable_per_layer_report, pos); 1034 edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
975} 1035}
976 1036
977#define OTHER_LABEL " or " 1037#define OTHER_LABEL " or "
1038
1039/**
1040 * edac_mc_handle_error - reports a memory event to userspace
1041 *
1042 * @type: severity of the error (CE/UE/Fatal)
1043 * @mci: a struct mem_ctl_info pointer
1044 * @error_count: Number of errors of the same type
1045 * @page_frame_number: mem page where the error occurred
1046 * @offset_in_page: offset of the error inside the page
1047 * @syndrome: ECC syndrome
1048 * @top_layer: Memory layer[0] position
1049 * @mid_layer: Memory layer[1] position
1050 * @low_layer: Memory layer[2] position
1051 * @msg: Message meaningful to the end users that
1052 * explains the event
1053 * @other_detail: Technical details about the event that
1054 * may help hardware manufacturers and
1055 * EDAC developers to analyse the event
1056 */
978void edac_mc_handle_error(const enum hw_event_mc_err_type type, 1057void edac_mc_handle_error(const enum hw_event_mc_err_type type,
979 struct mem_ctl_info *mci, 1058 struct mem_ctl_info *mci,
1059 const u16 error_count,
980 const unsigned long page_frame_number, 1060 const unsigned long page_frame_number,
981 const unsigned long offset_in_page, 1061 const unsigned long offset_in_page,
982 const unsigned long syndrome, 1062 const unsigned long syndrome,
983 const int layer0, 1063 const int top_layer,
984 const int layer1, 1064 const int mid_layer,
985 const int layer2, 1065 const int low_layer,
986 const char *msg, 1066 const char *msg,
987 const char *other_detail, 1067 const char *other_detail)
988 const void *mcelog)
989{ 1068{
 990 /* FIXME: too much for stack: move it to some pre-allocated area */ 1069 /* FIXME: too much for stack: move it to some pre-allocated area */
991 char detail[80], location[80]; 1070 char detail[80], location[80];
992 char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms]; 1071 char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
993 char *p; 1072 char *p;
994 int row = -1, chan = -1; 1073 int row = -1, chan = -1;
995 int pos[EDAC_MAX_LAYERS] = { layer0, layer1, layer2 }; 1074 int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
996 int i; 1075 int i;
997 u32 grain; 1076 long grain;
998 bool enable_per_layer_report = false; 1077 bool enable_per_layer_report = false;
1078 u8 grain_bits;
999 1079
1000 debugf3("MC%d: %s()\n", mci->mc_idx, __func__); 1080 edac_dbg(3, "MC%d\n", mci->mc_idx);
1001 1081
1002 /* 1082 /*
1003 * Check if the event report is consistent and if the memory 1083 * Check if the event report is consistent and if the memory
@@ -1043,13 +1123,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
1043 p = label; 1123 p = label;
1044 *p = '\0'; 1124 *p = '\0';
1045 for (i = 0; i < mci->tot_dimms; i++) { 1125 for (i = 0; i < mci->tot_dimms; i++) {
1046 struct dimm_info *dimm = &mci->dimms[i]; 1126 struct dimm_info *dimm = mci->dimms[i];
1047 1127
1048 if (layer0 >= 0 && layer0 != dimm->location[0]) 1128 if (top_layer >= 0 && top_layer != dimm->location[0])
1049 continue; 1129 continue;
1050 if (layer1 >= 0 && layer1 != dimm->location[1]) 1130 if (mid_layer >= 0 && mid_layer != dimm->location[1])
1051 continue; 1131 continue;
1052 if (layer2 >= 0 && layer2 != dimm->location[2]) 1132 if (low_layer >= 0 && low_layer != dimm->location[2])
1053 continue; 1133 continue;
1054 1134
1055 /* get the max grain, over the error match range */ 1135 /* get the max grain, over the error match range */
@@ -1075,11 +1155,9 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
1075 * get csrow/channel of the DIMM, in order to allow 1155 * get csrow/channel of the DIMM, in order to allow
1076 * incrementing the compat API counters 1156 * incrementing the compat API counters
1077 */ 1157 */
1078 debugf4("%s: %s csrows map: (%d,%d)\n", 1158 edac_dbg(4, "%s csrows map: (%d,%d)\n",
1079 __func__, 1159 mci->mem_is_per_rank ? "rank" : "dimm",
1080 mci->mem_is_per_rank ? "rank" : "dimm", 1160 dimm->csrow, dimm->cschannel);
1081 dimm->csrow, dimm->cschannel);
1082
1083 if (row == -1) 1161 if (row == -1)
1084 row = dimm->csrow; 1162 row = dimm->csrow;
1085 else if (row >= 0 && row != dimm->csrow) 1163 else if (row >= 0 && row != dimm->csrow)
@@ -1095,19 +1173,18 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
1095 if (!enable_per_layer_report) { 1173 if (!enable_per_layer_report) {
1096 strcpy(label, "any memory"); 1174 strcpy(label, "any memory");
1097 } else { 1175 } else {
1098 debugf4("%s: csrow/channel to increment: (%d,%d)\n", 1176 edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
1099 __func__, row, chan);
1100 if (p == label) 1177 if (p == label)
1101 strcpy(label, "unknown memory"); 1178 strcpy(label, "unknown memory");
1102 if (type == HW_EVENT_ERR_CORRECTED) { 1179 if (type == HW_EVENT_ERR_CORRECTED) {
1103 if (row >= 0) { 1180 if (row >= 0) {
1104 mci->csrows[row].ce_count++; 1181 mci->csrows[row]->ce_count += error_count;
1105 if (chan >= 0) 1182 if (chan >= 0)
1106 mci->csrows[row].channels[chan].ce_count++; 1183 mci->csrows[row]->channels[chan]->ce_count += error_count;
1107 } 1184 }
1108 } else 1185 } else
1109 if (row >= 0) 1186 if (row >= 0)
1110 mci->csrows[row].ue_count++; 1187 mci->csrows[row]->ue_count += error_count;
1111 } 1188 }
1112 1189
1113 /* Fill the RAM location data */ 1190 /* Fill the RAM location data */
@@ -1120,23 +1197,33 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
1120 edac_layer_name[mci->layers[i].type], 1197 edac_layer_name[mci->layers[i].type],
1121 pos[i]); 1198 pos[i]);
1122 } 1199 }
1200 if (p > location)
1201 *(p - 1) = '\0';
1202
1203 /* Report the error via the trace interface */
1204
1205 grain_bits = fls_long(grain) + 1;
1206 trace_mc_event(type, msg, label, error_count,
1207 mci->mc_idx, top_layer, mid_layer, low_layer,
1208 PAGES_TO_MiB(page_frame_number) | offset_in_page,
1209 grain_bits, syndrome, other_detail);
1123 1210
1124 /* Memory type dependent details about the error */ 1211 /* Memory type dependent details about the error */
1125 if (type == HW_EVENT_ERR_CORRECTED) { 1212 if (type == HW_EVENT_ERR_CORRECTED) {
1126 snprintf(detail, sizeof(detail), 1213 snprintf(detail, sizeof(detail),
1127 "page:0x%lx offset:0x%lx grain:%d syndrome:0x%lx", 1214 "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
1128 page_frame_number, offset_in_page, 1215 page_frame_number, offset_in_page,
1129 grain, syndrome); 1216 grain, syndrome);
1130 edac_ce_error(mci, pos, msg, location, label, detail, 1217 edac_ce_error(mci, error_count, pos, msg, location, label,
1131 other_detail, enable_per_layer_report, 1218 detail, other_detail, enable_per_layer_report,
1132 page_frame_number, offset_in_page, grain); 1219 page_frame_number, offset_in_page, grain);
1133 } else { 1220 } else {
1134 snprintf(detail, sizeof(detail), 1221 snprintf(detail, sizeof(detail),
1135 "page:0x%lx offset:0x%lx grain:%d", 1222 "page:0x%lx offset:0x%lx grain:%ld",
1136 page_frame_number, offset_in_page, grain); 1223 page_frame_number, offset_in_page, grain);
1137 1224
1138 edac_ue_error(mci, pos, msg, location, label, detail, 1225 edac_ue_error(mci, error_count, pos, msg, location, label,
1139 other_detail, enable_per_layer_report); 1226 detail, other_detail, enable_per_layer_report);
1140 } 1227 }
1141} 1228}
1142EXPORT_SYMBOL_GPL(edac_mc_handle_error); 1229EXPORT_SYMBOL_GPL(edac_mc_handle_error);
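
With this change edac_mc_handle_error() takes an explicit error_count, the three anonymous layer arguments gain descriptive names (top/mid/low), the unused mcelog pointer is dropped, and every event is additionally reported through the mc_event tracepoint with the grain encoded as grain_bits = fls_long(grain) + 1. A hypothetical driver call under the new signature (pfn, offset and syndrome stand in for driver-derived values):

        edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
                             1,                     /* error_count */
                             pfn, offset, syndrome,
                             0,                     /* top_layer */
                             1,                     /* mid_layer */
                             -1,                    /* low_layer unused */
                             "memory read error", "");
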
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index f6a29b0eedc8..ed0bc07b8503 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -7,17 +7,21 @@
7 * 7 *
8 * Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com 8 * Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
9 * 9 *
10 * (c) 2012 - Mauro Carvalho Chehab <mchehab@redhat.com>
 11 * The entire API was re-written and ported to use struct device
12 *
10 */ 13 */
11 14
12#include <linux/ctype.h> 15#include <linux/ctype.h>
13#include <linux/slab.h> 16#include <linux/slab.h>
14#include <linux/edac.h> 17#include <linux/edac.h>
15#include <linux/bug.h> 18#include <linux/bug.h>
19#include <linux/pm_runtime.h>
20#include <linux/uaccess.h>
16 21
17#include "edac_core.h" 22#include "edac_core.h"
18#include "edac_module.h" 23#include "edac_module.h"
19 24
20
 21/* MC EDAC Controls, settable by module parameter, and sysfs */ 25/* MC EDAC Controls, settable by module parameter, and sysfs */
22static int edac_mc_log_ue = 1; 26static int edac_mc_log_ue = 1;
23static int edac_mc_log_ce = 1; 27static int edac_mc_log_ce = 1;
@@ -78,6 +82,8 @@ module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
78 &edac_mc_poll_msec, 0644); 82 &edac_mc_poll_msec, 0644);
79MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds"); 83MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
80 84
85static struct device *mci_pdev;
86
81/* 87/*
82 * various constants for Memory Controllers 88 * various constants for Memory Controllers
83 */ 89 */
@@ -125,317 +131,526 @@ static const char *edac_caps[] = {
125 [EDAC_S16ECD16ED] = "S16ECD16ED" 131 [EDAC_S16ECD16ED] = "S16ECD16ED"
126}; 132};
127 133
128/* EDAC sysfs CSROW data structures and methods 134#ifdef CONFIG_EDAC_LEGACY_SYSFS
135/*
136 * EDAC sysfs CSROW data structures and methods
137 */
138
139#define to_csrow(k) container_of(k, struct csrow_info, dev)
140
141/*
142 * We need it to avoid namespace conflicts between the legacy API
143 * and the per-dimm/per-rank one
129 */ 144 */
145#define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
146 struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)
147
148struct dev_ch_attribute {
149 struct device_attribute attr;
150 int channel;
151};
152
153#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
154 struct dev_ch_attribute dev_attr_legacy_##_name = \
155 { __ATTR(_name, _mode, _show, _store), (_var) }
156
157#define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel)
130 158
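
DEVICE_CHANNEL bundles a device_attribute together with the channel number it serves, so a single show/store body can back all six chX_* files. For illustration, the ch0 ce_count declaration below expands to roughly:

        struct dev_ch_attribute dev_attr_legacy_ch0_ce_count = {
                __ATTR(ch0_ce_count, S_IRUGO | S_IWUSR,
                       channel_ce_count_show, NULL),
                0
        };

and the callback recovers the index with to_channel(mattr).
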
131/* Set of more default csrow<id> attribute show/store functions */ 159/* Set of more default csrow<id> attribute show/store functions */
132static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data, 160static ssize_t csrow_ue_count_show(struct device *dev,
133 int private) 161 struct device_attribute *mattr, char *data)
134{ 162{
163 struct csrow_info *csrow = to_csrow(dev);
164
135 return sprintf(data, "%u\n", csrow->ue_count); 165 return sprintf(data, "%u\n", csrow->ue_count);
136} 166}
137 167
138static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data, 168static ssize_t csrow_ce_count_show(struct device *dev,
139 int private) 169 struct device_attribute *mattr, char *data)
140{ 170{
171 struct csrow_info *csrow = to_csrow(dev);
172
141 return sprintf(data, "%u\n", csrow->ce_count); 173 return sprintf(data, "%u\n", csrow->ce_count);
142} 174}
143 175
144static ssize_t csrow_size_show(struct csrow_info *csrow, char *data, 176static ssize_t csrow_size_show(struct device *dev,
145 int private) 177 struct device_attribute *mattr, char *data)
146{ 178{
179 struct csrow_info *csrow = to_csrow(dev);
147 int i; 180 int i;
148 u32 nr_pages = 0; 181 u32 nr_pages = 0;
149 182
150 for (i = 0; i < csrow->nr_channels; i++) 183 for (i = 0; i < csrow->nr_channels; i++)
151 nr_pages += csrow->channels[i].dimm->nr_pages; 184 nr_pages += csrow->channels[i]->dimm->nr_pages;
152
153 return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages)); 185 return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
154} 186}
155 187
156static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data, 188static ssize_t csrow_mem_type_show(struct device *dev,
157 int private) 189 struct device_attribute *mattr, char *data)
158{ 190{
159 return sprintf(data, "%s\n", mem_types[csrow->channels[0].dimm->mtype]); 191 struct csrow_info *csrow = to_csrow(dev);
192
193 return sprintf(data, "%s\n", mem_types[csrow->channels[0]->dimm->mtype]);
160} 194}
161 195
162static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data, 196static ssize_t csrow_dev_type_show(struct device *dev,
163 int private) 197 struct device_attribute *mattr, char *data)
164{ 198{
165 return sprintf(data, "%s\n", dev_types[csrow->channels[0].dimm->dtype]); 199 struct csrow_info *csrow = to_csrow(dev);
200
201 return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
166} 202}
167 203
168static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data, 204static ssize_t csrow_edac_mode_show(struct device *dev,
169 int private) 205 struct device_attribute *mattr,
206 char *data)
170{ 207{
171 return sprintf(data, "%s\n", edac_caps[csrow->channels[0].dimm->edac_mode]); 208 struct csrow_info *csrow = to_csrow(dev);
209
210 return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
172} 211}
173 212
174/* show/store functions for DIMM Label attributes */ 213/* show/store functions for DIMM Label attributes */
175static ssize_t channel_dimm_label_show(struct csrow_info *csrow, 214static ssize_t channel_dimm_label_show(struct device *dev,
176 char *data, int channel) 215 struct device_attribute *mattr,
216 char *data)
177{ 217{
218 struct csrow_info *csrow = to_csrow(dev);
219 unsigned chan = to_channel(mattr);
220 struct rank_info *rank = csrow->channels[chan];
221
178 /* if field has not been initialized, there is nothing to send */ 222 /* if field has not been initialized, there is nothing to send */
179 if (!csrow->channels[channel].dimm->label[0]) 223 if (!rank->dimm->label[0])
180 return 0; 224 return 0;
181 225
182 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", 226 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
183 csrow->channels[channel].dimm->label); 227 rank->dimm->label);
184} 228}
185 229
186static ssize_t channel_dimm_label_store(struct csrow_info *csrow, 230static ssize_t channel_dimm_label_store(struct device *dev,
187 const char *data, 231 struct device_attribute *mattr,
188 size_t count, int channel) 232 const char *data, size_t count)
189{ 233{
234 struct csrow_info *csrow = to_csrow(dev);
235 unsigned chan = to_channel(mattr);
236 struct rank_info *rank = csrow->channels[chan];
237
190 ssize_t max_size = 0; 238 ssize_t max_size = 0;
191 239
192 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); 240 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
193 strncpy(csrow->channels[channel].dimm->label, data, max_size); 241 strncpy(rank->dimm->label, data, max_size);
194 csrow->channels[channel].dimm->label[max_size] = '\0'; 242 rank->dimm->label[max_size] = '\0';
195 243
196 return max_size; 244 return max_size;
197} 245}
198 246
199/* show function for dynamic chX_ce_count attribute */ 247/* show function for dynamic chX_ce_count attribute */
200static ssize_t channel_ce_count_show(struct csrow_info *csrow, 248static ssize_t channel_ce_count_show(struct device *dev,
201 char *data, int channel) 249 struct device_attribute *mattr, char *data)
202{ 250{
203 return sprintf(data, "%u\n", csrow->channels[channel].ce_count); 251 struct csrow_info *csrow = to_csrow(dev);
252 unsigned chan = to_channel(mattr);
253 struct rank_info *rank = csrow->channels[chan];
254
255 return sprintf(data, "%u\n", rank->ce_count);
204} 256}
205 257
 206/* csrow specific attribute structure */ 258/* csrow<id>/attribute files */
207struct csrowdev_attribute { 259DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL);
208 struct attribute attr; 260DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL);
209 ssize_t(*show) (struct csrow_info *, char *, int); 261DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL);
210 ssize_t(*store) (struct csrow_info *, const char *, size_t, int); 262DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL);
211 int private; 263DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL);
212}; 264DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL);
213 265
214#define to_csrow(k) container_of(k, struct csrow_info, kobj) 266/* default attributes of the CSROW<id> object */
215#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr) 267static struct attribute *csrow_attrs[] = {
268 &dev_attr_legacy_dev_type.attr,
269 &dev_attr_legacy_mem_type.attr,
270 &dev_attr_legacy_edac_mode.attr,
271 &dev_attr_legacy_size_mb.attr,
272 &dev_attr_legacy_ue_count.attr,
273 &dev_attr_legacy_ce_count.attr,
274 NULL,
275};
216 276
217/* Set of show/store higher level functions for default csrow attributes */ 277static struct attribute_group csrow_attr_grp = {
218static ssize_t csrowdev_show(struct kobject *kobj, 278 .attrs = csrow_attrs,
219 struct attribute *attr, char *buffer) 279};
220{
221 struct csrow_info *csrow = to_csrow(kobj);
222 struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
223 280
224 if (csrowdev_attr->show) 281static const struct attribute_group *csrow_attr_groups[] = {
225 return csrowdev_attr->show(csrow, 282 &csrow_attr_grp,
226 buffer, csrowdev_attr->private); 283 NULL
227 return -EIO; 284};
228}
229 285
230static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr, 286static void csrow_attr_release(struct device *dev)
231 const char *buffer, size_t count)
232{ 287{
233 struct csrow_info *csrow = to_csrow(kobj); 288 struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
234 struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
235
236 if (csrowdev_attr->store)
237 return csrowdev_attr->store(csrow,
238 buffer,
239 count, csrowdev_attr->private);
240 return -EIO;
241}
242 289
243static const struct sysfs_ops csrowfs_ops = { 290 edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
244 .show = csrowdev_show, 291 kfree(csrow);
245 .store = csrowdev_store 292}
246};
247 293
248#define CSROWDEV_ATTR(_name,_mode,_show,_store,_private) \ 294static struct device_type csrow_attr_type = {
249static struct csrowdev_attribute attr_##_name = { \ 295 .groups = csrow_attr_groups,
250 .attr = {.name = __stringify(_name), .mode = _mode }, \ 296 .release = csrow_attr_release,
251 .show = _show, \
252 .store = _store, \
253 .private = _private, \
254}; 297};
255 298
256/* default cwrow<id>/attribute files */ 299/*
257CSROWDEV_ATTR(size_mb, S_IRUGO, csrow_size_show, NULL, 0); 300 * possible dynamic channel DIMM Label attribute files
258CSROWDEV_ATTR(dev_type, S_IRUGO, csrow_dev_type_show, NULL, 0); 301 *
259CSROWDEV_ATTR(mem_type, S_IRUGO, csrow_mem_type_show, NULL, 0); 302 */
260CSROWDEV_ATTR(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL, 0);
261CSROWDEV_ATTR(ue_count, S_IRUGO, csrow_ue_count_show, NULL, 0);
262CSROWDEV_ATTR(ce_count, S_IRUGO, csrow_ce_count_show, NULL, 0);
263 303
264/* default attributes of the CSROW<id> object */ 304#define EDAC_NR_CHANNELS 6
265static struct csrowdev_attribute *default_csrow_attr[] = {
266 &attr_dev_type,
267 &attr_mem_type,
268 &attr_edac_mode,
269 &attr_size_mb,
270 &attr_ue_count,
271 &attr_ce_count,
272 NULL,
273};
274 305
275/* possible dynamic channel DIMM Label attribute files */ 306DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
276CSROWDEV_ATTR(ch0_dimm_label, S_IRUGO | S_IWUSR,
277 channel_dimm_label_show, channel_dimm_label_store, 0); 307 channel_dimm_label_show, channel_dimm_label_store, 0);
278CSROWDEV_ATTR(ch1_dimm_label, S_IRUGO | S_IWUSR, 308DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
279 channel_dimm_label_show, channel_dimm_label_store, 1); 309 channel_dimm_label_show, channel_dimm_label_store, 1);
280CSROWDEV_ATTR(ch2_dimm_label, S_IRUGO | S_IWUSR, 310DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR,
281 channel_dimm_label_show, channel_dimm_label_store, 2); 311 channel_dimm_label_show, channel_dimm_label_store, 2);
282CSROWDEV_ATTR(ch3_dimm_label, S_IRUGO | S_IWUSR, 312DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR,
283 channel_dimm_label_show, channel_dimm_label_store, 3); 313 channel_dimm_label_show, channel_dimm_label_store, 3);
284CSROWDEV_ATTR(ch4_dimm_label, S_IRUGO | S_IWUSR, 314DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
285 channel_dimm_label_show, channel_dimm_label_store, 4); 315 channel_dimm_label_show, channel_dimm_label_store, 4);
286CSROWDEV_ATTR(ch5_dimm_label, S_IRUGO | S_IWUSR, 316DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
287 channel_dimm_label_show, channel_dimm_label_store, 5); 317 channel_dimm_label_show, channel_dimm_label_store, 5);
288 318
289/* Total possible dynamic DIMM Label attribute file table */ 319/* Total possible dynamic DIMM Label attribute file table */
290static struct csrowdev_attribute *dynamic_csrow_dimm_attr[] = { 320static struct device_attribute *dynamic_csrow_dimm_attr[] = {
291 &attr_ch0_dimm_label, 321 &dev_attr_legacy_ch0_dimm_label.attr,
292 &attr_ch1_dimm_label, 322 &dev_attr_legacy_ch1_dimm_label.attr,
293 &attr_ch2_dimm_label, 323 &dev_attr_legacy_ch2_dimm_label.attr,
294 &attr_ch3_dimm_label, 324 &dev_attr_legacy_ch3_dimm_label.attr,
295 &attr_ch4_dimm_label, 325 &dev_attr_legacy_ch4_dimm_label.attr,
296 &attr_ch5_dimm_label 326 &dev_attr_legacy_ch5_dimm_label.attr
297}; 327};
298 328
299/* possible dynamic channel ce_count attribute files */ 329/* possible dynamic channel ce_count attribute files */
300CSROWDEV_ATTR(ch0_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 0); 330DEVICE_CHANNEL(ch0_ce_count, S_IRUGO | S_IWUSR,
301CSROWDEV_ATTR(ch1_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 1); 331 channel_ce_count_show, NULL, 0);
302CSROWDEV_ATTR(ch2_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 2); 332DEVICE_CHANNEL(ch1_ce_count, S_IRUGO | S_IWUSR,
303CSROWDEV_ATTR(ch3_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 3); 333 channel_ce_count_show, NULL, 1);
304CSROWDEV_ATTR(ch4_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 4); 334DEVICE_CHANNEL(ch2_ce_count, S_IRUGO | S_IWUSR,
305CSROWDEV_ATTR(ch5_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 5); 335 channel_ce_count_show, NULL, 2);
336DEVICE_CHANNEL(ch3_ce_count, S_IRUGO | S_IWUSR,
337 channel_ce_count_show, NULL, 3);
338DEVICE_CHANNEL(ch4_ce_count, S_IRUGO | S_IWUSR,
339 channel_ce_count_show, NULL, 4);
340DEVICE_CHANNEL(ch5_ce_count, S_IRUGO | S_IWUSR,
341 channel_ce_count_show, NULL, 5);
306 342
307/* Total possible dynamic ce_count attribute file table */ 343/* Total possible dynamic ce_count attribute file table */
308static struct csrowdev_attribute *dynamic_csrow_ce_count_attr[] = { 344static struct device_attribute *dynamic_csrow_ce_count_attr[] = {
309 &attr_ch0_ce_count, 345 &dev_attr_legacy_ch0_ce_count.attr,
310 &attr_ch1_ce_count, 346 &dev_attr_legacy_ch1_ce_count.attr,
311 &attr_ch2_ce_count, 347 &dev_attr_legacy_ch2_ce_count.attr,
312 &attr_ch3_ce_count, 348 &dev_attr_legacy_ch3_ce_count.attr,
313 &attr_ch4_ce_count, 349 &dev_attr_legacy_ch4_ce_count.attr,
314 &attr_ch5_ce_count 350 &dev_attr_legacy_ch5_ce_count.attr
315}; 351};
316 352
317#define EDAC_NR_CHANNELS 6 353static inline int nr_pages_per_csrow(struct csrow_info *csrow)
354{
355 int chan, nr_pages = 0;
356
357 for (chan = 0; chan < csrow->nr_channels; chan++)
358 nr_pages += csrow->channels[chan]->dimm->nr_pages;
359
360 return nr_pages;
361}
318 362
 319/* Create dynamic CHANNEL files, indexed by 'chan', under specified CSROW */ 363/* Create a CSROW object under specified edac_mc_device */
320static int edac_create_channel_files(struct kobject *kobj, int chan) 364static int edac_create_csrow_object(struct mem_ctl_info *mci,
365 struct csrow_info *csrow, int index)
321{ 366{
322 int err = -ENODEV; 367 int err, chan;
368
369 if (csrow->nr_channels >= EDAC_NR_CHANNELS)
370 return -ENODEV;
371
372 csrow->dev.type = &csrow_attr_type;
373 csrow->dev.bus = &mci->bus;
374 device_initialize(&csrow->dev);
375 csrow->dev.parent = &mci->dev;
376 dev_set_name(&csrow->dev, "csrow%d", index);
377 dev_set_drvdata(&csrow->dev, csrow);
323 378
324 if (chan >= EDAC_NR_CHANNELS) 379 edac_dbg(0, "creating (virtual) csrow node %s\n",
380 dev_name(&csrow->dev));
381
382 err = device_add(&csrow->dev);
383 if (err < 0)
325 return err; 384 return err;
326 385
327 /* create the DIMM label attribute file */ 386 for (chan = 0; chan < csrow->nr_channels; chan++) {
328 err = sysfs_create_file(kobj, 387 /* Only expose populated DIMMs */
329 (struct attribute *) 388 if (!csrow->channels[chan]->dimm->nr_pages)
330 dynamic_csrow_dimm_attr[chan]); 389 continue;
331 390 err = device_create_file(&csrow->dev,
332 if (!err) { 391 dynamic_csrow_dimm_attr[chan]);
333 /* create the CE Count attribute file */ 392 if (err < 0)
334 err = sysfs_create_file(kobj, 393 goto error;
335 (struct attribute *) 394 err = device_create_file(&csrow->dev,
336 dynamic_csrow_ce_count_attr[chan]); 395 dynamic_csrow_ce_count_attr[chan]);
337 } else { 396 if (err < 0) {
338 debugf1("%s() dimm labels and ce_count files created", 397 device_remove_file(&csrow->dev,
339 __func__); 398 dynamic_csrow_dimm_attr[chan]);
399 goto error;
400 }
401 }
402
403 return 0;
404
405error:
406 for (--chan; chan >= 0; chan--) {
407 device_remove_file(&csrow->dev,
408 dynamic_csrow_dimm_attr[chan]);
409 device_remove_file(&csrow->dev,
410 dynamic_csrow_ce_count_attr[chan]);
340 } 411 }
412 put_device(&csrow->dev);
341 413
342 return err; 414 return err;
343} 415}
344 416
 345/* No memory to release for this kobj */ 417/* Create a CSROW object under specified edac_mc_device */
346static void edac_csrow_instance_release(struct kobject *kobj) 418static int edac_create_csrow_objects(struct mem_ctl_info *mci)
347{ 419{
348 struct mem_ctl_info *mci; 420 int err, i, chan;
349 struct csrow_info *cs; 421 struct csrow_info *csrow;
422
423 for (i = 0; i < mci->nr_csrows; i++) {
424 csrow = mci->csrows[i];
425 if (!nr_pages_per_csrow(csrow))
426 continue;
427 err = edac_create_csrow_object(mci, mci->csrows[i], i);
428 if (err < 0)
429 goto error;
430 }
431 return 0;
350 432
351 debugf1("%s()\n", __func__); 433error:
434 for (--i; i >= 0; i--) {
435 csrow = mci->csrows[i];
436 if (!nr_pages_per_csrow(csrow))
437 continue;
438 for (chan = csrow->nr_channels - 1; chan >= 0; chan--) {
439 if (!csrow->channels[chan]->dimm->nr_pages)
440 continue;
441 device_remove_file(&csrow->dev,
442 dynamic_csrow_dimm_attr[chan]);
443 device_remove_file(&csrow->dev,
444 dynamic_csrow_ce_count_attr[chan]);
445 }
446 put_device(&mci->csrows[i]->dev);
447 }
352 448
353 cs = container_of(kobj, struct csrow_info, kobj); 449 return err;
354 mci = cs->mci; 450}
355 451
356 kobject_put(&mci->edac_mci_kobj); 452static void edac_delete_csrow_objects(struct mem_ctl_info *mci)
453{
454 int i, chan;
455 struct csrow_info *csrow;
456
457 for (i = mci->nr_csrows - 1; i >= 0; i--) {
458 csrow = mci->csrows[i];
459 if (!nr_pages_per_csrow(csrow))
460 continue;
461 for (chan = csrow->nr_channels - 1; chan >= 0; chan--) {
462 if (!csrow->channels[chan]->dimm->nr_pages)
463 continue;
464 edac_dbg(1, "Removing csrow %d channel %d sysfs nodes\n",
465 i, chan);
466 device_remove_file(&csrow->dev,
467 dynamic_csrow_dimm_attr[chan]);
468 device_remove_file(&csrow->dev,
469 dynamic_csrow_ce_count_attr[chan]);
470 }
471 put_device(&mci->csrows[i]->dev);
472 device_del(&mci->csrows[i]->dev);
473 }
357} 474}
475#endif
358 476
359/* the kobj_type instance for a CSROW */ 477/*
360static struct kobj_type ktype_csrow = { 478 * Per-dimm (or per-rank) devices
361 .release = edac_csrow_instance_release, 479 */
362 .sysfs_ops = &csrowfs_ops, 480
363 .default_attrs = (struct attribute **)default_csrow_attr, 481#define to_dimm(k) container_of(k, struct dimm_info, dev)
482
483/* show/store functions for DIMM Label attributes */
484static ssize_t dimmdev_location_show(struct device *dev,
485 struct device_attribute *mattr, char *data)
486{
487 struct dimm_info *dimm = to_dimm(dev);
488
489 return edac_dimm_info_location(dimm, data, PAGE_SIZE);
490}
491
492static ssize_t dimmdev_label_show(struct device *dev,
493 struct device_attribute *mattr, char *data)
494{
495 struct dimm_info *dimm = to_dimm(dev);
496
497 /* if field has not been initialized, there is nothing to send */
498 if (!dimm->label[0])
499 return 0;
500
501 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", dimm->label);
502}
503
504static ssize_t dimmdev_label_store(struct device *dev,
505 struct device_attribute *mattr,
506 const char *data,
507 size_t count)
508{
509 struct dimm_info *dimm = to_dimm(dev);
510
511 ssize_t max_size = 0;
512
513 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
514 strncpy(dimm->label, data, max_size);
515 dimm->label[max_size] = '\0';
516
517 return max_size;
518}
519
520static ssize_t dimmdev_size_show(struct device *dev,
521 struct device_attribute *mattr, char *data)
522{
523 struct dimm_info *dimm = to_dimm(dev);
524
525 return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages));
526}
527
528static ssize_t dimmdev_mem_type_show(struct device *dev,
529 struct device_attribute *mattr, char *data)
530{
531 struct dimm_info *dimm = to_dimm(dev);
532
533 return sprintf(data, "%s\n", mem_types[dimm->mtype]);
534}
535
536static ssize_t dimmdev_dev_type_show(struct device *dev,
537 struct device_attribute *mattr, char *data)
538{
539 struct dimm_info *dimm = to_dimm(dev);
540
541 return sprintf(data, "%s\n", dev_types[dimm->dtype]);
542}
543
544static ssize_t dimmdev_edac_mode_show(struct device *dev,
545 struct device_attribute *mattr,
546 char *data)
547{
548 struct dimm_info *dimm = to_dimm(dev);
549
550 return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]);
551}
552
553/* dimm/rank attribute files */
554static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR,
555 dimmdev_label_show, dimmdev_label_store);
556static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL);
557static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL);
558static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL);
559static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL);
560static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL);
561
562/* attributes of the dimm<id>/rank<id> object */
563static struct attribute *dimm_attrs[] = {
564 &dev_attr_dimm_label.attr,
565 &dev_attr_dimm_location.attr,
566 &dev_attr_size.attr,
567 &dev_attr_dimm_mem_type.attr,
568 &dev_attr_dimm_dev_type.attr,
569 &dev_attr_dimm_edac_mode.attr,
570 NULL,
364}; 571};
365 572
366/* Create a CSROW object under the specified edac_mc_device */ 573static struct attribute_group dimm_attr_grp = {
367static int edac_create_csrow_object(struct mem_ctl_info *mci, 574 .attrs = dimm_attrs,
368 struct csrow_info *csrow, int index) 575};
576
577static const struct attribute_group *dimm_attr_groups[] = {
578 &dimm_attr_grp,
579 NULL
580};
581
582static void dimm_attr_release(struct device *dev)
369{ 583{
370 struct kobject *kobj_mci = &mci->edac_mci_kobj; 584 struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);
371 struct kobject *kobj;
372 int chan;
373 int err;
374 585
375 /* generate ..../edac/mc/mc<id>/csrow<index> */ 586 edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev));
376 memset(&csrow->kobj, 0, sizeof(csrow->kobj)); 587 kfree(dimm);
377 csrow->mci = mci; /* include container up link */ 588}
378 589
379 /* bump the mci instance's kobject's ref count */ 590static struct device_type dimm_attr_type = {
380 kobj = kobject_get(&mci->edac_mci_kobj); 591 .groups = dimm_attr_groups,
381 if (!kobj) { 592 .release = dimm_attr_release,
382 err = -ENODEV; 593};
383 goto err_out; 594
384 } 595/* Create a DIMM object under a specified memory controller device */
596static int edac_create_dimm_object(struct mem_ctl_info *mci,
597 struct dimm_info *dimm,
598 int index)
599{
600 int err;
601 dimm->mci = mci;
385 602
386 /* Instantiate the csrow object */ 603 dimm->dev.type = &dimm_attr_type;
387 err = kobject_init_and_add(&csrow->kobj, &ktype_csrow, kobj_mci, 604 dimm->dev.bus = &mci->bus;
388 "csrow%d", index); 605 device_initialize(&dimm->dev);
389 if (err)
390 goto err_release_top_kobj;
391 606
392 /* At this point, to release a csrow kobj, one must 607 dimm->dev.parent = &mci->dev;
393 * call kobject_put() and let that teardown 608 if (mci->mem_is_per_rank)
394 * path do the release 609 dev_set_name(&dimm->dev, "rank%d", index);
395 */ 610 else
611 dev_set_name(&dimm->dev, "dimm%d", index);
612 dev_set_drvdata(&dimm->dev, dimm);
613 pm_runtime_forbid(&mci->dev);
396 614
397 /* Create the dynamic attribute files on this csrow, 615 err = device_add(&dimm->dev);
398 * namely, the DIMM labels and the channel ce_count
399 */
400 for (chan = 0; chan < csrow->nr_channels; chan++) {
401 err = edac_create_channel_files(&csrow->kobj, chan);
402 if (err) {
403 /* special case the unregister here */
404 kobject_put(&csrow->kobj);
405 goto err_out;
406 }
407 }
408 kobject_uevent(&csrow->kobj, KOBJ_ADD);
409 return 0;
410 616
411 /* error unwind stack */ 617 edac_dbg(0, "creating rank/dimm device %s\n", dev_name(&dimm->dev));
412err_release_top_kobj:
413 kobject_put(&mci->edac_mci_kobj);
414 618
415err_out:
416 return err; 619 return err;
417} 620}
418 621
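edac_create_dimm_object() follows the driver core's two-step registration: device_initialize() takes the first reference while the device is still invisible, device_add() publishes it, and the final put_device() after device_del() ends up in the device_type's release() (dimm_attr_release() above, which frees the dimm). A minimal sketch of the same lifecycle as a standalone module; every name is invented:

#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>

static struct device *demo_dev;

static void demo_release(struct device *dev)
{
	kfree(dev);			/* the release callback owns the final free */
}

static int __init demo_init(void)
{
	int err;

	demo_dev = kzalloc(sizeof(*demo_dev), GFP_KERNEL);
	if (!demo_dev)
		return -ENOMEM;

	device_initialize(demo_dev);	/* refcount held, not yet visible */
	demo_dev->release = demo_release;
	dev_set_name(demo_dev, "demo0");

	err = device_add(demo_dev);	/* publishes it in sysfs */
	if (err)
		put_device(demo_dev);	/* drops the ref; release() frees */
	return err;
}

static void __exit demo_exit(void)
{
	device_del(demo_dev);		/* unpublish */
	put_device(demo_dev);		/* last ref gone -> demo_release() */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");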
419/* default sysfs methods and data structures for the main MCI kobject */ 622/*
623 * Memory controller device
624 */
625
626#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
420 627
421static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci, 628static ssize_t mci_reset_counters_store(struct device *dev,
629 struct device_attribute *mattr,
422 const char *data, size_t count) 630 const char *data, size_t count)
423{ 631{
424 int row, chan; 632 struct mem_ctl_info *mci = to_mci(dev);
425 633 int cnt, row, chan, i;
426 mci->ue_noinfo_count = 0;
427 mci->ce_noinfo_count = 0;
428 mci->ue_mc = 0; 634 mci->ue_mc = 0;
429 mci->ce_mc = 0; 635 mci->ce_mc = 0;
636 mci->ue_noinfo_count = 0;
637 mci->ce_noinfo_count = 0;
430 638
431 for (row = 0; row < mci->nr_csrows; row++) { 639 for (row = 0; row < mci->nr_csrows; row++) {
432 struct csrow_info *ri = &mci->csrows[row]; 640 struct csrow_info *ri = mci->csrows[row];
433 641
434 ri->ue_count = 0; 642 ri->ue_count = 0;
435 ri->ce_count = 0; 643 ri->ce_count = 0;
436 644
437 for (chan = 0; chan < ri->nr_channels; chan++) 645 for (chan = 0; chan < ri->nr_channels; chan++)
438 ri->channels[chan].ce_count = 0; 646 ri->channels[chan]->ce_count = 0;
647 }
648
649 cnt = 1;
650 for (i = 0; i < mci->n_layers; i++) {
651 cnt *= mci->layers[i].size;
652 memset(mci->ce_per_layer[i], 0, cnt * sizeof(u32));
653 memset(mci->ue_per_layer[i], 0, cnt * sizeof(u32));
439 } 654 }
440 655
441 mci->start_time = jiffies; 656 mci->start_time = jiffies;
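The new loop at the end of the reset handler sizes each per-layer counter array as a running product: layer i keeps one u32 counter per tuple of indices through layers 0..i. A userspace sketch of the arithmetic, with invented layer sizes:

#include <stdio.h>

int main(void)
{
	int size[3] = { 2, 4, 8 };	/* e.g. channel, slot, rank counts (made up) */
	int i, cnt = 1;

	for (i = 0; i < 3; i++) {
		cnt *= size[i];		/* same accumulation as the memset loop */
		printf("layer %d: %d counters\n", i, cnt);
	}
	return 0;
}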
@@ -451,9 +666,11 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
451 * Negative value still means that an error has occurred while setting 666 * Negative value still means that an error has occurred while setting
452 * the scrub rate. 667 * the scrub rate.
453 */ 668 */
454static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci, 669static ssize_t mci_sdram_scrub_rate_store(struct device *dev,
670 struct device_attribute *mattr,
455 const char *data, size_t count) 671 const char *data, size_t count)
456{ 672{
673 struct mem_ctl_info *mci = to_mci(dev);
457 unsigned long bandwidth = 0; 674 unsigned long bandwidth = 0;
458 int new_bw = 0; 675 int new_bw = 0;
459 676
@@ -476,8 +693,11 @@ static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
476/* 693/*
477 * ->get_sdram_scrub_rate() return value semantics same as above. 694 * ->get_sdram_scrub_rate() return value semantics same as above.
478 */ 695 */
479static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data) 696static ssize_t mci_sdram_scrub_rate_show(struct device *dev,
697 struct device_attribute *mattr,
698 char *data)
480{ 699{
700 struct mem_ctl_info *mci = to_mci(dev);
481 int bandwidth = 0; 701 int bandwidth = 0;
482 702
483 if (!mci->get_sdram_scrub_rate) 703 if (!mci->get_sdram_scrub_rate)
@@ -493,45 +713,72 @@ static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
493} 713}
494 714
495/* default attribute files for the MCI object */ 715/* default attribute files for the MCI object */
496static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data) 716static ssize_t mci_ue_count_show(struct device *dev,
717 struct device_attribute *mattr,
718 char *data)
497{ 719{
720 struct mem_ctl_info *mci = to_mci(dev);
721
498 return sprintf(data, "%d\n", mci->ue_mc); 722 return sprintf(data, "%d\n", mci->ue_mc);
499} 723}
500 724
501static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data) 725static ssize_t mci_ce_count_show(struct device *dev,
726 struct device_attribute *mattr,
727 char *data)
502{ 728{
729 struct mem_ctl_info *mci = to_mci(dev);
730
503 return sprintf(data, "%d\n", mci->ce_mc); 731 return sprintf(data, "%d\n", mci->ce_mc);
504} 732}
505 733
506static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data) 734static ssize_t mci_ce_noinfo_show(struct device *dev,
735 struct device_attribute *mattr,
736 char *data)
507{ 737{
738 struct mem_ctl_info *mci = to_mci(dev);
739
508 return sprintf(data, "%d\n", mci->ce_noinfo_count); 740 return sprintf(data, "%d\n", mci->ce_noinfo_count);
509} 741}
510 742
511static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data) 743static ssize_t mci_ue_noinfo_show(struct device *dev,
744 struct device_attribute *mattr,
745 char *data)
512{ 746{
747 struct mem_ctl_info *mci = to_mci(dev);
748
513 return sprintf(data, "%d\n", mci->ue_noinfo_count); 749 return sprintf(data, "%d\n", mci->ue_noinfo_count);
514} 750}
515 751
516static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data) 752static ssize_t mci_seconds_show(struct device *dev,
753 struct device_attribute *mattr,
754 char *data)
517{ 755{
756 struct mem_ctl_info *mci = to_mci(dev);
757
518 return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ); 758 return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
519} 759}
520 760
521static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data) 761static ssize_t mci_ctl_name_show(struct device *dev,
762 struct device_attribute *mattr,
763 char *data)
522{ 764{
765 struct mem_ctl_info *mci = to_mci(dev);
766
523 return sprintf(data, "%s\n", mci->ctl_name); 767 return sprintf(data, "%s\n", mci->ctl_name);
524} 768}
525 769
526static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data) 770static ssize_t mci_size_mb_show(struct device *dev,
771 struct device_attribute *mattr,
772 char *data)
527{ 773{
774 struct mem_ctl_info *mci = to_mci(dev);
528 int total_pages = 0, csrow_idx, j; 775 int total_pages = 0, csrow_idx, j;
529 776
530 for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) { 777 for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
531 struct csrow_info *csrow = &mci->csrows[csrow_idx]; 778 struct csrow_info *csrow = mci->csrows[csrow_idx];
532 779
533 for (j = 0; j < csrow->nr_channels; j++) { 780 for (j = 0; j < csrow->nr_channels; j++) {
534 struct dimm_info *dimm = csrow->channels[j].dimm; 781 struct dimm_info *dimm = csrow->channels[j]->dimm;
535 782
536 total_pages += dimm->nr_pages; 783 total_pages += dimm->nr_pages;
537 } 784 }
@@ -540,361 +787,187 @@ static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
540 return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages)); 787 return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
541} 788}
542 789
543#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj) 790static ssize_t mci_max_location_show(struct device *dev,
544#define to_mcidev_attr(a) container_of(a,struct mcidev_sysfs_attribute,attr) 791 struct device_attribute *mattr,
545 792 char *data)
546/* MCI show/store functions for top most object */
547static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
548 char *buffer)
549{ 793{
550 struct mem_ctl_info *mem_ctl_info = to_mci(kobj); 794 struct mem_ctl_info *mci = to_mci(dev);
551 struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr); 795 int i;
552 796 char *p = data;
553 debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
554 797
555 if (mcidev_attr->show) 798 for (i = 0; i < mci->n_layers; i++) {
556 return mcidev_attr->show(mem_ctl_info, buffer); 799 p += sprintf(p, "%s %d ",
800 edac_layer_name[mci->layers[i].type],
801 mci->layers[i].size - 1);
802 }
557 803
558 return -EIO; 804 return p - data;
559} 805}
560 806
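mci_max_location_show() builds its output with the common p += sprintf(p, ...) cursor idiom; the value returned to sysfs is the total byte count, p - data. The same idiom standalone, with invented layer labels (the real strings come from edac_layer_name[]):

#include <stdio.h>

int main(void)
{
	char data[64];
	char *p = data;
	const char *layer[] = { "csrow", "channel" };
	int max[] = { 3, 1 };
	int i;

	for (i = 0; i < 2; i++)
		p += sprintf(p, "%s %d ", layer[i], max[i]);

	/* p - data is what the show() callback returns to sysfs */
	printf("%s(%d bytes)\n", data, (int)(p - data));
	return 0;
}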
561static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr, 807#ifdef CONFIG_EDAC_DEBUG
562 const char *buffer, size_t count) 808static ssize_t edac_fake_inject_write(struct file *file,
809 const char __user *data,
810 size_t count, loff_t *ppos)
563{ 811{
564 struct mem_ctl_info *mem_ctl_info = to_mci(kobj); 812 struct device *dev = file->private_data;
565 struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr); 813 struct mem_ctl_info *mci = to_mci(dev);
566 814 static enum hw_event_mc_err_type type;
567 debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info); 815 u16 errcount = mci->fake_inject_count;
568 816
569 if (mcidev_attr->store) 817 if (!errcount)
570 return mcidev_attr->store(mem_ctl_info, buffer, count); 818 errcount = 1;
819
820 type = mci->fake_inject_ue ? HW_EVENT_ERR_UNCORRECTED
821 : HW_EVENT_ERR_CORRECTED;
822
823 printk(KERN_DEBUG
824 "Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n",
825 errcount,
826 (type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE",
827 errcount > 1 ? "s" : "",
828 mci->fake_inject_layer[0],
829 mci->fake_inject_layer[1],
830 mci->fake_inject_layer[2]
831 );
832 edac_mc_handle_error(type, mci, errcount, 0, 0, 0,
833 mci->fake_inject_layer[0],
834 mci->fake_inject_layer[1],
835 mci->fake_inject_layer[2],
836 "FAKE ERROR", "for EDAC testing only");
571 837
572 return -EIO; 838 return count;
573} 839}
574 840
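The mcidev_show()/mcidev_store() trampoline deleted on the left is exactly the dispatch the driver core already performs for device attributes, which is what lets the conversion drop it. For comparison, an assumed, simplified shape of the core's version (not the verbatim kernel code):

static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct device_attribute *dev_attr =
		container_of(attr, struct device_attribute, attr);
	struct device *dev = container_of(kobj, struct device, kobj);

	if (dev_attr->show)
		return dev_attr->show(dev, dev_attr, buf);
	return -EIO;
}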
575/* Intermediate show/store table */ 841static int debugfs_open(struct inode *inode, struct file *file)
576static const struct sysfs_ops mci_ops = { 842{
577 .show = mcidev_show, 843 file->private_data = inode->i_private;
578 .store = mcidev_store 844 return 0;
579}; 845}
580 846
581#define MCIDEV_ATTR(_name,_mode,_show,_store) \ 847static const struct file_operations debug_fake_inject_fops = {
582static struct mcidev_sysfs_attribute mci_attr_##_name = { \ 848 .open = debugfs_open,
583 .attr = {.name = __stringify(_name), .mode = _mode }, \ 849 .write = edac_fake_inject_write,
584 .show = _show, \ 850 .llseek = generic_file_llseek,
585 .store = _store, \
586}; 851};
852#endif
587 853
588/* default Control file */ 854/* default Control file */
589MCIDEV_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store); 855DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
590 856
591/* default Attribute files */ 857/* default Attribute files */
592MCIDEV_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL); 858DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
593MCIDEV_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL); 859DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
594MCIDEV_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL); 860DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
595MCIDEV_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL); 861DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
596MCIDEV_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL); 862DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
597MCIDEV_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL); 863DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
598MCIDEV_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL); 864DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
865DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);
599 866
600/* memory scrubber attribute file */ 867/* memory scrubber attribute file */
601MCIDEV_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show, 868DEVICE_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show,
602 mci_sdram_scrub_rate_store); 869 mci_sdram_scrub_rate_store);
603 870
604static struct mcidev_sysfs_attribute *mci_attr[] = { 871static struct attribute *mci_attrs[] = {
605 &mci_attr_reset_counters, 872 &dev_attr_reset_counters.attr,
606 &mci_attr_mc_name, 873 &dev_attr_mc_name.attr,
607 &mci_attr_size_mb, 874 &dev_attr_size_mb.attr,
608 &mci_attr_seconds_since_reset, 875 &dev_attr_seconds_since_reset.attr,
609 &mci_attr_ue_noinfo_count, 876 &dev_attr_ue_noinfo_count.attr,
610 &mci_attr_ce_noinfo_count, 877 &dev_attr_ce_noinfo_count.attr,
611 &mci_attr_ue_count, 878 &dev_attr_ue_count.attr,
612 &mci_attr_ce_count, 879 &dev_attr_ce_count.attr,
613 &mci_attr_sdram_scrub_rate, 880 &dev_attr_sdram_scrub_rate.attr,
881 &dev_attr_max_location.attr,
614 NULL 882 NULL
615}; 883};
616 884
885static struct attribute_group mci_attr_grp = {
886 .attrs = mci_attrs,
887};
617 888
618/* 889static const struct attribute_group *mci_attr_groups[] = {
619 * Release of a MC controlling instance 890 &mci_attr_grp,
620 * 891 NULL
621 * each MC control instance has the following resources upon entry: 892};
622 * a) a ref count on the top memctl kobj
623 * b) a ref count on this module
624 *
625 * this function must decrement those ref counts and then
626 * issue a free on the instance's memory
627 */
628static void edac_mci_control_release(struct kobject *kobj)
629{
630 struct mem_ctl_info *mci;
631
632 mci = to_mci(kobj);
633 893
634 debugf0("%s() mci instance idx=%d releasing\n", __func__, mci->mc_idx); 894static void mci_attr_release(struct device *dev)
895{
896 struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
635 897
636 /* decrement the module ref count */ 898 edac_dbg(1, "Releasing mci device %s\n", dev_name(dev));
637 module_put(mci->owner); 899 kfree(mci);
638} 900}
639 901
640static struct kobj_type ktype_mci = { 902static struct device_type mci_attr_type = {
641 .release = edac_mci_control_release, 903 .groups = mci_attr_groups,
642 .sysfs_ops = &mci_ops, 904 .release = mci_attr_release,
643 .default_attrs = (struct attribute **)mci_attr,
644}; 905};
645 906
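This grouping pattern repeats for csrows, dimms and the mci itself: attributes are collected into an attribute_group, the NULL-terminated group list hangs off a device_type, and the driver core then creates and removes every file inside device_add()/device_del(), so nothing can race userspace between device creation and attribute creation. The skeleton reduced to a single read-only attribute, all names invented:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sysfs.h>

static ssize_t value_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* placeholder value */
}
static DEVICE_ATTR(value, S_IRUGO, value_show, NULL);

static struct attribute *demo_attrs[] = {
	&dev_attr_value.attr,
	NULL,				/* array must be NULL-terminated */
};

static struct attribute_group demo_attr_grp = {
	.attrs = demo_attrs,
};

static const struct attribute_group *demo_attr_groups[] = {
	&demo_attr_grp,
	NULL,
};

static void demo_release(struct device *dev)
{
	kfree(dev);
}

static struct device_type demo_attr_type = {
	.groups  = demo_attr_groups,	/* files created/removed by the core */
	.release = demo_release,
};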
646/* EDAC memory controller sysfs kset: 907#ifdef CONFIG_EDAC_DEBUG
647 * /sys/devices/system/edac/mc 908static struct dentry *edac_debugfs;
648 */
649static struct kset *mc_kset;
650 909
651/* 910int __init edac_debugfs_init(void)
652 * edac_mc_register_sysfs_main_kobj
653 *
654 * sets up and registers the main kobject for each mci
655 */
656int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci)
657{ 911{
658 struct kobject *kobj_mci; 912 edac_debugfs = debugfs_create_dir("edac", NULL);
659 int err; 913 if (IS_ERR(edac_debugfs)) {
660 914 edac_debugfs = NULL;
661 debugf1("%s()\n", __func__); 915 return -ENOMEM;
662
663 kobj_mci = &mci->edac_mci_kobj;
664
665 /* Init the mci's kobject */
666 memset(kobj_mci, 0, sizeof(*kobj_mci));
667
668 /* Record which module 'owns' this control structure
669 * and bump the ref count of the module
670 */
671 mci->owner = THIS_MODULE;
672
673 /* bump ref count on this module */
674 if (!try_module_get(mci->owner)) {
675 err = -ENODEV;
676 goto fail_out;
677 }
678
679 /* this instance becomes part of the mc_kset */
680 kobj_mci->kset = mc_kset;
681
682 /* register the mc<id> kobject to the mc_kset */
683 err = kobject_init_and_add(kobj_mci, &ktype_mci, NULL,
684 "mc%d", mci->mc_idx);
685 if (err) {
686 debugf1("%s() Failed to register '.../edac/mc%d'\n",
687 __func__, mci->mc_idx);
688 goto kobj_reg_fail;
689 } 916 }
690 kobject_uevent(kobj_mci, KOBJ_ADD);
691
692 /* At this point, to 'free' the control struct,
693 * edac_mc_unregister_sysfs_main_kobj() must be used
694 */
695
696 debugf1("%s() Registered '.../edac/mc%d' kobject\n",
697 __func__, mci->mc_idx);
698
699 return 0; 917 return 0;
700
701 /* Error exit stack */
702
703kobj_reg_fail:
704 module_put(mci->owner);
705
706fail_out:
707 return err;
708}
709
710/*
711 * edac_mc_unregister_sysfs_main_kobj
712 *
713 * tears down the main mci kobject from the mc_kset
714 */
715void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci)
716{
717 debugf1("%s()\n", __func__);
718
719 /* delete the kobj from the mc_kset */
720 kobject_put(&mci->edac_mci_kobj);
721}
722
723#define EDAC_DEVICE_SYMLINK "device"
724
725#define grp_to_mci(k) (container_of(k, struct mcidev_sysfs_group_kobj, kobj)->mci)
726
727/* MCI show/store functions for top most object */
728static ssize_t inst_grp_show(struct kobject *kobj, struct attribute *attr,
729 char *buffer)
730{
731 struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj);
732 struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
733
734 debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
735
736 if (mcidev_attr->show)
737 return mcidev_attr->show(mem_ctl_info, buffer);
738
739 return -EIO;
740} 918}
741 919
742static ssize_t inst_grp_store(struct kobject *kobj, struct attribute *attr, 920void __exit edac_debugfs_exit(void)
743 const char *buffer, size_t count)
744{ 921{
745 struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj); 922 debugfs_remove(edac_debugfs);
746 struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
747
748 debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
749
750 if (mcidev_attr->store)
751 return mcidev_attr->store(mem_ctl_info, buffer, count);
752
753 return -EIO;
754} 923}
755 924
756/* No memory to release for this kobj */ 925int edac_create_debug_nodes(struct mem_ctl_info *mci)
757static void edac_inst_grp_release(struct kobject *kobj)
758{ 926{
759 struct mcidev_sysfs_group_kobj *grp; 927 struct dentry *d, *parent;
760 struct mem_ctl_info *mci; 928 char name[80];
761 929 int i;
762 debugf1("%s()\n", __func__);
763
764 grp = container_of(kobj, struct mcidev_sysfs_group_kobj, kobj);
765 mci = grp->mci;
766}
767
768/* Intermediate show/store table */
769static struct sysfs_ops inst_grp_ops = {
770 .show = inst_grp_show,
771 .store = inst_grp_store
772};
773
774/* the kobj_type instance for a instance group */
775static struct kobj_type ktype_inst_grp = {
776 .release = edac_inst_grp_release,
777 .sysfs_ops = &inst_grp_ops,
778};
779
780 930
781/* 931 if (!edac_debugfs)
782 * edac_create_mci_instance_attributes 932 return -ENODEV;
783 * create MC driver specific attributes below a specified kobj
784 * This routine calls itself recursively, in order to create an entire
785 * object tree.
786 */
787static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
788 const struct mcidev_sysfs_attribute *sysfs_attrib,
789 struct kobject *kobj)
790{
791 int err;
792 933
793 debugf4("%s()\n", __func__); 934 d = debugfs_create_dir(mci->dev.kobj.name, edac_debugfs);
794 935 if (!d)
795 while (sysfs_attrib) { 936 return -ENOMEM;
796 debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib); 937 parent = d;
797 if (sysfs_attrib->grp) { 938
798 struct mcidev_sysfs_group_kobj *grp_kobj; 939 for (i = 0; i < mci->n_layers; i++) {
799 940 sprintf(name, "fake_inject_%s",
800 grp_kobj = kzalloc(sizeof(*grp_kobj), GFP_KERNEL); 941 edac_layer_name[mci->layers[i].type]);
801 if (!grp_kobj) 942 d = debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent,
802 return -ENOMEM; 943 &mci->fake_inject_layer[i]);
803 944 if (!d)
804 grp_kobj->grp = sysfs_attrib->grp; 945 goto nomem;
805 grp_kobj->mci = mci;
806 list_add_tail(&grp_kobj->list, &mci->grp_kobj_list);
807
808 debugf0("%s() grp %s, mci %p\n", __func__,
809 sysfs_attrib->grp->name, mci);
810
811 err = kobject_init_and_add(&grp_kobj->kobj,
812 &ktype_inst_grp,
813 &mci->edac_mci_kobj,
814 sysfs_attrib->grp->name);
815 if (err < 0) {
816 printk(KERN_ERR "kobject_init_and_add failed: %d\n", err);
817 return err;
818 }
819 err = edac_create_mci_instance_attributes(mci,
820 grp_kobj->grp->mcidev_attr,
821 &grp_kobj->kobj);
822
823 if (err < 0)
824 return err;
825 } else if (sysfs_attrib->attr.name) {
826 debugf4("%s() file %s\n", __func__,
827 sysfs_attrib->attr.name);
828
829 err = sysfs_create_file(kobj, &sysfs_attrib->attr);
830 if (err < 0) {
831 printk(KERN_ERR "sysfs_create_file failed: %d\n", err);
832 return err;
833 }
834 } else
835 break;
836
837 sysfs_attrib++;
838 } 946 }
839 947
840 return 0; 948 d = debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent,
841} 949 &mci->fake_inject_ue);
950 if (!d)
951 goto nomem;
842 952
843/* 953 d = debugfs_create_u16("fake_inject_count", S_IRUGO | S_IWUSR, parent,
844 * edac_remove_mci_instance_attributes 954 &mci->fake_inject_count);
845 * remove MC driver specific attributes at the topmost level 955 if (!d)
846 * directory of this mci instance. 956 goto nomem;
847 */
848static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
849 const struct mcidev_sysfs_attribute *sysfs_attrib,
850 struct kobject *kobj, int count)
851{
852 struct mcidev_sysfs_group_kobj *grp_kobj, *tmp;
853 957
854 debugf1("%s()\n", __func__); 958 d = debugfs_create_file("fake_inject", S_IWUSR, parent,
855 959 &mci->dev,
856 /* 960 &debug_fake_inject_fops);
857 * loop if there are attributes and until we hit a NULL entry 961 if (!d)
858 * Remove first all the attributes 962 goto nomem;
859 */
860 while (sysfs_attrib) {
861 debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
862 if (sysfs_attrib->grp) {
863 debugf4("%s() seeking for group %s\n",
864 __func__, sysfs_attrib->grp->name);
865 list_for_each_entry(grp_kobj,
866 &mci->grp_kobj_list, list) {
867 debugf4("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp);
868 if (grp_kobj->grp == sysfs_attrib->grp) {
869 edac_remove_mci_instance_attributes(mci,
870 grp_kobj->grp->mcidev_attr,
871 &grp_kobj->kobj, count + 1);
872 debugf4("%s() group %s\n", __func__,
873 sysfs_attrib->grp->name);
874 kobject_put(&grp_kobj->kobj);
875 }
876 }
877 debugf4("%s() end of seeking for group %s\n",
878 __func__, sysfs_attrib->grp->name);
879 } else if (sysfs_attrib->attr.name) {
880 debugf4("%s() file %s\n", __func__,
881 sysfs_attrib->attr.name);
882 sysfs_remove_file(kobj, &sysfs_attrib->attr);
883 } else
884 break;
885 sysfs_attrib++;
886 }
887 963
888 /* Remove the group objects */ 964 mci->debugfs = parent;
889 if (count) 965 return 0;
890 return; 966nomem:
891 list_for_each_entry_safe(grp_kobj, tmp, 967 debugfs_remove(mci->debugfs);
892 &mci->grp_kobj_list, list) { 968 return -ENOMEM;
893 list_del(&grp_kobj->list);
894 kfree(grp_kobj);
895 }
896} 969}
897 970#endif
898 971
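Once edac_create_debug_nodes() has populated the per-controller directory, injection can be driven entirely from userspace. A hypothetical helper: the node names and the edac/mc<N> layout come from this patch, while the debugfs mount point, the controller number and the written values are assumptions.

#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	const char *mc = "/sys/kernel/debug/edac/mc0";	/* assumed path */
	char path[256];

	snprintf(path, sizeof(path), "%s/fake_inject_count", mc);
	write_str(path, "2");		/* inject two errors */

	snprintf(path, sizeof(path), "%s/fake_inject_ue", mc);
	write_str(path, "0");		/* correctable, not uncorrectable */

	/* any write to fake_inject triggers edac_mc_handle_error() */
	snprintf(path, sizeof(path), "%s/fake_inject", mc);
	return write_str(path, "1") ? 1 : 0;
}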
899/* 972/*
900 * Create a new Memory Controller kobject instance, 973 * Create a new Memory Controller kobject instance,
@@ -906,77 +979,87 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
906 */ 979 */
907int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) 980int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
908{ 981{
909 int i, j; 982 int i, err;
910 int err;
911 struct csrow_info *csrow;
912 struct kobject *kobj_mci = &mci->edac_mci_kobj;
913 983
914 debugf0("%s() idx=%d\n", __func__, mci->mc_idx); 984 /*
915 985 * The memory controller needs its own bus, in order to avoid
916 INIT_LIST_HEAD(&mci->grp_kobj_list); 986 * namespace conflicts at /sys/bus/edac.
987 */
988 mci->bus.name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
989 if (!mci->bus.name)
990 return -ENOMEM;
991 edac_dbg(0, "creating bus %s\n", mci->bus.name);
992 err = bus_register(&mci->bus);
993 if (err < 0)
994 return err;
917 995
918 /* create a symlink for the device */ 996 /* get the /sys/devices/system/edac subsys reference */
919 err = sysfs_create_link(kobj_mci, &mci->dev->kobj, 997 mci->dev.type = &mci_attr_type;
920 EDAC_DEVICE_SYMLINK); 998 device_initialize(&mci->dev);
921 if (err) { 999
922 debugf1("%s() failure to create symlink\n", __func__); 1000 mci->dev.parent = mci_pdev;
923 goto fail0; 1001 mci->dev.bus = &mci->bus;
1002 dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
1003 dev_set_drvdata(&mci->dev, mci);
1004 pm_runtime_forbid(&mci->dev);
1005
1006 edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
1007 err = device_add(&mci->dev);
1008 if (err < 0) {
1009 bus_unregister(&mci->bus);
1010 kfree(mci->bus.name);
1011 return err;
924 } 1012 }
925 1013
926 /* If the low level driver desires some attributes, 1014 /*
927 * then create them now for the driver. 1015 * Create the dimm/rank devices
928 */ 1016 */
929 if (mci->mc_driver_sysfs_attributes) { 1017 for (i = 0; i < mci->tot_dimms; i++) {
930 err = edac_create_mci_instance_attributes(mci, 1018 struct dimm_info *dimm = mci->dimms[i];
931 mci->mc_driver_sysfs_attributes, 1019 /* Only expose populated DIMMs */
932 &mci->edac_mci_kobj); 1020 if (dimm->nr_pages == 0)
1021 continue;
1022#ifdef CONFIG_EDAC_DEBUG
1023 edac_dbg(1, "creating dimm%d, located at ", i);
1024 if (edac_debug_level >= 1) {
1025 int lay;
1026 for (lay = 0; lay < mci->n_layers; lay++)
1027 printk(KERN_CONT "%s %d ",
1028 edac_layer_name[mci->layers[lay].type],
1029 dimm->location[lay]);
1030 printk(KERN_CONT "\n");
1031 }
1032#endif
1033 err = edac_create_dimm_object(mci, dimm, i);
933 if (err) { 1034 if (err) {
934 debugf1("%s() failure to create mci attributes\n", 1035 edac_dbg(1, "failure: create dimm %d obj\n", i);
935 __func__); 1036 goto fail;
936 goto fail0;
937 } 1037 }
938 } 1038 }
939 1039
940 /* Make directories for each CSROW object under the mc<id> kobject 1040#ifdef CONFIG_EDAC_LEGACY_SYSFS
941 */ 1041 err = edac_create_csrow_objects(mci);
942 for (i = 0; i < mci->nr_csrows; i++) { 1042 if (err < 0)
943 int nr_pages = 0; 1043 goto fail;
944 1044#endif
945 csrow = &mci->csrows[i];
946 for (j = 0; j < csrow->nr_channels; j++)
947 nr_pages += csrow->channels[j].dimm->nr_pages;
948
949 if (nr_pages > 0) {
950 err = edac_create_csrow_object(mci, csrow, i);
951 if (err) {
952 debugf1("%s() failure: create csrow %d obj\n",
953 __func__, i);
954 goto fail1;
955 }
956 }
957 }
958 1045
1046#ifdef CONFIG_EDAC_DEBUG
1047 edac_create_debug_nodes(mci);
1048#endif
959 return 0; 1049 return 0;
960 1050
961fail1: 1051fail:
962 for (i--; i >= 0; i--) { 1052 for (i--; i >= 0; i--) {
963 int nr_pages = 0; 1053 struct dimm_info *dimm = mci->dimms[i];
964 1054 if (dimm->nr_pages == 0)
965 csrow = &mci->csrows[i]; 1055 continue;
966 for (j = 0; j < csrow->nr_channels; j++) 1056 put_device(&dimm->dev);
967 nr_pages += csrow->channels[j].dimm->nr_pages; 1057 device_del(&dimm->dev);
968 if (nr_pages > 0)
969 kobject_put(&mci->csrows[i].kobj);
970 } 1058 }
971 1059 put_device(&mci->dev);
972 /* remove the mci instance's attributes, if any */ 1060 device_del(&mci->dev);
973 edac_remove_mci_instance_attributes(mci, 1061 bus_unregister(&mci->bus);
974 mci->mc_driver_sysfs_attributes, &mci->edac_mci_kobj, 0); 1062 kfree(mci->bus.name);
975
976 /* remove the symlink */
977 sysfs_remove_link(kobj_mci, EDAC_DEVICE_SYMLINK);
978
979fail0:
980 return err; 1063 return err;
981} 1064}
982 1065
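For orientation, the creation path above should yield a hierarchy along these lines. This is a sketch assembled from the attribute names in this patch, not a dump from a live system; the mc0/dimm0 indices are assumptions:

mc0/                    (child of the "mc" parent device, /sys/devices/mc)
|-- mc_name  size_mb  seconds_since_reset  max_location
|-- ce_count  ue_count  ce_noinfo_count  ue_noinfo_count
|-- reset_counters  sdram_scrub_rate
|-- dimm0/              (named rank<N> instead when mem_is_per_rank is set)
|   `-- dimm_label  dimm_location  size  dimm_mem_type  dimm_dev_type  dimm_edac_mode
`-- csrow0/             (legacy layout, only with CONFIG_EDAC_LEGACY_SYSFS)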
@@ -985,98 +1068,84 @@ fail0:
985 */ 1068 */
986void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) 1069void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
987{ 1070{
988 struct csrow_info *csrow; 1071 int i;
989 int i, j;
990
991 debugf0("%s()\n", __func__);
992
993 /* remove all csrow kobjects */
994 debugf4("%s() unregister this mci kobj\n", __func__);
995 for (i = 0; i < mci->nr_csrows; i++) {
996 int nr_pages = 0;
997
998 csrow = &mci->csrows[i];
999 for (j = 0; j < csrow->nr_channels; j++)
1000 nr_pages += csrow->channels[j].dimm->nr_pages;
1001 if (nr_pages > 0) {
1002 debugf0("%s() unreg csrow-%d\n", __func__, i);
1003 kobject_put(&mci->csrows[i].kobj);
1004 }
1005 }
1006 1072
1007 /* remove this mci instance's attributes */ 1073 edac_dbg(0, "\n");
1008 if (mci->mc_driver_sysfs_attributes) { 1074
1009 debugf4("%s() unregister mci private attributes\n", __func__); 1075#ifdef CONFIG_EDAC_DEBUG
1010 edac_remove_mci_instance_attributes(mci, 1076 debugfs_remove(mci->debugfs);
1011 mci->mc_driver_sysfs_attributes, 1077#endif
1012 &mci->edac_mci_kobj, 0); 1078#ifdef CONFIG_EDAC_LEGACY_SYSFS
1079 edac_delete_csrow_objects(mci);
1080#endif
1081
1082 for (i = 0; i < mci->tot_dimms; i++) {
1083 struct dimm_info *dimm = mci->dimms[i];
1084 if (dimm->nr_pages == 0)
1085 continue;
1086 edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev));
1087 put_device(&dimm->dev);
1088 device_del(&dimm->dev);
1013 } 1089 }
1014
1015 /* remove the symlink */
1016 debugf4("%s() remove_link\n", __func__);
1017 sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
1018
1019 /* unregister this instance's kobject */
1020 debugf4("%s() remove_mci_instance\n", __func__);
1021 kobject_put(&mci->edac_mci_kobj);
1022} 1090}
1023 1091
1092void edac_unregister_sysfs(struct mem_ctl_info *mci)
1093{
1094 edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
1095 put_device(&mci->dev);
1096 device_del(&mci->dev);
1097 bus_unregister(&mci->bus);
1098 kfree(mci->bus.name);
1099}
1024 1100
1101static void mc_attr_release(struct device *dev)
1102{
1103 /*
1104 * There's no container structure here, as this is just the mci
1105 * parent device, used to create the /sys/devices/mc sysfs node.
1106 * So, there are no attributes on it.
1107 */
1108 edac_dbg(1, "Releasing device %s\n", dev_name(dev));
1109 kfree(dev);
1110}
1025 1111
1026 1112static struct device_type mc_attr_type = {
1113 .release = mc_attr_release,
1114};
1027/* 1115/*
1028 * edac_setup_sysfs_mc_kset(void) 1116 * Init/exit code for the module. Basically, creates/removes /sys/devices/mc
1029 *
1030 * Initialize the mc_kset for the 'mc' entry
1031 * This requires creating the top 'mc' directory with a kset
1032 * and its controls/attributes.
1033 *
1034 * To this 'mc' kset, instance 'mci' will be grouped as children.
1035 *
1036 * Return: 0 SUCCESS
1037 * !0 FAILURE error code
1038 */ 1117 */
1039int edac_sysfs_setup_mc_kset(void) 1118int __init edac_mc_sysfs_init(void)
1040{ 1119{
1041 int err = -EINVAL;
1042 struct bus_type *edac_subsys; 1120 struct bus_type *edac_subsys;
1043 1121 int err;
1044 debugf1("%s()\n", __func__);
1045 1122
1046 /* get the /sys/devices/system/edac subsys reference */ 1123 /* get the /sys/devices/system/edac subsys reference */
1047 edac_subsys = edac_get_sysfs_subsys(); 1124 edac_subsys = edac_get_sysfs_subsys();
1048 if (edac_subsys == NULL) { 1125 if (edac_subsys == NULL) {
1049 debugf1("%s() no edac_subsys error=%d\n", __func__, err); 1126 edac_dbg(1, "no edac_subsys\n");
1050 goto fail_out; 1127 return -EINVAL;
1051 } 1128 }
1052 1129
1053 /* Init the MC's kobject */ 1130 mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL);
1054 mc_kset = kset_create_and_add("mc", NULL, &edac_subsys->dev_root->kobj);
1055 if (!mc_kset) {
1056 err = -ENOMEM;
1057 debugf1("%s() Failed to register '.../edac/mc'\n", __func__);
1058 goto fail_kset;
1059 }
1060 1131
1061 debugf1("%s() Registered '.../edac/mc' kobject\n", __func__); 1132 mci_pdev->bus = edac_subsys;
1133 mci_pdev->type = &mc_attr_type;
1134 device_initialize(mci_pdev);
1135 dev_set_name(mci_pdev, "mc");
1062 1136
1063 return 0; 1137 err = device_add(mci_pdev);
1138 if (err < 0)
1139 return err;
1064 1140
1065fail_kset: 1141 edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
1066 edac_put_sysfs_subsys();
1067 1142
1068fail_out: 1143 return 0;
1069 return err;
1070} 1144}
1071 1145
1072/* 1146void __exit edac_mc_sysfs_exit(void)
1073 * edac_sysfs_teardown_mc_kset
1074 *
1075 * deconstruct the mc_kset for memory controllers
1076 */
1077void edac_sysfs_teardown_mc_kset(void)
1078{ 1147{
1079 kset_unregister(mc_kset); 1148 put_device(mci_pdev);
1149 device_del(mci_pdev);
1080 edac_put_sysfs_subsys(); 1150 edac_put_sysfs_subsys();
1081} 1151}
1082
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
index 5ddaa86d6a6e..58a28d838f37 100644
--- a/drivers/edac/edac_module.c
+++ b/drivers/edac/edac_module.c
@@ -15,7 +15,7 @@
15#include "edac_core.h" 15#include "edac_core.h"
16#include "edac_module.h" 16#include "edac_module.h"
17 17
18#define EDAC_VERSION "Ver: 2.1.0" 18#define EDAC_VERSION "Ver: 3.0.0"
19 19
20#ifdef CONFIG_EDAC_DEBUG 20#ifdef CONFIG_EDAC_DEBUG
21/* Values of 0 to 4 will generate output */ 21/* Values of 0 to 4 will generate output */
@@ -90,26 +90,21 @@ static int __init edac_init(void)
90 */ 90 */
91 edac_pci_clear_parity_errors(); 91 edac_pci_clear_parity_errors();
92 92
93 /* 93 err = edac_mc_sysfs_init();
94 * now set up the mc_kset under the edac class object
95 */
96 err = edac_sysfs_setup_mc_kset();
97 if (err) 94 if (err)
98 goto error; 95 goto error;
99 96
97 edac_debugfs_init();
98
100 /* Setup/Initialize the workq for this core */ 99 /* Setup/Initialize the workq for this core */
101 err = edac_workqueue_setup(); 100 err = edac_workqueue_setup();
102 if (err) { 101 if (err) {
103 edac_printk(KERN_ERR, EDAC_MC, "init WorkQueue failure\n"); 102 edac_printk(KERN_ERR, EDAC_MC, "init WorkQueue failure\n");
104 goto workq_fail; 103 goto error;
105 } 104 }
106 105
107 return 0; 106 return 0;
108 107
109 /* Error teardown stack */
110workq_fail:
111 edac_sysfs_teardown_mc_kset();
112
113error: 108error:
114 return err; 109 return err;
115} 110}
@@ -120,11 +115,12 @@ error:
120 */ 115 */
121static void __exit edac_exit(void) 116static void __exit edac_exit(void)
122{ 117{
123 debugf0("%s()\n", __func__); 118 edac_dbg(0, "\n");
124 119
125 /* tear down the various subsystems */ 120 /* tear down the various subsystems */
126 edac_workqueue_teardown(); 121 edac_workqueue_teardown();
127 edac_sysfs_teardown_mc_kset(); 122 edac_mc_sysfs_exit();
123 edac_debugfs_exit();
128} 124}
129 125
130/* 126/*
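The debugf0()..debugf4() to edac_dbg(level, ...) conversion running through the remaining files drops the explicit "%s()", __func__ arguments because the replacement macro prepends the function name itself. Its definition is not part of this hunk; roughly (assumed, simplified shape, the in-tree definition may differ in detail):

#define edac_dbg(level, fmt, ...)					\
do {									\
	if (edac_debug_level >= (level))				\
		printk(KERN_DEBUG "EDAC DEBUG: %s: " fmt,		\
		       __func__, ##__VA_ARGS__);			\
} while (0)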
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 0ea7d14cb930..3d139c6e7fe3 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -19,12 +19,12 @@
19 * 19 *
20 * edac_mc objects 20 * edac_mc objects
21 */ 21 */
22extern int edac_sysfs_setup_mc_kset(void); 22 /* on edac_mc_sysfs.c */
23extern void edac_sysfs_teardown_mc_kset(void); 23int edac_mc_sysfs_init(void);
24extern int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci); 24void edac_mc_sysfs_exit(void);
25extern void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci);
26extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci); 25extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci);
27extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci); 26extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci);
27void edac_unregister_sysfs(struct mem_ctl_info *mci);
28extern int edac_get_log_ue(void); 28extern int edac_get_log_ue(void);
29extern int edac_get_log_ce(void); 29extern int edac_get_log_ce(void);
30extern int edac_get_panic_on_ue(void); 30extern int edac_get_panic_on_ue(void);
@@ -34,6 +34,10 @@ extern int edac_mc_get_panic_on_ue(void);
34extern int edac_get_poll_msec(void); 34extern int edac_get_poll_msec(void);
35extern int edac_mc_get_poll_msec(void); 35extern int edac_mc_get_poll_msec(void);
36 36
37unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
38 unsigned len);
39
40 /* on edac_device.c */
37extern int edac_device_register_sysfs_main_kobj( 41extern int edac_device_register_sysfs_main_kobj(
38 struct edac_device_ctl_info *edac_dev); 42 struct edac_device_ctl_info *edac_dev);
39extern void edac_device_unregister_sysfs_main_kobj( 43extern void edac_device_unregister_sysfs_main_kobj(
@@ -53,6 +57,20 @@ extern void edac_mc_reset_delay_period(int value);
53extern void *edac_align_ptr(void **p, unsigned size, int n_elems); 57extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
54 58
55/* 59/*
60 * EDAC debugfs functions
61 */
62#ifdef CONFIG_EDAC_DEBUG
63int edac_debugfs_init(void);
64void edac_debugfs_exit(void);
65#else
66static inline int edac_debugfs_init(void)
67{
68 return -ENODEV;
69}
70static inline void edac_debugfs_exit(void) {}
71#endif
72
73/*
56 * EDAC PCI functions 74 * EDAC PCI functions
57 */ 75 */
58#ifdef CONFIG_PCI 76#ifdef CONFIG_PCI
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index f1ac86649886..ee87ef972ead 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -45,7 +45,7 @@ struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
45 void *p = NULL, *pvt; 45 void *p = NULL, *pvt;
46 unsigned int size; 46 unsigned int size;
47 47
48 debugf1("%s()\n", __func__); 48 edac_dbg(1, "\n");
49 49
50 pci = edac_align_ptr(&p, sizeof(*pci), 1); 50 pci = edac_align_ptr(&p, sizeof(*pci), 1);
51 pvt = edac_align_ptr(&p, 1, sz_pvt); 51 pvt = edac_align_ptr(&p, 1, sz_pvt);
@@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(edac_pci_alloc_ctl_info);
80 */ 80 */
81void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci) 81void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci)
82{ 82{
83 debugf1("%s()\n", __func__); 83 edac_dbg(1, "\n");
84 84
85 edac_pci_remove_sysfs(pci); 85 edac_pci_remove_sysfs(pci);
86} 86}
@@ -97,7 +97,7 @@ static struct edac_pci_ctl_info *find_edac_pci_by_dev(struct device *dev)
97 struct edac_pci_ctl_info *pci; 97 struct edac_pci_ctl_info *pci;
98 struct list_head *item; 98 struct list_head *item;
99 99
100 debugf1("%s()\n", __func__); 100 edac_dbg(1, "\n");
101 101
102 list_for_each(item, &edac_pci_list) { 102 list_for_each(item, &edac_pci_list) {
103 pci = list_entry(item, struct edac_pci_ctl_info, link); 103 pci = list_entry(item, struct edac_pci_ctl_info, link);
@@ -122,7 +122,7 @@ static int add_edac_pci_to_global_list(struct edac_pci_ctl_info *pci)
122 struct list_head *item, *insert_before; 122 struct list_head *item, *insert_before;
123 struct edac_pci_ctl_info *rover; 123 struct edac_pci_ctl_info *rover;
124 124
125 debugf1("%s()\n", __func__); 125 edac_dbg(1, "\n");
126 126
127 insert_before = &edac_pci_list; 127 insert_before = &edac_pci_list;
128 128
@@ -226,7 +226,7 @@ static void edac_pci_workq_function(struct work_struct *work_req)
226 int msec; 226 int msec;
227 unsigned long delay; 227 unsigned long delay;
228 228
229 debugf3("%s() checking\n", __func__); 229 edac_dbg(3, "checking\n");
230 230
231 mutex_lock(&edac_pci_ctls_mutex); 231 mutex_lock(&edac_pci_ctls_mutex);
232 232
@@ -261,7 +261,7 @@ static void edac_pci_workq_function(struct work_struct *work_req)
261static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci, 261static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
262 unsigned int msec) 262 unsigned int msec)
263{ 263{
264 debugf0("%s()\n", __func__); 264 edac_dbg(0, "\n");
265 265
266 INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function); 266 INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
267 queue_delayed_work(edac_workqueue, &pci->work, 267 queue_delayed_work(edac_workqueue, &pci->work,
@@ -276,7 +276,7 @@ static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
276{ 276{
277 int status; 277 int status;
278 278
279 debugf0("%s()\n", __func__); 279 edac_dbg(0, "\n");
280 280
281 status = cancel_delayed_work(&pci->work); 281 status = cancel_delayed_work(&pci->work);
282 if (status == 0) 282 if (status == 0)
@@ -293,7 +293,7 @@ static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
293void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci, 293void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
294 unsigned long value) 294 unsigned long value)
295{ 295{
296 debugf0("%s()\n", __func__); 296 edac_dbg(0, "\n");
297 297
298 edac_pci_workq_teardown(pci); 298 edac_pci_workq_teardown(pci);
299 299
@@ -333,7 +333,7 @@ EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
333 */ 333 */
334int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx) 334int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
335{ 335{
336 debugf0("%s()\n", __func__); 336 edac_dbg(0, "\n");
337 337
338 pci->pci_idx = edac_idx; 338 pci->pci_idx = edac_idx;
339 pci->start_time = jiffies; 339 pci->start_time = jiffies;
@@ -393,7 +393,7 @@ struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev)
393{ 393{
394 struct edac_pci_ctl_info *pci; 394 struct edac_pci_ctl_info *pci;
395 395
396 debugf0("%s()\n", __func__); 396 edac_dbg(0, "\n");
397 397
398 mutex_lock(&edac_pci_ctls_mutex); 398 mutex_lock(&edac_pci_ctls_mutex);
399 399
@@ -430,7 +430,7 @@ EXPORT_SYMBOL_GPL(edac_pci_del_device);
430 */ 430 */
431static void edac_pci_generic_check(struct edac_pci_ctl_info *pci) 431static void edac_pci_generic_check(struct edac_pci_ctl_info *pci)
432{ 432{
433 debugf4("%s()\n", __func__); 433 edac_dbg(4, "\n");
434 edac_pci_do_parity_check(); 434 edac_pci_do_parity_check();
435} 435}
436 436
@@ -475,7 +475,7 @@ struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev,
475 pdata->edac_idx = edac_pci_idx++; 475 pdata->edac_idx = edac_pci_idx++;
476 476
477 if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { 477 if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
478 debugf3("%s(): failed edac_pci_add_device()\n", __func__); 478 edac_dbg(3, "failed edac_pci_add_device()\n");
479 edac_pci_free_ctl_info(pci); 479 edac_pci_free_ctl_info(pci);
480 return NULL; 480 return NULL;
481 } 481 }
@@ -491,7 +491,7 @@ EXPORT_SYMBOL_GPL(edac_pci_create_generic_ctl);
491 */ 491 */
492void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci) 492void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci)
493{ 493{
494 debugf0("%s() pci mod=%s\n", __func__, pci->mod_name); 494 edac_dbg(0, "pci mod=%s\n", pci->mod_name);
495 495
496 edac_pci_del_device(pci->dev); 496 edac_pci_del_device(pci->dev);
497 edac_pci_free_ctl_info(pci); 497 edac_pci_free_ctl_info(pci);
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 97f5064e3992..e164c555a337 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -78,7 +78,7 @@ static void edac_pci_instance_release(struct kobject *kobj)
78{ 78{
79 struct edac_pci_ctl_info *pci; 79 struct edac_pci_ctl_info *pci;
80 80
81 debugf0("%s()\n", __func__); 81 edac_dbg(0, "\n");
82 82
83 /* Form pointer to containing struct, the pci control struct */ 83 /* Form pointer to containing struct, the pci control struct */
84 pci = to_instance(kobj); 84 pci = to_instance(kobj);
@@ -161,7 +161,7 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
161 struct kobject *main_kobj; 161 struct kobject *main_kobj;
162 int err; 162 int err;
163 163
164 debugf0("%s()\n", __func__); 164 edac_dbg(0, "\n");
165 165
166 /* First bump the ref count on the top main kobj, which will 166 /* First bump the ref count on the top main kobj, which will
167 * track the number of PCI instances we have, and thus nest 167 * track the number of PCI instances we have, and thus nest
@@ -177,14 +177,13 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
177 err = kobject_init_and_add(&pci->kobj, &ktype_pci_instance, 177 err = kobject_init_and_add(&pci->kobj, &ktype_pci_instance,
178 edac_pci_top_main_kobj, "pci%d", idx); 178 edac_pci_top_main_kobj, "pci%d", idx);
179 if (err != 0) { 179 if (err != 0) {
180 debugf2("%s() failed to register instance pci%d\n", 180 edac_dbg(2, "failed to register instance pci%d\n", idx);
181 __func__, idx);
182 kobject_put(edac_pci_top_main_kobj); 181 kobject_put(edac_pci_top_main_kobj);
183 goto error_out; 182 goto error_out;
184 } 183 }
185 184
186 kobject_uevent(&pci->kobj, KOBJ_ADD); 185 kobject_uevent(&pci->kobj, KOBJ_ADD);
187 debugf1("%s() Register instance 'pci%d' kobject\n", __func__, idx); 186 edac_dbg(1, "Register instance 'pci%d' kobject\n", idx);
188 187
189 return 0; 188 return 0;
190 189
@@ -201,7 +200,7 @@ error_out:
201static void edac_pci_unregister_sysfs_instance_kobj( 200static void edac_pci_unregister_sysfs_instance_kobj(
202 struct edac_pci_ctl_info *pci) 201 struct edac_pci_ctl_info *pci)
203{ 202{
204 debugf0("%s()\n", __func__); 203 edac_dbg(0, "\n");
205 204
206 /* Unregister the instance kobject and allow its release 205 /* Unregister the instance kobject and allow its release
207 * function release the main reference count and then 206 * function release the main reference count and then
@@ -317,7 +316,7 @@ static struct edac_pci_dev_attribute *edac_pci_attr[] = {
317 */ 316 */
318static void edac_pci_release_main_kobj(struct kobject *kobj) 317static void edac_pci_release_main_kobj(struct kobject *kobj)
319{ 318{
320 debugf0("%s() here to module_put(THIS_MODULE)\n", __func__); 319 edac_dbg(0, "here to module_put(THIS_MODULE)\n");
321 320
322 kfree(kobj); 321 kfree(kobj);
323 322
@@ -345,7 +344,7 @@ static int edac_pci_main_kobj_setup(void)
345 int err; 344 int err;
346 struct bus_type *edac_subsys; 345 struct bus_type *edac_subsys;
347 346
348 debugf0("%s()\n", __func__); 347 edac_dbg(0, "\n");
349 348
350 /* check and count if we have already created the main kobject */ 349 /* check and count if we have already created the main kobject */
351 if (atomic_inc_return(&edac_pci_sysfs_refcount) != 1) 350 if (atomic_inc_return(&edac_pci_sysfs_refcount) != 1)
@@ -356,7 +355,7 @@ static int edac_pci_main_kobj_setup(void)
356 */ 355 */
357 edac_subsys = edac_get_sysfs_subsys(); 356 edac_subsys = edac_get_sysfs_subsys();
358 if (edac_subsys == NULL) { 357 if (edac_subsys == NULL) {
359 debugf1("%s() no edac_subsys\n", __func__); 358 edac_dbg(1, "no edac_subsys\n");
360 err = -ENODEV; 359 err = -ENODEV;
361 goto decrement_count_fail; 360 goto decrement_count_fail;
362 } 361 }
@@ -366,14 +365,14 @@ static int edac_pci_main_kobj_setup(void)
366 * level main kobj for EDAC PCI 365 * level main kobj for EDAC PCI
367 */ 366 */
368 if (!try_module_get(THIS_MODULE)) { 367 if (!try_module_get(THIS_MODULE)) {
369 debugf1("%s() try_module_get() failed\n", __func__); 368 edac_dbg(1, "try_module_get() failed\n");
370 err = -ENODEV; 369 err = -ENODEV;
371 goto mod_get_fail; 370 goto mod_get_fail;
372 } 371 }
373 372
374 edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL); 373 edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
375 if (!edac_pci_top_main_kobj) { 374 if (!edac_pci_top_main_kobj) {
376 debugf1("Failed to allocate\n"); 375 edac_dbg(1, "Failed to allocate\n");
377 err = -ENOMEM; 376 err = -ENOMEM;
378 goto kzalloc_fail; 377 goto kzalloc_fail;
379 } 378 }
@@ -383,7 +382,7 @@ static int edac_pci_main_kobj_setup(void)
383 &ktype_edac_pci_main_kobj, 382 &ktype_edac_pci_main_kobj,
384 &edac_subsys->dev_root->kobj, "pci"); 383 &edac_subsys->dev_root->kobj, "pci");
385 if (err) { 384 if (err) {
386 debugf1("Failed to register '.../edac/pci'\n"); 385 edac_dbg(1, "Failed to register '.../edac/pci'\n");
387 goto kobject_init_and_add_fail; 386 goto kobject_init_and_add_fail;
388 } 387 }
389 388
@@ -392,7 +391,7 @@ static int edac_pci_main_kobj_setup(void)
392 * must be used, for resources to be cleaned up properly 391 * must be used, for resources to be cleaned up properly
393 */ 392 */
394 kobject_uevent(edac_pci_top_main_kobj, KOBJ_ADD); 393 kobject_uevent(edac_pci_top_main_kobj, KOBJ_ADD);
395 debugf1("Registered '.../edac/pci' kobject\n"); 394 edac_dbg(1, "Registered '.../edac/pci' kobject\n");
396 395
397 return 0; 396 return 0;
398 397
@@ -421,15 +420,14 @@ decrement_count_fail:
421 */ 420 */
422static void edac_pci_main_kobj_teardown(void) 421static void edac_pci_main_kobj_teardown(void)
423{ 422{
424 debugf0("%s()\n", __func__); 423 edac_dbg(0, "\n");
425 424
426 /* Decrement the count and only if no more controller instances 425 /* Decrement the count and only if no more controller instances
427 * are connected perform the unregistration of the top level 426 * are connected perform the unregistration of the top level
428 * main kobj 427 * main kobj
429 */ 428 */
430 if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) { 429 if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) {
431 debugf0("%s() called kobject_put on main kobj\n", 430 edac_dbg(0, "called kobject_put on main kobj\n");
432 __func__);
433 kobject_put(edac_pci_top_main_kobj); 431 kobject_put(edac_pci_top_main_kobj);
434 } 432 }
435 edac_put_sysfs_subsys(); 433 edac_put_sysfs_subsys();
@@ -446,7 +444,7 @@ int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci)
446 int err; 444 int err;
447 struct kobject *edac_kobj = &pci->kobj; 445 struct kobject *edac_kobj = &pci->kobj;
448 446
449 debugf0("%s() idx=%d\n", __func__, pci->pci_idx); 447 edac_dbg(0, "idx=%d\n", pci->pci_idx);
450 448
451 /* create the top main EDAC PCI kobject, IF needed */ 449 /* create the top main EDAC PCI kobject, IF needed */
452 err = edac_pci_main_kobj_setup(); 450 err = edac_pci_main_kobj_setup();
@@ -460,8 +458,7 @@ int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci)
460 458
461 err = sysfs_create_link(edac_kobj, &pci->dev->kobj, EDAC_PCI_SYMLINK); 459 err = sysfs_create_link(edac_kobj, &pci->dev->kobj, EDAC_PCI_SYMLINK);
462 if (err) { 460 if (err) {
463 debugf0("%s() sysfs_create_link() returned err= %d\n", 461 edac_dbg(0, "sysfs_create_link() returned err= %d\n", err);
464 __func__, err);
465 goto symlink_fail; 462 goto symlink_fail;
466 } 463 }
467 464
@@ -484,7 +481,7 @@ unregister_cleanup:
484 */ 481 */
485void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci) 482void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci)
486{ 483{
487 debugf0("%s() index=%d\n", __func__, pci->pci_idx); 484 edac_dbg(0, "index=%d\n", pci->pci_idx);
488 485
489 /* Remove the symlink */ 486 /* Remove the symlink */
490 sysfs_remove_link(&pci->kobj, EDAC_PCI_SYMLINK); 487 sysfs_remove_link(&pci->kobj, EDAC_PCI_SYMLINK);
@@ -496,7 +493,7 @@ void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci)
496 * if this 'pci' is the last instance. 493 * if this 'pci' is the last instance.
497 * If it is, the main kobject will be unregistered as a result 494 * If it is, the main kobject will be unregistered as a result
498 */ 495 */
499 debugf0("%s() calling edac_pci_main_kobj_teardown()\n", __func__); 496 edac_dbg(0, "calling edac_pci_main_kobj_teardown()\n");
500 edac_pci_main_kobj_teardown(); 497 edac_pci_main_kobj_teardown();
501} 498}
502 499
@@ -572,7 +569,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
572 569
573 local_irq_restore(flags); 570 local_irq_restore(flags);
574 571
575 debugf4("PCI STATUS= 0x%04x %s\n", status, dev_name(&dev->dev)); 572 edac_dbg(4, "PCI STATUS= 0x%04x %s\n", status, dev_name(&dev->dev));
576 573
577 /* check the status reg for errors on boards NOT marked as broken 574 /* check the status reg for errors on boards NOT marked as broken
578 * if broken, we cannot trust any of the status bits 575 * if broken, we cannot trust any of the status bits
@@ -603,13 +600,15 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
603 } 600 }
604 601
605 602
606 debugf4("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev_name(&dev->dev)); 603 edac_dbg(4, "PCI HEADER TYPE= 0x%02x %s\n",
604 header_type, dev_name(&dev->dev));
607 605
608 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { 606 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
609 /* On bridges, need to examine secondary status register */ 607 /* On bridges, need to examine secondary status register */
610 status = get_pci_parity_status(dev, 1); 608 status = get_pci_parity_status(dev, 1);
611 609
612 debugf4("PCI SEC_STATUS= 0x%04x %s\n", status, dev_name(&dev->dev)); 610 edac_dbg(4, "PCI SEC_STATUS= 0x%04x %s\n",
611 status, dev_name(&dev->dev));
613 612
614 /* check the secondary status reg for errors, 613 /* check the secondary status reg for errors,
615 * on NOT broken boards 614 * on NOT broken boards
@@ -671,7 +670,7 @@ void edac_pci_do_parity_check(void)
671{ 670{
672 int before_count; 671 int before_count;
673 672
674 debugf3("%s()\n", __func__); 673 edac_dbg(3, "\n");
675 674
676 /* if policy has PCI check off, leave now */ 675 /* if policy has PCI check off, leave now */
677 if (!check_pci_errors) 676 if (!check_pci_errors)
diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c
new file mode 100644
index 000000000000..e599b00c05a8
--- /dev/null
+++ b/drivers/edac/highbank_l2_edac.c
@@ -0,0 +1,149 @@
1/*
2 * Copyright 2011-2012 Calxeda, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#include <linux/types.h>
17#include <linux/kernel.h>
18#include <linux/ctype.h>
19#include <linux/edac.h>
20#include <linux/interrupt.h>
21#include <linux/platform_device.h>
22#include <linux/of_platform.h>
23
24#include "edac_core.h"
25#include "edac_module.h"
26
27#define SR_CLR_SB_ECC_INTR 0x0
28#define SR_CLR_DB_ECC_INTR 0x4
29
30struct hb_l2_drvdata {
31 void __iomem *base;
32 int sb_irq;
33 int db_irq;
34};
35
36static irqreturn_t highbank_l2_err_handler(int irq, void *dev_id)
37{
38 struct edac_device_ctl_info *dci = dev_id;
39 struct hb_l2_drvdata *drvdata = dci->pvt_info;
40
41 if (irq == drvdata->sb_irq) {
42 writel(1, drvdata->base + SR_CLR_SB_ECC_INTR);
43 edac_device_handle_ce(dci, 0, 0, dci->ctl_name);
44 }
45 if (irq == drvdata->db_irq) {
46 writel(1, drvdata->base + SR_CLR_DB_ECC_INTR);
47 edac_device_handle_ue(dci, 0, 0, dci->ctl_name);
48 }
49
50 return IRQ_HANDLED;
51}
52
53static int __devinit highbank_l2_err_probe(struct platform_device *pdev)
54{
55 struct edac_device_ctl_info *dci;
56 struct hb_l2_drvdata *drvdata;
57 struct resource *r;
58 int res = 0;
59
60 dci = edac_device_alloc_ctl_info(sizeof(*drvdata), "cpu",
61 1, "L", 1, 2, NULL, 0, 0);
62 if (!dci)
63 return -ENOMEM;
64
65 drvdata = dci->pvt_info;
66 dci->dev = &pdev->dev;
67 platform_set_drvdata(pdev, dci);
68
69 if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
70 return -ENOMEM;
71
72 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
73 if (!r) {
74 dev_err(&pdev->dev, "Unable to get mem resource\n");
75 res = -ENODEV;
76 goto err;
77 }
78
79 if (!devm_request_mem_region(&pdev->dev, r->start,
80 resource_size(r), dev_name(&pdev->dev))) {
81 dev_err(&pdev->dev, "Error while requesting mem region\n");
82 res = -EBUSY;
83 goto err;
84 }
85
86 drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
87 if (!drvdata->base) {
88 dev_err(&pdev->dev, "Unable to map regs\n");
89 res = -ENOMEM;
90 goto err;
91 }
92
93 drvdata->db_irq = platform_get_irq(pdev, 0);
94 res = devm_request_irq(&pdev->dev, drvdata->db_irq,
95 highbank_l2_err_handler,
96 0, dev_name(&pdev->dev), dci);
97 if (res < 0)
98 goto err;
99
100 drvdata->sb_irq = platform_get_irq(pdev, 1);
101 res = devm_request_irq(&pdev->dev, drvdata->sb_irq,
102 highbank_l2_err_handler,
103 0, dev_name(&pdev->dev), dci);
104 if (res < 0)
105 goto err;
106
107 dci->mod_name = dev_name(&pdev->dev);
108 dci->dev_name = dev_name(&pdev->dev);
109
110 if (edac_device_add_device(dci))
111 goto err;
112
113 devres_close_group(&pdev->dev, NULL);
114 return 0;
115err:
116 devres_release_group(&pdev->dev, NULL);
117 edac_device_free_ctl_info(dci);
118 return res;
119}
120
121static int highbank_l2_err_remove(struct platform_device *pdev)
122{
123 struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
124
125 edac_device_del_device(&pdev->dev);
126 edac_device_free_ctl_info(dci);
127 return 0;
128}
129
130static const struct of_device_id hb_l2_err_of_match[] = {
131 { .compatible = "calxeda,hb-sregs-l2-ecc", },
132 {},
133};
134MODULE_DEVICE_TABLE(of, hb_l2_err_of_match);
135
136static struct platform_driver highbank_l2_edac_driver = {
137 .probe = highbank_l2_err_probe,
138 .remove = highbank_l2_err_remove,
139 .driver = {
140 .name = "hb_l2_edac",
141 .of_match_table = hb_l2_err_of_match,
142 },
143};
144
145module_platform_driver(highbank_l2_edac_driver);
146
147MODULE_LICENSE("GPL v2");
148MODULE_AUTHOR("Calxeda, Inc.");
149MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank L2 Cache");
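
[Editor's note] In the new L2 driver above, one handler serves two interrupt lines: the single-bit line is acked through SR_CLR_SB_ECC_INTR and reported as a correctable error, the double-bit line through SR_CLR_DB_ECC_INTR as an uncorrectable one. A runnable model of that dispatch follows; the write-1-to-clear register behaviour, the simulated writel(), and the IRQ numbers are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

#define SR_CLR_SB_ECC_INTR 0x0	/* write-1-to-clear: single-bit ack */
#define SR_CLR_DB_ECC_INTR 0x4	/* write-1-to-clear: double-bit ack */

static uint32_t mmio[2];	/* stand-in for the ioremap()ed window */

static void writel_sim(uint32_t val, uint32_t off)
{
	mmio[off / 4] = val;	/* real hardware drops the latched interrupt */
}

static void l2_err_handler(int irq, int sb_irq, int db_irq)
{
	if (irq == sb_irq) {
		writel_sim(1, SR_CLR_SB_ECC_INTR);
		printf("CE: corrected single-bit L2 ECC error\n");
	}
	if (irq == db_irq) {
		writel_sim(1, SR_CLR_DB_ECC_INTR);
		printf("UE: uncorrectable double-bit L2 ECC error\n");
	}
}

int main(void)
{
	l2_err_handler(34, 34, 35);	/* hypothetical IRQ numbers */
	return 0;
}
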
diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
new file mode 100644
index 000000000000..c769f477fd22
--- /dev/null
+++ b/drivers/edac/highbank_mc_edac.c
@@ -0,0 +1,264 @@
1/*
2 * Copyright 2011-2012 Calxeda, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#include <linux/types.h>
17#include <linux/kernel.h>
18#include <linux/ctype.h>
19#include <linux/edac.h>
20#include <linux/interrupt.h>
21#include <linux/platform_device.h>
22#include <linux/of_platform.h>
23#include <linux/uaccess.h>
24
25#include "edac_core.h"
26#include "edac_module.h"
27
28/* DDR Ctrlr Error Registers */
29#define HB_DDR_ECC_OPT 0x128
30#define HB_DDR_ECC_U_ERR_ADDR 0x130
31#define HB_DDR_ECC_U_ERR_STAT 0x134
32#define HB_DDR_ECC_U_ERR_DATAL 0x138
33#define HB_DDR_ECC_U_ERR_DATAH 0x13c
34#define HB_DDR_ECC_C_ERR_ADDR 0x140
35#define HB_DDR_ECC_C_ERR_STAT 0x144
36#define HB_DDR_ECC_C_ERR_DATAL 0x148
37#define HB_DDR_ECC_C_ERR_DATAH 0x14c
38#define HB_DDR_ECC_INT_STATUS 0x180
39#define HB_DDR_ECC_INT_ACK 0x184
40#define HB_DDR_ECC_U_ERR_ID 0x424
41#define HB_DDR_ECC_C_ERR_ID 0x428
42
43#define HB_DDR_ECC_INT_STAT_CE 0x8
44#define HB_DDR_ECC_INT_STAT_DOUBLE_CE 0x10
45#define HB_DDR_ECC_INT_STAT_UE 0x20
46#define HB_DDR_ECC_INT_STAT_DOUBLE_UE 0x40
47
48#define HB_DDR_ECC_OPT_MODE_MASK 0x3
49#define HB_DDR_ECC_OPT_FWC 0x100
50#define HB_DDR_ECC_OPT_XOR_SHIFT 16
51
52struct hb_mc_drvdata {
53 void __iomem *mc_vbase;
54};
55
56static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
57{
58 struct mem_ctl_info *mci = dev_id;
59 struct hb_mc_drvdata *drvdata = mci->pvt_info;
60 u32 status, err_addr;
61
62 /* Read the interrupt status register */
63 status = readl(drvdata->mc_vbase + HB_DDR_ECC_INT_STATUS);
64
65 if (status & HB_DDR_ECC_INT_STAT_UE) {
66 err_addr = readl(drvdata->mc_vbase + HB_DDR_ECC_U_ERR_ADDR);
67 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
68 err_addr >> PAGE_SHIFT,
69 err_addr & ~PAGE_MASK, 0,
70 0, 0, -1,
71 mci->ctl_name, "");
72 }
73 if (status & HB_DDR_ECC_INT_STAT_CE) {
74 u32 syndrome = readl(drvdata->mc_vbase + HB_DDR_ECC_C_ERR_STAT);
75 syndrome = (syndrome >> 8) & 0xff;
76 err_addr = readl(drvdata->mc_vbase + HB_DDR_ECC_C_ERR_ADDR);
77 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
78 err_addr >> PAGE_SHIFT,
79 err_addr & ~PAGE_MASK, syndrome,
80 0, 0, -1,
81 mci->ctl_name, "");
82 }
83
84 /* clear the error, clears the interrupt */
85 writel(status, drvdata->mc_vbase + HB_DDR_ECC_INT_ACK);
86 return IRQ_HANDLED;
87}
88
89#ifdef CONFIG_EDAC_DEBUG
90static ssize_t highbank_mc_err_inject_write(struct file *file,
91 const char __user *data,
92 size_t count, loff_t *ppos)
93{
94 struct mem_ctl_info *mci = file->private_data;
95 struct hb_mc_drvdata *pdata = mci->pvt_info;
96 char buf[32];
97 size_t buf_size;
98 u32 reg;
99 u8 synd;
100
101 buf_size = min(count, (sizeof(buf)-1));
102 if (copy_from_user(buf, data, buf_size))
103 return -EFAULT;
104 buf[buf_size] = 0;
105
106 if (!kstrtou8(buf, 16, &synd)) {
107 reg = readl(pdata->mc_vbase + HB_DDR_ECC_OPT);
108 reg &= HB_DDR_ECC_OPT_MODE_MASK;
109 reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC;
110 writel(reg, pdata->mc_vbase + HB_DDR_ECC_OPT);
111 }
112
113 return count;
114}
115
116static int debugfs_open(struct inode *inode, struct file *file)
117{
118 file->private_data = inode->i_private;
119 return 0;
120}
121
122static const struct file_operations highbank_mc_debug_inject_fops = {
123 .open = debugfs_open,
124 .write = highbank_mc_err_inject_write,
125 .llseek = generic_file_llseek,
126};
127
128static void __devinit highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
129{
130 if (mci->debugfs)
131 debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci,
132 &highbank_mc_debug_inject_fops);
133;
134}
135#else
136static void __devinit highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
137{}
138#endif
139
140static int __devinit highbank_mc_probe(struct platform_device *pdev)
141{
142 struct edac_mc_layer layers[2];
143 struct mem_ctl_info *mci;
144 struct hb_mc_drvdata *drvdata;
145 struct dimm_info *dimm;
146 struct resource *r;
147 u32 control;
148 int irq;
149 int res = 0;
150
151 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
152 layers[0].size = 1;
153 layers[0].is_virt_csrow = true;
154 layers[1].type = EDAC_MC_LAYER_CHANNEL;
155 layers[1].size = 1;
156 layers[1].is_virt_csrow = false;
157 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
158 sizeof(struct hb_mc_drvdata));
159 if (!mci)
160 return -ENOMEM;
161
162 mci->pdev = &pdev->dev;
163 drvdata = mci->pvt_info;
164 platform_set_drvdata(pdev, mci);
165
166 if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
167 return -ENOMEM;
168
169 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
170 if (!r) {
171 dev_err(&pdev->dev, "Unable to get mem resource\n");
172 res = -ENODEV;
173 goto err;
174 }
175
176 if (!devm_request_mem_region(&pdev->dev, r->start,
177 resource_size(r), dev_name(&pdev->dev))) {
178 dev_err(&pdev->dev, "Error while requesting mem region\n");
179 res = -EBUSY;
180 goto err;
181 }
182
183 drvdata->mc_vbase = devm_ioremap(&pdev->dev,
184 r->start, resource_size(r));
185 if (!drvdata->mc_vbase) {
186 dev_err(&pdev->dev, "Unable to map regs\n");
187 res = -ENOMEM;
188 goto err;
189 }
190
191 control = readl(drvdata->mc_vbase + HB_DDR_ECC_OPT) & 0x3;
192 if (!control || (control == 0x2)) {
193 dev_err(&pdev->dev, "No ECC present, or ECC disabled\n");
194 res = -ENODEV;
195 goto err;
196 }
197
198 irq = platform_get_irq(pdev, 0);
199 res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
200 0, dev_name(&pdev->dev), mci);
201 if (res < 0) {
202 dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
203 goto err;
204 }
205
206 mci->mtype_cap = MEM_FLAG_DDR3;
207 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
208 mci->edac_cap = EDAC_FLAG_SECDED;
209 mci->mod_name = dev_name(&pdev->dev);
210 mci->mod_ver = "1";
211 mci->ctl_name = dev_name(&pdev->dev);
212 mci->scrub_mode = SCRUB_SW_SRC;
213
214 /* Only a single 4GB DIMM is supported */
215 dimm = *mci->dimms;
216 dimm->nr_pages = (~0UL >> PAGE_SHIFT) + 1;
217 dimm->grain = 8;
218 dimm->dtype = DEV_X8;
219 dimm->mtype = MEM_DDR3;
220 dimm->edac_mode = EDAC_SECDED;
221
222 res = edac_mc_add_mc(mci);
223 if (res < 0)
224 goto err;
225
226 highbank_mc_create_debugfs_nodes(mci);
227
228 devres_close_group(&pdev->dev, NULL);
229 return 0;
230err:
231 devres_release_group(&pdev->dev, NULL);
232 edac_mc_free(mci);
233 return res;
234}
235
236static int highbank_mc_remove(struct platform_device *pdev)
237{
238 struct mem_ctl_info *mci = platform_get_drvdata(pdev);
239
240 edac_mc_del_mc(&pdev->dev);
241 edac_mc_free(mci);
242 return 0;
243}
244
245static const struct of_device_id hb_ddr_ctrl_of_match[] = {
246 { .compatible = "calxeda,hb-ddr-ctrl", },
247 {},
248};
249MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match);
250
251static struct platform_driver highbank_mc_edac_driver = {
252 .probe = highbank_mc_probe,
253 .remove = highbank_mc_remove,
254 .driver = {
255 .name = "hb_mc_edac",
256 .of_match_table = hb_ddr_ctrl_of_match,
257 },
258};
259
260module_platform_driver(highbank_mc_edac_driver);
261
262MODULE_LICENSE("GPL v2");
263MODULE_AUTHOR("Calxeda, Inc.");
264MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank");
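
[Editor's note] highbank_mc_err_handler() above splits the latched error address into the (page, offset) pair that edac_mc_handle_error() expects, and pulls the syndrome out of bits 15:8 of the status register. A runnable sketch of that arithmetic; the 4 KiB PAGE_SHIFT and the sample register values are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12				/* assumed 4 KiB pages */
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

int main(void)
{
	uint32_t err_addr = 0x12345678;	/* as read from HB_DDR_ECC_C_ERR_ADDR */
	uint32_t err_stat = 0x0000ab00;	/* as read from HB_DDR_ECC_C_ERR_STAT */

	unsigned long pfn    = err_addr >> PAGE_SHIFT;	/* page frame number */
	unsigned long offset = err_addr & ~PAGE_MASK;	/* offset inside page */
	uint8_t syndrome     = (err_stat >> 8) & 0xff;	/* ECC syndrome byte */

	printf("pfn=0x%lx offset=0x%lx syndrome=0x%02x\n", pfn, offset, syndrome);
	return 0;
}
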
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 8ad1744faacd..d3d19cc4e9a1 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -194,7 +194,7 @@ static void i3000_get_error_info(struct mem_ctl_info *mci,
194{ 194{
195 struct pci_dev *pdev; 195 struct pci_dev *pdev;
196 196
197 pdev = to_pci_dev(mci->dev); 197 pdev = to_pci_dev(mci->pdev);
198 198
199 /* 199 /*
200 * This is a mess because there is no atomic way to read all the 200 * This is a mess because there is no atomic way to read all the
@@ -236,7 +236,7 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
236 int row, multi_chan, channel; 236 int row, multi_chan, channel;
237 unsigned long pfn, offset; 237 unsigned long pfn, offset;
238 238
239 multi_chan = mci->csrows[0].nr_channels - 1; 239 multi_chan = mci->csrows[0]->nr_channels - 1;
240 240
241 if (!(info->errsts & I3000_ERRSTS_BITS)) 241 if (!(info->errsts & I3000_ERRSTS_BITS))
242 return 0; 242 return 0;
@@ -245,9 +245,9 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
245 return 1; 245 return 1;
246 246
247 if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) { 247 if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
248 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 248 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
249 -1, -1, -1, 249 -1, -1, -1,
250 "UE overwrote CE", "", NULL); 250 "UE overwrote CE", "");
251 info->errsts = info->errsts2; 251 info->errsts = info->errsts2;
252 } 252 }
253 253
@@ -258,15 +258,15 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
258 row = edac_mc_find_csrow_by_page(mci, pfn); 258 row = edac_mc_find_csrow_by_page(mci, pfn);
259 259
260 if (info->errsts & I3000_ERRSTS_UE) 260 if (info->errsts & I3000_ERRSTS_UE)
261 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 261 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
262 pfn, offset, 0, 262 pfn, offset, 0,
263 row, -1, -1, 263 row, -1, -1,
264 "i3000 UE", "", NULL); 264 "i3000 UE", "");
265 else 265 else
266 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 266 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
267 pfn, offset, info->derrsyn, 267 pfn, offset, info->derrsyn,
268 row, multi_chan ? channel : 0, -1, 268 row, multi_chan ? channel : 0, -1,
269 "i3000 CE", "", NULL); 269 "i3000 CE", "");
270 270
271 return 1; 271 return 1;
272} 272}
@@ -275,7 +275,7 @@ static void i3000_check(struct mem_ctl_info *mci)
275{ 275{
276 struct i3000_error_info info; 276 struct i3000_error_info info;
277 277
278 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 278 edac_dbg(1, "MC%d\n", mci->mc_idx);
279 i3000_get_error_info(mci, &info); 279 i3000_get_error_info(mci, &info);
280 i3000_process_error_info(mci, &info, 1); 280 i3000_process_error_info(mci, &info, 1);
281} 281}
@@ -322,7 +322,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
322 unsigned long mchbar; 322 unsigned long mchbar;
323 void __iomem *window; 323 void __iomem *window;
324 324
325 debugf0("MC: %s()\n", __func__); 325 edac_dbg(0, "MC:\n");
326 326
327 pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *) & mchbar); 327 pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *) & mchbar);
328 mchbar &= I3000_MCHBAR_MASK; 328 mchbar &= I3000_MCHBAR_MASK;
@@ -366,9 +366,9 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
366 if (!mci) 366 if (!mci)
367 return -ENOMEM; 367 return -ENOMEM;
368 368
369 debugf3("MC: %s(): init mci\n", __func__); 369 edac_dbg(3, "MC: init mci\n");
370 370
371 mci->dev = &pdev->dev; 371 mci->pdev = &pdev->dev;
372 mci->mtype_cap = MEM_FLAG_DDR2; 372 mci->mtype_cap = MEM_FLAG_DDR2;
373 373
374 mci->edac_ctl_cap = EDAC_FLAG_SECDED; 374 mci->edac_ctl_cap = EDAC_FLAG_SECDED;
@@ -393,14 +393,13 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
393 for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) { 393 for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) {
394 u8 value; 394 u8 value;
395 u32 cumul_size; 395 u32 cumul_size;
396 struct csrow_info *csrow = &mci->csrows[i]; 396 struct csrow_info *csrow = mci->csrows[i];
397 397
398 value = drb[i]; 398 value = drb[i];
399 cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT); 399 cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT);
400 if (interleaved) 400 if (interleaved)
401 cumul_size <<= 1; 401 cumul_size <<= 1;
402 debugf3("MC: %s(): (%d) cumul_size 0x%x\n", 402 edac_dbg(3, "MC: (%d) cumul_size 0x%x\n", i, cumul_size);
403 __func__, i, cumul_size);
404 if (cumul_size == last_cumul_size) 403 if (cumul_size == last_cumul_size)
405 continue; 404 continue;
406 405
@@ -410,7 +409,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
410 last_cumul_size = cumul_size; 409 last_cumul_size = cumul_size;
411 410
412 for (j = 0; j < nr_channels; j++) { 411 for (j = 0; j < nr_channels; j++) {
413 struct dimm_info *dimm = csrow->channels[j].dimm; 412 struct dimm_info *dimm = csrow->channels[j]->dimm;
414 413
415 dimm->nr_pages = nr_pages / nr_channels; 414 dimm->nr_pages = nr_pages / nr_channels;
416 dimm->grain = I3000_DEAP_GRAIN; 415 dimm->grain = I3000_DEAP_GRAIN;
@@ -429,7 +428,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
429 428
430 rc = -ENODEV; 429 rc = -ENODEV;
431 if (edac_mc_add_mc(mci)) { 430 if (edac_mc_add_mc(mci)) {
432 debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__); 431 edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
433 goto fail; 432 goto fail;
434 } 433 }
435 434
@@ -445,7 +444,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
445 } 444 }
446 445
447 /* get this far and it's successful */ 446 /* get this far and it's successful */
448 debugf3("MC: %s(): success\n", __func__); 447 edac_dbg(3, "MC: success\n");
449 return 0; 448 return 0;
450 449
451fail: 450fail:
@@ -461,7 +460,7 @@ static int __devinit i3000_init_one(struct pci_dev *pdev,
461{ 460{
462 int rc; 461 int rc;
463 462
464 debugf0("MC: %s()\n", __func__); 463 edac_dbg(0, "MC:\n");
465 464
466 if (pci_enable_device(pdev) < 0) 465 if (pci_enable_device(pdev) < 0)
467 return -EIO; 466 return -EIO;
@@ -477,7 +476,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
477{ 476{
478 struct mem_ctl_info *mci; 477 struct mem_ctl_info *mci;
479 478
480 debugf0("%s()\n", __func__); 479 edac_dbg(0, "\n");
481 480
482 if (i3000_pci) 481 if (i3000_pci)
483 edac_pci_release_generic_ctl(i3000_pci); 482 edac_pci_release_generic_ctl(i3000_pci);
@@ -511,7 +510,7 @@ static int __init i3000_init(void)
511{ 510{
512 int pci_rc; 511 int pci_rc;
513 512
514 debugf3("MC: %s()\n", __func__); 513 edac_dbg(3, "MC:\n");
515 514
516 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 515 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
517 opstate_init(); 516 opstate_init();
@@ -525,14 +524,14 @@ static int __init i3000_init(void)
525 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 524 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
526 PCI_DEVICE_ID_INTEL_3000_HB, NULL); 525 PCI_DEVICE_ID_INTEL_3000_HB, NULL);
527 if (!mci_pdev) { 526 if (!mci_pdev) {
528 debugf0("i3000 pci_get_device fail\n"); 527 edac_dbg(0, "i3000 pci_get_device fail\n");
529 pci_rc = -ENODEV; 528 pci_rc = -ENODEV;
530 goto fail1; 529 goto fail1;
531 } 530 }
532 531
533 pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl); 532 pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl);
534 if (pci_rc < 0) { 533 if (pci_rc < 0) {
535 debugf0("i3000 init fail\n"); 534 edac_dbg(0, "i3000 init fail\n");
536 pci_rc = -ENODEV; 535 pci_rc = -ENODEV;
537 goto fail1; 536 goto fail1;
538 } 537 }
@@ -552,7 +551,7 @@ fail0:
552 551
553static void __exit i3000_exit(void) 552static void __exit i3000_exit(void)
554{ 553{
555 debugf3("MC: %s()\n", __func__); 554 edac_dbg(3, "MC:\n");
556 555
557 pci_unregister_driver(&i3000_driver); 556 pci_unregister_driver(&i3000_driver);
558 if (!i3000_registered) { 557 if (!i3000_registered) {
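
[Editor's note] A second API change runs through these hunks: the csrow and channel tables moved from flat struct arrays to arrays of pointers, which is why `&mci->csrows[i]` becomes `mci->csrows[i]` and `csrow->channels[j].dimm` becomes `csrow->channels[j]->dimm`. A small userspace sketch of the new layout — struct contents are reduced to one field, and error handling is omitted, for illustration.

#include <stdio.h>
#include <stdlib.h>

struct csrow_info {
	int nr_channels;
};

int main(void)
{
	int nr_csrows = 4;

	/* new layout: an array of pointers, one allocation per row */
	struct csrow_info **csrows = calloc(nr_csrows, sizeof(*csrows));
	for (int i = 0; i < nr_csrows; i++)
		csrows[i] = calloc(1, sizeof(**csrows));

	csrows[0]->nr_channels = 2;	/* hence '->' at the call sites above */
	printf("csrow 0 has %d channel(s)\n", csrows[0]->nr_channels);

	for (int i = 0; i < nr_csrows; i++)
		free(csrows[i]);
	free(csrows);
	return 0;
}
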
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index bbe43ef71823..47180a08edad 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -110,10 +110,10 @@ static int how_many_channels(struct pci_dev *pdev)
110 110
111 pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b); 111 pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b);
112 if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */ 112 if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
113 debugf0("In single channel mode.\n"); 113 edac_dbg(0, "In single channel mode\n");
114 return 1; 114 return 1;
115 } else { 115 } else {
116 debugf0("In dual channel mode.\n"); 116 edac_dbg(0, "In dual channel mode\n");
117 return 2; 117 return 2;
118 } 118 }
119} 119}
@@ -159,7 +159,7 @@ static void i3200_clear_error_info(struct mem_ctl_info *mci)
159{ 159{
160 struct pci_dev *pdev; 160 struct pci_dev *pdev;
161 161
162 pdev = to_pci_dev(mci->dev); 162 pdev = to_pci_dev(mci->pdev);
163 163
164 /* 164 /*
165 * Clear any error bits. 165 * Clear any error bits.
@@ -176,7 +176,7 @@ static void i3200_get_and_clear_error_info(struct mem_ctl_info *mci,
176 struct i3200_priv *priv = mci->pvt_info; 176 struct i3200_priv *priv = mci->pvt_info;
177 void __iomem *window = priv->window; 177 void __iomem *window = priv->window;
178 178
179 pdev = to_pci_dev(mci->dev); 179 pdev = to_pci_dev(mci->pdev);
180 180
181 /* 181 /*
182 * This is a mess because there is no atomic way to read all the 182 * This is a mess because there is no atomic way to read all the
@@ -218,25 +218,25 @@ static void i3200_process_error_info(struct mem_ctl_info *mci,
218 return; 218 return;
219 219
220 if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) { 220 if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
221 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 221 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
222 -1, -1, -1, "UE overwrote CE", "", NULL); 222 -1, -1, -1, "UE overwrote CE", "");
223 info->errsts = info->errsts2; 223 info->errsts = info->errsts2;
224 } 224 }
225 225
226 for (channel = 0; channel < nr_channels; channel++) { 226 for (channel = 0; channel < nr_channels; channel++) {
227 log = info->eccerrlog[channel]; 227 log = info->eccerrlog[channel];
228 if (log & I3200_ECCERRLOG_UE) { 228 if (log & I3200_ECCERRLOG_UE) {
229 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 229 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
230 0, 0, 0, 230 0, 0, 0,
231 eccerrlog_row(channel, log), 231 eccerrlog_row(channel, log),
232 -1, -1, 232 -1, -1,
233 "i3000 UE", "", NULL); 233 "i3000 UE", "");
234 } else if (log & I3200_ECCERRLOG_CE) { 234 } else if (log & I3200_ECCERRLOG_CE) {
235 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 235 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
236 0, 0, eccerrlog_syndrome(log), 236 0, 0, eccerrlog_syndrome(log),
237 eccerrlog_row(channel, log), 237 eccerrlog_row(channel, log),
238 -1, -1, 238 -1, -1,
239 "i3000 UE", "", NULL); 239 "i3000 UE", "");
240 } 240 }
241 } 241 }
242} 242}
@@ -245,7 +245,7 @@ static void i3200_check(struct mem_ctl_info *mci)
245{ 245{
246 struct i3200_error_info info; 246 struct i3200_error_info info;
247 247
248 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 248 edac_dbg(1, "MC%d\n", mci->mc_idx);
249 i3200_get_and_clear_error_info(mci, &info); 249 i3200_get_and_clear_error_info(mci, &info);
250 i3200_process_error_info(mci, &info); 250 i3200_process_error_info(mci, &info);
251} 251}
@@ -332,7 +332,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
332 void __iomem *window; 332 void __iomem *window;
333 struct i3200_priv *priv; 333 struct i3200_priv *priv;
334 334
335 debugf0("MC: %s()\n", __func__); 335 edac_dbg(0, "MC:\n");
336 336
337 window = i3200_map_mchbar(pdev); 337 window = i3200_map_mchbar(pdev);
338 if (!window) 338 if (!window)
@@ -352,9 +352,9 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
352 if (!mci) 352 if (!mci)
353 return -ENOMEM; 353 return -ENOMEM;
354 354
355 debugf3("MC: %s(): init mci\n", __func__); 355 edac_dbg(3, "MC: init mci\n");
356 356
357 mci->dev = &pdev->dev; 357 mci->pdev = &pdev->dev;
358 mci->mtype_cap = MEM_FLAG_DDR2; 358 mci->mtype_cap = MEM_FLAG_DDR2;
359 359
360 mci->edac_ctl_cap = EDAC_FLAG_SECDED; 360 mci->edac_ctl_cap = EDAC_FLAG_SECDED;
@@ -379,7 +379,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
379 */ 379 */
380 for (i = 0; i < mci->nr_csrows; i++) { 380 for (i = 0; i < mci->nr_csrows; i++) {
381 unsigned long nr_pages; 381 unsigned long nr_pages;
382 struct csrow_info *csrow = &mci->csrows[i]; 382 struct csrow_info *csrow = mci->csrows[i];
383 383
384 nr_pages = drb_to_nr_pages(drbs, stacked, 384 nr_pages = drb_to_nr_pages(drbs, stacked,
385 i / I3200_RANKS_PER_CHANNEL, 385 i / I3200_RANKS_PER_CHANNEL,
@@ -389,7 +389,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
389 continue; 389 continue;
390 390
391 for (j = 0; j < nr_channels; j++) { 391 for (j = 0; j < nr_channels; j++) {
392 struct dimm_info *dimm = csrow->channels[j].dimm; 392 struct dimm_info *dimm = csrow->channels[j]->dimm;
393 393
394 dimm->nr_pages = nr_pages / nr_channels; 394 dimm->nr_pages = nr_pages / nr_channels;
395 dimm->grain = nr_pages << PAGE_SHIFT; 395 dimm->grain = nr_pages << PAGE_SHIFT;
@@ -403,12 +403,12 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
403 403
404 rc = -ENODEV; 404 rc = -ENODEV;
405 if (edac_mc_add_mc(mci)) { 405 if (edac_mc_add_mc(mci)) {
406 debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__); 406 edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
407 goto fail; 407 goto fail;
408 } 408 }
409 409
410 /* get this far and it's successful */ 410 /* get this far and it's successful */
411 debugf3("MC: %s(): success\n", __func__); 411 edac_dbg(3, "MC: success\n");
412 return 0; 412 return 0;
413 413
414fail: 414fail:
@@ -424,7 +424,7 @@ static int __devinit i3200_init_one(struct pci_dev *pdev,
424{ 424{
425 int rc; 425 int rc;
426 426
427 debugf0("MC: %s()\n", __func__); 427 edac_dbg(0, "MC:\n");
428 428
429 if (pci_enable_device(pdev) < 0) 429 if (pci_enable_device(pdev) < 0)
430 return -EIO; 430 return -EIO;
@@ -441,7 +441,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
441 struct mem_ctl_info *mci; 441 struct mem_ctl_info *mci;
442 struct i3200_priv *priv; 442 struct i3200_priv *priv;
443 443
444 debugf0("%s()\n", __func__); 444 edac_dbg(0, "\n");
445 445
446 mci = edac_mc_del_mc(&pdev->dev); 446 mci = edac_mc_del_mc(&pdev->dev);
447 if (!mci) 447 if (!mci)
@@ -475,7 +475,7 @@ static int __init i3200_init(void)
475{ 475{
476 int pci_rc; 476 int pci_rc;
477 477
478 debugf3("MC: %s()\n", __func__); 478 edac_dbg(3, "MC:\n");
479 479
480 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 480 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
481 opstate_init(); 481 opstate_init();
@@ -489,14 +489,14 @@ static int __init i3200_init(void)
489 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 489 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
490 PCI_DEVICE_ID_INTEL_3200_HB, NULL); 490 PCI_DEVICE_ID_INTEL_3200_HB, NULL);
491 if (!mci_pdev) { 491 if (!mci_pdev) {
492 debugf0("i3200 pci_get_device fail\n"); 492 edac_dbg(0, "i3200 pci_get_device fail\n");
493 pci_rc = -ENODEV; 493 pci_rc = -ENODEV;
494 goto fail1; 494 goto fail1;
495 } 495 }
496 496
497 pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl); 497 pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl);
498 if (pci_rc < 0) { 498 if (pci_rc < 0) {
499 debugf0("i3200 init fail\n"); 499 edac_dbg(0, "i3200 init fail\n");
500 pci_rc = -ENODEV; 500 pci_rc = -ENODEV;
501 goto fail1; 501 goto fail1;
502 } 502 }
@@ -516,7 +516,7 @@ fail0:
516 516
517static void __exit i3200_exit(void) 517static void __exit i3200_exit(void)
518{ 518{
519 debugf3("MC: %s()\n", __func__); 519 edac_dbg(3, "MC:\n");
520 520
521 pci_unregister_driver(&i3200_driver); 521 pci_unregister_driver(&i3200_driver);
522 if (!i3200_registered) { 522 if (!i3200_registered) {
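
[Editor's note] The other change repeated in every error path here is the edac_mc_handle_error() signature: an error count was inserted right after mci (always 1 in these polled drivers), and the trailing arch-specific log pointer that every caller passed as NULL was dropped. A self-contained mock of the new calling convention follows; the enum, struct, and function body are stand-ins, not the kernel's definitions.

#include <stdio.h>

enum hw_event_mc_err_type {
	HW_EVENT_ERR_CORRECTED,
	HW_EVENT_ERR_UNCORRECTED,
	HW_EVENT_ERR_FATAL,
};

struct mem_ctl_info {
	const char *ctl_name;
};

/* Stand-in with the new parameter order: error_count follows mci, and
 * there is no trailing arch-log pointer any more. */
static void edac_mc_handle_error(enum hw_event_mc_err_type type,
				 struct mem_ctl_info *mci,
				 unsigned int error_count,
				 unsigned long pfn, unsigned long offset,
				 unsigned long syndrome,
				 int top_layer, int mid_layer, int low_layer,
				 const char *msg, const char *other_detail)
{
	printf("%s: %u %s event(s) at page 0x%lx: %s\n", mci->ctl_name,
	       error_count,
	       type == HW_EVENT_ERR_CORRECTED ? "CE" : "UE/fatal", pfn, msg);
}

int main(void)
{
	struct mem_ctl_info mci = { .ctl_name = "i3200" };

	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, &mci, 1,
			     0, 0, 0x2a, 0, -1, -1, "i3200 CE", "");
	return 0;
}
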
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 11ea835f155a..39c63757c2a1 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -273,7 +273,7 @@
273#define CHANNELS_PER_BRANCH 2 273#define CHANNELS_PER_BRANCH 2
274#define MAX_BRANCHES 2 274#define MAX_BRANCHES 2
275 275
276/* Defines to extract the vaious fields from the 276/* Defines to extract the various fields from the
277 * MTRx - Memory Technology Registers 277 * MTRx - Memory Technology Registers
278 */ 278 */
279#define MTR_DIMMS_PRESENT(mtr) ((mtr) & (0x1 << 8)) 279#define MTR_DIMMS_PRESENT(mtr) ((mtr) & (0x1 << 8))
@@ -287,22 +287,6 @@
287#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3) 287#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3)
288#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10) 288#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10)
289 289
290#ifdef CONFIG_EDAC_DEBUG
291static char *numrow_toString[] = {
292 "8,192 - 13 rows",
293 "16,384 - 14 rows",
294 "32,768 - 15 rows",
295 "reserved"
296};
297
298static char *numcol_toString[] = {
299 "1,024 - 10 columns",
300 "2,048 - 11 columns",
301 "4,096 - 12 columns",
302 "reserved"
303};
304#endif
305
306/* enables the report of miscellaneous messages as CE errors - default off */ 290/* enables the report of miscellaneous messages as CE errors - default off */
307static int misc_messages; 291static int misc_messages;
308 292
@@ -344,7 +328,13 @@ struct i5000_pvt {
344 struct pci_dev *branch_1; /* 22.0 */ 328 struct pci_dev *branch_1; /* 22.0 */
345 329
346 u16 tolm; /* top of low memory */ 330 u16 tolm; /* top of low memory */
347 u64 ambase; /* AMB BAR */ 331 union {
332 u64 ambase; /* AMB BAR */
333 struct {
334 u32 ambase_bottom;
335 u32 ambase_top;
336 } u __packed;
337 };
348 338
349 u16 mir0, mir1, mir2; 339 u16 mir0, mir1, mir2;
350 340
@@ -494,10 +484,9 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
494 ras = NREC_RAS(info->nrecmemb); 484 ras = NREC_RAS(info->nrecmemb);
495 cas = NREC_CAS(info->nrecmemb); 485 cas = NREC_CAS(info->nrecmemb);
496 486
497 debugf0("\t\tCSROW= %d Channel= %d " 487 edac_dbg(0, "\t\tCSROW= %d Channel= %d (DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
498 "(DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", 488 rank, channel, bank,
499 rank, channel, bank, 489 rdwr ? "Write" : "Read", ras, cas);
500 rdwr ? "Write" : "Read", ras, cas);
501 490
502 /* Only 1 bit will be on */ 491 /* Only 1 bit will be on */
503 switch (allErrors) { 492 switch (allErrors) {
@@ -536,10 +525,10 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
536 bank, ras, cas, allErrors, specific); 525 bank, ras, cas, allErrors, specific);
537 526
538 /* Call the helper to output message */ 527 /* Call the helper to output message */
539 edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0, 528 edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0,
540 channel >> 1, channel & 1, rank, 529 channel >> 1, channel & 1, rank,
541 rdwr ? "Write error" : "Read error", 530 rdwr ? "Write error" : "Read error",
542 msg, NULL); 531 msg);
543} 532}
544 533
545/* 534/*
@@ -574,7 +563,7 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
574 /* ONLY ONE of the possible error bits will be set, as per the docs */ 563 /* ONLY ONE of the possible error bits will be set, as per the docs */
575 ue_errors = allErrors & FERR_NF_UNCORRECTABLE; 564 ue_errors = allErrors & FERR_NF_UNCORRECTABLE;
576 if (ue_errors) { 565 if (ue_errors) {
577 debugf0("\tUncorrected bits= 0x%x\n", ue_errors); 566 edac_dbg(0, "\tUncorrected bits= 0x%x\n", ue_errors);
578 567
579 branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd); 568 branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
580 569
@@ -590,11 +579,9 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
590 ras = NREC_RAS(info->nrecmemb); 579 ras = NREC_RAS(info->nrecmemb);
591 cas = NREC_CAS(info->nrecmemb); 580 cas = NREC_CAS(info->nrecmemb);
592 581
593 debugf0 582 edac_dbg(0, "\t\tCSROW= %d Channels= %d,%d (Branch= %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
594 ("\t\tCSROW= %d Channels= %d,%d (Branch= %d " 583 rank, channel, channel + 1, branch >> 1, bank,
595 "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", 584 rdwr ? "Write" : "Read", ras, cas);
596 rank, channel, channel + 1, branch >> 1, bank,
597 rdwr ? "Write" : "Read", ras, cas);
598 585
599 switch (ue_errors) { 586 switch (ue_errors) {
600 case FERR_NF_M12ERR: 587 case FERR_NF_M12ERR:
@@ -637,16 +624,16 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
637 rank, bank, ras, cas, ue_errors, specific); 624 rank, bank, ras, cas, ue_errors, specific);
638 625
639 /* Call the helper to output message */ 626 /* Call the helper to output message */
640 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 627 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
641 channel >> 1, -1, rank, 628 channel >> 1, -1, rank,
642 rdwr ? "Write error" : "Read error", 629 rdwr ? "Write error" : "Read error",
643 msg, NULL); 630 msg);
644 } 631 }
645 632
646 /* Check correctable errors */ 633 /* Check correctable errors */
647 ce_errors = allErrors & FERR_NF_CORRECTABLE; 634 ce_errors = allErrors & FERR_NF_CORRECTABLE;
648 if (ce_errors) { 635 if (ce_errors) {
649 debugf0("\tCorrected bits= 0x%x\n", ce_errors); 636 edac_dbg(0, "\tCorrected bits= 0x%x\n", ce_errors);
650 637
651 branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd); 638 branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
652 639
@@ -664,10 +651,9 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
664 ras = REC_RAS(info->recmemb); 651 ras = REC_RAS(info->recmemb);
665 cas = REC_CAS(info->recmemb); 652 cas = REC_CAS(info->recmemb);
666 653
667 debugf0("\t\tCSROW= %d Channel= %d (Branch %d " 654 edac_dbg(0, "\t\tCSROW= %d Channel= %d (Branch %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
668 "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", 655 rank, channel, branch >> 1, bank,
669 rank, channel, branch >> 1, bank, 656 rdwr ? "Write" : "Read", ras, cas);
670 rdwr ? "Write" : "Read", ras, cas);
671 657
672 switch (ce_errors) { 658 switch (ce_errors) {
673 case FERR_NF_M17ERR: 659 case FERR_NF_M17ERR:
@@ -692,10 +678,10 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
692 specific); 678 specific);
693 679
694 /* Call the helper to output message */ 680 /* Call the helper to output message */
695 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0, 681 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
696 channel >> 1, channel % 2, rank, 682 channel >> 1, channel % 2, rank,
697 rdwr ? "Write error" : "Read error", 683 rdwr ? "Write error" : "Read error",
698 msg, NULL); 684 msg);
699 } 685 }
700 686
701 if (!misc_messages) 687 if (!misc_messages)
@@ -738,9 +724,9 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
738 "Err=%#x (%s)", misc_errors, specific); 724 "Err=%#x (%s)", misc_errors, specific);
739 725
740 /* Call the helper to output message */ 726 /* Call the helper to output message */
741 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0, 727 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
742 branch >> 1, -1, -1, 728 branch >> 1, -1, -1,
743 "Misc error", msg, NULL); 729 "Misc error", msg);
744 } 730 }
745} 731}
746 732
@@ -779,7 +765,7 @@ static void i5000_clear_error(struct mem_ctl_info *mci)
779static void i5000_check_error(struct mem_ctl_info *mci) 765static void i5000_check_error(struct mem_ctl_info *mci)
780{ 766{
781 struct i5000_error_info info; 767 struct i5000_error_info info;
782 debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); 768 edac_dbg(4, "MC%d\n", mci->mc_idx);
783 i5000_get_error_info(mci, &info); 769 i5000_get_error_info(mci, &info);
784 i5000_process_error_info(mci, &info, 1); 770 i5000_process_error_info(mci, &info, 1);
785} 771}
@@ -850,15 +836,16 @@ static int i5000_get_devices(struct mem_ctl_info *mci, int dev_idx)
850 836
851 pvt->fsb_error_regs = pdev; 837 pvt->fsb_error_regs = pdev;
852 838
853 debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n", 839 edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
854 pci_name(pvt->system_address), 840 pci_name(pvt->system_address),
855 pvt->system_address->vendor, pvt->system_address->device); 841 pvt->system_address->vendor, pvt->system_address->device);
856 debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", 842 edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
857 pci_name(pvt->branchmap_werrors), 843 pci_name(pvt->branchmap_werrors),
858 pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device); 844 pvt->branchmap_werrors->vendor,
859 debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n", 845 pvt->branchmap_werrors->device);
860 pci_name(pvt->fsb_error_regs), 846 edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
861 pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device); 847 pci_name(pvt->fsb_error_regs),
848 pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
862 849
863 pdev = NULL; 850 pdev = NULL;
864 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 851 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
@@ -981,16 +968,25 @@ static void decode_mtr(int slot_row, u16 mtr)
981 968
982 ans = MTR_DIMMS_PRESENT(mtr); 969 ans = MTR_DIMMS_PRESENT(mtr);
983 970
984 debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr, 971 edac_dbg(2, "\tMTR%d=0x%x: DIMMs are %sPresent\n",
985 ans ? "Present" : "NOT Present"); 972 slot_row, mtr, ans ? "" : "NOT ");
986 if (!ans) 973 if (!ans)
987 return; 974 return;
988 975
989 debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); 976 edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
990 debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); 977 edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
991 debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single"); 978 edac_dbg(2, "\t\tNUMRANK: %s\n",
992 debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]); 979 MTR_DIMM_RANK(mtr) ? "double" : "single");
993 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); 980 edac_dbg(2, "\t\tNUMROW: %s\n",
981 MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
982 MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
983 MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
984 "reserved");
985 edac_dbg(2, "\t\tNUMCOL: %s\n",
986 MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
987 MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
988 MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
989 "reserved");
994} 990}
995 991
996static void handle_channel(struct i5000_pvt *pvt, int slot, int channel, 992static void handle_channel(struct i5000_pvt *pvt, int slot, int channel,
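
[Editor's note] decode_mtr() above loses its CONFIG_EDAC_DEBUG-only numrow_toString/numcol_toString lookup tables; the strings move inline as chained conditionals so the #ifdef block can go away. The same decode written as a switch, taking the already-extracted 2-bit field (the driver extracts it with the MTR_DIMM_ROWS()/MTR_DIMM_COLS() macros shown in the context earlier in this file):

#include <stdio.h>

/* Map the 2-bit MTR row field to its description, equivalent to the
 * chained ?: expressions in the new decode_mtr() above. */
static const char *numrow_str(unsigned int rows_field)
{
	switch (rows_field & 0x3) {
	case 0:  return "8,192 - 13 rows";
	case 1:  return "16,384 - 14 rows";
	case 2:  return "32,768 - 15 rows";
	default: return "reserved";
	}
}

int main(void)
{
	printf("NUMROW: %s\n", numrow_str(1));	/* sample field value */
	return 0;
}
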
@@ -1061,7 +1057,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
1061 "--------------------------------"); 1057 "--------------------------------");
1062 p += n; 1058 p += n;
1063 space -= n; 1059 space -= n;
1064 debugf2("%s\n", mem_buffer); 1060 edac_dbg(2, "%s\n", mem_buffer);
1065 p = mem_buffer; 1061 p = mem_buffer;
1066 space = PAGE_SIZE; 1062 space = PAGE_SIZE;
1067 } 1063 }
@@ -1082,7 +1078,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
1082 } 1078 }
1083 p += n; 1079 p += n;
1084 space -= n; 1080 space -= n;
1085 debugf2("%s\n", mem_buffer); 1081 edac_dbg(2, "%s\n", mem_buffer);
1086 p = mem_buffer; 1082 p = mem_buffer;
1087 space = PAGE_SIZE; 1083 space = PAGE_SIZE;
1088 } 1084 }
@@ -1092,7 +1088,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
1092 "--------------------------------"); 1088 "--------------------------------");
1093 p += n; 1089 p += n;
1094 space -= n; 1090 space -= n;
1095 debugf2("%s\n", mem_buffer); 1091 edac_dbg(2, "%s\n", mem_buffer);
1096 p = mem_buffer; 1092 p = mem_buffer;
1097 space = PAGE_SIZE; 1093 space = PAGE_SIZE;
1098 1094
@@ -1105,7 +1101,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
1105 p += n; 1101 p += n;
1106 space -= n; 1102 space -= n;
1107 } 1103 }
1108 debugf2("%s\n", mem_buffer); 1104 edac_dbg(2, "%s\n", mem_buffer);
1109 p = mem_buffer; 1105 p = mem_buffer;
1110 space = PAGE_SIZE; 1106 space = PAGE_SIZE;
1111 1107
@@ -1118,7 +1114,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
1118 } 1114 }
1119 1115
1120 /* output the last message and free buffer */ 1116 /* output the last message and free buffer */
1121 debugf2("%s\n", mem_buffer); 1117 edac_dbg(2, "%s\n", mem_buffer);
1122 kfree(mem_buffer); 1118 kfree(mem_buffer);
1123} 1119}
1124 1120
@@ -1141,24 +1137,25 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
1141 pvt = mci->pvt_info; 1137 pvt = mci->pvt_info;
1142 1138
1143 pci_read_config_dword(pvt->system_address, AMBASE, 1139 pci_read_config_dword(pvt->system_address, AMBASE,
1144 (u32 *) & pvt->ambase); 1140 &pvt->u.ambase_bottom);
1145 pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32), 1141 pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
1146 ((u32 *) & pvt->ambase) + sizeof(u32)); 1142 &pvt->u.ambase_top);
1147 1143
1148 maxdimmperch = pvt->maxdimmperch; 1144 maxdimmperch = pvt->maxdimmperch;
1149 maxch = pvt->maxch; 1145 maxch = pvt->maxch;
1150 1146
1151 debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n", 1147 edac_dbg(2, "AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
1152 (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch); 1148 (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
1153 1149
1154 /* Get the Branch Map regs */ 1150 /* Get the Branch Map regs */
1155 pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm); 1151 pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
1156 pvt->tolm >>= 12; 1152 pvt->tolm >>= 12;
1157 debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm, 1153 edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n",
1158 pvt->tolm); 1154 pvt->tolm, pvt->tolm);
1159 1155
1160 actual_tolm = pvt->tolm << 28; 1156 actual_tolm = pvt->tolm << 28;
1161 debugf2("Actual TOLM byte addr=%u (0x%x)\n", actual_tolm, actual_tolm); 1157 edac_dbg(2, "Actual TOLM byte addr=%u (0x%x)\n",
1158 actual_tolm, actual_tolm);
1162 1159
1163 pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0); 1160 pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
1164 pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1); 1161 pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
@@ -1168,15 +1165,18 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
1168 limit = (pvt->mir0 >> 4) & 0x0FFF; 1165 limit = (pvt->mir0 >> 4) & 0x0FFF;
1169 way0 = pvt->mir0 & 0x1; 1166 way0 = pvt->mir0 & 0x1;
1170 way1 = pvt->mir0 & 0x2; 1167 way1 = pvt->mir0 & 0x2;
1171 debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); 1168 edac_dbg(2, "MIR0: limit= 0x%x WAY1= %u WAY0= %x\n",
1169 limit, way1, way0);
1172 limit = (pvt->mir1 >> 4) & 0x0FFF; 1170 limit = (pvt->mir1 >> 4) & 0x0FFF;
1173 way0 = pvt->mir1 & 0x1; 1171 way0 = pvt->mir1 & 0x1;
1174 way1 = pvt->mir1 & 0x2; 1172 way1 = pvt->mir1 & 0x2;
1175 debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); 1173 edac_dbg(2, "MIR1: limit= 0x%x WAY1= %u WAY0= %x\n",
1174 limit, way1, way0);
1176 limit = (pvt->mir2 >> 4) & 0x0FFF; 1175 limit = (pvt->mir2 >> 4) & 0x0FFF;
1177 way0 = pvt->mir2 & 0x1; 1176 way0 = pvt->mir2 & 0x1;
1178 way1 = pvt->mir2 & 0x2; 1177 way1 = pvt->mir2 & 0x2;
1179 debugf2("MIR2: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); 1178 edac_dbg(2, "MIR2: limit= 0x%x WAY1= %u WAY0= %x\n",
1179 limit, way1, way0);
1180 1180
1181 /* Get the MTR[0-3] regs */ 1181 /* Get the MTR[0-3] regs */
1182 for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { 1182 for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
@@ -1185,31 +1185,31 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
1185 pci_read_config_word(pvt->branch_0, where, 1185 pci_read_config_word(pvt->branch_0, where,
1186 &pvt->b0_mtr[slot_row]); 1186 &pvt->b0_mtr[slot_row]);
1187 1187
1188 debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where, 1188 edac_dbg(2, "MTR%d where=0x%x B0 value=0x%x\n",
1189 pvt->b0_mtr[slot_row]); 1189 slot_row, where, pvt->b0_mtr[slot_row]);
1190 1190
1191 if (pvt->maxch >= CHANNELS_PER_BRANCH) { 1191 if (pvt->maxch >= CHANNELS_PER_BRANCH) {
1192 pci_read_config_word(pvt->branch_1, where, 1192 pci_read_config_word(pvt->branch_1, where,
1193 &pvt->b1_mtr[slot_row]); 1193 &pvt->b1_mtr[slot_row]);
1194 debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, 1194 edac_dbg(2, "MTR%d where=0x%x B1 value=0x%x\n",
1195 where, pvt->b1_mtr[slot_row]); 1195 slot_row, where, pvt->b1_mtr[slot_row]);
1196 } else { 1196 } else {
1197 pvt->b1_mtr[slot_row] = 0; 1197 pvt->b1_mtr[slot_row] = 0;
1198 } 1198 }
1199 } 1199 }
1200 1200
1201 /* Read and dump branch 0's MTRs */ 1201 /* Read and dump branch 0's MTRs */
1202 debugf2("\nMemory Technology Registers:\n"); 1202 edac_dbg(2, "Memory Technology Registers:\n");
1203 debugf2(" Branch 0:\n"); 1203 edac_dbg(2, " Branch 0:\n");
1204 for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { 1204 for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
1205 decode_mtr(slot_row, pvt->b0_mtr[slot_row]); 1205 decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
1206 } 1206 }
1207 pci_read_config_word(pvt->branch_0, AMB_PRESENT_0, 1207 pci_read_config_word(pvt->branch_0, AMB_PRESENT_0,
1208 &pvt->b0_ambpresent0); 1208 &pvt->b0_ambpresent0);
1209 debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0); 1209 edac_dbg(2, "\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
1210 pci_read_config_word(pvt->branch_0, AMB_PRESENT_1, 1210 pci_read_config_word(pvt->branch_0, AMB_PRESENT_1,
1211 &pvt->b0_ambpresent1); 1211 &pvt->b0_ambpresent1);
1212 debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1); 1212 edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
1213 1213
1214 /* Only if we have 2 branchs (4 channels) */ 1214 /* Only if we have 2 branchs (4 channels) */
1215 if (pvt->maxch < CHANNELS_PER_BRANCH) { 1215 if (pvt->maxch < CHANNELS_PER_BRANCH) {
@@ -1217,18 +1217,18 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
1217 pvt->b1_ambpresent1 = 0; 1217 pvt->b1_ambpresent1 = 0;
1218 } else { 1218 } else {
1219 /* Read and dump branch 1's MTRs */ 1219 /* Read and dump branch 1's MTRs */
1220 debugf2(" Branch 1:\n"); 1220 edac_dbg(2, " Branch 1:\n");
1221 for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { 1221 for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
1222 decode_mtr(slot_row, pvt->b1_mtr[slot_row]); 1222 decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
1223 } 1223 }
1224 pci_read_config_word(pvt->branch_1, AMB_PRESENT_0, 1224 pci_read_config_word(pvt->branch_1, AMB_PRESENT_0,
1225 &pvt->b1_ambpresent0); 1225 &pvt->b1_ambpresent0);
1226 debugf2("\t\tAMB-Branch 1-present0 0x%x:\n", 1226 edac_dbg(2, "\t\tAMB-Branch 1-present0 0x%x:\n",
1227 pvt->b1_ambpresent0); 1227 pvt->b1_ambpresent0);
1228 pci_read_config_word(pvt->branch_1, AMB_PRESENT_1, 1228 pci_read_config_word(pvt->branch_1, AMB_PRESENT_1,
1229 &pvt->b1_ambpresent1); 1229 &pvt->b1_ambpresent1);
1230 debugf2("\t\tAMB-Branch 1-present1 0x%x:\n", 1230 edac_dbg(2, "\t\tAMB-Branch 1-present1 0x%x:\n",
1231 pvt->b1_ambpresent1); 1231 pvt->b1_ambpresent1);
1232 } 1232 }
1233 1233
1234 /* Go and determine the size of each DIMM and place in an 1234 /* Go and determine the size of each DIMM and place in an
@@ -1363,10 +1363,9 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1363 int num_channels; 1363 int num_channels;
1364 int num_dimms_per_channel; 1364 int num_dimms_per_channel;
1365 1365
1366 debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n", 1366 edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
1367 __FILE__, __func__, 1367 pdev->bus->number,
1368 pdev->bus->number, 1368 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1369 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1370 1369
1371 /* We only are looking for func 0 of the set */ 1370 /* We only are looking for func 0 of the set */
1372 if (PCI_FUNC(pdev->devfn) != 0) 1371 if (PCI_FUNC(pdev->devfn) != 0)
@@ -1388,8 +1387,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1388 i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel, 1387 i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
1389 &num_channels); 1388 &num_channels);
1390 1389
1391 debugf0("MC: %s(): Number of Branches=2 Channels= %d DIMMS= %d\n", 1390 edac_dbg(0, "MC: Number of Branches=2 Channels= %d DIMMS= %d\n",
1392 __func__, num_channels, num_dimms_per_channel); 1391 num_channels, num_dimms_per_channel);
1393 1392
1394 /* allocate a new MC control structure */ 1393 /* allocate a new MC control structure */
1395 1394
@@ -1406,10 +1405,9 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1406 if (mci == NULL) 1405 if (mci == NULL)
1407 return -ENOMEM; 1406 return -ENOMEM;
1408 1407
1409 kobject_get(&mci->edac_mci_kobj); 1408 edac_dbg(0, "MC: mci = %p\n", mci);
1410 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
1411 1409
1412 mci->dev = &pdev->dev; /* record ptr to the generic device */ 1410 mci->pdev = &pdev->dev; /* record ptr to the generic device */
1413 1411
1414 pvt = mci->pvt_info; 1412 pvt = mci->pvt_info;
1415 pvt->system_address = pdev; /* Record this device in our private */ 1413 pvt->system_address = pdev; /* Record this device in our private */
@@ -1439,19 +1437,16 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1439 /* initialize the MC control structure 'csrows' table 1437 /* initialize the MC control structure 'csrows' table
1440 * with the mapping and control information */ 1438 * with the mapping and control information */
1441 if (i5000_init_csrows(mci)) { 1439 if (i5000_init_csrows(mci)) {
1442 debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" 1440 edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i5000_init_csrows() returned nonzero value\n");
1443 " because i5000_init_csrows() returned nonzero "
1444 "value\n");
1445 mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ 1441 mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
1446 } else { 1442 } else {
1447 debugf1("MC: Enable error reporting now\n"); 1443 edac_dbg(1, "MC: Enable error reporting now\n");
1448 i5000_enable_error_reporting(mci); 1444 i5000_enable_error_reporting(mci);
1449 } 1445 }
1450 1446
1451 /* add this new MC control structure to EDAC's list of MCs */ 1447 /* add this new MC control structure to EDAC's list of MCs */
1452 if (edac_mc_add_mc(mci)) { 1448 if (edac_mc_add_mc(mci)) {
1453 debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n", 1449 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1454 __FILE__, __func__);
1455 /* FIXME: perhaps some code should go here that disables error 1450 /* FIXME: perhaps some code should go here that disables error
1456 * reporting if we just enabled it 1451 * reporting if we just enabled it
1457 */ 1452 */
@@ -1479,7 +1474,6 @@ fail1:
1479 i5000_put_devices(mci); 1474 i5000_put_devices(mci);
1480 1475
1481fail0: 1476fail0:
1482 kobject_put(&mci->edac_mci_kobj);
1483 edac_mc_free(mci); 1477 edac_mc_free(mci);
1484 return -ENODEV; 1478 return -ENODEV;
1485} 1479}
@@ -1496,7 +1490,7 @@ static int __devinit i5000_init_one(struct pci_dev *pdev,
1496{ 1490{
1497 int rc; 1491 int rc;
1498 1492
1499 debugf0("MC: %s: %s()\n", __FILE__, __func__); 1493 edac_dbg(0, "MC:\n");
1500 1494
1501 /* wake up device */ 1495 /* wake up device */
1502 rc = pci_enable_device(pdev); 1496 rc = pci_enable_device(pdev);
@@ -1515,7 +1509,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
1515{ 1509{
1516 struct mem_ctl_info *mci; 1510 struct mem_ctl_info *mci;
1517 1511
1518 debugf0("%s: %s()\n", __FILE__, __func__); 1512 edac_dbg(0, "\n");
1519 1513
1520 if (i5000_pci) 1514 if (i5000_pci)
1521 edac_pci_release_generic_ctl(i5000_pci); 1515 edac_pci_release_generic_ctl(i5000_pci);
@@ -1525,7 +1519,6 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
1525 1519
1526 /* retrieve references to resources, and free those resources */ 1520 /* retrieve references to resources, and free those resources */
1527 i5000_put_devices(mci); 1521 i5000_put_devices(mci);
1528 kobject_put(&mci->edac_mci_kobj);
1529 edac_mc_free(mci); 1522 edac_mc_free(mci);
1530} 1523}
1531 1524
@@ -1562,7 +1555,7 @@ static int __init i5000_init(void)
1562{ 1555{
1563 int pci_rc; 1556 int pci_rc;
1564 1557
1565 debugf2("MC: %s: %s()\n", __FILE__, __func__); 1558 edac_dbg(2, "MC:\n");
1566 1559
1567 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1560 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1568 opstate_init(); 1561 opstate_init();
@@ -1578,7 +1571,7 @@ static int __init i5000_init(void)
1578 */ 1571 */
1579static void __exit i5000_exit(void) 1572static void __exit i5000_exit(void)
1580{ 1573{
1581 debugf2("MC: %s: %s()\n", __FILE__, __func__); 1574 edac_dbg(2, "MC:\n");
1582 pci_unregister_driver(&i5000_driver); 1575 pci_unregister_driver(&i5000_driver);
1583} 1576}
1584 1577
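
[Editor's note] The union added to struct i5000_pvt (and mirrored in i5400 below) is worth a second look. The old code filled the upper half of the 64-bit ambase with `((u32 *)&pvt->ambase) + sizeof(u32)`: pointer addition scales by the pointee size, so that advances by four u32s (16 bytes), not four bytes, and the second config read landed past the field. Naming the two halves makes the two pci_read_config_dword() calls unambiguous. A runnable sketch of the layout follows; anonymous unions need C11, and the half ordering assumes a little-endian CPU, as on these x86 systems.

#include <stdint.h>
#include <stdio.h>

struct amb_pvt {
	union {
		uint64_t ambase;		/* AMB BAR, read as one value */
		struct {
			uint32_t ambase_bottom;	/* first 32-bit config read */
			uint32_t ambase_top;	/* second 32-bit config read */
		} u;
	};
};

int main(void)
{
	struct amb_pvt pvt;

	pvt.u.ambase_bottom = 0xfe000000;	/* sample lower dword */
	pvt.u.ambase_top    = 0x00000001;	/* sample upper dword */
	printf("AMBASE = 0x%llx\n", (unsigned long long)pvt.ambase);
	return 0;
}
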
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index e9e7c2a29dc3..c4b5e5f868e8 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -431,10 +431,10 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
431 "bank %u, cas %u, ras %u\n", 431 "bank %u, cas %u, ras %u\n",
432 bank, cas, ras); 432 bank, cas, ras);
433 433
434 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 434 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
435 0, 0, syndrome, 435 0, 0, syndrome,
436 chan, rank, -1, 436 chan, rank, -1,
437 msg, detail, NULL); 437 msg, detail);
438} 438}
439 439
440static void i5100_handle_ue(struct mem_ctl_info *mci, 440static void i5100_handle_ue(struct mem_ctl_info *mci,
@@ -453,10 +453,10 @@ static void i5100_handle_ue(struct mem_ctl_info *mci,
453 "bank %u, cas %u, ras %u\n", 453 "bank %u, cas %u, ras %u\n",
454 bank, cas, ras); 454 bank, cas, ras);
455 455
456 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 456 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
457 0, 0, syndrome, 457 0, 0, syndrome,
458 chan, rank, -1, 458 chan, rank, -1,
459 msg, detail, NULL); 459 msg, detail);
460} 460}
461 461
462static void i5100_read_log(struct mem_ctl_info *mci, int chan, 462static void i5100_read_log(struct mem_ctl_info *mci, int chan,
@@ -859,8 +859,8 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
859 i5100_rank_to_slot(mci, chan, rank)); 859 i5100_rank_to_slot(mci, chan, rank));
860 } 860 }
861 861
862 debugf2("dimm channel %d, rank %d, size %ld\n", 862 edac_dbg(2, "dimm channel %d, rank %d, size %ld\n",
863 chan, rank, (long)PAGES_TO_MiB(npages)); 863 chan, rank, (long)PAGES_TO_MiB(npages));
864 } 864 }
865} 865}
866 866
@@ -943,7 +943,7 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
943 goto bail_disable_ch1; 943 goto bail_disable_ch1;
944 } 944 }
945 945
946 mci->dev = &pdev->dev; 946 mci->pdev = &pdev->dev;
947 947
948 priv = mci->pvt_info; 948 priv = mci->pvt_info;
949 priv->ranksperchan = ranksperch; 949 priv->ranksperchan = ranksperch;
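
[Editor's note] One more rename threads through every probe function in this series: the memory controller's parent device moves from mci->dev to mci->pdev. Despite the 'p', the field still holds a plain `struct device *` (it is fed with `&pdev->dev` here), and the rename appears to free the `dev` name for the mci's own embedded device. A minimal sketch of the assignment pattern; the struct definitions below, including the embedded `dev` member, are reduced stand-ins reflecting that assumption.

#include <stdio.h>

struct device {
	const char *name;
};

struct platform_device {
	struct device dev;
};

struct mem_ctl_info {
	struct device dev;	/* the mci's own device node (new user of the name) */
	struct device *pdev;	/* parent/originating device, formerly 'dev' */
};

int main(void)
{
	struct platform_device pdev = { .dev = { .name = "i5100_edac" } };
	struct mem_ctl_info mci;

	mci.pdev = &pdev.dev;	/* matches 'mci->pdev = &pdev->dev;' above */
	printf("parent device: %s\n", mci.pdev->name);
	return 0;
}
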
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 6640c29e1885..277246998b80 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -300,24 +300,6 @@ static inline int extract_fbdchan_indx(u32 x)
300 return (x>>28) & 0x3; 300 return (x>>28) & 0x3;
301} 301}
302 302
303#ifdef CONFIG_EDAC_DEBUG
304/* MTR NUMROW */
305static const char *numrow_toString[] = {
306 "8,192 - 13 rows",
307 "16,384 - 14 rows",
308 "32,768 - 15 rows",
309 "65,536 - 16 rows"
310};
311
312/* MTR NUMCOL */
313static const char *numcol_toString[] = {
314 "1,024 - 10 columns",
315 "2,048 - 11 columns",
316 "4,096 - 12 columns",
317 "reserved"
318};
319#endif
320
321/* Device name and register DID (Device ID) */ 303/* Device name and register DID (Device ID) */
322struct i5400_dev_info { 304struct i5400_dev_info {
323 const char *ctl_name; /* name for this device */ 305 const char *ctl_name; /* name for this device */
@@ -345,7 +327,13 @@ struct i5400_pvt {
345 struct pci_dev *branch_1; /* 22.0 */ 327 struct pci_dev *branch_1; /* 22.0 */
346 328
347 u16 tolm; /* top of low memory */ 329 u16 tolm; /* top of low memory */
348 u64 ambase; /* AMB BAR */ 330 union {
331 u64 ambase; /* AMB BAR */
332 struct {
333 u32 ambase_bottom;
334 u32 ambase_top;
335 } u __packed;
336 };
349 337
350 u16 mir0, mir1; 338 u16 mir0, mir1;
351 339
@@ -560,10 +548,9 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
560 ras = nrec_ras(info); 548 ras = nrec_ras(info);
561 cas = nrec_cas(info); 549 cas = nrec_cas(info);
562 550
563 debugf0("\t\tDIMM= %d Channels= %d,%d (Branch= %d " 551 edac_dbg(0, "\t\tDIMM= %d Channels= %d,%d (Branch= %d DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
564 "DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n", 552 rank, channel, channel + 1, branch >> 1, bank,
565 rank, channel, channel + 1, branch >> 1, bank, 553 buf_id, rdwr_str(rdwr), ras, cas);
566 buf_id, rdwr_str(rdwr), ras, cas);
567 554
568 /* Only 1 bit will be on */ 555 /* Only 1 bit will be on */
569 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); 556 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
@@ -573,10 +560,10 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
573 "Bank=%d Buffer ID = %d RAS=%d CAS=%d Err=0x%lx (%s)", 560 "Bank=%d Buffer ID = %d RAS=%d CAS=%d Err=0x%lx (%s)",
574 bank, buf_id, ras, cas, allErrors, error_name[errnum]); 561 bank, buf_id, ras, cas, allErrors, error_name[errnum]);
575 562
576 edac_mc_handle_error(tp_event, mci, 0, 0, 0, 563 edac_mc_handle_error(tp_event, mci, 1, 0, 0, 0,
577 branch >> 1, -1, rank, 564 branch >> 1, -1, rank,
578 rdwr ? "Write error" : "Read error", 565 rdwr ? "Write error" : "Read error",
579 msg, NULL); 566 msg);
580} 567}
581 568
582/* 569/*
@@ -613,7 +600,7 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
613 600
614 /* Correctable errors */ 601 /* Correctable errors */
615 if (allErrors & ERROR_NF_CORRECTABLE) { 602 if (allErrors & ERROR_NF_CORRECTABLE) {
616 debugf0("\tCorrected bits= 0x%lx\n", allErrors); 603 edac_dbg(0, "\tCorrected bits= 0x%lx\n", allErrors);
617 604
618 branch = extract_fbdchan_indx(info->ferr_nf_fbd); 605 branch = extract_fbdchan_indx(info->ferr_nf_fbd);
619 606
@@ -634,10 +621,9 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
634 /* Only 1 bit will be on */ 621 /* Only 1 bit will be on */
635 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); 622 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
636 623
637 debugf0("\t\tDIMM= %d Channel= %d (Branch %d " 624 edac_dbg(0, "\t\tDIMM= %d Channel= %d (Branch %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
638 "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", 625 rank, channel, branch >> 1, bank,
639 rank, channel, branch >> 1, bank, 626 rdwr_str(rdwr), ras, cas);
640 rdwr_str(rdwr), ras, cas);
641 627
642 /* Form out message */ 628 /* Form out message */
643 snprintf(msg, sizeof(msg), 629 snprintf(msg, sizeof(msg),
@@ -646,10 +632,10 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
646 branch >> 1, bank, rdwr_str(rdwr), ras, cas, 632 branch >> 1, bank, rdwr_str(rdwr), ras, cas,
647 allErrors, error_name[errnum]); 633 allErrors, error_name[errnum]);
648 634
649 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0, 635 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
650 branch >> 1, channel % 2, rank, 636 branch >> 1, channel % 2, rank,
651 rdwr ? "Write error" : "Read error", 637 rdwr ? "Write error" : "Read error",
652 msg, NULL); 638 msg);
653 639
654 return; 640 return;
655 } 641 }
@@ -700,7 +686,7 @@ static void i5400_clear_error(struct mem_ctl_info *mci)
700static void i5400_check_error(struct mem_ctl_info *mci) 686static void i5400_check_error(struct mem_ctl_info *mci)
701{ 687{
702 struct i5400_error_info info; 688 struct i5400_error_info info;
703 debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); 689 edac_dbg(4, "MC%d\n", mci->mc_idx);
704 i5400_get_error_info(mci, &info); 690 i5400_get_error_info(mci, &info);
705 i5400_process_error_info(mci, &info); 691 i5400_process_error_info(mci, &info);
706} 692}
@@ -786,15 +772,16 @@ static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx)
786 } 772 }
787 pvt->fsb_error_regs = pdev; 773 pvt->fsb_error_regs = pdev;
788 774
789 debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n", 775 edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
790 pci_name(pvt->system_address), 776 pci_name(pvt->system_address),
791 pvt->system_address->vendor, pvt->system_address->device); 777 pvt->system_address->vendor, pvt->system_address->device);
792 debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", 778 edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
793 pci_name(pvt->branchmap_werrors), 779 pci_name(pvt->branchmap_werrors),
794 pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device); 780 pvt->branchmap_werrors->vendor,
795 debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n", 781 pvt->branchmap_werrors->device);
796 pci_name(pvt->fsb_error_regs), 782 edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
797 pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device); 783 pci_name(pvt->fsb_error_regs),
784 pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
798 785
799 pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL, 786 pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL,
800 PCI_DEVICE_ID_INTEL_5400_FBD0, NULL); 787 PCI_DEVICE_ID_INTEL_5400_FBD0, NULL);
@@ -882,8 +869,8 @@ static int determine_mtr(struct i5400_pvt *pvt, int dimm, int channel)
882 n = dimm; 869 n = dimm;
883 870
884 if (n >= DIMMS_PER_CHANNEL) { 871 if (n >= DIMMS_PER_CHANNEL) {
885 debugf0("ERROR: trying to access an invalid dimm: %d\n", 872 edac_dbg(0, "ERROR: trying to access an invalid dimm: %d\n",
886 dimm); 873 dimm);
887 return 0; 874 return 0;
888 } 875 }
889 876
@@ -903,20 +890,29 @@ static void decode_mtr(int slot_row, u16 mtr)
903 890
904 ans = MTR_DIMMS_PRESENT(mtr); 891 ans = MTR_DIMMS_PRESENT(mtr);
905 892
906 debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr, 893 edac_dbg(2, "\tMTR%d=0x%x: DIMMs are %sPresent\n",
907 ans ? "Present" : "NOT Present"); 894 slot_row, mtr, ans ? "" : "NOT ");
908 if (!ans) 895 if (!ans)
909 return; 896 return;
910 897
911 debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); 898 edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
912 899
913 debugf2("\t\tELECTRICAL THROTTLING is %s\n", 900 edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
914 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled"); 901 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
915 902
916 debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); 903 edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
917 debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single"); 904 edac_dbg(2, "\t\tNUMRANK: %s\n",
918 debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]); 905 MTR_DIMM_RANK(mtr) ? "double" : "single");
919 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); 906 edac_dbg(2, "\t\tNUMROW: %s\n",
907 MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
908 MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
909 MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
910 "65,536 - 16 rows");
911 edac_dbg(2, "\t\tNUMCOL: %s\n",
912 MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
913 MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
914 MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
915 "reserved");
920} 916}
921 917
922static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel, 918static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel,
@@ -989,7 +985,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
989 "-------------------------------"); 985 "-------------------------------");
990 p += n; 986 p += n;
991 space -= n; 987 space -= n;
992 debugf2("%s\n", mem_buffer); 988 edac_dbg(2, "%s\n", mem_buffer);
993 p = mem_buffer; 989 p = mem_buffer;
994 space = PAGE_SIZE; 990 space = PAGE_SIZE;
995 } 991 }
@@ -1004,7 +1000,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
1004 p += n; 1000 p += n;
1005 space -= n; 1001 space -= n;
1006 } 1002 }
1007 debugf2("%s\n", mem_buffer); 1003 edac_dbg(2, "%s\n", mem_buffer);
1008 p = mem_buffer; 1004 p = mem_buffer;
1009 space = PAGE_SIZE; 1005 space = PAGE_SIZE;
1010 } 1006 }
@@ -1014,7 +1010,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
1014 "-------------------------------"); 1010 "-------------------------------");
1015 p += n; 1011 p += n;
1016 space -= n; 1012 space -= n;
1017 debugf2("%s\n", mem_buffer); 1013 edac_dbg(2, "%s\n", mem_buffer);
1018 p = mem_buffer; 1014 p = mem_buffer;
1019 space = PAGE_SIZE; 1015 space = PAGE_SIZE;
1020 1016
@@ -1029,7 +1025,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
1029 } 1025 }
1030 1026
1031 space -= n; 1027 space -= n;
1032 debugf2("%s\n", mem_buffer); 1028 edac_dbg(2, "%s\n", mem_buffer);
1033 p = mem_buffer; 1029 p = mem_buffer;
1034 space = PAGE_SIZE; 1030 space = PAGE_SIZE;
1035 1031
@@ -1042,7 +1038,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
1042 } 1038 }
1043 1039
1044 /* output the last message and free buffer */ 1040 /* output the last message and free buffer */
1045 debugf2("%s\n", mem_buffer); 1041 edac_dbg(2, "%s\n", mem_buffer);
1046 kfree(mem_buffer); 1042 kfree(mem_buffer);
1047} 1043}
1048 1044
@@ -1065,25 +1061,25 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1065 pvt = mci->pvt_info; 1061 pvt = mci->pvt_info;
1066 1062
1067 pci_read_config_dword(pvt->system_address, AMBASE, 1063 pci_read_config_dword(pvt->system_address, AMBASE,
1068 (u32 *) &pvt->ambase); 1064 &pvt->u.ambase_bottom);
1069 pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32), 1065 pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
1070 ((u32 *) &pvt->ambase) + sizeof(u32)); 1066 &pvt->u.ambase_top);
1071 1067
1072 maxdimmperch = pvt->maxdimmperch; 1068 maxdimmperch = pvt->maxdimmperch;
1073 maxch = pvt->maxch; 1069 maxch = pvt->maxch;
1074 1070
1075 debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n", 1071 edac_dbg(2, "AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
1076 (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch); 1072 (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
1077 1073
1078 /* Get the Branch Map regs */ 1074 /* Get the Branch Map regs */
1079 pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm); 1075 pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
1080 pvt->tolm >>= 12; 1076 pvt->tolm >>= 12;
1081 debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm, 1077 edac_dbg(2, "\nTOLM (number of 256M regions) =%u (0x%x)\n",
1082 pvt->tolm); 1078 pvt->tolm, pvt->tolm);
1083 1079
1084 actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28)); 1080 actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
1085 debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n", 1081 edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
1086 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28); 1082 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
1087 1083
1088 pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0); 1084 pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
1089 pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1); 1085 pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
@@ -1092,11 +1088,13 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1092 limit = (pvt->mir0 >> 4) & 0x0fff; 1088 limit = (pvt->mir0 >> 4) & 0x0fff;
1093 way0 = pvt->mir0 & 0x1; 1089 way0 = pvt->mir0 & 0x1;
1094 way1 = pvt->mir0 & 0x2; 1090 way1 = pvt->mir0 & 0x2;
1095 debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); 1091 edac_dbg(2, "MIR0: limit= 0x%x WAY1= %u WAY0= %x\n",
1092 limit, way1, way0);
1096 limit = (pvt->mir1 >> 4) & 0xfff; 1093 limit = (pvt->mir1 >> 4) & 0xfff;
1097 way0 = pvt->mir1 & 0x1; 1094 way0 = pvt->mir1 & 0x1;
1098 way1 = pvt->mir1 & 0x2; 1095 way1 = pvt->mir1 & 0x2;
1099 debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); 1096 edac_dbg(2, "MIR1: limit= 0x%x WAY1= %u WAY0= %x\n",
1097 limit, way1, way0);
1100 1098
1101 /* Get the set of MTR[0-3] regs by each branch */ 1099 /* Get the set of MTR[0-3] regs by each branch */
1102 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) { 1100 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) {
@@ -1106,8 +1104,8 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1106 pci_read_config_word(pvt->branch_0, where, 1104 pci_read_config_word(pvt->branch_0, where,
1107 &pvt->b0_mtr[slot_row]); 1105 &pvt->b0_mtr[slot_row]);
1108 1106
1109 debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where, 1107 edac_dbg(2, "MTR%d where=0x%x B0 value=0x%x\n",
1110 pvt->b0_mtr[slot_row]); 1108 slot_row, where, pvt->b0_mtr[slot_row]);
1111 1109
1112 if (pvt->maxch < CHANNELS_PER_BRANCH) { 1110 if (pvt->maxch < CHANNELS_PER_BRANCH) {
1113 pvt->b1_mtr[slot_row] = 0; 1111 pvt->b1_mtr[slot_row] = 0;
@@ -1117,22 +1115,22 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1117 /* Branch 1 set of MTR registers */ 1115 /* Branch 1 set of MTR registers */
1118 pci_read_config_word(pvt->branch_1, where, 1116 pci_read_config_word(pvt->branch_1, where,
1119 &pvt->b1_mtr[slot_row]); 1117 &pvt->b1_mtr[slot_row]);
1120 debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, where, 1118 edac_dbg(2, "MTR%d where=0x%x B1 value=0x%x\n",
1121 pvt->b1_mtr[slot_row]); 1119 slot_row, where, pvt->b1_mtr[slot_row]);
1122 } 1120 }
1123 1121
1124 /* Read and dump branch 0's MTRs */ 1122 /* Read and dump branch 0's MTRs */
1125 debugf2("\nMemory Technology Registers:\n"); 1123 edac_dbg(2, "Memory Technology Registers:\n");
1126 debugf2(" Branch 0:\n"); 1124 edac_dbg(2, " Branch 0:\n");
1127 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) 1125 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
1128 decode_mtr(slot_row, pvt->b0_mtr[slot_row]); 1126 decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
1129 1127
1130 pci_read_config_word(pvt->branch_0, AMBPRESENT_0, 1128 pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
1131 &pvt->b0_ambpresent0); 1129 &pvt->b0_ambpresent0);
1132 debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0); 1130 edac_dbg(2, "\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
1133 pci_read_config_word(pvt->branch_0, AMBPRESENT_1, 1131 pci_read_config_word(pvt->branch_0, AMBPRESENT_1,
1134 &pvt->b0_ambpresent1); 1132 &pvt->b0_ambpresent1);
1135 debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1); 1133 edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
1136 1134
1137	/* Only if we have 2 branches (4 channels) */		1135
1138 if (pvt->maxch < CHANNELS_PER_BRANCH) { 1136 if (pvt->maxch < CHANNELS_PER_BRANCH) {
@@ -1140,18 +1138,18 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1140 pvt->b1_ambpresent1 = 0; 1138 pvt->b1_ambpresent1 = 0;
1141 } else { 1139 } else {
1142 /* Read and dump branch 1's MTRs */ 1140 /* Read and dump branch 1's MTRs */
1143 debugf2(" Branch 1:\n"); 1141 edac_dbg(2, " Branch 1:\n");
1144 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) 1142 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
1145 decode_mtr(slot_row, pvt->b1_mtr[slot_row]); 1143 decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
1146 1144
1147 pci_read_config_word(pvt->branch_1, AMBPRESENT_0, 1145 pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
1148 &pvt->b1_ambpresent0); 1146 &pvt->b1_ambpresent0);
1149 debugf2("\t\tAMB-Branch 1-present0 0x%x:\n", 1147 edac_dbg(2, "\t\tAMB-Branch 1-present0 0x%x:\n",
1150 pvt->b1_ambpresent0); 1148 pvt->b1_ambpresent0);
1151 pci_read_config_word(pvt->branch_1, AMBPRESENT_1, 1149 pci_read_config_word(pvt->branch_1, AMBPRESENT_1,
1152 &pvt->b1_ambpresent1); 1150 &pvt->b1_ambpresent1);
1153 debugf2("\t\tAMB-Branch 1-present1 0x%x:\n", 1151 edac_dbg(2, "\t\tAMB-Branch 1-present1 0x%x:\n",
1154 pvt->b1_ambpresent1); 1152 pvt->b1_ambpresent1);
1155 } 1153 }
1156 1154
1157 /* Go and determine the size of each DIMM and place in an 1155 /* Go and determine the size of each DIMM and place in an
@@ -1203,10 +1201,9 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
1203 1201
1204 size_mb = pvt->dimm_info[slot][channel].megabytes; 1202 size_mb = pvt->dimm_info[slot][channel].megabytes;
1205 1203
1206 debugf2("%s: dimm%zd (branch %d channel %d slot %d): %d.%03d GB\n", 1204 edac_dbg(2, "dimm (branch %d channel %d slot %d): %d.%03d GB\n",
1207 __func__, dimm - mci->dimms, 1205 channel / 2, channel % 2, slot,
1208 channel / 2, channel % 2, slot, 1206 size_mb / 1000, size_mb % 1000);
1209 size_mb / 1000, size_mb % 1000);
1210 1207
1211 dimm->nr_pages = size_mb << 8; 1208 dimm->nr_pages = size_mb << 8;
1212 dimm->grain = 8; 1209 dimm->grain = 8;
@@ -1227,7 +1224,7 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
1227	 * With such single-DIMM mode, the SDCC algorithm degrades to SECDED+.	1224	 * With such single-DIMM mode, the SDCC algorithm degrades to SECDED+.
1228 */ 1225 */
1229 if (ndimms == 1) 1226 if (ndimms == 1)
1230 mci->dimms[0].edac_mode = EDAC_SECDED; 1227 mci->dimms[0]->edac_mode = EDAC_SECDED;
1231 1228
1232 return (ndimms == 0); 1229 return (ndimms == 0);
1233} 1230}
@@ -1270,10 +1267,9 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1270 if (dev_idx >= ARRAY_SIZE(i5400_devs)) 1267 if (dev_idx >= ARRAY_SIZE(i5400_devs))
1271 return -EINVAL; 1268 return -EINVAL;
1272 1269
1273 debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n", 1270 edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
1274 __FILE__, __func__, 1271 pdev->bus->number,
1275 pdev->bus->number, 1272 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1276 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1277 1273
1278 /* We only are looking for func 0 of the set */ 1274 /* We only are looking for func 0 of the set */
1279 if (PCI_FUNC(pdev->devfn) != 0) 1275 if (PCI_FUNC(pdev->devfn) != 0)
@@ -1297,9 +1293,9 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1297 if (mci == NULL) 1293 if (mci == NULL)
1298 return -ENOMEM; 1294 return -ENOMEM;
1299 1295
1300 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci); 1296 edac_dbg(0, "MC: mci = %p\n", mci);
1301 1297
1302 mci->dev = &pdev->dev; /* record ptr to the generic device */ 1298 mci->pdev = &pdev->dev; /* record ptr to the generic device */
1303 1299
1304 pvt = mci->pvt_info; 1300 pvt = mci->pvt_info;
1305 pvt->system_address = pdev; /* Record this device in our private */ 1301 pvt->system_address = pdev; /* Record this device in our private */
@@ -1329,19 +1325,16 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1329 /* initialize the MC control structure 'dimms' table 1325 /* initialize the MC control structure 'dimms' table
1330 * with the mapping and control information */ 1326 * with the mapping and control information */
1331 if (i5400_init_dimms(mci)) { 1327 if (i5400_init_dimms(mci)) {
1332 debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" 1328 edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i5400_init_dimms() returned nonzero value\n");
1333 " because i5400_init_dimms() returned nonzero "
1334 "value\n");
1335 mci->edac_cap = EDAC_FLAG_NONE; /* no dimms found */ 1329 mci->edac_cap = EDAC_FLAG_NONE; /* no dimms found */
1336 } else { 1330 } else {
1337 debugf1("MC: Enable error reporting now\n"); 1331 edac_dbg(1, "MC: Enable error reporting now\n");
1338 i5400_enable_error_reporting(mci); 1332 i5400_enable_error_reporting(mci);
1339 } 1333 }
1340 1334
1341 /* add this new MC control structure to EDAC's list of MCs */ 1335 /* add this new MC control structure to EDAC's list of MCs */
1342 if (edac_mc_add_mc(mci)) { 1336 if (edac_mc_add_mc(mci)) {
1343 debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n", 1337 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1344 __FILE__, __func__);
1345 /* FIXME: perhaps some code should go here that disables error 1338 /* FIXME: perhaps some code should go here that disables error
1346 * reporting if we just enabled it 1339 * reporting if we just enabled it
1347 */ 1340 */
@@ -1385,7 +1378,7 @@ static int __devinit i5400_init_one(struct pci_dev *pdev,
1385{ 1378{
1386 int rc; 1379 int rc;
1387 1380
1388 debugf0("MC: %s: %s()\n", __FILE__, __func__); 1381 edac_dbg(0, "MC:\n");
1389 1382
1390 /* wake up device */ 1383 /* wake up device */
1391 rc = pci_enable_device(pdev); 1384 rc = pci_enable_device(pdev);
@@ -1404,7 +1397,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
1404{ 1397{
1405 struct mem_ctl_info *mci; 1398 struct mem_ctl_info *mci;
1406 1399
1407 debugf0("%s: %s()\n", __FILE__, __func__); 1400 edac_dbg(0, "\n");
1408 1401
1409 if (i5400_pci) 1402 if (i5400_pci)
1410 edac_pci_release_generic_ctl(i5400_pci); 1403 edac_pci_release_generic_ctl(i5400_pci);
@@ -1450,7 +1443,7 @@ static int __init i5400_init(void)
1450{ 1443{
1451 int pci_rc; 1444 int pci_rc;
1452 1445
1453 debugf2("MC: %s: %s()\n", __FILE__, __func__); 1446 edac_dbg(2, "MC:\n");
1454 1447
1455 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1448 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1456 opstate_init(); 1449 opstate_init();
@@ -1466,7 +1459,7 @@ static int __init i5400_init(void)
1466 */ 1459 */
1467static void __exit i5400_exit(void) 1460static void __exit i5400_exit(void)
1468{ 1461{
1469 debugf2("MC: %s: %s()\n", __FILE__, __func__); 1462 edac_dbg(2, "MC:\n");
1470 pci_unregister_driver(&i5400_driver); 1463 pci_unregister_driver(&i5400_driver);
1471} 1464}
1472 1465
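Two mechanical conversions run through the i5400 hunks above and repeat in the two files that follow. A condensed before/after, assembled from lines already present in the hunks (an illustration of the pattern, not additional patch content):

	/* debug macros: the verbosity level moves out of the macro name
	 * into the first argument, and the format becomes one unbroken
	 * string so the whole message can be grepped in a single piece */
	debugf0("\tCorrected bits= 0x%lx\n", allErrors);	/* old */
	edac_dbg(0, "\tCorrected bits= 0x%lx\n", allErrors);	/* new */

	/* edac_mc_handle_error(): the argument added after mci reads as an
	 * explicit event count (1 per reported event here), and the
	 * trailing NULL slot of the old signature is dropped */
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,	/* old */
			     branch >> 1, channel % 2, rank,
			     rdwr ? "Write error" : "Read error",
			     msg, NULL);
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,	/* new */
			     branch >> 1, channel % 2, rank,
			     rdwr ? "Write error" : "Read error",
			     msg);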
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 97c22fd650ee..a09d0667f72a 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -182,24 +182,6 @@ static const u16 mtr_regs[MAX_SLOTS] = {
182#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3) 182#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3)
183#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10) 183#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10)
184 184
185#ifdef CONFIG_EDAC_DEBUG
186/* MTR NUMROW */
187static const char *numrow_toString[] = {
188 "8,192 - 13 rows",
189 "16,384 - 14 rows",
190 "32,768 - 15 rows",
191 "65,536 - 16 rows"
192};
193
194/* MTR NUMCOL */
195static const char *numcol_toString[] = {
196 "1,024 - 10 columns",
197 "2,048 - 11 columns",
198 "4,096 - 12 columns",
199 "reserved"
200};
201#endif
202
203/************************************************ 185/************************************************
204 * i7300 Register definitions for error detection 186 * i7300 Register definitions for error detection
205 ************************************************/ 187 ************************************************/
@@ -467,10 +449,10 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
467 "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))", 449 "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))",
468 bank, ras, cas, errors, specific); 450 bank, ras, cas, errors, specific);
469 451
470 edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0, 452 edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0,
471 branch, -1, rank, 453 branch, -1, rank,
472 is_wr ? "Write error" : "Read error", 454 is_wr ? "Write error" : "Read error",
473 pvt->tmp_prt_buffer, NULL); 455 pvt->tmp_prt_buffer);
474 456
475 } 457 }
476 458
@@ -513,11 +495,11 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
513 "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))", 495 "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))",
514 bank, ras, cas, errors, specific); 496 bank, ras, cas, errors, specific);
515 497
516 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 498 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0,
517 syndrome, 499 syndrome,
518 branch >> 1, channel % 2, rank, 500 branch >> 1, channel % 2, rank,
519 is_wr ? "Write error" : "Read error", 501 is_wr ? "Write error" : "Read error",
520 pvt->tmp_prt_buffer, NULL); 502 pvt->tmp_prt_buffer);
521 } 503 }
522 return; 504 return;
523} 505}
@@ -614,9 +596,8 @@ static int decode_mtr(struct i7300_pvt *pvt,
614 mtr = pvt->mtr[slot][branch]; 596 mtr = pvt->mtr[slot][branch];
615 ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0; 597 ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0;
616 598
617 debugf2("\tMTR%d CH%d: DIMMs are %s (mtr)\n", 599 edac_dbg(2, "\tMTR%d CH%d: DIMMs are %sPresent (mtr)\n",
618 slot, channel, 600 slot, channel, ans ? "" : "NOT ");
619 ans ? "Present" : "NOT Present");
620 601
621 /* Determine if there is a DIMM present in this DIMM slot */ 602 /* Determine if there is a DIMM present in this DIMM slot */
622 if (!ans) 603 if (!ans)
@@ -638,16 +619,25 @@ static int decode_mtr(struct i7300_pvt *pvt,
638 619
639 dinfo->megabytes = 1 << addrBits; 620 dinfo->megabytes = 1 << addrBits;
640 621
641 debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); 622 edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
642 623
643 debugf2("\t\tELECTRICAL THROTTLING is %s\n", 624 edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
644 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled"); 625 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
645 626
646 debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); 627 edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
647 debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANKS(mtr) ? "double" : "single"); 628 edac_dbg(2, "\t\tNUMRANK: %s\n",
648 debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]); 629 MTR_DIMM_RANKS(mtr) ? "double" : "single");
649 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); 630 edac_dbg(2, "\t\tNUMROW: %s\n",
650 debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes); 631 MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
632 MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
633 MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
634 "65,536 - 16 rows");
635 edac_dbg(2, "\t\tNUMCOL: %s\n",
636 MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
637 MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
638 MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
639 "reserved");
640 edac_dbg(2, "\t\tSIZE: %d MB\n", dinfo->megabytes);
651 641
652 /* 642 /*
653 * The type of error detection actually depends of the 643 * The type of error detection actually depends of the
@@ -663,9 +653,9 @@ static int decode_mtr(struct i7300_pvt *pvt,
663 dimm->mtype = MEM_FB_DDR2; 653 dimm->mtype = MEM_FB_DDR2;
664 if (IS_SINGLE_MODE(pvt->mc_settings_a)) { 654 if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
665 dimm->edac_mode = EDAC_SECDED; 655 dimm->edac_mode = EDAC_SECDED;
666 debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n"); 656 edac_dbg(2, "\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
667 } else { 657 } else {
668 debugf2("\t\tECC code is on Lockstep mode\n"); 658 edac_dbg(2, "\t\tECC code is on Lockstep mode\n");
669 if (MTR_DRAM_WIDTH(mtr) == 8) 659 if (MTR_DRAM_WIDTH(mtr) == 8)
670 dimm->edac_mode = EDAC_S8ECD8ED; 660 dimm->edac_mode = EDAC_S8ECD8ED;
671 else 661 else
@@ -674,9 +664,9 @@ static int decode_mtr(struct i7300_pvt *pvt,
674 664
675 /* ask what device type on this row */ 665 /* ask what device type on this row */
676 if (MTR_DRAM_WIDTH(mtr) == 8) { 666 if (MTR_DRAM_WIDTH(mtr) == 8) {
677 debugf2("\t\tScrub algorithm for x8 is on %s mode\n", 667 edac_dbg(2, "\t\tScrub algorithm for x8 is on %s mode\n",
678 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ? 668 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
679 "enhanced" : "normal"); 669 "enhanced" : "normal");
680 670
681 dimm->dtype = DEV_X8; 671 dimm->dtype = DEV_X8;
682 } else 672 } else
@@ -710,14 +700,14 @@ static void print_dimm_size(struct i7300_pvt *pvt)
710 p += n; 700 p += n;
711 space -= n; 701 space -= n;
712 } 702 }
713 debugf2("%s\n", pvt->tmp_prt_buffer); 703 edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
714 p = pvt->tmp_prt_buffer; 704 p = pvt->tmp_prt_buffer;
715 space = PAGE_SIZE; 705 space = PAGE_SIZE;
716 n = snprintf(p, space, "-------------------------------" 706 n = snprintf(p, space, "-------------------------------"
717 "------------------------------"); 707 "------------------------------");
718 p += n; 708 p += n;
719 space -= n; 709 space -= n;
720 debugf2("%s\n", pvt->tmp_prt_buffer); 710 edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
721 p = pvt->tmp_prt_buffer; 711 p = pvt->tmp_prt_buffer;
722 space = PAGE_SIZE; 712 space = PAGE_SIZE;
723 713
@@ -733,7 +723,7 @@ static void print_dimm_size(struct i7300_pvt *pvt)
733 space -= n; 723 space -= n;
734 } 724 }
735 725
736 debugf2("%s\n", pvt->tmp_prt_buffer); 726 edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
737 p = pvt->tmp_prt_buffer; 727 p = pvt->tmp_prt_buffer;
738 space = PAGE_SIZE; 728 space = PAGE_SIZE;
739 } 729 }
@@ -742,7 +732,7 @@ static void print_dimm_size(struct i7300_pvt *pvt)
742 "------------------------------"); 732 "------------------------------");
743 p += n; 733 p += n;
744 space -= n; 734 space -= n;
745 debugf2("%s\n", pvt->tmp_prt_buffer); 735 edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
746 p = pvt->tmp_prt_buffer; 736 p = pvt->tmp_prt_buffer;
747 space = PAGE_SIZE; 737 space = PAGE_SIZE;
748#endif 738#endif
@@ -765,7 +755,7 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
765 755
766 pvt = mci->pvt_info; 756 pvt = mci->pvt_info;
767 757
768 debugf2("Memory Technology Registers:\n"); 758 edac_dbg(2, "Memory Technology Registers:\n");
769 759
770 /* Get the AMB present registers for the four channels */ 760 /* Get the AMB present registers for the four channels */
771 for (branch = 0; branch < MAX_BRANCHES; branch++) { 761 for (branch = 0; branch < MAX_BRANCHES; branch++) {
@@ -774,15 +764,15 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
774 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], 764 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
775 AMBPRESENT_0, 765 AMBPRESENT_0,
776 &pvt->ambpresent[channel]); 766 &pvt->ambpresent[channel]);
777 debugf2("\t\tAMB-present CH%d = 0x%x:\n", 767 edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
778 channel, pvt->ambpresent[channel]); 768 channel, pvt->ambpresent[channel]);
779 769
780 channel = to_channel(1, branch); 770 channel = to_channel(1, branch);
781 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], 771 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
782 AMBPRESENT_1, 772 AMBPRESENT_1,
783 &pvt->ambpresent[channel]); 773 &pvt->ambpresent[channel]);
784 debugf2("\t\tAMB-present CH%d = 0x%x:\n", 774 edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
785 channel, pvt->ambpresent[channel]); 775 channel, pvt->ambpresent[channel]);
786 } 776 }
787 777
788 /* Get the set of MTR[0-7] regs by each branch */ 778 /* Get the set of MTR[0-7] regs by each branch */
@@ -824,12 +814,11 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
824static void decode_mir(int mir_no, u16 mir[MAX_MIR]) 814static void decode_mir(int mir_no, u16 mir[MAX_MIR])
825{ 815{
826 if (mir[mir_no] & 3) 816 if (mir[mir_no] & 3)
827 debugf2("MIR%d: limit= 0x%x Branch(es) that participate:" 817 edac_dbg(2, "MIR%d: limit= 0x%x Branch(es) that participate: %s %s\n",
828 " %s %s\n", 818 mir_no,
829 mir_no, 819 (mir[mir_no] >> 4) & 0xfff,
830 (mir[mir_no] >> 4) & 0xfff, 820 (mir[mir_no] & 1) ? "B0" : "",
831 (mir[mir_no] & 1) ? "B0" : "", 821 (mir[mir_no] & 2) ? "B1" : "");
832 (mir[mir_no] & 2) ? "B1" : "");
833} 822}
834 823
835/** 824/**
@@ -849,17 +838,17 @@ static int i7300_get_mc_regs(struct mem_ctl_info *mci)
849 pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE, 838 pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE,
850 (u32 *) &pvt->ambase); 839 (u32 *) &pvt->ambase);
851 840
852 debugf2("AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase); 841 edac_dbg(2, "AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase);
853 842
854 /* Get the Branch Map regs */ 843 /* Get the Branch Map regs */
855 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm); 844 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm);
856 pvt->tolm >>= 12; 845 pvt->tolm >>= 12;
857 debugf2("TOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm, 846 edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n",
858 pvt->tolm); 847 pvt->tolm, pvt->tolm);
859 848
860 actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28)); 849 actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
861 debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n", 850 edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
862 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28); 851 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
863 852
864 /* Get memory controller settings */ 853 /* Get memory controller settings */
865 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS, 854 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS,
@@ -868,15 +857,15 @@ static int i7300_get_mc_regs(struct mem_ctl_info *mci)
868 &pvt->mc_settings_a); 857 &pvt->mc_settings_a);
869 858
870 if (IS_SINGLE_MODE(pvt->mc_settings_a)) 859 if (IS_SINGLE_MODE(pvt->mc_settings_a))
871 debugf0("Memory controller operating on single mode\n"); 860 edac_dbg(0, "Memory controller operating on single mode\n");
872 else 861 else
873 debugf0("Memory controller operating on %s mode\n", 862 edac_dbg(0, "Memory controller operating on %smirrored mode\n",
874 IS_MIRRORED(pvt->mc_settings) ? "mirrored" : "non-mirrored"); 863 IS_MIRRORED(pvt->mc_settings) ? "" : "non-");
875 864
876 debugf0("Error detection is %s\n", 865 edac_dbg(0, "Error detection is %s\n",
877 IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled"); 866 IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
878 debugf0("Retry is %s\n", 867 edac_dbg(0, "Retry is %s\n",
879 IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled"); 868 IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
880 869
881 /* Get Memory Interleave Range registers */ 870 /* Get Memory Interleave Range registers */
882 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0, 871 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
@@ -970,18 +959,18 @@ static int __devinit i7300_get_devices(struct mem_ctl_info *mci)
970 } 959 }
971 } 960 }
972 961
973 debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n", 962 edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
974 pci_name(pvt->pci_dev_16_0_fsb_ctlr), 963 pci_name(pvt->pci_dev_16_0_fsb_ctlr),
975 pvt->pci_dev_16_0_fsb_ctlr->vendor, 964 pvt->pci_dev_16_0_fsb_ctlr->vendor,
976 pvt->pci_dev_16_0_fsb_ctlr->device); 965 pvt->pci_dev_16_0_fsb_ctlr->device);
977 debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", 966 edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
978 pci_name(pvt->pci_dev_16_1_fsb_addr_map), 967 pci_name(pvt->pci_dev_16_1_fsb_addr_map),
979 pvt->pci_dev_16_1_fsb_addr_map->vendor, 968 pvt->pci_dev_16_1_fsb_addr_map->vendor,
980 pvt->pci_dev_16_1_fsb_addr_map->device); 969 pvt->pci_dev_16_1_fsb_addr_map->device);
981 debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n", 970 edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
982 pci_name(pvt->pci_dev_16_2_fsb_err_regs), 971 pci_name(pvt->pci_dev_16_2_fsb_err_regs),
983 pvt->pci_dev_16_2_fsb_err_regs->vendor, 972 pvt->pci_dev_16_2_fsb_err_regs->vendor,
984 pvt->pci_dev_16_2_fsb_err_regs->device); 973 pvt->pci_dev_16_2_fsb_err_regs->device);
985 974
986 pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL, 975 pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL,
987 PCI_DEVICE_ID_INTEL_I7300_MCH_FB0, 976 PCI_DEVICE_ID_INTEL_I7300_MCH_FB0,
@@ -1032,10 +1021,9 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
1032 if (rc == -EIO) 1021 if (rc == -EIO)
1033 return rc; 1022 return rc;
1034 1023
1035 debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", 1024 edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
1036 __func__, 1025 pdev->bus->number,
1037 pdev->bus->number, 1026 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1038 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1039 1027
1040 /* We only are looking for func 0 of the set */ 1028 /* We only are looking for func 0 of the set */
1041 if (PCI_FUNC(pdev->devfn) != 0) 1029 if (PCI_FUNC(pdev->devfn) != 0)
@@ -1055,9 +1043,9 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
1055 if (mci == NULL) 1043 if (mci == NULL)
1056 return -ENOMEM; 1044 return -ENOMEM;
1057 1045
1058 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 1046 edac_dbg(0, "MC: mci = %p\n", mci);
1059 1047
1060 mci->dev = &pdev->dev; /* record ptr to the generic device */ 1048 mci->pdev = &pdev->dev; /* record ptr to the generic device */
1061 1049
1062 pvt = mci->pvt_info; 1050 pvt = mci->pvt_info;
1063 pvt->pci_dev_16_0_fsb_ctlr = pdev; /* Record this device in our private */ 1051 pvt->pci_dev_16_0_fsb_ctlr = pdev; /* Record this device in our private */
@@ -1088,19 +1076,16 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
1088 /* initialize the MC control structure 'csrows' table 1076 /* initialize the MC control structure 'csrows' table
1089 * with the mapping and control information */ 1077 * with the mapping and control information */
1090 if (i7300_get_mc_regs(mci)) { 1078 if (i7300_get_mc_regs(mci)) {
1091 debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" 1079 edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i7300_init_csrows() returned nonzero value\n");
1092 " because i7300_init_csrows() returned nonzero "
1093 "value\n");
1094 mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ 1080 mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
1095 } else { 1081 } else {
1096 debugf1("MC: Enable error reporting now\n"); 1082 edac_dbg(1, "MC: Enable error reporting now\n");
1097 i7300_enable_error_reporting(mci); 1083 i7300_enable_error_reporting(mci);
1098 } 1084 }
1099 1085
1100 /* add this new MC control structure to EDAC's list of MCs */ 1086 /* add this new MC control structure to EDAC's list of MCs */
1101 if (edac_mc_add_mc(mci)) { 1087 if (edac_mc_add_mc(mci)) {
1102 debugf0("MC: " __FILE__ 1088 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1103 ": %s(): failed edac_mc_add_mc()\n", __func__);
1104 /* FIXME: perhaps some code should go here that disables error 1089 /* FIXME: perhaps some code should go here that disables error
1105 * reporting if we just enabled it 1090 * reporting if we just enabled it
1106 */ 1091 */
@@ -1142,7 +1127,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
1142 struct mem_ctl_info *mci; 1127 struct mem_ctl_info *mci;
1143 char *tmp; 1128 char *tmp;
1144 1129
1145 debugf0(__FILE__ ": %s()\n", __func__); 1130 edac_dbg(0, "\n");
1146 1131
1147 if (i7300_pci) 1132 if (i7300_pci)
1148 edac_pci_release_generic_ctl(i7300_pci); 1133 edac_pci_release_generic_ctl(i7300_pci);
@@ -1189,7 +1174,7 @@ static int __init i7300_init(void)
1189{ 1174{
1190 int pci_rc; 1175 int pci_rc;
1191 1176
1192 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1177 edac_dbg(2, "\n");
1193 1178
1194 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1179 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1195 opstate_init(); 1180 opstate_init();
@@ -1204,7 +1189,7 @@ static int __init i7300_init(void)
1204 */ 1189 */
1205static void __exit i7300_exit(void) 1190static void __exit i7300_exit(void)
1206{ 1191{
1207 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1192 edac_dbg(2, "\n");
1208 pci_unregister_driver(&i7300_driver); 1193 pci_unregister_driver(&i7300_driver);
1209} 1194}
1210 1195
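The i7300 hunks also show why the CONFIG_EDAC_DEBUG-only lookup tables (numrow_toString, numcol_toString) could be removed outright: debug statements were their only readers. The strings move inline into the edac_dbg() calls as conditional expressions, so no #ifdef bookkeeping is left around a table that exists only for debugging. Side by side, using lines from the hunks above (illustration only):

	/* old: table existed only under #ifdef CONFIG_EDAC_DEBUG */
	static const char *numrow_toString[] = {
		"8,192 - 13 rows",
		"16,384 - 14 rows",
		"32,768 - 15 rows",
		"65,536 - 16 rows"
	};
	debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]);

	/* new: the strings live at the call site; more repetitive, but
	 * nothing needs guarding when CONFIG_EDAC_DEBUG is unset */
	edac_dbg(2, "\t\tNUMROW: %s\n",
		 MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
		 MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
		 MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
		 "65,536 - 16 rows");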
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index d27778f65a5d..3672101023bd 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -248,6 +248,8 @@ struct i7core_dev {
248}; 248};
249 249
250struct i7core_pvt { 250struct i7core_pvt {
251 struct device *addrmatch_dev, *chancounts_dev;
252
251 struct pci_dev *pci_noncore; 253 struct pci_dev *pci_noncore;
252 struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1]; 254 struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1];
253 struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1]; 255 struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];
@@ -514,29 +516,28 @@ static int get_dimm_config(struct mem_ctl_info *mci)
514 pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod); 516 pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
515 pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map); 517 pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
516 518
517 debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n", 519 edac_dbg(0, "QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
518 pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status, 520 pvt->i7core_dev->socket, pvt->info.mc_control,
519 pvt->info.max_dod, pvt->info.ch_map); 521 pvt->info.mc_status, pvt->info.max_dod, pvt->info.ch_map);
520 522
521 if (ECC_ENABLED(pvt)) { 523 if (ECC_ENABLED(pvt)) {
522 debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4); 524 edac_dbg(0, "ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
523 if (ECCx8(pvt)) 525 if (ECCx8(pvt))
524 mode = EDAC_S8ECD8ED; 526 mode = EDAC_S8ECD8ED;
525 else 527 else
526 mode = EDAC_S4ECD4ED; 528 mode = EDAC_S4ECD4ED;
527 } else { 529 } else {
528 debugf0("ECC disabled\n"); 530 edac_dbg(0, "ECC disabled\n");
529 mode = EDAC_NONE; 531 mode = EDAC_NONE;
530 } 532 }
531 533
532 /* FIXME: need to handle the error codes */ 534 /* FIXME: need to handle the error codes */
533 debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked " 535 edac_dbg(0, "DOD Max limits: DIMMS: %d, %d-ranked, %d-banked x%x x 0x%x\n",
534 "x%x x 0x%x\n", 536 numdimms(pvt->info.max_dod),
535 numdimms(pvt->info.max_dod), 537 numrank(pvt->info.max_dod >> 2),
536 numrank(pvt->info.max_dod >> 2), 538 numbank(pvt->info.max_dod >> 4),
537 numbank(pvt->info.max_dod >> 4), 539 numrow(pvt->info.max_dod >> 6),
538 numrow(pvt->info.max_dod >> 6), 540 numcol(pvt->info.max_dod >> 9));
539 numcol(pvt->info.max_dod >> 9));
540 541
541 for (i = 0; i < NUM_CHANS; i++) { 542 for (i = 0; i < NUM_CHANS; i++) {
542 u32 data, dimm_dod[3], value[8]; 543 u32 data, dimm_dod[3], value[8];
@@ -545,11 +546,11 @@ static int get_dimm_config(struct mem_ctl_info *mci)
545 continue; 546 continue;
546 547
547 if (!CH_ACTIVE(pvt, i)) { 548 if (!CH_ACTIVE(pvt, i)) {
548 debugf0("Channel %i is not active\n", i); 549 edac_dbg(0, "Channel %i is not active\n", i);
549 continue; 550 continue;
550 } 551 }
551 if (CH_DISABLED(pvt, i)) { 552 if (CH_DISABLED(pvt, i)) {
552 debugf0("Channel %i is disabled\n", i); 553 edac_dbg(0, "Channel %i is disabled\n", i);
553 continue; 554 continue;
554 } 555 }
555 556
@@ -580,15 +581,14 @@ static int get_dimm_config(struct mem_ctl_info *mci)
580 pci_read_config_dword(pvt->pci_ch[i][1], 581 pci_read_config_dword(pvt->pci_ch[i][1],
581 MC_DOD_CH_DIMM2, &dimm_dod[2]); 582 MC_DOD_CH_DIMM2, &dimm_dod[2]);
582 583
583 debugf0("Ch%d phy rd%d, wr%d (0x%08x): " 584 edac_dbg(0, "Ch%d phy rd%d, wr%d (0x%08x): %s%s%s%cDIMMs\n",
584 "%s%s%s%cDIMMs\n", 585 i,
585 i, 586 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
586 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i), 587 data,
587 data, 588 pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
588 pvt->channel[i].is_3dimms_present ? "3DIMMS " : "", 589 pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "",
589 pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "", 590 pvt->channel[i].has_4rank ? "HAS_4R " : "",
590 pvt->channel[i].has_4rank ? "HAS_4R " : "", 591 (data & REGISTERED_DIMM) ? 'R' : 'U');
591 (data & REGISTERED_DIMM) ? 'R' : 'U');
592 592
593 for (j = 0; j < 3; j++) { 593 for (j = 0; j < 3; j++) {
594 u32 banks, ranks, rows, cols; 594 u32 banks, ranks, rows, cols;
@@ -607,11 +607,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
607 /* DDR3 has 8 I/O banks */ 607 /* DDR3 has 8 I/O banks */
608 size = (rows * cols * banks * ranks) >> (20 - 3); 608 size = (rows * cols * banks * ranks) >> (20 - 3);
609 609
610 debugf0("\tdimm %d %d Mb offset: %x, " 610 edac_dbg(0, "\tdimm %d %d Mb offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n",
611 "bank: %d, rank: %d, row: %#x, col: %#x\n", 611 j, size,
612 j, size, 612 RANKOFFSET(dimm_dod[j]),
613 RANKOFFSET(dimm_dod[j]), 613 banks, ranks, rows, cols);
614 banks, ranks, rows, cols);
615 614
616 npages = MiB_TO_PAGES(size); 615 npages = MiB_TO_PAGES(size);
617 616
@@ -647,12 +646,12 @@ static int get_dimm_config(struct mem_ctl_info *mci)
647 pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]); 646 pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
648 pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]); 647 pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
649 pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]); 648 pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
650 debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i); 649 edac_dbg(1, "\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
651 for (j = 0; j < 8; j++) 650 for (j = 0; j < 8; j++)
652 debugf1("\t\t%#x\t%#x\t%#x\n", 651 edac_dbg(1, "\t\t%#x\t%#x\t%#x\n",
653 (value[j] >> 27) & 0x1, 652 (value[j] >> 27) & 0x1,
654 (value[j] >> 24) & 0x7, 653 (value[j] >> 24) & 0x7,
655 (value[j] & ((1 << 24) - 1))); 654 (value[j] & ((1 << 24) - 1)));
656 } 655 }
657 656
658 return 0; 657 return 0;
@@ -662,6 +661,8 @@ static int get_dimm_config(struct mem_ctl_info *mci)
662 Error insertion routines 661 Error insertion routines
663 ****************************************************************************/ 662 ****************************************************************************/
664 663
664#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
665
665/* The i7core has independent error injection features per channel. 666/* The i7core has independent error injection features per channel.
666 However, to keep the code simpler, we don't allow enabling error injection 667
667 on more than one channel. 668 on more than one channel.
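The to_mci() helper defined above is the hinge of the sysfs rework in the hunks that follow: each show/store callback now receives the struct device embedded in struct mem_ctl_info, and container_of() walks from that member back to the containing mci. One converted callback, reassembled from the inject_section hunk below with the diff markup stripped:

	static ssize_t i7core_inject_section_show(struct device *dev,
						  struct device_attribute *mattr,
						  char *data)
	{
		struct mem_ctl_info *mci = to_mci(dev);	/* container_of() */
		struct i7core_pvt *pvt = mci->pvt_info;

		return sprintf(data, "0x%08x\n", pvt->inject.section);
	}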
@@ -691,9 +692,11 @@ static int disable_inject(const struct mem_ctl_info *mci)
691 * bit 0 - refers to the lower 32-byte half cacheline 692 * bit 0 - refers to the lower 32-byte half cacheline
692 * bit 1 - refers to the upper 32-byte half cacheline 693 * bit 1 - refers to the upper 32-byte half cacheline
693 */ 694 */
694static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci, 695static ssize_t i7core_inject_section_store(struct device *dev,
696 struct device_attribute *mattr,
695 const char *data, size_t count) 697 const char *data, size_t count)
696{ 698{
699 struct mem_ctl_info *mci = to_mci(dev);
697 struct i7core_pvt *pvt = mci->pvt_info; 700 struct i7core_pvt *pvt = mci->pvt_info;
698 unsigned long value; 701 unsigned long value;
699 int rc; 702 int rc;
@@ -709,9 +712,11 @@ static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
709 return count; 712 return count;
710} 713}
711 714
712static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci, 715static ssize_t i7core_inject_section_show(struct device *dev,
713 char *data) 716 struct device_attribute *mattr,
717 char *data)
714{ 718{
719 struct mem_ctl_info *mci = to_mci(dev);
715 struct i7core_pvt *pvt = mci->pvt_info; 720 struct i7core_pvt *pvt = mci->pvt_info;
716 return sprintf(data, "0x%08x\n", pvt->inject.section); 721 return sprintf(data, "0x%08x\n", pvt->inject.section);
717} 722}
@@ -724,10 +729,12 @@ static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
724 * bit 1 - inject ECC error 729 * bit 1 - inject ECC error
725 * bit 2 - inject parity error 730 * bit 2 - inject parity error
726 */ 731 */
727static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci, 732static ssize_t i7core_inject_type_store(struct device *dev,
733 struct device_attribute *mattr,
728 const char *data, size_t count) 734 const char *data, size_t count)
729{ 735{
730 struct i7core_pvt *pvt = mci->pvt_info; 736 struct mem_ctl_info *mci = to_mci(dev);
737 struct i7core_pvt *pvt = mci->pvt_info;
731 unsigned long value; 738 unsigned long value;
732 int rc; 739 int rc;
733 740
@@ -742,10 +749,13 @@ static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
742 return count; 749 return count;
743} 750}
744 751
745static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci, 752static ssize_t i7core_inject_type_show(struct device *dev,
746 char *data) 753 struct device_attribute *mattr,
754 char *data)
747{ 755{
756 struct mem_ctl_info *mci = to_mci(dev);
748 struct i7core_pvt *pvt = mci->pvt_info; 757 struct i7core_pvt *pvt = mci->pvt_info;
758
749 return sprintf(data, "0x%08x\n", pvt->inject.type); 759 return sprintf(data, "0x%08x\n", pvt->inject.type);
750} 760}
751 761
@@ -759,9 +769,11 @@ static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
759 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an 769 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
760 * uncorrectable error to be injected. 770 * uncorrectable error to be injected.
761 */ 771 */
762static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci, 772static ssize_t i7core_inject_eccmask_store(struct device *dev,
763 const char *data, size_t count) 773 struct device_attribute *mattr,
774 const char *data, size_t count)
764{ 775{
776 struct mem_ctl_info *mci = to_mci(dev);
765 struct i7core_pvt *pvt = mci->pvt_info; 777 struct i7core_pvt *pvt = mci->pvt_info;
766 unsigned long value; 778 unsigned long value;
767 int rc; 779 int rc;
@@ -777,10 +789,13 @@ static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
777 return count; 789 return count;
778} 790}
779 791
780static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci, 792static ssize_t i7core_inject_eccmask_show(struct device *dev,
781 char *data) 793 struct device_attribute *mattr,
794 char *data)
782{ 795{
796 struct mem_ctl_info *mci = to_mci(dev);
783 struct i7core_pvt *pvt = mci->pvt_info; 797 struct i7core_pvt *pvt = mci->pvt_info;
798
784 return sprintf(data, "0x%08x\n", pvt->inject.eccmask); 799 return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
785} 800}
786 801
@@ -797,14 +812,16 @@ static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
797 812
798#define DECLARE_ADDR_MATCH(param, limit) \ 813#define DECLARE_ADDR_MATCH(param, limit) \
799static ssize_t i7core_inject_store_##param( \ 814static ssize_t i7core_inject_store_##param( \
800 struct mem_ctl_info *mci, \ 815 struct device *dev, \
801 const char *data, size_t count) \ 816 struct device_attribute *mattr, \
817 const char *data, size_t count) \
802{ \ 818{ \
819 struct mem_ctl_info *mci = to_mci(dev); \
803 struct i7core_pvt *pvt; \ 820 struct i7core_pvt *pvt; \
804 long value; \ 821 long value; \
805 int rc; \ 822 int rc; \
806 \ 823 \
807 debugf1("%s()\n", __func__); \ 824 edac_dbg(1, "\n"); \
808 pvt = mci->pvt_info; \ 825 pvt = mci->pvt_info; \
809 \ 826 \
810 if (pvt->inject.enable) \ 827 if (pvt->inject.enable) \
@@ -824,13 +841,15 @@ static ssize_t i7core_inject_store_##param( \
824} \ 841} \
825 \ 842 \
826static ssize_t i7core_inject_show_##param( \ 843static ssize_t i7core_inject_show_##param( \
827 struct mem_ctl_info *mci, \ 844 struct device *dev, \
828 char *data) \ 845 struct device_attribute *mattr, \
846 char *data) \
829{ \ 847{ \
848 struct mem_ctl_info *mci = to_mci(dev); \
830 struct i7core_pvt *pvt; \ 849 struct i7core_pvt *pvt; \
831 \ 850 \
832 pvt = mci->pvt_info; \ 851 pvt = mci->pvt_info; \
833 debugf1("%s() pvt=%p\n", __func__, pvt); \ 852 edac_dbg(1, "pvt=%p\n", pvt); \
834 if (pvt->inject.param < 0) \ 853 if (pvt->inject.param < 0) \
835 return sprintf(data, "any\n"); \ 854 return sprintf(data, "any\n"); \
836 else \ 855 else \
@@ -838,14 +857,9 @@ static ssize_t i7core_inject_show_##param( \
838} 857}
839 858
840#define ATTR_ADDR_MATCH(param) \ 859#define ATTR_ADDR_MATCH(param) \
841 { \ 860 static DEVICE_ATTR(param, S_IRUGO | S_IWUSR, \
842 .attr = { \ 861 i7core_inject_show_##param, \
843 .name = #param, \ 862 i7core_inject_store_##param)
844 .mode = (S_IRUGO | S_IWUSR) \
845 }, \
846 .show = i7core_inject_show_##param, \
847 .store = i7core_inject_store_##param, \
848 }
849 863
850DECLARE_ADDR_MATCH(channel, 3); 864DECLARE_ADDR_MATCH(channel, 3);
851DECLARE_ADDR_MATCH(dimm, 3); 865DECLARE_ADDR_MATCH(dimm, 3);
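ATTR_ADDR_MATCH() is now a thin wrapper over the core driver-model macro rather than a hand-rolled mcidev_sysfs_attribute initializer. Expanding ATTR_ADDR_MATCH(channel) by hand gives roughly the following (a sketch of the standard DEVICE_ATTR() expansion, not text from the patch):

	static struct device_attribute dev_attr_channel =
		__ATTR(channel, S_IRUGO | S_IWUSR,
		       i7core_inject_show_channel,
		       i7core_inject_store_channel);

The resulting dev_attr_channel object is what the i7core_addrmatch_attrs[] array in the next hunk collects, so the attributes land in an ordinary attribute_group instead of EDAC's private descriptor list.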
@@ -854,14 +868,21 @@ DECLARE_ADDR_MATCH(bank, 32);
854DECLARE_ADDR_MATCH(page, 0x10000); 868DECLARE_ADDR_MATCH(page, 0x10000);
855DECLARE_ADDR_MATCH(col, 0x4000); 869DECLARE_ADDR_MATCH(col, 0x4000);
856 870
871ATTR_ADDR_MATCH(channel);
872ATTR_ADDR_MATCH(dimm);
873ATTR_ADDR_MATCH(rank);
874ATTR_ADDR_MATCH(bank);
875ATTR_ADDR_MATCH(page);
876ATTR_ADDR_MATCH(col);
877
857static int write_and_test(struct pci_dev *dev, const int where, const u32 val) 878static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
858{ 879{
859 u32 read; 880 u32 read;
860 int count; 881 int count;
861 882
862 debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n", 883 edac_dbg(0, "setting pci %02x:%02x.%x reg=%02x value=%08x\n",
863 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), 884 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
864 where, val); 885 where, val);
865 886
866 for (count = 0; count < 10; count++) { 887 for (count = 0; count < 10; count++) {
867 if (count) 888 if (count)
@@ -899,9 +920,11 @@ static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
899 * is reliable enough to check if the MC is using the 920 * is reliable enough to check if the MC is using the
900 * three channels. However, this is not clear in the datasheet. 921
901 */ 922 */
902static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci, 923static ssize_t i7core_inject_enable_store(struct device *dev,
903 const char *data, size_t count) 924 struct device_attribute *mattr,
925 const char *data, size_t count)
904{ 926{
927 struct mem_ctl_info *mci = to_mci(dev);
905 struct i7core_pvt *pvt = mci->pvt_info; 928 struct i7core_pvt *pvt = mci->pvt_info;
906 u32 injectmask; 929 u32 injectmask;
907 u64 mask = 0; 930 u64 mask = 0;
@@ -994,17 +1017,18 @@ static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
994 pci_write_config_dword(pvt->pci_noncore, 1017 pci_write_config_dword(pvt->pci_noncore,
995 MC_CFG_CONTROL, 8); 1018 MC_CFG_CONTROL, 8);
996 1019
997 debugf0("Error inject addr match 0x%016llx, ecc 0x%08x," 1020 edac_dbg(0, "Error inject addr match 0x%016llx, ecc 0x%08x, inject 0x%08x\n",
998 " inject 0x%08x\n", 1021 mask, pvt->inject.eccmask, injectmask);
999 mask, pvt->inject.eccmask, injectmask);
1000 1022
1001 1023
1002 return count; 1024 return count;
1003} 1025}
1004 1026
1005static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci, 1027static ssize_t i7core_inject_enable_show(struct device *dev,
1006 char *data) 1028 struct device_attribute *mattr,
1029 char *data)
1007{ 1030{
1031 struct mem_ctl_info *mci = to_mci(dev);
1008 struct i7core_pvt *pvt = mci->pvt_info; 1032 struct i7core_pvt *pvt = mci->pvt_info;
1009 u32 injectmask; 1033 u32 injectmask;
1010 1034
@@ -1014,7 +1038,7 @@ static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
1014 pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0], 1038 pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
1015 MC_CHANNEL_ERROR_INJECT, &injectmask); 1039 MC_CHANNEL_ERROR_INJECT, &injectmask);
1016 1040
1017 debugf0("Inject error read: 0x%018x\n", injectmask); 1041 edac_dbg(0, "Inject error read: 0x%018x\n", injectmask);
1018 1042
1019 if (injectmask & 0x0c) 1043 if (injectmask & 0x0c)
1020 pvt->inject.enable = 1; 1044 pvt->inject.enable = 1;
@@ -1024,12 +1048,14 @@ static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
1024 1048
1025#define DECLARE_COUNTER(param) \ 1049#define DECLARE_COUNTER(param) \
1026static ssize_t i7core_show_counter_##param( \ 1050static ssize_t i7core_show_counter_##param( \
1027 struct mem_ctl_info *mci, \ 1051 struct device *dev, \
1028 char *data) \ 1052 struct device_attribute *mattr, \
1053 char *data) \
1029{ \ 1054{ \
1055 struct mem_ctl_info *mci = to_mci(dev); \
1030 struct i7core_pvt *pvt = mci->pvt_info; \ 1056 struct i7core_pvt *pvt = mci->pvt_info; \
1031 \ 1057 \
1032 debugf1("%s() \n", __func__); \ 1058 edac_dbg(1, "\n"); \
1033 if (!pvt->ce_count_available || (pvt->is_registered)) \ 1059 if (!pvt->ce_count_available || (pvt->is_registered)) \
1034 return sprintf(data, "data unavailable\n"); \ 1060 return sprintf(data, "data unavailable\n"); \
1035 return sprintf(data, "%lu\n", \ 1061 return sprintf(data, "%lu\n", \
@@ -1037,121 +1063,179 @@ static ssize_t i7core_show_counter_##param( \
1037} 1063}
1038 1064
1039#define ATTR_COUNTER(param) \ 1065#define ATTR_COUNTER(param) \
1040 { \ 1066 static DEVICE_ATTR(udimm##param, S_IRUGO | S_IWUSR, \
1041 .attr = { \ 1067 i7core_show_counter_##param, \
1042 .name = __stringify(udimm##param), \ 1068 NULL)
1043 .mode = (S_IRUGO | S_IWUSR) \
1044 }, \
1045 .show = i7core_show_counter_##param \
1046 }
1047 1069
1048DECLARE_COUNTER(0); 1070DECLARE_COUNTER(0);
1049DECLARE_COUNTER(1); 1071DECLARE_COUNTER(1);
1050DECLARE_COUNTER(2); 1072DECLARE_COUNTER(2);
1051 1073
1074ATTR_COUNTER(0);
1075ATTR_COUNTER(1);
1076ATTR_COUNTER(2);
1077
1052/* 1078/*
1053 * Sysfs struct 1079 * inject_addrmatch device sysfs struct
1054 */ 1080 */
1055 1081
1056static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = { 1082static struct attribute *i7core_addrmatch_attrs[] = {
1057 ATTR_ADDR_MATCH(channel), 1083 &dev_attr_channel.attr,
1058 ATTR_ADDR_MATCH(dimm), 1084 &dev_attr_dimm.attr,
1059 ATTR_ADDR_MATCH(rank), 1085 &dev_attr_rank.attr,
1060 ATTR_ADDR_MATCH(bank), 1086 &dev_attr_bank.attr,
1061 ATTR_ADDR_MATCH(page), 1087 &dev_attr_page.attr,
1062 ATTR_ADDR_MATCH(col), 1088 &dev_attr_col.attr,
1063 { } /* End of list */ 1089 NULL
1090};
1091
1092static struct attribute_group addrmatch_grp = {
1093 .attrs = i7core_addrmatch_attrs,
1094};
1095
1096static const struct attribute_group *addrmatch_groups[] = {
1097 &addrmatch_grp,
1098 NULL
1064}; 1099};
1065 1100
1066static const struct mcidev_sysfs_group i7core_inject_addrmatch = { 1101static void addrmatch_release(struct device *device)
1067 .name = "inject_addrmatch", 1102{
1068 .mcidev_attr = i7core_addrmatch_attrs, 1103 edac_dbg(1, "Releasing device %s\n", dev_name(device));
1104 kfree(device);
1105}
1106
1107static struct device_type addrmatch_type = {
1108 .groups = addrmatch_groups,
1109 .release = addrmatch_release,
1069}; 1110};
1070 1111
1071static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = { 1112/*
1072 ATTR_COUNTER(0), 1113 * all_channel_counts sysfs struct
1073 ATTR_COUNTER(1), 1114 */
1074 ATTR_COUNTER(2), 1115
1075 { .attr = { .name = NULL } } 1116static struct attribute *i7core_udimm_counters_attrs[] = {
1117 &dev_attr_udimm0.attr,
1118 &dev_attr_udimm1.attr,
1119 &dev_attr_udimm2.attr,
1120 NULL
1076}; 1121};
1077 1122
1078static const struct mcidev_sysfs_group i7core_udimm_counters = { 1123static struct attribute_group all_channel_counts_grp = {
1079 .name = "all_channel_counts", 1124 .attrs = i7core_udimm_counters_attrs,
1080 .mcidev_attr = i7core_udimm_counters_attrs,
1081}; 1125};
1082 1126
1083static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = { 1127static const struct attribute_group *all_channel_counts_groups[] = {
1084 { 1128 &all_channel_counts_grp,
1085 .attr = { 1129 NULL
1086 .name = "inject_section",
1087 .mode = (S_IRUGO | S_IWUSR)
1088 },
1089 .show = i7core_inject_section_show,
1090 .store = i7core_inject_section_store,
1091 }, {
1092 .attr = {
1093 .name = "inject_type",
1094 .mode = (S_IRUGO | S_IWUSR)
1095 },
1096 .show = i7core_inject_type_show,
1097 .store = i7core_inject_type_store,
1098 }, {
1099 .attr = {
1100 .name = "inject_eccmask",
1101 .mode = (S_IRUGO | S_IWUSR)
1102 },
1103 .show = i7core_inject_eccmask_show,
1104 .store = i7core_inject_eccmask_store,
1105 }, {
1106 .grp = &i7core_inject_addrmatch,
1107 }, {
1108 .attr = {
1109 .name = "inject_enable",
1110 .mode = (S_IRUGO | S_IWUSR)
1111 },
1112 .show = i7core_inject_enable_show,
1113 .store = i7core_inject_enable_store,
1114 },
1115 { } /* End of list */
1116}; 1130};
1117 1131
1118static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = { 1132static void all_channel_counts_release(struct device *device)
1119 { 1133{
1120 .attr = { 1134 edac_dbg(1, "Releasing device %s\n", dev_name(device));
1121 .name = "inject_section", 1135 kfree(device);
1122 .mode = (S_IRUGO | S_IWUSR) 1136}
1123 }, 1137
1124 .show = i7core_inject_section_show, 1138static struct device_type all_channel_counts_type = {
1125 .store = i7core_inject_section_store, 1139 .groups = all_channel_counts_groups,
1126 }, { 1140 .release = all_channel_counts_release,
1127 .attr = {
1128 .name = "inject_type",
1129 .mode = (S_IRUGO | S_IWUSR)
1130 },
1131 .show = i7core_inject_type_show,
1132 .store = i7core_inject_type_store,
1133 }, {
1134 .attr = {
1135 .name = "inject_eccmask",
1136 .mode = (S_IRUGO | S_IWUSR)
1137 },
1138 .show = i7core_inject_eccmask_show,
1139 .store = i7core_inject_eccmask_store,
1140 }, {
1141 .grp = &i7core_inject_addrmatch,
1142 }, {
1143 .attr = {
1144 .name = "inject_enable",
1145 .mode = (S_IRUGO | S_IWUSR)
1146 },
1147 .show = i7core_inject_enable_show,
1148 .store = i7core_inject_enable_store,
1149 }, {
1150 .grp = &i7core_udimm_counters,
1151 },
1152 { } /* End of list */
1153}; 1141};
1154 1142
1143/*
1144 * inject sysfs attributes
1145 */
1146
1147static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR,
1148 i7core_inject_section_show, i7core_inject_section_store);
1149
1150static DEVICE_ATTR(inject_type, S_IRUGO | S_IWUSR,
1151 i7core_inject_type_show, i7core_inject_type_store);
1152
1153
1154static DEVICE_ATTR(inject_eccmask, S_IRUGO | S_IWUSR,
1155 i7core_inject_eccmask_show, i7core_inject_eccmask_store);
1156
1157static DEVICE_ATTR(inject_enable, S_IRUGO | S_IWUSR,
1158 i7core_inject_enable_show, i7core_inject_enable_store);
1159
1160static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)
1161{
1162 struct i7core_pvt *pvt = mci->pvt_info;
1163 int rc;
1164
1165 rc = device_create_file(&mci->dev, &dev_attr_inject_section);
1166 if (rc < 0)
1167 return rc;
1168 rc = device_create_file(&mci->dev, &dev_attr_inject_type);
1169 if (rc < 0)
1170 return rc;
1171 rc = device_create_file(&mci->dev, &dev_attr_inject_eccmask);
1172 if (rc < 0)
1173 return rc;
1174 rc = device_create_file(&mci->dev, &dev_attr_inject_enable);
1175 if (rc < 0)
1176 return rc;
1177
1178 pvt->addrmatch_dev = kzalloc(sizeof(*pvt->addrmatch_dev), GFP_KERNEL);
1179 if (!pvt->addrmatch_dev)
1180 return -ENOMEM;
1181
1182 pvt->addrmatch_dev->type = &addrmatch_type;
1183 pvt->addrmatch_dev->bus = mci->dev.bus;
1184 device_initialize(pvt->addrmatch_dev);
1185 pvt->addrmatch_dev->parent = &mci->dev;
1186 dev_set_name(pvt->addrmatch_dev, "inject_addrmatch");
1187 dev_set_drvdata(pvt->addrmatch_dev, mci);
1188
1189 edac_dbg(1, "creating %s\n", dev_name(pvt->addrmatch_dev));
1190
1191 rc = device_add(pvt->addrmatch_dev);
1192 if (rc < 0)
1193 return rc;
1194
1195 if (!pvt->is_registered) {
1196 pvt->chancounts_dev = kzalloc(sizeof(*pvt->chancounts_dev),
1197 GFP_KERNEL);
1198 if (!pvt->chancounts_dev) {
1199 device_del(pvt->addrmatch_dev);
1200 put_device(pvt->addrmatch_dev);
1201 return -ENOMEM;
1202 }
1203
1204 pvt->chancounts_dev->type = &all_channel_counts_type;
1205 pvt->chancounts_dev->bus = mci->dev.bus;
1206 device_initialize(pvt->chancounts_dev);
1207 pvt->chancounts_dev->parent = &mci->dev;
1208 dev_set_name(pvt->chancounts_dev, "all_channel_counts");
1209 dev_set_drvdata(pvt->chancounts_dev, mci);
1210
1211 edac_dbg(1, "creating %s\n", dev_name(pvt->chancounts_dev));
1212
1213 rc = device_add(pvt->chancounts_dev);
1214 if (rc < 0)
1215 return rc;
1216 }
1217 return 0;
1218}
1219
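i7core_create_sysfs_devices() follows the two-step device pattern: device_initialize() takes the initial reference, device_add() publishes the node, and the type's ->release() frees the kzalloc'ed memory on the final put. A minimal sketch of the create/teardown pairing this relies on (names illustrative, not from the driver):

	#include <linux/device.h>
	#include <linux/slab.h>

	static struct device *example_add_child(struct device *parent,
						struct device_type *type)
	{
		struct device *dev;

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return NULL;

		dev->type = type;	/* supplies ->groups and ->release */
		device_initialize(dev);	/* refcount = 1, not yet visible */
		dev->parent = parent;
		dev_set_name(dev, "example");

		if (device_add(dev) < 0) {	/* publish in sysfs */
			put_device(dev);	/* ->release() frees dev */
			return NULL;
		}
		return dev;
	}

	/* For a device that was successfully added: del first, then put. */
	static void example_del_child(struct device *dev)
	{
		device_del(dev);
		put_device(dev);
	}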
1220static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
1221{
1222 struct i7core_pvt *pvt = mci->pvt_info;
1223
1224 edac_dbg(1, "\n");
1225
1226 device_remove_file(&mci->dev, &dev_attr_inject_section);
1227 device_remove_file(&mci->dev, &dev_attr_inject_type);
1228 device_remove_file(&mci->dev, &dev_attr_inject_eccmask);
1229 device_remove_file(&mci->dev, &dev_attr_inject_enable);
1230
1231 if (!pvt->is_registered) {
1232 device_del(pvt->chancounts_dev);
1233 put_device(pvt->chancounts_dev);
1234 }
1235 device_del(pvt->addrmatch_dev);
1236 put_device(pvt->addrmatch_dev);
1237}
1238
1155/**************************************************************************** 1239/****************************************************************************
1156 Device initialization routines: put/get, init/exit 1240 Device initialization routines: put/get, init/exit
1157 ****************************************************************************/ 1241 ****************************************************************************/
@@ -1164,14 +1248,14 @@ static void i7core_put_devices(struct i7core_dev *i7core_dev)
1164{ 1248{
1165 int i; 1249 int i;
1166 1250
1167 debugf0(__FILE__ ": %s()\n", __func__); 1251 edac_dbg(0, "\n");
1168 for (i = 0; i < i7core_dev->n_devs; i++) { 1252 for (i = 0; i < i7core_dev->n_devs; i++) {
1169 struct pci_dev *pdev = i7core_dev->pdev[i]; 1253 struct pci_dev *pdev = i7core_dev->pdev[i];
1170 if (!pdev) 1254 if (!pdev)
1171 continue; 1255 continue;
1172 debugf0("Removing dev %02x:%02x.%d\n", 1256 edac_dbg(0, "Removing dev %02x:%02x.%d\n",
1173 pdev->bus->number, 1257 pdev->bus->number,
1174 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 1258 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1175 pci_dev_put(pdev); 1259 pci_dev_put(pdev);
1176 } 1260 }
1177} 1261}
@@ -1214,12 +1298,12 @@ static unsigned i7core_pci_lastbus(void)
1214 1298
1215 while ((b = pci_find_next_bus(b)) != NULL) { 1299 while ((b = pci_find_next_bus(b)) != NULL) {
1216 bus = b->number; 1300 bus = b->number;
1217 debugf0("Found bus %d\n", bus); 1301 edac_dbg(0, "Found bus %d\n", bus);
1218 if (bus > last_bus) 1302 if (bus > last_bus)
1219 last_bus = bus; 1303 last_bus = bus;
1220 } 1304 }
1221 1305
1222 debugf0("Last bus %d\n", last_bus); 1306 edac_dbg(0, "Last bus %d\n", last_bus);
1223 1307
1224 return last_bus; 1308 return last_bus;
1225} 1309}
@@ -1326,10 +1410,10 @@ static int i7core_get_onedevice(struct pci_dev **prev,
1326 return -ENODEV; 1410 return -ENODEV;
1327 } 1411 }
1328 1412
1329 debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n", 1413 edac_dbg(0, "Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
1330 socket, bus, dev_descr->dev, 1414 socket, bus, dev_descr->dev,
1331 dev_descr->func, 1415 dev_descr->func,
1332 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 1416 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1333 1417
1334 /* 1418 /*
1335 * As stated on drivers/pci/search.c, the reference count for 1419 * As stated on drivers/pci/search.c, the reference count for
@@ -1427,13 +1511,13 @@ static int mci_bind_devs(struct mem_ctl_info *mci,
1427 family = "unknown"; 1511 family = "unknown";
1428 pvt->enable_scrub = false; 1512 pvt->enable_scrub = false;
1429 } 1513 }
1430 debugf0("Detected a processor type %s\n", family); 1514 edac_dbg(0, "Detected a processor type %s\n", family);
1431 } else 1515 } else
1432 goto error; 1516 goto error;
1433 1517
1434 debugf0("Associated fn %d.%d, dev = %p, socket %d\n", 1518 edac_dbg(0, "Associated fn %d.%d, dev = %p, socket %d\n",
1435 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 1519 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1436 pdev, i7core_dev->socket); 1520 pdev, i7core_dev->socket);
1437 1521
1438 if (PCI_SLOT(pdev->devfn) == 3 && 1522 if (PCI_SLOT(pdev->devfn) == 3 &&
1439 PCI_FUNC(pdev->devfn) == 2) 1523 PCI_FUNC(pdev->devfn) == 2)
@@ -1452,18 +1536,6 @@ error:
1452/**************************************************************************** 1536/****************************************************************************
1453 Error check routines 1537 Error check routines
1454 ****************************************************************************/ 1538 ****************************************************************************/
1455static void i7core_rdimm_update_errcount(struct mem_ctl_info *mci,
1456 const int chan,
1457 const int dimm,
1458 const int add)
1459{
1460 int i;
1461
1462 for (i = 0; i < add; i++) {
1463 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
1464 chan, dimm, -1, "error", "", NULL);
1465 }
1466}
1467 1539
1468static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci, 1540static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
1469 const int chan, 1541 const int chan,
@@ -1502,12 +1574,17 @@ static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
1502 1574
1503 /* update the edac core */ 1575
1504 if (add0 != 0) 1576 if (add0 != 0)
1505 i7core_rdimm_update_errcount(mci, chan, 0, add0); 1577 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add0,
1578 0, 0, 0,
1579 chan, 0, -1, "error", "");
1506 if (add1 != 0) 1580 if (add1 != 0)
1507 i7core_rdimm_update_errcount(mci, chan, 1, add1); 1581 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add1,
1582 0, 0, 0,
1583 chan, 1, -1, "error", "");
1508 if (add2 != 0) 1584 if (add2 != 0)
1509 i7core_rdimm_update_errcount(mci, chan, 2, add2); 1585 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add2,
1510 1586 0, 0, 0,
1587 chan, 2, -1, "error", "");
1511} 1588}
1512 1589
1513static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci) 1590static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
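The calls above reflect the reworked EDAC core helper, which now folds the event count into the report instead of looping one call per error. Its signature at this point in the series is roughly (argument names assumed from the surrounding call sites):

	void edac_mc_handle_error(const enum hw_event_mc_err_type type,
				  struct mem_ctl_info *mci,
				  const u16 error_count,   /* new 3rd argument */
				  const unsigned long page_frame_number,
				  const unsigned long offset_in_page,
				  const unsigned long syndrome,
				  const int top_layer,     /* here: channel */
				  const int mid_layer,     /* here: dimm */
				  const int low_layer,     /* -1 when unused */
				  const char *msg,
				  const char *other_detail);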
@@ -1530,8 +1607,8 @@ static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
1530 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5, 1607 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
1531 &rcv[2][1]); 1608 &rcv[2][1]);
1532 for (i = 0 ; i < 3; i++) { 1609 for (i = 0 ; i < 3; i++) {
1533 debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n", 1610 edac_dbg(3, "MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
1534 (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]); 1611 (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
1535 /*if the channel has 3 dimms*/ 1612 /*if the channel has 3 dimms*/
1536 if (pvt->channel[i].dimms > 2) { 1613 if (pvt->channel[i].dimms > 2) {
1537 new0 = DIMM_BOT_COR_ERR(rcv[i][0]); 1614 new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
@@ -1562,7 +1639,7 @@ static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
1562 int new0, new1, new2; 1639 int new0, new1, new2;
1563 1640
1564 if (!pvt->pci_mcr[4]) { 1641 if (!pvt->pci_mcr[4]) {
1565 debugf0("%s MCR registers not found\n", __func__); 1642 edac_dbg(0, "MCR registers not found\n");
1566 return; 1643 return;
1567 } 1644 }
1568 1645
@@ -1626,7 +1703,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
1626 const struct mce *m) 1703 const struct mce *m)
1627{ 1704{
1628 struct i7core_pvt *pvt = mci->pvt_info; 1705 struct i7core_pvt *pvt = mci->pvt_info;
1629 char *type, *optype, *err, msg[80]; 1706 char *type, *optype, *err;
1630 enum hw_event_mc_err_type tp_event; 1707 enum hw_event_mc_err_type tp_event;
1631 unsigned long error = m->status & 0x1ff0000l; 1708 unsigned long error = m->status & 0x1ff0000l;
1632 bool uncorrected_error = m->mcgstatus & 1ll << 61; 1709 bool uncorrected_error = m->mcgstatus & 1ll << 61;
@@ -1704,20 +1781,18 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
1704 err = "unknown"; 1781 err = "unknown";
1705 } 1782 }
1706 1783
1707 snprintf(msg, sizeof(msg), "count=%d %s", core_err_cnt, optype);
1708
1709 /* 1784 /*
1710 * Call the helper to output message 1785 * Call the helper to output message
1711 * FIXME: what to do if core_err_cnt > 1? Currently, it generates 1786 * FIXME: what to do if core_err_cnt > 1? Currently, it generates
1712 * only one event 1787 * only one event
1713 */ 1788 */
1714 if (uncorrected_error || !pvt->is_registered) 1789 if (uncorrected_error || !pvt->is_registered)
1715 edac_mc_handle_error(tp_event, mci, 1790 edac_mc_handle_error(tp_event, mci, core_err_cnt,
1716 m->addr >> PAGE_SHIFT, 1791 m->addr >> PAGE_SHIFT,
1717 m->addr & ~PAGE_MASK, 1792 m->addr & ~PAGE_MASK,
1718 syndrome, 1793 syndrome,
1719 channel, dimm, -1, 1794 channel, dimm, -1,
1720 err, msg, m); 1795 err, optype);
1721} 1796}
1722 1797
1723/* 1798/*
@@ -1814,12 +1889,6 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
1814 if (mce->bank != 8) 1889 if (mce->bank != 8)
1815 return NOTIFY_DONE; 1890 return NOTIFY_DONE;
1816 1891
1817#ifdef CONFIG_SMP
1818 /* Only handle if it is the right mc controller */
1819 if (mce->socketid != pvt->i7core_dev->socket)
1820 return NOTIFY_DONE;
1821#endif
1822
1823 smp_rmb(); 1892 smp_rmb();
1824 if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) { 1893 if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
1825 smp_wmb(); 1894 smp_wmb();
@@ -2100,8 +2169,7 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
2100 struct i7core_pvt *pvt; 2169 struct i7core_pvt *pvt;
2101 2170
2102 if (unlikely(!mci || !mci->pvt_info)) { 2171 if (unlikely(!mci || !mci->pvt_info)) {
2103 debugf0("MC: " __FILE__ ": %s(): dev = %p\n", 2172 edac_dbg(0, "MC: dev = %p\n", &i7core_dev->pdev[0]->dev);
2104 __func__, &i7core_dev->pdev[0]->dev);
2105 2173
2106 i7core_printk(KERN_ERR, "Couldn't find mci handler\n"); 2174 i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
2107 return; 2175 return;
@@ -2109,22 +2177,20 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
2109 2177
2110 pvt = mci->pvt_info; 2178 pvt = mci->pvt_info;
2111 2179
2112 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", 2180 edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);
2113 __func__, mci, &i7core_dev->pdev[0]->dev);
2114 2181
2115 /* Disable scrubrate setting */ 2182 /* Disable scrubrate setting */
2116 if (pvt->enable_scrub) 2183 if (pvt->enable_scrub)
2117 disable_sdram_scrub_setting(mci); 2184 disable_sdram_scrub_setting(mci);
2118 2185
2119 mce_unregister_decode_chain(&i7_mce_dec);
2120
2121 /* Disable EDAC polling */ 2186 /* Disable EDAC polling */
2122 i7core_pci_ctl_release(pvt); 2187 i7core_pci_ctl_release(pvt);
2123 2188
2124 /* Remove MC sysfs nodes */ 2189 /* Remove MC sysfs nodes */
2125 edac_mc_del_mc(mci->dev); 2190 i7core_delete_sysfs_devices(mci);
2191 edac_mc_del_mc(mci->pdev);
2126 2192
2127 debugf1("%s: free mci struct\n", mci->ctl_name); 2193 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
2128 kfree(mci->ctl_name); 2194 kfree(mci->ctl_name);
2129 edac_mc_free(mci); 2195 edac_mc_free(mci);
2130 i7core_dev->mci = NULL; 2196 i7core_dev->mci = NULL;
@@ -2150,8 +2216,7 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
2150 if (unlikely(!mci)) 2216 if (unlikely(!mci))
2151 return -ENOMEM; 2217 return -ENOMEM;
2152 2218
2153 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", 2219 edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);
2154 __func__, mci, &i7core_dev->pdev[0]->dev);
2155 2220
2156 pvt = mci->pvt_info; 2221 pvt = mci->pvt_info;
2157 memset(pvt, 0, sizeof(*pvt)); 2222 memset(pvt, 0, sizeof(*pvt));
@@ -2180,15 +2245,11 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
2180 if (unlikely(rc < 0)) 2245 if (unlikely(rc < 0))
2181 goto fail0; 2246 goto fail0;
2182 2247
2183 if (pvt->is_registered)
2184 mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs;
2185 else
2186 mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs;
2187 2248
2188 /* Get dimm basic config */ 2249 /* Get dimm basic config */
2189 get_dimm_config(mci); 2250 get_dimm_config(mci);
2190 /* record ptr to the generic device */ 2251 /* record ptr to the generic device */
2191 mci->dev = &i7core_dev->pdev[0]->dev; 2252 mci->pdev = &i7core_dev->pdev[0]->dev;
2192 /* Set the function pointer to an actual operation function */ 2253 /* Set the function pointer to an actual operation function */
2193 mci->edac_check = i7core_check_error; 2254 mci->edac_check = i7core_check_error;
2194 2255
@@ -2198,8 +2259,7 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
2198 2259
2199 /* add this new MC control structure to EDAC's list of MCs */ 2260 /* add this new MC control structure to EDAC's list of MCs */
2200 if (unlikely(edac_mc_add_mc(mci))) { 2261 if (unlikely(edac_mc_add_mc(mci))) {
2201 debugf0("MC: " __FILE__ 2262 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
2202 ": %s(): failed edac_mc_add_mc()\n", __func__);
2203 /* FIXME: perhaps some code should go here that disables error 2263 /* FIXME: perhaps some code should go here that disables error
2204 * reporting if we just enabled it 2264 * reporting if we just enabled it
2205 */ 2265 */
@@ -2207,6 +2267,12 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
2207 rc = -EINVAL; 2267 rc = -EINVAL;
2208 goto fail0; 2268 goto fail0;
2209 } 2269 }
2270 if (i7core_create_sysfs_devices(mci)) {
2271 edac_dbg(0, "MC: failed to create sysfs nodes\n");
2272 edac_mc_del_mc(mci->pdev);
2273 rc = -EINVAL;
2274 goto fail0;
2275 }
2210 2276
2211 /* Default error mask is any memory */ 2277 /* Default error mask is any memory */
2212 pvt->inject.channel = 0; 2278 pvt->inject.channel = 0;
@@ -2222,8 +2288,6 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
2222 /* DCLK for scrub rate setting */ 2288 /* DCLK for scrub rate setting */
2223 pvt->dclk_freq = get_dclk_freq(); 2289 pvt->dclk_freq = get_dclk_freq();
2224 2290
2225 mce_register_decode_chain(&i7_mce_dec);
2226
2227 return 0; 2291 return 0;
2228 2292
2229fail0: 2293fail0:
@@ -2308,7 +2372,7 @@ static void __devexit i7core_remove(struct pci_dev *pdev)
2308{ 2372{
2309 struct i7core_dev *i7core_dev; 2373 struct i7core_dev *i7core_dev;
2310 2374
2311 debugf0(__FILE__ ": %s()\n", __func__); 2375 edac_dbg(0, "\n");
2312 2376
2313 /* 2377 /*
2314 * we have trouble here: pdev value for removal will be wrong, since 2378
@@ -2357,7 +2421,7 @@ static int __init i7core_init(void)
2357{ 2421{
2358 int pci_rc; 2422 int pci_rc;
2359 2423
2360 debugf2("MC: " __FILE__ ": %s()\n", __func__); 2424 edac_dbg(2, "\n");
2361 2425
2362 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 2426 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
2363 opstate_init(); 2427 opstate_init();
@@ -2367,8 +2431,10 @@ static int __init i7core_init(void)
2367 2431
2368 pci_rc = pci_register_driver(&i7core_driver); 2432 pci_rc = pci_register_driver(&i7core_driver);
2369 2433
2370 if (pci_rc >= 0) 2434 if (pci_rc >= 0) {
2435 mce_register_decode_chain(&i7_mce_dec);
2371 return 0; 2436 return 0;
2437 }
2372 2438
2373 i7core_printk(KERN_ERR, "Failed to register device with error %d.\n", 2439 i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
2374 pci_rc); 2440 pci_rc);
@@ -2382,8 +2448,9 @@ static int __init i7core_init(void)
2382 */ 2448 */
2383static void __exit i7core_exit(void) 2449static void __exit i7core_exit(void)
2384{ 2450{
2385 debugf2("MC: " __FILE__ ": %s()\n", __func__); 2451 edac_dbg(2, "\n");
2386 pci_unregister_driver(&i7core_driver); 2452 pci_unregister_driver(&i7core_driver);
2453 mce_unregister_decode_chain(&i7_mce_dec);
2387} 2454}
2388 2455
2389module_init(i7core_init); 2456module_init(i7core_init);
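Moving mce_register_decode_chain() out of the per-socket register/unregister paths and into module init/exit means one notifier serves every memory controller, and it is torn down only after the PCI driver is gone. The resulting pairing, reduced to its skeleton (i7_mce_dec is the driver's notifier_block from earlier in the file):

	static int __init example_init(void)
	{
		int rc = pci_register_driver(&i7core_driver);

		if (rc < 0)
			return rc;
		/* Hook into MCE decoding only once the driver is live. */
		mce_register_decode_chain(&i7_mce_dec);
		return 0;
	}

	static void __exit example_exit(void)
	{
		pci_unregister_driver(&i7core_driver);
		mce_unregister_decode_chain(&i7_mce_dec);
	}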
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 52072c28a8a6..90f303db5d1d 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -124,7 +124,7 @@ static void i82443bxgx_edacmc_get_error_info(struct mem_ctl_info *mci,
124 *info) 124 *info)
125{ 125{
126 struct pci_dev *pdev; 126 struct pci_dev *pdev;
127 pdev = to_pci_dev(mci->dev); 127 pdev = to_pci_dev(mci->pdev);
128 pci_read_config_dword(pdev, I82443BXGX_EAP, &info->eap); 128 pci_read_config_dword(pdev, I82443BXGX_EAP, &info->eap);
129 if (info->eap & I82443BXGX_EAP_OFFSET_SBE) 129 if (info->eap & I82443BXGX_EAP_OFFSET_SBE)
130 /* Clear error to allow next error to be reported [p.61] */ 130 /* Clear error to allow next error to be reported [p.61] */
@@ -156,19 +156,19 @@ static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci,
156 if (info->eap & I82443BXGX_EAP_OFFSET_SBE) { 156 if (info->eap & I82443BXGX_EAP_OFFSET_SBE) {
157 error_found = 1; 157 error_found = 1;
158 if (handle_errors) 158 if (handle_errors)
159 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 159 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
160 page, pageoffset, 0, 160 page, pageoffset, 0,
161 edac_mc_find_csrow_by_page(mci, page), 161 edac_mc_find_csrow_by_page(mci, page),
162 0, -1, mci->ctl_name, "", NULL); 162 0, -1, mci->ctl_name, "");
163 } 163 }
164 164
165 if (info->eap & I82443BXGX_EAP_OFFSET_MBE) { 165 if (info->eap & I82443BXGX_EAP_OFFSET_MBE) {
166 error_found = 1; 166 error_found = 1;
167 if (handle_errors) 167 if (handle_errors)
168 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 168 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
169 page, pageoffset, 0, 169 page, pageoffset, 0,
170 edac_mc_find_csrow_by_page(mci, page), 170 edac_mc_find_csrow_by_page(mci, page),
171 0, -1, mci->ctl_name, "", NULL); 171 0, -1, mci->ctl_name, "");
172 } 172 }
173 173
174 return error_found; 174 return error_found;
@@ -178,7 +178,7 @@ static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci)
178{ 178{
179 struct i82443bxgx_edacmc_error_info info; 179 struct i82443bxgx_edacmc_error_info info;
180 180
181 debugf1("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); 181 edac_dbg(1, "MC%d\n", mci->mc_idx);
182 i82443bxgx_edacmc_get_error_info(mci, &info); 182 i82443bxgx_edacmc_get_error_info(mci, &info);
183 i82443bxgx_edacmc_process_error_info(mci, &info, 1); 183 i82443bxgx_edacmc_process_error_info(mci, &info, 1);
184} 184}
@@ -197,18 +197,17 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
197 pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc); 197 pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
198 row_high_limit_last = 0; 198 row_high_limit_last = 0;
199 for (index = 0; index < mci->nr_csrows; index++) { 199 for (index = 0; index < mci->nr_csrows; index++) {
200 csrow = &mci->csrows[index]; 200 csrow = mci->csrows[index];
201 dimm = csrow->channels[0].dimm; 201 dimm = csrow->channels[0]->dimm;
202 202
203 pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar); 203 pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
204 debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n", 204 edac_dbg(1, "MC%d: Row=%d DRB = %#0x\n",
205 mci->mc_idx, __FILE__, __func__, index, drbar); 205 mci->mc_idx, index, drbar);
206 row_high_limit = ((u32) drbar << 23); 206 row_high_limit = ((u32) drbar << 23);
207 /* find the DRAM Chip Select Base address and mask */ 207 /* find the DRAM Chip Select Base address and mask */
208 debugf1("MC%d: %s: %s() Row=%d, " 208 edac_dbg(1, "MC%d: Row=%d, Boundary Address=%#0x, Last = %#0x\n",
209 "Boundary Address=%#0x, Last = %#0x\n", 209 mci->mc_idx, index, row_high_limit,
210 mci->mc_idx, __FILE__, __func__, index, row_high_limit, 210 row_high_limit_last);
211 row_high_limit_last);
212 211
213 /* 440GX goes to 2GB, represented with a DRB of 0. */ 212 /* 440GX goes to 2GB, represented with a DRB of 0. */
214 if (row_high_limit_last && !row_high_limit) 213 if (row_high_limit_last && !row_high_limit)
@@ -241,7 +240,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
241 enum mem_type mtype; 240 enum mem_type mtype;
242 enum edac_type edac_mode; 241 enum edac_type edac_mode;
243 242
244 debugf0("MC: %s: %s()\n", __FILE__, __func__); 243 edac_dbg(0, "MC:\n");
245 244
246 /* Something is really hosed if PCI config space reads from 245 /* Something is really hosed if PCI config space reads from
247 * the MC aren't working. 246 * the MC aren't working.
@@ -259,8 +258,8 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
259 if (mci == NULL) 258 if (mci == NULL)
260 return -ENOMEM; 259 return -ENOMEM;
261 260
262 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci); 261 edac_dbg(0, "MC: mci = %p\n", mci);
263 mci->dev = &pdev->dev; 262 mci->pdev = &pdev->dev;
264 mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR; 263 mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
265 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 264 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
266 pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc); 265 pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
@@ -275,8 +274,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
275 mtype = MEM_RDR; 274 mtype = MEM_RDR;
276 break; 275 break;
277 default: 276 default:
278 debugf0("Unknown/reserved DRAM type value " 277 edac_dbg(0, "Unknown/reserved DRAM type value in DRAMC register!\n");
279 "in DRAMC register!\n");
280 mtype = -MEM_UNKNOWN; 278 mtype = -MEM_UNKNOWN;
281 } 279 }
282 280
@@ -305,8 +303,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
305 edac_mode = EDAC_SECDED; 303 edac_mode = EDAC_SECDED;
306 break; 304 break;
307 default: 305 default:
308 debugf0("%s(): Unknown/reserved ECC state " 306 edac_dbg(0, "Unknown/reserved ECC state in NBXCFG register!\n");
309 "in NBXCFG register!\n", __func__);
310 edac_mode = EDAC_UNKNOWN; 307 edac_mode = EDAC_UNKNOWN;
311 break; 308 break;
312 } 309 }
@@ -330,7 +327,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
330 mci->ctl_page_to_phys = NULL; 327 mci->ctl_page_to_phys = NULL;
331 328
332 if (edac_mc_add_mc(mci)) { 329 if (edac_mc_add_mc(mci)) {
333 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 330 edac_dbg(3, "failed edac_mc_add_mc()\n");
334 goto fail; 331 goto fail;
335 } 332 }
336 333
@@ -345,7 +342,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
345 __func__); 342 __func__);
346 } 343 }
347 344
348 debugf3("MC: %s: %s(): success\n", __FILE__, __func__); 345 edac_dbg(3, "MC: success\n");
349 return 0; 346 return 0;
350 347
351fail: 348fail:
@@ -361,7 +358,7 @@ static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
361{ 358{
362 int rc; 359 int rc;
363 360
364 debugf0("MC: %s: %s()\n", __FILE__, __func__); 361 edac_dbg(0, "MC:\n");
365 362
366 /* don't need to call pci_enable_device() */ 363 /* don't need to call pci_enable_device() */
367 rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data); 364 rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
@@ -376,7 +373,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
376{ 373{
377 struct mem_ctl_info *mci; 374 struct mem_ctl_info *mci;
378 375
379 debugf0("%s: %s()\n", __FILE__, __func__); 376 edac_dbg(0, "\n");
380 377
381 if (i82443bxgx_pci) 378 if (i82443bxgx_pci)
382 edac_pci_release_generic_ctl(i82443bxgx_pci); 379 edac_pci_release_generic_ctl(i82443bxgx_pci);
@@ -428,7 +425,7 @@ static int __init i82443bxgx_edacmc_init(void)
428 id = &i82443bxgx_pci_tbl[i]; 425 id = &i82443bxgx_pci_tbl[i];
429 } 426 }
430 if (!mci_pdev) { 427 if (!mci_pdev) {
431 debugf0("i82443bxgx pci_get_device fail\n"); 428 edac_dbg(0, "i82443bxgx pci_get_device fail\n");
432 pci_rc = -ENODEV; 429 pci_rc = -ENODEV;
433 goto fail1; 430 goto fail1;
434 } 431 }
@@ -436,7 +433,7 @@ static int __init i82443bxgx_edacmc_init(void)
436 pci_rc = i82443bxgx_edacmc_init_one(mci_pdev, i82443bxgx_pci_tbl); 433 pci_rc = i82443bxgx_edacmc_init_one(mci_pdev, i82443bxgx_pci_tbl);
437 434
438 if (pci_rc < 0) { 435 if (pci_rc < 0) {
439 debugf0("i82443bxgx init fail\n"); 436 edac_dbg(0, "i82443bxgx init fail\n");
440 pci_rc = -ENODEV; 437 pci_rc = -ENODEV;
441 goto fail1; 438 goto fail1;
442 } 439 }
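The `csrow = mci->csrows[index]` and `csrow->channels[0]->dimm` changes here and in the i82860/i82875p/i82975x hunks below track a layout change in the EDAC core: csrows and channels become arrays of pointers rather than flat arrays of structs. Schematically (field lists trimmed; the two definitions are alternatives, not meant to compile together):

	/* before the rework: one flat allocation */
	struct mem_ctl_info {
		struct csrow_info *csrows;	/* csrows[i] is a struct */
	};

	/* after: per-csrow objects, reached through pointers */
	struct mem_ctl_info {
		struct csrow_info **csrows;	/* csrows[i] is a pointer */
	};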
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index 08045059d10b..1faa74971513 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -67,7 +67,7 @@ static void i82860_get_error_info(struct mem_ctl_info *mci,
67{ 67{
68 struct pci_dev *pdev; 68 struct pci_dev *pdev;
69 69
70 pdev = to_pci_dev(mci->dev); 70 pdev = to_pci_dev(mci->pdev);
71 71
72 /* 72 /*
73 * This is a mess because there is no atomic way to read all the 73 * This is a mess because there is no atomic way to read all the
@@ -109,25 +109,25 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
109 return 1; 109 return 1;
110 110
111 if ((info->errsts ^ info->errsts2) & 0x0003) { 111 if ((info->errsts ^ info->errsts2) & 0x0003) {
112 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 112 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
113 -1, -1, -1, "UE overwrote CE", "", NULL); 113 -1, -1, -1, "UE overwrote CE", "");
114 info->errsts = info->errsts2; 114 info->errsts = info->errsts2;
115 } 115 }
116 116
117 info->eap >>= PAGE_SHIFT; 117 info->eap >>= PAGE_SHIFT;
118 row = edac_mc_find_csrow_by_page(mci, info->eap); 118 row = edac_mc_find_csrow_by_page(mci, info->eap);
119 dimm = mci->csrows[row].channels[0].dimm; 119 dimm = mci->csrows[row]->channels[0]->dimm;
120 120
121 if (info->errsts & 0x0002) 121 if (info->errsts & 0x0002)
122 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 122 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
123 info->eap, 0, 0, 123 info->eap, 0, 0,
124 dimm->location[0], dimm->location[1], -1, 124 dimm->location[0], dimm->location[1], -1,
125 "i82860 UE", "", NULL); 125 "i82860 UE", "");
126 else 126 else
127 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 127 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
128 info->eap, 0, info->derrsyn, 128 info->eap, 0, info->derrsyn,
129 dimm->location[0], dimm->location[1], -1, 129 dimm->location[0], dimm->location[1], -1,
130 "i82860 CE", "", NULL); 130 "i82860 CE", "");
131 131
132 return 1; 132 return 1;
133} 133}
@@ -136,7 +136,7 @@ static void i82860_check(struct mem_ctl_info *mci)
136{ 136{
137 struct i82860_error_info info; 137 struct i82860_error_info info;
138 138
139 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 139 edac_dbg(1, "MC%d\n", mci->mc_idx);
140 i82860_get_error_info(mci, &info); 140 i82860_get_error_info(mci, &info);
141 i82860_process_error_info(mci, &info, 1); 141 i82860_process_error_info(mci, &info, 1);
142} 142}
@@ -161,14 +161,13 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
161 * in all eight rows. 161 * in all eight rows.
162 */ 162 */
163 for (index = 0; index < mci->nr_csrows; index++) { 163 for (index = 0; index < mci->nr_csrows; index++) {
164 csrow = &mci->csrows[index]; 164 csrow = mci->csrows[index];
165 dimm = csrow->channels[0].dimm; 165 dimm = csrow->channels[0]->dimm;
166 166
167 pci_read_config_word(pdev, I82860_GBA + index * 2, &value); 167 pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
168 cumul_size = (value & I82860_GBA_MASK) << 168 cumul_size = (value & I82860_GBA_MASK) <<
169 (I82860_GBA_SHIFT - PAGE_SHIFT); 169 (I82860_GBA_SHIFT - PAGE_SHIFT);
170 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 170 edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
171 cumul_size);
172 171
173 if (cumul_size == last_cumul_size) 172 if (cumul_size == last_cumul_size)
174 continue; /* not populated */ 173 continue; /* not populated */
@@ -210,8 +209,8 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
210 if (!mci) 209 if (!mci)
211 return -ENOMEM; 210 return -ENOMEM;
212 211
213 debugf3("%s(): init mci\n", __func__); 212 edac_dbg(3, "init mci\n");
214 mci->dev = &pdev->dev; 213 mci->pdev = &pdev->dev;
215 mci->mtype_cap = MEM_FLAG_DDR; 214 mci->mtype_cap = MEM_FLAG_DDR;
216 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 215 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
217 /* I'm not sure about this but I think that all RDRAM is SECDED */ 216
@@ -229,7 +228,7 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
229 * type of memory controller. The ID is therefore hardcoded to 0. 228 * type of memory controller. The ID is therefore hardcoded to 0.
230 */ 229 */
231 if (edac_mc_add_mc(mci)) { 230 if (edac_mc_add_mc(mci)) {
232 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 231 edac_dbg(3, "failed edac_mc_add_mc()\n");
233 goto fail; 232 goto fail;
234 } 233 }
235 234
@@ -245,7 +244,7 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
245 } 244 }
246 245
247 /* get this far and it's successful */ 246 /* get this far and it's successful */
248 debugf3("%s(): success\n", __func__); 247 edac_dbg(3, "success\n");
249 248
250 return 0; 249 return 0;
251 250
@@ -260,7 +259,7 @@ static int __devinit i82860_init_one(struct pci_dev *pdev,
260{ 259{
261 int rc; 260 int rc;
262 261
263 debugf0("%s()\n", __func__); 262 edac_dbg(0, "\n");
264 i82860_printk(KERN_INFO, "i82860 init one\n"); 263 i82860_printk(KERN_INFO, "i82860 init one\n");
265 264
266 if (pci_enable_device(pdev) < 0) 265 if (pci_enable_device(pdev) < 0)
@@ -278,7 +277,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
278{ 277{
279 struct mem_ctl_info *mci; 278 struct mem_ctl_info *mci;
280 279
281 debugf0("%s()\n", __func__); 280 edac_dbg(0, "\n");
282 281
283 if (i82860_pci) 282 if (i82860_pci)
284 edac_pci_release_generic_ctl(i82860_pci); 283 edac_pci_release_generic_ctl(i82860_pci);
@@ -311,7 +310,7 @@ static int __init i82860_init(void)
311{ 310{
312 int pci_rc; 311 int pci_rc;
313 312
314 debugf3("%s()\n", __func__); 313 edac_dbg(3, "\n");
315 314
316 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 315 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
317 opstate_init(); 316 opstate_init();
@@ -324,7 +323,7 @@ static int __init i82860_init(void)
324 PCI_DEVICE_ID_INTEL_82860_0, NULL); 323 PCI_DEVICE_ID_INTEL_82860_0, NULL);
325 324
326 if (mci_pdev == NULL) { 325 if (mci_pdev == NULL) {
327 debugf0("860 pci_get_device fail\n"); 326 edac_dbg(0, "860 pci_get_device fail\n");
328 pci_rc = -ENODEV; 327 pci_rc = -ENODEV;
329 goto fail1; 328 goto fail1;
330 } 329 }
@@ -332,7 +331,7 @@ static int __init i82860_init(void)
332 pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl); 331 pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl);
333 332
334 if (pci_rc < 0) { 333 if (pci_rc < 0) {
335 debugf0("860 init fail\n"); 334 edac_dbg(0, "860 init fail\n");
336 pci_rc = -ENODEV; 335 pci_rc = -ENODEV;
337 goto fail1; 336 goto fail1;
338 } 337 }
@@ -352,7 +351,7 @@ fail0:
352 351
353static void __exit i82860_exit(void) 352static void __exit i82860_exit(void)
354{ 353{
355 debugf3("%s()\n", __func__); 354 edac_dbg(3, "\n");
356 355
357 pci_unregister_driver(&i82860_driver); 356 pci_unregister_driver(&i82860_driver);
358 357
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index b613e31c16e5..3e416b1a6b53 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -189,7 +189,7 @@ static void i82875p_get_error_info(struct mem_ctl_info *mci,
189{ 189{
190 struct pci_dev *pdev; 190 struct pci_dev *pdev;
191 191
192 pdev = to_pci_dev(mci->dev); 192 pdev = to_pci_dev(mci->pdev);
193 193
194 /* 194 /*
195 * This is a mess because there is no atomic way to read all the 195 * This is a mess because there is no atomic way to read all the
@@ -227,7 +227,7 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
227{ 227{
228 int row, multi_chan; 228 int row, multi_chan;
229 229
230 multi_chan = mci->csrows[0].nr_channels - 1; 230 multi_chan = mci->csrows[0]->nr_channels - 1;
231 231
232 if (!(info->errsts & 0x0081)) 232 if (!(info->errsts & 0x0081))
233 return 0; 233 return 0;
@@ -236,9 +236,9 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
236 return 1; 236 return 1;
237 237
238 if ((info->errsts ^ info->errsts2) & 0x0081) { 238 if ((info->errsts ^ info->errsts2) & 0x0081) {
239 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 239 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
240 -1, -1, -1, 240 -1, -1, -1,
241 "UE overwrote CE", "", NULL); 241 "UE overwrote CE", "");
242 info->errsts = info->errsts2; 242 info->errsts = info->errsts2;
243 } 243 }
244 244
@@ -246,15 +246,15 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
246 row = edac_mc_find_csrow_by_page(mci, info->eap); 246 row = edac_mc_find_csrow_by_page(mci, info->eap);
247 247
248 if (info->errsts & 0x0080) 248 if (info->errsts & 0x0080)
249 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 249 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
250 info->eap, 0, 0, 250 info->eap, 0, 0,
251 row, -1, -1, 251 row, -1, -1,
252 "i82875p UE", "", NULL); 252 "i82875p UE", "");
253 else 253 else
254 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 254 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
255 info->eap, 0, info->derrsyn, 255 info->eap, 0, info->derrsyn,
256 row, multi_chan ? (info->des & 0x1) : 0, 256 row, multi_chan ? (info->des & 0x1) : 0,
257 -1, "i82875p CE", "", NULL); 257 -1, "i82875p CE", "");
258 258
259 return 1; 259 return 1;
260} 260}
@@ -263,7 +263,7 @@ static void i82875p_check(struct mem_ctl_info *mci)
263{ 263{
264 struct i82875p_error_info info; 264 struct i82875p_error_info info;
265 265
266 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 266 edac_dbg(1, "MC%d\n", mci->mc_idx);
267 i82875p_get_error_info(mci, &info); 267 i82875p_get_error_info(mci, &info);
268 i82875p_process_error_info(mci, &info, 1); 268 i82875p_process_error_info(mci, &info, 1);
269} 269}
@@ -367,12 +367,11 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
367 */ 367 */
368 368
369 for (index = 0; index < mci->nr_csrows; index++) { 369 for (index = 0; index < mci->nr_csrows; index++) {
370 csrow = &mci->csrows[index]; 370 csrow = mci->csrows[index];
371 371
372 value = readb(ovrfl_window + I82875P_DRB + index); 372 value = readb(ovrfl_window + I82875P_DRB + index);
373 cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); 373 cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
374 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 374 edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
375 cumul_size);
376 if (cumul_size == last_cumul_size) 375 if (cumul_size == last_cumul_size)
377 continue; /* not populated */ 376 continue; /* not populated */
378 377
@@ -382,7 +381,7 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
382 last_cumul_size = cumul_size; 381 last_cumul_size = cumul_size;
383 382
384 for (j = 0; j < nr_chans; j++) { 383 for (j = 0; j < nr_chans; j++) {
385 dimm = csrow->channels[j].dimm; 384 dimm = csrow->channels[j]->dimm;
386 385
387 dimm->nr_pages = nr_pages / nr_chans; 386 dimm->nr_pages = nr_pages / nr_chans;
388 dimm->grain = 1 << 12; /* I82875P_EAP has 4KiB resolution */ 387
@@ -405,7 +404,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
405 u32 nr_chans; 404 u32 nr_chans;
406 struct i82875p_error_info discard; 405 struct i82875p_error_info discard;
407 406
408 debugf0("%s()\n", __func__); 407 edac_dbg(0, "\n");
409 408
410 ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); 409 ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
411 410
@@ -426,11 +425,8 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
426 goto fail0; 425 goto fail0;
427 } 426 }
428 427
429 /* Keeps mci available after edac_mc_del_mc() till edac_mc_free() */ 428 edac_dbg(3, "init mci\n");
430 kobject_get(&mci->edac_mci_kobj); 429 mci->pdev = &pdev->dev;
431
432 debugf3("%s(): init mci\n", __func__);
433 mci->dev = &pdev->dev;
434 mci->mtype_cap = MEM_FLAG_DDR; 430 mci->mtype_cap = MEM_FLAG_DDR;
435 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 431 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
436 mci->edac_cap = EDAC_FLAG_UNKNOWN; 432 mci->edac_cap = EDAC_FLAG_UNKNOWN;
@@ -440,7 +436,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
440 mci->dev_name = pci_name(pdev); 436 mci->dev_name = pci_name(pdev);
441 mci->edac_check = i82875p_check; 437 mci->edac_check = i82875p_check;
442 mci->ctl_page_to_phys = NULL; 438 mci->ctl_page_to_phys = NULL;
443 debugf3("%s(): init pvt\n", __func__); 439 edac_dbg(3, "init pvt\n");
444 pvt = (struct i82875p_pvt *)mci->pvt_info; 440 pvt = (struct i82875p_pvt *)mci->pvt_info;
445 pvt->ovrfl_pdev = ovrfl_pdev; 441 pvt->ovrfl_pdev = ovrfl_pdev;
446 pvt->ovrfl_window = ovrfl_window; 442 pvt->ovrfl_window = ovrfl_window;
@@ -451,7 +447,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
451 * type of memory controller. The ID is therefore hardcoded to 0. 447 * type of memory controller. The ID is therefore hardcoded to 0.
452 */ 448 */
453 if (edac_mc_add_mc(mci)) { 449 if (edac_mc_add_mc(mci)) {
454 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 450 edac_dbg(3, "failed edac_mc_add_mc()\n");
455 goto fail1; 451 goto fail1;
456 } 452 }
457 453
@@ -467,11 +463,10 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
467 } 463 }
468 464
469 /* get this far and it's successful */ 465 /* get this far and it's successful */
470 debugf3("%s(): success\n", __func__); 466 edac_dbg(3, "success\n");
471 return 0; 467 return 0;
472 468
473fail1: 469fail1:
474 kobject_put(&mci->edac_mci_kobj);
475 edac_mc_free(mci); 470 edac_mc_free(mci);
476 471
477fail0: 472fail0:
@@ -489,7 +484,7 @@ static int __devinit i82875p_init_one(struct pci_dev *pdev,
489{ 484{
490 int rc; 485 int rc;
491 486
492 debugf0("%s()\n", __func__); 487 edac_dbg(0, "\n");
493 i82875p_printk(KERN_INFO, "i82875p init one\n"); 488 i82875p_printk(KERN_INFO, "i82875p init one\n");
494 489
495 if (pci_enable_device(pdev) < 0) 490 if (pci_enable_device(pdev) < 0)
@@ -508,7 +503,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
508 struct mem_ctl_info *mci; 503 struct mem_ctl_info *mci;
509 struct i82875p_pvt *pvt = NULL; 504 struct i82875p_pvt *pvt = NULL;
510 505
511 debugf0("%s()\n", __func__); 506 edac_dbg(0, "\n");
512 507
513 if (i82875p_pci) 508 if (i82875p_pci)
514 edac_pci_release_generic_ctl(i82875p_pci); 509 edac_pci_release_generic_ctl(i82875p_pci);
@@ -554,7 +549,7 @@ static int __init i82875p_init(void)
554{ 549{
555 int pci_rc; 550 int pci_rc;
556 551
557 debugf3("%s()\n", __func__); 552 edac_dbg(3, "\n");
558 553
559 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 554 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
560 opstate_init(); 555 opstate_init();
@@ -569,7 +564,7 @@ static int __init i82875p_init(void)
569 PCI_DEVICE_ID_INTEL_82875_0, NULL); 564 PCI_DEVICE_ID_INTEL_82875_0, NULL);
570 565
571 if (!mci_pdev) { 566 if (!mci_pdev) {
572 debugf0("875p pci_get_device fail\n"); 567 edac_dbg(0, "875p pci_get_device fail\n");
573 pci_rc = -ENODEV; 568 pci_rc = -ENODEV;
574 goto fail1; 569 goto fail1;
575 } 570 }
@@ -577,7 +572,7 @@ static int __init i82875p_init(void)
577 pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl); 572 pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);
578 573
579 if (pci_rc < 0) { 574 if (pci_rc < 0) {
580 debugf0("875p init fail\n"); 575 edac_dbg(0, "875p init fail\n");
581 pci_rc = -ENODEV; 576 pci_rc = -ENODEV;
582 goto fail1; 577 goto fail1;
583 } 578 }
@@ -597,7 +592,7 @@ fail0:
597 592
598static void __exit i82875p_exit(void) 593static void __exit i82875p_exit(void)
599{ 594{
600 debugf3("%s()\n", __func__); 595 edac_dbg(3, "\n");
601 596
602 i82875p_remove_one(mci_pdev); 597 i82875p_remove_one(mci_pdev);
603 pci_dev_put(mci_pdev); 598 pci_dev_put(mci_pdev);
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 433332c7cdba..069e26c11c4f 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -241,7 +241,7 @@ static void i82975x_get_error_info(struct mem_ctl_info *mci,
241{ 241{
242 struct pci_dev *pdev; 242 struct pci_dev *pdev;
243 243
244 pdev = to_pci_dev(mci->dev); 244 pdev = to_pci_dev(mci->pdev);
245 245
246 /* 246 /*
247 * This is a mess because there is no atomic way to read all the 247 * This is a mess because there is no atomic way to read all the
@@ -288,8 +288,8 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
288 return 1; 288 return 1;
289 289
290 if ((info->errsts ^ info->errsts2) & 0x0003) { 290 if ((info->errsts ^ info->errsts2) & 0x0003) {
291 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 291 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
292 -1, -1, -1, "UE overwrote CE", "", NULL); 292 -1, -1, -1, "UE overwrote CE", "");
293 info->errsts = info->errsts2; 293 info->errsts = info->errsts2;
294 } 294 }
295 295
@@ -308,21 +308,21 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
308 (info->xeap & 1) ? 1 : 0, info->eap, (unsigned int) page); 308 (info->xeap & 1) ? 1 : 0, info->eap, (unsigned int) page);
309 return 0; 309 return 0;
310 } 310 }
311 chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1; 311 chan = (mci->csrows[row]->nr_channels == 1) ? 0 : info->eap & 1;
312 offst = info->eap 312 offst = info->eap
313 & ((1 << PAGE_SHIFT) - 313 & ((1 << PAGE_SHIFT) -
314 (1 << mci->csrows[row].channels[chan].dimm->grain)); 314 (1 << mci->csrows[row]->channels[chan]->dimm->grain));
315 315
316 if (info->errsts & 0x0002) 316 if (info->errsts & 0x0002)
317 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 317 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
318 page, offst, 0, 318 page, offst, 0,
319 row, -1, -1, 319 row, -1, -1,
320 "i82975x UE", "", NULL); 320 "i82975x UE", "");
321 else 321 else
322 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 322 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
323 page, offst, info->derrsyn, 323 page, offst, info->derrsyn,
324 row, chan ? chan : 0, -1, 324 row, chan ? chan : 0, -1,
325 "i82975x CE", "", NULL); 325 "i82975x CE", "");
326 326
327 return 1; 327 return 1;
328} 328}
@@ -331,7 +331,7 @@ static void i82975x_check(struct mem_ctl_info *mci)
331{ 331{
332 struct i82975x_error_info info; 332 struct i82975x_error_info info;
333 333
334 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 334 edac_dbg(1, "MC%d\n", mci->mc_idx);
335 i82975x_get_error_info(mci, &info); 335 i82975x_get_error_info(mci, &info);
336 i82975x_process_error_info(mci, &info, 1); 336 i82975x_process_error_info(mci, &info, 1);
337} 337}
@@ -394,7 +394,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
394 */ 394 */
395 395
396 for (index = 0; index < mci->nr_csrows; index++) { 396 for (index = 0; index < mci->nr_csrows; index++) {
397 csrow = &mci->csrows[index]; 397 csrow = mci->csrows[index];
398 398
399 value = readb(mch_window + I82975X_DRB + index + 399 value = readb(mch_window + I82975X_DRB + index +
400 ((index >= 4) ? 0x80 : 0)); 400 ((index >= 4) ? 0x80 : 0));
@@ -406,8 +406,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
406 */ 406 */
407 if (csrow->nr_channels > 1) 407 if (csrow->nr_channels > 1)
408 cumul_size <<= 1; 408 cumul_size <<= 1;
409 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 409 edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
410 cumul_size);
411 410
412 nr_pages = cumul_size - last_cumul_size; 411 nr_pages = cumul_size - last_cumul_size;
413 if (!nr_pages) 412 if (!nr_pages)
@@ -421,10 +420,10 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
421 */ 420 */
422 dtype = i82975x_dram_type(mch_window, index); 421 dtype = i82975x_dram_type(mch_window, index);
423 for (chan = 0; chan < csrow->nr_channels; chan++) { 422 for (chan = 0; chan < csrow->nr_channels; chan++) {
424 dimm = mci->csrows[index].channels[chan].dimm; 423 dimm = mci->csrows[index]->channels[chan]->dimm;
425 424
426 dimm->nr_pages = nr_pages / csrow->nr_channels; 425 dimm->nr_pages = nr_pages / csrow->nr_channels;
427 strncpy(csrow->channels[chan].dimm->label, 426 strncpy(csrow->channels[chan]->dimm->label,
428 labels[(index >> 1) + (chan * 2)], 427 labels[(index >> 1) + (chan * 2)],
429 EDAC_MC_LABEL_LEN); 428 EDAC_MC_LABEL_LEN);
430 dimm->grain = 1 << 7; /* 128Byte cache-line resolution */ 429 dimm->grain = 1 << 7; /* 128Byte cache-line resolution */
@@ -489,11 +488,11 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
489 u8 c1drb[4]; 488 u8 c1drb[4];
490#endif 489#endif
491 490
492 debugf0("%s()\n", __func__); 491 edac_dbg(0, "\n");
493 492
494 pci_read_config_dword(pdev, I82975X_MCHBAR, &mchbar); 493 pci_read_config_dword(pdev, I82975X_MCHBAR, &mchbar);
495 if (!(mchbar & 1)) { 494 if (!(mchbar & 1)) {
496 debugf3("%s(): failed, MCHBAR disabled!\n", __func__); 495 edac_dbg(3, "failed, MCHBAR disabled!\n");
497 goto fail0; 496 goto fail0;
498 } 497 }
499 mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */ 498 mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */
@@ -558,8 +557,8 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
558 goto fail1; 557 goto fail1;
559 } 558 }
560 559
561 debugf3("%s(): init mci\n", __func__); 560 edac_dbg(3, "init mci\n");
562 mci->dev = &pdev->dev; 561 mci->pdev = &pdev->dev;
563 mci->mtype_cap = MEM_FLAG_DDR2; 562 mci->mtype_cap = MEM_FLAG_DDR2;
564 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 563 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
565 mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 564 mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
@@ -569,7 +568,7 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
569 mci->dev_name = pci_name(pdev); 568 mci->dev_name = pci_name(pdev);
570 mci->edac_check = i82975x_check; 569 mci->edac_check = i82975x_check;
571 mci->ctl_page_to_phys = NULL; 570 mci->ctl_page_to_phys = NULL;
572 debugf3("%s(): init pvt\n", __func__); 571 edac_dbg(3, "init pvt\n");
573 pvt = (struct i82975x_pvt *) mci->pvt_info; 572 pvt = (struct i82975x_pvt *) mci->pvt_info;
574 pvt->mch_window = mch_window; 573 pvt->mch_window = mch_window;
575 i82975x_init_csrows(mci, pdev, mch_window); 574 i82975x_init_csrows(mci, pdev, mch_window);
@@ -578,12 +577,12 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
578 577
579 /* finalize this instance of memory controller with edac core */ 578 /* finalize this instance of memory controller with edac core */
580 if (edac_mc_add_mc(mci)) { 579 if (edac_mc_add_mc(mci)) {
581 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 580 edac_dbg(3, "failed edac_mc_add_mc()\n");
582 goto fail2; 581 goto fail2;
583 } 582 }
584 583
585 /* get this far and it's successful */ 584 /* get this far and it's successful */
586 debugf3("%s(): success\n", __func__); 585 edac_dbg(3, "success\n");
587 return 0; 586 return 0;
588 587
589fail2: 588fail2:
@@ -601,7 +600,7 @@ static int __devinit i82975x_init_one(struct pci_dev *pdev,
601{ 600{
602 int rc; 601 int rc;
603 602
604 debugf0("%s()\n", __func__); 603 edac_dbg(0, "\n");
605 604
606 if (pci_enable_device(pdev) < 0) 605 if (pci_enable_device(pdev) < 0)
607 return -EIO; 606 return -EIO;
@@ -619,7 +618,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
619 struct mem_ctl_info *mci; 618 struct mem_ctl_info *mci;
620 struct i82975x_pvt *pvt; 619 struct i82975x_pvt *pvt;
621 620
622 debugf0("%s()\n", __func__); 621 edac_dbg(0, "\n");
623 622
624 mci = edac_mc_del_mc(&pdev->dev); 623 mci = edac_mc_del_mc(&pdev->dev);
625 if (mci == NULL) 624 if (mci == NULL)
@@ -655,7 +654,7 @@ static int __init i82975x_init(void)
655{ 654{
656 int pci_rc; 655 int pci_rc;
657 656
658 debugf3("%s()\n", __func__); 657 edac_dbg(3, "\n");
659 658
660 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 659 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
661 opstate_init(); 660 opstate_init();
@@ -669,7 +668,7 @@ static int __init i82975x_init(void)
669 PCI_DEVICE_ID_INTEL_82975_0, NULL); 668 PCI_DEVICE_ID_INTEL_82975_0, NULL);
670 669
671 if (!mci_pdev) { 670 if (!mci_pdev) {
672 debugf0("i82975x pci_get_device fail\n"); 671 edac_dbg(0, "i82975x pci_get_device fail\n");
673 pci_rc = -ENODEV; 672 pci_rc = -ENODEV;
674 goto fail1; 673 goto fail1;
675 } 674 }
@@ -677,7 +676,7 @@ static int __init i82975x_init(void)
677 pci_rc = i82975x_init_one(mci_pdev, i82975x_pci_tbl); 676 pci_rc = i82975x_init_one(mci_pdev, i82975x_pci_tbl);
678 677
679 if (pci_rc < 0) { 678 if (pci_rc < 0) {
680 debugf0("i82975x init fail\n"); 679 edac_dbg(0, "i82975x init fail\n");
681 pci_rc = -ENODEV; 680 pci_rc = -ENODEV;
682 goto fail1; 681 goto fail1;
683 } 682 }
@@ -697,7 +696,7 @@ fail0:
697 696
698static void __exit i82975x_exit(void) 697static void __exit i82975x_exit(void)
699{ 698{
700 debugf3("%s()\n", __func__); 699 edac_dbg(3, "\n");
701 700
702 pci_unregister_driver(&i82975x_driver); 701 pci_unregister_driver(&i82975x_driver);
703 702
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 4c402353ba98..a1e791ec25d3 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -49,34 +49,45 @@ static u32 orig_hid1[2];
49 49
50/************************ MC SYSFS parts ***********************************/ 50/************************ MC SYSFS parts ***********************************/
51 51
52static ssize_t mpc85xx_mc_inject_data_hi_show(struct mem_ctl_info *mci, 52#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
53
54static ssize_t mpc85xx_mc_inject_data_hi_show(struct device *dev,
55 struct device_attribute *mattr,
53 char *data) 56 char *data)
54{ 57{
58 struct mem_ctl_info *mci = to_mci(dev);
55 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 59 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
56 return sprintf(data, "0x%08x", 60 return sprintf(data, "0x%08x",
57 in_be32(pdata->mc_vbase + 61 in_be32(pdata->mc_vbase +
58 MPC85XX_MC_DATA_ERR_INJECT_HI)); 62 MPC85XX_MC_DATA_ERR_INJECT_HI));
59} 63}
60 64
61static ssize_t mpc85xx_mc_inject_data_lo_show(struct mem_ctl_info *mci, 65static ssize_t mpc85xx_mc_inject_data_lo_show(struct device *dev,
66 struct device_attribute *mattr,
62 char *data) 67 char *data)
63{ 68{
69 struct mem_ctl_info *mci = to_mci(dev);
64 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 70 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
65 return sprintf(data, "0x%08x", 71 return sprintf(data, "0x%08x",
66 in_be32(pdata->mc_vbase + 72 in_be32(pdata->mc_vbase +
67 MPC85XX_MC_DATA_ERR_INJECT_LO)); 73 MPC85XX_MC_DATA_ERR_INJECT_LO));
68} 74}
69 75
70static ssize_t mpc85xx_mc_inject_ctrl_show(struct mem_ctl_info *mci, char *data) 76static ssize_t mpc85xx_mc_inject_ctrl_show(struct device *dev,
77 struct device_attribute *mattr,
78 char *data)
71{ 79{
80 struct mem_ctl_info *mci = to_mci(dev);
72 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 81 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
73 return sprintf(data, "0x%08x", 82 return sprintf(data, "0x%08x",
74 in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT)); 83 in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT));
75} 84}
76 85
77static ssize_t mpc85xx_mc_inject_data_hi_store(struct mem_ctl_info *mci, 86static ssize_t mpc85xx_mc_inject_data_hi_store(struct device *dev,
87 struct device_attribute *mattr,
78 const char *data, size_t count) 88 const char *data, size_t count)
79{ 89{
90 struct mem_ctl_info *mci = to_mci(dev);
80 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 91 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
81 if (isdigit(*data)) { 92 if (isdigit(*data)) {
82 out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI, 93 out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI,
@@ -86,9 +97,11 @@ static ssize_t mpc85xx_mc_inject_data_hi_store(struct mem_ctl_info *mci,
86 return 0; 97 return 0;
87} 98}
88 99
89static ssize_t mpc85xx_mc_inject_data_lo_store(struct mem_ctl_info *mci, 100static ssize_t mpc85xx_mc_inject_data_lo_store(struct device *dev,
101 struct device_attribute *mattr,
90 const char *data, size_t count) 102 const char *data, size_t count)
91{ 103{
104 struct mem_ctl_info *mci = to_mci(dev);
92 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 105 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
93 if (isdigit(*data)) { 106 if (isdigit(*data)) {
94 out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO, 107 out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO,
@@ -98,9 +111,11 @@ static ssize_t mpc85xx_mc_inject_data_lo_store(struct mem_ctl_info *mci,
98 return 0; 111 return 0;
99} 112}
100 113
101static ssize_t mpc85xx_mc_inject_ctrl_store(struct mem_ctl_info *mci, 114static ssize_t mpc85xx_mc_inject_ctrl_store(struct device *dev,
102 const char *data, size_t count) 115 struct device_attribute *mattr,
116 const char *data, size_t count)
103{ 117{
118 struct mem_ctl_info *mci = to_mci(dev);
104 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 119 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
105 if (isdigit(*data)) { 120 if (isdigit(*data)) {
106 out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT, 121 out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT,
@@ -110,38 +125,35 @@ static ssize_t mpc85xx_mc_inject_ctrl_store(struct mem_ctl_info *mci,
110 return 0; 125 return 0;
111} 126}
112 127
113static struct mcidev_sysfs_attribute mpc85xx_mc_sysfs_attributes[] = { 128DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
114 { 129 mpc85xx_mc_inject_data_hi_show, mpc85xx_mc_inject_data_hi_store);
115 .attr = { 130DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
116 .name = "inject_data_hi", 131 mpc85xx_mc_inject_data_lo_show, mpc85xx_mc_inject_data_lo_store);
117 .mode = (S_IRUGO | S_IWUSR) 132DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
118 }, 133 mpc85xx_mc_inject_ctrl_show, mpc85xx_mc_inject_ctrl_store);
119 .show = mpc85xx_mc_inject_data_hi_show,
120 .store = mpc85xx_mc_inject_data_hi_store},
121 {
122 .attr = {
123 .name = "inject_data_lo",
124 .mode = (S_IRUGO | S_IWUSR)
125 },
126 .show = mpc85xx_mc_inject_data_lo_show,
127 .store = mpc85xx_mc_inject_data_lo_store},
128 {
129 .attr = {
130 .name = "inject_ctrl",
131 .mode = (S_IRUGO | S_IWUSR)
132 },
133 .show = mpc85xx_mc_inject_ctrl_show,
134 .store = mpc85xx_mc_inject_ctrl_store},
135 134
136 /* End of list */ 135static int mpc85xx_create_sysfs_attributes(struct mem_ctl_info *mci)
137 { 136{
138 .attr = {.name = NULL} 137 int rc;
139 } 138
140}; 139 rc = device_create_file(&mci->dev, &dev_attr_inject_data_hi);
140 if (rc < 0)
141 return rc;
142 rc = device_create_file(&mci->dev, &dev_attr_inject_data_lo);
143 if (rc < 0)
144 return rc;
145 rc = device_create_file(&mci->dev, &dev_attr_inject_ctrl);
146 if (rc < 0)
147 return rc;
141 148
142static void mpc85xx_set_mc_sysfs_attributes(struct mem_ctl_info *mci) 149 return 0;
150}
151
152static void mpc85xx_remove_sysfs_attributes(struct mem_ctl_info *mci)
143{ 153{
144 mci->mc_driver_sysfs_attributes = mpc85xx_mc_sysfs_attributes; 154 device_remove_file(&mci->dev, &dev_attr_inject_data_hi);
155 device_remove_file(&mci->dev, &dev_attr_inject_data_lo);
156 device_remove_file(&mci->dev, &dev_attr_inject_ctrl);
145} 157}
146 158
147/**************************** PCI Err device ***************************/ 159/**************************** PCI Err device ***************************/
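
The hunk above carries the core of the mpc85xx conversion: the EDAC-private struct mcidev_sysfs_attribute table is replaced by ordinary driver-model attributes, i.e. one DEVICE_ATTR() declaration per file plus explicit device_create_file()/device_remove_file() calls on the mci's embedded struct device. A minimal sketch of the pattern, assuming the standard <linux/device.h> API and the to_mci() helper seen above; the "demo" attribute itself is hypothetical:

	/*
	 * Sketch only: a hypothetical read-only attribute following the
	 * DEVICE_ATTR pattern adopted above (assumes <linux/device.h>
	 * plus the EDAC core header for to_mci() and mem_ctl_info).
	 */
	static ssize_t demo_show(struct device *dev,
				 struct device_attribute *mattr, char *buf)
	{
		struct mem_ctl_info *mci = to_mci(dev);

		return sprintf(buf, "%s\n", mci->ctl_name);
	}
	static DEVICE_ATTR(demo, S_IRUGO, demo_show, NULL);

	static int demo_create(struct mem_ctl_info *mci)
	{
		/* attach the file to the device EDAC registers in sysfs */
		return device_create_file(&mci->dev, &dev_attr_demo);
	}

	static void demo_remove(struct mem_ctl_info *mci)
	{
		device_remove_file(&mci->dev, &dev_attr_demo);
	}
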
@@ -268,7 +280,7 @@ static int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
268 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0); 280 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);
269 281
270 if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { 282 if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
271 debugf3("%s(): failed edac_pci_add_device()\n", __func__); 283 edac_dbg(3, "failed edac_pci_add_device()\n");
272 goto err; 284 goto err;
273 } 285 }
274 286
@@ -291,7 +303,7 @@ static int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
291 } 303 }
292 304
293 devres_remove_group(&op->dev, mpc85xx_pci_err_probe); 305 devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
294 debugf3("%s(): success\n", __func__); 306 edac_dbg(3, "success\n");
295 printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n"); 307 printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
296 308
297 return 0; 309 return 0;
@@ -309,7 +321,7 @@ static int mpc85xx_pci_err_remove(struct platform_device *op)
309 struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev); 321 struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
310 struct mpc85xx_pci_pdata *pdata = pci->pvt_info; 322 struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
311 323
312 debugf0("%s()\n", __func__); 324 edac_dbg(0, "\n");
313 325
314 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 326 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR,
315 orig_pci_err_cap_dr); 327 orig_pci_err_cap_dr);
@@ -570,7 +582,7 @@ static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
570 pdata->edac_idx = edac_dev_idx++; 582 pdata->edac_idx = edac_dev_idx++;
571 583
572 if (edac_device_add_device(edac_dev) > 0) { 584 if (edac_device_add_device(edac_dev) > 0) {
573 debugf3("%s(): failed edac_device_add_device()\n", __func__); 585 edac_dbg(3, "failed edac_device_add_device()\n");
574 goto err; 586 goto err;
575 } 587 }
576 588
@@ -598,7 +610,7 @@ static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
598 610
599 devres_remove_group(&op->dev, mpc85xx_l2_err_probe); 611 devres_remove_group(&op->dev, mpc85xx_l2_err_probe);
600 612
601 debugf3("%s(): success\n", __func__); 613 edac_dbg(3, "success\n");
602 printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n"); 614 printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n");
603 615
604 return 0; 616 return 0;
@@ -616,7 +628,7 @@ static int mpc85xx_l2_err_remove(struct platform_device *op)
616 struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev); 628 struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
617 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; 629 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
618 630
619 debugf0("%s()\n", __func__); 631 edac_dbg(0, "\n");
620 632
621 if (edac_op_state == EDAC_OPSTATE_INT) { 633 if (edac_op_state == EDAC_OPSTATE_INT) {
622 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0); 634 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0);
@@ -813,7 +825,7 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci)
813 pfn = err_addr >> PAGE_SHIFT; 825 pfn = err_addr >> PAGE_SHIFT;
814 826
815 for (row_index = 0; row_index < mci->nr_csrows; row_index++) { 827 for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
816 csrow = &mci->csrows[row_index]; 828 csrow = mci->csrows[row_index];
817 if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page)) 829 if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
818 break; 830 break;
819 } 831 }
@@ -854,16 +866,16 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci)
854 mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n"); 866 mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
855 867
856 if (err_detect & DDR_EDE_SBE) 868 if (err_detect & DDR_EDE_SBE)
857 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 869 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
858 pfn, err_addr & ~PAGE_MASK, syndrome, 870 pfn, err_addr & ~PAGE_MASK, syndrome,
859 row_index, 0, -1, 871 row_index, 0, -1,
860 mci->ctl_name, "", NULL); 872 mci->ctl_name, "");
861 873
862 if (err_detect & DDR_EDE_MBE) 874 if (err_detect & DDR_EDE_MBE)
863 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 875 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
864 pfn, err_addr & ~PAGE_MASK, syndrome, 876 pfn, err_addr & ~PAGE_MASK, syndrome,
865 row_index, 0, -1, 877 row_index, 0, -1,
866 mci->ctl_name, "", NULL); 878 mci->ctl_name, "");
867 879
868 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect); 880 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
869} 881}
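
The reporting calls above use the reworked edac_mc_handle_error(): the argument right after the mci is now the number of errors in the event, and the old trailing driver-private pointer is gone. A sketch of a single corrected-error report under that assumed signature (the demo_report_ce() wrapper is hypothetical):

	/* Sketch: report one (count = 1) corrected error. */
	static void demo_report_ce(struct mem_ctl_info *mci,
				   unsigned long pfn, unsigned long offset,
				   unsigned long syndrome, int row)
	{
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     pfn, offset, syndrome,
				     row, 0, -1, /* csrow, channel, unused */
				     mci->ctl_name, "");
	}
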
@@ -933,8 +945,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
933 u32 start; 945 u32 start;
934 u32 end; 946 u32 end;
935 947
936 csrow = &mci->csrows[index]; 948 csrow = mci->csrows[index];
937 dimm = csrow->channels[0].dimm; 949 dimm = csrow->channels[0]->dimm;
938 950
939 cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 + 951 cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
940 (index * MPC85XX_MC_CS_BNDS_OFS)); 952 (index * MPC85XX_MC_CS_BNDS_OFS));
@@ -980,7 +992,8 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
980 layers[1].type = EDAC_MC_LAYER_CHANNEL; 992 layers[1].type = EDAC_MC_LAYER_CHANNEL;
981 layers[1].size = 1; 993 layers[1].size = 1;
982 layers[1].is_virt_csrow = false; 994 layers[1].is_virt_csrow = false;
983 mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), sizeof(*pdata)); 995 mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
996 sizeof(*pdata));
984 if (!mci) { 997 if (!mci) {
985 devres_release_group(&op->dev, mpc85xx_mc_err_probe); 998 devres_release_group(&op->dev, mpc85xx_mc_err_probe);
986 return -ENOMEM; 999 return -ENOMEM;
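
edac_mc_alloc() now takes an explicit description of the controller's layers instead of inferring csrow/channel counts. A sketch of the two-layer (chip-select by channel) layout built above, assuming the edac_mc_layer API of this series; the chip-select count of 4 is a placeholder:

	/* Sketch: allocate an mci laid out as 4 chip selects x 1 channel. */
	static struct mem_ctl_info *demo_alloc(void)
	{
		struct edac_mc_layer layers[2];

		layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
		layers[0].size = 4;		/* placeholder */
		layers[0].is_virt_csrow = true;
		layers[1].type = EDAC_MC_LAYER_CHANNEL;
		layers[1].size = 1;
		layers[1].is_virt_csrow = false;

		return edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
				     sizeof(struct mpc85xx_mc_pdata));
	}
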
@@ -989,9 +1002,9 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
989 pdata = mci->pvt_info; 1002 pdata = mci->pvt_info;
990 pdata->name = "mpc85xx_mc_err"; 1003 pdata->name = "mpc85xx_mc_err";
991 pdata->irq = NO_IRQ; 1004 pdata->irq = NO_IRQ;
992 mci->dev = &op->dev; 1005 mci->pdev = &op->dev;
993 pdata->edac_idx = edac_mc_idx++; 1006 pdata->edac_idx = edac_mc_idx++;
994 dev_set_drvdata(mci->dev, mci); 1007 dev_set_drvdata(mci->pdev, mci);
995 mci->ctl_name = pdata->name; 1008 mci->ctl_name = pdata->name;
996 mci->dev_name = pdata->name; 1009 mci->dev_name = pdata->name;
997 1010
@@ -1025,7 +1038,7 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
1025 goto err; 1038 goto err;
1026 } 1039 }
1027 1040
1028 debugf3("%s(): init mci\n", __func__); 1041 edac_dbg(3, "init mci\n");
1029 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 | 1042 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 |
1030 MEM_FLAG_DDR | MEM_FLAG_DDR2; 1043 MEM_FLAG_DDR | MEM_FLAG_DDR2;
1031 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 1044 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
@@ -1040,8 +1053,6 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
1040 1053
1041 mci->scrub_mode = SCRUB_SW_SRC; 1054 mci->scrub_mode = SCRUB_SW_SRC;
1042 1055
1043 mpc85xx_set_mc_sysfs_attributes(mci);
1044
1045 mpc85xx_init_csrows(mci); 1056 mpc85xx_init_csrows(mci);
1046 1057
1047 /* store the original error disable bits */ 1058 /* store the original error disable bits */
@@ -1053,7 +1064,13 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
1053 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0); 1064 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0);
1054 1065
1055 if (edac_mc_add_mc(mci)) { 1066 if (edac_mc_add_mc(mci)) {
1056 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 1067 edac_dbg(3, "failed edac_mc_add_mc()\n");
1068 goto err;
1069 }
1070
1071 if (mpc85xx_create_sysfs_attributes(mci)) {
1072 edac_mc_del_mc(mci->pdev);
1073 edac_dbg(3, "failed mpc85xx_create_sysfs_attributes()\n");
1057 goto err; 1074 goto err;
1058 } 1075 }
1059 1076
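
The ordering here matters: the inject_* files are created on mci->dev, which only shows up in sysfs once edac_mc_add_mc() succeeds, so attribute creation follows the add and unwinds through edac_mc_del_mc() on failure; the remove hunk further down mirrors this by dropping the files before deleting the controller. A compressed sketch of that symmetry (the demo_* wrappers are hypothetical):

	/* Sketch of the probe/remove ordering the hunks above rely on. */
	static int demo_probe(struct mem_ctl_info *mci)
	{
		int rc;

		if (edac_mc_add_mc(mci))	/* registers mci->dev in sysfs */
			return -ENODEV;

		rc = mpc85xx_create_sysfs_attributes(mci);
		if (rc) {
			edac_mc_del_mc(mci->pdev);	/* unwind the add */
			return rc;
		}
		return 0;
	}

	static void demo_remove(struct mem_ctl_info *mci)
	{
		mpc85xx_remove_sysfs_attributes(mci);	/* files first */
		edac_mc_del_mc(mci->pdev);
		edac_mc_free(mci);
	}
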
@@ -1087,7 +1104,7 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
1087 } 1104 }
1088 1105
1089 devres_remove_group(&op->dev, mpc85xx_mc_err_probe); 1106 devres_remove_group(&op->dev, mpc85xx_mc_err_probe);
1090 debugf3("%s(): success\n", __func__); 1107 edac_dbg(3, "success\n");
1091 printk(KERN_INFO EDAC_MOD_STR " MC err registered\n"); 1108 printk(KERN_INFO EDAC_MOD_STR " MC err registered\n");
1092 1109
1093 return 0; 1110 return 0;
@@ -1105,7 +1122,7 @@ static int mpc85xx_mc_err_remove(struct platform_device *op)
1105 struct mem_ctl_info *mci = dev_get_drvdata(&op->dev); 1122 struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
1106 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 1123 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
1107 1124
1108 debugf0("%s()\n", __func__); 1125 edac_dbg(0, "\n");
1109 1126
1110 if (edac_op_state == EDAC_OPSTATE_INT) { 1127 if (edac_op_state == EDAC_OPSTATE_INT) {
1111 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0); 1128 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0);
@@ -1116,6 +1133,7 @@ static int mpc85xx_mc_err_remove(struct platform_device *op)
1116 orig_ddr_err_disable); 1133 orig_ddr_err_disable);
1117 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe); 1134 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe);
1118 1135
1136 mpc85xx_remove_sysfs_attributes(mci);
1119 edac_mc_del_mc(&op->dev); 1137 edac_mc_del_mc(&op->dev);
1120 edac_mc_free(mci); 1138 edac_mc_free(mci);
1121 return 0; 1139 return 0;
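
The same debugf[0-4]("%s() ...", __func__) to edac_dbg(level, ...) conversion threads through every file below. Assuming the edac_dbg() macro of this series, which prefixes the calling function's name on its own, the "%s()"/__func__ boilerplate simply disappears:

	/* Sketch of the conversion; both lines log equivalent output. */
	static void demo_dbg(void)
	{
		/* old: debugf3("%s(): success\n", __func__); */
		edac_dbg(3, "success\n");
	}
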
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index b0bb5a3d2527..2b315c2edc3c 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -169,7 +169,7 @@ static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
169 MV64X60_PCIx_ERR_MASK_VAL); 169 MV64X60_PCIx_ERR_MASK_VAL);
170 170
171 if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { 171 if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
172 debugf3("%s(): failed edac_pci_add_device()\n", __func__); 172 edac_dbg(3, "failed edac_pci_add_device()\n");
173 goto err; 173 goto err;
174 } 174 }
175 175
@@ -194,7 +194,7 @@ static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
194 devres_remove_group(&pdev->dev, mv64x60_pci_err_probe); 194 devres_remove_group(&pdev->dev, mv64x60_pci_err_probe);
195 195
196 /* get this far and it's successful */ 196 /* get this far and it's successful */
197 debugf3("%s(): success\n", __func__); 197 edac_dbg(3, "success\n");
198 198
199 return 0; 199 return 0;
200 200
@@ -210,7 +210,7 @@ static int mv64x60_pci_err_remove(struct platform_device *pdev)
210{ 210{
211 struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev); 211 struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);
212 212
213 debugf0("%s()\n", __func__); 213 edac_dbg(0, "\n");
214 214
215 edac_pci_del_device(&pdev->dev); 215 edac_pci_del_device(&pdev->dev);
216 216
@@ -336,7 +336,7 @@ static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev)
336 pdata->edac_idx = edac_dev_idx++; 336 pdata->edac_idx = edac_dev_idx++;
337 337
338 if (edac_device_add_device(edac_dev) > 0) { 338 if (edac_device_add_device(edac_dev) > 0) {
339 debugf3("%s(): failed edac_device_add_device()\n", __func__); 339 edac_dbg(3, "failed edac_device_add_device()\n");
340 goto err; 340 goto err;
341 } 341 }
342 342
@@ -363,7 +363,7 @@ static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev)
363 devres_remove_group(&pdev->dev, mv64x60_sram_err_probe); 363 devres_remove_group(&pdev->dev, mv64x60_sram_err_probe);
364 364
365 /* get this far and it's successful */ 365 /* get this far and it's successful */
366 debugf3("%s(): success\n", __func__); 366 edac_dbg(3, "success\n");
367 367
368 return 0; 368 return 0;
369 369
@@ -379,7 +379,7 @@ static int mv64x60_sram_err_remove(struct platform_device *pdev)
379{ 379{
380 struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev); 380 struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
381 381
382 debugf0("%s()\n", __func__); 382 edac_dbg(0, "\n");
383 383
384 edac_device_del_device(&pdev->dev); 384 edac_device_del_device(&pdev->dev);
385 edac_device_free_ctl_info(edac_dev); 385 edac_device_free_ctl_info(edac_dev);
@@ -531,7 +531,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
531 pdata->edac_idx = edac_dev_idx++; 531 pdata->edac_idx = edac_dev_idx++;
532 532
533 if (edac_device_add_device(edac_dev) > 0) { 533 if (edac_device_add_device(edac_dev) > 0) {
534 debugf3("%s(): failed edac_device_add_device()\n", __func__); 534 edac_dbg(3, "failed edac_device_add_device()\n");
535 goto err; 535 goto err;
536 } 536 }
537 537
@@ -558,7 +558,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
558 devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe); 558 devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe);
559 559
560 /* get this far and it's successful */ 560 /* get this far and it's successful */
561 debugf3("%s(): success\n", __func__); 561 edac_dbg(3, "success\n");
562 562
563 return 0; 563 return 0;
564 564
@@ -574,7 +574,7 @@ static int mv64x60_cpu_err_remove(struct platform_device *pdev)
574{ 574{
575 struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev); 575 struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
576 576
577 debugf0("%s()\n", __func__); 577 edac_dbg(0, "\n");
578 578
579 edac_device_del_device(&pdev->dev); 579 edac_device_del_device(&pdev->dev);
580 edac_device_free_ctl_info(edac_dev); 580 edac_device_free_ctl_info(edac_dev);
@@ -611,17 +611,17 @@ static void mv64x60_mc_check(struct mem_ctl_info *mci)
611 611
612 /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */ 612 /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
613 if (!(reg & 0x1)) 613 if (!(reg & 0x1))
614 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 614 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
615 err_addr >> PAGE_SHIFT, 615 err_addr >> PAGE_SHIFT,
616 err_addr & PAGE_MASK, syndrome, 616 err_addr & PAGE_MASK, syndrome,
617 0, 0, -1, 617 0, 0, -1,
618 mci->ctl_name, "", NULL); 618 mci->ctl_name, "");
619 else /* 2 bit error, UE */ 619 else /* 2 bit error, UE */
620 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 620 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
621 err_addr >> PAGE_SHIFT, 621 err_addr >> PAGE_SHIFT,
622 err_addr & PAGE_MASK, 0, 622 err_addr & PAGE_MASK, 0,
623 0, 0, -1, 623 0, 0, -1,
624 mci->ctl_name, "", NULL); 624 mci->ctl_name, "");
625 625
626 /* clear the error */ 626 /* clear the error */
627 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0); 627 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
@@ -670,8 +670,8 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
670 670
671 ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG); 671 ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
672 672
673 csrow = &mci->csrows[0]; 673 csrow = mci->csrows[0];
674 dimm = csrow->channels[0].dimm; 674 dimm = csrow->channels[0]->dimm;
675 675
676 dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT; 676 dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT;
677 dimm->grain = 8; 677 dimm->grain = 8;
@@ -724,7 +724,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
724 } 724 }
725 725
726 pdata = mci->pvt_info; 726 pdata = mci->pvt_info;
727 mci->dev = &pdev->dev; 727 mci->pdev = &pdev->dev;
728 platform_set_drvdata(pdev, mci); 728 platform_set_drvdata(pdev, mci);
729 pdata->name = "mv64x60_mc_err"; 729 pdata->name = "mv64x60_mc_err";
730 pdata->irq = NO_IRQ; 730 pdata->irq = NO_IRQ;
@@ -766,7 +766,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
766 goto err2; 766 goto err2;
767 } 767 }
768 768
769 debugf3("%s(): init mci\n", __func__); 769 edac_dbg(3, "init mci\n");
770 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; 770 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
771 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 771 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
772 mci->edac_cap = EDAC_FLAG_SECDED; 772 mci->edac_cap = EDAC_FLAG_SECDED;
@@ -790,7 +790,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
790 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl); 790 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl);
791 791
792 if (edac_mc_add_mc(mci)) { 792 if (edac_mc_add_mc(mci)) {
793 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 793 edac_dbg(3, "failed edac_mc_add_mc()\n");
794 goto err; 794 goto err;
795 } 795 }
796 796
@@ -815,7 +815,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
815 } 815 }
816 816
817 /* get this far and it's successful */ 817 /* get this far and it's successful */
818 debugf3("%s(): success\n", __func__); 818 edac_dbg(3, "success\n");
819 819
820 return 0; 820 return 0;
821 821
@@ -831,7 +831,7 @@ static int mv64x60_mc_err_remove(struct platform_device *pdev)
831{ 831{
832 struct mem_ctl_info *mci = platform_get_drvdata(pdev); 832 struct mem_ctl_info *mci = platform_get_drvdata(pdev);
833 833
834 debugf0("%s()\n", __func__); 834 edac_dbg(0, "\n");
835 835
836 edac_mc_del_mc(&pdev->dev); 836 edac_mc_del_mc(&pdev->dev);
837 edac_mc_free(mci); 837 edac_mc_free(mci);
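
The csrow edits repeated below follow from mem_ctl_info now holding arrays of pointers: mci->csrows[i] is already a struct csrow_info *, and csrow->channels[j] a struct rank_info *, hence the dropped '&' and the '.' to '->' changes. A sketch of walking the pointer-based layout, assuming those structures:

	/* Sketch: iterate csrows/channels as arrays of pointers. */
	static void demo_walk(struct mem_ctl_info *mci)
	{
		int i;

		for (i = 0; i < mci->nr_csrows; i++) {
			struct csrow_info *csrow = mci->csrows[i];
			struct dimm_info *dimm = csrow->channels[0]->dimm;

			edac_dbg(1, "csrow %d: %u pages\n", i, dimm->nr_pages);
		}
	}
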
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
index b095a906a994..2d35b78ada3c 100644
--- a/drivers/edac/pasemi_edac.c
+++ b/drivers/edac/pasemi_edac.c
@@ -74,7 +74,7 @@ static int system_mmc_id;
74 74
75static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci) 75static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci)
76{ 76{
77 struct pci_dev *pdev = to_pci_dev(mci->dev); 77 struct pci_dev *pdev = to_pci_dev(mci->pdev);
78 u32 tmp; 78 u32 tmp;
79 79
80 pci_read_config_dword(pdev, MCDEBUG_ERRSTA, 80 pci_read_config_dword(pdev, MCDEBUG_ERRSTA,
@@ -95,7 +95,7 @@ static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci)
95 95
96static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta) 96static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
97{ 97{
98 struct pci_dev *pdev = to_pci_dev(mci->dev); 98 struct pci_dev *pdev = to_pci_dev(mci->pdev);
99 u32 errlog1a; 99 u32 errlog1a;
100 u32 cs; 100 u32 cs;
101 101
@@ -110,16 +110,16 @@ static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
110 /* uncorrectable/multi-bit errors */ 110 /* uncorrectable/multi-bit errors */
111 if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS | 111 if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS |
112 MCDEBUG_ERRSTA_RFL_STATUS)) { 112 MCDEBUG_ERRSTA_RFL_STATUS)) {
113 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 113 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
114 mci->csrows[cs].first_page, 0, 0, 114 mci->csrows[cs]->first_page, 0, 0,
115 cs, 0, -1, mci->ctl_name, "", NULL); 115 cs, 0, -1, mci->ctl_name, "");
116 } 116 }
117 117
118 /* correctable/single-bit errors */ 118 /* correctable/single-bit errors */
119 if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) 119 if (errsta & MCDEBUG_ERRSTA_SBE_STATUS)
120 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 120 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
121 mci->csrows[cs].first_page, 0, 0, 121 mci->csrows[cs]->first_page, 0, 0,
122 cs, 0, -1, mci->ctl_name, "", NULL); 122 cs, 0, -1, mci->ctl_name, "");
123} 123}
124 124
125static void pasemi_edac_check(struct mem_ctl_info *mci) 125static void pasemi_edac_check(struct mem_ctl_info *mci)
@@ -141,8 +141,8 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
141 int index; 141 int index;
142 142
143 for (index = 0; index < mci->nr_csrows; index++) { 143 for (index = 0; index < mci->nr_csrows; index++) {
144 csrow = &mci->csrows[index]; 144 csrow = mci->csrows[index];
145 dimm = csrow->channels[0].dimm; 145 dimm = csrow->channels[0]->dimm;
146 146
147 pci_read_config_dword(pdev, 147 pci_read_config_dword(pdev,
148 MCDRAM_RANKCFG + (index * 12), 148 MCDRAM_RANKCFG + (index * 12),
@@ -225,7 +225,7 @@ static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
225 MCCFG_ERRCOR_ECC_GEN_EN | 225 MCCFG_ERRCOR_ECC_GEN_EN |
226 MCCFG_ERRCOR_ECC_CRR_EN; 226 MCCFG_ERRCOR_ECC_CRR_EN;
227 227
228 mci->dev = &pdev->dev; 228 mci->pdev = &pdev->dev;
229 mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR; 229 mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR;
230 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 230 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
231 mci->edac_cap = (errcor & MCCFG_ERRCOR_ECC_GEN_EN) ? 231 mci->edac_cap = (errcor & MCCFG_ERRCOR_ECC_GEN_EN) ?
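
These drivers also pick up the field rename in mem_ctl_info: the driver-supplied parent device moves from ->dev to ->pdev, freeing ->dev for the struct device EDAC itself registers in sysfs (the one the attribute hunks earlier attach files to). A sketch of the binding, assuming a platform-device parent:

	/* Sketch: bind the parent device through the renamed ->pdev. */
	static void demo_bind(struct mem_ctl_info *mci,
			      struct platform_device *op)
	{
		mci->pdev = &op->dev;	/* was: mci->dev = &op->dev; */
		dev_set_drvdata(mci->pdev, mci);
	}
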
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index f3f9fed06ad7..bf0957635991 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -727,10 +727,10 @@ ppc4xx_edac_handle_ce(struct mem_ctl_info *mci,
727 727
728 for (row = 0; row < mci->nr_csrows; row++) 728 for (row = 0; row < mci->nr_csrows; row++)
729 if (ppc4xx_edac_check_bank_error(status, row)) 729 if (ppc4xx_edac_check_bank_error(status, row))
730 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 730 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
731 0, 0, 0, 731 0, 0, 0,
732 row, 0, -1, 732 row, 0, -1,
733 message, "", NULL); 733 message, "");
734} 734}
735 735
736/** 736/**
@@ -758,10 +758,10 @@ ppc4xx_edac_handle_ue(struct mem_ctl_info *mci,
758 758
759 for (row = 0; row < mci->nr_csrows; row++) 759 for (row = 0; row < mci->nr_csrows; row++)
760 if (ppc4xx_edac_check_bank_error(status, row)) 760 if (ppc4xx_edac_check_bank_error(status, row))
761 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 761 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
762 page, offset, 0, 762 page, offset, 0,
763 row, 0, -1, 763 row, 0, -1,
764 message, "", NULL); 764 message, "");
765} 765}
766 766
767/** 767/**
@@ -1027,9 +1027,9 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
1027 1027
1028 /* Initial driver pointers and private data */ 1028 /* Initial driver pointers and private data */
1029 1029
1030 mci->dev = &op->dev; 1030 mci->pdev = &op->dev;
1031 1031
1032 dev_set_drvdata(mci->dev, mci); 1032 dev_set_drvdata(mci->pdev, mci);
1033 1033
1034 pdata = mci->pvt_info; 1034 pdata = mci->pvt_info;
1035 1035
@@ -1334,7 +1334,7 @@ static int __devinit ppc4xx_edac_probe(struct platform_device *op)
1334 return 0; 1334 return 0;
1335 1335
1336 fail1: 1336 fail1:
1337 edac_mc_del_mc(mci->dev); 1337 edac_mc_del_mc(mci->pdev);
1338 1338
1339 fail: 1339 fail:
1340 edac_mc_free(mci); 1340 edac_mc_free(mci);
@@ -1368,7 +1368,7 @@ ppc4xx_edac_remove(struct platform_device *op)
1368 1368
1369 dcr_unmap(pdata->dcr_host, SDRAM_DCR_RESOURCE_LEN); 1369 dcr_unmap(pdata->dcr_host, SDRAM_DCR_RESOURCE_LEN);
1370 1370
1371 edac_mc_del_mc(mci->dev); 1371 edac_mc_del_mc(mci->pdev);
1372 edac_mc_free(mci); 1372 edac_mc_free(mci);
1373 1373
1374 return 0; 1374 return 0;
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index e1cacd164f31..f854debd5533 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -140,7 +140,7 @@ static void r82600_get_error_info(struct mem_ctl_info *mci,
140{ 140{
141 struct pci_dev *pdev; 141 struct pci_dev *pdev;
142 142
143 pdev = to_pci_dev(mci->dev); 143 pdev = to_pci_dev(mci->pdev);
144 pci_read_config_dword(pdev, R82600_EAP, &info->eapr); 144 pci_read_config_dword(pdev, R82600_EAP, &info->eapr);
145 145
146 if (info->eapr & BIT(0)) 146 if (info->eapr & BIT(0))
@@ -179,11 +179,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
179 error_found = 1; 179 error_found = 1;
180 180
181 if (handle_errors) 181 if (handle_errors)
182 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 182 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
183 page, 0, syndrome, 183 page, 0, syndrome,
184 edac_mc_find_csrow_by_page(mci, page), 184 edac_mc_find_csrow_by_page(mci, page),
185 0, -1, 185 0, -1,
186 mci->ctl_name, "", NULL); 186 mci->ctl_name, "");
187 } 187 }
188 188
189 if (info->eapr & BIT(1)) { /* UE? */ 189 if (info->eapr & BIT(1)) { /* UE? */
@@ -191,11 +191,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
191 191
192 if (handle_errors) 192 if (handle_errors)
193 /* 82600 doesn't give enough info */ 193 /* 82600 doesn't give enough info */
194 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 194 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
195 page, 0, 0, 195 page, 0, 0,
196 edac_mc_find_csrow_by_page(mci, page), 196 edac_mc_find_csrow_by_page(mci, page),
197 0, -1, 197 0, -1,
198 mci->ctl_name, "", NULL); 198 mci->ctl_name, "");
199 } 199 }
200 200
201 return error_found; 201 return error_found;
@@ -205,7 +205,7 @@ static void r82600_check(struct mem_ctl_info *mci)
205{ 205{
206 struct r82600_error_info info; 206 struct r82600_error_info info;
207 207
208 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 208 edac_dbg(1, "MC%d\n", mci->mc_idx);
209 r82600_get_error_info(mci, &info); 209 r82600_get_error_info(mci, &info);
210 r82600_process_error_info(mci, &info, 1); 210 r82600_process_error_info(mci, &info, 1);
211} 211}
@@ -230,19 +230,19 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
230 row_high_limit_last = 0; 230 row_high_limit_last = 0;
231 231
232 for (index = 0; index < mci->nr_csrows; index++) { 232 for (index = 0; index < mci->nr_csrows; index++) {
233 csrow = &mci->csrows[index]; 233 csrow = mci->csrows[index];
234 dimm = csrow->channels[0].dimm; 234 dimm = csrow->channels[0]->dimm;
235 235
236 /* find the DRAM Chip Select Base address and mask */ 236 /* find the DRAM Chip Select Base address and mask */
237 pci_read_config_byte(pdev, R82600_DRBA + index, &drbar); 237 pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
238 238
239 debugf1("%s() Row=%d DRBA = %#0x\n", __func__, index, drbar); 239 edac_dbg(1, "Row=%d DRBA = %#0x\n", index, drbar);
240 240
241 row_high_limit = ((u32) drbar << 24); 241 row_high_limit = ((u32) drbar << 24);
242/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */ 242/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
243 243
244 debugf1("%s() Row=%d, Boundary Address=%#0x, Last = %#0x\n", 244 edac_dbg(1, "Row=%d, Boundary Address=%#0x, Last = %#0x\n",
245 __func__, index, row_high_limit, row_high_limit_last); 245 index, row_high_limit, row_high_limit_last);
246 246
247 /* Empty row [p.57] */ 247 /* Empty row [p.57] */
248 if (row_high_limit == row_high_limit_last) 248 if (row_high_limit == row_high_limit_last)
@@ -277,14 +277,13 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
277 u32 sdram_refresh_rate; 277 u32 sdram_refresh_rate;
278 struct r82600_error_info discard; 278 struct r82600_error_info discard;
279 279
280 debugf0("%s()\n", __func__); 280 edac_dbg(0, "\n");
281 pci_read_config_byte(pdev, R82600_DRAMC, &dramcr); 281 pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
282 pci_read_config_dword(pdev, R82600_EAP, &eapr); 282 pci_read_config_dword(pdev, R82600_EAP, &eapr);
283 scrub_disabled = eapr & BIT(31); 283 scrub_disabled = eapr & BIT(31);
284 sdram_refresh_rate = dramcr & (BIT(0) | BIT(1)); 284 sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
285 debugf2("%s(): sdram refresh rate = %#0x\n", __func__, 285 edac_dbg(2, "sdram refresh rate = %#0x\n", sdram_refresh_rate);
286 sdram_refresh_rate); 286 edac_dbg(2, "DRAMC register = %#0x\n", dramcr);
287 debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
288 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 287 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
289 layers[0].size = R82600_NR_CSROWS; 288 layers[0].size = R82600_NR_CSROWS;
290 layers[0].is_virt_csrow = true; 289 layers[0].is_virt_csrow = true;
@@ -295,8 +294,8 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
295 if (mci == NULL) 294 if (mci == NULL)
296 return -ENOMEM; 295 return -ENOMEM;
297 296
298 debugf0("%s(): mci = %p\n", __func__, mci); 297 edac_dbg(0, "mci = %p\n", mci);
299 mci->dev = &pdev->dev; 298 mci->pdev = &pdev->dev;
300 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; 299 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
301 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 300 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
302 /* FIXME try to work out if the chip leads have been used for COM2 301 /* FIXME try to work out if the chip leads have been used for COM2
@@ -311,8 +310,8 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
311 310
312 if (ecc_enabled(dramcr)) { 311 if (ecc_enabled(dramcr)) {
313 if (scrub_disabled) 312 if (scrub_disabled)
314 debugf3("%s(): mci = %p - Scrubbing disabled! EAP: " 313 edac_dbg(3, "mci = %p - Scrubbing disabled! EAP: %#0x\n",
315 "%#0x\n", __func__, mci, eapr); 314 mci, eapr);
316 } else 315 } else
317 mci->edac_cap = EDAC_FLAG_NONE; 316 mci->edac_cap = EDAC_FLAG_NONE;
318 317
@@ -329,15 +328,14 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
329 * type of memory controller. The ID is therefore hardcoded to 0. 328 * type of memory controller. The ID is therefore hardcoded to 0.
330 */ 329 */
331 if (edac_mc_add_mc(mci)) { 330 if (edac_mc_add_mc(mci)) {
332 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 331 edac_dbg(3, "failed edac_mc_add_mc()\n");
333 goto fail; 332 goto fail;
334 } 333 }
335 334
336 /* get this far and it's successful */ 335 /* get this far and it's successful */
337 336
338 if (disable_hardware_scrub) { 337 if (disable_hardware_scrub) {
339 debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n", 338 edac_dbg(3, "Disabling Hardware Scrub (scrub on error)\n");
340 __func__);
341 pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31)); 339 pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31));
342 } 340 }
343 341
@@ -352,7 +350,7 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
352 __func__); 350 __func__);
353 } 351 }
354 352
355 debugf3("%s(): success\n", __func__); 353 edac_dbg(3, "success\n");
356 return 0; 354 return 0;
357 355
358fail: 356fail:
@@ -364,7 +362,7 @@ fail:
364static int __devinit r82600_init_one(struct pci_dev *pdev, 362static int __devinit r82600_init_one(struct pci_dev *pdev,
365 const struct pci_device_id *ent) 363 const struct pci_device_id *ent)
366{ 364{
367 debugf0("%s()\n", __func__); 365 edac_dbg(0, "\n");
368 366
369 /* don't need to call pci_enable_device() */ 367 /* don't need to call pci_enable_device() */
370 return r82600_probe1(pdev, ent->driver_data); 368 return r82600_probe1(pdev, ent->driver_data);
@@ -374,7 +372,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
374{ 372{
375 struct mem_ctl_info *mci; 373 struct mem_ctl_info *mci;
376 374
377 debugf0("%s()\n", __func__); 375 edac_dbg(0, "\n");
378 376
379 if (r82600_pci) 377 if (r82600_pci)
380 edac_pci_release_generic_ctl(r82600_pci); 378 edac_pci_release_generic_ctl(r82600_pci);
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 4adaf4b7da99..f3b1f9fafa4b 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -381,8 +381,8 @@ static inline int numrank(u32 mtr)
381 int ranks = (1 << RANK_CNT_BITS(mtr)); 381 int ranks = (1 << RANK_CNT_BITS(mtr));
382 382
383 if (ranks > 4) { 383 if (ranks > 4) {
384 debugf0("Invalid number of ranks: %d (max = 4) raw value = %x (%04x)", 384 edac_dbg(0, "Invalid number of ranks: %d (max = 4) raw value = %x (%04x)\n",
385 ranks, (unsigned int)RANK_CNT_BITS(mtr), mtr); 385 ranks, (unsigned int)RANK_CNT_BITS(mtr), mtr);
386 return -EINVAL; 386 return -EINVAL;
387 } 387 }
388 388
@@ -394,8 +394,8 @@ static inline int numrow(u32 mtr)
394 int rows = (RANK_WIDTH_BITS(mtr) + 12); 394 int rows = (RANK_WIDTH_BITS(mtr) + 12);
395 395
396 if (rows < 13 || rows > 18) { 396 if (rows < 13 || rows > 18) {
397 debugf0("Invalid number of rows: %d (should be between 14 and 17) raw value = %x (%04x)", 397 edac_dbg(0, "Invalid number of rows: %d (should be between 13 and 18) raw value = %x (%04x)\n",
398 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr); 398 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
399 return -EINVAL; 399 return -EINVAL;
400 } 400 }
401 401
@@ -407,8 +407,8 @@ static inline int numcol(u32 mtr)
407 int cols = (COL_WIDTH_BITS(mtr) + 10); 407 int cols = (COL_WIDTH_BITS(mtr) + 10);
408 408
409 if (cols > 12) { 409 if (cols > 12) {
410 debugf0("Invalid number of cols: %d (max = 4) raw value = %x (%04x)", 410 edac_dbg(0, "Invalid number of cols: %d (max = 12) raw value = %x (%04x)\n",
411 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr); 411 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
412 return -EINVAL; 412 return -EINVAL;
413 } 413 }
414 414
@@ -475,8 +475,8 @@ static struct pci_dev *get_pdev_slot_func(u8 bus, unsigned slot,
475 475
476 if (PCI_SLOT(sbridge_dev->pdev[i]->devfn) == slot && 476 if (PCI_SLOT(sbridge_dev->pdev[i]->devfn) == slot &&
477 PCI_FUNC(sbridge_dev->pdev[i]->devfn) == func) { 477 PCI_FUNC(sbridge_dev->pdev[i]->devfn) == func) {
478 debugf1("Associated %02x.%02x.%d with %p\n", 478 edac_dbg(1, "Associated %02x.%02x.%d with %p\n",
479 bus, slot, func, sbridge_dev->pdev[i]); 479 bus, slot, func, sbridge_dev->pdev[i]);
480 return sbridge_dev->pdev[i]; 480 return sbridge_dev->pdev[i];
481 } 481 }
482 } 482 }
@@ -523,45 +523,45 @@ static int get_dimm_config(struct mem_ctl_info *mci)
523 523
524 pci_read_config_dword(pvt->pci_br, SAD_CONTROL, &reg); 524 pci_read_config_dword(pvt->pci_br, SAD_CONTROL, &reg);
525 pvt->sbridge_dev->node_id = NODE_ID(reg); 525 pvt->sbridge_dev->node_id = NODE_ID(reg);
526 debugf0("mc#%d: Node ID: %d, source ID: %d\n", 526 edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
527 pvt->sbridge_dev->mc, 527 pvt->sbridge_dev->mc,
528 pvt->sbridge_dev->node_id, 528 pvt->sbridge_dev->node_id,
529 pvt->sbridge_dev->source_id); 529 pvt->sbridge_dev->source_id);
530 530
531 pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg); 531 pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
532 if (IS_MIRROR_ENABLED(reg)) { 532 if (IS_MIRROR_ENABLED(reg)) {
533 debugf0("Memory mirror is enabled\n"); 533 edac_dbg(0, "Memory mirror is enabled\n");
534 pvt->is_mirrored = true; 534 pvt->is_mirrored = true;
535 } else { 535 } else {
536 debugf0("Memory mirror is disabled\n"); 536 edac_dbg(0, "Memory mirror is disabled\n");
537 pvt->is_mirrored = false; 537 pvt->is_mirrored = false;
538 } 538 }
539 539
540 pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr); 540 pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
541 if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) { 541 if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
542 debugf0("Lockstep is enabled\n"); 542 edac_dbg(0, "Lockstep is enabled\n");
543 mode = EDAC_S8ECD8ED; 543 mode = EDAC_S8ECD8ED;
544 pvt->is_lockstep = true; 544 pvt->is_lockstep = true;
545 } else { 545 } else {
546 debugf0("Lockstep is disabled\n"); 546 edac_dbg(0, "Lockstep is disabled\n");
547 mode = EDAC_S4ECD4ED; 547 mode = EDAC_S4ECD4ED;
548 pvt->is_lockstep = false; 548 pvt->is_lockstep = false;
549 } 549 }
550 if (IS_CLOSE_PG(pvt->info.mcmtr)) { 550 if (IS_CLOSE_PG(pvt->info.mcmtr)) {
551 debugf0("address map is on closed page mode\n"); 551 edac_dbg(0, "address map is on closed page mode\n");
552 pvt->is_close_pg = true; 552 pvt->is_close_pg = true;
553 } else { 553 } else {
554 debugf0("address map is on open page mode\n"); 554 edac_dbg(0, "address map is on open page mode\n");
555 pvt->is_close_pg = false; 555 pvt->is_close_pg = false;
556 } 556 }
557 557
558 pci_read_config_dword(pvt->pci_ta, RANK_CFG_A, &reg); 558 pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, &reg);
559 if (IS_RDIMM_ENABLED(reg)) { 559 if (IS_RDIMM_ENABLED(reg)) {
560 /* FIXME: Can also be LRDIMM */ 560 /* FIXME: Can also be LRDIMM */
561 debugf0("Memory is registered\n"); 561 edac_dbg(0, "Memory is registered\n");
562 mtype = MEM_RDDR3; 562 mtype = MEM_RDDR3;
563 } else { 563 } else {
564 debugf0("Memory is unregistered\n"); 564 edac_dbg(0, "Memory is unregistered\n");
565 mtype = MEM_DDR3; 565 mtype = MEM_DDR3;
566 } 566 }
567 567
@@ -576,7 +576,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
576 i, j, 0); 576 i, j, 0);
577 pci_read_config_dword(pvt->pci_tad[i], 577 pci_read_config_dword(pvt->pci_tad[i],
578 mtr_regs[j], &mtr); 578 mtr_regs[j], &mtr);
579 debugf4("Channel #%d MTR%d = %x\n", i, j, mtr); 579 edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr);
580 if (IS_DIMM_PRESENT(mtr)) { 580 if (IS_DIMM_PRESENT(mtr)) {
581 pvt->channel[i].dimms++; 581 pvt->channel[i].dimms++;
582 582
@@ -588,10 +588,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
588 size = (rows * cols * banks * ranks) >> (20 - 3); 588 size = (rows * cols * banks * ranks) >> (20 - 3);
589 npages = MiB_TO_PAGES(size); 589 npages = MiB_TO_PAGES(size);
590 590
591 debugf0("mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", 591 edac_dbg(0, "mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
592 pvt->sbridge_dev->mc, i, j, 592 pvt->sbridge_dev->mc, i, j,
593 size, npages, 593 size, npages,
594 banks, ranks, rows, cols); 594 banks, ranks, rows, cols);
595 595
596 dimm->nr_pages = npages; 596 dimm->nr_pages = npages;
597 dimm->grain = 32; 597 dimm->grain = 32;
@@ -629,8 +629,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
629 tmp_mb = (1 + pvt->tolm) >> 20; 629 tmp_mb = (1 + pvt->tolm) >> 20;
630 630
631 mb = div_u64_rem(tmp_mb, 1000, &kb); 631 mb = div_u64_rem(tmp_mb, 1000, &kb);
632 debugf0("TOLM: %u.%03u GB (0x%016Lx)\n", 632 edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);
633 mb, kb, (u64)pvt->tolm);
634 633
635 /* Address range is already 45:25 */ 634 /* Address range is already 45:25 */
636 pci_read_config_dword(pvt->pci_sad1, TOHM, 635 pci_read_config_dword(pvt->pci_sad1, TOHM,
@@ -639,8 +638,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
639 tmp_mb = (1 + pvt->tohm) >> 20; 638 tmp_mb = (1 + pvt->tohm) >> 20;
640 639
641 mb = div_u64_rem(tmp_mb, 1000, &kb); 640 mb = div_u64_rem(tmp_mb, 1000, &kb);
642 debugf0("TOHM: %u.%03u GB (0x%016Lx)", 641 edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)", mb, kb, (u64)pvt->tohm);
643 mb, kb, (u64)pvt->tohm);
644 642
645 /* 643 /*
646 * Step 2) Get SAD range and SAD Interleave list 644 * Step 2) Get SAD range and SAD Interleave list
@@ -663,13 +661,13 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
663 661
664 tmp_mb = (limit + 1) >> 20; 662 tmp_mb = (limit + 1) >> 20;
665 mb = div_u64_rem(tmp_mb, 1000, &kb); 663 mb = div_u64_rem(tmp_mb, 1000, &kb);
666 debugf0("SAD#%d %s up to %u.%03u GB (0x%016Lx) %s reg=0x%08x\n", 664 edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
667 n_sads, 665 n_sads,
668 get_dram_attr(reg), 666 get_dram_attr(reg),
669 mb, kb, 667 mb, kb,
670 ((u64)tmp_mb) << 20L, 668 ((u64)tmp_mb) << 20L,
671 INTERLEAVE_MODE(reg) ? "Interleave: 8:6" : "Interleave: [8:6]XOR[18:16]", 669 INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
672 reg); 670 reg);
673 prv = limit; 671 prv = limit;
674 672
675 pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads], 673 pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
@@ -679,8 +677,8 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
679 if (j > 0 && sad_interl == sad_pkg(reg, j)) 677 if (j > 0 && sad_interl == sad_pkg(reg, j))
680 break; 678 break;
681 679
682 debugf0("SAD#%d, interleave #%d: %d\n", 680 edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
683 n_sads, j, sad_pkg(reg, j)); 681 n_sads, j, sad_pkg(reg, j));
684 } 682 }
685 } 683 }
686 684
@@ -697,16 +695,16 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
697 tmp_mb = (limit + 1) >> 20; 695 tmp_mb = (limit + 1) >> 20;
698 696
699 mb = div_u64_rem(tmp_mb, 1000, &kb); 697 mb = div_u64_rem(tmp_mb, 1000, &kb);
700 debugf0("TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n", 698 edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
701 n_tads, mb, kb, 699 n_tads, mb, kb,
702 ((u64)tmp_mb) << 20L, 700 ((u64)tmp_mb) << 20L,
703 (u32)TAD_SOCK(reg), 701 (u32)TAD_SOCK(reg),
704 (u32)TAD_CH(reg), 702 (u32)TAD_CH(reg),
705 (u32)TAD_TGT0(reg), 703 (u32)TAD_TGT0(reg),
706 (u32)TAD_TGT1(reg), 704 (u32)TAD_TGT1(reg),
707 (u32)TAD_TGT2(reg), 705 (u32)TAD_TGT2(reg),
708 (u32)TAD_TGT3(reg), 706 (u32)TAD_TGT3(reg),
709 reg); 707 reg);
710 prv = limit; 708 prv = limit;
711 } 709 }
712 710
@@ -722,11 +720,11 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
722 &reg); 720 &reg);
723 tmp_mb = TAD_OFFSET(reg) >> 20; 721 tmp_mb = TAD_OFFSET(reg) >> 20;
724 mb = div_u64_rem(tmp_mb, 1000, &kb); 722 mb = div_u64_rem(tmp_mb, 1000, &kb);
725 debugf0("TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n", 723 edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
726 i, j, 724 i, j,
727 mb, kb, 725 mb, kb,
728 ((u64)tmp_mb) << 20L, 726 ((u64)tmp_mb) << 20L,
729 reg); 727 reg);
730 } 728 }
731 } 729 }
732 730
@@ -747,12 +745,12 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
747 tmp_mb = RIR_LIMIT(reg) >> 20; 745 tmp_mb = RIR_LIMIT(reg) >> 20;
748 rir_way = 1 << RIR_WAY(reg); 746 rir_way = 1 << RIR_WAY(reg);
749 mb = div_u64_rem(tmp_mb, 1000, &kb); 747 mb = div_u64_rem(tmp_mb, 1000, &kb);
750 debugf0("CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n", 748 edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
751 i, j, 749 i, j,
752 mb, kb, 750 mb, kb,
753 ((u64)tmp_mb) << 20L, 751 ((u64)tmp_mb) << 20L,
754 rir_way, 752 rir_way,
755 reg); 753 reg);
756 754
757 for (k = 0; k < rir_way; k++) { 755 for (k = 0; k < rir_way; k++) {
758 pci_read_config_dword(pvt->pci_tad[i], 756 pci_read_config_dword(pvt->pci_tad[i],
@@ -761,12 +759,12 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
761 tmp_mb = RIR_OFFSET(reg) << 6; 759 tmp_mb = RIR_OFFSET(reg) << 6;
762 760
763 mb = div_u64_rem(tmp_mb, 1000, &kb); 761 mb = div_u64_rem(tmp_mb, 1000, &kb);
764 debugf0("CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n", 762 edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
765 i, j, k, 763 i, j, k,
766 mb, kb, 764 mb, kb,
767 ((u64)tmp_mb) << 20L, 765 ((u64)tmp_mb) << 20L,
768 (u32)RIR_RNK_TGT(reg), 766 (u32)RIR_RNK_TGT(reg),
769 reg); 767 reg);
770 } 768 }
771 } 769 }
772 } 770 }
@@ -853,16 +851,16 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
853 if (sad_way > 0 && sad_interl == sad_pkg(reg, sad_way)) 851 if (sad_way > 0 && sad_interl == sad_pkg(reg, sad_way))
854 break; 852 break;
855 sad_interleave[sad_way] = sad_pkg(reg, sad_way); 853 sad_interleave[sad_way] = sad_pkg(reg, sad_way);
856 debugf0("SAD interleave #%d: %d\n", 854 edac_dbg(0, "SAD interleave #%d: %d\n",
857 sad_way, sad_interleave[sad_way]); 855 sad_way, sad_interleave[sad_way]);
858 } 856 }
859 debugf0("mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n", 857 edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
860 pvt->sbridge_dev->mc, 858 pvt->sbridge_dev->mc,
861 n_sads, 859 n_sads,
862 addr, 860 addr,
863 limit, 861 limit,
864 sad_way + 7, 862 sad_way + 7,
865 interleave_mode ? "" : "XOR[18:16]"); 863 interleave_mode ? "" : "XOR[18:16]");
866 if (interleave_mode) 864 if (interleave_mode)
867 idx = ((addr >> 6) ^ (addr >> 16)) & 7; 865 idx = ((addr >> 6) ^ (addr >> 16)) & 7;
868 else 866 else
@@ -884,8 +882,8 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
884 return -EINVAL; 882 return -EINVAL;
885 } 883 }
886 *socket = sad_interleave[idx]; 884 *socket = sad_interleave[idx];
887 debugf0("SAD interleave index: %d (wayness %d) = CPU socket %d\n", 885 edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
888 idx, sad_way, *socket); 886 idx, sad_way, *socket);
889 887
890 /* 888 /*
891 * Move to the proper node structure, in order to access the 889 * Move to the proper node structure, in order to access the
@@ -972,16 +970,16 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
972 970
973 offset = TAD_OFFSET(tad_offset); 971 offset = TAD_OFFSET(tad_offset);
974 972
975 debugf0("TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n", 973 edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
976 n_tads, 974 n_tads,
977 addr, 975 addr,
978 limit, 976 limit,
979 (u32)TAD_SOCK(reg), 977 (u32)TAD_SOCK(reg),
980 ch_way, 978 ch_way,
981 offset, 979 offset,
982 idx, 980 idx,
983 base_ch, 981 base_ch,
984 *channel_mask); 982 *channel_mask);
985 983
986 /* Calculate channel address */ 984 /* Calculate channel address */
987 /* Remove the TAD offset */ 985 /* Remove the TAD offset */
@@ -1017,11 +1015,11 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1017 1015
1018 limit = RIR_LIMIT(reg); 1016 limit = RIR_LIMIT(reg);
1019 mb = div_u64_rem(limit >> 20, 1000, &kb); 1017 mb = div_u64_rem(limit >> 20, 1000, &kb);
1020 debugf0("RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n", 1018 edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
1021 n_rir, 1019 n_rir,
1022 mb, kb, 1020 mb, kb,
1023 limit, 1021 limit,
1024 1 << RIR_WAY(reg)); 1022 1 << RIR_WAY(reg));
1025 if (ch_addr <= limit) 1023 if (ch_addr <= limit)
1026 break; 1024 break;
1027 } 1025 }
@@ -1042,12 +1040,12 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1042 &reg); 1040 &reg);
1043 *rank = RIR_RNK_TGT(reg); 1041 *rank = RIR_RNK_TGT(reg);
1044 1042
1045 debugf0("RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n", 1043 edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
1046 n_rir, 1044 n_rir,
1047 ch_addr, 1045 ch_addr,
1048 limit, 1046 limit,
1049 rir_way, 1047 rir_way,
1050 idx); 1048 idx);
1051 1049
1052 return 0; 1050 return 0;
1053} 1051}
@@ -1064,14 +1062,14 @@ static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
1064{ 1062{
1065 int i; 1063 int i;
1066 1064
1067 debugf0(__FILE__ ": %s()\n", __func__); 1065 edac_dbg(0, "\n");
1068 for (i = 0; i < sbridge_dev->n_devs; i++) { 1066 for (i = 0; i < sbridge_dev->n_devs; i++) {
1069 struct pci_dev *pdev = sbridge_dev->pdev[i]; 1067 struct pci_dev *pdev = sbridge_dev->pdev[i];
1070 if (!pdev) 1068 if (!pdev)
1071 continue; 1069 continue;
1072 debugf0("Removing dev %02x:%02x.%d\n", 1070 edac_dbg(0, "Removing dev %02x:%02x.%d\n",
1073 pdev->bus->number, 1071 pdev->bus->number,
1074 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 1072 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1075 pci_dev_put(pdev); 1073 pci_dev_put(pdev);
1076 } 1074 }
1077} 1075}
@@ -1177,10 +1175,9 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
1177 return -ENODEV; 1175 return -ENODEV;
1178 } 1176 }
1179 1177
1180 debugf0("Detected dev %02x:%d.%d PCI ID %04x:%04x\n", 1178 edac_dbg(0, "Detected dev %02x:%d.%d PCI ID %04x:%04x\n",
1181 bus, dev_descr->dev, 1179 bus, dev_descr->dev, dev_descr->func,
1182 dev_descr->func, 1180 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1183 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1184 1181
1185 /* 1182 /*
1186 * As stated on drivers/pci/search.c, the reference count for 1183 * As stated on drivers/pci/search.c, the reference count for
@@ -1297,10 +1294,10 @@ static int mci_bind_devs(struct mem_ctl_info *mci,
1297 goto error; 1294 goto error;
1298 } 1295 }
1299 1296
1300 debugf0("Associated PCI %02x.%02d.%d with dev = %p\n", 1297 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
1301 sbridge_dev->bus, 1298 sbridge_dev->bus,
1302 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 1299 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1303 pdev); 1300 pdev);
1304 } 1301 }
1305 1302
1306 /* Check if everything were registered */ 1303 /* Check if everything were registered */
@@ -1435,8 +1432,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
1435 * to the group of dimm's where the error may be happening. 1432 * to the group of dimm's where the error may be happening.
1436 */ 1433 */
1437 snprintf(msg, sizeof(msg), 1434 snprintf(msg, sizeof(msg),
1438 "count:%d%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d", 1435 "%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
1439 core_err_cnt,
1440 overflow ? " OVERFLOW" : "", 1436 overflow ? " OVERFLOW" : "",
1441 (uncorrected_error && recoverable) ? " recoverable" : "", 1437 (uncorrected_error && recoverable) ? " recoverable" : "",
1442 area_type, 1438 area_type,
@@ -1445,20 +1441,20 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
1445 channel_mask, 1441 channel_mask,
1446 rank); 1442 rank);
1447 1443
1448 debugf0("%s", msg); 1444 edac_dbg(0, "%s\n", msg);
1449 1445
1450 /* FIXME: need support for channel mask */ 1446 /* FIXME: need support for channel mask */
1451 1447
1452 /* Call the helper to output message */ 1448 /* Call the helper to output message */
1453 edac_mc_handle_error(tp_event, mci, 1449 edac_mc_handle_error(tp_event, mci, core_err_cnt,
1454 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, 1450 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
1455 channel, dimm, -1, 1451 channel, dimm, -1,
1456 optype, msg, m); 1452 optype, msg);
1457 return; 1453 return;
1458err_parsing: 1454err_parsing:
1459 edac_mc_handle_error(tp_event, mci, 0, 0, 0, 1455 edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
1460 -1, -1, -1, 1456 -1, -1, -1,
1461 msg, "", m); 1457 msg, "");
1462 1458
1463} 1459}
1464 1460
@@ -1592,8 +1588,7 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
1592 struct sbridge_pvt *pvt; 1588 struct sbridge_pvt *pvt;
1593 1589
1594 if (unlikely(!mci || !mci->pvt_info)) { 1590 if (unlikely(!mci || !mci->pvt_info)) {
1595 debugf0("MC: " __FILE__ ": %s(): dev = %p\n", 1591 edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
1596 __func__, &sbridge_dev->pdev[0]->dev);
1597 1592
1598 sbridge_printk(KERN_ERR, "Couldn't find mci handler\n"); 1593 sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
1599 return; 1594 return;
@@ -1601,15 +1596,13 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
1601 1596
1602 pvt = mci->pvt_info; 1597 pvt = mci->pvt_info;
1603 1598
1604 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", 1599 edac_dbg(0, "MC: mci = %p, dev = %p\n",
1605 __func__, mci, &sbridge_dev->pdev[0]->dev); 1600 mci, &sbridge_dev->pdev[0]->dev);
1606
1607 mce_unregister_decode_chain(&sbridge_mce_dec);
1608 1601
1609 /* Remove MC sysfs nodes */ 1602 /* Remove MC sysfs nodes */
1610 edac_mc_del_mc(mci->dev); 1603 edac_mc_del_mc(mci->pdev);
1611 1604
1612 debugf1("%s: free mci struct\n", mci->ctl_name); 1605 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
1613 kfree(mci->ctl_name); 1606 kfree(mci->ctl_name);
1614 edac_mc_free(mci); 1607 edac_mc_free(mci);
1615 sbridge_dev->mci = NULL; 1608 sbridge_dev->mci = NULL;
@@ -1640,8 +1633,8 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
1640 if (unlikely(!mci)) 1633 if (unlikely(!mci))
1641 return -ENOMEM; 1634 return -ENOMEM;
1642 1635
1643 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", 1636 edac_dbg(0, "MC: mci = %p, dev = %p\n",
1644 __func__, mci, &sbridge_dev->pdev[0]->dev); 1637 mci, &sbridge_dev->pdev[0]->dev);
1645 1638
1646 pvt = mci->pvt_info; 1639 pvt = mci->pvt_info;
1647 memset(pvt, 0, sizeof(*pvt)); 1640 memset(pvt, 0, sizeof(*pvt));
@@ -1672,17 +1665,15 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
1672 get_memory_layout(mci); 1665 get_memory_layout(mci);
1673 1666
1674 /* record ptr to the generic device */ 1667 /* record ptr to the generic device */
1675 mci->dev = &sbridge_dev->pdev[0]->dev; 1668 mci->pdev = &sbridge_dev->pdev[0]->dev;
1676 1669
1677 /* add this new MC control structure to EDAC's list of MCs */ 1670 /* add this new MC control structure to EDAC's list of MCs */
1678 if (unlikely(edac_mc_add_mc(mci))) { 1671 if (unlikely(edac_mc_add_mc(mci))) {
1679 debugf0("MC: " __FILE__ 1672 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1680 ": %s(): failed edac_mc_add_mc()\n", __func__);
1681 rc = -EINVAL; 1673 rc = -EINVAL;
1682 goto fail0; 1674 goto fail0;
1683 } 1675 }
1684 1676
1685 mce_register_decode_chain(&sbridge_mce_dec);
1686 return 0; 1677 return 0;
1687 1678
1688fail0: 1679fail0:
@@ -1725,7 +1716,8 @@ static int __devinit sbridge_probe(struct pci_dev *pdev,
1725 mc = 0; 1716 mc = 0;
1726 1717
1727 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { 1718 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
1728 debugf0("Registering MC#%d (%d of %d)\n", mc, mc + 1, num_mc); 1719 edac_dbg(0, "Registering MC#%d (%d of %d)\n",
1720 mc, mc + 1, num_mc);
1729 sbridge_dev->mc = mc++; 1721 sbridge_dev->mc = mc++;
1730 rc = sbridge_register_mci(sbridge_dev); 1722 rc = sbridge_register_mci(sbridge_dev);
1731 if (unlikely(rc < 0)) 1723 if (unlikely(rc < 0))
@@ -1755,7 +1747,7 @@ static void __devexit sbridge_remove(struct pci_dev *pdev)
1755{ 1747{
1756 struct sbridge_dev *sbridge_dev; 1748 struct sbridge_dev *sbridge_dev;
1757 1749
1758 debugf0(__FILE__ ": %s()\n", __func__); 1750 edac_dbg(0, "\n");
1759 1751
1760 /* 1752 /*
1761 * we have a trouble here: pdev value for removal will be wrong, since 1753 * we have a trouble here: pdev value for removal will be wrong, since
@@ -1804,15 +1796,17 @@ static int __init sbridge_init(void)
1804{ 1796{
1805 int pci_rc; 1797 int pci_rc;
1806 1798
1807 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1799 edac_dbg(2, "\n");
1808 1800
1809 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1801 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1810 opstate_init(); 1802 opstate_init();
1811 1803
1812 pci_rc = pci_register_driver(&sbridge_driver); 1804 pci_rc = pci_register_driver(&sbridge_driver);
1813 1805
1814 if (pci_rc >= 0) 1806 if (pci_rc >= 0) {
1807 mce_register_decode_chain(&sbridge_mce_dec);
1815 return 0; 1808 return 0;
1809 }
1816 1810
1817 sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n", 1811 sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
1818 pci_rc); 1812 pci_rc);
@@ -1826,8 +1820,9 @@ static int __init sbridge_init(void)
1826 */ 1820 */
1827static void __exit sbridge_exit(void) 1821static void __exit sbridge_exit(void)
1828{ 1822{
1829 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1823 edac_dbg(2, "\n");
1830 pci_unregister_driver(&sbridge_driver); 1824 pci_unregister_driver(&sbridge_driver);
1825 mce_unregister_decode_chain(&sbridge_mce_dec);
1831} 1826}
1832 1827
1833module_init(sbridge_init); 1828module_init(sbridge_init);
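
sb_edac also moves the MCE decode-chain hook-up out of the per-controller register/unregister paths and into module init/exit, so the notifier is live exactly while the PCI driver is registered. A sketch of the resulting lifetime, assuming the mce_{register,unregister}_decode_chain pair used above:

	/* Sketch of the notifier lifetime after the move above. */
	static int __init demo_init(void)
	{
		int rc = pci_register_driver(&sbridge_driver);

		if (rc < 0)
			return rc;

		mce_register_decode_chain(&sbridge_mce_dec); /* driver live */
		return 0;
	}

	static void __exit demo_exit(void)
	{
		pci_unregister_driver(&sbridge_driver);
		mce_unregister_decode_chain(&sbridge_mce_dec); /* last */
	}
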
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
index 7bb4614730db..1e904b7b79a0 100644
--- a/drivers/edac/tile_edac.c
+++ b/drivers/edac/tile_edac.c
@@ -69,12 +69,12 @@ static void tile_edac_check(struct mem_ctl_info *mci)
69 69
70 /* Check if the current error count is different from the saved one. */ 70 /* Check if the current error count is different from the saved one. */
71 if (mem_error.sbe_count != priv->ce_count) { 71 if (mem_error.sbe_count != priv->ce_count) {
72 dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node); 72 dev_dbg(mci->pdev, "ECC CE err on node %d\n", priv->node);
73 priv->ce_count = mem_error.sbe_count; 73 priv->ce_count = mem_error.sbe_count;
74 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 74 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
75 0, 0, 0, 75 0, 0, 0,
76 0, 0, -1, 76 0, 0, -1,
77 mci->ctl_name, "", NULL); 77 mci->ctl_name, "");
78 } 78 }
79} 79}
80 80
@@ -84,10 +84,10 @@ static void tile_edac_check(struct mem_ctl_info *mci)
84 */ 84 */
85static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci) 85static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
86{ 86{
87 struct csrow_info *csrow = &mci->csrows[0]; 87 struct csrow_info *csrow = mci->csrows[0];
88 struct tile_edac_priv *priv = mci->pvt_info; 88 struct tile_edac_priv *priv = mci->pvt_info;
89 struct mshim_mem_info mem_info; 89 struct mshim_mem_info mem_info;
90 struct dimm_info *dimm = csrow->channels[0].dimm; 90 struct dimm_info *dimm = csrow->channels[0]->dimm;
91 91
92 if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info, 92 if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info,
93 sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) != 93 sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) !=
@@ -149,7 +149,7 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
149 priv->node = pdev->id; 149 priv->node = pdev->id;
150 priv->hv_devhdl = hv_devhdl; 150 priv->hv_devhdl = hv_devhdl;
151 151
152 mci->dev = &pdev->dev; 152 mci->pdev = &pdev->dev;
153 mci->mtype_cap = MEM_FLAG_DDR2; 153 mci->mtype_cap = MEM_FLAG_DDR2;
154 mci->edac_ctl_cap = EDAC_FLAG_SECDED; 154 mci->edac_ctl_cap = EDAC_FLAG_SECDED;
155 155
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index 1ac7962d63ea..08a992693e62 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -103,10 +103,10 @@ static int how_many_channel(struct pci_dev *pdev)
103 103
104 pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b); 104 pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b);
105 if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */ 105 if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
106 debugf0("In single channel mode.\n"); 106 edac_dbg(0, "In single channel mode\n");
107 x38_channel_num = 1; 107 x38_channel_num = 1;
108 } else { 108 } else {
109 debugf0("In dual channel mode.\n"); 109 edac_dbg(0, "In dual channel mode\n");
110 x38_channel_num = 2; 110 x38_channel_num = 2;
111 } 111 }
112 112
@@ -151,7 +151,7 @@ static void x38_clear_error_info(struct mem_ctl_info *mci)
151{ 151{
152 struct pci_dev *pdev; 152 struct pci_dev *pdev;
153 153
154 pdev = to_pci_dev(mci->dev); 154 pdev = to_pci_dev(mci->pdev);
155 155
156 /* 156 /*
157 * Clear any error bits. 157 * Clear any error bits.
@@ -172,7 +172,7 @@ static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
172 struct pci_dev *pdev; 172 struct pci_dev *pdev;
173 void __iomem *window = mci->pvt_info; 173 void __iomem *window = mci->pvt_info;
174 174
175 pdev = to_pci_dev(mci->dev); 175 pdev = to_pci_dev(mci->pdev);
176 176
177 /* 177 /*
178 * This is a mess because there is no atomic way to read all the 178 * This is a mess because there is no atomic way to read all the
@@ -215,26 +215,26 @@ static void x38_process_error_info(struct mem_ctl_info *mci,
215 return; 215 return;
216 216
217 if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) { 217 if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
218 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 218 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
219 -1, -1, -1, 219 -1, -1, -1,
220 "UE overwrote CE", "", NULL); 220 "UE overwrote CE", "");
221 info->errsts = info->errsts2; 221 info->errsts = info->errsts2;
222 } 222 }
223 223
224 for (channel = 0; channel < x38_channel_num; channel++) { 224 for (channel = 0; channel < x38_channel_num; channel++) {
225 log = info->eccerrlog[channel]; 225 log = info->eccerrlog[channel];
226 if (log & X38_ECCERRLOG_UE) { 226 if (log & X38_ECCERRLOG_UE) {
227 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 227 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
228 0, 0, 0, 228 0, 0, 0,
229 eccerrlog_row(channel, log), 229 eccerrlog_row(channel, log),
230 -1, -1, 230 -1, -1,
231 "x38 UE", "", NULL); 231 "x38 UE", "");
232 } else if (log & X38_ECCERRLOG_CE) { 232 } else if (log & X38_ECCERRLOG_CE) {
233 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 233 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
234 0, 0, eccerrlog_syndrome(log), 234 0, 0, eccerrlog_syndrome(log),
235 eccerrlog_row(channel, log), 235 eccerrlog_row(channel, log),
236 -1, -1, 236 -1, -1,
237 "x38 CE", "", NULL); 237 "x38 CE", "");
238 } 238 }
239 } 239 }
240} 240}
@@ -243,7 +243,7 @@ static void x38_check(struct mem_ctl_info *mci)
243{ 243{
244 struct x38_error_info info; 244 struct x38_error_info info;
245 245
246 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 246 edac_dbg(1, "MC%d\n", mci->mc_idx);
247 x38_get_and_clear_error_info(mci, &info); 247 x38_get_and_clear_error_info(mci, &info);
248 x38_process_error_info(mci, &info); 248 x38_process_error_info(mci, &info);
249} 249}
@@ -331,7 +331,7 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
331 bool stacked; 331 bool stacked;
332 void __iomem *window; 332 void __iomem *window;
333 333
334 debugf0("MC: %s()\n", __func__); 334 edac_dbg(0, "MC:\n");
335 335
336 window = x38_map_mchbar(pdev); 336 window = x38_map_mchbar(pdev);
337 if (!window) 337 if (!window)
@@ -352,9 +352,9 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
352 if (!mci) 352 if (!mci)
353 return -ENOMEM; 353 return -ENOMEM;
354 354
355 debugf3("MC: %s(): init mci\n", __func__); 355 edac_dbg(3, "MC: init mci\n");
356 356
357 mci->dev = &pdev->dev; 357 mci->pdev = &pdev->dev;
358 mci->mtype_cap = MEM_FLAG_DDR2; 358 mci->mtype_cap = MEM_FLAG_DDR2;
359 359
360 mci->edac_ctl_cap = EDAC_FLAG_SECDED; 360 mci->edac_ctl_cap = EDAC_FLAG_SECDED;
@@ -378,7 +378,7 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
378 */ 378 */
379 for (i = 0; i < mci->nr_csrows; i++) { 379 for (i = 0; i < mci->nr_csrows; i++) {
380 unsigned long nr_pages; 380 unsigned long nr_pages;
381 struct csrow_info *csrow = &mci->csrows[i]; 381 struct csrow_info *csrow = mci->csrows[i];
382 382
383 nr_pages = drb_to_nr_pages(drbs, stacked, 383 nr_pages = drb_to_nr_pages(drbs, stacked,
384 i / X38_RANKS_PER_CHANNEL, 384 i / X38_RANKS_PER_CHANNEL,
@@ -388,7 +388,7 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
388 continue; 388 continue;
389 389
390 for (j = 0; j < x38_channel_num; j++) { 390 for (j = 0; j < x38_channel_num; j++) {
391 struct dimm_info *dimm = csrow->channels[j].dimm; 391 struct dimm_info *dimm = csrow->channels[j]->dimm;
392 392
393 dimm->nr_pages = nr_pages / x38_channel_num; 393 dimm->nr_pages = nr_pages / x38_channel_num;
394 dimm->grain = nr_pages << PAGE_SHIFT; 394 dimm->grain = nr_pages << PAGE_SHIFT;
@@ -402,12 +402,12 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
402 402
403 rc = -ENODEV; 403 rc = -ENODEV;
404 if (edac_mc_add_mc(mci)) { 404 if (edac_mc_add_mc(mci)) {
405 debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__); 405 edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
406 goto fail; 406 goto fail;
407 } 407 }
408 408
409 /* get this far and it's successful */ 409 /* get this far and it's successful */
410 debugf3("MC: %s(): success\n", __func__); 410 edac_dbg(3, "MC: success\n");
411 return 0; 411 return 0;
412 412
413fail: 413fail:
@@ -423,7 +423,7 @@ static int __devinit x38_init_one(struct pci_dev *pdev,
423{ 423{
424 int rc; 424 int rc;
425 425
426 debugf0("MC: %s()\n", __func__); 426 edac_dbg(0, "MC:\n");
427 427
428 if (pci_enable_device(pdev) < 0) 428 if (pci_enable_device(pdev) < 0)
429 return -EIO; 429 return -EIO;
@@ -439,7 +439,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
439{ 439{
440 struct mem_ctl_info *mci; 440 struct mem_ctl_info *mci;
441 441
442 debugf0("%s()\n", __func__); 442 edac_dbg(0, "\n");
443 443
444 mci = edac_mc_del_mc(&pdev->dev); 444 mci = edac_mc_del_mc(&pdev->dev);
445 if (!mci) 445 if (!mci)
@@ -472,7 +472,7 @@ static int __init x38_init(void)
472{ 472{
473 int pci_rc; 473 int pci_rc;
474 474
475 debugf3("MC: %s()\n", __func__); 475 edac_dbg(3, "MC:\n");
476 476
477 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 477 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
478 opstate_init(); 478 opstate_init();
@@ -486,14 +486,14 @@ static int __init x38_init(void)
486 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 486 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
487 PCI_DEVICE_ID_INTEL_X38_HB, NULL); 487 PCI_DEVICE_ID_INTEL_X38_HB, NULL);
488 if (!mci_pdev) { 488 if (!mci_pdev) {
489 debugf0("x38 pci_get_device fail\n"); 489 edac_dbg(0, "x38 pci_get_device fail\n");
490 pci_rc = -ENODEV; 490 pci_rc = -ENODEV;
491 goto fail1; 491 goto fail1;
492 } 492 }
493 493
494 pci_rc = x38_init_one(mci_pdev, x38_pci_tbl); 494 pci_rc = x38_init_one(mci_pdev, x38_pci_tbl);
495 if (pci_rc < 0) { 495 if (pci_rc < 0) {
496 debugf0("x38 init fail\n"); 496 edac_dbg(0, "x38 init fail\n");
497 pci_rc = -ENODEV; 497 pci_rc = -ENODEV;
498 goto fail1; 498 goto fail1;
499 } 499 }
@@ -513,7 +513,7 @@ fail0:
513 513
514static void __exit x38_exit(void) 514static void __exit x38_exit(void)
515{ 515{
516 debugf3("MC: %s()\n", __func__); 516 edac_dbg(3, "MC:\n");
517 517
518 pci_unregister_driver(&x38_driver); 518 pci_unregister_driver(&x38_driver);
519 if (!x38_registered) { 519 if (!x38_registered) {
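
Several of the conversions above (mci->csrows[i] instead of &mci->csrows[i], csrow->channels[j]->dimm instead of channels[j].dimm) track an EDAC core change in which the csrow and channel tables became arrays of pointers. A compact sketch of the new access pattern; mci and the loop variables are placeholders, and nr_channels is assumed from the surrounding EDAC core:

	for (i = 0; i < mci->nr_csrows; i++) {
		struct csrow_info *csrow = mci->csrows[i];	/* was: &mci->csrows[i] */

		for (j = 0; j < csrow->nr_channels; j++) {
			struct dimm_info *dimm = csrow->channels[j]->dimm;	/* was: channels[j].dimm */

			dimm->nr_pages = 0;	/* per-DIMM setup would go here */
		}
	}

In the same sweep, the edac_mc_handle_error() calls gain an error-count argument (the literal 1 in these hunks) and drop the trailing detail string, and mci->dev is renamed mci->pdev throughout.
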
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 29c5cf852efc..e175c8ed4ec4 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -21,12 +21,30 @@ config EXTCON_GPIO
21 Say Y here to enable GPIO based extcon support. Note that GPIO 21 Say Y here to enable GPIO based extcon support. Note that GPIO
22 extcon supports single state per extcon instance. 22 extcon supports single state per extcon instance.
23 23
24config EXTCON_MAX77693
25 tristate "MAX77693 EXTCON Support"
26 depends on MFD_MAX77693
27 select IRQ_DOMAIN
28 select REGMAP_I2C
29 help
30 If you say yes here you get support for the MUIC device of
31 Maxim MAX77693 PMIC. The MAX77693 MUIC is a USB port accessory
32 detector and switch.
33
24config EXTCON_MAX8997 34config EXTCON_MAX8997
25 tristate "MAX8997 EXTCON Support" 35 tristate "MAX8997 EXTCON Support"
26 depends on MFD_MAX8997 36 depends on MFD_MAX8997 && IRQ_DOMAIN
27 help 37 help
28 If you say yes here you get support for the MUIC device of 38 If you say yes here you get support for the MUIC device of
29 Maxim MAX8997 PMIC. The MAX8997 MUIC is a USB port accessory 39 Maxim MAX8997 PMIC. The MAX8997 MUIC is a USB port accessory
30 detector and switch. 40 detector and switch.
31 41
42config EXTCON_ARIZONA
43 tristate "Wolfson Arizona EXTCON support"
44 depends on MFD_ARIZONA
45 help
46 Say Y here to enable support for external accessory detection
47 with Wolfson Arizona devices. These are audio CODECs with
48 advanced audio accessory detection support.
49
32endif # MULTISTATE_SWITCH 50endif # MULTISTATE_SWITCH
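
Taken together, the new entries extend the existing EXTCON menu; a hypothetical .config fragment enabling everything added here, assuming the MFD dependencies named above are already selected:

CONFIG_EXTCON=y
CONFIG_EXTCON_GPIO=y
CONFIG_EXTCON_MAX77693=m
CONFIG_EXTCON_MAX8997=m
CONFIG_EXTCON_ARIZONA=m
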
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 86020bdb6da0..88961b332348 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -4,4 +4,6 @@
4 4
5obj-$(CONFIG_EXTCON) += extcon_class.o 5obj-$(CONFIG_EXTCON) += extcon_class.o
6obj-$(CONFIG_EXTCON_GPIO) += extcon_gpio.o 6obj-$(CONFIG_EXTCON_GPIO) += extcon_gpio.o
7obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
7obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o 8obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o
9obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
new file mode 100644
index 000000000000..427a289f32a5
--- /dev/null
+++ b/drivers/extcon/extcon-arizona.c
@@ -0,0 +1,490 @@
1/*
  2 * extcon-arizona.c - Extcon driver for Wolfson Arizona devices
3 *
4 * Copyright (C) 2012 Wolfson Microelectronics plc
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/i2c.h>
20#include <linux/slab.h>
21#include <linux/interrupt.h>
22#include <linux/err.h>
23#include <linux/gpio.h>
24#include <linux/platform_device.h>
25#include <linux/pm_runtime.h>
26#include <linux/regulator/consumer.h>
27#include <linux/extcon.h>
28
29#include <linux/mfd/arizona/core.h>
30#include <linux/mfd/arizona/pdata.h>
31#include <linux/mfd/arizona/registers.h>
32
33struct arizona_extcon_info {
34 struct device *dev;
35 struct arizona *arizona;
36 struct mutex lock;
37 struct regulator *micvdd;
38
39 int micd_mode;
40 const struct arizona_micd_config *micd_modes;
41 int micd_num_modes;
42
43 bool micd_reva;
44
45 bool mic;
46 bool detecting;
47 int jack_flips;
48
49 struct extcon_dev edev;
50};
51
52static const struct arizona_micd_config micd_default_modes[] = {
53 { ARIZONA_ACCDET_SRC, 1 << ARIZONA_MICD_BIAS_SRC_SHIFT, 0 },
54 { 0, 2 << ARIZONA_MICD_BIAS_SRC_SHIFT, 1 },
55};
56
57#define ARIZONA_CABLE_MECHANICAL 0
58#define ARIZONA_CABLE_MICROPHONE 1
59#define ARIZONA_CABLE_HEADPHONE 2
60
61static const char *arizona_cable[] = {
62 "Mechanical",
63 "Microphone",
64 "Headphone",
65 NULL,
66};
67
68static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
69{
70 struct arizona *arizona = info->arizona;
71
72 gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio,
73 info->micd_modes[mode].gpio);
74 regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
75 ARIZONA_MICD_BIAS_SRC_MASK,
76 info->micd_modes[mode].bias);
77 regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
78 ARIZONA_ACCDET_SRC, info->micd_modes[mode].src);
79
80 info->micd_mode = mode;
81
82 dev_dbg(arizona->dev, "Set jack polarity to %d\n", mode);
83}
84
85static void arizona_start_mic(struct arizona_extcon_info *info)
86{
87 struct arizona *arizona = info->arizona;
88 bool change;
89 int ret;
90
91 info->detecting = true;
92 info->mic = false;
93 info->jack_flips = 0;
94
95 /* Microphone detection can't use idle mode */
96 pm_runtime_get(info->dev);
97
98 ret = regulator_enable(info->micvdd);
99 if (ret != 0) {
100 dev_err(arizona->dev, "Failed to enable MICVDD: %d\n",
101 ret);
102 }
103
104 if (info->micd_reva) {
105 regmap_write(arizona->regmap, 0x80, 0x3);
106 regmap_write(arizona->regmap, 0x294, 0);
107 regmap_write(arizona->regmap, 0x80, 0x0);
108 }
109
110 regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
111 ARIZONA_MICD_ENA, ARIZONA_MICD_ENA,
112 &change);
113 if (!change) {
114 regulator_disable(info->micvdd);
115 pm_runtime_put_autosuspend(info->dev);
116 }
117}
118
119static void arizona_stop_mic(struct arizona_extcon_info *info)
120{
121 struct arizona *arizona = info->arizona;
122 bool change;
123
124 regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
125 ARIZONA_MICD_ENA, 0,
126 &change);
127
128 if (info->micd_reva) {
129 regmap_write(arizona->regmap, 0x80, 0x3);
130 regmap_write(arizona->regmap, 0x294, 2);
131 regmap_write(arizona->regmap, 0x80, 0x0);
132 }
133
134 if (change) {
135 regulator_disable(info->micvdd);
136 pm_runtime_put_autosuspend(info->dev);
137 }
138}
139
140static irqreturn_t arizona_micdet(int irq, void *data)
141{
142 struct arizona_extcon_info *info = data;
143 struct arizona *arizona = info->arizona;
144 unsigned int val;
145 int ret;
146
147 mutex_lock(&info->lock);
148
149 ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_3, &val);
150 if (ret != 0) {
151 dev_err(arizona->dev, "Failed to read MICDET: %d\n", ret);
152 return IRQ_NONE;
153 }
154
155 dev_dbg(arizona->dev, "MICDET: %x\n", val);
156
157 if (!(val & ARIZONA_MICD_VALID)) {
158 dev_warn(arizona->dev, "Microphone detection state invalid\n");
159 mutex_unlock(&info->lock);
160 return IRQ_NONE;
161 }
162
163 /* Due to jack detect this should never happen */
164 if (!(val & ARIZONA_MICD_STS)) {
165 dev_warn(arizona->dev, "Detected open circuit\n");
166 info->detecting = false;
167 goto handled;
168 }
169
170	/* If we got a high impedance we should have a headset, report it. */
171 if (info->detecting && (val & 0x400)) {
172 ret = extcon_update_state(&info->edev,
173 1 << ARIZONA_CABLE_MICROPHONE |
174 1 << ARIZONA_CABLE_HEADPHONE,
175 1 << ARIZONA_CABLE_MICROPHONE |
176 1 << ARIZONA_CABLE_HEADPHONE);
177
178 if (ret != 0)
179 dev_err(arizona->dev, "Headset report failed: %d\n",
180 ret);
181
182 info->mic = true;
183 info->detecting = false;
184 goto handled;
185 }
186
187	/* If we detected a lower impedance during initial startup
188	 * then we probably have the wrong polarity; flip it. Don't
189	 * do this for the lowest impedances, to speed up detection of
190	 * plain headphones. If both polarities report a low
191	 * impedance then give up and report headphones.
192 */
193 if (info->detecting && (val & 0x3f8)) {
194 info->jack_flips++;
195
196 if (info->jack_flips >= info->micd_num_modes) {
197 dev_dbg(arizona->dev, "Detected headphone\n");
198 info->detecting = false;
199 arizona_stop_mic(info);
200
201 ret = extcon_set_cable_state_(&info->edev,
202 ARIZONA_CABLE_HEADPHONE,
203 true);
204 if (ret != 0)
205 dev_err(arizona->dev,
206 "Headphone report failed: %d\n",
207 ret);
208 } else {
209 info->micd_mode++;
210 if (info->micd_mode == info->micd_num_modes)
211 info->micd_mode = 0;
212 arizona_extcon_set_mode(info, info->micd_mode);
213
214 info->jack_flips++;
215 }
216
217 goto handled;
218 }
219
220 /*
221 * If we're still detecting and we detect a short then we've
222 * got a headphone. Otherwise it's a button press, the
223 * button reporting is stubbed out for now.
224 */
225 if (val & 0x3fc) {
226 if (info->mic) {
227 dev_dbg(arizona->dev, "Mic button detected\n");
228
229 } else if (info->detecting) {
230 dev_dbg(arizona->dev, "Headphone detected\n");
231 info->detecting = false;
232 arizona_stop_mic(info);
233
234 ret = extcon_set_cable_state_(&info->edev,
235 ARIZONA_CABLE_HEADPHONE,
236 true);
237 if (ret != 0)
238 dev_err(arizona->dev,
239 "Headphone report failed: %d\n",
240 ret);
241 } else {
242 dev_warn(arizona->dev, "Button with no mic: %x\n",
243 val);
244 }
245 } else {
246 dev_dbg(arizona->dev, "Mic button released\n");
247 }
248
249handled:
250 pm_runtime_mark_last_busy(info->dev);
251 mutex_unlock(&info->lock);
252
253 return IRQ_HANDLED;
254}
255
256static irqreturn_t arizona_jackdet(int irq, void *data)
257{
258 struct arizona_extcon_info *info = data;
259 struct arizona *arizona = info->arizona;
260 unsigned int val;
261 int ret;
262
263 pm_runtime_get_sync(info->dev);
264
265 mutex_lock(&info->lock);
266
267 ret = regmap_read(arizona->regmap, ARIZONA_AOD_IRQ_RAW_STATUS, &val);
268 if (ret != 0) {
269 dev_err(arizona->dev, "Failed to read jackdet status: %d\n",
270 ret);
271 mutex_unlock(&info->lock);
272 pm_runtime_put_autosuspend(info->dev);
273 return IRQ_NONE;
274 }
275
276 if (val & ARIZONA_JD1_STS) {
277 dev_dbg(arizona->dev, "Detected jack\n");
278 ret = extcon_set_cable_state_(&info->edev,
279 ARIZONA_CABLE_MECHANICAL, true);
280
281 if (ret != 0)
282 dev_err(arizona->dev, "Mechanical report failed: %d\n",
283 ret);
284
285 arizona_start_mic(info);
286 } else {
287 dev_dbg(arizona->dev, "Detected jack removal\n");
288
289 arizona_stop_mic(info);
290
291 ret = extcon_update_state(&info->edev, 0xffffffff, 0);
292 if (ret != 0)
293 dev_err(arizona->dev, "Removal report failed: %d\n",
294 ret);
295 }
296
297 mutex_unlock(&info->lock);
298
299 pm_runtime_mark_last_busy(info->dev);
300 pm_runtime_put_autosuspend(info->dev);
301
302 return IRQ_HANDLED;
303}
304
305static int __devinit arizona_extcon_probe(struct platform_device *pdev)
306{
307 struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
308 struct arizona_pdata *pdata;
309 struct arizona_extcon_info *info;
310 int ret, mode;
311
312 pdata = dev_get_platdata(arizona->dev);
313
314 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
315 if (!info) {
316 dev_err(&pdev->dev, "failed to allocate memory\n");
317 ret = -ENOMEM;
318 goto err;
319 }
320
321 info->micvdd = devm_regulator_get(arizona->dev, "MICVDD");
322 if (IS_ERR(info->micvdd)) {
323 ret = PTR_ERR(info->micvdd);
324 dev_err(arizona->dev, "Failed to get MICVDD: %d\n", ret);
325 goto err;
326 }
327
328 mutex_init(&info->lock);
329 info->arizona = arizona;
330 info->dev = &pdev->dev;
331 info->detecting = true;
332 platform_set_drvdata(pdev, info);
333
334 switch (arizona->type) {
335 case WM5102:
336 switch (arizona->rev) {
337 case 0:
338 info->micd_reva = true;
339 break;
340 default:
341 break;
342 }
343 break;
344 default:
345 break;
346 }
347
348 info->edev.name = "Headset Jack";
349 info->edev.supported_cable = arizona_cable;
350
351 ret = extcon_dev_register(&info->edev, arizona->dev);
352 if (ret < 0) {
353		dev_err(arizona->dev, "extcon_dev_register() failed: %d\n",
354 ret);
355 goto err;
356 }
357
358 if (pdata->num_micd_configs) {
359 info->micd_modes = pdata->micd_configs;
360 info->micd_num_modes = pdata->num_micd_configs;
361 } else {
362 info->micd_modes = micd_default_modes;
363 info->micd_num_modes = ARRAY_SIZE(micd_default_modes);
364 }
365
366 if (arizona->pdata.micd_pol_gpio > 0) {
367 if (info->micd_modes[0].gpio)
368 mode = GPIOF_OUT_INIT_HIGH;
369 else
370 mode = GPIOF_OUT_INIT_LOW;
371
372 ret = devm_gpio_request_one(&pdev->dev,
373 arizona->pdata.micd_pol_gpio,
374 mode,
375 "MICD polarity");
376 if (ret != 0) {
377 dev_err(arizona->dev, "Failed to request GPIO%d: %d\n",
378 arizona->pdata.micd_pol_gpio, ret);
379 goto err_register;
380 }
381 }
382
383 arizona_extcon_set_mode(info, 0);
384
385 pm_runtime_enable(&pdev->dev);
386 pm_runtime_idle(&pdev->dev);
387 pm_runtime_get_sync(&pdev->dev);
388
389 ret = arizona_request_irq(arizona, ARIZONA_IRQ_JD_RISE,
390 "JACKDET rise", arizona_jackdet, info);
391 if (ret != 0) {
392 dev_err(&pdev->dev, "Failed to get JACKDET rise IRQ: %d\n",
393 ret);
394 goto err_register;
395 }
396
397 ret = arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 1);
398 if (ret != 0) {
399 dev_err(&pdev->dev, "Failed to set JD rise IRQ wake: %d\n",
400 ret);
401 goto err_rise;
402 }
403
404 ret = arizona_request_irq(arizona, ARIZONA_IRQ_JD_FALL,
405 "JACKDET fall", arizona_jackdet, info);
406 if (ret != 0) {
407 dev_err(&pdev->dev, "Failed to get JD fall IRQ: %d\n", ret);
408 goto err_rise_wake;
409 }
410
411 ret = arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_FALL, 1);
412 if (ret != 0) {
413 dev_err(&pdev->dev, "Failed to set JD fall IRQ wake: %d\n",
414 ret);
415 goto err_fall;
416 }
417
418 ret = arizona_request_irq(arizona, ARIZONA_IRQ_MICDET,
419 "MICDET", arizona_micdet, info);
420 if (ret != 0) {
421 dev_err(&pdev->dev, "Failed to get MICDET IRQ: %d\n", ret);
422 goto err_fall_wake;
423 }
424
425 regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
426 ARIZONA_MICD_BIAS_STARTTIME_MASK |
427 ARIZONA_MICD_RATE_MASK,
428 7 << ARIZONA_MICD_BIAS_STARTTIME_SHIFT |
429 8 << ARIZONA_MICD_RATE_SHIFT);
430
431 arizona_clk32k_enable(arizona);
432 regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_DEBOUNCE,
433 ARIZONA_JD1_DB, ARIZONA_JD1_DB);
434 regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE,
435 ARIZONA_JD1_ENA, ARIZONA_JD1_ENA);
436
437 pm_runtime_put(&pdev->dev);
438
439 return 0;
440
441err_fall_wake:
442 arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_FALL, 0);
443err_fall:
444 arizona_free_irq(arizona, ARIZONA_IRQ_JD_FALL, info);
445err_rise_wake:
446 arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 0);
447err_rise:
448 arizona_free_irq(arizona, ARIZONA_IRQ_JD_RISE, info);
449err_register:
450 pm_runtime_disable(&pdev->dev);
451 extcon_dev_unregister(&info->edev);
452err:
453 return ret;
454}
455
456static int __devexit arizona_extcon_remove(struct platform_device *pdev)
457{
458 struct arizona_extcon_info *info = platform_get_drvdata(pdev);
459 struct arizona *arizona = info->arizona;
460
461 pm_runtime_disable(&pdev->dev);
462
463 arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 0);
464 arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_FALL, 0);
465 arizona_free_irq(arizona, ARIZONA_IRQ_MICDET, info);
466 arizona_free_irq(arizona, ARIZONA_IRQ_JD_RISE, info);
467 arizona_free_irq(arizona, ARIZONA_IRQ_JD_FALL, info);
468 regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE,
469 ARIZONA_JD1_ENA, 0);
470 arizona_clk32k_disable(arizona);
471 extcon_dev_unregister(&info->edev);
472
473 return 0;
474}
475
476static struct platform_driver arizona_extcon_driver = {
477 .driver = {
478 .name = "arizona-extcon",
479 .owner = THIS_MODULE,
480 },
481 .probe = arizona_extcon_probe,
482 .remove = __devexit_p(arizona_extcon_remove),
483};
484
485module_platform_driver(arizona_extcon_driver);
486
487MODULE_DESCRIPTION("Arizona Extcon driver");
488MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
489MODULE_LICENSE("GPL");
490MODULE_ALIAS("platform:extcon-arizona");
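
The driver reports cables by their index in arizona_cable[], and extcon_update_state() takes a (mask, state) pair of bitmaps over those indices. A small illustration of how the calls in arizona_micdet() and arizona_jackdet() compose; the values mirror the driver, but the standalone snippet is illustrative:

	u32 mask = (1 << ARIZONA_CABLE_MICROPHONE) |
		   (1 << ARIZONA_CABLE_HEADPHONE);

	/* Headset detected: set both of the bits named in the mask. */
	extcon_update_state(&info->edev, mask, mask);

	/* Jack removal: consider every bit and clear them all. */
	extcon_update_state(&info->edev, 0xffffffff, 0);
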
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
new file mode 100644
index 000000000000..920a609b2c35
--- /dev/null
+++ b/drivers/extcon/extcon-max77693.c
@@ -0,0 +1,779 @@
1/*
2 * extcon-max77693.c - MAX77693 extcon driver to support MAX77693 MUIC
3 *
  4 * Copyright (C) 2012 Samsung Electronics
5 * Chanwoo Choi <cw00.choi@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/i2c.h>
21#include <linux/slab.h>
22#include <linux/interrupt.h>
23#include <linux/err.h>
24#include <linux/platform_device.h>
25#include <linux/mfd/max77693.h>
26#include <linux/mfd/max77693-private.h>
27#include <linux/extcon.h>
28#include <linux/regmap.h>
29#include <linux/irqdomain.h>
30
31#define DEV_NAME "max77693-muic"
32
33/* MAX77693 MUIC - STATUS1~3 Register */
34#define STATUS1_ADC_SHIFT (0)
35#define STATUS1_ADCLOW_SHIFT (5)
36#define STATUS1_ADCERR_SHIFT (6)
37#define STATUS1_ADC1K_SHIFT (7)
38#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
39#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
40#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
41#define STATUS1_ADC1K_MASK (0x1 << STATUS1_ADC1K_SHIFT)
42
43#define STATUS2_CHGTYP_SHIFT (0)
44#define STATUS2_CHGDETRUN_SHIFT (3)
45#define STATUS2_DCDTMR_SHIFT (4)
46#define STATUS2_DXOVP_SHIFT (5)
47#define STATUS2_VBVOLT_SHIFT (6)
48#define STATUS2_VIDRM_SHIFT (7)
49#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
50#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
51#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
52#define STATUS2_DXOVP_MASK (0x1 << STATUS2_DXOVP_SHIFT)
53#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
54#define STATUS2_VIDRM_MASK (0x1 << STATUS2_VIDRM_SHIFT)
55
56#define STATUS3_OVP_SHIFT (2)
57#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
58
59/* MAX77693 CDETCTRL1~2 register */
60#define CDETCTRL1_CHGDETEN_SHIFT (0)
61#define CDETCTRL1_CHGTYPMAN_SHIFT (1)
62#define CDETCTRL1_DCDEN_SHIFT (2)
63#define CDETCTRL1_DCD2SCT_SHIFT (3)
64#define CDETCTRL1_CDDELAY_SHIFT (4)
65#define CDETCTRL1_DCDCPL_SHIFT (5)
66#define CDETCTRL1_CDPDET_SHIFT (7)
67#define CDETCTRL1_CHGDETEN_MASK (0x1 << CDETCTRL1_CHGDETEN_SHIFT)
68#define CDETCTRL1_CHGTYPMAN_MASK (0x1 << CDETCTRL1_CHGTYPMAN_SHIFT)
69#define CDETCTRL1_DCDEN_MASK (0x1 << CDETCTRL1_DCDEN_SHIFT)
70#define CDETCTRL1_DCD2SCT_MASK (0x1 << CDETCTRL1_DCD2SCT_SHIFT)
71#define CDETCTRL1_CDDELAY_MASK (0x1 << CDETCTRL1_CDDELAY_SHIFT)
72#define CDETCTRL1_DCDCPL_MASK (0x1 << CDETCTRL1_DCDCPL_SHIFT)
73#define CDETCTRL1_CDPDET_MASK (0x1 << CDETCTRL1_CDPDET_SHIFT)
74
75#define CDETCTRL2_VIDRMEN_SHIFT (1)
76#define CDETCTRL2_DXOVPEN_SHIFT (3)
77#define CDETCTRL2_VIDRMEN_MASK (0x1 << CDETCTRL2_VIDRMEN_SHIFT)
78#define CDETCTRL2_DXOVPEN_MASK (0x1 << CDETCTRL2_DXOVPEN_SHIFT)
79
80/* MAX77693 MUIC - CONTROL1~3 register */
81#define COMN1SW_SHIFT (0)
82#define COMP2SW_SHIFT (3)
83#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
84#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
85#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
86#define CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \
87 | (1 << COMN1SW_SHIFT))
88#define CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
89 | (2 << COMN1SW_SHIFT))
90#define CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \
91 | (3 << COMN1SW_SHIFT))
92#define CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
93 | (0 << COMN1SW_SHIFT))
94
95#define CONTROL2_LOWPWR_SHIFT (0)
96#define CONTROL2_ADCEN_SHIFT (1)
97#define CONTROL2_CPEN_SHIFT (2)
98#define CONTROL2_SFOUTASRT_SHIFT (3)
99#define CONTROL2_SFOUTORD_SHIFT (4)
100#define CONTROL2_ACCDET_SHIFT (5)
101#define CONTROL2_USBCPINT_SHIFT (6)
102#define CONTROL2_RCPS_SHIFT (7)
103#define CONTROL2_LOWPWR_MASK (0x1 << CONTROL2_LOWPWR_SHIFT)
104#define CONTROL2_ADCEN_MASK (0x1 << CONTROL2_ADCEN_SHIFT)
105#define CONTROL2_CPEN_MASK (0x1 << CONTROL2_CPEN_SHIFT)
106#define CONTROL2_SFOUTASRT_MASK (0x1 << CONTROL2_SFOUTASRT_SHIFT)
107#define CONTROL2_SFOUTORD_MASK (0x1 << CONTROL2_SFOUTORD_SHIFT)
108#define CONTROL2_ACCDET_MASK (0x1 << CONTROL2_ACCDET_SHIFT)
109#define CONTROL2_USBCPINT_MASK (0x1 << CONTROL2_USBCPINT_SHIFT)
110#define CONTROL2_RCPS_MASK (0x1 << CONTROL2_RCPS_SHIFT)
111
112#define CONTROL3_JIGSET_SHIFT (0)
113#define CONTROL3_BTLDSET_SHIFT (2)
114#define CONTROL3_ADCDBSET_SHIFT (4)
115#define CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
116#define CONTROL3_BTLDSET_MASK (0x3 << CONTROL3_BTLDSET_SHIFT)
117#define CONTROL3_ADCDBSET_MASK (0x3 << CONTROL3_ADCDBSET_SHIFT)
118
119enum max77693_muic_adc_debounce_time {
120 ADC_DEBOUNCE_TIME_5MS = 0,
121 ADC_DEBOUNCE_TIME_10MS,
122 ADC_DEBOUNCE_TIME_25MS,
123 ADC_DEBOUNCE_TIME_38_62MS,
124};
125
126struct max77693_muic_info {
127 struct device *dev;
128 struct max77693_dev *max77693;
129 struct extcon_dev *edev;
130 int prev_adc;
131 int prev_adc_gnd;
132 int prev_chg_type;
133 u8 status[2];
134
135 int irq;
136 struct work_struct irq_work;
137 struct mutex mutex;
138};
139
140enum max77693_muic_charger_type {
141 MAX77693_CHARGER_TYPE_NONE = 0,
142 MAX77693_CHARGER_TYPE_USB,
143 MAX77693_CHARGER_TYPE_DOWNSTREAM_PORT,
144 MAX77693_CHARGER_TYPE_DEDICATED_CHG,
145 MAX77693_CHARGER_TYPE_APPLE_500MA,
146 MAX77693_CHARGER_TYPE_APPLE_1A_2A,
147 MAX77693_CHARGER_TYPE_DEAD_BATTERY = 7,
148};
149
150/**
151 * struct max77693_muic_irq
152 * @irq: index into the irq list of the MUIC device
153 * @name: name of the irq
154 * @virq: virtual irq obtained through the irq domain
155 */
156struct max77693_muic_irq {
157 unsigned int irq;
158 const char *name;
159 unsigned int virq;
160};
161
162static struct max77693_muic_irq muic_irqs[] = {
163 { MAX77693_MUIC_IRQ_INT1_ADC, "muic-ADC" },
164 { MAX77693_MUIC_IRQ_INT1_ADC_LOW, "muic-ADCLOW" },
165 { MAX77693_MUIC_IRQ_INT1_ADC_ERR, "muic-ADCError" },
166 { MAX77693_MUIC_IRQ_INT1_ADC1K, "muic-ADC1K" },
167 { MAX77693_MUIC_IRQ_INT2_CHGTYP, "muic-CHGTYP" },
168 { MAX77693_MUIC_IRQ_INT2_CHGDETREUN, "muic-CHGDETREUN" },
169 { MAX77693_MUIC_IRQ_INT2_DCDTMR, "muic-DCDTMR" },
170 { MAX77693_MUIC_IRQ_INT2_DXOVP, "muic-DXOVP" },
171 { MAX77693_MUIC_IRQ_INT2_VBVOLT, "muic-VBVOLT" },
172 { MAX77693_MUIC_IRQ_INT2_VIDRM, "muic-VIDRM" },
173 { MAX77693_MUIC_IRQ_INT3_EOC, "muic-EOC" },
174 { MAX77693_MUIC_IRQ_INT3_CGMBC, "muic-CGMBC" },
175 { MAX77693_MUIC_IRQ_INT3_OVP, "muic-OVP" },
176 { MAX77693_MUIC_IRQ_INT3_MBCCHG_ERR, "muic-MBCCHG_ERR" },
177 { MAX77693_MUIC_IRQ_INT3_CHG_ENABLED, "muic-CHG_ENABLED" },
178 { MAX77693_MUIC_IRQ_INT3_BAT_DET, "muic-BAT_DET" },
179};
180
181/* Define supported accessory type */
182enum max77693_muic_acc_type {
183 MAX77693_MUIC_ADC_GROUND = 0x0,
184 MAX77693_MUIC_ADC_SEND_END_BUTTON,
185 MAX77693_MUIC_ADC_REMOTE_S1_BUTTON,
186 MAX77693_MUIC_ADC_REMOTE_S2_BUTTON,
187 MAX77693_MUIC_ADC_REMOTE_S3_BUTTON,
188 MAX77693_MUIC_ADC_REMOTE_S4_BUTTON,
189 MAX77693_MUIC_ADC_REMOTE_S5_BUTTON,
190 MAX77693_MUIC_ADC_REMOTE_S6_BUTTON,
191 MAX77693_MUIC_ADC_REMOTE_S7_BUTTON,
192 MAX77693_MUIC_ADC_REMOTE_S8_BUTTON,
193 MAX77693_MUIC_ADC_REMOTE_S9_BUTTON,
194 MAX77693_MUIC_ADC_REMOTE_S10_BUTTON,
195 MAX77693_MUIC_ADC_REMOTE_S11_BUTTON,
196 MAX77693_MUIC_ADC_REMOTE_S12_BUTTON,
197 MAX77693_MUIC_ADC_RESERVED_ACC_1,
198 MAX77693_MUIC_ADC_RESERVED_ACC_2,
199 MAX77693_MUIC_ADC_RESERVED_ACC_3,
200 MAX77693_MUIC_ADC_RESERVED_ACC_4,
201 MAX77693_MUIC_ADC_RESERVED_ACC_5,
202 MAX77693_MUIC_ADC_CEA936_AUDIO,
203 MAX77693_MUIC_ADC_PHONE_POWERED_DEV,
204 MAX77693_MUIC_ADC_TTY_CONVERTER,
205 MAX77693_MUIC_ADC_UART_CABLE,
206 MAX77693_MUIC_ADC_CEA936A_TYPE1_CHG,
207 MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF,
208 MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON,
209 MAX77693_MUIC_ADC_AV_CABLE_NOLOAD,
210 MAX77693_MUIC_ADC_CEA936A_TYPE2_CHG,
211 MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF,
212 MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON,
213 MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE,
214 MAX77693_MUIC_ADC_OPEN,
215
216	/* The accessories below share the same ADC value, so the
217	   ADCLow and ADC1K bits are used to tell them apart */
218 MAX77693_MUIC_GND_USB_OTG = 0x100, /* ADC:0x0, ADCLow:0, ADC1K:0 */
219 MAX77693_MUIC_GND_AV_CABLE_LOAD = 0x102,/* ADC:0x0, ADCLow:1, ADC1K:0 */
220 MAX77693_MUIC_GND_MHL_CABLE = 0x103, /* ADC:0x0, ADCLow:1, ADC1K:1 */
221};
222
223/* The MAX77693 MUIC device supports the list of accessories (external connectors) below */
224const char *max77693_extcon_cable[] = {
225 [0] = "USB",
226 [1] = "USB-Host",
227 [2] = "TA",
228 [3] = "Fast-charger",
229 [4] = "Slow-charger",
230 [5] = "Charge-downstream",
231 [6] = "MHL",
232 [7] = "Audio-video-load",
233 [8] = "Audio-video-noload",
234 [9] = "JIG",
235
236 NULL,
237};
238
239static int max77693_muic_set_debounce_time(struct max77693_muic_info *info,
240 enum max77693_muic_adc_debounce_time time)
241{
242 int ret = 0;
243 u8 ctrl3;
244
245 switch (time) {
246 case ADC_DEBOUNCE_TIME_5MS:
247 case ADC_DEBOUNCE_TIME_10MS:
248 case ADC_DEBOUNCE_TIME_25MS:
249 case ADC_DEBOUNCE_TIME_38_62MS:
250 ret = max77693_read_reg(info->max77693->regmap_muic,
251 MAX77693_MUIC_REG_CTRL3, &ctrl3);
252 ctrl3 &= ~CONTROL3_ADCDBSET_MASK;
253 ctrl3 |= (time << CONTROL3_ADCDBSET_SHIFT);
254
255 ret = max77693_write_reg(info->max77693->regmap_muic,
256 MAX77693_MUIC_REG_CTRL3, ctrl3);
257 if (ret) {
258 dev_err(info->dev, "failed to set ADC debounce time\n");
259 ret = -EINVAL;
260 }
261 break;
262 default:
263 dev_err(info->dev, "invalid ADC debounce time\n");
264 ret = -EINVAL;
265 break;
266 }
267
268 return ret;
269};
270
271static int max77693_muic_set_path(struct max77693_muic_info *info,
272 u8 val, bool attached)
273{
274 int ret = 0;
275 u8 ctrl1, ctrl2 = 0;
276
277 if (attached)
278 ctrl1 = val;
279 else
280 ctrl1 = CONTROL1_SW_OPEN;
281
282 ret = max77693_update_reg(info->max77693->regmap_muic,
283 MAX77693_MUIC_REG_CTRL1, ctrl1, COMP_SW_MASK);
284 if (ret < 0) {
285 dev_err(info->dev, "failed to update MUIC register\n");
286 goto out;
287 }
288
289 if (attached)
290 ctrl2 |= CONTROL2_CPEN_MASK; /* LowPwr=0, CPEn=1 */
291 else
292 ctrl2 |= CONTROL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */
293
294 ret = max77693_update_reg(info->max77693->regmap_muic,
295 MAX77693_MUIC_REG_CTRL2, ctrl2,
296 CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK);
297 if (ret < 0) {
298 dev_err(info->dev, "failed to update MUIC register\n");
299 goto out;
300 }
301
302 dev_info(info->dev,
303		"CONTROL1: 0x%02x, CONTROL2: 0x%02x, state: %s\n",
304 ctrl1, ctrl2, attached ? "attached" : "detached");
305out:
306 return ret;
307}
308
309static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info,
310 bool attached)
311{
312 int ret = 0;
313 int type;
314 int adc, adc1k, adclow;
315
316 if (attached) {
317 adc = info->status[0] & STATUS1_ADC_MASK;
318 adclow = info->status[0] & STATUS1_ADCLOW_MASK;
319 adclow >>= STATUS1_ADCLOW_SHIFT;
320 adc1k = info->status[0] & STATUS1_ADC1K_MASK;
321 adc1k >>= STATUS1_ADC1K_SHIFT;
322
323 /**
324 * [0x1][ADCLow][ADC1K]
325 * [0x1 0 0 ] : USB_OTG
326 * [0x1 1 0 ] : Audio Video Cable with load
327 * [0x1 1 1 ] : MHL
328 */
329 type = ((0x1 << 8) | (adclow << 1) | adc1k);
330
331		/* Store the ADC value so the accessory can still be
332		   handled when it is detached */
333 info->prev_adc = adc;
334 info->prev_adc_gnd = type;
335 } else
336 type = info->prev_adc_gnd;
337
338 switch (type) {
339 case MAX77693_MUIC_GND_USB_OTG:
340 /* USB_OTG */
341 ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
342 if (ret < 0)
343 goto out;
344 extcon_set_cable_state(info->edev, "USB-Host", attached);
345 break;
346 case MAX77693_MUIC_GND_AV_CABLE_LOAD:
347 /* Audio Video Cable with load */
348 ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
349 if (ret < 0)
350 goto out;
351 extcon_set_cable_state(info->edev,
352 "Audio-video-load", attached);
353 break;
354 case MAX77693_MUIC_GND_MHL_CABLE:
355 /* MHL */
356 extcon_set_cable_state(info->edev, "MHL", attached);
357 break;
358 default:
359		dev_err(info->dev, "failed to detect %s accessory\n",
360 attached ? "attached" : "detached");
361 dev_err(info->dev, "- adc:0x%x, adclow:0x%x, adc1k:0x%x\n",
362 adc, adclow, adc1k);
363 ret = -EINVAL;
364 break;
365 }
366
367out:
368 return ret;
369}
370
371static int max77693_muic_adc_handler(struct max77693_muic_info *info,
372 int curr_adc, bool attached)
373{
374 int ret = 0;
375 int adc;
376
377 if (attached) {
378		/* Store the ADC value so the accessory can still be
379		   handled when it is detached */
380 info->prev_adc = curr_adc;
381 adc = curr_adc;
382 } else
383 adc = info->prev_adc;
384
385 dev_info(info->dev,
386 "external connector is %s (adc:0x%02x, prev_adc:0x%x)\n",
387 attached ? "attached" : "detached", curr_adc, info->prev_adc);
388
389 switch (adc) {
390 case MAX77693_MUIC_ADC_GROUND:
391 /* USB_OTG/MHL/Audio */
392 max77693_muic_adc_ground_handler(info, attached);
393 break;
394 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF:
395 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON:
396 /* USB */
397 ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
398 if (ret < 0)
399 goto out;
400 extcon_set_cable_state(info->edev, "USB", attached);
401 break;
402 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF:
403 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON:
404 /* JIG */
405 ret = max77693_muic_set_path(info, CONTROL1_SW_UART, attached);
406 if (ret < 0)
407 goto out;
408 extcon_set_cable_state(info->edev, "JIG", attached);
409 break;
410 case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE:
411 /* Audio Video cable with no-load */
412 ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
413 if (ret < 0)
414 goto out;
415 extcon_set_cable_state(info->edev,
416 "Audio-video-noload", attached);
417 break;
418 case MAX77693_MUIC_ADC_SEND_END_BUTTON:
419 case MAX77693_MUIC_ADC_REMOTE_S1_BUTTON:
420 case MAX77693_MUIC_ADC_REMOTE_S2_BUTTON:
421 case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON:
422 case MAX77693_MUIC_ADC_REMOTE_S4_BUTTON:
423 case MAX77693_MUIC_ADC_REMOTE_S5_BUTTON:
424 case MAX77693_MUIC_ADC_REMOTE_S6_BUTTON:
425 case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON:
426 case MAX77693_MUIC_ADC_REMOTE_S8_BUTTON:
427 case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON:
428 case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON:
429 case MAX77693_MUIC_ADC_REMOTE_S11_BUTTON:
430 case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON:
431 case MAX77693_MUIC_ADC_RESERVED_ACC_1:
432 case MAX77693_MUIC_ADC_RESERVED_ACC_2:
433 case MAX77693_MUIC_ADC_RESERVED_ACC_3:
434 case MAX77693_MUIC_ADC_RESERVED_ACC_4:
435 case MAX77693_MUIC_ADC_RESERVED_ACC_5:
436 case MAX77693_MUIC_ADC_CEA936_AUDIO:
437 case MAX77693_MUIC_ADC_PHONE_POWERED_DEV:
438 case MAX77693_MUIC_ADC_TTY_CONVERTER:
439 case MAX77693_MUIC_ADC_UART_CABLE:
440 case MAX77693_MUIC_ADC_CEA936A_TYPE1_CHG:
441 case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD:
442 case MAX77693_MUIC_ADC_CEA936A_TYPE2_CHG:
443		/* This accessory isn't used in the general case. If detecting
444		   additional accessories is specifically needed, proper handling
445		   should be implemented for when they are attached/detached. */
446 dev_info(info->dev,
447 "accessory is %s but it isn't used (adc:0x%x)\n",
448 attached ? "attached" : "detached", adc);
449 goto out;
450 default:
451 dev_err(info->dev,
452 "failed to detect %s accessory (adc:0x%x)\n",
453 attached ? "attached" : "detached", adc);
454 ret = -EINVAL;
455 goto out;
456 }
457
458out:
459 return ret;
460}
461
462static int max77693_muic_chg_handler(struct max77693_muic_info *info,
463 int curr_chg_type, bool attached)
464{
465 int ret = 0;
466 int chg_type;
467
468 if (attached) {
469		/* Store the charger type so the charger accessory can
470		   still be handled when it is detached */
471 info->prev_chg_type = curr_chg_type;
472 chg_type = curr_chg_type;
473 } else
474 chg_type = info->prev_chg_type;
475
476 dev_info(info->dev,
477		"external connector is %s (chg_type:0x%x, prev_chg_type:0x%x)\n",
478 attached ? "attached" : "detached",
479 curr_chg_type, info->prev_chg_type);
480
481 switch (chg_type) {
482 case MAX77693_CHARGER_TYPE_USB:
483 ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
484 if (ret < 0)
485 goto out;
486 extcon_set_cable_state(info->edev, "USB", attached);
487 break;
488 case MAX77693_CHARGER_TYPE_DOWNSTREAM_PORT:
489 extcon_set_cable_state(info->edev,
490 "Charge-downstream", attached);
491 break;
492 case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
493 extcon_set_cable_state(info->edev, "TA", attached);
494 break;
495 case MAX77693_CHARGER_TYPE_APPLE_500MA:
496 extcon_set_cable_state(info->edev, "Slow-charger", attached);
497 break;
498 case MAX77693_CHARGER_TYPE_APPLE_1A_2A:
499 extcon_set_cable_state(info->edev, "Fast-charger", attached);
500 break;
501 case MAX77693_CHARGER_TYPE_DEAD_BATTERY:
502 break;
503 default:
504 dev_err(info->dev,
505 "failed to detect %s accessory (chg_type:0x%x)\n",
506 attached ? "attached" : "detached", chg_type);
507 ret = -EINVAL;
508 goto out;
509 }
510
511out:
512 return ret;
513}
514
515static void max77693_muic_irq_work(struct work_struct *work)
516{
517 struct max77693_muic_info *info = container_of(work,
518 struct max77693_muic_info, irq_work);
519 int curr_adc, curr_chg_type;
520 int irq_type = -1;
521 int i, ret = 0;
522 bool attached = true;
523
524 if (!info->edev)
525 return;
526
527 mutex_lock(&info->mutex);
528
529	for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
530 if (info->irq == muic_irqs[i].virq)
531 irq_type = muic_irqs[i].irq;
532
533 ret = max77693_bulk_read(info->max77693->regmap_muic,
534 MAX77693_MUIC_REG_STATUS1, 2, info->status);
535 if (ret) {
536 dev_err(info->dev, "failed to read MUIC register\n");
537 mutex_unlock(&info->mutex);
538 return;
539 }
540
541 switch (irq_type) {
542 case MAX77693_MUIC_IRQ_INT1_ADC:
543 case MAX77693_MUIC_IRQ_INT1_ADC_LOW:
544 case MAX77693_MUIC_IRQ_INT1_ADC_ERR:
545 case MAX77693_MUIC_IRQ_INT1_ADC1K:
546		/* Handle all accessory types except
547		   charger accessories */
548 curr_adc = info->status[0] & STATUS1_ADC_MASK;
549 curr_adc >>= STATUS1_ADC_SHIFT;
550
551		/* Check whether the accessory is attached or detached */
552 if (curr_adc == MAX77693_MUIC_ADC_OPEN)
553 attached = false;
554
555 ret = max77693_muic_adc_handler(info, curr_adc, attached);
556 break;
557 case MAX77693_MUIC_IRQ_INT2_CHGTYP:
558 case MAX77693_MUIC_IRQ_INT2_CHGDETREUN:
559 case MAX77693_MUIC_IRQ_INT2_DCDTMR:
560 case MAX77693_MUIC_IRQ_INT2_DXOVP:
561 case MAX77693_MUIC_IRQ_INT2_VBVOLT:
562 case MAX77693_MUIC_IRQ_INT2_VIDRM:
563 /* Handle charger accessory */
564 curr_chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
565 curr_chg_type >>= STATUS2_CHGTYP_SHIFT;
566
567		/* Check whether the charger accessory
568		   is attached or detached */
569 if (curr_chg_type == MAX77693_CHARGER_TYPE_NONE)
570 attached = false;
571
572 ret = max77693_muic_chg_handler(info, curr_chg_type, attached);
573 break;
574 case MAX77693_MUIC_IRQ_INT3_EOC:
575 case MAX77693_MUIC_IRQ_INT3_CGMBC:
576 case MAX77693_MUIC_IRQ_INT3_OVP:
577 case MAX77693_MUIC_IRQ_INT3_MBCCHG_ERR:
578 case MAX77693_MUIC_IRQ_INT3_CHG_ENABLED:
579 case MAX77693_MUIC_IRQ_INT3_BAT_DET:
580 break;
581 default:
582 dev_err(info->dev, "muic interrupt: irq %d occurred\n",
583 irq_type);
584 break;
585 }
586
587 if (ret < 0)
588 dev_err(info->dev, "failed to handle MUIC interrupt\n");
589
590 mutex_unlock(&info->mutex);
591
592 return;
593}
594
595static irqreturn_t max77693_muic_irq_handler(int irq, void *data)
596{
597 struct max77693_muic_info *info = data;
598
599 info->irq = irq;
600 schedule_work(&info->irq_work);
601
602 return IRQ_HANDLED;
603}
604
605static struct regmap_config max77693_muic_regmap_config = {
606 .reg_bits = 8,
607 .val_bits = 8,
608};
609
610static int max77693_muic_detect_accessory(struct max77693_muic_info *info)
611{
612 int ret = 0;
613 int adc, chg_type;
614
615 mutex_lock(&info->mutex);
616
617 /* Read STATUSx register to detect accessory */
618 ret = max77693_bulk_read(info->max77693->regmap_muic,
619 MAX77693_MUIC_REG_STATUS1, 2, info->status);
620 if (ret) {
621 dev_err(info->dev, "failed to read MUIC register\n");
622 mutex_unlock(&info->mutex);
623 return -EINVAL;
624 }
625
626 adc = info->status[0] & STATUS1_ADC_MASK;
627 adc >>= STATUS1_ADC_SHIFT;
628
629 if (adc != MAX77693_MUIC_ADC_OPEN) {
630 dev_info(info->dev,
631 "external connector is attached (adc:0x%02x)\n", adc);
632
633 ret = max77693_muic_adc_handler(info, adc, true);
634 if (ret < 0)
635 dev_err(info->dev, "failed to detect accessory\n");
636 goto out;
637 }
638
639 chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
640 chg_type >>= STATUS2_CHGTYP_SHIFT;
641
642 if (chg_type != MAX77693_CHARGER_TYPE_NONE) {
643 dev_info(info->dev,
644 "external connector is attached (chg_type:0x%x)\n",
645 chg_type);
646
647		ret = max77693_muic_chg_handler(info, chg_type, true);
648 if (ret < 0)
649 dev_err(info->dev, "failed to detect charger accessory\n");
650 }
651
652out:
653 mutex_unlock(&info->mutex);
654 return ret;
655}
656
657static int __devinit max77693_muic_probe(struct platform_device *pdev)
658{
659 struct max77693_dev *max77693 = dev_get_drvdata(pdev->dev.parent);
660 struct max77693_muic_info *info;
661 int ret, i;
662 u8 id;
663
664 info = kzalloc(sizeof(struct max77693_muic_info), GFP_KERNEL);
665 if (!info) {
666 dev_err(&pdev->dev, "failed to allocate memory\n");
667 ret = -ENOMEM;
668 goto err_kfree;
669 }
670 info->dev = &pdev->dev;
671 info->max77693 = max77693;
672 info->max77693->regmap_muic = regmap_init_i2c(info->max77693->muic,
673 &max77693_muic_regmap_config);
674 if (IS_ERR(info->max77693->regmap_muic)) {
675 ret = PTR_ERR(info->max77693->regmap_muic);
676 dev_err(max77693->dev,
677 "failed to allocate register map: %d\n", ret);
678 goto err_regmap;
679 }
680 platform_set_drvdata(pdev, info);
681 mutex_init(&info->mutex);
682
683 INIT_WORK(&info->irq_work, max77693_muic_irq_work);
684
685 /* Support irq domain for MAX77693 MUIC device */
686 for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
687 struct max77693_muic_irq *muic_irq = &muic_irqs[i];
688 int virq = 0;
689
690 virq = irq_create_mapping(max77693->irq_domain, muic_irq->irq);
691 if (!virq)
692			{ ret = -EINVAL; goto err_irq; }
693 muic_irq->virq = virq;
694
695 ret = request_threaded_irq(virq, NULL,
696 max77693_muic_irq_handler,
697 0, muic_irq->name, info);
698 if (ret) {
699 dev_err(&pdev->dev,
700 "failed: irq request (IRQ: %d,"
701				" error: %d)\n",
702 muic_irq->irq, ret);
703
704 for (i = i - 1; i >= 0; i--)
705				free_irq(muic_irqs[i].virq, info);
706 goto err_irq;
707 }
708 }
709
710 /* Initialize extcon device */
711 info->edev = kzalloc(sizeof(struct extcon_dev), GFP_KERNEL);
712 if (!info->edev) {
713 dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
714 ret = -ENOMEM;
715 goto err_irq;
716 }
717 info->edev->name = DEV_NAME;
718 info->edev->supported_cable = max77693_extcon_cable;
719 ret = extcon_dev_register(info->edev, NULL);
720 if (ret) {
721 dev_err(&pdev->dev, "failed to register extcon device\n");
722 goto err_extcon;
723 }
724
725	/* Check revision number of MUIC device */
726 ret = max77693_read_reg(info->max77693->regmap_muic,
727 MAX77693_MUIC_REG_ID, &id);
728 if (ret < 0) {
729 dev_err(&pdev->dev, "failed to read revision number\n");
730 goto err_extcon;
731 }
732	dev_info(info->dev, "device ID: 0x%x\n", id);
733
734 /* Set ADC debounce time */
735 max77693_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);
736
737 /* Detect accessory on boot */
738 max77693_muic_detect_accessory(info);
739
740 return ret;
741
742err_extcon:
743 kfree(info->edev);
744err_irq:
745err_regmap:
746 kfree(info);
747err_kfree:
748 return ret;
749}
750
751static int __devexit max77693_muic_remove(struct platform_device *pdev)
752{
753 struct max77693_muic_info *info = platform_get_drvdata(pdev);
754 int i;
755
756 for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
757 free_irq(muic_irqs[i].virq, info);
758 cancel_work_sync(&info->irq_work);
759 extcon_dev_unregister(info->edev);
760 kfree(info);
761
762 return 0;
763}
764
765static struct platform_driver max77693_muic_driver = {
766 .driver = {
767 .name = DEV_NAME,
768 .owner = THIS_MODULE,
769 },
770 .probe = max77693_muic_probe,
771 .remove = __devexit_p(max77693_muic_remove),
772};
773
774module_platform_driver(max77693_muic_driver);
775
776MODULE_DESCRIPTION("Maxim MAX77693 Extcon driver");
777MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
778MODULE_LICENSE("GPL");
779MODULE_ALIAS("platform:extcon-max77693");
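
Both MUIC drivers in this series stop computing interrupt numbers from pdata->irq_base and instead resolve them through the MFD's irq domain. The core pattern, reduced to a sketch (domain, handler and info are placeholders):

	for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
		/* Translate the chip-local hardware irq number into a Linux virq. */
		int virq = irq_create_mapping(domain, muic_irqs[i].irq);

		if (!virq)
			return -EINVAL;
		muic_irqs[i].virq = virq;
		ret = request_threaded_irq(virq, NULL, handler, 0,
					   muic_irqs[i].name, info);
	}

	/* The work function later maps back from the virq to the hardware irq: */
	for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
		if (info->irq == muic_irqs[i].virq)
			irq_type = muic_irqs[i].irq;
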
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 23416e443765..ef9090a4271d 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -26,6 +26,7 @@
26#include <linux/mfd/max8997.h> 26#include <linux/mfd/max8997.h>
27#include <linux/mfd/max8997-private.h> 27#include <linux/mfd/max8997-private.h>
28#include <linux/extcon.h> 28#include <linux/extcon.h>
29#include <linux/irqdomain.h>
29 30
30#define DEV_NAME "max8997-muic" 31#define DEV_NAME "max8997-muic"
31 32
@@ -77,6 +78,7 @@
77struct max8997_muic_irq { 78struct max8997_muic_irq {
78 unsigned int irq; 79 unsigned int irq;
79 const char *name; 80 const char *name;
81 unsigned int virq;
80}; 82};
81 83
82static struct max8997_muic_irq muic_irqs[] = { 84static struct max8997_muic_irq muic_irqs[] = {
@@ -116,8 +118,8 @@ const char *max8997_extcon_cable[] = {
116 [5] = "Charge-downstream", 118 [5] = "Charge-downstream",
117 [6] = "MHL", 119 [6] = "MHL",
118 [7] = "Dock-desk", 120 [7] = "Dock-desk",
119 [7] = "Dock-card", 121 [8] = "Dock-card",
120 [8] = "JIG", 122 [9] = "JIG",
121 123
122 NULL, 124 NULL,
123}; 125};
@@ -343,12 +345,10 @@ static void max8997_muic_irq_work(struct work_struct *work)
343{ 345{
344 struct max8997_muic_info *info = container_of(work, 346 struct max8997_muic_info *info = container_of(work,
345 struct max8997_muic_info, irq_work); 347 struct max8997_muic_info, irq_work);
346 struct max8997_dev *max8997 = i2c_get_clientdata(info->muic);
347 u8 status[2]; 348 u8 status[2];
348 u8 adc, chg_type; 349 u8 adc, chg_type;
349 350 int irq_type = 0;
350 int irq_type = info->irq - max8997->irq_base; 351 int i, ret;
351 int ret;
352 352
353 mutex_lock(&info->mutex); 353 mutex_lock(&info->mutex);
354 354
@@ -363,6 +363,10 @@ static void max8997_muic_irq_work(struct work_struct *work)
363 dev_dbg(info->dev, "%s: STATUS1:0x%x, 2:0x%x\n", __func__, 363 dev_dbg(info->dev, "%s: STATUS1:0x%x, 2:0x%x\n", __func__,
364 status[0], status[1]); 364 status[0], status[1]);
365 365
366	for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
367 if (info->irq == muic_irqs[i].virq)
368 irq_type = muic_irqs[i].irq;
369
366 switch (irq_type) { 370 switch (irq_type) {
367 case MAX8997_MUICIRQ_ADC: 371 case MAX8997_MUICIRQ_ADC:
368 adc = status[0] & STATUS1_ADC_MASK; 372 adc = status[0] & STATUS1_ADC_MASK;
@@ -448,11 +452,15 @@ static int __devinit max8997_muic_probe(struct platform_device *pdev)
448 452
449 for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) { 453 for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
450 struct max8997_muic_irq *muic_irq = &muic_irqs[i]; 454 struct max8997_muic_irq *muic_irq = &muic_irqs[i];
455 int virq = 0;
451 456
452 ret = request_threaded_irq(pdata->irq_base + muic_irq->irq, 457 virq = irq_create_mapping(max8997->irq_domain, muic_irq->irq);
453 NULL, max8997_muic_irq_handler, 458 if (!virq)
454 0, muic_irq->name, 459 goto err_irq;
455 info); 460 muic_irq->virq = virq;
461
462		ret = request_threaded_irq(virq, NULL, max8997_muic_irq_handler,
463 0, muic_irq->name, info);
456 if (ret) { 464 if (ret) {
457 dev_err(&pdev->dev, 465 dev_err(&pdev->dev,
458 "failed: irq request (IRQ: %d," 466 "failed: irq request (IRQ: %d,"
@@ -496,7 +504,7 @@ err_extcon:
496 kfree(info->edev); 504 kfree(info->edev);
497err_irq: 505err_irq:
498 while (--i >= 0) 506 while (--i >= 0)
499 free_irq(pdata->irq_base + muic_irqs[i].irq, info); 507 free_irq(muic_irqs[i].virq, info);
500 kfree(info); 508 kfree(info);
501err_kfree: 509err_kfree:
502 return ret; 510 return ret;
@@ -505,15 +513,15 @@ err_kfree:
505static int __devexit max8997_muic_remove(struct platform_device *pdev) 513static int __devexit max8997_muic_remove(struct platform_device *pdev)
506{ 514{
507 struct max8997_muic_info *info = platform_get_drvdata(pdev); 515 struct max8997_muic_info *info = platform_get_drvdata(pdev);
508 struct max8997_dev *max8997 = i2c_get_clientdata(info->muic);
509 int i; 516 int i;
510 517
511 for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) 518 for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
512 free_irq(max8997->irq_base + muic_irqs[i].irq, info); 519 free_irq(muic_irqs[i].virq, info);
513 cancel_work_sync(&info->irq_work); 520 cancel_work_sync(&info->irq_work);
514 521
515 extcon_dev_unregister(info->edev); 522 extcon_dev_unregister(info->edev);
516 523
524 kfree(info->edev);
517 kfree(info); 525 kfree(info);
518 526
519 return 0; 527 return 0;
diff --git a/drivers/extcon/extcon_class.c b/drivers/extcon/extcon_class.c
index f598a700ec15..f6419f9db76c 100644
--- a/drivers/extcon/extcon_class.c
+++ b/drivers/extcon/extcon_class.c
@@ -65,7 +65,7 @@ const char *extcon_cable_name[] = {
65 NULL, 65 NULL,
66}; 66};
67 67
68struct class *extcon_class; 68static struct class *extcon_class;
69#if defined(CONFIG_ANDROID) 69#if defined(CONFIG_ANDROID)
70static struct class_compat *switch_class; 70static struct class_compat *switch_class;
71#endif /* CONFIG_ANDROID */ 71#endif /* CONFIG_ANDROID */
@@ -762,7 +762,7 @@ int extcon_dev_register(struct extcon_dev *edev, struct device *dev)
762#if defined(CONFIG_ANDROID) 762#if defined(CONFIG_ANDROID)
763 if (switch_class) 763 if (switch_class)
764 ret = class_compat_create_link(switch_class, edev->dev, 764 ret = class_compat_create_link(switch_class, edev->dev,
765 dev); 765 NULL);
766#endif /* CONFIG_ANDROID */ 766#endif /* CONFIG_ANDROID */
767 767
768 spin_lock_init(&edev->lock); 768 spin_lock_init(&edev->lock);
diff --git a/drivers/extcon/extcon_gpio.c b/drivers/extcon/extcon_gpio.c
index fe7a07b47336..3cc152e690b0 100644
--- a/drivers/extcon/extcon_gpio.c
+++ b/drivers/extcon/extcon_gpio.c
@@ -105,38 +105,35 @@ static int __devinit gpio_extcon_probe(struct platform_device *pdev)
105 105
106 ret = extcon_dev_register(&extcon_data->edev, &pdev->dev); 106 ret = extcon_dev_register(&extcon_data->edev, &pdev->dev);
107 if (ret < 0) 107 if (ret < 0)
108 goto err_extcon_dev_register; 108 return ret;
109 109
110 ret = gpio_request_one(extcon_data->gpio, GPIOF_DIR_IN, pdev->name); 110 ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN,
111 pdev->name);
111 if (ret < 0) 112 if (ret < 0)
112 goto err_request_gpio; 113 goto err;
113 114
114 INIT_DELAYED_WORK(&extcon_data->work, gpio_extcon_work); 115 INIT_DELAYED_WORK(&extcon_data->work, gpio_extcon_work);
115 116
116 extcon_data->irq = gpio_to_irq(extcon_data->gpio); 117 extcon_data->irq = gpio_to_irq(extcon_data->gpio);
117 if (extcon_data->irq < 0) { 118 if (extcon_data->irq < 0) {
118 ret = extcon_data->irq; 119 ret = extcon_data->irq;
119 goto err_detect_irq_num_failed; 120 goto err;
120 } 121 }
121 122
122 ret = request_any_context_irq(extcon_data->irq, gpio_irq_handler, 123 ret = request_any_context_irq(extcon_data->irq, gpio_irq_handler,
123 pdata->irq_flags, pdev->name, 124 pdata->irq_flags, pdev->name,
124 extcon_data); 125 extcon_data);
125 if (ret < 0) 126 if (ret < 0)
126 goto err_request_irq; 127 goto err;
127 128
129 platform_set_drvdata(pdev, extcon_data);
128 /* Perform initial detection */ 130 /* Perform initial detection */
129 gpio_extcon_work(&extcon_data->work.work); 131 gpio_extcon_work(&extcon_data->work.work);
130 132
131 return 0; 133 return 0;
132 134
133err_request_irq: 135err:
134err_detect_irq_num_failed:
135 gpio_free(extcon_data->gpio);
136err_request_gpio:
137 extcon_dev_unregister(&extcon_data->edev); 136 extcon_dev_unregister(&extcon_data->edev);
138err_extcon_dev_register:
139 devm_kfree(&pdev->dev, extcon_data);
140 137
141 return ret; 138 return ret;
142} 139}
@@ -146,9 +143,8 @@ static int __devexit gpio_extcon_remove(struct platform_device *pdev)
146 struct gpio_extcon_data *extcon_data = platform_get_drvdata(pdev); 143 struct gpio_extcon_data *extcon_data = platform_get_drvdata(pdev);
147 144
148 cancel_delayed_work_sync(&extcon_data->work); 145 cancel_delayed_work_sync(&extcon_data->work);
149 gpio_free(extcon_data->gpio); 146 free_irq(extcon_data->irq, extcon_data);
150 extcon_dev_unregister(&extcon_data->edev); 147 extcon_dev_unregister(&extcon_data->edev);
151 devm_kfree(&pdev->dev, extcon_data);
152 148
153 return 0; 149 return 0;
154} 150}
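
The extcon_gpio conversion works because devm_gpio_request_one() ties the GPIO to the device's lifetime: the probe error ladder collapses into a single err label, and remove() no longer needs gpio_free(), while the IRQ, requested without devm, still needs an explicit free_irq(). A sketch of the managed-request idiom, with hypothetical names:

#include <linux/device.h>
#include <linux/gpio.h>

/* Managed request: the GPIO is released when the device unbinds. */
static int my_probe_gpio(struct device *dev, unsigned gpio)
{
	int ret;

	ret = devm_gpio_request_one(dev, gpio, GPIOF_DIR_IN, dev_name(dev));
	if (ret < 0)
		return ret;		/* nothing to unwind */

	return gpio_to_irq(gpio);	/* negative errno if unmapped */
}
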
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 4d460ef87161..7a05fd24d68b 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -398,6 +398,14 @@ static ssize_t guid_show(struct device *dev,
398 return ret; 398 return ret;
399} 399}
400 400
401static ssize_t is_local_show(struct device *dev,
402 struct device_attribute *attr, char *buf)
403{
404 struct fw_device *device = fw_device(dev);
405
406 return sprintf(buf, "%u\n", device->is_local);
407}
408
401static int units_sprintf(char *buf, const u32 *directory) 409static int units_sprintf(char *buf, const u32 *directory)
402{ 410{
403 struct fw_csr_iterator ci; 411 struct fw_csr_iterator ci;
@@ -447,6 +455,7 @@ static ssize_t units_show(struct device *dev,
447static struct device_attribute fw_device_attributes[] = { 455static struct device_attribute fw_device_attributes[] = {
448 __ATTR_RO(config_rom), 456 __ATTR_RO(config_rom),
449 __ATTR_RO(guid), 457 __ATTR_RO(guid),
458 __ATTR_RO(is_local),
450 __ATTR_RO(units), 459 __ATTR_RO(units),
451 __ATTR_NULL, 460 __ATTR_NULL,
452}; 461};
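
The new is_local attribute follows the __ATTR_RO() convention: the macro expands to a read-only (0444) attribute wired to a <name>_show() callback of the same name. A standalone sketch, with a hypothetical foo attribute:

#include <linux/device.h>
#include <linux/kernel.h>

/* foo is hypothetical; __ATTR_RO(foo) wires mode 0444 to foo_show(). */
static ssize_t foo_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", 1);	/* report some device state */
}

static struct device_attribute my_attributes[] = {
	__ATTR_RO(foo),
	__ATTR_NULL,		/* terminator, as in fw_device_attributes */
};
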
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 8382e27e9a27..38c0aa60b2cb 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -146,7 +146,7 @@ EXPORT_SYMBOL(fw_iso_buffer_destroy);
146/* Convert DMA address to offset into virtually contiguous buffer. */ 146/* Convert DMA address to offset into virtually contiguous buffer. */
147size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed) 147size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
148{ 148{
149 int i; 149 size_t i;
150 dma_addr_t address; 150 dma_addr_t address;
151 ssize_t offset; 151 ssize_t offset;
152 152
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 780708dc6e25..87d6f2d2f02d 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -525,9 +525,10 @@ const struct fw_address_region fw_high_memory_region =
525 { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, }; 525 { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, };
526EXPORT_SYMBOL(fw_high_memory_region); 526EXPORT_SYMBOL(fw_high_memory_region);
527 527
528#if 0 528static const struct fw_address_region low_memory_region =
529const struct fw_address_region fw_low_memory_region =
530 { .start = 0x000000000000ULL, .end = 0x000100000000ULL, }; 529 { .start = 0x000000000000ULL, .end = 0x000100000000ULL, };
530
531#if 0
531const struct fw_address_region fw_private_region = 532const struct fw_address_region fw_private_region =
532 { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, }; 533 { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
533const struct fw_address_region fw_csr_region = 534const struct fw_address_region fw_csr_region =
@@ -1198,6 +1199,23 @@ static struct fw_address_handler registers = {
1198 .address_callback = handle_registers, 1199 .address_callback = handle_registers,
1199}; 1200};
1200 1201
1202static void handle_low_memory(struct fw_card *card, struct fw_request *request,
1203 int tcode, int destination, int source, int generation,
1204 unsigned long long offset, void *payload, size_t length,
1205 void *callback_data)
1206{
1207 /*
1208 * This catches requests not handled by the physical DMA unit,
1209 * i.e., wrong transaction types or unauthorized source nodes.
1210 */
1211 fw_send_response(card, request, RCODE_TYPE_ERROR);
1212}
1213
1214static struct fw_address_handler low_memory = {
1215 .length = 0x000100000000ULL,
1216 .address_callback = handle_low_memory,
1217};
1218
1201MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); 1219MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
1202MODULE_DESCRIPTION("Core IEEE1394 transaction logic"); 1220MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
1203MODULE_LICENSE("GPL"); 1221MODULE_LICENSE("GPL");
@@ -1259,6 +1277,7 @@ static int __init fw_core_init(void)
1259 1277
1260 fw_core_add_address_handler(&topology_map, &topology_map_region); 1278 fw_core_add_address_handler(&topology_map, &topology_map_region);
1261 fw_core_add_address_handler(&registers, &registers_region); 1279 fw_core_add_address_handler(&registers, &registers_region);
1280 fw_core_add_address_handler(&low_memory, &low_memory_region);
1262 fw_core_add_descriptor(&vendor_id_descriptor); 1281 fw_core_add_descriptor(&vendor_id_descriptor);
1263 fw_core_add_descriptor(&model_id_descriptor); 1282 fw_core_add_descriptor(&model_id_descriptor);
1264 1283
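
handle_low_memory is registered like any other core address handler: a handler struct carries the length and callback, and fw_core_add_address_handler() anchors it at a region's start, returning -EBUSY if the range cannot be claimed. A sketch of that registration, with hypothetical my_* names and assuming the declarations in <linux/firewire.h>:

#include <linux/firewire.h>
#include <linux/firewire-constants.h>

/* Hypothetical region and handler mirroring the low_memory registration. */
static const struct fw_address_region my_region =
	{ .start = 0x000000000000ULL, .end = 0x000100000000ULL, };

static void my_callback(struct fw_card *card, struct fw_request *request,
			int tcode, int destination, int source,
			int generation, unsigned long long offset,
			void *payload, size_t length, void *callback_data)
{
	/* Reject anything that reaches software in this range. */
	fw_send_response(card, request, RCODE_TYPE_ERROR);
}

static struct fw_address_handler my_handler = {
	.length		  = 0x000100000000ULL,
	.address_callback = my_callback,
};

static int my_register(void)
{
	/* Returns -EBUSY if the region is already claimed. */
	return fw_core_add_address_handler(&my_handler, &my_region);
}
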
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index c1af05e834b6..c788dbdaf3bc 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -191,6 +191,7 @@ struct fw_ohci {
191 unsigned quirks; 191 unsigned quirks;
192 unsigned int pri_req_max; 192 unsigned int pri_req_max;
193 u32 bus_time; 193 u32 bus_time;
194 bool bus_time_running;
194 bool is_root; 195 bool is_root;
195 bool csr_state_setclear_abdicate; 196 bool csr_state_setclear_abdicate;
196 int n_ir; 197 int n_ir;
@@ -1726,6 +1727,13 @@ static u32 update_bus_time(struct fw_ohci *ohci)
1726{ 1727{
1727 u32 cycle_time_seconds = get_cycle_time(ohci) >> 25; 1728 u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;
1728 1729
1730 if (unlikely(!ohci->bus_time_running)) {
1731 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
1732 ohci->bus_time = (lower_32_bits(get_seconds()) & ~0x7f) |
1733 (cycle_time_seconds & 0x40);
1734 ohci->bus_time_running = true;
1735 }
1736
1729 if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40)) 1737 if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
1730 ohci->bus_time += 0x40; 1738 ohci->bus_time += 0x40;
1731 1739
@@ -2213,7 +2221,7 @@ static int ohci_enable(struct fw_card *card,
2213{ 2221{
2214 struct fw_ohci *ohci = fw_ohci(card); 2222 struct fw_ohci *ohci = fw_ohci(card);
2215 struct pci_dev *dev = to_pci_dev(card->device); 2223 struct pci_dev *dev = to_pci_dev(card->device);
2216 u32 lps, seconds, version, irqs; 2224 u32 lps, version, irqs;
2217 int i, ret; 2225 int i, ret;
2218 2226
2219 if (software_reset(ohci)) { 2227 if (software_reset(ohci)) {
@@ -2269,9 +2277,12 @@ static int ohci_enable(struct fw_card *card,
2269 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) | 2277 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
2270 (200 << 16)); 2278 (200 << 16));
2271 2279
2272 seconds = lower_32_bits(get_seconds()); 2280 ohci->bus_time_running = false;
2273 reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25); 2281
2274 ohci->bus_time = seconds & ~0x3f; 2282 for (i = 0; i < 32; i++)
2283 if (ohci->ir_context_support & (1 << i))
2284 reg_write(ohci, OHCI1394_IsoRcvContextControlClear(i),
2285 IR_CONTEXT_MULTI_CHANNEL_MODE);
2275 2286
2276 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; 2287 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2277 if (version >= OHCI_VERSION_1_1) { 2288 if (version >= OHCI_VERSION_1_1) {
@@ -2369,7 +2380,6 @@ static int ohci_enable(struct fw_card *card,
2369 OHCI1394_postedWriteErr | 2380 OHCI1394_postedWriteErr |
2370 OHCI1394_selfIDComplete | 2381 OHCI1394_selfIDComplete |
2371 OHCI1394_regAccessFail | 2382 OHCI1394_regAccessFail |
2372 OHCI1394_cycle64Seconds |
2373 OHCI1394_cycleInconsistent | 2383 OHCI1394_cycleInconsistent |
2374 OHCI1394_unrecoverableError | 2384 OHCI1394_unrecoverableError |
2375 OHCI1394_cycleTooLong | 2385 OHCI1394_cycleTooLong |
@@ -2658,7 +2668,8 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
2658 2668
2659 case CSR_BUS_TIME: 2669 case CSR_BUS_TIME:
2660 spin_lock_irqsave(&ohci->lock, flags); 2670 spin_lock_irqsave(&ohci->lock, flags);
2661 ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f); 2671 ohci->bus_time = (update_bus_time(ohci) & 0x40) |
2672 (value & ~0x7f);
2662 spin_unlock_irqrestore(&ohci->lock, flags); 2673 spin_unlock_irqrestore(&ohci->lock, flags);
2663 break; 2674 break;
2664 2675
@@ -3539,6 +3550,13 @@ static int __devinit pci_probe(struct pci_dev *dev,
3539 3550
3540 INIT_WORK(&ohci->bus_reset_work, bus_reset_work); 3551 INIT_WORK(&ohci->bus_reset_work, bus_reset_work);
3541 3552
3553 if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
3554 pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
3555 dev_err(&dev->dev, "invalid MMIO resource\n");
3556 err = -ENXIO;
3557 goto fail_disable;
3558 }
3559
3542 err = pci_request_region(dev, 0, ohci_driver_name); 3560 err = pci_request_region(dev, 0, ohci_driver_name);
3543 if (err) { 3561 if (err) {
3544 dev_err(&dev->dev, "MMIO resource unavailable\n"); 3562 dev_err(&dev->dev, "MMIO resource unavailable\n");
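
The ohci.c hunks defer bus_time initialization from ohci_enable() to the first update_bus_time() call, guarded by bus_time_running, so the seed is taken only once the cycle timer is actually ticking. The lazy-init guard, reduced to a sketch; my_timer and my_update are hypothetical, and get_seconds() is used as in the 2012-era code:

#include <linux/kernel.h>
#include <linux/time.h>

struct my_timer {
	bool running;
	u32  bus_time;		/* upper bits of the 128 s bus-time epoch */
};

static u32 my_update(struct my_timer *t, u32 cycle_time_seconds)
{
	if (unlikely(!t->running)) {
		/* Seed once the cycle timer ticks: wall clock, 128 s
		 * aligned, carrying the current 64 s phase bit.
		 */
		t->bus_time = (lower_32_bits(get_seconds()) & ~0x7f) |
			      (cycle_time_seconds & 0x40);
		t->running = true;
	}
	/* Advance by 64 s whenever the phase bit flips. */
	if ((t->bus_time & 0x40) != (cycle_time_seconds & 0x40))
		t->bus_time += 0x40;
	return t->bus_time | cycle_time_seconds;
}
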
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 153980be4ee6..b298158cb922 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -6,6 +6,7 @@
6#include <linux/dmi.h> 6#include <linux/dmi.h>
7#include <linux/efi.h> 7#include <linux/efi.h>
8#include <linux/bootmem.h> 8#include <linux/bootmem.h>
9#include <linux/random.h>
9#include <asm/dmi.h> 10#include <asm/dmi.h>
10 11
11/* 12/*
@@ -111,6 +112,8 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
111 112
112 dmi_table(buf, dmi_len, dmi_num, decode, NULL); 113 dmi_table(buf, dmi_len, dmi_num, decode, NULL);
113 114
115 add_device_randomness(buf, dmi_len);
116
114 dmi_iounmap(buf, dmi_len); 117 dmi_iounmap(buf, dmi_len);
115 return 0; 118 return 0;
116} 119}
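
add_device_randomness() mixes device-identifying bytes into the input pool without crediting any entropy, which is why feeding the whole DMI table is safe: it perturbs pool state even when the bytes are predictable. A trivial sketch with a hypothetical identifier blob:

#include <linux/random.h>
#include <linux/types.h>

/* Hypothetical identifier; any device-unique bytes will do. */
static void my_seed_pool(void)
{
	u8 id[6] = { 0x00, 0x16, 0x3e, 0x12, 0x34, 0x56 };

	/* Mixes bytes into the input pool, credits zero entropy. */
	add_device_randomness(id, sizeof(id));
}
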
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index adc07102a20d..c1cdc9236666 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -98,7 +98,7 @@ static LIST_HEAD(map_entries);
98/** 98/**
99 * firmware_map_add_entry() - Does the real work to add a firmware memmap entry. 99 * firmware_map_add_entry() - Does the real work to add a firmware memmap entry.
100 * @start: Start of the memory range. 100 * @start: Start of the memory range.
101 * @end: End of the memory range (inclusive). 101 * @end: End of the memory range (exclusive).
102 * @type: Type of the memory range. 102 * @type: Type of the memory range.
103 * @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised 103 * @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised
104 * entry. 104 * entry.
@@ -113,7 +113,7 @@ static int firmware_map_add_entry(u64 start, u64 end,
113 BUG_ON(start > end); 113 BUG_ON(start > end);
114 114
115 entry->start = start; 115 entry->start = start;
116 entry->end = end; 116 entry->end = end - 1;
117 entry->type = type; 117 entry->type = type;
118 INIT_LIST_HEAD(&entry->list); 118 INIT_LIST_HEAD(&entry->list);
119 kobject_init(&entry->kobj, &memmap_ktype); 119 kobject_init(&entry->kobj, &memmap_ktype);
@@ -148,7 +148,7 @@ static int add_sysfs_fw_map_entry(struct firmware_map_entry *entry)
148 * firmware_map_add_hotplug() - Adds a firmware mapping entry when we do 148 * firmware_map_add_hotplug() - Adds a firmware mapping entry when we do
149 * memory hotplug. 149 * memory hotplug.
150 * @start: Start of the memory range. 150 * @start: Start of the memory range.
151 * @end: End of the memory range (inclusive). 151 * @end: End of the memory range (exclusive).
152 * @type: Type of the memory range. 152 * @type: Type of the memory range.
153 * 153 *
154 * Adds a firmware mapping entry. This function is for memory hotplug, it is 154 * Adds a firmware mapping entry. This function is for memory hotplug, it is
@@ -175,7 +175,7 @@ int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type)
175/** 175/**
176 * firmware_map_add_early() - Adds a firmware mapping entry. 176 * firmware_map_add_early() - Adds a firmware mapping entry.
177 * @start: Start of the memory range. 177 * @start: Start of the memory range.
178 * @end: End of the memory range (inclusive). 178 * @end: End of the memory range.
179 * @type: Type of the memory range. 179 * @type: Type of the memory range.
180 * 180 *
181 * Adds a firmware mapping entry. This function uses the bootmem allocator 181 * Adds a firmware mapping entry. This function uses the bootmem allocator
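
The memmap change moves the interface to exclusive end addresses at the call sites while keeping inclusive storage internally, hence the single end - 1 in firmware_map_add_entry(). A sketch of the conversion and its invariant, with hypothetical names:

#include <linux/types.h>

/* Callers pass [start, end); storage keeps the inclusive last byte. */
struct my_range {
	u64 start;
	u64 last;
};

static void my_range_set(struct my_range *r, u64 start, u64 end)
{
	/* e.g. start = 0x1000, end = 0x2000 covers bytes 0x1000..0x1fff */
	r->start = start;
	r->last  = end - 1;
}
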
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index 51e0e2d8fac6..a330492e06f9 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -95,7 +95,7 @@ efi_setup_pcdp_console(char *cmdline)
95 if (efi.hcdp == EFI_INVALID_TABLE_ADDR) 95 if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
96 return -ENODEV; 96 return -ENODEV;
97 97
98 pcdp = ioremap(efi.hcdp, 4096); 98 pcdp = early_ioremap(efi.hcdp, 4096);
99 printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp); 99 printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp);
100 100
101 if (strstr(cmdline, "console=hcdp")) { 101 if (strstr(cmdline, "console=hcdp")) {
@@ -131,6 +131,6 @@ efi_setup_pcdp_console(char *cmdline)
131 } 131 }
132 132
133out: 133out:
134 iounmap(pcdp); 134 early_iounmap(pcdp, 4096);
135 return rc; 135 return rc;
136} 136}
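
efi_setup_pcdp_console() runs before the regular ioremap machinery is ready, so the patch switches to the early fixmap variants; note that early_iounmap() also needs the mapping size. A sketch, assuming an x86/ia64-style <asm/io.h> that declares the early variants:

#include <linux/errno.h>
#include <asm/io.h>	/* early_ioremap() lives in arch headers here */

static int my_peek_table(unsigned long phys)
{
	void __iomem *p = early_ioremap(phys, 4096);
	int v;

	if (!p)
		return -ENOMEM;
	v = readb(p);			/* inspect the table header */
	early_iounmap(p, 4096);		/* size must match the map */
	return v;
}
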
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index c4067d0141f7..b16c8a72a2e2 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -136,7 +136,7 @@ config GPIO_MPC8XXX
136 136
137config GPIO_MSM_V1 137config GPIO_MSM_V1
138 tristate "Qualcomm MSM GPIO v1" 138 tristate "Qualcomm MSM GPIO v1"
139 depends on GPIOLIB && ARCH_MSM 139 depends on GPIOLIB && ARCH_MSM && (ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50)
140 help 140 help
141 Say yes here to support the GPIO interface on ARM v6 based 141 Say yes here to support the GPIO interface on ARM v6 based
142 Qualcomm MSM chips. Most of the pins on the MSM can be 142 Qualcomm MSM chips. Most of the pins on the MSM can be
@@ -253,6 +253,12 @@ config GPIO_GE_FPGA
253 253
254comment "I2C GPIO expanders:" 254comment "I2C GPIO expanders:"
255 255
256config GPIO_ARIZONA
257 tristate "Wolfson Microelectronics Arizona class devices"
258 depends on MFD_ARIZONA
259 help
260 Support for GPIOs on Wolfson Arizona class devices.
261
256config GPIO_MAX7300 262config GPIO_MAX7300
257 tristate "Maxim MAX7300 GPIO expander" 263 tristate "Maxim MAX7300 GPIO expander"
258 depends on I2C 264 depends on I2C
@@ -466,6 +472,18 @@ config GPIO_BT8XX
466 472
467 If unsure, say N. 473 If unsure, say N.
468 474
475config GPIO_AMD8111
476 tristate "AMD 8111 GPIO driver"
477 depends on PCI
478 help
479 The AMD 8111 south bridge contains 32 GPIO pins which can be used.
480
481	  Note that system firmware/ACPI usually handles GPIO pins on its
482	  own, and users might easily break their systems with careless usage
483	  of this driver!
484
485	  If unsure, say N.
486
469config GPIO_LANGWELL 487config GPIO_LANGWELL
470 bool "Intel Langwell/Penwell GPIO support" 488 bool "Intel Langwell/Penwell GPIO support"
471 depends on PCI && X86 489 depends on PCI && X86
@@ -579,6 +597,13 @@ config GPIO_AB8500
579 help 597 help
580 Select this to enable the AB8500 IC GPIO driver 598 Select this to enable the AB8500 IC GPIO driver
581 599
600config GPIO_TPS6586X
601 bool "TPS6586X GPIO"
602 depends on MFD_TPS6586X
603 help
604 Select this option to enable GPIO driver for the TPS6586X
605 chip family.
606
582config GPIO_TPS65910 607config GPIO_TPS65910
583 bool "TPS65910 GPIO" 608 bool "TPS65910 GPIO"
584 depends on MFD_TPS65910 609 depends on MFD_TPS65910
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 0f55662002c3..153caceeb053 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -12,6 +12,8 @@ obj-$(CONFIG_GPIO_74X164) += gpio-74x164.o
12obj-$(CONFIG_GPIO_AB8500) += gpio-ab8500.o 12obj-$(CONFIG_GPIO_AB8500) += gpio-ab8500.o
13obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o 13obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
14obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o 14obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
15obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o
16obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
15obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o 17obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
16obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o 18obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o
17obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o 19obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o
@@ -61,6 +63,7 @@ obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
61obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o 63obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
62obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o 64obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
63obj-$(CONFIG_ARCH_DAVINCI_TNETV107X) += gpio-tnetv107x.o 65obj-$(CONFIG_ARCH_DAVINCI_TNETV107X) += gpio-tnetv107x.o
66obj-$(CONFIG_GPIO_TPS6586X) += gpio-tps6586x.o
64obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o 67obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
65obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o 68obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
66obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o 69obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index 9e9947cb86a3..1077754f8289 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -98,6 +98,7 @@ int devm_gpio_request_one(struct device *dev, unsigned gpio,
98 98
99 return 0; 99 return 0;
100} 100}
101EXPORT_SYMBOL(devm_gpio_request_one);
101 102
102/** 103/**
103 * devm_gpio_free - free an interrupt 104 * devm_gpio_free - free an interrupt
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
new file mode 100644
index 000000000000..710fafcdd1b1
--- /dev/null
+++ b/drivers/gpio/gpio-amd8111.c
@@ -0,0 +1,246 @@
1/*
2 * GPIO driver for AMD 8111 south bridges
3 *
4 * Copyright (c) 2012 Dmitry Eremin-Solenikov
5 *
6 * Based on the AMD RNG driver:
7 * Copyright 2005 (c) MontaVista Software, Inc.
8 * with the majority of the code coming from:
9 *
10 * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
11 * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
12 *
13 * derived from
14 *
15 * Hardware driver for the AMD 768 Random Number Generator (RNG)
16 * (c) Copyright 2001 Red Hat Inc
17 *
18 * derived from
19 *
20 * Hardware driver for Intel i810 Random Number Generator (RNG)
21 * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
22 * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
23 *
24 * This file is licensed under the terms of the GNU General Public
25 * License version 2. This program is licensed "as is" without any
26 * warranty of any kind, whether express or implied.
27 */
28#include <linux/module.h>
29#include <linux/kernel.h>
30#include <linux/gpio.h>
31#include <linux/pci.h>
32#include <linux/spinlock.h>
33
34#define PMBASE_OFFSET 0xb0
35#define PMBASE_SIZE 0x30
36
37#define AMD_REG_GPIO(i) (0x10 + (i))
38
39#define AMD_GPIO_LTCH_STS 0x40 /* Latch status, w1 */
40#define AMD_GPIO_RTIN 0x20 /* Real Time in, ro */
41#define AMD_GPIO_DEBOUNCE 0x10 /* Debounce, rw */
42#define AMD_GPIO_MODE_MASK 0x0c /* Pin Mode Select, rw */
43#define AMD_GPIO_MODE_IN 0x00
44#define AMD_GPIO_MODE_OUT 0x04
45/* Enable alternative (e.g. clkout, IRQ, etc) function of the pin */
46#define AMD_GPIO_MODE_ALTFN 0x08 /* Or 0x09 */
47#define AMD_GPIO_X_MASK 0x03 /* In/Out specific, rw */
48#define AMD_GPIO_X_IN_ACTIVEHI 0x01 /* Active High */
49#define AMD_GPIO_X_IN_LATCH 0x02 /* Latched version is selected */
50#define AMD_GPIO_X_OUT_LOW 0x00
51#define AMD_GPIO_X_OUT_HI 0x01
52#define AMD_GPIO_X_OUT_CLK0 0x02
53#define AMD_GPIO_X_OUT_CLK1 0x03
54
55/*
56 * Data for PCI driver interface
57 *
58 * This data only exists for exporting the supported
59 * PCI ids via MODULE_DEVICE_TABLE. We do not actually
60 * register a pci_driver, because someone else might one day
61 * want to register another driver on the same PCI id.
62 */
63static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
64 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS), 0 },
65 { 0, }, /* terminate list */
66};
67MODULE_DEVICE_TABLE(pci, pci_tbl);
68
69struct amd_gpio {
70 struct gpio_chip chip;
71 u32 pmbase;
72 void __iomem *pm;
73 struct pci_dev *pdev;
74 spinlock_t lock; /* guards hw registers and orig table */
75 u8 orig[32];
76};
77
78#define to_agp(chip) container_of(chip, struct amd_gpio, chip)
79
80static int amd_gpio_request(struct gpio_chip *chip, unsigned offset)
81{
82 struct amd_gpio *agp = to_agp(chip);
83
84 agp->orig[offset] = ioread8(agp->pm + AMD_REG_GPIO(offset)) &
85 (AMD_GPIO_DEBOUNCE | AMD_GPIO_MODE_MASK | AMD_GPIO_X_MASK);
86
87 dev_dbg(&agp->pdev->dev, "Requested gpio %d, data %x\n", offset, agp->orig[offset]);
88
89 return 0;
90}
91
92static void amd_gpio_free(struct gpio_chip *chip, unsigned offset)
93{
94 struct amd_gpio *agp = to_agp(chip);
95
96 dev_dbg(&agp->pdev->dev, "Freed gpio %d, data %x\n", offset, agp->orig[offset]);
97
98 iowrite8(agp->orig[offset], agp->pm + AMD_REG_GPIO(offset));
99}
100
101static void amd_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
102{
103 struct amd_gpio *agp = to_agp(chip);
104 u8 temp;
105 unsigned long flags;
106
107 spin_lock_irqsave(&agp->lock, flags);
108 temp = ioread8(agp->pm + AMD_REG_GPIO(offset));
109 temp = (temp & AMD_GPIO_DEBOUNCE) | AMD_GPIO_MODE_OUT | (value ? AMD_GPIO_X_OUT_HI : AMD_GPIO_X_OUT_LOW);
110 iowrite8(temp, agp->pm + AMD_REG_GPIO(offset));
111 spin_unlock_irqrestore(&agp->lock, flags);
112
113 dev_dbg(&agp->pdev->dev, "Setting gpio %d, value %d, reg=%02x\n", offset, !!value, temp);
114}
115
116static int amd_gpio_get(struct gpio_chip *chip, unsigned offset)
117{
118 struct amd_gpio *agp = to_agp(chip);
119 u8 temp;
120
121 temp = ioread8(agp->pm + AMD_REG_GPIO(offset));
122
123 dev_dbg(&agp->pdev->dev, "Getting gpio %d, reg=%02x\n", offset, temp);
124
125 return (temp & AMD_GPIO_RTIN) ? 1 : 0;
126}
127
128static int amd_gpio_dirout(struct gpio_chip *chip, unsigned offset, int value)
129{
130 struct amd_gpio *agp = to_agp(chip);
131 u8 temp;
132 unsigned long flags;
133
134 spin_lock_irqsave(&agp->lock, flags);
135 temp = ioread8(agp->pm + AMD_REG_GPIO(offset));
136 temp = (temp & AMD_GPIO_DEBOUNCE) | AMD_GPIO_MODE_OUT | (value ? AMD_GPIO_X_OUT_HI : AMD_GPIO_X_OUT_LOW);
137 iowrite8(temp, agp->pm + AMD_REG_GPIO(offset));
138 spin_unlock_irqrestore(&agp->lock, flags);
139
140 dev_dbg(&agp->pdev->dev, "Dirout gpio %d, value %d, reg=%02x\n", offset, !!value, temp);
141
142 return 0;
143}
144
145static int amd_gpio_dirin(struct gpio_chip *chip, unsigned offset)
146{
147 struct amd_gpio *agp = to_agp(chip);
148 u8 temp;
149 unsigned long flags;
150
151 spin_lock_irqsave(&agp->lock, flags);
152 temp = ioread8(agp->pm + AMD_REG_GPIO(offset));
153 temp = (temp & AMD_GPIO_DEBOUNCE) | AMD_GPIO_MODE_IN;
154 iowrite8(temp, agp->pm + AMD_REG_GPIO(offset));
155 spin_unlock_irqrestore(&agp->lock, flags);
156
157 dev_dbg(&agp->pdev->dev, "Dirin gpio %d, reg=%02x\n", offset, temp);
158
159 return 0;
160}
161
162static struct amd_gpio gp = {
163 .chip = {
164 .label = "AMD GPIO",
165 .owner = THIS_MODULE,
166 .base = -1,
167 .ngpio = 32,
168 .request = amd_gpio_request,
169 .free = amd_gpio_free,
170 .set = amd_gpio_set,
171 .get = amd_gpio_get,
172 .direction_output = amd_gpio_dirout,
173 .direction_input = amd_gpio_dirin,
174 },
175};
176
177static int __init amd_gpio_init(void)
178{
179 int err = -ENODEV;
180 struct pci_dev *pdev = NULL;
181 const struct pci_device_id *ent;
182
183
184	/* We look for our device - the AMD south bridge.
185	 * I don't know of a system with two such bridges,
186	 * so we can assume there is at most one device.
187	 *
188	 * We can't use the plain pci_driver mechanism,
189	 * as the device is really a multi-function device:
190	 * the main driver that binds to the pci_dev is an SMBus
191	 * driver, so we have to find and bind to the device this way.
192 */
193 for_each_pci_dev(pdev) {
194 ent = pci_match_id(pci_tbl, pdev);
195 if (ent)
196 goto found;
197 }
198 /* Device not found. */
199 goto out;
200
201found:
202 err = pci_read_config_dword(pdev, 0x58, &gp.pmbase);
203 if (err)
204 goto out;
205 err = -EIO;
206 gp.pmbase &= 0x0000FF00;
207 if (gp.pmbase == 0)
208 goto out;
209 if (!request_region(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE, "AMD GPIO")) {
210 dev_err(&pdev->dev, "AMD GPIO region 0x%x already in use!\n",
211 gp.pmbase + PMBASE_OFFSET);
212 err = -EBUSY;
213 goto out;
214 }
215 gp.pm = ioport_map(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE);
216 gp.pdev = pdev;
217 gp.chip.dev = &pdev->dev;
218
219 spin_lock_init(&gp.lock);
220
221 printk(KERN_INFO "AMD-8111 GPIO detected\n");
222 err = gpiochip_add(&gp.chip);
223 if (err) {
224 printk(KERN_ERR "GPIO registering failed (%d)\n",
225 err);
226 release_region(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE);
227 goto out;
228 }
229out:
230 return err;
231}
232
233static void __exit amd_gpio_exit(void)
234{
235 int err = gpiochip_remove(&gp.chip);
236 WARN_ON(err);
237 ioport_unmap(gp.pm);
238 release_region(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE);
239}
240
241module_init(amd_gpio_init);
242module_exit(amd_gpio_exit);
243
244MODULE_AUTHOR("The Linux Kernel team");
245MODULE_DESCRIPTION("GPIO driver for AMD chipsets");
246MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
new file mode 100644
index 000000000000..8740d2eb06f8
--- /dev/null
+++ b/drivers/gpio/gpio-arizona.c
@@ -0,0 +1,163 @@
1/*
2 * gpiolib support for Wolfson Arizona class devices
3 *
4 * Copyright 2012 Wolfson Microelectronics PLC.
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/module.h>
18#include <linux/gpio.h>
19#include <linux/platform_device.h>
20#include <linux/seq_file.h>
21
22#include <linux/mfd/arizona/core.h>
23#include <linux/mfd/arizona/pdata.h>
24#include <linux/mfd/arizona/registers.h>
25
26struct arizona_gpio {
27 struct arizona *arizona;
28 struct gpio_chip gpio_chip;
29};
30
31static inline struct arizona_gpio *to_arizona_gpio(struct gpio_chip *chip)
32{
33 return container_of(chip, struct arizona_gpio, gpio_chip);
34}
35
36static int arizona_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
37{
38 struct arizona_gpio *arizona_gpio = to_arizona_gpio(chip);
39 struct arizona *arizona = arizona_gpio->arizona;
40
41 return regmap_update_bits(arizona->regmap, ARIZONA_GPIO1_CTRL + offset,
42 ARIZONA_GPN_DIR, ARIZONA_GPN_DIR);
43}
44
45static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
46{
47 struct arizona_gpio *arizona_gpio = to_arizona_gpio(chip);
48 struct arizona *arizona = arizona_gpio->arizona;
49 unsigned int val;
50 int ret;
51
52 ret = regmap_read(arizona->regmap, ARIZONA_GPIO1_CTRL + offset, &val);
53 if (ret < 0)
54 return ret;
55
56 if (val & ARIZONA_GPN_LVL)
57 return 1;
58 else
59 return 0;
60}
61
62static int arizona_gpio_direction_out(struct gpio_chip *chip,
63 unsigned offset, int value)
64{
65 struct arizona_gpio *arizona_gpio = to_arizona_gpio(chip);
66 struct arizona *arizona = arizona_gpio->arizona;
67
68 if (value)
69 value = ARIZONA_GPN_LVL;
70
71 return regmap_update_bits(arizona->regmap, ARIZONA_GPIO1_CTRL + offset,
72 ARIZONA_GPN_DIR | ARIZONA_GPN_LVL, value);
73}
74
75static void arizona_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
76{
77 struct arizona_gpio *arizona_gpio = to_arizona_gpio(chip);
78 struct arizona *arizona = arizona_gpio->arizona;
79
80 if (value)
81 value = ARIZONA_GPN_LVL;
82
83 regmap_update_bits(arizona->regmap, ARIZONA_GPIO1_CTRL + offset,
84 ARIZONA_GPN_LVL, value);
85}
86
87static struct gpio_chip template_chip = {
88 .label = "arizona",
89 .owner = THIS_MODULE,
90 .direction_input = arizona_gpio_direction_in,
91 .get = arizona_gpio_get,
92 .direction_output = arizona_gpio_direction_out,
93 .set = arizona_gpio_set,
94 .can_sleep = 1,
95};
96
97static int __devinit arizona_gpio_probe(struct platform_device *pdev)
98{
99 struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
100 struct arizona_pdata *pdata = arizona->dev->platform_data;
101 struct arizona_gpio *arizona_gpio;
102 int ret;
103
104 arizona_gpio = devm_kzalloc(&pdev->dev, sizeof(*arizona_gpio),
105 GFP_KERNEL);
106 if (arizona_gpio == NULL)
107 return -ENOMEM;
108
109 arizona_gpio->arizona = arizona;
110 arizona_gpio->gpio_chip = template_chip;
111 arizona_gpio->gpio_chip.dev = &pdev->dev;
112
113 switch (arizona->type) {
114 case WM5102:
115 case WM5110:
116 arizona_gpio->gpio_chip.ngpio = 5;
117 break;
118 default:
119 dev_err(&pdev->dev, "Unknown chip variant %d\n",
120 arizona->type);
121 return -EINVAL;
122 }
123
124 if (pdata && pdata->gpio_base)
125 arizona_gpio->gpio_chip.base = pdata->gpio_base;
126 else
127 arizona_gpio->gpio_chip.base = -1;
128
129 ret = gpiochip_add(&arizona_gpio->gpio_chip);
130 if (ret < 0) {
131 dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
132 ret);
133 goto err;
134 }
135
136 platform_set_drvdata(pdev, arizona_gpio);
137
138 return ret;
139
140err:
141 return ret;
142}
143
144static int __devexit arizona_gpio_remove(struct platform_device *pdev)
145{
146 struct arizona_gpio *arizona_gpio = platform_get_drvdata(pdev);
147
148 return gpiochip_remove(&arizona_gpio->gpio_chip);
149}
150
151static struct platform_driver arizona_gpio_driver = {
152 .driver.name = "arizona-gpio",
153 .driver.owner = THIS_MODULE,
154 .probe = arizona_gpio_probe,
155 .remove = __devexit_p(arizona_gpio_remove),
156};
157
158module_platform_driver(arizona_gpio_driver);
159
160MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
161MODULE_DESCRIPTION("GPIO interface for Arizona devices");
162MODULE_LICENSE("GPL");
163MODULE_ALIAS("platform:arizona-gpio");
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 150d9768811d..ae37181798b3 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -266,7 +266,7 @@ static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p)
266 return 0; 266 return 0;
267} 267}
268 268
269static void __devexit em_gio_irq_domain_cleanup(struct em_gio_priv *p) 269static void em_gio_irq_domain_cleanup(struct em_gio_priv *p)
270{ 270{
271 struct gpio_em_config *pdata = p->pdev->dev.platform_data; 271 struct gpio_em_config *pdata = p->pdev->dev.platform_data;
272 272
diff --git a/drivers/gpio/gpio-langwell.c b/drivers/gpio/gpio-langwell.c
index a1c8754f52cf..202a99207b7d 100644
--- a/drivers/gpio/gpio-langwell.c
+++ b/drivers/gpio/gpio-langwell.c
@@ -339,7 +339,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
339 resource_size_t start, len; 339 resource_size_t start, len;
340 struct lnw_gpio *lnw; 340 struct lnw_gpio *lnw;
341 u32 gpio_base; 341 u32 gpio_base;
342 int retval = 0; 342 int retval;
343 int ngpio = id->driver_data; 343 int ngpio = id->driver_data;
344 344
345 retval = pci_enable_device(pdev); 345 retval = pci_enable_device(pdev);
@@ -357,6 +357,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
357 base = ioremap_nocache(start, len); 357 base = ioremap_nocache(start, len);
358 if (!base) { 358 if (!base) {
359 dev_err(&pdev->dev, "error mapping bar1\n"); 359 dev_err(&pdev->dev, "error mapping bar1\n");
360 retval = -EFAULT;
360 goto err3; 361 goto err3;
361 } 362 }
362 gpio_base = *((u32 *)base + 1); 363 gpio_base = *((u32 *)base + 1);
@@ -381,8 +382,10 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
381 382
382 lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio, 383 lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio,
383 &lnw_gpio_irq_ops, lnw); 384 &lnw_gpio_irq_ops, lnw);
384 if (!lnw->domain) 385 if (!lnw->domain) {
386 retval = -ENOMEM;
385 goto err3; 387 goto err3;
388 }
386 389
387 lnw->reg_base = base; 390 lnw->reg_base = base;
388 lnw->chip.label = dev_name(&pdev->dev); 391 lnw->chip.label = dev_name(&pdev->dev);
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index c2199beca98a..8a420f13905e 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/arm/mach-lpc32xx/gpiolib.c 2 * GPIO driver for LPC32xx SoC
3 * 3 *
4 * Author: Kevin Wells <kevin.wells@nxp.com> 4 * Author: Kevin Wells <kevin.wells@nxp.com>
5 * 5 *
@@ -28,6 +28,7 @@
28#include <mach/hardware.h> 28#include <mach/hardware.h>
29#include <mach/platform.h> 29#include <mach/platform.h>
30#include <mach/gpio-lpc32xx.h> 30#include <mach/gpio-lpc32xx.h>
31#include <mach/irqs.h>
31 32
32#define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000) 33#define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000)
33#define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004) 34#define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004)
@@ -367,6 +368,66 @@ static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin)
367 return -EINVAL; 368 return -EINVAL;
368} 369}
369 370
371static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset)
372{
373 return IRQ_LPC32XX_P0_P1_IRQ;
374}
375
376static const char lpc32xx_gpio_to_irq_gpio_p3_table[] = {
377 IRQ_LPC32XX_GPIO_00,
378 IRQ_LPC32XX_GPIO_01,
379 IRQ_LPC32XX_GPIO_02,
380 IRQ_LPC32XX_GPIO_03,
381 IRQ_LPC32XX_GPIO_04,
382 IRQ_LPC32XX_GPIO_05,
383};
384
385static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset)
386{
387 if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpio_p3_table))
388 return lpc32xx_gpio_to_irq_gpio_p3_table[offset];
389 return -ENXIO;
390}
391
392static const char lpc32xx_gpio_to_irq_gpi_p3_table[] = {
393 IRQ_LPC32XX_GPI_00,
394 IRQ_LPC32XX_GPI_01,
395 IRQ_LPC32XX_GPI_02,
396 IRQ_LPC32XX_GPI_03,
397 IRQ_LPC32XX_GPI_04,
398 IRQ_LPC32XX_GPI_05,
399 IRQ_LPC32XX_GPI_06,
400 IRQ_LPC32XX_GPI_07,
401 IRQ_LPC32XX_GPI_08,
402 IRQ_LPC32XX_GPI_09,
403 -ENXIO, /* 10 */
404 -ENXIO, /* 11 */
405 -ENXIO, /* 12 */
406 -ENXIO, /* 13 */
407 -ENXIO, /* 14 */
408 -ENXIO, /* 15 */
409 -ENXIO, /* 16 */
410 -ENXIO, /* 17 */
411 -ENXIO, /* 18 */
412 IRQ_LPC32XX_GPI_19,
413 -ENXIO, /* 20 */
414 -ENXIO, /* 21 */
415 -ENXIO, /* 22 */
416 -ENXIO, /* 23 */
417 -ENXIO, /* 24 */
418 -ENXIO, /* 25 */
419 -ENXIO, /* 26 */
420 -ENXIO, /* 27 */
421 IRQ_LPC32XX_GPI_28,
422};
423
424static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset)
425{
426 if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpi_p3_table))
427 return lpc32xx_gpio_to_irq_gpi_p3_table[offset];
428 return -ENXIO;
429}
430
370static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = { 431static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
371 { 432 {
372 .chip = { 433 .chip = {
@@ -376,6 +437,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
376 .direction_output = lpc32xx_gpio_dir_output_p012, 437 .direction_output = lpc32xx_gpio_dir_output_p012,
377 .set = lpc32xx_gpio_set_value_p012, 438 .set = lpc32xx_gpio_set_value_p012,
378 .request = lpc32xx_gpio_request, 439 .request = lpc32xx_gpio_request,
440 .to_irq = lpc32xx_gpio_to_irq_p01,
379 .base = LPC32XX_GPIO_P0_GRP, 441 .base = LPC32XX_GPIO_P0_GRP,
380 .ngpio = LPC32XX_GPIO_P0_MAX, 442 .ngpio = LPC32XX_GPIO_P0_MAX,
381 .names = gpio_p0_names, 443 .names = gpio_p0_names,
@@ -391,6 +453,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
391 .direction_output = lpc32xx_gpio_dir_output_p012, 453 .direction_output = lpc32xx_gpio_dir_output_p012,
392 .set = lpc32xx_gpio_set_value_p012, 454 .set = lpc32xx_gpio_set_value_p012,
393 .request = lpc32xx_gpio_request, 455 .request = lpc32xx_gpio_request,
456 .to_irq = lpc32xx_gpio_to_irq_p01,
394 .base = LPC32XX_GPIO_P1_GRP, 457 .base = LPC32XX_GPIO_P1_GRP,
395 .ngpio = LPC32XX_GPIO_P1_MAX, 458 .ngpio = LPC32XX_GPIO_P1_MAX,
396 .names = gpio_p1_names, 459 .names = gpio_p1_names,
@@ -421,6 +484,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
421 .direction_output = lpc32xx_gpio_dir_output_p3, 484 .direction_output = lpc32xx_gpio_dir_output_p3,
422 .set = lpc32xx_gpio_set_value_p3, 485 .set = lpc32xx_gpio_set_value_p3,
423 .request = lpc32xx_gpio_request, 486 .request = lpc32xx_gpio_request,
487 .to_irq = lpc32xx_gpio_to_irq_gpio_p3,
424 .base = LPC32XX_GPIO_P3_GRP, 488 .base = LPC32XX_GPIO_P3_GRP,
425 .ngpio = LPC32XX_GPIO_P3_MAX, 489 .ngpio = LPC32XX_GPIO_P3_MAX,
426 .names = gpio_p3_names, 490 .names = gpio_p3_names,
@@ -434,6 +498,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
434 .direction_input = lpc32xx_gpio_dir_in_always, 498 .direction_input = lpc32xx_gpio_dir_in_always,
435 .get = lpc32xx_gpi_get_value, 499 .get = lpc32xx_gpi_get_value,
436 .request = lpc32xx_gpio_request, 500 .request = lpc32xx_gpio_request,
501 .to_irq = lpc32xx_gpio_to_irq_gpi_p3,
437 .base = LPC32XX_GPI_P3_GRP, 502 .base = LPC32XX_GPI_P3_GRP,
438 .ngpio = LPC32XX_GPI_P3_MAX, 503 .ngpio = LPC32XX_GPI_P3_MAX,
439 .names = gpi_p3_names, 504 .names = gpi_p3_names,
@@ -457,13 +522,6 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
457 }, 522 },
458}; 523};
459 524
460/* Empty now, can be removed later when mach-lpc32xx is finally switched over
461 * to DT support
462 */
463void __init lpc32xx_gpio_init(void)
464{
465}
466
467static int lpc32xx_of_xlate(struct gpio_chip *gc, 525static int lpc32xx_of_xlate(struct gpio_chip *gc,
468 const struct of_phandle_args *gpiospec, u32 *flags) 526 const struct of_phandle_args *gpiospec, u32 *flags)
469{ 527{
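
With .to_irq wired up, gpio_to_irq() on these banks becomes a bounds-checked table lookup, with -ENXIO marking holes. A sketch of the same shape; the sketch uses int for the table, which holds negative errnos safely regardless of the platform's char signedness, and the IRQ numbers are hypothetical:

#include <linux/errno.h>
#include <linux/kernel.h>

static const int my_irq_table[] = {
	10, 11, -ENXIO, 13,	/* hole at offset 2 */
};

static int my_to_irq(unsigned offset)
{
	if (offset < ARRAY_SIZE(my_irq_table))
		return my_irq_table[offset];
	return -ENXIO;
}
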
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
index 71a838f44501..b38986285868 100644
--- a/drivers/gpio/gpio-msic.c
+++ b/drivers/gpio/gpio-msic.c
@@ -99,7 +99,7 @@ static int msic_gpio_to_oreg(unsigned offset)
99 if (offset < 20) 99 if (offset < 20)
100 return INTEL_MSIC_GPIO0HV0CTLO - offset + 16; 100 return INTEL_MSIC_GPIO0HV0CTLO - offset + 16;
101 101
102 return INTEL_MSIC_GPIO1HV0CTLO + offset + 20; 102 return INTEL_MSIC_GPIO1HV0CTLO - offset + 20;
103} 103}
104 104
105static int msic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) 105static int msic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
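
The one-character msic fix matters because both high-voltage banks index their control registers downward from a per-bank base. A sketch of that offset arithmetic with hypothetical register addresses:

/* Hypothetical register addresses for two downward-indexed banks. */
#define MY_BANK0_BASE	0x48	/* serves offsets 16..19 */
#define MY_BANK1_BASE	0x4c	/* serves offsets 20 and up */

static int my_gpio_to_reg(unsigned offset)	/* caller ensures >= 16 */
{
	if (offset < 20)
		return MY_BANK0_BASE - (offset - 16);	/* base - delta */
	return MY_BANK1_BASE - (offset - 20);
}
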
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index c337143b18f8..80f44bb64a87 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -23,6 +23,7 @@
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/io.h> 24#include <linux/io.h>
25#include <linux/irq.h> 25#include <linux/irq.h>
26#include <linux/irqdomain.h>
26#include <linux/gpio.h> 27#include <linux/gpio.h>
27#include <linux/platform_device.h> 28#include <linux/platform_device.h>
28#include <linux/slab.h> 29#include <linux/slab.h>
@@ -33,12 +34,11 @@
33#include <asm-generic/bug.h> 34#include <asm-generic/bug.h>
34#include <asm/mach/irq.h> 35#include <asm/mach/irq.h>
35 36
36#define irq_to_gpio(irq) ((irq) - MXC_GPIO_IRQ_START)
37
38enum mxc_gpio_hwtype { 37enum mxc_gpio_hwtype {
39 IMX1_GPIO, /* runs on i.mx1 */ 38 IMX1_GPIO, /* runs on i.mx1 */
40 IMX21_GPIO, /* runs on i.mx21 and i.mx27 */ 39 IMX21_GPIO, /* runs on i.mx21 and i.mx27 */
41 IMX31_GPIO, /* runs on all other i.mx */ 40 IMX31_GPIO, /* runs on i.mx31 */
41 IMX35_GPIO, /* runs on all other i.mx */
42}; 42};
43 43
44/* device type dependent stuff */ 44/* device type dependent stuff */
@@ -50,6 +50,7 @@ struct mxc_gpio_hwdata {
50 unsigned icr2_reg; 50 unsigned icr2_reg;
51 unsigned imr_reg; 51 unsigned imr_reg;
52 unsigned isr_reg; 52 unsigned isr_reg;
53 int edge_sel_reg;
53 unsigned low_level; 54 unsigned low_level;
54 unsigned high_level; 55 unsigned high_level;
55 unsigned rise_edge; 56 unsigned rise_edge;
@@ -61,7 +62,7 @@ struct mxc_gpio_port {
61 void __iomem *base; 62 void __iomem *base;
62 int irq; 63 int irq;
63 int irq_high; 64 int irq_high;
64 int virtual_irq_start; 65 struct irq_domain *domain;
65 struct bgpio_chip bgc; 66 struct bgpio_chip bgc;
66 u32 both_edges; 67 u32 both_edges;
67}; 68};
@@ -74,6 +75,7 @@ static struct mxc_gpio_hwdata imx1_imx21_gpio_hwdata = {
74 .icr2_reg = 0x2c, 75 .icr2_reg = 0x2c,
75 .imr_reg = 0x30, 76 .imr_reg = 0x30,
76 .isr_reg = 0x34, 77 .isr_reg = 0x34,
78 .edge_sel_reg = -EINVAL,
77 .low_level = 0x03, 79 .low_level = 0x03,
78 .high_level = 0x02, 80 .high_level = 0x02,
79 .rise_edge = 0x00, 81 .rise_edge = 0x00,
@@ -88,6 +90,22 @@ static struct mxc_gpio_hwdata imx31_gpio_hwdata = {
88 .icr2_reg = 0x10, 90 .icr2_reg = 0x10,
89 .imr_reg = 0x14, 91 .imr_reg = 0x14,
90 .isr_reg = 0x18, 92 .isr_reg = 0x18,
93 .edge_sel_reg = -EINVAL,
94 .low_level = 0x00,
95 .high_level = 0x01,
96 .rise_edge = 0x02,
97 .fall_edge = 0x03,
98};
99
100static struct mxc_gpio_hwdata imx35_gpio_hwdata = {
101 .dr_reg = 0x00,
102 .gdir_reg = 0x04,
103 .psr_reg = 0x08,
104 .icr1_reg = 0x0c,
105 .icr2_reg = 0x10,
106 .imr_reg = 0x14,
107 .isr_reg = 0x18,
108 .edge_sel_reg = 0x1c,
91 .low_level = 0x00, 109 .low_level = 0x00,
92 .high_level = 0x01, 110 .high_level = 0x01,
93 .rise_edge = 0x02, 111 .rise_edge = 0x02,
@@ -104,12 +122,13 @@ static struct mxc_gpio_hwdata *mxc_gpio_hwdata;
104#define GPIO_ICR2 (mxc_gpio_hwdata->icr2_reg) 122#define GPIO_ICR2 (mxc_gpio_hwdata->icr2_reg)
105#define GPIO_IMR (mxc_gpio_hwdata->imr_reg) 123#define GPIO_IMR (mxc_gpio_hwdata->imr_reg)
106#define GPIO_ISR (mxc_gpio_hwdata->isr_reg) 124#define GPIO_ISR (mxc_gpio_hwdata->isr_reg)
125#define GPIO_EDGE_SEL (mxc_gpio_hwdata->edge_sel_reg)
107 126
108#define GPIO_INT_LOW_LEV (mxc_gpio_hwdata->low_level) 127#define GPIO_INT_LOW_LEV (mxc_gpio_hwdata->low_level)
109#define GPIO_INT_HIGH_LEV (mxc_gpio_hwdata->high_level) 128#define GPIO_INT_HIGH_LEV (mxc_gpio_hwdata->high_level)
110#define GPIO_INT_RISE_EDGE (mxc_gpio_hwdata->rise_edge) 129#define GPIO_INT_RISE_EDGE (mxc_gpio_hwdata->rise_edge)
111#define GPIO_INT_FALL_EDGE (mxc_gpio_hwdata->fall_edge) 130#define GPIO_INT_FALL_EDGE (mxc_gpio_hwdata->fall_edge)
112#define GPIO_INT_NONE 0x4 131#define GPIO_INT_BOTH_EDGES 0x4
113 132
114static struct platform_device_id mxc_gpio_devtype[] = { 133static struct platform_device_id mxc_gpio_devtype[] = {
115 { 134 {
@@ -122,6 +141,9 @@ static struct platform_device_id mxc_gpio_devtype[] = {
122 .name = "imx31-gpio", 141 .name = "imx31-gpio",
123 .driver_data = IMX31_GPIO, 142 .driver_data = IMX31_GPIO,
124 }, { 143 }, {
144 .name = "imx35-gpio",
145 .driver_data = IMX35_GPIO,
146 }, {
125 /* sentinel */ 147 /* sentinel */
126 } 148 }
127}; 149};
@@ -130,6 +152,7 @@ static const struct of_device_id mxc_gpio_dt_ids[] = {
130 { .compatible = "fsl,imx1-gpio", .data = &mxc_gpio_devtype[IMX1_GPIO], }, 152 { .compatible = "fsl,imx1-gpio", .data = &mxc_gpio_devtype[IMX1_GPIO], },
131 { .compatible = "fsl,imx21-gpio", .data = &mxc_gpio_devtype[IMX21_GPIO], }, 153 { .compatible = "fsl,imx21-gpio", .data = &mxc_gpio_devtype[IMX21_GPIO], },
132 { .compatible = "fsl,imx31-gpio", .data = &mxc_gpio_devtype[IMX31_GPIO], }, 154 { .compatible = "fsl,imx31-gpio", .data = &mxc_gpio_devtype[IMX31_GPIO], },
155 { .compatible = "fsl,imx35-gpio", .data = &mxc_gpio_devtype[IMX35_GPIO], },
133 { /* sentinel */ } 156 { /* sentinel */ }
134}; 157};
135 158
@@ -144,14 +167,15 @@ static LIST_HEAD(mxc_gpio_ports);
144 167
145static int gpio_set_irq_type(struct irq_data *d, u32 type) 168static int gpio_set_irq_type(struct irq_data *d, u32 type)
146{ 169{
147 u32 gpio = irq_to_gpio(d->irq);
148 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 170 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
149 struct mxc_gpio_port *port = gc->private; 171 struct mxc_gpio_port *port = gc->private;
150 u32 bit, val; 172 u32 bit, val;
173 u32 gpio_idx = d->hwirq;
174 u32 gpio = port->bgc.gc.base + gpio_idx;
151 int edge; 175 int edge;
152 void __iomem *reg = port->base; 176 void __iomem *reg = port->base;
153 177
154 port->both_edges &= ~(1 << (gpio & 31)); 178 port->both_edges &= ~(1 << gpio_idx);
155 switch (type) { 179 switch (type) {
156 case IRQ_TYPE_EDGE_RISING: 180 case IRQ_TYPE_EDGE_RISING:
157 edge = GPIO_INT_RISE_EDGE; 181 edge = GPIO_INT_RISE_EDGE;
@@ -160,15 +184,19 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
160 edge = GPIO_INT_FALL_EDGE; 184 edge = GPIO_INT_FALL_EDGE;
161 break; 185 break;
162 case IRQ_TYPE_EDGE_BOTH: 186 case IRQ_TYPE_EDGE_BOTH:
163 val = gpio_get_value(gpio); 187 if (GPIO_EDGE_SEL >= 0) {
164 if (val) { 188 edge = GPIO_INT_BOTH_EDGES;
165 edge = GPIO_INT_LOW_LEV;
166 pr_debug("mxc: set GPIO %d to low trigger\n", gpio);
167 } else { 189 } else {
168 edge = GPIO_INT_HIGH_LEV; 190 val = gpio_get_value(gpio);
169 pr_debug("mxc: set GPIO %d to high trigger\n", gpio); 191 if (val) {
192 edge = GPIO_INT_LOW_LEV;
193 pr_debug("mxc: set GPIO %d to low trigger\n", gpio);
194 } else {
195 edge = GPIO_INT_HIGH_LEV;
196 pr_debug("mxc: set GPIO %d to high trigger\n", gpio);
197 }
198 port->both_edges |= 1 << gpio_idx;
170 } 199 }
171 port->both_edges |= 1 << (gpio & 31);
172 break; 200 break;
173 case IRQ_TYPE_LEVEL_LOW: 201 case IRQ_TYPE_LEVEL_LOW:
174 edge = GPIO_INT_LOW_LEV; 202 edge = GPIO_INT_LOW_LEV;
@@ -180,11 +208,24 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
180 return -EINVAL; 208 return -EINVAL;
181 } 209 }
182 210
183 reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */ 211 if (GPIO_EDGE_SEL >= 0) {
184 bit = gpio & 0xf; 212 val = readl(port->base + GPIO_EDGE_SEL);
185 val = readl(reg) & ~(0x3 << (bit << 1)); 213 if (edge == GPIO_INT_BOTH_EDGES)
186 writel(val | (edge << (bit << 1)), reg); 214 writel(val | (1 << gpio_idx),
187 writel(1 << (gpio & 0x1f), port->base + GPIO_ISR); 215 port->base + GPIO_EDGE_SEL);
216 else
217 writel(val & ~(1 << gpio_idx),
218 port->base + GPIO_EDGE_SEL);
219 }
220
221 if (edge != GPIO_INT_BOTH_EDGES) {
222 reg += GPIO_ICR1 + ((gpio_idx & 0x10) >> 2); /* lower or upper register */
223 bit = gpio_idx & 0xf;
224 val = readl(reg) & ~(0x3 << (bit << 1));
225 writel(val | (edge << (bit << 1)), reg);
226 }
227
228 writel(1 << gpio_idx, port->base + GPIO_ISR);
188 229
189 return 0; 230 return 0;
190} 231}
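
The reworked gpio_set_irq_type() prefers the i.MX35's EDGE_SEL register, where one bit per line makes the hardware fire on either edge, and falls back to the old emulation (arm the opposite level, flip it in the handler via mxc_flip_edge()) on parts without it. The decision, reduced to a sketch with hypothetical names:

#include <linux/io.h>
#include <linux/types.h>

static void my_set_both_edges(void __iomem *edge_sel, bool has_edge_sel,
			      u32 *both_edges, unsigned idx)
{
	if (has_edge_sel) {
		/* One bit per line: the block triggers on either edge. */
		writel(readl(edge_sel) | (1 << idx), edge_sel);
	} else {
		/* No such register: arm the opposite level and let the
		 * IRQ handler flip it after every event (mxc_flip_edge).
		 */
		*both_edges |= 1 << idx;
	}
}
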
@@ -217,15 +258,13 @@ static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
217/* handle 32 interrupts in one status register */ 258/* handle 32 interrupts in one status register */
218static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat) 259static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat)
219{ 260{
220 u32 gpio_irq_no_base = port->virtual_irq_start;
221
222 while (irq_stat != 0) { 261 while (irq_stat != 0) {
223 int irqoffset = fls(irq_stat) - 1; 262 int irqoffset = fls(irq_stat) - 1;
224 263
225 if (port->both_edges & (1 << irqoffset)) 264 if (port->both_edges & (1 << irqoffset))
226 mxc_flip_edge(port, irqoffset); 265 mxc_flip_edge(port, irqoffset);
227 266
228 generic_handle_irq(gpio_irq_no_base + irqoffset); 267 generic_handle_irq(irq_find_mapping(port->domain, irqoffset));
229 268
230 irq_stat &= ~(1 << irqoffset); 269 irq_stat &= ~(1 << irqoffset);
231 } 270 }
@@ -276,10 +315,9 @@ static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc)
276 */ 315 */
277static int gpio_set_wake_irq(struct irq_data *d, u32 enable) 316static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
278{ 317{
279 u32 gpio = irq_to_gpio(d->irq);
280 u32 gpio_idx = gpio & 0x1F;
281 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 318 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
282 struct mxc_gpio_port *port = gc->private; 319 struct mxc_gpio_port *port = gc->private;
320 u32 gpio_idx = d->hwirq;
283 321
284 if (enable) { 322 if (enable) {
285 if (port->irq_high && (gpio_idx >= 16)) 323 if (port->irq_high && (gpio_idx >= 16))
@@ -296,12 +334,12 @@ static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
296 return 0; 334 return 0;
297} 335}
298 336
299static void __init mxc_gpio_init_gc(struct mxc_gpio_port *port) 337static void __init mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
300{ 338{
301 struct irq_chip_generic *gc; 339 struct irq_chip_generic *gc;
302 struct irq_chip_type *ct; 340 struct irq_chip_type *ct;
303 341
304 gc = irq_alloc_generic_chip("gpio-mxc", 1, port->virtual_irq_start, 342 gc = irq_alloc_generic_chip("gpio-mxc", 1, irq_base,
305 port->base, handle_level_irq); 343 port->base, handle_level_irq);
306 gc->private = port; 344 gc->private = port;
307 345
@@ -338,7 +376,9 @@ static void __devinit mxc_gpio_get_hw(struct platform_device *pdev)
338 return; 376 return;
339 } 377 }
340 378
341 if (hwtype == IMX31_GPIO) 379 if (hwtype == IMX35_GPIO)
380 mxc_gpio_hwdata = &imx35_gpio_hwdata;
381 else if (hwtype == IMX31_GPIO)
342 mxc_gpio_hwdata = &imx31_gpio_hwdata; 382 mxc_gpio_hwdata = &imx31_gpio_hwdata;
343 else 383 else
344 mxc_gpio_hwdata = &imx1_imx21_gpio_hwdata; 384 mxc_gpio_hwdata = &imx1_imx21_gpio_hwdata;
@@ -352,7 +392,7 @@ static int mxc_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
352 struct mxc_gpio_port *port = 392 struct mxc_gpio_port *port =
353 container_of(bgc, struct mxc_gpio_port, bgc); 393 container_of(bgc, struct mxc_gpio_port, bgc);
354 394
355 return port->virtual_irq_start + offset; 395 return irq_find_mapping(port->domain, offset);
356} 396}
357 397
358static int __devinit mxc_gpio_probe(struct platform_device *pdev) 398static int __devinit mxc_gpio_probe(struct platform_device *pdev)
@@ -360,6 +400,7 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
360 struct device_node *np = pdev->dev.of_node; 400 struct device_node *np = pdev->dev.of_node;
361 struct mxc_gpio_port *port; 401 struct mxc_gpio_port *port;
362 struct resource *iores; 402 struct resource *iores;
403 int irq_base;
363 int err; 404 int err;
364 405
365 mxc_gpio_get_hw(pdev); 406 mxc_gpio_get_hw(pdev);
@@ -398,10 +439,12 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
398 writel(~0, port->base + GPIO_ISR); 439 writel(~0, port->base + GPIO_ISR);
399 440
400 if (mxc_gpio_hwtype == IMX21_GPIO) { 441 if (mxc_gpio_hwtype == IMX21_GPIO) {
401 /* setup one handler for all GPIO interrupts */ 442 /*
402 if (pdev->id == 0) 443 * Setup one handler for all GPIO interrupts. Actually setting
403 irq_set_chained_handler(port->irq, 444 * the handler is needed only once, but doing it for every port
404 mx2_gpio_irq_handler); 445 * is more robust and easier.
446 */
447 irq_set_chained_handler(port->irq, mx2_gpio_irq_handler);
405 } else { 448 } else {
406 /* setup one handler for each entry */ 449 /* setup one handler for each entry */
407 irq_set_chained_handler(port->irq, mx3_gpio_irq_handler); 450 irq_set_chained_handler(port->irq, mx3_gpio_irq_handler);
@@ -422,28 +465,37 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
422 goto out_iounmap; 465 goto out_iounmap;
423 466
424 port->bgc.gc.to_irq = mxc_gpio_to_irq; 467 port->bgc.gc.to_irq = mxc_gpio_to_irq;
425 port->bgc.gc.base = pdev->id * 32; 468 port->bgc.gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 :
426 port->bgc.dir = port->bgc.read_reg(port->bgc.reg_dir); 469 pdev->id * 32;
427 port->bgc.data = port->bgc.read_reg(port->bgc.reg_set);
428 470
429 err = gpiochip_add(&port->bgc.gc); 471 err = gpiochip_add(&port->bgc.gc);
430 if (err) 472 if (err)
431 goto out_bgpio_remove; 473 goto out_bgpio_remove;
432 474
433 /* 475 irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
434 * In dt case, we use gpio number range dynamically 476 if (irq_base < 0) {
435 * allocated by gpio core. 477 err = irq_base;
436 */ 478 goto out_gpiochip_remove;
437 port->virtual_irq_start = MXC_GPIO_IRQ_START + (np ? port->bgc.gc.base : 479 }
438 pdev->id * 32); 480
481 port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
482 &irq_domain_simple_ops, NULL);
483 if (!port->domain) {
484 err = -ENODEV;
485 goto out_irqdesc_free;
486 }
439 487
440 /* gpio-mxc can be a generic irq chip */ 488 /* gpio-mxc can be a generic irq chip */
441 mxc_gpio_init_gc(port); 489 mxc_gpio_init_gc(port, irq_base);
442 490
443 list_add_tail(&port->node, &mxc_gpio_ports); 491 list_add_tail(&port->node, &mxc_gpio_ports);
444 492
445 return 0; 493 return 0;
446 494
495out_irqdesc_free:
496 irq_free_descs(irq_base, 32);
497out_gpiochip_remove:
498 WARN_ON(gpiochip_remove(&port->bgc.gc) < 0);
447out_bgpio_remove: 499out_bgpio_remove:
448 bgpio_remove(&port->bgc); 500 bgpio_remove(&port->bgc);
449out_iounmap: 501out_iounmap:
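
The probe path now allocates a contiguous range of IRQ descriptors and wraps it in a legacy domain, so hwirq n maps to irq_base + n and irq_find_mapping() in the handlers stays cheap. A sketch of that setup, with a hypothetical my_setup_domain():

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/topology.h>

static struct irq_domain *my_setup_domain(struct device_node *np, int nr)
{
	struct irq_domain *domain;
	int irq_base = irq_alloc_descs(-1, 0, nr, numa_node_id());

	if (irq_base < 0)
		return NULL;
	domain = irq_domain_add_legacy(np, nr, irq_base, 0,
				       &irq_domain_simple_ops, NULL);
	if (!domain)
		irq_free_descs(irq_base, nr);	/* undo on failure */
	return domain;
}
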
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index c4ed1722734c..e6efd77668f0 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -174,12 +174,22 @@ static inline void _gpio_dbck_enable(struct gpio_bank *bank)
174 if (bank->dbck_enable_mask && !bank->dbck_enabled) { 174 if (bank->dbck_enable_mask && !bank->dbck_enabled) {
175 clk_enable(bank->dbck); 175 clk_enable(bank->dbck);
176 bank->dbck_enabled = true; 176 bank->dbck_enabled = true;
177
178 __raw_writel(bank->dbck_enable_mask,
179 bank->base + bank->regs->debounce_en);
177 } 180 }
178} 181}
179 182
180static inline void _gpio_dbck_disable(struct gpio_bank *bank) 183static inline void _gpio_dbck_disable(struct gpio_bank *bank)
181{ 184{
182 if (bank->dbck_enable_mask && bank->dbck_enabled) { 185 if (bank->dbck_enable_mask && bank->dbck_enabled) {
186 /*
187 * Disable debounce before cutting it's clock. If debounce is
188 * enabled but the clock is not, GPIO module seems to be unable
189 * to detect events and generate interrupts at least on OMAP3.
190 */
191 __raw_writel(0, bank->base + bank->regs->debounce_en);
192
183 clk_disable(bank->dbck); 193 clk_disable(bank->dbck);
184 bank->dbck_enabled = false; 194 bank->dbck_enabled = false;
185 } 195 }
@@ -889,12 +899,6 @@ static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
889 899
890 bank = container_of(chip, struct gpio_bank, chip); 900 bank = container_of(chip, struct gpio_bank, chip);
891 901
892 if (!bank->dbck) {
893 bank->dbck = clk_get(bank->dev, "dbclk");
894 if (IS_ERR(bank->dbck))
895 dev_err(bank->dev, "Could not get gpio dbck\n");
896 }
897
898 spin_lock_irqsave(&bank->lock, flags); 902 spin_lock_irqsave(&bank->lock, flags);
899 _set_gpio_debounce(bank, offset, debounce); 903 _set_gpio_debounce(bank, offset, debounce);
900 spin_unlock_irqrestore(&bank->lock, flags); 904 spin_unlock_irqrestore(&bank->lock, flags);
@@ -966,6 +970,10 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
966 /* Initialize interface clk ungated, module enabled */ 970 /* Initialize interface clk ungated, module enabled */
967 if (bank->regs->ctrl) 971 if (bank->regs->ctrl)
968 __raw_writel(0, base + bank->regs->ctrl); 972 __raw_writel(0, base + bank->regs->ctrl);
973
974 bank->dbck = clk_get(bank->dev, "dbclk");
975 if (IS_ERR(bank->dbck))
976 dev_err(bank->dev, "Could not get gpio dbck\n");
969} 977}
970 978
971static __devinit void 979static __devinit void
@@ -1081,7 +1089,6 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
1081 bank->is_mpuio = pdata->is_mpuio; 1089 bank->is_mpuio = pdata->is_mpuio;
1082 bank->non_wakeup_gpios = pdata->non_wakeup_gpios; 1090 bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
1083 bank->loses_context = pdata->loses_context; 1091 bank->loses_context = pdata->loses_context;
1084 bank->get_context_loss_count = pdata->get_context_loss_count;
1085 bank->regs = pdata->regs; 1092 bank->regs = pdata->regs;
1086#ifdef CONFIG_OF_GPIO 1093#ifdef CONFIG_OF_GPIO
1087 bank->chip.of_node = of_node_get(node); 1094 bank->chip.of_node = of_node_get(node);
@@ -1135,6 +1142,9 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
1135 omap_gpio_chip_init(bank); 1142 omap_gpio_chip_init(bank);
1136 omap_gpio_show_rev(bank); 1143 omap_gpio_show_rev(bank);
1137 1144
1145 if (bank->loses_context)
1146 bank->get_context_loss_count = pdata->get_context_loss_count;
1147
1138 pm_runtime_put(bank->dev); 1148 pm_runtime_put(bank->dev);
1139 1149
1140 list_add_tail(&bank->node, &omap_gpio_list); 1150 list_add_tail(&bank->node, &omap_gpio_list);
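The two _gpio_dbck_* hunks above encode an ordering invariant: the debounce-enable bits may only be set while the debounce clock is running, and must be cleared before that clock is cut. A condensed sketch of the two helpers as they behave after the patch (dbck_on()/dbck_off() are hypothetical names for illustration, not driver code):

	static void dbck_on(struct gpio_bank *bank)
	{
		clk_enable(bank->dbck);		/* clock first */
		__raw_writel(bank->dbck_enable_mask,
			     bank->base + bank->regs->debounce_en);
	}

	static void dbck_off(struct gpio_bank *bank)
	{
		/* clear debounce enable first, then cut the clock */
		__raw_writel(0, bank->base + bank->regs->debounce_en);
		clk_disable(bank->dbck);
	}

Moving clk_get() into omap_gpio_mod_init() in the same series means the clock is looked up once at module init rather than lazily on the first gpio_debounce() call.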
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 1c313c710be3..9c693ae17956 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -78,10 +78,10 @@ struct pca953x_chip {
78 78
79#ifdef CONFIG_GPIO_PCA953X_IRQ 79#ifdef CONFIG_GPIO_PCA953X_IRQ
80 struct mutex irq_lock; 80 struct mutex irq_lock;
81 uint16_t irq_mask; 81 u32 irq_mask;
82 uint16_t irq_stat; 82 u32 irq_stat;
83 uint16_t irq_trig_raise; 83 u32 irq_trig_raise;
84 uint16_t irq_trig_fall; 84 u32 irq_trig_fall;
85 int irq_base; 85 int irq_base;
86#endif 86#endif
87 87
@@ -98,12 +98,11 @@ static int pca953x_write_reg(struct pca953x_chip *chip, int reg, u32 val)
98 if (chip->gpio_chip.ngpio <= 8) 98 if (chip->gpio_chip.ngpio <= 8)
99 ret = i2c_smbus_write_byte_data(chip->client, reg, val); 99 ret = i2c_smbus_write_byte_data(chip->client, reg, val);
100 else if (chip->gpio_chip.ngpio == 24) { 100 else if (chip->gpio_chip.ngpio == 24) {
101 ret = i2c_smbus_write_word_data(chip->client, 101 cpu_to_le32s(&val);
102 ret = i2c_smbus_write_i2c_block_data(chip->client,
102 (reg << 2) | REG_ADDR_AI, 103 (reg << 2) | REG_ADDR_AI,
103 val & 0xffff); 104 3,
104 ret = i2c_smbus_write_byte_data(chip->client, 105 (u8 *) &val);
105 (reg << 2) + 2,
106 (val & 0xff0000) >> 16);
107 } 106 }
108 else { 107 else {
109 switch (chip->chip_type) { 108 switch (chip->chip_type) {
@@ -135,22 +134,27 @@ static int pca953x_read_reg(struct pca953x_chip *chip, int reg, u32 *val)
135{ 134{
136 int ret; 135 int ret;
137 136
138 if (chip->gpio_chip.ngpio <= 8) 137 if (chip->gpio_chip.ngpio <= 8) {
139 ret = i2c_smbus_read_byte_data(chip->client, reg); 138 ret = i2c_smbus_read_byte_data(chip->client, reg);
140 else if (chip->gpio_chip.ngpio == 24) { 139 *val = ret;
141 ret = i2c_smbus_read_word_data(chip->client, reg << 2);
142 ret |= (i2c_smbus_read_byte_data(chip->client,
143 (reg << 2) + 2)<<16);
144 } 140 }
145 else 141 else if (chip->gpio_chip.ngpio == 24) {
142 *val = 0;
143 ret = i2c_smbus_read_i2c_block_data(chip->client,
144 (reg << 2) | REG_ADDR_AI,
145 3,
146 (u8 *) val);
147 le32_to_cpus(val);
148 } else {
146 ret = i2c_smbus_read_word_data(chip->client, reg << 1); 149 ret = i2c_smbus_read_word_data(chip->client, reg << 1);
150 *val = ret;
151 }
147 152
148 if (ret < 0) { 153 if (ret < 0) {
149 dev_err(&chip->client->dev, "failed reading register\n"); 154 dev_err(&chip->client->dev, "failed reading register\n");
150 return ret; 155 return ret;
151 } 156 }
152 157
153 *val = (u32)ret;
154 return 0; 158 return 0;
155} 159}
156 160
@@ -349,8 +353,8 @@ static void pca953x_irq_bus_lock(struct irq_data *d)
349static void pca953x_irq_bus_sync_unlock(struct irq_data *d) 353static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
350{ 354{
351 struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); 355 struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
352 uint16_t new_irqs; 356 u32 new_irqs;
353 uint16_t level; 357 u32 level;
354 358
355 /* Look for any newly setup interrupt */ 359 /* Look for any newly setup interrupt */
356 new_irqs = chip->irq_trig_fall | chip->irq_trig_raise; 360 new_irqs = chip->irq_trig_fall | chip->irq_trig_raise;
@@ -368,8 +372,8 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
368static int pca953x_irq_set_type(struct irq_data *d, unsigned int type) 372static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
369{ 373{
370 struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); 374 struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
371 uint16_t level = d->irq - chip->irq_base; 375 u32 level = d->irq - chip->irq_base;
372 uint16_t mask = 1 << level; 376 u32 mask = 1 << level;
373 377
374 if (!(type & IRQ_TYPE_EDGE_BOTH)) { 378 if (!(type & IRQ_TYPE_EDGE_BOTH)) {
375 dev_err(&chip->client->dev, "irq %d: unsupported type %d\n", 379 dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
@@ -399,12 +403,12 @@ static struct irq_chip pca953x_irq_chip = {
399 .irq_set_type = pca953x_irq_set_type, 403 .irq_set_type = pca953x_irq_set_type,
400}; 404};
401 405
402static uint16_t pca953x_irq_pending(struct pca953x_chip *chip) 406static u32 pca953x_irq_pending(struct pca953x_chip *chip)
403{ 407{
404 u32 cur_stat; 408 u32 cur_stat;
405 uint16_t old_stat; 409 u32 old_stat;
406 uint16_t pending; 410 u32 pending;
407 uint16_t trigger; 411 u32 trigger;
408 int ret, offset = 0; 412 int ret, offset = 0;
409 413
410 switch (chip->chip_type) { 414 switch (chip->chip_type) {
@@ -440,8 +444,8 @@ static uint16_t pca953x_irq_pending(struct pca953x_chip *chip)
440static irqreturn_t pca953x_irq_handler(int irq, void *devid) 444static irqreturn_t pca953x_irq_handler(int irq, void *devid)
441{ 445{
442 struct pca953x_chip *chip = devid; 446 struct pca953x_chip *chip = devid;
443 uint16_t pending; 447 u32 pending;
444 uint16_t level; 448 u32 level;
445 449
446 pending = pca953x_irq_pending(chip); 450 pending = pca953x_irq_pending(chip);
447 451
@@ -564,7 +568,7 @@ static void pca953x_irq_teardown(struct pca953x_chip *chip)
564 * WARNING: This is DEPRECATED and will be removed eventually! 568 * WARNING: This is DEPRECATED and will be removed eventually!
565 */ 569 */
566static void 570static void
567pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) 571pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, u32 *invert)
568{ 572{
569 struct device_node *node; 573 struct device_node *node;
570 const __be32 *val; 574 const __be32 *val;
@@ -592,13 +596,13 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
592} 596}
593#else 597#else
594static void 598static void
595pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) 599pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, u32 *invert)
596{ 600{
597 *gpio_base = -1; 601 *gpio_base = -1;
598} 602}
599#endif 603#endif
600 604
601static int __devinit device_pca953x_init(struct pca953x_chip *chip, int invert) 605static int __devinit device_pca953x_init(struct pca953x_chip *chip, u32 invert)
602{ 606{
603 int ret; 607 int ret;
604 608
@@ -617,7 +621,7 @@ out:
617 return ret; 621 return ret;
618} 622}
619 623
620static int __devinit device_pca957x_init(struct pca953x_chip *chip, int invert) 624static int __devinit device_pca957x_init(struct pca953x_chip *chip, u32 invert)
621{ 625{
622 int ret; 626 int ret;
623 u32 val = 0; 627 u32 val = 0;
@@ -653,8 +657,9 @@ static int __devinit pca953x_probe(struct i2c_client *client,
653{ 657{
654 struct pca953x_platform_data *pdata; 658 struct pca953x_platform_data *pdata;
655 struct pca953x_chip *chip; 659 struct pca953x_chip *chip;
656 int irq_base=0, invert=0; 660 int irq_base = 0;
657 int ret; 661 int ret;
662 u32 invert = 0;
658 663
659 chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL); 664 chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
660 if (chip == NULL) 665 if (chip == NULL)
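The pca953x hunks above replace the word-plus-byte access for 24-bit parts with a single 3-byte i2c_smbus_{read,write}_i2c_block_data() transfer; cpu_to_le32s()/le32_to_cpus() keep the in-memory u32 consistent with the LSB-first wire order regardless of host endianness. A standalone sketch of that byte layout (illustrative helpers, not driver code):

	#include <stdint.h>

	/* What cpu_to_le32s() arranges for the driver's (u8 *)&val cast:
	 * the low byte of the 24-bit register value goes out first. */
	static void pack24(uint32_t val, uint8_t buf[3])
	{
		buf[0] = val & 0xff;
		buf[1] = (val >> 8) & 0xff;
		buf[2] = (val >> 16) & 0xff;
	}

	/* The equivalent of le32_to_cpus() applied after a block read. */
	static uint32_t unpack24(const uint8_t buf[3])
	{
		return buf[0] | ((uint32_t)buf[1] << 8) |
		       ((uint32_t)buf[2] << 16);
	}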
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 2d1de9e7e9bd..076e236d0da7 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -61,61 +61,28 @@ struct pcf857x {
61 struct i2c_client *client; 61 struct i2c_client *client;
62 struct mutex lock; /* protect 'out' */ 62 struct mutex lock; /* protect 'out' */
63 unsigned out; /* software latch */ 63 unsigned out; /* software latch */
64
65 int (*write)(struct i2c_client *client, unsigned data);
66 int (*read)(struct i2c_client *client);
64}; 67};
65 68
66/*-------------------------------------------------------------------------*/ 69/*-------------------------------------------------------------------------*/
67 70
68/* Talk to 8-bit I/O expander */ 71/* Talk to 8-bit I/O expander */
69 72
70static int pcf857x_input8(struct gpio_chip *chip, unsigned offset) 73static int i2c_write_le8(struct i2c_client *client, unsigned data)
71{
72 struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
73 int status;
74
75 mutex_lock(&gpio->lock);
76 gpio->out |= (1 << offset);
77 status = i2c_smbus_write_byte(gpio->client, gpio->out);
78 mutex_unlock(&gpio->lock);
79
80 return status;
81}
82
83static int pcf857x_get8(struct gpio_chip *chip, unsigned offset)
84{
85 struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
86 s32 value;
87
88 value = i2c_smbus_read_byte(gpio->client);
89 return (value < 0) ? 0 : (value & (1 << offset));
90}
91
92static int pcf857x_output8(struct gpio_chip *chip, unsigned offset, int value)
93{ 74{
94 struct pcf857x *gpio = container_of(chip, struct pcf857x, chip); 75 return i2c_smbus_write_byte(client, data);
95 unsigned bit = 1 << offset;
96 int status;
97
98 mutex_lock(&gpio->lock);
99 if (value)
100 gpio->out |= bit;
101 else
102 gpio->out &= ~bit;
103 status = i2c_smbus_write_byte(gpio->client, gpio->out);
104 mutex_unlock(&gpio->lock);
105
106 return status;
107} 76}
108 77
109static void pcf857x_set8(struct gpio_chip *chip, unsigned offset, int value) 78static int i2c_read_le8(struct i2c_client *client)
110{ 79{
111 pcf857x_output8(chip, offset, value); 80 return (int)i2c_smbus_read_byte(client);
112} 81}
113 82
114/*-------------------------------------------------------------------------*/
115
116/* Talk to 16-bit I/O expander */ 83/* Talk to 16-bit I/O expander */
117 84
118static int i2c_write_le16(struct i2c_client *client, u16 word) 85static int i2c_write_le16(struct i2c_client *client, unsigned word)
119{ 86{
120 u8 buf[2] = { word & 0xff, word >> 8, }; 87 u8 buf[2] = { word & 0xff, word >> 8, };
121 int status; 88 int status;
@@ -135,29 +102,31 @@ static int i2c_read_le16(struct i2c_client *client)
135 return (buf[1] << 8) | buf[0]; 102 return (buf[1] << 8) | buf[0];
136} 103}
137 104
138static int pcf857x_input16(struct gpio_chip *chip, unsigned offset) 105/*-------------------------------------------------------------------------*/
106
107static int pcf857x_input(struct gpio_chip *chip, unsigned offset)
139{ 108{
140 struct pcf857x *gpio = container_of(chip, struct pcf857x, chip); 109 struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
141 int status; 110 int status;
142 111
143 mutex_lock(&gpio->lock); 112 mutex_lock(&gpio->lock);
144 gpio->out |= (1 << offset); 113 gpio->out |= (1 << offset);
145 status = i2c_write_le16(gpio->client, gpio->out); 114 status = gpio->write(gpio->client, gpio->out);
146 mutex_unlock(&gpio->lock); 115 mutex_unlock(&gpio->lock);
147 116
148 return status; 117 return status;
149} 118}
150 119
151static int pcf857x_get16(struct gpio_chip *chip, unsigned offset) 120static int pcf857x_get(struct gpio_chip *chip, unsigned offset)
152{ 121{
153 struct pcf857x *gpio = container_of(chip, struct pcf857x, chip); 122 struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
154 int value; 123 int value;
155 124
156 value = i2c_read_le16(gpio->client); 125 value = gpio->read(gpio->client);
157 return (value < 0) ? 0 : (value & (1 << offset)); 126 return (value < 0) ? 0 : (value & (1 << offset));
158} 127}
159 128
160static int pcf857x_output16(struct gpio_chip *chip, unsigned offset, int value) 129static int pcf857x_output(struct gpio_chip *chip, unsigned offset, int value)
161{ 130{
162 struct pcf857x *gpio = container_of(chip, struct pcf857x, chip); 131 struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
163 unsigned bit = 1 << offset; 132 unsigned bit = 1 << offset;
@@ -168,15 +137,15 @@ static int pcf857x_output16(struct gpio_chip *chip, unsigned offset, int value)
168 gpio->out |= bit; 137 gpio->out |= bit;
169 else 138 else
170 gpio->out &= ~bit; 139 gpio->out &= ~bit;
171 status = i2c_write_le16(gpio->client, gpio->out); 140 status = gpio->write(gpio->client, gpio->out);
172 mutex_unlock(&gpio->lock); 141 mutex_unlock(&gpio->lock);
173 142
174 return status; 143 return status;
175} 144}
176 145
177static void pcf857x_set16(struct gpio_chip *chip, unsigned offset, int value) 146static void pcf857x_set(struct gpio_chip *chip, unsigned offset, int value)
178{ 147{
179 pcf857x_output16(chip, offset, value); 148 pcf857x_output(chip, offset, value);
180} 149}
181 150
182/*-------------------------------------------------------------------------*/ 151/*-------------------------------------------------------------------------*/
@@ -200,10 +169,15 @@ static int pcf857x_probe(struct i2c_client *client,
200 169
201 mutex_init(&gpio->lock); 170 mutex_init(&gpio->lock);
202 171
203 gpio->chip.base = pdata ? pdata->gpio_base : -1; 172 gpio->chip.base = pdata ? pdata->gpio_base : -1;
204 gpio->chip.can_sleep = 1; 173 gpio->chip.can_sleep = 1;
205 gpio->chip.dev = &client->dev; 174 gpio->chip.dev = &client->dev;
206 gpio->chip.owner = THIS_MODULE; 175 gpio->chip.owner = THIS_MODULE;
176 gpio->chip.get = pcf857x_get;
177 gpio->chip.set = pcf857x_set;
178 gpio->chip.direction_input = pcf857x_input;
179 gpio->chip.direction_output = pcf857x_output;
180 gpio->chip.ngpio = id->driver_data;
207 181
208 /* NOTE: the OnSemi jlc1562b is also largely compatible with 182 /* NOTE: the OnSemi jlc1562b is also largely compatible with
209 * these parts, notably for output. It has a low-resolution 183 * these parts, notably for output. It has a low-resolution
@@ -216,12 +190,9 @@ static int pcf857x_probe(struct i2c_client *client,
216 * 190 *
217 * NOTE: we don't distinguish here between *4 and *4a parts. 191 * NOTE: we don't distinguish here between *4 and *4a parts.
218 */ 192 */
219 gpio->chip.ngpio = id->driver_data;
220 if (gpio->chip.ngpio == 8) { 193 if (gpio->chip.ngpio == 8) {
221 gpio->chip.direction_input = pcf857x_input8; 194 gpio->write = i2c_write_le8;
222 gpio->chip.get = pcf857x_get8; 195 gpio->read = i2c_read_le8;
223 gpio->chip.direction_output = pcf857x_output8;
224 gpio->chip.set = pcf857x_set8;
225 196
226 if (!i2c_check_functionality(client->adapter, 197 if (!i2c_check_functionality(client->adapter,
227 I2C_FUNC_SMBUS_BYTE)) 198 I2C_FUNC_SMBUS_BYTE))
@@ -238,10 +209,8 @@ static int pcf857x_probe(struct i2c_client *client,
238 * NOTE: we don't distinguish here between '75 and '75c parts. 209 * NOTE: we don't distinguish here between '75 and '75c parts.
239 */ 210 */
240 } else if (gpio->chip.ngpio == 16) { 211 } else if (gpio->chip.ngpio == 16) {
241 gpio->chip.direction_input = pcf857x_input16; 212 gpio->write = i2c_write_le16;
242 gpio->chip.get = pcf857x_get16; 213 gpio->read = i2c_read_le16;
243 gpio->chip.direction_output = pcf857x_output16;
244 gpio->chip.set = pcf857x_set16;
245 214
246 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) 215 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
247 status = -EIO; 216 status = -EIO;
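The pcf857x rework above deletes the duplicated 8-bit and 16-bit gpio_chip callbacks in favour of one set that dispatches through the new per-device read/write pointers, so probe() selects the bus width exactly once. One payoff is that a hypothetically wider part would only touch the probe-time selection; a sketch (i2c_write_le24()/i2c_read_le24() are assumptions, not part of the patch):

	} else if (gpio->chip.ngpio == 24) {
		gpio->write = i2c_write_le24;	/* hypothetical helpers */
		gpio->read  = i2c_read_le24;
	}

Every pcf857x_input/get/output/set path would stay untouched, since each already goes through gpio->write()/gpio->read().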
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 58a6a63a6ece..9cac88a65f78 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -62,6 +62,7 @@ int pxa_last_gpio;
62 62
63#ifdef CONFIG_OF 63#ifdef CONFIG_OF
64static struct irq_domain *domain; 64static struct irq_domain *domain;
65static struct device_node *pxa_gpio_of_node;
65#endif 66#endif
66 67
67struct pxa_gpio_chip { 68struct pxa_gpio_chip {
@@ -277,6 +278,24 @@ static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
277 (value ? GPSR_OFFSET : GPCR_OFFSET)); 278 (value ? GPSR_OFFSET : GPCR_OFFSET));
278} 279}
279 280
281#ifdef CONFIG_OF_GPIO
282static int pxa_gpio_of_xlate(struct gpio_chip *gc,
283 const struct of_phandle_args *gpiospec,
284 u32 *flags)
285{
286 if (gpiospec->args[0] > pxa_last_gpio)
287 return -EINVAL;
288
289 if (gc != &pxa_gpio_chips[gpiospec->args[0] / 32].chip)
290 return -EINVAL;
291
292 if (flags)
293 *flags = gpiospec->args[1];
294
295 return gpiospec->args[0] % 32;
296}
297#endif
298
280static int __devinit pxa_init_gpio_chip(int gpio_end, 299static int __devinit pxa_init_gpio_chip(int gpio_end,
281 int (*set_wake)(unsigned int, unsigned int)) 300 int (*set_wake)(unsigned int, unsigned int))
282{ 301{
@@ -304,6 +323,11 @@ static int __devinit pxa_init_gpio_chip(int gpio_end,
304 c->get = pxa_gpio_get; 323 c->get = pxa_gpio_get;
305 c->set = pxa_gpio_set; 324 c->set = pxa_gpio_set;
306 c->to_irq = pxa_gpio_to_irq; 325 c->to_irq = pxa_gpio_to_irq;
326#ifdef CONFIG_OF_GPIO
327 c->of_node = pxa_gpio_of_node;
328 c->of_xlate = pxa_gpio_of_xlate;
329 c->of_gpio_n_cells = 2;
330#endif
307 331
308 /* number of GPIOs on last bank may be less than 32 */ 332 /* number of GPIOs on last bank may be less than 32 */
309 c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32; 333 c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32;
@@ -488,6 +512,7 @@ static int pxa_gpio_nums(void)
488 return count; 512 return count;
489} 513}
490 514
515#ifdef CONFIG_OF
491static struct of_device_id pxa_gpio_dt_ids[] = { 516static struct of_device_id pxa_gpio_dt_ids[] = {
492 { .compatible = "mrvl,pxa-gpio" }, 517 { .compatible = "mrvl,pxa-gpio" },
493 { .compatible = "mrvl,mmp-gpio", .data = (void *)MMP_GPIO }, 518 { .compatible = "mrvl,mmp-gpio", .data = (void *)MMP_GPIO },
@@ -505,9 +530,9 @@ static int pxa_irq_domain_map(struct irq_domain *d, unsigned int irq,
505 530
506const struct irq_domain_ops pxa_irq_domain_ops = { 531const struct irq_domain_ops pxa_irq_domain_ops = {
507 .map = pxa_irq_domain_map, 532 .map = pxa_irq_domain_map,
533 .xlate = irq_domain_xlate_twocell,
508}; 534};
509 535
510#ifdef CONFIG_OF
511static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev) 536static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev)
512{ 537{
513 int ret, nr_banks, nr_gpios, irq_base; 538 int ret, nr_banks, nr_gpios, irq_base;
@@ -545,6 +570,7 @@ static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev)
545 } 570 }
546 domain = irq_domain_add_legacy(np, nr_gpios, irq_base, 0, 571 domain = irq_domain_add_legacy(np, nr_gpios, irq_base, 0,
547 &pxa_irq_domain_ops, NULL); 572 &pxa_irq_domain_ops, NULL);
573 pxa_gpio_of_node = np;
548 return 0; 574 return 0;
549err: 575err:
550 iounmap(gpio_reg_base); 576 iounmap(gpio_reg_base);
@@ -653,7 +679,7 @@ static struct platform_driver pxa_gpio_driver = {
653 .probe = pxa_gpio_probe, 679 .probe = pxa_gpio_probe,
654 .driver = { 680 .driver = {
655 .name = "pxa-gpio", 681 .name = "pxa-gpio",
656 .of_match_table = pxa_gpio_dt_ids, 682 .of_match_table = of_match_ptr(pxa_gpio_dt_ids),
657 }, 683 },
658}; 684};
659 685
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index b6453d0e44ad..ba126cc04073 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2454,12 +2454,6 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
2454 }, 2454 },
2455 }, { 2455 }, {
2456 .chip = { 2456 .chip = {
2457 .base = EXYNOS5_GPC4(0),
2458 .ngpio = EXYNOS5_GPIO_C4_NR,
2459 .label = "GPC4",
2460 },
2461 }, {
2462 .chip = {
2463 .base = EXYNOS5_GPD0(0), 2457 .base = EXYNOS5_GPD0(0),
2464 .ngpio = EXYNOS5_GPIO_D0_NR, 2458 .ngpio = EXYNOS5_GPIO_D0_NR,
2465 .label = "GPD0", 2459 .label = "GPD0",
@@ -2513,6 +2507,12 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
2513 .label = "GPY6", 2507 .label = "GPY6",
2514 }, 2508 },
2515 }, { 2509 }, {
2510 .chip = {
2511 .base = EXYNOS5_GPC4(0),
2512 .ngpio = EXYNOS5_GPIO_C4_NR,
2513 .label = "GPC4",
2514 },
2515 }, {
2516 .config = &samsung_gpio_cfgs[9], 2516 .config = &samsung_gpio_cfgs[9],
2517 .irq_base = IRQ_EINT(0), 2517 .irq_base = IRQ_EINT(0),
2518 .chip = { 2518 .chip = {
@@ -2681,11 +2681,14 @@ static int exynos_gpio_xlate(struct gpio_chip *gc,
2681 2681
2682 if (s3c_gpio_cfgpin(pin, S3C_GPIO_SFN(gpiospec->args[1]))) 2682 if (s3c_gpio_cfgpin(pin, S3C_GPIO_SFN(gpiospec->args[1])))
2683 pr_warn("gpio_xlate: failed to set pin function\n"); 2683 pr_warn("gpio_xlate: failed to set pin function\n");
2684 if (s3c_gpio_setpull(pin, gpiospec->args[2])) 2684 if (s3c_gpio_setpull(pin, gpiospec->args[2] & 0xffff))
2685 pr_warn("gpio_xlate: failed to set pin pull up/down\n"); 2685 pr_warn("gpio_xlate: failed to set pin pull up/down\n");
2686 if (s5p_gpio_set_drvstr(pin, gpiospec->args[3])) 2686 if (s5p_gpio_set_drvstr(pin, gpiospec->args[3]))
2687 pr_warn("gpio_xlate: failed to set pin drive strength\n"); 2687 pr_warn("gpio_xlate: failed to set pin drive strength\n");
2688 2688
2689 if (flags)
2690 *flags = gpiospec->args[2] >> 16;
2691
2689 return gpiospec->args[0]; 2692 return gpiospec->args[0];
2690} 2693}
2691 2694
@@ -2833,7 +2836,7 @@ static __init void exynos5_gpiolib_init(void)
2833 } 2836 }
2834 2837
2835 /* need to set base address for gpc4 */ 2838 /* need to set base address for gpc4 */
2836 exynos5_gpios_1[11].base = gpio_base1 + 0x2E0; 2839 exynos5_gpios_1[20].base = gpio_base1 + 0x2E0;
2837 2840
2838 /* need to set base address for gpx */ 2841 /* need to set base address for gpx */
2839 chip = &exynos5_gpios_1[21]; 2842 chip = &exynos5_gpios_1[21];
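The exynos_gpio_xlate() change above splits the third GPIO cell into two 16-bit halves: the low half is the pull setting handed to s3c_gpio_setpull(), the high half is returned through *flags. A standalone sketch of that packing (masks taken from the diff; make_cell() is a hypothetical helper for composing device-tree values):

	#define EXYNOS_CELL_PULL(cell)	((cell) & 0xffff)	/* -> s3c_gpio_setpull() */
	#define EXYNOS_CELL_FLAGS(cell)	((cell) >> 16)		/* -> *flags */

	static u32 make_cell(u16 flags, u16 pull)
	{
		return ((u32)flags << 16) | pull;
	}

Note also that moving the GPC4 entry after GPY6 in the same file is why the base-address fixup in exynos5_gpiolib_init() now indexes entry 20 instead of 11.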
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 424dce8e3f30..8707d4572a06 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -241,7 +241,8 @@ static int __devinit sch_gpio_probe(struct platform_device *pdev)
241 break; 241 break;
242 242
243 default: 243 default:
244 return -ENODEV; 244 err = -ENODEV;
245 goto err_sch_gpio_core;
245 } 246 }
246 247
247 sch_gpio_core.dev = &pdev->dev; 248 sch_gpio_core.dev = &pdev->dev;
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 38416be8ba11..6064fb376e11 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -383,8 +383,9 @@ static int __devinit gsta_probe(struct platform_device *dev)
383 } 383 }
384 spin_lock_init(&chip->lock); 384 spin_lock_init(&chip->lock);
385 gsta_gpio_setup(chip); 385 gsta_gpio_setup(chip);
386 for (i = 0; i < GSTA_NR_GPIO; i++) 386 if (gpio_pdata)
387 gsta_set_config(chip, i, gpio_pdata->pinconfig[i]); 387 for (i = 0; i < GSTA_NR_GPIO; i++)
388 gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
388 389
389 /* 384 was used in previous code: be compatible for other drivers */ 390 /* 384 was used in previous code: be compatible for other drivers */
390 err = irq_alloc_descs(-1, 384, GSTA_NR_GPIO, NUMA_NO_NODE); 391 err = irq_alloc_descs(-1, 384, GSTA_NR_GPIO, NUMA_NO_NODE);
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
new file mode 100644
index 000000000000..2526b3bb0fae
--- /dev/null
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -0,0 +1,158 @@
1/*
2 * TI TPS6586x GPIO driver
3 *
4 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
 5 * Author: Laxman Dewangan <ldewangan@nvidia.com>
6 *
7 * Based on tps6586x.c
8 * Copyright (c) 2010 CompuLab Ltd.
9 * Mike Rapoport <mike@compulab.co.il>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms and conditions of the GNU General Public License,
13 * version 2, as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program. If not, see <http://www.gnu.org/licenses/>.
22 */
23
24#include <linux/errno.h>
25#include <linux/gpio.h>
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/mfd/tps6586x.h>
29#include <linux/of_device.h>
30#include <linux/platform_device.h>
31
32/* GPIO control registers */
33#define TPS6586X_GPIOSET1 0x5d
34#define TPS6586X_GPIOSET2 0x5e
35
36struct tps6586x_gpio {
37 struct gpio_chip gpio_chip;
38 struct device *parent;
39};
40
41static inline struct tps6586x_gpio *to_tps6586x_gpio(struct gpio_chip *chip)
42{
43 return container_of(chip, struct tps6586x_gpio, gpio_chip);
44}
45
46static int tps6586x_gpio_get(struct gpio_chip *gc, unsigned offset)
47{
48 struct tps6586x_gpio *tps6586x_gpio = to_tps6586x_gpio(gc);
49 uint8_t val;
50 int ret;
51
52 ret = tps6586x_read(tps6586x_gpio->parent, TPS6586X_GPIOSET2, &val);
53 if (ret)
54 return ret;
55
56 return !!(val & (1 << offset));
57}
58
59static void tps6586x_gpio_set(struct gpio_chip *gc, unsigned offset,
60 int value)
61{
62 struct tps6586x_gpio *tps6586x_gpio = to_tps6586x_gpio(gc);
63
64 tps6586x_update(tps6586x_gpio->parent, TPS6586X_GPIOSET2,
65 value << offset, 1 << offset);
66}
67
68static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
69 int value)
70{
71 struct tps6586x_gpio *tps6586x_gpio = to_tps6586x_gpio(gc);
72 uint8_t val, mask;
73
74 tps6586x_gpio_set(gc, offset, value);
75
76 val = 0x1 << (offset * 2);
77 mask = 0x3 << (offset * 2);
78
79 return tps6586x_update(tps6586x_gpio->parent, TPS6586X_GPIOSET1,
80 val, mask);
81}
82
83static int __devinit tps6586x_gpio_probe(struct platform_device *pdev)
84{
85 struct tps6586x_platform_data *pdata;
86 struct tps6586x_gpio *tps6586x_gpio;
87 int ret;
88
89 pdata = dev_get_platdata(pdev->dev.parent);
90 tps6586x_gpio = devm_kzalloc(&pdev->dev,
91 sizeof(*tps6586x_gpio), GFP_KERNEL);
92 if (!tps6586x_gpio) {
93 dev_err(&pdev->dev, "Could not allocate tps6586x_gpio\n");
94 return -ENOMEM;
95 }
96
97 tps6586x_gpio->parent = pdev->dev.parent;
98
99 tps6586x_gpio->gpio_chip.owner = THIS_MODULE;
100 tps6586x_gpio->gpio_chip.label = pdev->name;
101 tps6586x_gpio->gpio_chip.dev = &pdev->dev;
102 tps6586x_gpio->gpio_chip.ngpio = 4;
103 tps6586x_gpio->gpio_chip.can_sleep = 1;
104
105 /* FIXME: add handling of GPIOs as dedicated inputs */
106 tps6586x_gpio->gpio_chip.direction_output = tps6586x_gpio_output;
107 tps6586x_gpio->gpio_chip.set = tps6586x_gpio_set;
108 tps6586x_gpio->gpio_chip.get = tps6586x_gpio_get;
109
110#ifdef CONFIG_OF_GPIO
111 tps6586x_gpio->gpio_chip.of_node = pdev->dev.parent->of_node;
112#endif
113 if (pdata && pdata->gpio_base)
114 tps6586x_gpio->gpio_chip.base = pdata->gpio_base;
115 else
116 tps6586x_gpio->gpio_chip.base = -1;
117
118 ret = gpiochip_add(&tps6586x_gpio->gpio_chip);
119 if (ret < 0) {
120 dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
121 return ret;
122 }
123
124 platform_set_drvdata(pdev, tps6586x_gpio);
125
126 return ret;
127}
128
129static int __devexit tps6586x_gpio_remove(struct platform_device *pdev)
130{
131 struct tps6586x_gpio *tps6586x_gpio = platform_get_drvdata(pdev);
132
133 return gpiochip_remove(&tps6586x_gpio->gpio_chip);
134}
135
136static struct platform_driver tps6586x_gpio_driver = {
137 .driver.name = "tps6586x-gpio",
138 .driver.owner = THIS_MODULE,
139 .probe = tps6586x_gpio_probe,
140 .remove = __devexit_p(tps6586x_gpio_remove),
141};
142
143static int __init tps6586x_gpio_init(void)
144{
145 return platform_driver_register(&tps6586x_gpio_driver);
146}
147subsys_initcall(tps6586x_gpio_init);
148
149static void __exit tps6586x_gpio_exit(void)
150{
151 platform_driver_unregister(&tps6586x_gpio_driver);
152}
153module_exit(tps6586x_gpio_exit);
154
155MODULE_ALIAS("platform:tps6586x-gpio");
156MODULE_DESCRIPTION("GPIO interface for TPS6586X PMIC");
157MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
158MODULE_LICENSE("GPL");
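In the new gpio-tps6586x.c above, TPS6586X_GPIOSET1 packs a 2-bit mode field per GPIO, and tps6586x_gpio_output() writes 0x1 into that field to select output mode. The field arithmetic pulled out as a standalone sketch (helper names are hypothetical):

	static inline unsigned gpioset1_val(unsigned offset)
	{
		return 0x1 << (offset * 2);	/* output mode, per the driver */
	}

	static inline unsigned gpioset1_mask(unsigned offset)
	{
		return 0x3 << (offset * 2);	/* full 2-bit field */
	}

GPIOSET2, by contrast, holds one level bit per GPIO, which is why get() and set() use plain 1 << offset masks.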
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index c1ad2884f2ed..11f29c82253c 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -149,6 +149,9 @@ static int __devinit tps65910_gpio_probe(struct platform_device *pdev)
149 tps65910_gpio->gpio_chip.set = tps65910_gpio_set; 149 tps65910_gpio->gpio_chip.set = tps65910_gpio_set;
150 tps65910_gpio->gpio_chip.get = tps65910_gpio_get; 150 tps65910_gpio->gpio_chip.get = tps65910_gpio_get;
151 tps65910_gpio->gpio_chip.dev = &pdev->dev; 151 tps65910_gpio->gpio_chip.dev = &pdev->dev;
152#ifdef CONFIG_OF_GPIO
153 tps65910_gpio->gpio_chip.of_node = tps65910->dev->of_node;
154#endif
152 if (pdata && pdata->gpio_base) 155 if (pdata && pdata->gpio_base)
153 tps65910_gpio->gpio_chip.base = pdata->gpio_base; 156 tps65910_gpio->gpio_chip.base = pdata->gpio_base;
154 else 157 else
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index 92ea5350dfe9..1c764e779d80 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -19,6 +19,7 @@
19#include <linux/mfd/core.h> 19#include <linux/mfd/core.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/seq_file.h> 21#include <linux/seq_file.h>
22#include <linux/regmap.h>
22 23
23#include <linux/mfd/wm8994/core.h> 24#include <linux/mfd/wm8994/core.h>
24#include <linux/mfd/wm8994/pdata.h> 25#include <linux/mfd/wm8994/pdata.h>
@@ -89,8 +90,11 @@ static int wm8994_gpio_direction_out(struct gpio_chip *chip,
89 struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip); 90 struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
90 struct wm8994 *wm8994 = wm8994_gpio->wm8994; 91 struct wm8994 *wm8994 = wm8994_gpio->wm8994;
91 92
93 if (value)
94 value = WM8994_GPN_LVL;
95
92 return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset, 96 return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
93 WM8994_GPN_DIR, 0); 97 WM8994_GPN_DIR | WM8994_GPN_LVL, value);
94} 98}
95 99
96static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 100static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -109,10 +113,7 @@ static int wm8994_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
109 struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip); 113 struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
110 struct wm8994 *wm8994 = wm8994_gpio->wm8994; 114 struct wm8994 *wm8994 = wm8994_gpio->wm8994;
111 115
112 if (!wm8994->irq_base) 116 return regmap_irq_get_virq(wm8994->irq_data, offset);
113 return -EINVAL;
114
115 return wm8994->irq_base + offset;
116} 117}
117 118
118 119
@@ -251,7 +252,8 @@ static int __devinit wm8994_gpio_probe(struct platform_device *pdev)
251 struct wm8994_gpio *wm8994_gpio; 252 struct wm8994_gpio *wm8994_gpio;
252 int ret; 253 int ret;
253 254
254 wm8994_gpio = kzalloc(sizeof(*wm8994_gpio), GFP_KERNEL); 255 wm8994_gpio = devm_kzalloc(&pdev->dev, sizeof(*wm8994_gpio),
256 GFP_KERNEL);
255 if (wm8994_gpio == NULL) 257 if (wm8994_gpio == NULL)
256 return -ENOMEM; 258 return -ENOMEM;
257 259
@@ -276,20 +278,14 @@ static int __devinit wm8994_gpio_probe(struct platform_device *pdev)
276 return ret; 278 return ret;
277 279
278err: 280err:
279 kfree(wm8994_gpio);
280 return ret; 281 return ret;
281} 282}
282 283
283static int __devexit wm8994_gpio_remove(struct platform_device *pdev) 284static int __devexit wm8994_gpio_remove(struct platform_device *pdev)
284{ 285{
285 struct wm8994_gpio *wm8994_gpio = platform_get_drvdata(pdev); 286 struct wm8994_gpio *wm8994_gpio = platform_get_drvdata(pdev);
286 int ret;
287
288 ret = gpiochip_remove(&wm8994_gpio->gpio_chip);
289 if (ret == 0)
290 kfree(wm8994_gpio);
291 287
292 return ret; 288 return gpiochip_remove(&wm8994_gpio->gpio_chip);
293} 289}
294 290
295static struct platform_driver wm8994_gpio_driver = { 291static struct platform_driver wm8994_gpio_driver = {
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index d18068a9f3ec..a18c4aa68b1e 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -21,7 +21,7 @@
21#include <linux/of_gpio.h> 21#include <linux/of_gpio.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23 23
24/* Private data structure for of_gpiochip_is_match */ 24/* Private data structure for of_gpiochip_find_and_xlate */
25struct gg_data { 25struct gg_data {
26 enum of_gpio_flags *flags; 26 enum of_gpio_flags *flags;
27 struct of_phandle_args gpiospec; 27 struct of_phandle_args gpiospec;
@@ -62,7 +62,10 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
62int of_get_named_gpio_flags(struct device_node *np, const char *propname, 62int of_get_named_gpio_flags(struct device_node *np, const char *propname,
63 int index, enum of_gpio_flags *flags) 63 int index, enum of_gpio_flags *flags)
64{ 64{
 65 struct gg_data gg_data = { .flags = flags, .out_gpio = -ENODEV }; 65 /* Return -EPROBE_DEFER so that probe() functions can be retried
 66 * later, when the GPIO actually becomes available
 67 */
68 struct gg_data gg_data = { .flags = flags, .out_gpio = -EPROBE_DEFER };
66 int ret; 69 int ret;
67 70
68 /* .of_xlate might decide to not fill in the flags, so clear it. */ 71 /* .of_xlate might decide to not fill in the flags, so clear it. */
@@ -73,7 +76,7 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname,
73 &gg_data.gpiospec); 76 &gg_data.gpiospec);
74 if (ret) { 77 if (ret) {
75 pr_debug("%s: can't parse gpios property\n", __func__); 78 pr_debug("%s: can't parse gpios property\n", __func__);
76 return -EINVAL; 79 return ret;
77 } 80 }
78 81
79 gpiochip_find(&gg_data, of_gpiochip_find_and_xlate); 82 gpiochip_find(&gg_data, of_gpiochip_find_and_xlate);
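Together with the gpiolib.c hunk below, returning -EPROBE_DEFER instead of -ENODEV/-EINVAL lets a consumer whose GPIO controller has not registered yet be re-probed later rather than fail outright. A minimal consumer-side sketch (hypothetical driver and property name):

	static int consumer_probe(struct platform_device *pdev)
	{
		int gpio;

		gpio = of_get_named_gpio(pdev->dev.of_node, "reset-gpios", 0);
		if (gpio < 0)
			return gpio;	/* -EPROBE_DEFER re-queues this probe;
					 * any other error fails it for good */

		return gpio_request(gpio, "reset");
	}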
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 120b2a0e3167..de0213c9d11c 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1186,7 +1186,7 @@ int gpio_request(unsigned gpio, const char *label)
1186{ 1186{
1187 struct gpio_desc *desc; 1187 struct gpio_desc *desc;
1188 struct gpio_chip *chip; 1188 struct gpio_chip *chip;
1189 int status = -EINVAL; 1189 int status = -EPROBE_DEFER;
1190 unsigned long flags; 1190 unsigned long flags;
1191 1191
1192 spin_lock_irqsave(&gpio_lock, flags); 1192 spin_lock_irqsave(&gpio_lock, flags);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 23120c00a881..90e28081712d 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -22,6 +22,7 @@ menuconfig DRM
22config DRM_USB 22config DRM_USB
23 tristate 23 tristate
24 depends on DRM 24 depends on DRM
25 depends on USB_ARCH_HAS_HCD
25 select USB 26 select USB
26 27
27config DRM_KMS_HELPER 28config DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 65f9d231af14..7282c081fb53 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -460,8 +460,8 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
460} 460}
461 461
462static bool ast_crtc_mode_fixup(struct drm_crtc *crtc, 462static bool ast_crtc_mode_fixup(struct drm_crtc *crtc,
463 struct drm_display_mode *mode, 463 const struct drm_display_mode *mode,
464 struct drm_display_mode *adjusted_mode) 464 struct drm_display_mode *adjusted_mode)
465{ 465{
466 return true; 466 return true;
467} 467}
@@ -680,7 +680,7 @@ static void ast_encoder_dpms(struct drm_encoder *encoder, int mode)
680} 680}
681 681
682static bool ast_mode_fixup(struct drm_encoder *encoder, 682static bool ast_mode_fixup(struct drm_encoder *encoder,
683 struct drm_display_mode *mode, 683 const struct drm_display_mode *mode,
684 struct drm_display_mode *adjusted_mode) 684 struct drm_display_mode *adjusted_mode)
685{ 685{
686 return true; 686 return true;
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 100f6308c509..a44d31aa4e3c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -97,7 +97,7 @@ static void cirrus_crtc_dpms(struct drm_crtc *crtc, int mode)
97 * to just pass that straight through, so this does nothing 97 * to just pass that straight through, so this does nothing
98 */ 98 */
99static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc, 99static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc,
100 struct drm_display_mode *mode, 100 const struct drm_display_mode *mode,
101 struct drm_display_mode *adjusted_mode) 101 struct drm_display_mode *adjusted_mode)
102{ 102{
103 return true; 103 return true;
@@ -429,8 +429,8 @@ void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
429 429
430 430
431static bool cirrus_encoder_mode_fixup(struct drm_encoder *encoder, 431static bool cirrus_encoder_mode_fixup(struct drm_encoder *encoder,
432 struct drm_display_mode *mode, 432 const struct drm_display_mode *mode,
433 struct drm_display_mode *adjusted_mode) 433 struct drm_display_mode *adjusted_mode)
434{ 434{
435 return true; 435 return true;
436} 436}
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 348b367debeb..b356c719f2f1 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -641,8 +641,6 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
641 641
642 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) 642 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
643 return -EINVAL; 643 return -EINVAL;
644 if (dev->queue_count)
645 return -EBUSY; /* Not while in use */
646 644
647 /* Make sure buffers are located in AGP memory that we own */ 645 /* Make sure buffers are located in AGP memory that we own */
648 valid = 0; 646 valid = 0;
@@ -704,7 +702,6 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
704 buf->next = NULL; 702 buf->next = NULL;
705 buf->waiting = 0; 703 buf->waiting = 0;
706 buf->pending = 0; 704 buf->pending = 0;
707 init_waitqueue_head(&buf->dma_wait);
708 buf->file_priv = NULL; 705 buf->file_priv = NULL;
709 706
710 buf->dev_priv_size = dev->driver->dev_priv_size; 707 buf->dev_priv_size = dev->driver->dev_priv_size;
@@ -796,13 +793,11 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
796 order = drm_order(request->size); 793 order = drm_order(request->size);
797 size = 1 << order; 794 size = 1 << order;
798 795
799 DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n", 796 DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
800 request->count, request->size, size, order, dev->queue_count); 797 request->count, request->size, size, order);
801 798
802 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) 799 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
803 return -EINVAL; 800 return -EINVAL;
804 if (dev->queue_count)
805 return -EBUSY; /* Not while in use */
806 801
807 alignment = (request->flags & _DRM_PAGE_ALIGN) 802 alignment = (request->flags & _DRM_PAGE_ALIGN)
808 ? PAGE_ALIGN(size) : size; 803 ? PAGE_ALIGN(size) : size;
@@ -904,7 +899,6 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
904 buf->next = NULL; 899 buf->next = NULL;
905 buf->waiting = 0; 900 buf->waiting = 0;
906 buf->pending = 0; 901 buf->pending = 0;
907 init_waitqueue_head(&buf->dma_wait);
908 buf->file_priv = NULL; 902 buf->file_priv = NULL;
909 903
910 buf->dev_priv_size = dev->driver->dev_priv_size; 904 buf->dev_priv_size = dev->driver->dev_priv_size;
@@ -1019,8 +1013,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
1019 1013
1020 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) 1014 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1021 return -EINVAL; 1015 return -EINVAL;
1022 if (dev->queue_count)
1023 return -EBUSY; /* Not while in use */
1024 1016
1025 spin_lock(&dev->count_lock); 1017 spin_lock(&dev->count_lock);
1026 if (dev->buf_use) { 1018 if (dev->buf_use) {
@@ -1071,7 +1063,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
1071 buf->next = NULL; 1063 buf->next = NULL;
1072 buf->waiting = 0; 1064 buf->waiting = 0;
1073 buf->pending = 0; 1065 buf->pending = 0;
1074 init_waitqueue_head(&buf->dma_wait);
1075 buf->file_priv = NULL; 1066 buf->file_priv = NULL;
1076 1067
1077 buf->dev_priv_size = dev->driver->dev_priv_size; 1068 buf->dev_priv_size = dev->driver->dev_priv_size;
@@ -1177,8 +1168,6 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
1177 1168
1178 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) 1169 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1179 return -EINVAL; 1170 return -EINVAL;
1180 if (dev->queue_count)
1181 return -EBUSY; /* Not while in use */
1182 1171
1183 spin_lock(&dev->count_lock); 1172 spin_lock(&dev->count_lock);
1184 if (dev->buf_use) { 1173 if (dev->buf_use) {
@@ -1228,7 +1217,6 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
1228 buf->next = NULL; 1217 buf->next = NULL;
1229 buf->waiting = 0; 1218 buf->waiting = 0;
1230 buf->pending = 0; 1219 buf->pending = 0;
1231 init_waitqueue_head(&buf->dma_wait);
1232 buf->file_priv = NULL; 1220 buf->file_priv = NULL;
1233 1221
1234 buf->dev_priv_size = dev->driver->dev_priv_size; 1222 buf->dev_priv_size = dev->driver->dev_priv_size;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 08a7aa722d6b..6fbfc244748f 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1981,7 +1981,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
1981 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 1981 if (!drm_core_check_feature(dev, DRIVER_MODESET))
1982 return -EINVAL; 1982 return -EINVAL;
1983 1983
1984 if (!req->flags) 1984 if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
1985 return -EINVAL; 1985 return -EINVAL;
1986 1986
1987 mutex_lock(&dev->mode_config.mutex); 1987 mutex_lock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 1c7a1c0d3edd..70b13fc19396 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -46,7 +46,6 @@ static struct drm_info_list drm_debugfs_list[] = {
46 {"name", drm_name_info, 0}, 46 {"name", drm_name_info, 0},
47 {"vm", drm_vm_info, 0}, 47 {"vm", drm_vm_info, 0},
48 {"clients", drm_clients_info, 0}, 48 {"clients", drm_clients_info, 0},
49 {"queues", drm_queues_info, 0},
50 {"bufs", drm_bufs_info, 0}, 49 {"bufs", drm_bufs_info, 0},
51 {"gem_names", drm_gem_name_info, DRIVER_GEM}, 50 {"gem_names", drm_gem_name_info, DRIVER_GEM},
52#if DRM_DEBUG_CODE 51#if DRM_DEBUG_CODE
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index cfb4e333ec0f..08f5e5309b22 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -120,11 +120,6 @@ void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
120 buf->pending = 0; 120 buf->pending = 0;
121 buf->file_priv = NULL; 121 buf->file_priv = NULL;
122 buf->used = 0; 122 buf->used = 0;
123
124 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
125 && waitqueue_active(&buf->dma_wait)) {
126 wake_up_interruptible(&buf->dma_wait);
127 }
128} 123}
129 124
130/** 125/**
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 8a9d0792e4ec..9238de4009fa 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -182,7 +182,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
182int drm_lastclose(struct drm_device * dev) 182int drm_lastclose(struct drm_device * dev)
183{ 183{
184 struct drm_vma_entry *vma, *vma_temp; 184 struct drm_vma_entry *vma, *vma_temp;
185 int i;
186 185
187 DRM_DEBUG("\n"); 186 DRM_DEBUG("\n");
188 187
@@ -228,16 +227,6 @@ int drm_lastclose(struct drm_device * dev)
228 kfree(vma); 227 kfree(vma);
229 } 228 }
230 229
231 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
232 for (i = 0; i < dev->queue_count; i++) {
233 kfree(dev->queuelist[i]);
234 dev->queuelist[i] = NULL;
235 }
236 kfree(dev->queuelist);
237 dev->queuelist = NULL;
238 }
239 dev->queue_count = 0;
240
241 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && 230 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
242 !drm_core_check_feature(dev, DRIVER_MODESET)) 231 !drm_core_check_feature(dev, DRIVER_MODESET))
243 drm_dma_takedown(dev); 232 drm_dma_takedown(dev);
@@ -486,7 +475,7 @@ long drm_ioctl(struct file *filp,
486 kfree(kdata); 475 kfree(kdata);
487 atomic_dec(&dev->ioctl_count); 476 atomic_dec(&dev->ioctl_count);
488 if (retcode) 477 if (retcode)
489 DRM_DEBUG("ret = %x\n", retcode); 478 DRM_DEBUG("ret = %d\n", retcode);
490 return retcode; 479 return retcode;
491} 480}
492 481
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index eb92fe257a39..b7ee230572b7 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -87,6 +87,9 @@ static struct edid_quirk {
87 int product_id; 87 int product_id;
88 u32 quirks; 88 u32 quirks;
89} edid_quirk_list[] = { 89} edid_quirk_list[] = {
90 /* ASUS VW222S */
91 { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
92
90 /* Acer AL1706 */ 93 /* Acer AL1706 */
91 { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 }, 94 { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
92 /* Acer F51 */ 95 /* Acer F51 */
@@ -610,7 +613,7 @@ static bool
610drm_monitor_supports_rb(struct edid *edid) 613drm_monitor_supports_rb(struct edid *edid)
611{ 614{
612 if (edid->revision >= 4) { 615 if (edid->revision >= 4) {
613 bool ret; 616 bool ret = false;
614 drm_for_each_detailed_block((u8 *)edid, is_rb, &ret); 617 drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
615 return ret; 618 return ret;
616 } 619 }
@@ -1039,6 +1042,24 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
1039 return true; 1042 return true;
1040} 1043}
1041 1044
1045static bool valid_inferred_mode(const struct drm_connector *connector,
1046 const struct drm_display_mode *mode)
1047{
1048 struct drm_display_mode *m;
1049 bool ok = false;
1050
1051 list_for_each_entry(m, &connector->probed_modes, head) {
1052 if (mode->hdisplay == m->hdisplay &&
1053 mode->vdisplay == m->vdisplay &&
1054 drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
1055 return false; /* duplicated */
1056 if (mode->hdisplay <= m->hdisplay &&
1057 mode->vdisplay <= m->vdisplay)
1058 ok = true;
1059 }
1060 return ok;
1061}
1062
1042static int 1063static int
1043drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid, 1064drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
1044 struct detailed_timing *timing) 1065 struct detailed_timing *timing)
@@ -1048,7 +1069,8 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
1048 struct drm_device *dev = connector->dev; 1069 struct drm_device *dev = connector->dev;
1049 1070
1050 for (i = 0; i < drm_num_dmt_modes; i++) { 1071 for (i = 0; i < drm_num_dmt_modes; i++) {
1051 if (mode_in_range(drm_dmt_modes + i, edid, timing)) { 1072 if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
1073 valid_inferred_mode(connector, drm_dmt_modes + i)) {
1052 newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]); 1074 newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
1053 if (newmode) { 1075 if (newmode) {
1054 drm_mode_probed_add(connector, newmode); 1076 drm_mode_probed_add(connector, newmode);
@@ -1088,7 +1110,8 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
1088 return modes; 1110 return modes;
1089 1111
1090 fixup_mode_1366x768(newmode); 1112 fixup_mode_1366x768(newmode);
1091 if (!mode_in_range(newmode, edid, timing)) { 1113 if (!mode_in_range(newmode, edid, timing) ||
1114 !valid_inferred_mode(connector, newmode)) {
1092 drm_mode_destroy(dev, newmode); 1115 drm_mode_destroy(dev, newmode);
1093 continue; 1116 continue;
1094 } 1117 }
@@ -1116,7 +1139,8 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
1116 return modes; 1139 return modes;
1117 1140
1118 fixup_mode_1366x768(newmode); 1141 fixup_mode_1366x768(newmode);
1119 if (!mode_in_range(newmode, edid, timing)) { 1142 if (!mode_in_range(newmode, edid, timing) ||
1143 !valid_inferred_mode(connector, newmode)) {
1120 drm_mode_destroy(dev, newmode); 1144 drm_mode_destroy(dev, newmode);
1121 continue; 1145 continue;
1122 } 1146 }
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 66d4a28ad5a2..0303935d10e2 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -119,7 +119,7 @@ static int edid_load(struct drm_connector *connector, char *name,
119{ 119{
120 const struct firmware *fw; 120 const struct firmware *fw;
121 struct platform_device *pdev; 121 struct platform_device *pdev;
122 u8 *fwdata = NULL, *edid; 122 u8 *fwdata = NULL, *edid, *new_edid;
123 int fwsize, expected; 123 int fwsize, expected;
124 int builtin = 0, err = 0; 124 int builtin = 0, err = 0;
125 int i, valid_extensions = 0; 125 int i, valid_extensions = 0;
@@ -195,12 +195,14 @@ static int edid_load(struct drm_connector *connector, char *name,
195 "\"%s\" for connector \"%s\"\n", valid_extensions, 195 "\"%s\" for connector \"%s\"\n", valid_extensions,
196 edid[0x7e], name, connector_name); 196 edid[0x7e], name, connector_name);
197 edid[0x7e] = valid_extensions; 197 edid[0x7e] = valid_extensions;
198 edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, 198 new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
199 GFP_KERNEL); 199 GFP_KERNEL);
200 if (edid == NULL) { 200 if (new_edid == NULL) {
201 err = -ENOMEM; 201 err = -ENOMEM;
202 kfree(edid);
202 goto relfw_out; 203 goto relfw_out;
203 } 204 }
205 edid = new_edid;
204 } 206 }
205 207
206 connector->display_info.raw_edid = edid; 208 connector->display_info.raw_edid = edid;
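The edid_load() change above is an instance of a general rule: never assign krealloc()'s return value over the only pointer that still owns the old buffer, because on failure krealloc() leaves the old allocation intact and the overwrite leaks it. The idiom in isolation (a sketch, not driver code):

	#include <linux/slab.h>

	static void *resize_or_free(void *buf, size_t new_size)
	{
		void *resized = krealloc(buf, new_size, GFP_KERNEL);

		if (!resized)
			kfree(buf);	/* krealloc() failed: 'buf' is still
					 * valid and must be released, as the
					 * fix now does with 'edid' */
		return resized;		/* NULL => caller returns -ENOMEM */
	}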
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 5683b7fdd746..f546d1e8af82 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -228,7 +228,7 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
228 int i, ret; 228 int i, ret;
229 for (i = 0; i < fb_helper->crtc_count; i++) { 229 for (i = 0; i < fb_helper->crtc_count; i++) {
230 struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set; 230 struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
231 ret = drm_crtc_helper_set_config(mode_set); 231 ret = mode_set->crtc->funcs->set_config(mode_set);
232 if (ret) 232 if (ret)
233 error = true; 233 error = true;
234 } 234 }
@@ -1353,7 +1353,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1353 struct drm_device *dev = fb_helper->dev; 1353 struct drm_device *dev = fb_helper->dev;
1354 int count = 0; 1354 int count = 0;
1355 u32 max_width, max_height, bpp_sel; 1355 u32 max_width, max_height, bpp_sel;
1356 bool bound = false, crtcs_bound = false; 1356 int bound = 0, crtcs_bound = 0;
1357 struct drm_crtc *crtc; 1357 struct drm_crtc *crtc;
1358 1358
1359 if (!fb_helper->fb) 1359 if (!fb_helper->fb)
@@ -1362,12 +1362,12 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1362 mutex_lock(&dev->mode_config.mutex); 1362 mutex_lock(&dev->mode_config.mutex);
1363 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1363 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1364 if (crtc->fb) 1364 if (crtc->fb)
1365 crtcs_bound = true; 1365 crtcs_bound++;
1366 if (crtc->fb == fb_helper->fb) 1366 if (crtc->fb == fb_helper->fb)
1367 bound = true; 1367 bound++;
1368 } 1368 }
1369 1369
1370 if (!bound && crtcs_bound) { 1370 if (bound < crtcs_bound) {
1371 fb_helper->delayed_hotplug = true; 1371 fb_helper->delayed_hotplug = true;
1372 mutex_unlock(&dev->mode_config.mutex); 1372 mutex_unlock(&dev->mode_config.mutex);
1373 return 0; 1373 return 0;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 123de28f94ef..5062eec673f1 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -75,10 +75,6 @@ static int drm_setup(struct drm_device * dev)
75 75
76 dev->sigdata.lock = NULL; 76 dev->sigdata.lock = NULL;
77 77
78 dev->queue_count = 0;
79 dev->queue_reserved = 0;
80 dev->queue_slots = 0;
81 dev->queuelist = NULL;
82 dev->context_flag = 0; 78 dev->context_flag = 0;
83 dev->interrupt_flag = 0; 79 dev->interrupt_flag = 0;
84 dev->dma_flag = 0; 80 dev->dma_flag = 0;
@@ -144,12 +140,12 @@ int drm_open(struct inode *inode, struct file *filp)
144 } 140 }
145 if (!retcode) { 141 if (!retcode) {
146 mutex_lock(&dev->struct_mutex); 142 mutex_lock(&dev->struct_mutex);
147 if (minor->type == DRM_MINOR_LEGACY) { 143 if (dev->dev_mapping == NULL)
148 if (dev->dev_mapping == NULL) 144 dev->dev_mapping = &inode->i_data;
149 dev->dev_mapping = inode->i_mapping; 145 /* ihold ensures nobody can remove inode with our i_data */
150 else if (dev->dev_mapping != inode->i_mapping) 146 ihold(container_of(dev->dev_mapping, struct inode, i_data));
151 retcode = -ENODEV; 147 inode->i_mapping = dev->dev_mapping;
152 } 148 filp->f_mapping = dev->dev_mapping;
153 mutex_unlock(&dev->struct_mutex); 149 mutex_unlock(&dev->struct_mutex);
154 } 150 }
155 151
@@ -370,72 +366,16 @@ int drm_fasync(int fd, struct file *filp, int on)
370} 366}
371EXPORT_SYMBOL(drm_fasync); 367EXPORT_SYMBOL(drm_fasync);
372 368
373/*
374 * Reclaim locked buffers; note that this may be a bad idea if the current
375 * context doesn't have the hw lock...
376 */
377static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f)
378{
379 struct drm_file *file_priv = f->private_data;
380
381 if (drm_i_have_hw_lock(dev, file_priv)) {
382 dev->driver->reclaim_buffers_locked(dev, file_priv);
383 } else {
384 unsigned long _end = jiffies + 3 * DRM_HZ;
385 int locked = 0;
386
387 drm_idlelock_take(&file_priv->master->lock);
388
389 /*
390 * Wait for a while.
391 */
392 do {
393 spin_lock_bh(&file_priv->master->lock.spinlock);
394 locked = file_priv->master->lock.idle_has_lock;
395 spin_unlock_bh(&file_priv->master->lock.spinlock);
396 if (locked)
397 break;
398 schedule();
399 } while (!time_after_eq(jiffies, _end));
400
401 if (!locked) {
402 DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
403 "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
404 "\tI will go on reclaiming the buffers anyway.\n");
405 }
406
407 dev->driver->reclaim_buffers_locked(dev, file_priv);
408 drm_idlelock_release(&file_priv->master->lock);
409 }
410}
411
412static void drm_master_release(struct drm_device *dev, struct file *filp) 369static void drm_master_release(struct drm_device *dev, struct file *filp)
413{ 370{
414 struct drm_file *file_priv = filp->private_data; 371 struct drm_file *file_priv = filp->private_data;
415 372
416 if (dev->driver->reclaim_buffers_locked &&
417 file_priv->master->lock.hw_lock)
418 drm_reclaim_locked_buffers(dev, filp);
419
420 if (dev->driver->reclaim_buffers_idlelocked &&
421 file_priv->master->lock.hw_lock) {
422 drm_idlelock_take(&file_priv->master->lock);
423 dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
424 drm_idlelock_release(&file_priv->master->lock);
425 }
426
427
428 if (drm_i_have_hw_lock(dev, file_priv)) { 373 if (drm_i_have_hw_lock(dev, file_priv)) {
429 DRM_DEBUG("File %p released, freeing lock for context %d\n", 374 DRM_DEBUG("File %p released, freeing lock for context %d\n",
430 filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); 375 filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
431 drm_lock_free(&file_priv->master->lock, 376 drm_lock_free(&file_priv->master->lock,
432 _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); 377 _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
433 } 378 }
434
435 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
436 !dev->driver->reclaim_buffers_locked) {
437 dev->driver->reclaim_buffers(dev, file_priv);
438 }
439} 379}
440 380
441static void drm_events_release(struct drm_file *file_priv) 381static void drm_events_release(struct drm_file *file_priv)
@@ -505,6 +445,9 @@ int drm_release(struct inode *inode, struct file *filp)
505 if (file_priv->minor->master) 445 if (file_priv->minor->master)
506 drm_master_release(dev, filp); 446 drm_master_release(dev, filp);
507 447
448 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
449 drm_core_reclaim_buffers(dev, file_priv);
450
508 drm_events_release(file_priv); 451 drm_events_release(file_priv);
509 452
510 if (dev->driver->driver_features & DRIVER_MODESET) 453 if (dev->driver->driver_features & DRIVER_MODESET)
@@ -566,6 +509,9 @@ int drm_release(struct inode *inode, struct file *filp)
566 } 509 }
567 } 510 }
568 511
512 BUG_ON(dev->dev_mapping == NULL);
513 iput(container_of(dev->dev_mapping, struct inode, i_data));
514
 569 /* drop the reference held by the file priv */ 515 /* drop the reference held by the file priv */
570 drm_master_put(&file_priv->master); 516 drm_master_put(&file_priv->master);
571 file_priv->is_master = 0; 517 file_priv->is_master = 0;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index d58e69da1fb5..fbe0842038b5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -354,7 +354,7 @@ drm_gem_create_mmap_offset(struct drm_gem_object *obj)
354 354
355 /* Get a DRM GEM mmap offset allocated... */ 355 /* Get a DRM GEM mmap offset allocated... */
356 list->file_offset_node = drm_mm_search_free(&mm->offset_manager, 356 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
357 obj->size / PAGE_SIZE, 0, 0); 357 obj->size / PAGE_SIZE, 0, false);
358 358
359 if (!list->file_offset_node) { 359 if (!list->file_offset_node) {
360 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); 360 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index ab1162da70f8..8928edbb94c7 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -110,42 +110,6 @@ int drm_vm_info(struct seq_file *m, void *data)
110} 110}
111 111
112/** 112/**
113 * Called when "/proc/dri/.../queues" is read.
114 */
115int drm_queues_info(struct seq_file *m, void *data)
116{
117 struct drm_info_node *node = (struct drm_info_node *) m->private;
118 struct drm_device *dev = node->minor->dev;
119 int i;
120 struct drm_queue *q;
121
122 mutex_lock(&dev->struct_mutex);
123 seq_printf(m, " ctx/flags use fin"
124 " blk/rw/rwf wait flushed queued"
125 " locks\n\n");
126 for (i = 0; i < dev->queue_count; i++) {
127 q = dev->queuelist[i];
128 atomic_inc(&q->use_count);
129 seq_printf(m, "%5d/0x%03x %5d %5d"
130 " %5d/%c%c/%c%c%c %5Zd\n",
131 i,
132 q->flags,
133 atomic_read(&q->use_count),
134 atomic_read(&q->finalization),
135 atomic_read(&q->block_count),
136 atomic_read(&q->block_read) ? 'r' : '-',
137 atomic_read(&q->block_write) ? 'w' : '-',
138 waitqueue_active(&q->read_queue) ? 'r' : '-',
139 waitqueue_active(&q->write_queue) ? 'w' : '-',
140 waitqueue_active(&q->flush_queue) ? 'f' : '-',
141 DRM_BUFCOUNT(&q->waitlist));
142 atomic_dec(&q->use_count);
143 }
144 mutex_unlock(&dev->struct_mutex);
145 return 0;
146}
147
148/**
149 * Called when "/proc/dri/.../bufs" is read. 113 * Called when "/proc/dri/.../bufs" is read.
150 */ 114 */
151int drm_bufs_info(struct seq_file *m, void *data) 115int drm_bufs_info(struct seq_file *m, void *data)
@@ -235,7 +199,7 @@ int drm_clients_info(struct seq_file *m, void *data)
235} 199}
236 200
237 201
238int drm_gem_one_name_info(int id, void *ptr, void *data) 202static int drm_gem_one_name_info(int id, void *ptr, void *data)
239{ 203{
240 struct drm_gem_object *obj = ptr; 204 struct drm_gem_object *obj = ptr;
241 struct seq_file *m = data; 205 struct seq_file *m = data;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index c798eeae0a03..03f16f352fe2 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -974,7 +974,6 @@ EXPORT_SYMBOL(drm_vblank_off);
974 * drm_vblank_pre_modeset - account for vblanks across mode sets 974 * drm_vblank_pre_modeset - account for vblanks across mode sets
975 * @dev: DRM device 975 * @dev: DRM device
976 * @crtc: CRTC in question 976 * @crtc: CRTC in question
977 * @post: post or pre mode set?
978 * 977 *
979 * Account for vblank events across mode setting events, which will likely 978 * Account for vblank events across mode setting events, which will likely
980 * reset the hardware frame counter. 979 * reset the hardware frame counter.
@@ -1037,6 +1036,10 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
1037 if (!dev->num_crtcs) 1036 if (!dev->num_crtcs)
1038 return 0; 1037 return 0;
1039 1038
1039 /* KMS drivers handle this internally */
1040 if (drm_core_check_feature(dev, DRIVER_MODESET))
1041 return 0;
1042
1040 crtc = modeset->crtc; 1043 crtc = modeset->crtc;
1041 if (crtc >= dev->num_crtcs) 1044 if (crtc >= dev->num_crtcs)
1042 return -EINVAL; 1045 return -EINVAL;
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 521152041691..32039553e172 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -70,10 +70,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
70 lock->context, task_pid_nr(current), 70 lock->context, task_pid_nr(current),
71 master->lock.hw_lock->lock, lock->flags); 71 master->lock.hw_lock->lock, lock->flags);
72 72
73 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
74 if (lock->context < 0)
75 return -EINVAL;
76
77 add_wait_queue(&master->lock.lock_queue, &entry); 73 add_wait_queue(&master->lock.lock_queue, &entry);
78 spin_lock_bh(&master->lock.spinlock); 74 spin_lock_bh(&master->lock.spinlock);
79 master->lock.user_waiters++; 75 master->lock.user_waiters++;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 961fb54f4266..9bb82f7f0061 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -118,45 +118,53 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
118 118
119static void drm_mm_insert_helper(struct drm_mm_node *hole_node, 119static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
120 struct drm_mm_node *node, 120 struct drm_mm_node *node,
121 unsigned long size, unsigned alignment) 121 unsigned long size, unsigned alignment,
122 unsigned long color)
122{ 123{
123 struct drm_mm *mm = hole_node->mm; 124 struct drm_mm *mm = hole_node->mm;
124 unsigned long tmp = 0, wasted = 0;
125 unsigned long hole_start = drm_mm_hole_node_start(hole_node); 125 unsigned long hole_start = drm_mm_hole_node_start(hole_node);
126 unsigned long hole_end = drm_mm_hole_node_end(hole_node); 126 unsigned long hole_end = drm_mm_hole_node_end(hole_node);
127 unsigned long adj_start = hole_start;
128 unsigned long adj_end = hole_end;
127 129
128 BUG_ON(!hole_node->hole_follows || node->allocated); 130 BUG_ON(!hole_node->hole_follows || node->allocated);
129 131
130 if (alignment) 132 if (mm->color_adjust)
131 tmp = hole_start % alignment; 133 mm->color_adjust(hole_node, color, &adj_start, &adj_end);
132 134
133 if (!tmp) { 135 if (alignment) {
136 unsigned tmp = adj_start % alignment;
137 if (tmp)
138 adj_start += alignment - tmp;
139 }
140
141 if (adj_start == hole_start) {
134 hole_node->hole_follows = 0; 142 hole_node->hole_follows = 0;
135 list_del_init(&hole_node->hole_stack); 143 list_del(&hole_node->hole_stack);
136 } else 144 }
137 wasted = alignment - tmp;
138 145
139 node->start = hole_start + wasted; 146 node->start = adj_start;
140 node->size = size; 147 node->size = size;
141 node->mm = mm; 148 node->mm = mm;
149 node->color = color;
142 node->allocated = 1; 150 node->allocated = 1;
143 151
144 INIT_LIST_HEAD(&node->hole_stack); 152 INIT_LIST_HEAD(&node->hole_stack);
145 list_add(&node->node_list, &hole_node->node_list); 153 list_add(&node->node_list, &hole_node->node_list);
146 154
147 BUG_ON(node->start + node->size > hole_end); 155 BUG_ON(node->start + node->size > adj_end);
148 156
157 node->hole_follows = 0;
149 if (node->start + node->size < hole_end) { 158 if (node->start + node->size < hole_end) {
150 list_add(&node->hole_stack, &mm->hole_stack); 159 list_add(&node->hole_stack, &mm->hole_stack);
151 node->hole_follows = 1; 160 node->hole_follows = 1;
152 } else {
153 node->hole_follows = 0;
154 } 161 }
155} 162}
156 163
157struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node, 164struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
158 unsigned long size, 165 unsigned long size,
159 unsigned alignment, 166 unsigned alignment,
167 unsigned long color,
160 int atomic) 168 int atomic)
161{ 169{
162 struct drm_mm_node *node; 170 struct drm_mm_node *node;
@@ -165,7 +173,7 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
165 if (unlikely(node == NULL)) 173 if (unlikely(node == NULL))
166 return NULL; 174 return NULL;
167 175
168 drm_mm_insert_helper(hole_node, node, size, alignment); 176 drm_mm_insert_helper(hole_node, node, size, alignment, color);
169 177
170 return node; 178 return node;
171} 179}
@@ -181,11 +189,11 @@ int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
181{ 189{
182 struct drm_mm_node *hole_node; 190 struct drm_mm_node *hole_node;
183 191
184 hole_node = drm_mm_search_free(mm, size, alignment, 0); 192 hole_node = drm_mm_search_free(mm, size, alignment, false);
185 if (!hole_node) 193 if (!hole_node)
186 return -ENOSPC; 194 return -ENOSPC;
187 195
188 drm_mm_insert_helper(hole_node, node, size, alignment); 196 drm_mm_insert_helper(hole_node, node, size, alignment, 0);
189 197
190 return 0; 198 return 0;
191} 199}
@@ -194,50 +202,57 @@ EXPORT_SYMBOL(drm_mm_insert_node);
194static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, 202static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
195 struct drm_mm_node *node, 203 struct drm_mm_node *node,
196 unsigned long size, unsigned alignment, 204 unsigned long size, unsigned alignment,
205 unsigned long color,
197 unsigned long start, unsigned long end) 206 unsigned long start, unsigned long end)
198{ 207{
199 struct drm_mm *mm = hole_node->mm; 208 struct drm_mm *mm = hole_node->mm;
200 unsigned long tmp = 0, wasted = 0;
201 unsigned long hole_start = drm_mm_hole_node_start(hole_node); 209 unsigned long hole_start = drm_mm_hole_node_start(hole_node);
202 unsigned long hole_end = drm_mm_hole_node_end(hole_node); 210 unsigned long hole_end = drm_mm_hole_node_end(hole_node);
211 unsigned long adj_start = hole_start;
212 unsigned long adj_end = hole_end;
203 213
204 BUG_ON(!hole_node->hole_follows || node->allocated); 214 BUG_ON(!hole_node->hole_follows || node->allocated);
205 215
206 if (hole_start < start) 216 if (mm->color_adjust)
207 wasted += start - hole_start; 217 mm->color_adjust(hole_node, color, &adj_start, &adj_end);
208 if (alignment)
209 tmp = (hole_start + wasted) % alignment;
210 218
211 if (tmp) 219 if (adj_start < start)
212 wasted += alignment - tmp; 220 adj_start = start;
221
222 if (alignment) {
223 unsigned tmp = adj_start % alignment;
224 if (tmp)
225 adj_start += alignment - tmp;
226 }
213 227
214 if (!wasted) { 228 if (adj_start == hole_start) {
215 hole_node->hole_follows = 0; 229 hole_node->hole_follows = 0;
216 list_del_init(&hole_node->hole_stack); 230 list_del(&hole_node->hole_stack);
217 } 231 }
218 232
219 node->start = hole_start + wasted; 233 node->start = adj_start;
220 node->size = size; 234 node->size = size;
221 node->mm = mm; 235 node->mm = mm;
236 node->color = color;
222 node->allocated = 1; 237 node->allocated = 1;
223 238
224 INIT_LIST_HEAD(&node->hole_stack); 239 INIT_LIST_HEAD(&node->hole_stack);
225 list_add(&node->node_list, &hole_node->node_list); 240 list_add(&node->node_list, &hole_node->node_list);
226 241
227 BUG_ON(node->start + node->size > hole_end); 242 BUG_ON(node->start + node->size > adj_end);
228 BUG_ON(node->start + node->size > end); 243 BUG_ON(node->start + node->size > end);
229 244
245 node->hole_follows = 0;
230 if (node->start + node->size < hole_end) { 246 if (node->start + node->size < hole_end) {
231 list_add(&node->hole_stack, &mm->hole_stack); 247 list_add(&node->hole_stack, &mm->hole_stack);
232 node->hole_follows = 1; 248 node->hole_follows = 1;
233 } else {
234 node->hole_follows = 0;
235 } 249 }
236} 250}
237 251
238struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node, 252struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
239 unsigned long size, 253 unsigned long size,
240 unsigned alignment, 254 unsigned alignment,
255 unsigned long color,
241 unsigned long start, 256 unsigned long start,
242 unsigned long end, 257 unsigned long end,
243 int atomic) 258 int atomic)
@@ -248,7 +263,7 @@ struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node
248 if (unlikely(node == NULL)) 263 if (unlikely(node == NULL))
249 return NULL; 264 return NULL;
250 265
251 drm_mm_insert_helper_range(hole_node, node, size, alignment, 266 drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
252 start, end); 267 start, end);
253 268
254 return node; 269 return node;
@@ -267,11 +282,11 @@ int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
267 struct drm_mm_node *hole_node; 282 struct drm_mm_node *hole_node;
268 283
269 hole_node = drm_mm_search_free_in_range(mm, size, alignment, 284 hole_node = drm_mm_search_free_in_range(mm, size, alignment,
270 start, end, 0); 285 start, end, false);
271 if (!hole_node) 286 if (!hole_node)
272 return -ENOSPC; 287 return -ENOSPC;
273 288
274 drm_mm_insert_helper_range(hole_node, node, size, alignment, 289 drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
275 start, end); 290 start, end);
276 291
277 return 0; 292 return 0;
@@ -336,27 +351,23 @@ EXPORT_SYMBOL(drm_mm_put_block);
336static int check_free_hole(unsigned long start, unsigned long end, 351static int check_free_hole(unsigned long start, unsigned long end,
337 unsigned long size, unsigned alignment) 352 unsigned long size, unsigned alignment)
338{ 353{
339 unsigned wasted = 0;
340
341 if (end - start < size) 354 if (end - start < size)
342 return 0; 355 return 0;
343 356
344 if (alignment) { 357 if (alignment) {
345 unsigned tmp = start % alignment; 358 unsigned tmp = start % alignment;
346 if (tmp) 359 if (tmp)
347 wasted = alignment - tmp; 360 start += alignment - tmp;
348 }
349
350 if (end >= start + size + wasted) {
351 return 1;
352 } 361 }
353 362
354 return 0; 363 return end >= start + size;
355} 364}
356 365
357struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, 366struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
358 unsigned long size, 367 unsigned long size,
359 unsigned alignment, int best_match) 368 unsigned alignment,
369 unsigned long color,
370 bool best_match)
360{ 371{
361 struct drm_mm_node *entry; 372 struct drm_mm_node *entry;
362 struct drm_mm_node *best; 373 struct drm_mm_node *best;
@@ -368,10 +379,17 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
368 best_size = ~0UL; 379 best_size = ~0UL;
369 380
370 list_for_each_entry(entry, &mm->hole_stack, hole_stack) { 381 list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
382 unsigned long adj_start = drm_mm_hole_node_start(entry);
383 unsigned long adj_end = drm_mm_hole_node_end(entry);
384
385 if (mm->color_adjust) {
386 mm->color_adjust(entry, color, &adj_start, &adj_end);
387 if (adj_end <= adj_start)
388 continue;
389 }
390
371 BUG_ON(!entry->hole_follows); 391 BUG_ON(!entry->hole_follows);
372 if (!check_free_hole(drm_mm_hole_node_start(entry), 392 if (!check_free_hole(adj_start, adj_end, size, alignment))
373 drm_mm_hole_node_end(entry),
374 size, alignment))
375 continue; 393 continue;
376 394
377 if (!best_match) 395 if (!best_match)
@@ -385,14 +403,15 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
385 403
386 return best; 404 return best;
387} 405}
388EXPORT_SYMBOL(drm_mm_search_free); 406EXPORT_SYMBOL(drm_mm_search_free_generic);
389 407
390struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, 408struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
391 unsigned long size, 409 unsigned long size,
392 unsigned alignment, 410 unsigned alignment,
393 unsigned long start, 411 unsigned long color,
394 unsigned long end, 412 unsigned long start,
395 int best_match) 413 unsigned long end,
414 bool best_match)
396{ 415{
397 struct drm_mm_node *entry; 416 struct drm_mm_node *entry;
398 struct drm_mm_node *best; 417 struct drm_mm_node *best;
@@ -410,6 +429,13 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
410 end : drm_mm_hole_node_end(entry); 429 end : drm_mm_hole_node_end(entry);
411 430
412 BUG_ON(!entry->hole_follows); 431 BUG_ON(!entry->hole_follows);
432
433 if (mm->color_adjust) {
434 mm->color_adjust(entry, color, &adj_start, &adj_end);
435 if (adj_end <= adj_start)
436 continue;
437 }
438
413 if (!check_free_hole(adj_start, adj_end, size, alignment)) 439 if (!check_free_hole(adj_start, adj_end, size, alignment))
414 continue; 440 continue;
415 441
@@ -424,7 +450,7 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
424 450
425 return best; 451 return best;
426} 452}
427EXPORT_SYMBOL(drm_mm_search_free_in_range); 453EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
428 454
429/** 455/**
430 * Moves an allocation. To be used with embedded struct drm_mm_node. 456 * Moves an allocation. To be used with embedded struct drm_mm_node.
@@ -437,6 +463,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
437 new->mm = old->mm; 463 new->mm = old->mm;
438 new->start = old->start; 464 new->start = old->start;
439 new->size = old->size; 465 new->size = old->size;
466 new->color = old->color;
440 467
441 old->allocated = 0; 468 old->allocated = 0;
442 new->allocated = 1; 469 new->allocated = 1;
@@ -452,9 +479,12 @@ EXPORT_SYMBOL(drm_mm_replace_node);
452 * Warning: As long as the scan list is non-empty, no other operations than 479 * Warning: As long as the scan list is non-empty, no other operations than
453 * adding/removing nodes to/from the scan list are allowed. 480 * adding/removing nodes to/from the scan list are allowed.
454 */ 481 */
455void drm_mm_init_scan(struct drm_mm *mm, unsigned long size, 482void drm_mm_init_scan(struct drm_mm *mm,
456 unsigned alignment) 483 unsigned long size,
484 unsigned alignment,
485 unsigned long color)
457{ 486{
487 mm->scan_color = color;
458 mm->scan_alignment = alignment; 488 mm->scan_alignment = alignment;
459 mm->scan_size = size; 489 mm->scan_size = size;
460 mm->scanned_blocks = 0; 490 mm->scanned_blocks = 0;
@@ -474,11 +504,14 @@ EXPORT_SYMBOL(drm_mm_init_scan);
474 * Warning: As long as the scan list is non-empty, no other operations than 504 * Warning: As long as the scan list is non-empty, no other operations than
475 * adding/removing nodes to/from the scan list are allowed. 505 * adding/removing nodes to/from the scan list are allowed.
476 */ 506 */
477void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size, 507void drm_mm_init_scan_with_range(struct drm_mm *mm,
508 unsigned long size,
478 unsigned alignment, 509 unsigned alignment,
510 unsigned long color,
479 unsigned long start, 511 unsigned long start,
480 unsigned long end) 512 unsigned long end)
481{ 513{
514 mm->scan_color = color;
482 mm->scan_alignment = alignment; 515 mm->scan_alignment = alignment;
483 mm->scan_size = size; 516 mm->scan_size = size;
484 mm->scanned_blocks = 0; 517 mm->scanned_blocks = 0;
@@ -522,17 +555,21 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
522 555
523 hole_start = drm_mm_hole_node_start(prev_node); 556 hole_start = drm_mm_hole_node_start(prev_node);
524 hole_end = drm_mm_hole_node_end(prev_node); 557 hole_end = drm_mm_hole_node_end(prev_node);
558
559 adj_start = hole_start;
560 adj_end = hole_end;
561
562 if (mm->color_adjust)
563 mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);
564
525 if (mm->scan_check_range) { 565 if (mm->scan_check_range) {
526 adj_start = hole_start < mm->scan_start ? 566 if (adj_start < mm->scan_start)
527 mm->scan_start : hole_start; 567 adj_start = mm->scan_start;
528 adj_end = hole_end > mm->scan_end ? 568 if (adj_end > mm->scan_end)
529 mm->scan_end : hole_end; 569 adj_end = mm->scan_end;
530 } else {
531 adj_start = hole_start;
532 adj_end = hole_end;
533 } 570 }
534 571
535 if (check_free_hole(adj_start , adj_end, 572 if (check_free_hole(adj_start, adj_end,
536 mm->scan_size, mm->scan_alignment)) { 573 mm->scan_size, mm->scan_alignment)) {
537 mm->scan_hit_start = hole_start; 574 mm->scan_hit_start = hole_start;
538 mm->scan_hit_size = hole_end; 575 mm->scan_hit_size = hole_end;
@@ -616,6 +653,8 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
616 mm->head_node.size = start - mm->head_node.start; 653 mm->head_node.size = start - mm->head_node.start;
617 list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack); 654 list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
618 655
656 mm->color_adjust = NULL;
657
619 return 0; 658 return 0;
620} 659}
621EXPORT_SYMBOL(drm_mm_init); 660EXPORT_SYMBOL(drm_mm_init);
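
The drm_mm changes above thread an opaque "color" value through allocation, and the new mm->color_adjust hook lets the owner shrink a candidate hole before the size/alignment check runs. A minimal sketch of such a callback, assuming a hypothetical driver that wants a one-page guard between differently-colored neighbours; only the callback signature comes from this patch, the names and policy are illustrative:

	static void my_color_adjust(struct drm_mm_node *node, unsigned long color,
				    unsigned long *start, unsigned long *end)
	{
		/* the hole under consideration immediately follows @node */
		if (node->allocated && node->color != color)
			*start += PAGE_SIZE;	/* guard page after the previous node */

		if (!list_empty(&node->node_list)) {
			struct drm_mm_node *next = list_entry(node->node_list.next,
							      struct drm_mm_node,
							      node_list);
			if (next->allocated && next->color != color)
				*end -= PAGE_SIZE;	/* guard page before the next node */
		}
	}

	/* installed once, after drm_mm_init(): */
	mm->color_adjust = my_color_adjust;
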
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index b7adb4a967fd..28637c181b15 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -706,9 +706,6 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
706 p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal); 706 p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
707 p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay); 707 p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
708 p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal); 708 p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
709
710 p->crtc_hadjusted = false;
711 p->crtc_vadjusted = false;
712} 709}
713EXPORT_SYMBOL(drm_mode_set_crtcinfo); 710EXPORT_SYMBOL(drm_mode_set_crtcinfo);
714 711
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 13f3d936472f..5320364582ce 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -465,3 +465,52 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
465 DRM_INFO("Module unloaded\n"); 465 DRM_INFO("Module unloaded\n");
466} 466}
467EXPORT_SYMBOL(drm_pci_exit); 467EXPORT_SYMBOL(drm_pci_exit);
468
469int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
470{
471 struct pci_dev *root;
472 int pos;
473 u32 lnkcap, lnkcap2;
474
475 *mask = 0;
476 if (!dev->pdev)
477 return -EINVAL;
478
479 if (!pci_is_pcie(dev->pdev))
480 return -EINVAL;
481
482 root = dev->pdev->bus->self;
483
484 pos = pci_pcie_cap(root);
485 if (!pos)
486 return -EINVAL;
487
488	/* we've been informed VIA and ServerWorks don't make the cut */
489 if (root->vendor == PCI_VENDOR_ID_VIA ||
490 root->vendor == PCI_VENDOR_ID_SERVERWORKS)
491 return -EINVAL;
492
493 pci_read_config_dword(root, pos + PCI_EXP_LNKCAP, &lnkcap);
494 pci_read_config_dword(root, pos + PCI_EXP_LNKCAP2, &lnkcap2);
495
496 lnkcap &= PCI_EXP_LNKCAP_SLS;
497 lnkcap2 &= 0xfe;
498
499 if (lnkcap2) { /* PCIE GEN 3.0 */
500 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
501 *mask |= DRM_PCIE_SPEED_25;
502 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
503 *mask |= DRM_PCIE_SPEED_50;
504 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
505 *mask |= DRM_PCIE_SPEED_80;
506 } else {
507 if (lnkcap & 1)
508 *mask |= DRM_PCIE_SPEED_25;
509 if (lnkcap & 2)
510 *mask |= DRM_PCIE_SPEED_50;
511 }
512
513 DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
514 return 0;
515}
516EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
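
drm_pcie_get_speed_cap_mask() reports which link speeds the root port advertises (LNKCAP2 on gen3-capable ports, LNKCAP otherwise). A hedged usage sketch — the helper and the DRM_PCIE_SPEED_* bits come from this patch, while the surrounding driver decision and hook name are illustrative:

	u32 mask;

	if (drm_pcie_get_speed_cap_mask(dev, &mask) == 0 &&
	    (mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80))) {
		/* root port can do 5.0 GT/s or better: worth training
		 * the link beyond gen1 (hypothetical driver hook) */
		my_driver_enable_pcie_gen2(dev);
	}
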
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index fff87221f9e9..da457b18eaaf 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -53,7 +53,6 @@ static struct drm_info_list drm_proc_list[] = {
53 {"name", drm_name_info, 0}, 53 {"name", drm_name_info, 0},
54 {"vm", drm_vm_info, 0}, 54 {"vm", drm_vm_info, 0},
55 {"clients", drm_clients_info, 0}, 55 {"clients", drm_clients_info, 0},
56 {"queues", drm_queues_info, 0},
57 {"bufs", drm_bufs_info, 0}, 56 {"bufs", drm_bufs_info, 0},
58 {"gem_names", drm_gem_name_info, DRIVER_GEM}, 57 {"gem_names", drm_gem_name_info, DRIVER_GEM},
59#if DRM_DEBUG_CODE 58#if DRM_DEBUG_CODE
@@ -90,7 +89,7 @@ static const struct file_operations drm_proc_fops = {
90 * Create a given set of proc files represented by an array of 89 * Create a given set of proc files represented by an array of
91 * drm_proc_lists in the given root directory. 90 * drm_proc_lists in the given root directory.
92 */ 91 */
93int drm_proc_create_files(struct drm_info_list *files, int count, 92static int drm_proc_create_files(struct drm_info_list *files, int count,
94 struct proc_dir_entry *root, struct drm_minor *minor) 93 struct proc_dir_entry *root, struct drm_minor *minor)
95{ 94{
96 struct drm_device *dev = minor->dev; 95 struct drm_device *dev = minor->dev;
@@ -173,7 +172,7 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
173 return 0; 172 return 0;
174} 173}
175 174
176int drm_proc_remove_files(struct drm_info_list *files, int count, 175static int drm_proc_remove_files(struct drm_info_list *files, int count,
177 struct drm_minor *minor) 176 struct drm_minor *minor)
178{ 177{
179 struct list_head *pos, *q; 178 struct list_head *pos, *q;
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 45cf1dd3eb9c..45ac8d6c92b7 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -134,6 +134,7 @@ void drm_sysfs_destroy(void)
134 return; 134 return;
135 class_remove_file(drm_class, &class_attr_version.attr); 135 class_remove_file(drm_class, &class_attr_version.attr);
136 class_destroy(drm_class); 136 class_destroy(drm_class);
137 drm_class = NULL;
137} 138}
138 139
139/** 140/**
@@ -554,6 +555,9 @@ void drm_sysfs_device_remove(struct drm_minor *minor)
554 555
555int drm_class_device_register(struct device *dev) 556int drm_class_device_register(struct device *dev)
556{ 557{
558 if (!drm_class || IS_ERR(drm_class))
559 return -ENOENT;
560
557 dev->class = drm_class; 561 dev->class = drm_class;
558 return device_register(dev); 562 return device_register(dev);
559} 563}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index bf791fa0e50d..d9568198c300 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -196,7 +196,8 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
196 return ret; 196 return ret;
197} 197}
198 198
199struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector) 199static struct drm_encoder *exynos_drm_best_encoder(
200 struct drm_connector *connector)
200{ 201{
201 struct drm_device *dev = connector->dev; 202 struct drm_device *dev = connector->dev;
202 struct exynos_drm_connector *exynos_connector = 203 struct exynos_drm_connector *exynos_connector =
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index eaf630dc5dba..84dd099eae3b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -33,7 +33,6 @@
33#include "exynos_drm_fbdev.h" 33#include "exynos_drm_fbdev.h"
34 34
35static LIST_HEAD(exynos_drm_subdrv_list); 35static LIST_HEAD(exynos_drm_subdrv_list);
36static struct drm_device *drm_dev;
37 36
38static int exynos_drm_subdrv_probe(struct drm_device *dev, 37static int exynos_drm_subdrv_probe(struct drm_device *dev,
39 struct exynos_drm_subdrv *subdrv) 38 struct exynos_drm_subdrv *subdrv)
@@ -120,8 +119,6 @@ int exynos_drm_device_register(struct drm_device *dev)
120 if (!dev) 119 if (!dev)
121 return -EINVAL; 120 return -EINVAL;
122 121
123 drm_dev = dev;
124
125 list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) { 122 list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
126 subdrv->drm_dev = dev; 123 subdrv->drm_dev = dev;
127 err = exynos_drm_subdrv_probe(dev, subdrv); 124 err = exynos_drm_subdrv_probe(dev, subdrv);
@@ -149,8 +146,6 @@ int exynos_drm_device_unregister(struct drm_device *dev)
149 list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) 146 list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list)
150 exynos_drm_subdrv_remove(dev, subdrv); 147 exynos_drm_subdrv_remove(dev, subdrv);
151 148
152 drm_dev = NULL;
153
154 return 0; 149 return 0;
155} 150}
156EXPORT_SYMBOL_GPL(exynos_drm_device_unregister); 151EXPORT_SYMBOL_GPL(exynos_drm_device_unregister);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 4afb625128d7..abb1e2f8227f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -29,21 +29,23 @@
29#include "drmP.h" 29#include "drmP.h"
30#include "drm_crtc_helper.h" 30#include "drm_crtc_helper.h"
31 31
32#include "exynos_drm_crtc.h"
33#include "exynos_drm_drv.h" 32#include "exynos_drm_drv.h"
34#include "exynos_drm_fb.h"
35#include "exynos_drm_encoder.h" 33#include "exynos_drm_encoder.h"
36#include "exynos_drm_gem.h" 34#include "exynos_drm_plane.h"
37 35
38#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\ 36#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\
39 drm_crtc) 37 drm_crtc)
40 38
39enum exynos_crtc_mode {
40 CRTC_MODE_NORMAL, /* normal mode */
41 CRTC_MODE_BLANK, /* The private plane of crtc is blank */
42};
43
41/* 44/*
42 * Exynos specific crtc structure. 45 * Exynos specific crtc structure.
43 * 46 *
44 * @drm_crtc: crtc object. 47 * @drm_crtc: crtc object.
45 * @overlay: contain information common to display controller and hdmi and 48 * @drm_plane: pointer to the private plane object for this crtc
46 * contents of this overlay object would be copied to sub driver size.
47 * @pipe: a crtc index created at load() with a new crtc object creation 49 * @pipe: a crtc index created at load() with a new crtc object creation
48 * and the crtc object would be set to private->crtc array 50 * and the crtc object would be set to private->crtc array
49 * to get a crtc object corresponding to this pipe from private->crtc 51 * to get a crtc object corresponding to this pipe from private->crtc
@@ -52,115 +54,16 @@
52 * we can refer to the crtc to current hardware interrupt occurred through 54 * we can refer to the crtc to current hardware interrupt occurred through
53 * this pipe value. 55 * this pipe value.
54 * @dpms: store the crtc dpms value 56 * @dpms: store the crtc dpms value
57 * @mode: store the crtc mode value
55 */ 58 */
56struct exynos_drm_crtc { 59struct exynos_drm_crtc {
57 struct drm_crtc drm_crtc; 60 struct drm_crtc drm_crtc;
58 struct exynos_drm_overlay overlay; 61 struct drm_plane *plane;
59 unsigned int pipe; 62 unsigned int pipe;
60 unsigned int dpms; 63 unsigned int dpms;
64 enum exynos_crtc_mode mode;
61}; 65};
62 66
63static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
64{
65 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
66 struct exynos_drm_overlay *overlay = &exynos_crtc->overlay;
67
68 exynos_drm_fn_encoder(crtc, overlay,
69 exynos_drm_encoder_crtc_mode_set);
70 exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
71 exynos_drm_encoder_crtc_commit);
72}
73
74int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
75 struct drm_framebuffer *fb,
76 struct drm_display_mode *mode,
77 struct exynos_drm_crtc_pos *pos)
78{
79 struct exynos_drm_gem_buf *buffer;
80 unsigned int actual_w;
81 unsigned int actual_h;
82 int nr = exynos_drm_format_num_buffers(fb->pixel_format);
83 int i;
84
85 for (i = 0; i < nr; i++) {
86 buffer = exynos_drm_fb_buffer(fb, i);
87 if (!buffer) {
88 DRM_LOG_KMS("buffer is null\n");
89 return -EFAULT;
90 }
91
92 overlay->dma_addr[i] = buffer->dma_addr;
93 overlay->vaddr[i] = buffer->kvaddr;
94
95 DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
96 i, (unsigned long)overlay->vaddr[i],
97 (unsigned long)overlay->dma_addr[i]);
98 }
99
100 actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
101 actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
102
103 /* set drm framebuffer data. */
104 overlay->fb_x = pos->fb_x;
105 overlay->fb_y = pos->fb_y;
106 overlay->fb_width = fb->width;
107 overlay->fb_height = fb->height;
108 overlay->src_width = pos->src_w;
109 overlay->src_height = pos->src_h;
110 overlay->bpp = fb->bits_per_pixel;
111 overlay->pitch = fb->pitches[0];
112 overlay->pixel_format = fb->pixel_format;
113
114 /* set overlay range to be displayed. */
115 overlay->crtc_x = pos->crtc_x;
116 overlay->crtc_y = pos->crtc_y;
117 overlay->crtc_width = actual_w;
118 overlay->crtc_height = actual_h;
119
120 /* set drm mode data. */
121 overlay->mode_width = mode->hdisplay;
122 overlay->mode_height = mode->vdisplay;
123 overlay->refresh = mode->vrefresh;
124 overlay->scan_flag = mode->flags;
125
126 DRM_DEBUG_KMS("overlay : offset_x/y(%d,%d), width/height(%d,%d)",
127 overlay->crtc_x, overlay->crtc_y,
128 overlay->crtc_width, overlay->crtc_height);
129
130 return 0;
131}
132
133static int exynos_drm_crtc_update(struct drm_crtc *crtc)
134{
135 struct exynos_drm_crtc *exynos_crtc;
136 struct exynos_drm_overlay *overlay;
137 struct exynos_drm_crtc_pos pos;
138 struct drm_display_mode *mode = &crtc->mode;
139 struct drm_framebuffer *fb = crtc->fb;
140
141 if (!mode || !fb)
142 return -EINVAL;
143
144 exynos_crtc = to_exynos_crtc(crtc);
145 overlay = &exynos_crtc->overlay;
146
147 memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos));
148
149 /* it means the offset of framebuffer to be displayed. */
150 pos.fb_x = crtc->x;
151 pos.fb_y = crtc->y;
152
153 /* OSD position to be displayed. */
154 pos.crtc_x = 0;
155 pos.crtc_y = 0;
156 pos.crtc_w = fb->width - crtc->x;
157 pos.crtc_h = fb->height - crtc->y;
158 pos.src_w = pos.crtc_w;
159 pos.src_h = pos.crtc_h;
160
161 return exynos_drm_overlay_update(overlay, crtc->fb, mode, &pos);
162}
163
164static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) 67static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
165{ 68{
166 struct drm_device *dev = crtc->dev; 69 struct drm_device *dev = crtc->dev;
@@ -175,23 +78,8 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
175 78
176 mutex_lock(&dev->struct_mutex); 79 mutex_lock(&dev->struct_mutex);
177 80
178 switch (mode) { 81 exynos_drm_fn_encoder(crtc, &mode, exynos_drm_encoder_crtc_dpms);
179 case DRM_MODE_DPMS_ON: 82 exynos_crtc->dpms = mode;
180 exynos_drm_fn_encoder(crtc, &mode,
181 exynos_drm_encoder_crtc_dpms);
182 exynos_crtc->dpms = mode;
183 break;
184 case DRM_MODE_DPMS_STANDBY:
185 case DRM_MODE_DPMS_SUSPEND:
186 case DRM_MODE_DPMS_OFF:
187 exynos_drm_fn_encoder(crtc, &mode,
188 exynos_drm_encoder_crtc_dpms);
189 exynos_crtc->dpms = mode;
190 break;
191 default:
192 DRM_ERROR("unspecified mode %d\n", mode);
193 break;
194 }
195 83
196 mutex_unlock(&dev->struct_mutex); 84 mutex_unlock(&dev->struct_mutex);
197} 85}
@@ -209,35 +97,13 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
209 97
210 DRM_DEBUG_KMS("%s\n", __FILE__); 98 DRM_DEBUG_KMS("%s\n", __FILE__);
211 99
212 /* 100 exynos_plane_commit(exynos_crtc->plane);
213 * when set_crtc is requested from user or at booting time, 101 exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_ON);
214 * crtc->commit would be called without dpms call so if dpms is
215 * no power on then crtc->dpms should be called
216 * with DRM_MODE_DPMS_ON for the hardware power to be on.
217 */
218 if (exynos_crtc->dpms != DRM_MODE_DPMS_ON) {
219 int mode = DRM_MODE_DPMS_ON;
220
221 /*
222 * enable hardware(power on) to all encoders hdmi connected
223 * to current crtc.
224 */
225 exynos_drm_crtc_dpms(crtc, mode);
226 /*
227 * enable dma to all encoders connected to current crtc and
228 * lcd panel.
229 */
230 exynos_drm_fn_encoder(crtc, &mode,
231 exynos_drm_encoder_dpms_from_crtc);
232 }
233
234 exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
235 exynos_drm_encoder_crtc_commit);
236} 102}
237 103
238static bool 104static bool
239exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc, 105exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
240 struct drm_display_mode *mode, 106 const struct drm_display_mode *mode,
241 struct drm_display_mode *adjusted_mode) 107 struct drm_display_mode *adjusted_mode)
242{ 108{
243 DRM_DEBUG_KMS("%s\n", __FILE__); 109 DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -251,31 +117,61 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
251 struct drm_display_mode *adjusted_mode, int x, int y, 117 struct drm_display_mode *adjusted_mode, int x, int y,
252 struct drm_framebuffer *old_fb) 118 struct drm_framebuffer *old_fb)
253{ 119{
120 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
121 struct drm_plane *plane = exynos_crtc->plane;
122 unsigned int crtc_w;
123 unsigned int crtc_h;
124 int pipe = exynos_crtc->pipe;
125 int ret;
126
254 DRM_DEBUG_KMS("%s\n", __FILE__); 127 DRM_DEBUG_KMS("%s\n", __FILE__);
255 128
129 exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
130
256 /* 131 /*
257 * copy the mode data adjusted by mode_fixup() into crtc->mode 132 * copy the mode data adjusted by mode_fixup() into crtc->mode
258	 * so that hardware can be set to the proper mode. 133	 * so that hardware can be set to the proper mode.
259 */ 134 */
260 memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode)); 135 memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
261 136
262 return exynos_drm_crtc_update(crtc); 137 crtc_w = crtc->fb->width - x;
138 crtc_h = crtc->fb->height - y;
139
140 ret = exynos_plane_mode_set(plane, crtc, crtc->fb, 0, 0, crtc_w, crtc_h,
141 x, y, crtc_w, crtc_h);
142 if (ret)
143 return ret;
144
145 plane->crtc = crtc;
146 plane->fb = crtc->fb;
147
148 exynos_drm_fn_encoder(crtc, &pipe, exynos_drm_encoder_crtc_pipe);
149
150 return 0;
263} 151}
264 152
265static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, 153static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
266 struct drm_framebuffer *old_fb) 154 struct drm_framebuffer *old_fb)
267{ 155{
156 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
157 struct drm_plane *plane = exynos_crtc->plane;
158 unsigned int crtc_w;
159 unsigned int crtc_h;
268 int ret; 160 int ret;
269 161
270 DRM_DEBUG_KMS("%s\n", __FILE__); 162 DRM_DEBUG_KMS("%s\n", __FILE__);
271 163
272 ret = exynos_drm_crtc_update(crtc); 164 crtc_w = crtc->fb->width - x;
165 crtc_h = crtc->fb->height - y;
166
167 ret = exynos_plane_mode_set(plane, crtc, crtc->fb, 0, 0, crtc_w, crtc_h,
168 x, y, crtc_w, crtc_h);
273 if (ret) 169 if (ret)
274 return ret; 170 return ret;
275 171
276 exynos_drm_crtc_apply(crtc); 172 exynos_drm_crtc_commit(crtc);
277 173
278 return ret; 174 return 0;
279} 175}
280 176
281static void exynos_drm_crtc_load_lut(struct drm_crtc *crtc) 177static void exynos_drm_crtc_load_lut(struct drm_crtc *crtc)
@@ -284,6 +180,16 @@ static void exynos_drm_crtc_load_lut(struct drm_crtc *crtc)
284 /* drm framework doesn't check NULL */ 180 /* drm framework doesn't check NULL */
285} 181}
286 182
183static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
184{
185 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
186
187 DRM_DEBUG_KMS("%s\n", __FILE__);
188
189 exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_OFF);
190 exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
191}
192
287static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { 193static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
288 .dpms = exynos_drm_crtc_dpms, 194 .dpms = exynos_drm_crtc_dpms,
289 .prepare = exynos_drm_crtc_prepare, 195 .prepare = exynos_drm_crtc_prepare,
@@ -292,6 +198,7 @@ static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
292 .mode_set = exynos_drm_crtc_mode_set, 198 .mode_set = exynos_drm_crtc_mode_set,
293 .mode_set_base = exynos_drm_crtc_mode_set_base, 199 .mode_set_base = exynos_drm_crtc_mode_set_base,
294 .load_lut = exynos_drm_crtc_load_lut, 200 .load_lut = exynos_drm_crtc_load_lut,
201 .disable = exynos_drm_crtc_disable,
295}; 202};
296 203
297static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc, 204static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
@@ -327,7 +234,8 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
327 &dev_priv->pageflip_event_list); 234 &dev_priv->pageflip_event_list);
328 235
329 crtc->fb = fb; 236 crtc->fb = fb;
330 ret = exynos_drm_crtc_update(crtc); 237 ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
238 NULL);
331 if (ret) { 239 if (ret) {
332 crtc->fb = old_fb; 240 crtc->fb = old_fb;
333 drm_vblank_put(dev, exynos_crtc->pipe); 241 drm_vblank_put(dev, exynos_crtc->pipe);
@@ -335,14 +243,6 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
335 243
336 goto out; 244 goto out;
337 } 245 }
338
339 /*
340 * the values related to a buffer of the drm framebuffer
341 * to be applied should be set at here. because these values
342 * first, are set to shadow registers and then to
343 * real registers at vsync front porch period.
344 */
345 exynos_drm_crtc_apply(crtc);
346 } 246 }
347out: 247out:
348 mutex_unlock(&dev->struct_mutex); 248 mutex_unlock(&dev->struct_mutex);
@@ -362,18 +262,73 @@ static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
362 kfree(exynos_crtc); 262 kfree(exynos_crtc);
363} 263}
364 264
265static int exynos_drm_crtc_set_property(struct drm_crtc *crtc,
266 struct drm_property *property,
267 uint64_t val)
268{
269 struct drm_device *dev = crtc->dev;
270 struct exynos_drm_private *dev_priv = dev->dev_private;
271 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
272
273 DRM_DEBUG_KMS("%s\n", __func__);
274
275 if (property == dev_priv->crtc_mode_property) {
276 enum exynos_crtc_mode mode = val;
277
278 if (mode == exynos_crtc->mode)
279 return 0;
280
281 exynos_crtc->mode = mode;
282
283 switch (mode) {
284 case CRTC_MODE_NORMAL:
285 exynos_drm_crtc_commit(crtc);
286 break;
287 case CRTC_MODE_BLANK:
288 exynos_plane_dpms(exynos_crtc->plane,
289 DRM_MODE_DPMS_OFF);
290 break;
291 default:
292 break;
293 }
294
295 return 0;
296 }
297
298 return -EINVAL;
299}
300
365static struct drm_crtc_funcs exynos_crtc_funcs = { 301static struct drm_crtc_funcs exynos_crtc_funcs = {
366 .set_config = drm_crtc_helper_set_config, 302 .set_config = drm_crtc_helper_set_config,
367 .page_flip = exynos_drm_crtc_page_flip, 303 .page_flip = exynos_drm_crtc_page_flip,
368 .destroy = exynos_drm_crtc_destroy, 304 .destroy = exynos_drm_crtc_destroy,
305 .set_property = exynos_drm_crtc_set_property,
306};
307
308static const struct drm_prop_enum_list mode_names[] = {
309 { CRTC_MODE_NORMAL, "normal" },
310 { CRTC_MODE_BLANK, "blank" },
369}; 311};
370 312
371struct exynos_drm_overlay *get_exynos_drm_overlay(struct drm_device *dev, 313static void exynos_drm_crtc_attach_mode_property(struct drm_crtc *crtc)
372 struct drm_crtc *crtc)
373{ 314{
374 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 315 struct drm_device *dev = crtc->dev;
316 struct exynos_drm_private *dev_priv = dev->dev_private;
317 struct drm_property *prop;
375 318
376 return &exynos_crtc->overlay; 319 DRM_DEBUG_KMS("%s\n", __func__);
320
321 prop = dev_priv->crtc_mode_property;
322 if (!prop) {
323 prop = drm_property_create_enum(dev, 0, "mode", mode_names,
324 ARRAY_SIZE(mode_names));
325 if (!prop)
326 return;
327
328 dev_priv->crtc_mode_property = prop;
329 }
330
331 drm_object_attach_property(&crtc->base, prop, 0);
377} 332}
378 333
379int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr) 334int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
@@ -392,7 +347,12 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
392 347
393 exynos_crtc->pipe = nr; 348 exynos_crtc->pipe = nr;
394 exynos_crtc->dpms = DRM_MODE_DPMS_OFF; 349 exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
395 exynos_crtc->overlay.zpos = DEFAULT_ZPOS; 350 exynos_crtc->plane = exynos_plane_init(dev, 1 << nr, true);
351 if (!exynos_crtc->plane) {
352 kfree(exynos_crtc);
353 return -ENOMEM;
354 }
355
396 crtc = &exynos_crtc->drm_crtc; 356 crtc = &exynos_crtc->drm_crtc;
397 357
398 private->crtc[nr] = crtc; 358 private->crtc[nr] = crtc;
@@ -400,6 +360,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
400 drm_crtc_init(dev, crtc, &exynos_crtc_funcs); 360 drm_crtc_init(dev, crtc, &exynos_crtc_funcs);
401 drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs); 361 drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs);
402 362
363 exynos_drm_crtc_attach_mode_property(crtc);
364
403 return 0; 365 return 0;
404} 366}
405 367
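
The new "mode" enum property gives userspace a switch between normal scanout and blanking the crtc's private plane. A sketch of flipping it through libdrm, assuming fd, crtc_id and prop_id were already discovered via drmModeObjectGetProperties(); the call is standard libdrm, the lookup code is omitted:

	/* 1 == CRTC_MODE_BLANK in the enum attached above */
	drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC, prop_id, 1);
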
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 16b8e2195a0d..6bae8d8c250e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -29,39 +29,8 @@
29#ifndef _EXYNOS_DRM_CRTC_H_ 29#ifndef _EXYNOS_DRM_CRTC_H_
30#define _EXYNOS_DRM_CRTC_H_ 30#define _EXYNOS_DRM_CRTC_H_
31 31
32struct exynos_drm_overlay *get_exynos_drm_overlay(struct drm_device *dev,
33 struct drm_crtc *crtc);
34int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr); 32int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
35int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc); 33int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
36void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc); 34void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
37 35
38/*
39 * Exynos specific crtc position structure.
40 *
41 * @fb_x: offset x on a framebuffer to be displayed
42 * - the unit is screen coordinates.
43 * @fb_y: offset y on a framebuffer to be displayed
44 * - the unit is screen coordinates.
45 * @src_w: width of source area to be displayed from a framebuffer.
46 * @src_h: height of source area to be displayed from a framebuffer.
47 * @crtc_x: offset x on hardware screen.
48 * @crtc_y: offset y on hardware screen.
49 * @crtc_w: width of hardware screen.
50 * @crtc_h: height of hardware screen.
51 */
52struct exynos_drm_crtc_pos {
53 unsigned int fb_x;
54 unsigned int fb_y;
55 unsigned int src_w;
56 unsigned int src_h;
57 unsigned int crtc_x;
58 unsigned int crtc_y;
59 unsigned int crtc_w;
60 unsigned int crtc_h;
61};
62
63int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
64 struct drm_framebuffer *fb,
65 struct drm_display_mode *mode,
66 struct exynos_drm_crtc_pos *pos);
67#endif 36#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index 274909271c36..613bf8a5d9b2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -25,6 +25,7 @@
25 25
26#include "drmP.h" 26#include "drmP.h"
27#include "drm.h" 27#include "drm.h"
28#include "exynos_drm.h"
28#include "exynos_drm_drv.h" 29#include "exynos_drm_drv.h"
29#include "exynos_drm_gem.h" 30#include "exynos_drm_gem.h"
30 31
@@ -86,6 +87,10 @@ static struct sg_table *
86 npages = buf->size / buf->page_size; 87 npages = buf->size / buf->page_size;
87 88
88 sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size); 89 sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
90 if (!sgt) {
91 DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
92 goto err_unlock;
93 }
89 nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir); 94 nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
90 95
91 DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n", 96 DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
@@ -186,7 +191,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
186 struct exynos_drm_gem_obj *exynos_gem_obj; 191 struct exynos_drm_gem_obj *exynos_gem_obj;
187 struct exynos_drm_gem_buf *buffer; 192 struct exynos_drm_gem_buf *buffer;
188 struct page *page; 193 struct page *page;
189 int ret, i = 0; 194 int ret;
190 195
191 DRM_DEBUG_PRIME("%s\n", __FILE__); 196 DRM_DEBUG_PRIME("%s\n", __FILE__);
192 197
@@ -210,7 +215,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
210 215
211 216
212 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); 217 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
213 if (IS_ERR(sgt)) { 218 if (IS_ERR_OR_NULL(sgt)) {
214 ret = PTR_ERR(sgt); 219 ret = PTR_ERR(sgt);
215 goto err_buf_detach; 220 goto err_buf_detach;
216 } 221 }
@@ -236,13 +241,25 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
236 } 241 }
237 242
238 sgl = sgt->sgl; 243 sgl = sgt->sgl;
239 buffer->dma_addr = sg_dma_address(sgl);
240 244
241 while (i < sgt->nents) { 245 if (sgt->nents == 1) {
242 buffer->pages[i] = sg_page(sgl); 246 buffer->dma_addr = sg_dma_address(sgt->sgl);
243 buffer->size += sg_dma_len(sgl); 247 buffer->size = sg_dma_len(sgt->sgl);
244 sgl = sg_next(sgl); 248
245		i++; 249		/* always physically contiguous memory if sgt->nents is 1. */
250 exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
251 } else {
252 unsigned int i = 0;
253
254 buffer->dma_addr = sg_dma_address(sgl);
255 while (i < sgt->nents) {
256 buffer->pages[i] = sg_page(sgl);
257 buffer->size += sg_dma_len(sgl);
258 sgl = sg_next(sgl);
259 i++;
260 }
261
262 exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
246 } 263 }
247 264
248 exynos_gem_obj->buffer = buffer; 265 exynos_gem_obj->buffer = buffer;
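
In the import path above, a single-entry sg_table is treated as physically contiguous (EXYNOS_BO_CONTIG); anything else is walked entry by entry and flagged EXYNOS_BO_NONCONTIG. The manual while loop over sgl could equally be written with the standard iterator — an equivalent sketch of the multi-entry branch, not a drop-in replacement:

	struct scatterlist *sg;
	int i;

	buffer->dma_addr = sg_dma_address(sgt->sgl);
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		buffer->pages[i] = sg_page(sg);
		buffer->size += sg_dma_len(sg);
	}
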
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index d6de2e07fa03..ebacec6f1e48 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -85,8 +85,11 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
85 } 85 }
86 86
87 for (nr = 0; nr < MAX_PLANE; nr++) { 87 for (nr = 0; nr < MAX_PLANE; nr++) {
88 ret = exynos_plane_init(dev, nr); 88 struct drm_plane *plane;
89 if (ret) 89 unsigned int possible_crtcs = (1 << MAX_CRTC) - 1;
90
91 plane = exynos_plane_init(dev, possible_crtcs, false);
92 if (!plane)
90 goto err_crtc; 93 goto err_crtc;
91 } 94 }
92 95
@@ -221,8 +224,6 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
221 exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH), 224 exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
222 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, 225 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
223 exynos_drm_gem_get_ioctl, DRM_UNLOCKED), 226 exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
224 DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl,
225 DRM_UNLOCKED | DRM_AUTH),
226 DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, 227 DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
227 vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH), 228 vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH),
228 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, 229 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index c82c90c443e7..e22704b249d7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -59,12 +59,14 @@ enum exynos_drm_output_type {
59 * 59 *
60 * @mode_set: copy drm overlay info to hw specific overlay info. 60 * @mode_set: copy drm overlay info to hw specific overlay info.
61 * @commit: apply hardware specific overlay data to registers. 61 * @commit: apply hardware specific overlay data to registers.
62 * @enable: enable hardware specific overlay.
62 * @disable: disable hardware specific overlay. 63 * @disable: disable hardware specific overlay.
63 */ 64 */
64struct exynos_drm_overlay_ops { 65struct exynos_drm_overlay_ops {
65 void (*mode_set)(struct device *subdrv_dev, 66 void (*mode_set)(struct device *subdrv_dev,
66 struct exynos_drm_overlay *overlay); 67 struct exynos_drm_overlay *overlay);
67 void (*commit)(struct device *subdrv_dev, int zpos); 68 void (*commit)(struct device *subdrv_dev, int zpos);
69 void (*enable)(struct device *subdrv_dev, int zpos);
68 void (*disable)(struct device *subdrv_dev, int zpos); 70 void (*disable)(struct device *subdrv_dev, int zpos);
69}; 71};
70 72
@@ -174,7 +176,7 @@ struct exynos_drm_manager_ops {
174 void (*apply)(struct device *subdrv_dev); 176 void (*apply)(struct device *subdrv_dev);
175 void (*mode_fixup)(struct device *subdrv_dev, 177 void (*mode_fixup)(struct device *subdrv_dev,
176 struct drm_connector *connector, 178 struct drm_connector *connector,
177 struct drm_display_mode *mode, 179 const struct drm_display_mode *mode,
178 struct drm_display_mode *adjusted_mode); 180 struct drm_display_mode *adjusted_mode);
179 void (*mode_set)(struct device *subdrv_dev, void *mode); 181 void (*mode_set)(struct device *subdrv_dev, void *mode);
180 void (*get_max_resol)(struct device *subdrv_dev, unsigned int *width, 182 void (*get_max_resol)(struct device *subdrv_dev, unsigned int *width,
@@ -235,6 +237,8 @@ struct exynos_drm_private {
235 * this array is used to be aware of which crtc did it request vblank. 237 * this array is used to be aware of which crtc did it request vblank.
236 */ 238 */
237 struct drm_crtc *crtc[MAX_CRTC]; 239 struct drm_crtc *crtc[MAX_CRTC];
240 struct drm_property *plane_zpos_property;
241 struct drm_property *crtc_mode_property;
238}; 242};
239 243
240/* 244/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 23d5ad379f86..2c037cd7d2d4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -30,7 +30,6 @@
30#include "drm_crtc_helper.h" 30#include "drm_crtc_helper.h"
31 31
32#include "exynos_drm_drv.h" 32#include "exynos_drm_drv.h"
33#include "exynos_drm_crtc.h"
34#include "exynos_drm_encoder.h" 33#include "exynos_drm_encoder.h"
35 34
36#define to_exynos_encoder(x) container_of(x, struct exynos_drm_encoder,\ 35#define to_exynos_encoder(x) container_of(x, struct exynos_drm_encoder,\
@@ -108,7 +107,7 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
108 107
109static bool 108static bool
110exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder, 109exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
111 struct drm_display_mode *mode, 110 const struct drm_display_mode *mode,
112 struct drm_display_mode *adjusted_mode) 111 struct drm_display_mode *adjusted_mode)
113{ 112{
114 struct drm_device *dev = encoder->dev; 113 struct drm_device *dev = encoder->dev;
@@ -136,21 +135,16 @@ static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
136 struct drm_connector *connector; 135 struct drm_connector *connector;
137 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); 136 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
138 struct exynos_drm_manager_ops *manager_ops = manager->ops; 137 struct exynos_drm_manager_ops *manager_ops = manager->ops;
139 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
140 struct exynos_drm_overlay *overlay = get_exynos_drm_overlay(dev,
141 encoder->crtc);
142 138
143 DRM_DEBUG_KMS("%s\n", __FILE__); 139 DRM_DEBUG_KMS("%s\n", __FILE__);
144 140
141 exynos_drm_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
142
145 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 143 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
146 if (connector->encoder == encoder) { 144 if (connector->encoder == encoder)
147 if (manager_ops && manager_ops->mode_set) 145 if (manager_ops && manager_ops->mode_set)
148 manager_ops->mode_set(manager->dev, 146 manager_ops->mode_set(manager->dev,
149 adjusted_mode); 147 adjusted_mode);
150
151 if (overlay_ops && overlay_ops->mode_set)
152 overlay_ops->mode_set(manager->dev, overlay);
153 }
154 } 148 }
155} 149}
156 150
@@ -310,8 +304,8 @@ void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data)
310 struct exynos_drm_manager_ops *manager_ops = manager->ops; 304 struct exynos_drm_manager_ops *manager_ops = manager->ops;
311 int crtc = *(int *)data; 305 int crtc = *(int *)data;
312 306
313 if (manager->pipe == -1) 307 if (manager->pipe != crtc)
314 manager->pipe = crtc; 308 return;
315 309
316 if (manager_ops->enable_vblank) 310 if (manager_ops->enable_vblank)
317 manager_ops->enable_vblank(manager->dev); 311 manager_ops->enable_vblank(manager->dev);
@@ -324,34 +318,41 @@ void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data)
324 struct exynos_drm_manager_ops *manager_ops = manager->ops; 318 struct exynos_drm_manager_ops *manager_ops = manager->ops;
325 int crtc = *(int *)data; 319 int crtc = *(int *)data;
326 320
327 if (manager->pipe == -1) 321 if (manager->pipe != crtc)
328 manager->pipe = crtc; 322 return;
329 323
330 if (manager_ops->disable_vblank) 324 if (manager_ops->disable_vblank)
331 manager_ops->disable_vblank(manager->dev); 325 manager_ops->disable_vblank(manager->dev);
332} 326}
333 327
334void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder, 328void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
335 void *data)
336{ 329{
337 struct exynos_drm_manager *manager = 330 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
338 to_exynos_encoder(encoder)->manager; 331 struct exynos_drm_manager *manager = exynos_encoder->manager;
339 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; 332 struct exynos_drm_manager_ops *manager_ops = manager->ops;
340 int zpos = DEFAULT_ZPOS; 333 int mode = *(int *)data;
341 334
342 if (data) 335 DRM_DEBUG_KMS("%s\n", __FILE__);
343 zpos = *(int *)data;
344 336
345 if (overlay_ops && overlay_ops->commit) 337 if (manager_ops && manager_ops->dpms)
346 overlay_ops->commit(manager->dev, zpos); 338 manager_ops->dpms(manager->dev, mode);
339
340 /*
 341	 * if this condition holds, the crtc has already been detached
 342	 * from the encoder and the final detach step has completed, so
 343	 * clear the pipe in the manager to prevent repeated calls.
344 */
345 if (mode > DRM_MODE_DPMS_ON) {
346 if (!encoder->crtc)
347 manager->pipe = -1;
348 }
347} 349}
348 350
349void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data) 351void exynos_drm_encoder_crtc_pipe(struct drm_encoder *encoder, void *data)
350{ 352{
351 struct exynos_drm_manager *manager = 353 struct exynos_drm_manager *manager =
352 to_exynos_encoder(encoder)->manager; 354 to_exynos_encoder(encoder)->manager;
353 int crtc = *(int *)data; 355 int pipe = *(int *)data;
354 int zpos = DEFAULT_ZPOS;
355 356
356 DRM_DEBUG_KMS("%s\n", __FILE__); 357 DRM_DEBUG_KMS("%s\n", __FILE__);
357 358
@@ -359,76 +360,62 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
359 * when crtc is detached from encoder, this pipe is used 360 * when crtc is detached from encoder, this pipe is used
360 * to select manager operation 361 * to select manager operation
361 */ 362 */
362 manager->pipe = crtc; 363 manager->pipe = pipe;
363
364 exynos_drm_encoder_crtc_plane_commit(encoder, &zpos);
365} 364}
366 365
367void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder, void *data) 366void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data)
368{ 367{
369 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); 368 struct exynos_drm_manager *manager =
370 int mode = *(int *)data; 369 to_exynos_encoder(encoder)->manager;
370 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
371 struct exynos_drm_overlay *overlay = data;
371 372
372 DRM_DEBUG_KMS("%s\n", __FILE__); 373 DRM_DEBUG_KMS("%s\n", __FILE__);
373 374
374 exynos_drm_encoder_dpms(encoder, mode); 375 if (overlay_ops && overlay_ops->mode_set)
375 376 overlay_ops->mode_set(manager->dev, overlay);
376 exynos_encoder->dpms = mode;
377} 377}
378 378
379void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data) 379void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data)
380{ 380{
381 struct drm_device *dev = encoder->dev; 381 struct exynos_drm_manager *manager =
382 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); 382 to_exynos_encoder(encoder)->manager;
383 struct exynos_drm_manager *manager = exynos_encoder->manager; 383 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
384 struct exynos_drm_manager_ops *manager_ops = manager->ops; 384 int zpos = DEFAULT_ZPOS;
385 struct drm_connector *connector;
386 int mode = *(int *)data;
387 385
388 DRM_DEBUG_KMS("%s\n", __FILE__); 386 DRM_DEBUG_KMS("%s\n", __FILE__);
389 387
390 if (manager_ops && manager_ops->dpms) 388 if (data)
391 manager_ops->dpms(manager->dev, mode); 389 zpos = *(int *)data;
392
393 /*
394 * set current dpms mode to the connector connected to
395 * current encoder. connector->dpms would be checked
396 * at drm_helper_connector_dpms()
397 */
398 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
399 if (connector->encoder == encoder)
400 connector->dpms = mode;
401 390
402 /* 391 if (overlay_ops && overlay_ops->commit)
403 * if this condition is ok then it means that the crtc is already 392 overlay_ops->commit(manager->dev, zpos);
404 * detached from encoder and last function for detaching is properly
405 * done, so clear pipe from manager to prevent repeated call.
406 */
407 if (mode > DRM_MODE_DPMS_ON) {
408 if (!encoder->crtc)
409 manager->pipe = -1;
410 }
411} 393}
412 394
413void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data) 395void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data)
414{ 396{
415 struct exynos_drm_manager *manager = 397 struct exynos_drm_manager *manager =
416 to_exynos_encoder(encoder)->manager; 398 to_exynos_encoder(encoder)->manager;
417 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; 399 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
418 struct exynos_drm_overlay *overlay = data; 400 int zpos = DEFAULT_ZPOS;
419 401
420 if (overlay_ops && overlay_ops->mode_set) 402 DRM_DEBUG_KMS("%s\n", __FILE__);
421 overlay_ops->mode_set(manager->dev, overlay); 403
404 if (data)
405 zpos = *(int *)data;
406
407 if (overlay_ops && overlay_ops->enable)
408 overlay_ops->enable(manager->dev, zpos);
422} 409}
423 410
424void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data) 411void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
425{ 412{
426 struct exynos_drm_manager *manager = 413 struct exynos_drm_manager *manager =
427 to_exynos_encoder(encoder)->manager; 414 to_exynos_encoder(encoder)->manager;
428 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; 415 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
429 int zpos = DEFAULT_ZPOS; 416 int zpos = DEFAULT_ZPOS;
430 417
431 DRM_DEBUG_KMS("\n"); 418 DRM_DEBUG_KMS("%s\n", __FILE__);
432 419
433 if (data) 420 if (data)
434 zpos = *(int *)data; 421 zpos = *(int *)data;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index eb7d2316847e..6470d9ddf5a1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -40,13 +40,11 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
40 void (*fn)(struct drm_encoder *, void *)); 40 void (*fn)(struct drm_encoder *, void *));
41void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data); 41void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
42void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data); 42void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
43void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
44 void *data);
45void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
46void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder,
47 void *data);
48void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data); 43void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data);
49void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data); 44void exynos_drm_encoder_crtc_pipe(struct drm_encoder *encoder, void *data);
50void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data); 45void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
46void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
47void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
48void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
51 49
52#endif 50#endif
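
All of the helpers declared above share one calling convention, void (*fn)(struct drm_encoder *, void *), so they can be fanned out through exynos_drm_fn_encoder(). A minimal sketch of that dispatch, assuming the usual drm encoder-list walk (hypothetical body; the real one lives in exynos_drm_encoder.c):

static void encoder_fan_out(struct drm_crtc *crtc, void *data,
			    void (*fn)(struct drm_encoder *, void *))
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	/* invoke fn on every encoder currently driving this crtc */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		if (encoder->crtc == crtc)
			fn(encoder, data);
}

Usage then reads, for example:

	exynos_drm_fn_encoder(crtc, &overlay->zpos,
			      exynos_drm_encoder_plane_commit);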
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 29fdbfeb43cb..a68d2b313f03 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -78,7 +78,6 @@ struct fimd_context {
78 struct drm_crtc *crtc; 78 struct drm_crtc *crtc;
79 struct clk *bus_clk; 79 struct clk *bus_clk;
80 struct clk *lcd_clk; 80 struct clk *lcd_clk;
81 struct resource *regs_res;
82 void __iomem *regs; 81 void __iomem *regs;
83 struct fimd_win_data win_data[WINDOWS_NR]; 82 struct fimd_win_data win_data[WINDOWS_NR];
84 unsigned int clkdiv; 83 unsigned int clkdiv;
@@ -813,7 +812,7 @@ static int __devinit fimd_probe(struct platform_device *pdev)
813 return -EINVAL; 812 return -EINVAL;
814 } 813 }
815 814
816 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 815 ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
817 if (!ctx) 816 if (!ctx)
818 return -ENOMEM; 817 return -ENOMEM;
819 818
@@ -838,33 +837,26 @@ static int __devinit fimd_probe(struct platform_device *pdev)
838 goto err_clk; 837 goto err_clk;
839 } 838 }
840 839
841 ctx->regs_res = request_mem_region(res->start, resource_size(res), 840 ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
842 dev_name(dev));
843 if (!ctx->regs_res) {
844 dev_err(dev, "failed to claim register region\n");
845 ret = -ENOENT;
846 goto err_clk;
847 }
848
849 ctx->regs = ioremap(res->start, resource_size(res));
850 if (!ctx->regs) { 841 if (!ctx->regs) {
851 dev_err(dev, "failed to map registers\n"); 842 dev_err(dev, "failed to map registers\n");
852 ret = -ENXIO; 843 ret = -ENXIO;
853 goto err_req_region_io; 844 goto err_clk;
854 } 845 }
855 846
856 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 847 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
857 if (!res) { 848 if (!res) {
858 dev_err(dev, "irq request failed.\n"); 849 dev_err(dev, "irq request failed.\n");
859 goto err_req_region_irq; 850 goto err_clk;
860 } 851 }
861 852
862 ctx->irq = res->start; 853 ctx->irq = res->start;
863 854
864 ret = request_irq(ctx->irq, fimd_irq_handler, 0, "drm_fimd", ctx); 855 ret = devm_request_irq(&pdev->dev, ctx->irq, fimd_irq_handler,
865 if (ret < 0) { 856 0, "drm_fimd", ctx);
857 if (ret) {
866 dev_err(dev, "irq request failed.\n"); 858 dev_err(dev, "irq request failed.\n");
867 goto err_req_irq; 859 goto err_clk;
868 } 860 }
869 861
870 ctx->vidcon0 = pdata->vidcon0; 862 ctx->vidcon0 = pdata->vidcon0;
@@ -899,14 +891,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
899 891
900 return 0; 892 return 0;
901 893
902err_req_irq:
903err_req_region_irq:
904 iounmap(ctx->regs);
905
906err_req_region_io:
907 release_resource(ctx->regs_res);
908 kfree(ctx->regs_res);
909
910err_clk: 894err_clk:
911 clk_disable(ctx->lcd_clk); 895 clk_disable(ctx->lcd_clk);
912 clk_put(ctx->lcd_clk); 896 clk_put(ctx->lcd_clk);
@@ -916,7 +900,6 @@ err_bus_clk:
916 clk_put(ctx->bus_clk); 900 clk_put(ctx->bus_clk);
917 901
918err_clk_get: 902err_clk_get:
919 kfree(ctx);
920 return ret; 903 return ret;
921} 904}
922 905
@@ -944,13 +927,6 @@ out:
944 clk_put(ctx->lcd_clk); 927 clk_put(ctx->lcd_clk);
945 clk_put(ctx->bus_clk); 928 clk_put(ctx->bus_clk);
946 929
947 iounmap(ctx->regs);
948 release_resource(ctx->regs_res);
949 kfree(ctx->regs_res);
950 free_irq(ctx->irq, ctx);
951
952 kfree(ctx);
953
954 return 0; 930 return 0;
955} 931}
956 932
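
The fimd_probe() conversion above is the standard devm_* pattern: devm_kzalloc(), devm_request_and_ioremap() and devm_request_irq() tie each allocation, mapping and irq to the device lifetime, which is why the err_req_irq/err_req_region_* unwind labels collapse into the single err_clk path (the clocks here are still released by hand). A condensed, hypothetical sketch of the pattern:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;
	int irq, ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_request_and_ioremap(&pdev->dev, res);
	if (!regs)
		return -ENXIO;	/* region and mapping released by devres */

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, example_irq, 0,
			       "example", NULL);
	if (ret)
		return ret;	/* irq likewise freed on driver detach */

	return 0;
}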
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 5c8b683029ea..f9efde40c097 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -99,25 +99,17 @@ out:
99struct page **exynos_gem_get_pages(struct drm_gem_object *obj, 99struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
100 gfp_t gfpmask) 100 gfp_t gfpmask)
101{ 101{
102 struct inode *inode;
103 struct address_space *mapping;
104 struct page *p, **pages; 102 struct page *p, **pages;
105 int i, npages; 103 int i, npages;
106 104
107 /* This is the shared memory object that backs the GEM resource */
108 inode = obj->filp->f_path.dentry->d_inode;
109 mapping = inode->i_mapping;
110
111 npages = obj->size >> PAGE_SHIFT; 105 npages = obj->size >> PAGE_SHIFT;
112 106
113 pages = drm_malloc_ab(npages, sizeof(struct page *)); 107 pages = drm_malloc_ab(npages, sizeof(struct page *));
114 if (pages == NULL) 108 if (pages == NULL)
115 return ERR_PTR(-ENOMEM); 109 return ERR_PTR(-ENOMEM);
116 110
117 gfpmask |= mapping_gfp_mask(mapping);
118
119 for (i = 0; i < npages; i++) { 111 for (i = 0; i < npages; i++) {
120 p = shmem_read_mapping_page_gfp(mapping, i, gfpmask); 112 p = alloc_page(gfpmask);
121 if (IS_ERR(p)) 113 if (IS_ERR(p))
122 goto fail; 114 goto fail;
123 pages[i] = p; 115 pages[i] = p;
@@ -126,31 +118,22 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
126 return pages; 118 return pages;
127 119
128fail: 120fail:
129 while (i--) 121 while (--i)
130 page_cache_release(pages[i]); 122 __free_page(pages[i]);
131 123
132 drm_free_large(pages); 124 drm_free_large(pages);
133 return ERR_PTR(PTR_ERR(p)); 125 return ERR_PTR(PTR_ERR(p));
134} 126}
135 127
136static void exynos_gem_put_pages(struct drm_gem_object *obj, 128static void exynos_gem_put_pages(struct drm_gem_object *obj,
137 struct page **pages, 129 struct page **pages)
138 bool dirty, bool accessed)
139{ 130{
140 int i, npages; 131 int npages;
141 132
142 npages = obj->size >> PAGE_SHIFT; 133 npages = obj->size >> PAGE_SHIFT;
143 134
144 for (i = 0; i < npages; i++) { 135 while (--npages >= 0)
145 if (dirty) 136 __free_page(pages[npages]);
146 set_page_dirty(pages[i]);
147
148 if (accessed)
149 mark_page_accessed(pages[i]);
150
151 /* Undo the reference we took when populating the table */
152 page_cache_release(pages[i]);
153 }
154 137
155 drm_free_large(pages); 138 drm_free_large(pages);
156} 139}
@@ -189,7 +172,7 @@ static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
189 return -EINVAL; 172 return -EINVAL;
190 } 173 }
191 174
192 pages = exynos_gem_get_pages(obj, GFP_KERNEL); 175 pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
193 if (IS_ERR(pages)) { 176 if (IS_ERR(pages)) {
194 DRM_ERROR("failed to get pages.\n"); 177 DRM_ERROR("failed to get pages.\n");
195 return PTR_ERR(pages); 178 return PTR_ERR(pages);
@@ -230,7 +213,7 @@ err1:
230 kfree(buf->sgt); 213 kfree(buf->sgt);
231 buf->sgt = NULL; 214 buf->sgt = NULL;
232err: 215err:
233 exynos_gem_put_pages(obj, pages, true, false); 216 exynos_gem_put_pages(obj, pages);
234 return ret; 217 return ret;
235 218
236} 219}
@@ -248,7 +231,7 @@ static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
248 kfree(buf->sgt); 231 kfree(buf->sgt);
249 buf->sgt = NULL; 232 buf->sgt = NULL;
250 233
251 exynos_gem_put_pages(obj, buf->pages, true, false); 234 exynos_gem_put_pages(obj, buf->pages);
252 buf->pages = NULL; 235 buf->pages = NULL;
253 236
 254 /* add some code for UNCACHED type here. TODO */ 237
@@ -291,11 +274,21 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
291 if (!buf->pages) 274 if (!buf->pages)
292 return; 275 return;
293 276
277 /*
278 * do not release memory region from exporter.
279 *
280 * the region will be released by exporter
281 * once dmabuf's refcount becomes 0.
282 */
283 if (obj->import_attach)
284 goto out;
285
294 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) 286 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
295 exynos_drm_gem_put_pages(obj); 287 exynos_drm_gem_put_pages(obj);
296 else 288 else
297 exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf); 289 exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
298 290
291out:
299 exynos_drm_fini_buf(obj->dev, buf); 292 exynos_drm_fini_buf(obj->dev, buf);
300 exynos_gem_obj->buffer = NULL; 293 exynos_gem_obj->buffer = NULL;
301 294
@@ -668,7 +661,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
668 * with DRM_IOCTL_MODE_CREATE_DUMB command. 661 * with DRM_IOCTL_MODE_CREATE_DUMB command.
669 */ 662 */
670 663
671 args->pitch = args->width * args->bpp >> 3; 664 args->pitch = args->width * ((args->bpp + 7) / 8);
672 args->size = PAGE_ALIGN(args->pitch * args->height); 665 args->size = PAGE_ALIGN(args->pitch * args->height);
673 666
674 exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size); 667 exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
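
The dumb_create() change near the end is a one-line arithmetic fix: width * bpp >> 3 truncates whenever bpp is not a multiple of 8, while width * ((bpp + 7) / 8) rounds each pixel up to whole bytes. A quick worked check (hypothetical helper):

/*
 * bpp = 12, width = 100:
 *   old: (100 * 12) >> 3       = 150 bytes, fractional byte lost
 *   new: 100 * ((12 + 7) / 8)  = 200 bytes, 2 whole bytes per pixel
 */
static inline unsigned int dumb_pitch(unsigned int width, unsigned int bpp)
{
	return width * ((bpp + 7) / 8);
}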
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 14d038b6cb02..085b2a5d5f70 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -63,7 +63,8 @@ struct exynos_drm_gem_buf {
63 * by user request or at framebuffer creation. 63 * by user request or at framebuffer creation.
64 * continuous memory region allocated by user request 64 * continuous memory region allocated by user request
65 * or at framebuffer creation. 65 * or at framebuffer creation.
 66 * @size: total memory size to physically non-continuous memory region. 66 * @size: size requested from user, in bytes; this size is
 67 * aligned in page units.
 67 * @flags: indicate memory type to allocated buffer and cache attribute. 68
68 * 69 *
 69 * P.S. this object would be transferred to user as kms_bo.handle so 70
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 5d9d2c2f8f3f..8ffcdf8b9e22 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -142,7 +142,7 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
142 142
143static void drm_hdmi_mode_fixup(struct device *subdrv_dev, 143static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
144 struct drm_connector *connector, 144 struct drm_connector *connector,
145 struct drm_display_mode *mode, 145 const struct drm_display_mode *mode,
146 struct drm_display_mode *adjusted_mode) 146 struct drm_display_mode *adjusted_mode)
147{ 147{
148 struct drm_hdmi_context *ctx = to_context(subdrv_dev); 148 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index bd8126996e52..a91c42088e42 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -51,7 +51,7 @@ struct exynos_hdmi_ops {
51 51
52 /* manager */ 52 /* manager */
53 void (*mode_fixup)(void *ctx, struct drm_connector *connector, 53 void (*mode_fixup)(void *ctx, struct drm_connector *connector,
54 struct drm_display_mode *mode, 54 const struct drm_display_mode *mode,
55 struct drm_display_mode *adjusted_mode); 55 struct drm_display_mode *adjusted_mode);
56 void (*mode_set)(void *ctx, void *mode); 56 void (*mode_set)(void *ctx, void *mode);
57 void (*get_max_resol)(void *ctx, unsigned int *width, 57 void (*get_max_resol)(void *ctx, unsigned int *width,
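
The mode_fixup() signature changes scattered through this series all track the drm core const-qualifying the requested mode: implementations may read mode but must write only to adjusted_mode. A conforming sketch (the clock clamp is purely illustrative, not taken from this driver):

static bool example_mode_fixup(struct drm_encoder *encoder,
			       const struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode)
{
	/* 'mode' is read-only now; any adjustment goes to the copy */
	if (mode->clock > 150000)
		adjusted_mode->clock = 150000;

	return true;
}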
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index c4c6525d4653..b89829e5043a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -12,9 +12,12 @@
12#include "drmP.h" 12#include "drmP.h"
13 13
14#include "exynos_drm.h" 14#include "exynos_drm.h"
15#include "exynos_drm_crtc.h"
16#include "exynos_drm_drv.h" 15#include "exynos_drm_drv.h"
17#include "exynos_drm_encoder.h" 16#include "exynos_drm_encoder.h"
17#include "exynos_drm_fb.h"
18#include "exynos_drm_gem.h"
19
20#define to_exynos_plane(x) container_of(x, struct exynos_plane, base)
18 21
19struct exynos_plane { 22struct exynos_plane {
20 struct drm_plane base; 23 struct drm_plane base;
@@ -30,6 +33,108 @@ static const uint32_t formats[] = {
30 DRM_FORMAT_NV12MT, 33 DRM_FORMAT_NV12MT,
31}; 34};
32 35
36int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
37 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
38 unsigned int crtc_w, unsigned int crtc_h,
39 uint32_t src_x, uint32_t src_y,
40 uint32_t src_w, uint32_t src_h)
41{
42 struct exynos_plane *exynos_plane = to_exynos_plane(plane);
43 struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
44 unsigned int actual_w;
45 unsigned int actual_h;
46 int nr;
47 int i;
48
49 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
50
51 nr = exynos_drm_format_num_buffers(fb->pixel_format);
52 for (i = 0; i < nr; i++) {
53 struct exynos_drm_gem_buf *buffer = exynos_drm_fb_buffer(fb, i);
54
55 if (!buffer) {
56 DRM_LOG_KMS("buffer is null\n");
57 return -EFAULT;
58 }
59
60 overlay->dma_addr[i] = buffer->dma_addr;
61 overlay->vaddr[i] = buffer->kvaddr;
62
63 DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
64 i, (unsigned long)overlay->vaddr[i],
65 (unsigned long)overlay->dma_addr[i]);
66 }
67
68 actual_w = min((unsigned)(crtc->mode.hdisplay - crtc_x), crtc_w);
69 actual_h = min((unsigned)(crtc->mode.vdisplay - crtc_y), crtc_h);
70
71 /* set drm framebuffer data. */
72 overlay->fb_x = src_x;
73 overlay->fb_y = src_y;
74 overlay->fb_width = fb->width;
75 overlay->fb_height = fb->height;
76 overlay->src_width = src_w;
77 overlay->src_height = src_h;
78 overlay->bpp = fb->bits_per_pixel;
79 overlay->pitch = fb->pitches[0];
80 overlay->pixel_format = fb->pixel_format;
81
82 /* set overlay range to be displayed. */
83 overlay->crtc_x = crtc_x;
84 overlay->crtc_y = crtc_y;
85 overlay->crtc_width = actual_w;
86 overlay->crtc_height = actual_h;
87
88 /* set drm mode data. */
89 overlay->mode_width = crtc->mode.hdisplay;
90 overlay->mode_height = crtc->mode.vdisplay;
91 overlay->refresh = crtc->mode.vrefresh;
92 overlay->scan_flag = crtc->mode.flags;
93
94 DRM_DEBUG_KMS("overlay : offset_x/y(%d,%d), width/height(%d,%d)",
95 overlay->crtc_x, overlay->crtc_y,
96 overlay->crtc_width, overlay->crtc_height);
97
98 exynos_drm_fn_encoder(crtc, overlay, exynos_drm_encoder_plane_mode_set);
99
100 return 0;
101}
102
103void exynos_plane_commit(struct drm_plane *plane)
104{
105 struct exynos_plane *exynos_plane = to_exynos_plane(plane);
106 struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
107
108 exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
109 exynos_drm_encoder_plane_commit);
110}
111
112void exynos_plane_dpms(struct drm_plane *plane, int mode)
113{
114 struct exynos_plane *exynos_plane = to_exynos_plane(plane);
115 struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
116
117 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
118
119 if (mode == DRM_MODE_DPMS_ON) {
120 if (exynos_plane->enabled)
121 return;
122
123 exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
124 exynos_drm_encoder_plane_enable);
125
126 exynos_plane->enabled = true;
127 } else {
128 if (!exynos_plane->enabled)
129 return;
130
131 exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
132 exynos_drm_encoder_plane_disable);
133
134 exynos_plane->enabled = false;
135 }
136}
137
33static int 138static int
34exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, 139exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
35 struct drm_framebuffer *fb, int crtc_x, int crtc_y, 140 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
@@ -37,64 +142,37 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
37 uint32_t src_x, uint32_t src_y, 142 uint32_t src_x, uint32_t src_y,
38 uint32_t src_w, uint32_t src_h) 143 uint32_t src_w, uint32_t src_h)
39{ 144{
40 struct exynos_plane *exynos_plane =
41 container_of(plane, struct exynos_plane, base);
42 struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
43 struct exynos_drm_crtc_pos pos;
44 int ret; 145 int ret;
45 146
46 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 147 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
47 148
48 memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos)); 149 ret = exynos_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y,
49 pos.crtc_x = crtc_x; 150 crtc_w, crtc_h, src_x >> 16, src_y >> 16,
50 pos.crtc_y = crtc_y; 151 src_w >> 16, src_h >> 16);
51 pos.crtc_w = crtc_w;
52 pos.crtc_h = crtc_h;
53
54 /* considering 16.16 fixed point of source values */
55 pos.fb_x = src_x >> 16;
56 pos.fb_y = src_y >> 16;
57 pos.src_w = src_w >> 16;
58 pos.src_h = src_h >> 16;
59
60 ret = exynos_drm_overlay_update(overlay, fb, &crtc->mode, &pos);
61 if (ret < 0) 152 if (ret < 0)
62 return ret; 153 return ret;
63 154
64 exynos_drm_fn_encoder(crtc, overlay, 155 plane->crtc = crtc;
65 exynos_drm_encoder_crtc_mode_set); 156 plane->fb = crtc->fb;
66 exynos_drm_fn_encoder(crtc, &overlay->zpos,
67 exynos_drm_encoder_crtc_plane_commit);
68 157
69 exynos_plane->enabled = true; 158 exynos_plane_commit(plane);
159 exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
70 160
71 return 0; 161 return 0;
72} 162}
73 163
74static int exynos_disable_plane(struct drm_plane *plane) 164static int exynos_disable_plane(struct drm_plane *plane)
75{ 165{
76 struct exynos_plane *exynos_plane =
77 container_of(plane, struct exynos_plane, base);
78 struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
79
80 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 166 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
81 167
82 if (!exynos_plane->enabled) 168 exynos_plane_dpms(plane, DRM_MODE_DPMS_OFF);
83 return 0;
84
85 exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
86 exynos_drm_encoder_crtc_disable);
87
88 exynos_plane->enabled = false;
89 exynos_plane->overlay.zpos = DEFAULT_ZPOS;
90 169
91 return 0; 170 return 0;
92} 171}
93 172
94static void exynos_plane_destroy(struct drm_plane *plane) 173static void exynos_plane_destroy(struct drm_plane *plane)
95{ 174{
96 struct exynos_plane *exynos_plane = 175 struct exynos_plane *exynos_plane = to_exynos_plane(plane);
97 container_of(plane, struct exynos_plane, base);
98 176
99 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 177 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
100 178
@@ -103,69 +181,79 @@ static void exynos_plane_destroy(struct drm_plane *plane)
103 kfree(exynos_plane); 181 kfree(exynos_plane);
104} 182}
105 183
184static int exynos_plane_set_property(struct drm_plane *plane,
185 struct drm_property *property,
186 uint64_t val)
187{
188 struct drm_device *dev = plane->dev;
189 struct exynos_plane *exynos_plane = to_exynos_plane(plane);
190 struct exynos_drm_private *dev_priv = dev->dev_private;
191
192 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
193
194 if (property == dev_priv->plane_zpos_property) {
195 exynos_plane->overlay.zpos = val;
196 return 0;
197 }
198
199 return -EINVAL;
200}
201
106static struct drm_plane_funcs exynos_plane_funcs = { 202static struct drm_plane_funcs exynos_plane_funcs = {
107 .update_plane = exynos_update_plane, 203 .update_plane = exynos_update_plane,
108 .disable_plane = exynos_disable_plane, 204 .disable_plane = exynos_disable_plane,
109 .destroy = exynos_plane_destroy, 205 .destroy = exynos_plane_destroy,
206 .set_property = exynos_plane_set_property,
110}; 207};
111 208
112int exynos_plane_init(struct drm_device *dev, unsigned int nr) 209static void exynos_plane_attach_zpos_property(struct drm_plane *plane)
113{ 210{
114 struct exynos_plane *exynos_plane; 211 struct drm_device *dev = plane->dev;
115 uint32_t possible_crtcs; 212 struct exynos_drm_private *dev_priv = dev->dev_private;
213 struct drm_property *prop;
116 214
117 exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL); 215 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
118 if (!exynos_plane)
119 return -ENOMEM;
120 216
121 /* all CRTCs are available */ 217 prop = dev_priv->plane_zpos_property;
122 possible_crtcs = (1 << MAX_CRTC) - 1; 218 if (!prop) {
219 prop = drm_property_create_range(dev, 0, "zpos", 0,
220 MAX_PLANE - 1);
221 if (!prop)
222 return;
123 223
124 exynos_plane->overlay.zpos = DEFAULT_ZPOS; 224 dev_priv->plane_zpos_property = prop;
225 }
125 226
126 return drm_plane_init(dev, &exynos_plane->base, possible_crtcs, 227 drm_object_attach_property(&plane->base, prop, 0);
127 &exynos_plane_funcs, formats, ARRAY_SIZE(formats),
128 false);
129} 228}
130 229
131int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data, 230struct drm_plane *exynos_plane_init(struct drm_device *dev,
132 struct drm_file *file_priv) 231 unsigned int possible_crtcs, bool priv)
133{ 232{
134 struct drm_exynos_plane_set_zpos *zpos_req = data;
135 struct drm_mode_object *obj;
136 struct drm_plane *plane;
137 struct exynos_plane *exynos_plane; 233 struct exynos_plane *exynos_plane;
138 int ret = 0; 234 int err;
139 235
140 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 236 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
141 237
142 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 238 exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
143 return -EINVAL; 239 if (!exynos_plane) {
144 240 DRM_ERROR("failed to allocate plane\n");
145 if (zpos_req->zpos < 0 || zpos_req->zpos >= MAX_PLANE) { 241 return NULL;
146 if (zpos_req->zpos != DEFAULT_ZPOS) {
147 DRM_ERROR("zpos not within limits\n");
148 return -EINVAL;
149 }
150 } 242 }
151 243
152 mutex_lock(&dev->mode_config.mutex); 244 err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
153 245 &exynos_plane_funcs, formats, ARRAY_SIZE(formats),
154 obj = drm_mode_object_find(dev, zpos_req->plane_id, 246 priv);
155 DRM_MODE_OBJECT_PLANE); 247 if (err) {
156 if (!obj) { 248 DRM_ERROR("failed to initialize plane\n");
157 DRM_DEBUG_KMS("Unknown plane ID %d\n", 249 kfree(exynos_plane);
158 zpos_req->plane_id); 250 return NULL;
159 ret = -EINVAL;
160 goto out;
161 } 251 }
162 252
163 plane = obj_to_plane(obj); 253 if (priv)
164 exynos_plane = container_of(plane, struct exynos_plane, base); 254 exynos_plane->overlay.zpos = DEFAULT_ZPOS;
165 255 else
166 exynos_plane->overlay.zpos = zpos_req->zpos; 256 exynos_plane_attach_zpos_property(&exynos_plane->base);
167 257
168out: 258 return &exynos_plane->base;
169 mutex_unlock(&dev->mode_config.mutex);
170 return ret;
171} 259}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index 16b71f8217e7..88312458580d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -9,6 +9,12 @@
9 * 9 *
10 */ 10 */
11 11
12int exynos_plane_init(struct drm_device *dev, unsigned int nr); 12int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
13int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data, 13 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
14 struct drm_file *file_priv); 14 unsigned int crtc_w, unsigned int crtc_h,
15 uint32_t src_x, uint32_t src_y,
16 uint32_t src_w, uint32_t src_h);
17void exynos_plane_commit(struct drm_plane *plane);
18void exynos_plane_dpms(struct drm_plane *plane, int mode);
19struct drm_plane *exynos_plane_init(struct drm_device *dev,
20 unsigned int possible_crtcs, bool priv);
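
exynos_plane_init() now returns the struct drm_plane itself (NULL on failure) and takes a possible_crtcs mask plus a priv flag, which selects between a fixed DEFAULT_ZPOS and a user-visible zpos property. A hypothetical caller, assuming the first plane is the private (crtc) plane:

static int example_create_planes(struct drm_device *dev,
				 unsigned int nr_crtcs)
{
	unsigned int possible_crtcs = (1 << nr_crtcs) - 1;
	int i;

	for (i = 0; i < MAX_PLANE; i++) {
		/* assumption: plane 0 acts as the private plane */
		struct drm_plane *plane =
			exynos_plane_init(dev, possible_crtcs, i == 0);

		if (!plane)
			return -ENOMEM;	/* init freed its own allocation */
	}

	return 0;
}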
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 7b9c153dceb6..bb1550c4dd57 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -85,8 +85,6 @@ static const char fake_edid_info[] = {
85 0x00, 0x00, 0x00, 0x06 85 0x00, 0x00, 0x00, 0x06
86}; 86};
87 87
88static void vidi_fake_vblank_handler(struct work_struct *work);
89
90static bool vidi_display_is_connected(struct device *dev) 88static bool vidi_display_is_connected(struct device *dev)
91{ 89{
92 struct vidi_context *ctx = get_vidi_context(dev); 90 struct vidi_context *ctx = get_vidi_context(dev);
@@ -531,6 +529,16 @@ static int vidi_store_connection(struct device *dev,
531 if (ctx->connected > 1) 529 if (ctx->connected > 1)
532 return -EINVAL; 530 return -EINVAL;
533 531
 532 /* use fake edid data for testing. */
533 if (!ctx->raw_edid)
534 ctx->raw_edid = (struct edid *)fake_edid_info;
535
 536 /* if raw_edid isn't the same as the fake data then it can't be tested. */
537 if (ctx->raw_edid != (struct edid *)fake_edid_info) {
538 DRM_DEBUG_KMS("edid data is not fake data.\n");
539 return -EINVAL;
540 }
541
534 DRM_DEBUG_KMS("requested connection.\n"); 542 DRM_DEBUG_KMS("requested connection.\n");
535 543
536 drm_helper_hpd_irq_event(ctx->subdrv.drm_dev); 544 drm_helper_hpd_irq_event(ctx->subdrv.drm_dev);
@@ -549,6 +557,8 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
549 struct exynos_drm_manager *manager; 557 struct exynos_drm_manager *manager;
550 struct exynos_drm_display_ops *display_ops; 558 struct exynos_drm_display_ops *display_ops;
551 struct drm_exynos_vidi_connection *vidi = data; 559 struct drm_exynos_vidi_connection *vidi = data;
560 struct edid *raw_edid;
561 int edid_len;
552 562
553 DRM_DEBUG_KMS("%s\n", __FILE__); 563 DRM_DEBUG_KMS("%s\n", __FILE__);
554 564
@@ -557,11 +567,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
557 return -EINVAL; 567 return -EINVAL;
558 } 568 }
559 569
560 if (!vidi->edid) {
561 DRM_DEBUG_KMS("edid data is null.\n");
562 return -EINVAL;
563 }
564
565 if (vidi->connection > 1) { 570 if (vidi->connection > 1) {
566 DRM_DEBUG_KMS("connection should be 0 or 1.\n"); 571 DRM_DEBUG_KMS("connection should be 0 or 1.\n");
567 return -EINVAL; 572 return -EINVAL;
@@ -588,8 +593,30 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
588 return -EINVAL; 593 return -EINVAL;
589 } 594 }
590 595
591 if (vidi->connection) 596 if (vidi->connection) {
592 ctx->raw_edid = (struct edid *)vidi->edid; 597 if (!vidi->edid) {
598 DRM_DEBUG_KMS("edid data is null.\n");
599 return -EINVAL;
600 }
601 raw_edid = (struct edid *)(uint32_t)vidi->edid;
602 edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
603 ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL);
604 if (!ctx->raw_edid) {
605 DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
606 return -ENOMEM;
607 }
608 memcpy(ctx->raw_edid, raw_edid, edid_len);
609 } else {
610 /*
611 * with connection = 0, free raw_edid
 612 * only if the raw edid data isn't the same as the fake data.
613 */
614 if (ctx->raw_edid && ctx->raw_edid !=
615 (struct edid *)fake_edid_info) {
616 kfree(ctx->raw_edid);
617 ctx->raw_edid = NULL;
618 }
619 }
593 620
594 ctx->connected = vidi->connection; 621 ctx->connected = vidi->connection;
595 drm_helper_hpd_irq_event(ctx->subdrv.drm_dev); 622 drm_helper_hpd_irq_event(ctx->subdrv.drm_dev);
@@ -614,9 +641,6 @@ static int __devinit vidi_probe(struct platform_device *pdev)
614 641
615 INIT_WORK(&ctx->work, vidi_fake_vblank_handler); 642 INIT_WORK(&ctx->work, vidi_fake_vblank_handler);
616 643
617 /* for test */
618 ctx->raw_edid = (struct edid *)fake_edid_info;
619
620 subdrv = &ctx->subdrv; 644 subdrv = &ctx->subdrv;
621 subdrv->dev = dev; 645 subdrv->dev = dev;
622 subdrv->manager = &vidi_manager; 646 subdrv->manager = &vidi_manager;
@@ -644,6 +668,11 @@ static int __devexit vidi_remove(struct platform_device *pdev)
644 668
645 exynos_drm_subdrv_unregister(&ctx->subdrv); 669 exynos_drm_subdrv_unregister(&ctx->subdrv);
646 670
671 if (ctx->raw_edid != (struct edid *)fake_edid_info) {
672 kfree(ctx->raw_edid);
673 ctx->raw_edid = NULL;
674 }
675
647 kfree(ctx); 676 kfree(ctx);
648 677
649 return 0; 678 return 0;
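
The vidi connection ioctl now snapshots the user-supplied EDID rather than keeping a pointer into ioctl data, sizing the copy from the base block's extension count: one EDID_LENGTH (128-byte) base block plus 128 bytes per extension. The same rule as a kmemdup() sketch (hypothetical helper; the patch open-codes kzalloc() plus memcpy()):

#include <linux/slab.h>

static struct edid *edid_snapshot(const struct edid *src)
{
	size_t len = (1 + src->extensions) * EDID_LENGTH;

	return kmemdup(src, len, GFP_KERNEL);	/* NULL on failure */
}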
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index a137e9e39a33..409e2ec1207c 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -63,7 +63,6 @@ struct hdmi_context {
63 bool dvi_mode; 63 bool dvi_mode;
64 struct mutex hdmi_mutex; 64 struct mutex hdmi_mutex;
65 65
66 struct resource *regs_res;
67 void __iomem *regs; 66 void __iomem *regs;
68 unsigned int external_irq; 67 unsigned int external_irq;
69 unsigned int internal_irq; 68 unsigned int internal_irq;
@@ -1940,7 +1939,7 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
1940} 1939}
1941 1940
1942static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector, 1941static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
1943 struct drm_display_mode *mode, 1942 const struct drm_display_mode *mode,
1944 struct drm_display_mode *adjusted_mode) 1943 struct drm_display_mode *adjusted_mode)
1945{ 1944{
1946 struct drm_display_mode *m; 1945 struct drm_display_mode *m;
@@ -2280,16 +2279,17 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
2280 return -EINVAL; 2279 return -EINVAL;
2281 } 2280 }
2282 2281
2283 drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL); 2282 drm_hdmi_ctx = devm_kzalloc(&pdev->dev, sizeof(*drm_hdmi_ctx),
2283 GFP_KERNEL);
2284 if (!drm_hdmi_ctx) { 2284 if (!drm_hdmi_ctx) {
2285 DRM_ERROR("failed to allocate common hdmi context.\n"); 2285 DRM_ERROR("failed to allocate common hdmi context.\n");
2286 return -ENOMEM; 2286 return -ENOMEM;
2287 } 2287 }
2288 2288
2289 hdata = kzalloc(sizeof(struct hdmi_context), GFP_KERNEL); 2289 hdata = devm_kzalloc(&pdev->dev, sizeof(struct hdmi_context),
2290 GFP_KERNEL);
2290 if (!hdata) { 2291 if (!hdata) {
2291 DRM_ERROR("out of memory\n"); 2292 DRM_ERROR("out of memory\n");
2292 kfree(drm_hdmi_ctx);
2293 return -ENOMEM; 2293 return -ENOMEM;
2294 } 2294 }
2295 2295
@@ -2318,26 +2318,18 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
2318 goto err_resource; 2318 goto err_resource;
2319 } 2319 }
2320 2320
2321 hdata->regs_res = request_mem_region(res->start, resource_size(res), 2321 hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
2322 dev_name(dev));
2323 if (!hdata->regs_res) {
2324 DRM_ERROR("failed to claim register region\n");
2325 ret = -ENOENT;
2326 goto err_resource;
2327 }
2328
2329 hdata->regs = ioremap(res->start, resource_size(res));
2330 if (!hdata->regs) { 2322 if (!hdata->regs) {
2331 DRM_ERROR("failed to map registers\n"); 2323 DRM_ERROR("failed to map registers\n");
2332 ret = -ENXIO; 2324 ret = -ENXIO;
2333 goto err_req_region; 2325 goto err_resource;
2334 } 2326 }
2335 2327
2336 /* DDC i2c driver */ 2328 /* DDC i2c driver */
2337 if (i2c_add_driver(&ddc_driver)) { 2329 if (i2c_add_driver(&ddc_driver)) {
2338 DRM_ERROR("failed to register ddc i2c driver\n"); 2330 DRM_ERROR("failed to register ddc i2c driver\n");
2339 ret = -ENOENT; 2331 ret = -ENOENT;
2340 goto err_iomap; 2332 goto err_resource;
2341 } 2333 }
2342 2334
2343 hdata->ddc_port = hdmi_ddc; 2335 hdata->ddc_port = hdmi_ddc;
@@ -2398,16 +2390,9 @@ err_hdmiphy:
2398 i2c_del_driver(&hdmiphy_driver); 2390 i2c_del_driver(&hdmiphy_driver);
2399err_ddc: 2391err_ddc:
2400 i2c_del_driver(&ddc_driver); 2392 i2c_del_driver(&ddc_driver);
2401err_iomap:
2402 iounmap(hdata->regs);
2403err_req_region:
2404 release_mem_region(hdata->regs_res->start,
2405 resource_size(hdata->regs_res));
2406err_resource: 2393err_resource:
2407 hdmi_resources_cleanup(hdata); 2394 hdmi_resources_cleanup(hdata);
2408err_data: 2395err_data:
2409 kfree(hdata);
2410 kfree(drm_hdmi_ctx);
2411 return ret; 2396 return ret;
2412} 2397}
2413 2398
@@ -2425,18 +2410,11 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
2425 2410
2426 hdmi_resources_cleanup(hdata); 2411 hdmi_resources_cleanup(hdata);
2427 2412
2428 iounmap(hdata->regs);
2429
2430 release_mem_region(hdata->regs_res->start,
2431 resource_size(hdata->regs_res));
2432
2433 /* hdmiphy i2c driver */ 2413 /* hdmiphy i2c driver */
2434 i2c_del_driver(&hdmiphy_driver); 2414 i2c_del_driver(&hdmiphy_driver);
2435 /* DDC i2c driver */ 2415 /* DDC i2c driver */
2436 i2c_del_driver(&ddc_driver); 2416 i2c_del_driver(&ddc_driver);
2437 2417
2438 kfree(hdata);
2439
2440 return 0; 2418 return 0;
2441} 2419}
2442 2420
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e2147a2ddcec..30fcc12f81dd 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -956,7 +956,8 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
956 956
957 clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi); 957 clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
958 958
959 mixer_res->mixer_regs = ioremap(res->start, resource_size(res)); 959 mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
960 resource_size(res));
960 if (mixer_res->mixer_regs == NULL) { 961 if (mixer_res->mixer_regs == NULL) {
961 dev_err(dev, "register mapping failed.\n"); 962 dev_err(dev, "register mapping failed.\n");
962 ret = -ENXIO; 963 ret = -ENXIO;
@@ -967,38 +968,34 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
967 if (res == NULL) { 968 if (res == NULL) {
968 dev_err(dev, "get memory resource failed.\n"); 969 dev_err(dev, "get memory resource failed.\n");
969 ret = -ENXIO; 970 ret = -ENXIO;
970 goto fail_mixer_regs; 971 goto fail;
971 } 972 }
972 973
973 mixer_res->vp_regs = ioremap(res->start, resource_size(res)); 974 mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
975 resource_size(res));
974 if (mixer_res->vp_regs == NULL) { 976 if (mixer_res->vp_regs == NULL) {
975 dev_err(dev, "register mapping failed.\n"); 977 dev_err(dev, "register mapping failed.\n");
976 ret = -ENXIO; 978 ret = -ENXIO;
977 goto fail_mixer_regs; 979 goto fail;
978 } 980 }
979 981
980 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq"); 982 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq");
981 if (res == NULL) { 983 if (res == NULL) {
982 dev_err(dev, "get interrupt resource failed.\n"); 984 dev_err(dev, "get interrupt resource failed.\n");
983 ret = -ENXIO; 985 ret = -ENXIO;
984 goto fail_vp_regs; 986 goto fail;
985 } 987 }
986 988
987 ret = request_irq(res->start, mixer_irq_handler, 0, "drm_mixer", ctx); 989 ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
990 0, "drm_mixer", ctx);
988 if (ret) { 991 if (ret) {
989 dev_err(dev, "request interrupt failed.\n"); 992 dev_err(dev, "request interrupt failed.\n");
990 goto fail_vp_regs; 993 goto fail;
991 } 994 }
992 mixer_res->irq = res->start; 995 mixer_res->irq = res->start;
993 996
994 return 0; 997 return 0;
995 998
996fail_vp_regs:
997 iounmap(mixer_res->vp_regs);
998
999fail_mixer_regs:
1000 iounmap(mixer_res->mixer_regs);
1001
1002fail: 999fail:
1003 if (!IS_ERR_OR_NULL(mixer_res->sclk_dac)) 1000 if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
1004 clk_put(mixer_res->sclk_dac); 1001 clk_put(mixer_res->sclk_dac);
@@ -1013,16 +1010,6 @@ fail:
1013 return ret; 1010 return ret;
1014} 1011}
1015 1012
1016static void mixer_resources_cleanup(struct mixer_context *ctx)
1017{
1018 struct mixer_resources *res = &ctx->mixer_res;
1019
1020 free_irq(res->irq, ctx);
1021
1022 iounmap(res->vp_regs);
1023 iounmap(res->mixer_regs);
1024}
1025
1026static int __devinit mixer_probe(struct platform_device *pdev) 1013static int __devinit mixer_probe(struct platform_device *pdev)
1027{ 1014{
1028 struct device *dev = &pdev->dev; 1015 struct device *dev = &pdev->dev;
@@ -1032,16 +1019,16 @@ static int __devinit mixer_probe(struct platform_device *pdev)
1032 1019
1033 dev_info(dev, "probe start\n"); 1020 dev_info(dev, "probe start\n");
1034 1021
1035 drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL); 1022 drm_hdmi_ctx = devm_kzalloc(&pdev->dev, sizeof(*drm_hdmi_ctx),
1023 GFP_KERNEL);
1036 if (!drm_hdmi_ctx) { 1024 if (!drm_hdmi_ctx) {
1037 DRM_ERROR("failed to allocate common hdmi context.\n"); 1025 DRM_ERROR("failed to allocate common hdmi context.\n");
1038 return -ENOMEM; 1026 return -ENOMEM;
1039 } 1027 }
1040 1028
1041 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 1029 ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
1042 if (!ctx) { 1030 if (!ctx) {
1043 DRM_ERROR("failed to alloc mixer context.\n"); 1031 DRM_ERROR("failed to alloc mixer context.\n");
1044 kfree(drm_hdmi_ctx);
1045 return -ENOMEM; 1032 return -ENOMEM;
1046 } 1033 }
1047 1034
@@ -1072,17 +1059,10 @@ fail:
1072 1059
1073static int mixer_remove(struct platform_device *pdev) 1060static int mixer_remove(struct platform_device *pdev)
1074{ 1061{
1075 struct device *dev = &pdev->dev; 1062 dev_info(&pdev->dev, "remove successful\n");
1076 struct exynos_drm_hdmi_context *drm_hdmi_ctx =
1077 platform_get_drvdata(pdev);
1078 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
1079
1080 dev_info(dev, "remove successful\n");
1081 1063
1082 pm_runtime_disable(&pdev->dev); 1064 pm_runtime_disable(&pdev->dev);
1083 1065
1084 mixer_resources_cleanup(ctx);
1085
1086 return 0; 1066 return 0;
1087} 1067}
1088 1068
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 9764045428ce..b7e7b49d8f62 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -78,21 +78,6 @@ static int cdv_backlight_combination_mode(struct drm_device *dev)
78 return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE; 78 return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE;
79} 79}
80 80
81static int cdv_get_brightness(struct backlight_device *bd)
82{
83 struct drm_device *dev = bl_get_data(bd);
84 u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
85
86 if (cdv_backlight_combination_mode(dev)) {
87 u8 lbpc;
88
89 val &= ~1;
90 pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
91 val *= lbpc;
92 }
93 return val;
94}
95
96static u32 cdv_get_max_backlight(struct drm_device *dev) 81static u32 cdv_get_max_backlight(struct drm_device *dev)
97{ 82{
98 u32 max = REG_READ(BLC_PWM_CTL); 83 u32 max = REG_READ(BLC_PWM_CTL);
@@ -110,6 +95,22 @@ static u32 cdv_get_max_backlight(struct drm_device *dev)
110 return max; 95 return max;
111} 96}
112 97
98static int cdv_get_brightness(struct backlight_device *bd)
99{
100 struct drm_device *dev = bl_get_data(bd);
101 u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
102
103 if (cdv_backlight_combination_mode(dev)) {
104 u8 lbpc;
105
106 val &= ~1;
107 pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
108 val *= lbpc;
109 }
110 return (val * 100)/cdv_get_max_backlight(dev);
111
112}
113
113static int cdv_set_brightness(struct backlight_device *bd) 114static int cdv_set_brightness(struct backlight_device *bd)
114{ 115{
115 struct drm_device *dev = bl_get_data(bd); 116 struct drm_device *dev = bl_get_data(bd);
@@ -120,6 +121,9 @@ static int cdv_set_brightness(struct backlight_device *bd)
120 if (level < 1) 121 if (level < 1)
121 level = 1; 122 level = 1;
122 123
124 level *= cdv_get_max_backlight(dev);
125 level /= 100;
126
123 if (cdv_backlight_combination_mode(dev)) { 127 if (cdv_backlight_combination_mode(dev)) {
124 u32 max = cdv_get_max_backlight(dev); 128 u32 max = cdv_get_max_backlight(dev);
125 u8 lbpc; 129 u8 lbpc;
@@ -157,7 +161,6 @@ static int cdv_backlight_init(struct drm_device *dev)
157 161
158 cdv_backlight_device->props.brightness = 162 cdv_backlight_device->props.brightness =
159 cdv_get_brightness(cdv_backlight_device); 163 cdv_get_brightness(cdv_backlight_device);
160 cdv_backlight_device->props.max_brightness = cdv_get_max_backlight(dev);
161 backlight_update_status(cdv_backlight_device); 164 backlight_update_status(cdv_backlight_device);
162 dev_priv->backlight_device = cdv_backlight_device; 165 dev_priv->backlight_device = cdv_backlight_device;
163 return 0; 166 return 0;
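
The cdv backlight rework above moves the reported brightness scale to 0-100 percent, converting against the hardware maximum in both directions. Note the integer division truncates, so a round trip can land one hardware step low; with an assumed maximum of 255:

static unsigned int hw_to_percent(unsigned int val, unsigned int max)
{
	return (val * 100) / max;	/* 128 -> 50 */
}

static unsigned int percent_to_hw(unsigned int level, unsigned int max)
{
	return (level * max) / 100;	/* 50 -> 127, one step below 128 */
}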
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 187422018601..8c175345d85c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -82,7 +82,7 @@ static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
82} 82}
83 83
84static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder, 84static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder,
85 struct drm_display_mode *mode, 85 const struct drm_display_mode *mode,
86 struct drm_display_mode *adjusted_mode) 86 struct drm_display_mode *adjusted_mode)
87{ 87{
88 return true; 88 return true;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index c3e9a0f701df..a68509ba22a8 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -913,7 +913,7 @@ static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
913} 913}
914 914
915static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc, 915static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
916 struct drm_display_mode *mode, 916 const struct drm_display_mode *mode,
917 struct drm_display_mode *adjusted_mode) 917 struct drm_display_mode *adjusted_mode)
918{ 918{
919 return true; 919 return true;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 88b59d4a7b7f..a86f87b9ddde 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -90,7 +90,7 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
90} 90}
91 91
92static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder, 92static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
93 struct drm_display_mode *mode, 93 const struct drm_display_mode *mode,
94 struct drm_display_mode *adjusted_mode) 94 struct drm_display_mode *adjusted_mode)
95{ 95{
96 return true; 96 return true;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index ff5b58eb878c..c7f9468b74ba 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -270,7 +270,7 @@ static int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
270} 270}
271 271
272static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder, 272static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
273 struct drm_display_mode *mode, 273 const struct drm_display_mode *mode,
274 struct drm_display_mode *adjusted_mode) 274 struct drm_display_mode *adjusted_mode)
275{ 275{
276 struct drm_device *dev = encoder->dev; 276 struct drm_device *dev = encoder->dev;
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 973d7f6d66b7..8d7caf0f363e 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -427,7 +427,7 @@ parse_device_mapping(struct drm_psb_private *dev_priv,
427 * 427 *
428 * Returns 0 on success, nonzero on failure. 428 * Returns 0 on success, nonzero on failure.
429 */ 429 */
430bool psb_intel_init_bios(struct drm_device *dev) 430int psb_intel_init_bios(struct drm_device *dev)
431{ 431{
432 struct drm_psb_private *dev_priv = dev->dev_private; 432 struct drm_psb_private *dev_priv = dev->dev_private;
433 struct pci_dev *pdev = dev->pdev; 433 struct pci_dev *pdev = dev->pdev;
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 0a738663eb5a..2e95523b84b1 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -431,7 +431,7 @@ struct bdb_driver_features {
431 u8 custom_vbt_version; 431 u8 custom_vbt_version;
432} __attribute__((packed)); 432} __attribute__((packed));
433 433
434extern bool psb_intel_init_bios(struct drm_device *dev); 434extern int psb_intel_init_bios(struct drm_device *dev);
435extern void psb_intel_destroy_bios(struct drm_device *dev); 435extern void psb_intel_destroy_bios(struct drm_device *dev);
436 436
437/* 437/*
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index b34ff097b979..d4813e03f5ee 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -684,7 +684,7 @@ void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode)
684} 684}
685 685
686bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder, 686bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
687 struct drm_display_mode *mode, 687 const struct drm_display_mode *mode,
688 struct drm_display_mode *adjusted_mode) 688 struct drm_display_mode *adjusted_mode)
689{ 689{
690 struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder); 690 struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
index 6f762478b959..2b40663e1696 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
@@ -65,7 +65,7 @@ extern struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
65/* MDFLD DPI helper functions */ 65/* MDFLD DPI helper functions */
66extern void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode); 66extern void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode);
67extern bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder, 67extern bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
68 struct drm_display_mode *mode, 68 const struct drm_display_mode *mode,
69 struct drm_display_mode *adjusted_mode); 69 struct drm_display_mode *adjusted_mode);
70extern void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder); 70extern void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder);
71extern void mdfld_dsi_dpi_commit(struct drm_encoder *encoder); 71extern void mdfld_dsi_dpi_commit(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 3f3cd619c79f..dec6a9aea3c6 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -117,7 +117,7 @@ static void psb_intel_crtc_commit(struct drm_crtc *crtc)
117} 117}
118 118
119static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc, 119static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
120 struct drm_display_mode *mode, 120 const struct drm_display_mode *mode,
121 struct drm_display_mode *adjusted_mode) 121 struct drm_display_mode *adjusted_mode)
122{ 122{
123 return true; 123 return true;
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index f821c835ca90..cdafd2acc72f 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -487,7 +487,7 @@ oaktrail_crtc_mode_set_exit:
487} 487}
488 488
489static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc, 489static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
490 struct drm_display_mode *mode, 490 const struct drm_display_mode *mode,
491 struct drm_display_mode *adjusted_mode) 491 struct drm_display_mode *adjusted_mode)
492{ 492{
493 return true; 493 return true;
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index c10899c953b9..2eb3dc4e9c9b 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -191,7 +191,7 @@ static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
191} 191}
192 192
193static bool oaktrail_hdmi_mode_fixup(struct drm_encoder *encoder, 193static bool oaktrail_hdmi_mode_fixup(struct drm_encoder *encoder,
194 struct drm_display_mode *mode, 194 const struct drm_display_mode *mode,
195 struct drm_display_mode *adjusted_mode) 195 struct drm_display_mode *adjusted_mode)
196{ 196{
197 return true; 197 return true;
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index 4f186eca3a30..c430bd424681 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -144,6 +144,8 @@ struct opregion_asle {
144 144
145#define ASLE_CBLV_VALID (1<<31) 145#define ASLE_CBLV_VALID (1<<31)
146 146
147static struct psb_intel_opregion *system_opregion;
148
147static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) 149static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
148{ 150{
149 struct drm_psb_private *dev_priv = dev->dev_private; 151 struct drm_psb_private *dev_priv = dev->dev_private;
@@ -205,7 +207,7 @@ void psb_intel_opregion_enable_asle(struct drm_device *dev)
205 struct drm_psb_private *dev_priv = dev->dev_private; 207 struct drm_psb_private *dev_priv = dev->dev_private;
206 struct opregion_asle *asle = dev_priv->opregion.asle; 208 struct opregion_asle *asle = dev_priv->opregion.asle;
207 209
 208 if (asle) { 210 if (asle && system_opregion) {
209 /* Don't do this on Medfield or other non PC like devices, they 211 /* Don't do this on Medfield or other non PC like devices, they
210 use the bit for something different altogether */ 212 use the bit for something different altogether */
211 psb_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE); 213 psb_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
@@ -221,7 +223,6 @@ void psb_intel_opregion_enable_asle(struct drm_device *dev)
221#define ACPI_EV_LID (1<<1) 223#define ACPI_EV_LID (1<<1)
222#define ACPI_EV_DOCK (1<<2) 224#define ACPI_EV_DOCK (1<<2)
223 225
224static struct psb_intel_opregion *system_opregion;
225 226
226static int psb_intel_opregion_video_event(struct notifier_block *nb, 227static int psb_intel_opregion_video_event(struct notifier_block *nb,
227 unsigned long val, void *data) 228 unsigned long val, void *data)
@@ -266,9 +267,6 @@ void psb_intel_opregion_init(struct drm_device *dev)
266 system_opregion = opregion; 267 system_opregion = opregion;
267 register_acpi_notifier(&psb_intel_opregion_notifier); 268 register_acpi_notifier(&psb_intel_opregion_notifier);
268 } 269 }
269
270 if (opregion->asle)
271 psb_intel_opregion_enable_asle(dev);
272} 270}
273 271
274void psb_intel_opregion_fini(struct drm_device *dev) 272void psb_intel_opregion_fini(struct drm_device *dev)
diff --git a/drivers/gpu/drm/gma500/opregion.h b/drivers/gpu/drm/gma500/opregion.h
index 72dc6b921265..4a90f8b0e16c 100644
--- a/drivers/gpu/drm/gma500/opregion.h
+++ b/drivers/gpu/drm/gma500/opregion.h
@@ -27,6 +27,7 @@ extern void psb_intel_opregion_asle_intr(struct drm_device *dev);
27extern void psb_intel_opregion_init(struct drm_device *dev); 27extern void psb_intel_opregion_init(struct drm_device *dev);
28extern void psb_intel_opregion_fini(struct drm_device *dev); 28extern void psb_intel_opregion_fini(struct drm_device *dev);
29extern int psb_intel_opregion_setup(struct drm_device *dev); 29extern int psb_intel_opregion_setup(struct drm_device *dev);
30extern void psb_intel_opregion_enable_asle(struct drm_device *dev);
30 31
31#else 32#else
32 33
@@ -46,4 +47,8 @@ extern inline int psb_intel_opregion_setup(struct drm_device *dev)
46{ 47{
47 return 0; 48 return 0;
48} 49}
50
51extern inline void psb_intel_opregion_enable_asle(struct drm_device *dev)
52{
53}
49#endif 54#endif
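
The header change pairs the real psb_intel_opregion_enable_asle() with an empty inline stub for non-ACPI builds, so callers such as psb_driver_load() need no #ifdef. A minimal sketch of the same idiom, with hypothetical feature_* names (the diff itself uses the older "extern inline" spelling this header already relies on for its other stubs):

    #ifdef CONFIG_ACPI
    extern void feature_enable(struct drm_device *dev);
    #else
    static inline void feature_enable(struct drm_device *dev)
    {
            /* ACPI disabled: nothing to enable, caller stays #ifdef-free */
    }
    #endif
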
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index eff039bf92d4..5971bc82b765 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -144,6 +144,10 @@ static int psb_backlight_init(struct drm_device *dev)
144 psb_backlight_device->props.max_brightness = 100; 144 psb_backlight_device->props.max_brightness = 100;
145 backlight_update_status(psb_backlight_device); 145 backlight_update_status(psb_backlight_device);
146 dev_priv->backlight_device = psb_backlight_device; 146 dev_priv->backlight_device = psb_backlight_device;
147
148 /* This must occur after the backlight is properly initialised */
149 psb_lid_timer_init(dev_priv);
150
147 return 0; 151 return 0;
148} 152}
149 153
@@ -354,13 +358,6 @@ static int psb_chip_setup(struct drm_device *dev)
354 return 0; 358 return 0;
355} 359}
356 360
357/* Not exactly an erratum more an irritation */
358static void psb_chip_errata(struct drm_device *dev)
359{
360 struct drm_psb_private *dev_priv = dev->dev_private;
361 psb_lid_timer_init(dev_priv);
362}
363
364static void psb_chip_teardown(struct drm_device *dev) 361static void psb_chip_teardown(struct drm_device *dev)
365{ 362{
366 struct drm_psb_private *dev_priv = dev->dev_private; 363 struct drm_psb_private *dev_priv = dev->dev_private;
@@ -379,7 +376,6 @@ const struct psb_ops psb_chip_ops = {
379 .sgx_offset = PSB_SGX_OFFSET, 376 .sgx_offset = PSB_SGX_OFFSET,
380 .chip_setup = psb_chip_setup, 377 .chip_setup = psb_chip_setup,
381 .chip_teardown = psb_chip_teardown, 378 .chip_teardown = psb_chip_teardown,
382 .errata = psb_chip_errata,
383 379
384 .crtc_helper = &psb_intel_helper_funcs, 380 .crtc_helper = &psb_intel_helper_funcs,
385 .crtc_funcs = &psb_intel_crtc_funcs, 381 .crtc_funcs = &psb_intel_crtc_funcs,
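
Moving psb_lid_timer_init() out of the .errata hook and into psb_backlight_init() encodes an ordering dependency: the comment added above requires the timer to come after backlight init, presumably because the lid handler adjusts the backlight. A sketch of the constraint, with a hypothetical example_panel_init() wrapper:

    static int example_panel_init(struct drm_device *dev)
    {
            struct drm_psb_private *dev_priv = dev->dev_private;
            int ret = psb_backlight_init(dev); /* registers backlight_device */

            if (ret)
                    return ret;
            psb_lid_timer_init(dev_priv);      /* safe: backlight now exists */
            return 0;
    }
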
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index caba6e08693c..0c4737438530 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -374,6 +374,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
374 374
375 if (ret) 375 if (ret)
376 return ret; 376 return ret;
377 psb_intel_opregion_enable_asle(dev);
377#if 0 378#if 0
378 /*enable runtime pm at last*/ 379 /*enable runtime pm at last*/
379 pm_runtime_enable(&dev->pdev->dev); 380 pm_runtime_enable(&dev->pdev->dev);
@@ -632,7 +633,6 @@ static struct drm_driver driver = {
632 .open = psb_driver_open, 633 .open = psb_driver_open,
633 .preclose = psb_driver_preclose, 634 .preclose = psb_driver_preclose,
634 .postclose = psb_driver_close, 635 .postclose = psb_driver_close,
635 .reclaim_buffers = drm_core_reclaim_buffers,
636 636
637 .gem_init_object = psb_gem_init_object, 637 .gem_init_object = psb_gem_init_object,
638 .gem_free_object = psb_gem_free_object, 638 .gem_free_object = psb_gem_free_object,
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 36c3c99612f6..8033526bb53b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -543,7 +543,7 @@ void psb_intel_encoder_destroy(struct drm_encoder *encoder)
543} 543}
544 544
545static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc, 545static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
546 struct drm_display_mode *mode, 546 const struct drm_display_mode *mode,
547 struct drm_display_mode *adjusted_mode) 547 struct drm_display_mode *adjusted_mode)
548{ 548{
549 return true; 549 return true;
@@ -1362,6 +1362,9 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
1362 (struct drm_connector **) (psb_intel_crtc + 1); 1362 (struct drm_connector **) (psb_intel_crtc + 1);
1363 psb_intel_crtc->mode_set.num_connectors = 0; 1363 psb_intel_crtc->mode_set.num_connectors = 0;
1364 psb_intel_cursor_init(dev, psb_intel_crtc); 1364 psb_intel_cursor_init(dev, psb_intel_crtc);
1365
1366 /* Set to true so that the pipe is forced off on initial config. */
1367 psb_intel_crtc->active = true;
1365} 1368}
1366 1369
1367int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 1370int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 2515f83248cb..ebe1a28f60e1 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -268,7 +268,7 @@ extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
268 *mode_cmd, 268 *mode_cmd,
269 void *mm_private); 269 void *mm_private);
270extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, 270extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
271 struct drm_display_mode *mode, 271 const struct drm_display_mode *mode,
272 struct drm_display_mode *adjusted_mode); 272 struct drm_display_mode *adjusted_mode);
273extern int psb_intel_lvds_mode_valid(struct drm_connector *connector, 273extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
274 struct drm_display_mode *mode); 274 struct drm_display_mode *mode);
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index c83f5b5d1057..37adc9edf974 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -375,7 +375,7 @@ int psb_intel_lvds_mode_valid(struct drm_connector *connector,
375} 375}
376 376
377bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, 377bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
378 struct drm_display_mode *mode, 378 const struct drm_display_mode *mode,
379 struct drm_display_mode *adjusted_mode) 379 struct drm_display_mode *adjusted_mode)
380{ 380{
381 struct drm_device *dev = encoder->dev; 381 struct drm_device *dev = encoder->dev;
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index d39b15be7649..0466c7b985f8 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -901,7 +901,7 @@ static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
901 901
902static bool 902static bool
903psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdvo, 903psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdvo,
904 struct drm_display_mode *mode) 904 const struct drm_display_mode *mode)
905{ 905{
906 struct psb_intel_sdvo_dtd output_dtd; 906 struct psb_intel_sdvo_dtd output_dtd;
907 907
@@ -918,7 +918,7 @@ psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdv
918 918
919static bool 919static bool
920psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo, 920psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo,
921 struct drm_display_mode *mode, 921 const struct drm_display_mode *mode,
922 struct drm_display_mode *adjusted_mode) 922 struct drm_display_mode *adjusted_mode)
923{ 923{
924 /* Reset the input timing to the screen. Assume always input 0. */ 924 /* Reset the input timing to the screen. Assume always input 0. */
@@ -942,7 +942,7 @@ psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo,
942} 942}
943 943
944static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder, 944static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
945 struct drm_display_mode *mode, 945 const struct drm_display_mode *mode,
946 struct drm_display_mode *adjusted_mode) 946 struct drm_display_mode *adjusted_mode)
947{ 947{
948 struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder); 948 struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index d3f2e8785010..36d952280c50 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -88,7 +88,7 @@ static void ch7006_encoder_restore(struct drm_encoder *encoder)
88} 88}
89 89
90static bool ch7006_encoder_mode_fixup(struct drm_encoder *encoder, 90static bool ch7006_encoder_mode_fixup(struct drm_encoder *encoder,
91 struct drm_display_mode *mode, 91 const struct drm_display_mode *mode,
92 struct drm_display_mode *adjusted_mode) 92 struct drm_display_mode *adjusted_mode)
93{ 93{
94 struct ch7006_priv *priv = to_ch7006_priv(encoder); 94 struct ch7006_priv *priv = to_ch7006_priv(encoder);
diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c
index c860f24a5afc..9b83574141a6 100644
--- a/drivers/gpu/drm/i2c/ch7006_mode.c
+++ b/drivers/gpu/drm/i2c/ch7006_mode.c
@@ -172,7 +172,7 @@ struct ch7006_mode ch7006_modes[] = {
172}; 172};
173 173
174struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder, 174struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
175 struct drm_display_mode *drm_mode) 175 const struct drm_display_mode *drm_mode)
176{ 176{
177 struct ch7006_priv *priv = to_ch7006_priv(encoder); 177 struct ch7006_priv *priv = to_ch7006_priv(encoder);
178 struct ch7006_mode *mode; 178 struct ch7006_mode *mode;
diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/i2c/ch7006_priv.h
index 17667b7d57e7..09599f4c0c9a 100644
--- a/drivers/gpu/drm/i2c/ch7006_priv.h
+++ b/drivers/gpu/drm/i2c/ch7006_priv.h
@@ -111,7 +111,7 @@ extern struct ch7006_tv_norm_info ch7006_tv_norms[];
111extern struct ch7006_mode ch7006_modes[]; 111extern struct ch7006_mode ch7006_modes[];
112 112
113struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder, 113struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
114 struct drm_display_mode *drm_mode); 114 const struct drm_display_mode *drm_mode);
115 115
116void ch7006_setup_levels(struct drm_encoder *encoder); 116void ch7006_setup_levels(struct drm_encoder *encoder);
117void ch7006_setup_subcarrier(struct drm_encoder *encoder); 117void ch7006_setup_subcarrier(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index b7d45ab4ba69..30b8ae5e5c4a 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -254,7 +254,7 @@ sil164_encoder_restore(struct drm_encoder *encoder)
254 254
255static bool 255static bool
256sil164_encoder_mode_fixup(struct drm_encoder *encoder, 256sil164_encoder_mode_fixup(struct drm_encoder *encoder,
257 struct drm_display_mode *mode, 257 const struct drm_display_mode *mode,
258 struct drm_display_mode *adjusted_mode) 258 struct drm_display_mode *adjusted_mode)
259{ 259{
260 return true; 260 return true;
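
All of the mode_fixup changes in this series are the same mechanical constification: the requested mode becomes const and only adjusted_mode stays writable, which guarantees encoders modify the adjusted copy rather than the caller's requested mode. As of this series the helper-vtable slot looks roughly like this (a sketch of the prototype only, not the full struct):

    struct drm_encoder_helper_funcs {
            /* ... */
            bool (*mode_fixup)(struct drm_encoder *encoder,
                               const struct drm_display_mode *mode,
                               struct drm_display_mode *adjusted_mode);
            /* ... */
    };
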
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index fa9439159ebd..57d892eaaa6e 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -881,7 +881,7 @@ static int i810_flush_queue(struct drm_device *dev)
881} 881}
882 882
883/* Must be called with the lock held */ 883/* Must be called with the lock held */
884static void i810_reclaim_buffers(struct drm_device *dev, 884void i810_driver_reclaim_buffers(struct drm_device *dev,
885 struct drm_file *file_priv) 885 struct drm_file *file_priv)
886{ 886{
887 struct drm_device_dma *dma = dev->dma; 887 struct drm_device_dma *dma = dev->dma;
@@ -1220,12 +1220,17 @@ void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
1220 if (dev_priv->page_flipping) 1220 if (dev_priv->page_flipping)
1221 i810_do_cleanup_pageflip(dev); 1221 i810_do_cleanup_pageflip(dev);
1222 } 1222 }
1223}
1224 1223
1225void i810_driver_reclaim_buffers_locked(struct drm_device *dev, 1224 if (file_priv->master && file_priv->master->lock.hw_lock) {
1226 struct drm_file *file_priv) 1225 drm_idlelock_take(&file_priv->master->lock);
1227{ 1226 i810_driver_reclaim_buffers(dev, file_priv);
1228 i810_reclaim_buffers(dev, file_priv); 1227 drm_idlelock_release(&file_priv->master->lock);
1228 } else {
1229 /* master disappeared, clean up stuff anyway and hope nothing
1230 * goes wrong */
1231 i810_driver_reclaim_buffers(dev, file_priv);
1232 }
1233
1229} 1234}
1230 1235
1231int i810_driver_dma_quiescent(struct drm_device *dev) 1236int i810_driver_dma_quiescent(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index ec12f7dc717a..f9924ad04d09 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -57,13 +57,12 @@ static const struct file_operations i810_driver_fops = {
57static struct drm_driver driver = { 57static struct drm_driver driver = {
58 .driver_features = 58 .driver_features =
59 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | 59 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
60 DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE, 60 DRIVER_HAVE_DMA,
61 .dev_priv_size = sizeof(drm_i810_buf_priv_t), 61 .dev_priv_size = sizeof(drm_i810_buf_priv_t),
62 .load = i810_driver_load, 62 .load = i810_driver_load,
63 .lastclose = i810_driver_lastclose, 63 .lastclose = i810_driver_lastclose,
64 .preclose = i810_driver_preclose, 64 .preclose = i810_driver_preclose,
65 .device_is_agp = i810_driver_device_is_agp, 65 .device_is_agp = i810_driver_device_is_agp,
66 .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
67 .dma_quiescent = i810_driver_dma_quiescent, 66 .dma_quiescent = i810_driver_dma_quiescent,
68 .ioctls = i810_ioctls, 67 .ioctls = i810_ioctls,
69 .fops = &i810_driver_fops, 68 .fops = &i810_driver_fops,
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index c9339f481795..6e0acad9e0f5 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -116,14 +116,12 @@ typedef struct drm_i810_private {
116 116
117 /* i810_dma.c */ 117 /* i810_dma.c */
118extern int i810_driver_dma_quiescent(struct drm_device *dev); 118extern int i810_driver_dma_quiescent(struct drm_device *dev);
119extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev, 119void i810_driver_reclaim_buffers(struct drm_device *dev,
120 struct drm_file *file_priv); 120 struct drm_file *file_priv);
121extern int i810_driver_load(struct drm_device *, unsigned long flags); 121extern int i810_driver_load(struct drm_device *, unsigned long flags);
122extern void i810_driver_lastclose(struct drm_device *dev); 122extern void i810_driver_lastclose(struct drm_device *dev);
123extern void i810_driver_preclose(struct drm_device *dev, 123extern void i810_driver_preclose(struct drm_device *dev,
124 struct drm_file *file_priv); 124 struct drm_file *file_priv);
125extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
126 struct drm_file *file_priv);
127extern int i810_driver_device_is_agp(struct drm_device *dev); 125extern int i810_driver_device_is_agp(struct drm_device *dev);
128 126
129extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 127extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
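
With DRIVER_DMA_QUEUE and the core reclaim_buffers_locked callback gone, the driver serializes buffer reclaim itself from preclose, taking the master's idlelock while a master still exists. A minimal sketch of the pattern, mirroring i810_driver_preclose() above with hypothetical example_* names:

    void example_preclose(struct drm_device *dev, struct drm_file *file_priv)
    {
            if (file_priv->master && file_priv->master->lock.hw_lock) {
                    drm_idlelock_take(&file_priv->master->lock);
                    example_reclaim_buffers(dev, file_priv);
                    drm_idlelock_release(&file_priv->master->lock);
            } else {
                    /* master already gone: reclaim without the idlelock */
                    example_reclaim_buffers(dev, file_priv);
            }
    }
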
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 2e9268da58d8..b0bacdba6d7e 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -7,6 +7,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
7 i915_debugfs.o \ 7 i915_debugfs.o \
8 i915_suspend.o \ 8 i915_suspend.o \
9 i915_gem.o \ 9 i915_gem.o \
10 i915_gem_context.o \
10 i915_gem_debug.o \ 11 i915_gem_debug.o \
11 i915_gem_evict.o \ 12 i915_gem_evict.o \
12 i915_gem_execbuffer.o \ 13 i915_gem_execbuffer.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 8c2ad014c47f..58914691a77b 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -86,7 +86,7 @@ struct intel_dvo_dev_ops {
86 * buses with clock limitations. 86 * buses with clock limitations.
87 */ 87 */
88 bool (*mode_fixup)(struct intel_dvo_device *dvo, 88 bool (*mode_fixup)(struct intel_dvo_device *dvo,
89 struct drm_display_mode *mode, 89 const struct drm_display_mode *mode,
90 struct drm_display_mode *adjusted_mode); 90 struct drm_display_mode *adjusted_mode);
91 91
92 /* 92 /*
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5363e9c66c27..359f6e8b9b00 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -676,6 +676,7 @@ static void i915_ring_error_state(struct seq_file *m,
676 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); 676 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
677 seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); 677 seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
678 if (INTEL_INFO(dev)->gen >= 6) { 678 if (INTEL_INFO(dev)->gen >= 6) {
679 seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
679 seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); 680 seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
680 seq_printf(m, " SYNC_0: 0x%08x\n", 681 seq_printf(m, " SYNC_0: 0x%08x\n",
681 error->semaphore_mboxes[ring][0]); 682 error->semaphore_mboxes[ring][0]);
@@ -713,6 +714,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
713 seq_printf(m, "EIR: 0x%08x\n", error->eir); 714 seq_printf(m, "EIR: 0x%08x\n", error->eir);
714 seq_printf(m, "IER: 0x%08x\n", error->ier); 715 seq_printf(m, "IER: 0x%08x\n", error->ier);
715 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 716 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
717 seq_printf(m, "CCID: 0x%08x\n", error->ccid);
716 718
717 for (i = 0; i < dev_priv->num_fence_regs; i++) 719 for (i = 0; i < dev_priv->num_fence_regs; i++)
718 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 720 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
@@ -1765,6 +1767,64 @@ static const struct file_operations i915_max_freq_fops = {
1765}; 1767};
1766 1768
1767static ssize_t 1769static ssize_t
1770i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
1771 loff_t *ppos)
1772{
1773 struct drm_device *dev = filp->private_data;
1774 drm_i915_private_t *dev_priv = dev->dev_private;
1775 char buf[80];
1776 int len;
1777
1778 len = snprintf(buf, sizeof(buf),
1779 "min freq: %d\n", dev_priv->min_delay * 50);
1780
1781 if (len > sizeof(buf))
1782 len = sizeof(buf);
1783
1784 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1785}
1786
1787static ssize_t
1788i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
1789 loff_t *ppos)
1790{
1791 struct drm_device *dev = filp->private_data;
1792 struct drm_i915_private *dev_priv = dev->dev_private;
1793 char buf[20];
1794 int val = 1;
1795
1796 if (cnt > 0) {
1797 if (cnt > sizeof(buf) - 1)
1798 return -EINVAL;
1799
1800 if (copy_from_user(buf, ubuf, cnt))
1801 return -EFAULT;
1802 buf[cnt] = 0;
1803
1804 val = simple_strtoul(buf, NULL, 0);
1805 }
1806
1807 DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
1808
1809 /*
1810 * Turbo will still be enabled, but won't go below the set value.
1811 */
1812 dev_priv->min_delay = val / 50;
1813
1814 gen6_set_rps(dev, val / 50);
1815
1816 return cnt;
1817}
1818
1819static const struct file_operations i915_min_freq_fops = {
1820 .owner = THIS_MODULE,
1821 .open = simple_open,
1822 .read = i915_min_freq_read,
1823 .write = i915_min_freq_write,
1824 .llseek = default_llseek,
1825};
1826
1827static ssize_t
1768i915_cache_sharing_read(struct file *filp, 1828i915_cache_sharing_read(struct file *filp,
1769 char __user *ubuf, 1829 char __user *ubuf,
1770 size_t max, 1830 size_t max,
@@ -1997,6 +2057,12 @@ int i915_debugfs_init(struct drm_minor *minor)
1997 return ret; 2057 return ret;
1998 2058
1999 ret = i915_debugfs_create(minor->debugfs_root, minor, 2059 ret = i915_debugfs_create(minor->debugfs_root, minor,
2060 "i915_min_freq",
2061 &i915_min_freq_fops);
2062 if (ret)
2063 return ret;
2064
2065 ret = i915_debugfs_create(minor->debugfs_root, minor,
2000 "i915_cache_sharing", 2066 "i915_cache_sharing",
2001 &i915_cache_sharing_fops); 2067 &i915_cache_sharing_fops);
2002 if (ret) 2068 if (ret)
@@ -2028,6 +2094,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
2028 1, minor); 2094 1, minor);
2029 drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops, 2095 drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
2030 1, minor); 2096 1, minor);
2097 drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
2098 1, minor);
2031 drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops, 2099 drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
2032 1, minor); 2100 1, minor);
2033 drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops, 2101 drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
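
i915_min_freq mirrors the existing i915_max_freq file: values are in MHz and divided by 50 to get the RPS delay, so writing e.g. 500 to i915_min_freq (under the device's debugfs directory, typically /sys/kernel/debug/dri/0/) pins the turbo floor at 500 MHz while leaving turbo enabled. The plumbing is the stock simple_open/simple_read_from_buffer debugfs pattern; a minimal sketch with hypothetical demo_* names:

    static ssize_t demo_read(struct file *filp, char __user *ubuf,
                             size_t max, loff_t *ppos)
    {
            char buf[32];
            int len = scnprintf(buf, sizeof(buf), "value: %d\n", 42);

            /* handles *ppos and short reads for us */
            return simple_read_from_buffer(ubuf, max, ppos, buf, len);
    }

    static const struct file_operations demo_fops = {
            .owner = THIS_MODULE,
            .open = simple_open, /* file->private_data = inode->i_private */
            .read = demo_read,
            .llseek = default_llseek,
    };
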
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index f94792626b94..9cf7dfe022b9 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1006,6 +1006,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
1006 case I915_PARAM_HAS_ALIASING_PPGTT: 1006 case I915_PARAM_HAS_ALIASING_PPGTT:
1007 value = dev_priv->mm.aliasing_ppgtt ? 1 : 0; 1007 value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
1008 break; 1008 break;
1009 case I915_PARAM_HAS_WAIT_TIMEOUT:
1010 value = 1;
1011 break;
1009 default: 1012 default:
1010 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 1013 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
1011 param->param); 1014 param->param);
@@ -1082,8 +1085,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
1082 1085
1083 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); 1086 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
1084 1087
1085 dev_priv->dri1.gfx_hws_cpu_addr = ioremap_wc(dev->agp->base + hws->addr, 1088 dev_priv->dri1.gfx_hws_cpu_addr =
1086 4096); 1089 ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096);
1087 if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) { 1090 if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
1088 i915_dma_cleanup(dev); 1091 i915_dma_cleanup(dev);
1089 ring->status_page.gfx_addr = 0; 1092 ring->status_page.gfx_addr = 0;
@@ -1401,6 +1404,27 @@ i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
1401 } 1404 }
1402} 1405}
1403 1406
1407static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1408{
1409 struct apertures_struct *ap;
1410 struct pci_dev *pdev = dev_priv->dev->pdev;
1411 bool primary;
1412
1413 ap = alloc_apertures(1);
1414 if (!ap)
1415 return;
1416
1417 ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr;
1418 ap->ranges[0].size =
1419 dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1420 primary =
1421 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
1422
1423 remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
1424
1425 kfree(ap);
1426}
1427
1404/** 1428/**
1405 * i915_driver_load - setup chip and create an initial config 1429 * i915_driver_load - setup chip and create an initial config
1406 * @dev: DRM device 1430 * @dev: DRM device
@@ -1446,6 +1470,22 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1446 goto free_priv; 1470 goto free_priv;
1447 } 1471 }
1448 1472
1473 ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
1474 if (!ret) {
1475 DRM_ERROR("failed to set up gmch\n");
1476 ret = -EIO;
1477 goto put_bridge;
1478 }
1479
1480 dev_priv->mm.gtt = intel_gtt_get();
1481 if (!dev_priv->mm.gtt) {
1482 DRM_ERROR("Failed to initialize GTT\n");
1483 ret = -ENODEV;
1484 goto put_gmch;
1485 }
1486
1487 i915_kick_out_firmware_fb(dev_priv);
1488
1449 pci_set_master(dev->pdev); 1489 pci_set_master(dev->pdev);
1450 1490
1451 /* overlay on gen2 is broken and can't address above 1G */ 1491 /* overlay on gen2 is broken and can't address above 1G */
@@ -1468,26 +1508,22 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1468 if (!dev_priv->regs) { 1508 if (!dev_priv->regs) {
1469 DRM_ERROR("failed to map registers\n"); 1509 DRM_ERROR("failed to map registers\n");
1470 ret = -EIO; 1510 ret = -EIO;
1471 goto put_bridge; 1511 goto put_gmch;
1472 }
1473
1474 dev_priv->mm.gtt = intel_gtt_get();
1475 if (!dev_priv->mm.gtt) {
1476 DRM_ERROR("Failed to initialize GTT\n");
1477 ret = -ENODEV;
1478 goto out_rmmap;
1479 } 1512 }
1480 1513
1481 aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; 1514 aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1515 dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
1482 1516
1483 dev_priv->mm.gtt_mapping = 1517 dev_priv->mm.gtt_mapping =
1484 io_mapping_create_wc(dev->agp->base, aperture_size); 1518 io_mapping_create_wc(dev_priv->mm.gtt_base_addr,
1519 aperture_size);
1485 if (dev_priv->mm.gtt_mapping == NULL) { 1520 if (dev_priv->mm.gtt_mapping == NULL) {
1486 ret = -EIO; 1521 ret = -EIO;
1487 goto out_rmmap; 1522 goto out_rmmap;
1488 } 1523 }
1489 1524
1490 i915_mtrr_setup(dev_priv, dev->agp->base, aperture_size); 1525 i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
1526 aperture_size);
1491 1527
1492 /* The i915 workqueue is primarily used for batched retirement of 1528 /* The i915 workqueue is primarily used for batched retirement of
1493 * requests (and thus managing bo) once the task has been completed 1529 * requests (and thus managing bo) once the task has been completed
@@ -1511,7 +1547,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1511 goto out_mtrrfree; 1547 goto out_mtrrfree;
1512 } 1548 }
1513 1549
1550 /* This must be called before any calls to HAS_PCH_* */
1551 intel_detect_pch(dev);
1552
1514 intel_irq_init(dev); 1553 intel_irq_init(dev);
1554 intel_gt_init(dev);
1515 1555
1516 /* Try to make sure MCHBAR is enabled before poking at it */ 1556 /* Try to make sure MCHBAR is enabled before poking at it */
1517 intel_setup_mchbar(dev); 1557 intel_setup_mchbar(dev);
@@ -1544,7 +1584,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1544 if (!IS_I945G(dev) && !IS_I945GM(dev)) 1584 if (!IS_I945G(dev) && !IS_I945GM(dev))
1545 pci_enable_msi(dev->pdev); 1585 pci_enable_msi(dev->pdev);
1546 1586
1547 spin_lock_init(&dev_priv->gt_lock);
1548 spin_lock_init(&dev_priv->irq_lock); 1587 spin_lock_init(&dev_priv->irq_lock);
1549 spin_lock_init(&dev_priv->error_lock); 1588 spin_lock_init(&dev_priv->error_lock);
1550 spin_lock_init(&dev_priv->rps_lock); 1589 spin_lock_init(&dev_priv->rps_lock);
@@ -1563,8 +1602,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1563 /* Start out suspended */ 1602 /* Start out suspended */
1564 dev_priv->mm.suspended = 1; 1603 dev_priv->mm.suspended = 1;
1565 1604
1566 intel_detect_pch(dev);
1567
1568 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1605 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1569 ret = i915_load_modeset_init(dev); 1606 ret = i915_load_modeset_init(dev);
1570 if (ret < 0) { 1607 if (ret < 0) {
@@ -1599,13 +1636,16 @@ out_gem_unload:
1599 destroy_workqueue(dev_priv->wq); 1636 destroy_workqueue(dev_priv->wq);
1600out_mtrrfree: 1637out_mtrrfree:
1601 if (dev_priv->mm.gtt_mtrr >= 0) { 1638 if (dev_priv->mm.gtt_mtrr >= 0) {
1602 mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, 1639 mtrr_del(dev_priv->mm.gtt_mtrr,
1603 dev->agp->agp_info.aper_size * 1024 * 1024); 1640 dev_priv->mm.gtt_base_addr,
1641 aperture_size);
1604 dev_priv->mm.gtt_mtrr = -1; 1642 dev_priv->mm.gtt_mtrr = -1;
1605 } 1643 }
1606 io_mapping_free(dev_priv->mm.gtt_mapping); 1644 io_mapping_free(dev_priv->mm.gtt_mapping);
1607out_rmmap: 1645out_rmmap:
1608 pci_iounmap(dev->pdev, dev_priv->regs); 1646 pci_iounmap(dev->pdev, dev_priv->regs);
1647put_gmch:
1648 intel_gmch_remove();
1609put_bridge: 1649put_bridge:
1610 pci_dev_put(dev_priv->bridge_dev); 1650 pci_dev_put(dev_priv->bridge_dev);
1611free_priv: 1651free_priv:
@@ -1637,8 +1677,9 @@ int i915_driver_unload(struct drm_device *dev)
1637 1677
1638 io_mapping_free(dev_priv->mm.gtt_mapping); 1678 io_mapping_free(dev_priv->mm.gtt_mapping);
1639 if (dev_priv->mm.gtt_mtrr >= 0) { 1679 if (dev_priv->mm.gtt_mtrr >= 0) {
1640 mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, 1680 mtrr_del(dev_priv->mm.gtt_mtrr,
1641 dev->agp->agp_info.aper_size * 1024 * 1024); 1681 dev_priv->mm.gtt_base_addr,
1682 dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE);
1642 dev_priv->mm.gtt_mtrr = -1; 1683 dev_priv->mm.gtt_mtrr = -1;
1643 } 1684 }
1644 1685
@@ -1679,6 +1720,7 @@ int i915_driver_unload(struct drm_device *dev)
1679 mutex_lock(&dev->struct_mutex); 1720 mutex_lock(&dev->struct_mutex);
1680 i915_gem_free_all_phys_object(dev); 1721 i915_gem_free_all_phys_object(dev);
1681 i915_gem_cleanup_ringbuffer(dev); 1722 i915_gem_cleanup_ringbuffer(dev);
1723 i915_gem_context_fini(dev);
1682 mutex_unlock(&dev->struct_mutex); 1724 mutex_unlock(&dev->struct_mutex);
1683 i915_gem_cleanup_aliasing_ppgtt(dev); 1725 i915_gem_cleanup_aliasing_ppgtt(dev);
1684 i915_gem_cleanup_stolen(dev); 1726 i915_gem_cleanup_stolen(dev);
@@ -1718,6 +1760,8 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1718 spin_lock_init(&file_priv->mm.lock); 1760 spin_lock_init(&file_priv->mm.lock);
1719 INIT_LIST_HEAD(&file_priv->mm.request_list); 1761 INIT_LIST_HEAD(&file_priv->mm.request_list);
1720 1762
1763 idr_init(&file_priv->context_idr);
1764
1721 return 0; 1765 return 0;
1722} 1766}
1723 1767
@@ -1737,7 +1781,13 @@ void i915_driver_lastclose(struct drm_device * dev)
1737{ 1781{
1738 drm_i915_private_t *dev_priv = dev->dev_private; 1782 drm_i915_private_t *dev_priv = dev->dev_private;
1739 1783
1740 if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { 1784 /* On gen6+ we refuse to init without kms enabled, but then the drm core
1785 * goes right around and calls lastclose. Check for this and don't clean
1786 * up anything. */
1787 if (!dev_priv)
1788 return;
1789
1790 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1741 intel_fb_restore_mode(dev); 1791 intel_fb_restore_mode(dev);
1742 vga_switcheroo_process_delayed_switch(); 1792 vga_switcheroo_process_delayed_switch();
1743 return; 1793 return;
@@ -1750,6 +1800,7 @@ void i915_driver_lastclose(struct drm_device * dev)
1750 1800
1751void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) 1801void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1752{ 1802{
1803 i915_gem_context_close(dev, file_priv);
1753 i915_gem_release(dev, file_priv); 1804 i915_gem_release(dev, file_priv);
1754} 1805}
1755 1806
@@ -1803,6 +1854,9 @@ struct drm_ioctl_desc i915_ioctls[] = {
1803 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 1854 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1804 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 1855 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1805 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 1856 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1857 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
1858 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
1859 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
1806}; 1860};
1807 1861
1808int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 1862int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
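
The load-path reordering lets the driver size the aperture from intel_gtt_get() and then evict any firmware framebuffer (vesafb, efifb) sitting on it before the native fbdev claims the hardware. The core of i915_kick_out_firmware_fb() above is the generic eviction idiom; a sketch, assuming base and size describe the mappable GTT range:

    static void example_kick_out_firmware_fb(struct pci_dev *pdev,
                                             resource_size_t base,
                                             resource_size_t size)
    {
            struct apertures_struct *ap = alloc_apertures(1);

            if (!ap)
                    return;
            ap->ranges[0].base = base;
            ap->ranges[0].size = size;
            /* primary if this device owns the shadowed VGA ROM */
            remove_conflicting_framebuffers(ap, "inteldrmfb",
                    pdev->resource[PCI_ROM_RESOURCE].flags &
                    IORESOURCE_ROM_SHADOW);
            kfree(ap);
    }
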
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9fe9ebe52a7a..a24ffbe97c01 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -32,6 +32,7 @@
32#include "drm.h" 32#include "drm.h"
33#include "i915_drm.h" 33#include "i915_drm.h"
34#include "i915_drv.h" 34#include "i915_drv.h"
35#include "i915_trace.h"
35#include "intel_drv.h" 36#include "intel_drv.h"
36 37
37#include <linux/console.h> 38#include <linux/console.h>
@@ -215,7 +216,6 @@ static const struct intel_device_info intel_ironlake_d_info = {
215 .gen = 5, 216 .gen = 5,
216 .need_gfx_hws = 1, .has_hotplug = 1, 217 .need_gfx_hws = 1, .has_hotplug = 1,
217 .has_bsd_ring = 1, 218 .has_bsd_ring = 1,
218 .has_pch_split = 1,
219}; 219};
220 220
221static const struct intel_device_info intel_ironlake_m_info = { 221static const struct intel_device_info intel_ironlake_m_info = {
@@ -223,7 +223,6 @@ static const struct intel_device_info intel_ironlake_m_info = {
223 .need_gfx_hws = 1, .has_hotplug = 1, 223 .need_gfx_hws = 1, .has_hotplug = 1,
224 .has_fbc = 1, 224 .has_fbc = 1,
225 .has_bsd_ring = 1, 225 .has_bsd_ring = 1,
226 .has_pch_split = 1,
227}; 226};
228 227
229static const struct intel_device_info intel_sandybridge_d_info = { 228static const struct intel_device_info intel_sandybridge_d_info = {
@@ -232,7 +231,6 @@ static const struct intel_device_info intel_sandybridge_d_info = {
232 .has_bsd_ring = 1, 231 .has_bsd_ring = 1,
233 .has_blt_ring = 1, 232 .has_blt_ring = 1,
234 .has_llc = 1, 233 .has_llc = 1,
235 .has_pch_split = 1,
236 .has_force_wake = 1, 234 .has_force_wake = 1,
237}; 235};
238 236
@@ -243,7 +241,6 @@ static const struct intel_device_info intel_sandybridge_m_info = {
243 .has_bsd_ring = 1, 241 .has_bsd_ring = 1,
244 .has_blt_ring = 1, 242 .has_blt_ring = 1,
245 .has_llc = 1, 243 .has_llc = 1,
246 .has_pch_split = 1,
247 .has_force_wake = 1, 244 .has_force_wake = 1,
248}; 245};
249 246
@@ -253,7 +250,6 @@ static const struct intel_device_info intel_ivybridge_d_info = {
253 .has_bsd_ring = 1, 250 .has_bsd_ring = 1,
254 .has_blt_ring = 1, 251 .has_blt_ring = 1,
255 .has_llc = 1, 252 .has_llc = 1,
256 .has_pch_split = 1,
257 .has_force_wake = 1, 253 .has_force_wake = 1,
258}; 254};
259 255
@@ -264,7 +260,6 @@ static const struct intel_device_info intel_ivybridge_m_info = {
264 .has_bsd_ring = 1, 260 .has_bsd_ring = 1,
265 .has_blt_ring = 1, 261 .has_blt_ring = 1,
266 .has_llc = 1, 262 .has_llc = 1,
267 .has_pch_split = 1,
268 .has_force_wake = 1, 263 .has_force_wake = 1,
269}; 264};
270 265
@@ -292,7 +287,6 @@ static const struct intel_device_info intel_haswell_d_info = {
292 .has_bsd_ring = 1, 287 .has_bsd_ring = 1,
293 .has_blt_ring = 1, 288 .has_blt_ring = 1,
294 .has_llc = 1, 289 .has_llc = 1,
295 .has_pch_split = 1,
296 .has_force_wake = 1, 290 .has_force_wake = 1,
297}; 291};
298 292
@@ -302,7 +296,6 @@ static const struct intel_device_info intel_haswell_m_info = {
302 .has_bsd_ring = 1, 296 .has_bsd_ring = 1,
303 .has_blt_ring = 1, 297 .has_blt_ring = 1,
304 .has_llc = 1, 298 .has_llc = 1,
305 .has_pch_split = 1,
306 .has_force_wake = 1, 299 .has_force_wake = 1,
307}; 300};
308 301
@@ -353,11 +346,43 @@ static const struct pci_device_id pciidlist[] = { /* aka */
353 INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ 346 INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
354 INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */ 347 INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
355 INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */ 348 INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
349 INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
356 INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */ 350 INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
357 INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */ 351 INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
352 INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
358 INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */ 353 INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
359 INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */ 354 INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
360 INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */ 355 INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
356 INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
357 INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
358 INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
359 INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
360 INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
361 INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
362 INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
363 INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
364 INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
365 INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
366 INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
367 INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
368 INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
369 INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
370 INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
371 INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
372 INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
373 INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
374 INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
375 INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
376 INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
377 INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
378 INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
379 INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
380 INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
381 INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
382 INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
383 INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
384 INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
385 INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
361 {0, 0, 0} 386 {0, 0, 0}
362}; 387};
363 388
@@ -429,135 +454,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
429 return 1; 454 return 1;
430} 455}
431 456
432void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
433{
434 int count;
435
436 count = 0;
437 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
438 udelay(10);
439
440 I915_WRITE_NOTRACE(FORCEWAKE, 1);
441 POSTING_READ(FORCEWAKE);
442
443 count = 0;
444 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
445 udelay(10);
446}
447
448void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
449{
450 int count;
451
452 count = 0;
453 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
454 udelay(10);
455
456 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
457 POSTING_READ(FORCEWAKE_MT);
458
459 count = 0;
460 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
461 udelay(10);
462}
463
464/*
465 * Generally this is called implicitly by the register read function. However,
466 * if some sequence requires the GT to not power down then this function should
467 * be called at the beginning of the sequence followed by a call to
468 * gen6_gt_force_wake_put() at the end of the sequence.
469 */
470void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
471{
472 unsigned long irqflags;
473
474 spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
475 if (dev_priv->forcewake_count++ == 0)
476 dev_priv->display.force_wake_get(dev_priv);
477 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
478}
479
480static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
481{
482 u32 gtfifodbg;
483 gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
484 if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
485 "MMIO read or write has been dropped %x\n", gtfifodbg))
486 I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
487}
488
489void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
490{
491 I915_WRITE_NOTRACE(FORCEWAKE, 0);
492 /* The below doubles as a POSTING_READ */
493 gen6_gt_check_fifodbg(dev_priv);
494}
495
496void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
497{
498 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
499 /* The below doubles as a POSTING_READ */
500 gen6_gt_check_fifodbg(dev_priv);
501}
502
503/*
504 * see gen6_gt_force_wake_get()
505 */
506void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
507{
508 unsigned long irqflags;
509
510 spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
511 if (--dev_priv->forcewake_count == 0)
512 dev_priv->display.force_wake_put(dev_priv);
513 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
514}
515
516int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
517{
518 int ret = 0;
519
520 if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
521 int loop = 500;
522 u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
523 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
524 udelay(10);
525 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
526 }
527 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
528 ++ret;
529 dev_priv->gt_fifo_count = fifo;
530 }
531 dev_priv->gt_fifo_count--;
532
533 return ret;
534}
535
536void vlv_force_wake_get(struct drm_i915_private *dev_priv)
537{
538 int count;
539
540 count = 0;
541
542 /* Already awake? */
543 if ((I915_READ(0x130094) & 0xa1) == 0xa1)
544 return;
545
546 I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
547 POSTING_READ(FORCEWAKE_VLV);
548
549 count = 0;
550 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
551 udelay(10);
552}
553
554void vlv_force_wake_put(struct drm_i915_private *dev_priv)
555{
556 I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
557 /* FIXME: confirm VLV behavior with Punit folks */
558 POSTING_READ(FORCEWAKE_VLV);
559}
560
561static int i915_drm_freeze(struct drm_device *dev) 457static int i915_drm_freeze(struct drm_device *dev)
562{ 458{
563 struct drm_i915_private *dev_priv = dev->dev_private; 459 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -637,7 +533,7 @@ static int i915_drm_thaw(struct drm_device *dev)
637 533
638 /* KMS EnterVT equivalent */ 534 /* KMS EnterVT equivalent */
639 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 535 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
640 if (HAS_PCH_SPLIT(dev)) 536 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
641 ironlake_init_pch_refclk(dev); 537 ironlake_init_pch_refclk(dev);
642 538
643 mutex_lock(&dev->struct_mutex); 539 mutex_lock(&dev->struct_mutex);
@@ -794,9 +690,9 @@ static int gen6_do_reset(struct drm_device *dev)
794 690
795 /* If reset with a user forcewake, try to restore, otherwise turn it off */ 691 /* If reset with a user forcewake, try to restore, otherwise turn it off */
796 if (dev_priv->forcewake_count) 692 if (dev_priv->forcewake_count)
797 dev_priv->display.force_wake_get(dev_priv); 693 dev_priv->gt.force_wake_get(dev_priv);
798 else 694 else
799 dev_priv->display.force_wake_put(dev_priv); 695 dev_priv->gt.force_wake_put(dev_priv);
800 696
801 /* Restore fifo count */ 697 /* Restore fifo count */
802 dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); 698 dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
@@ -805,7 +701,7 @@ static int gen6_do_reset(struct drm_device *dev)
805 return ret; 701 return ret;
806} 702}
807 703
808static int intel_gpu_reset(struct drm_device *dev) 704int intel_gpu_reset(struct drm_device *dev)
809{ 705{
810 struct drm_i915_private *dev_priv = dev->dev_private; 706 struct drm_i915_private *dev_priv = dev->dev_private;
811 int ret = -ENODEV; 707 int ret = -ENODEV;
@@ -863,10 +759,7 @@ int i915_reset(struct drm_device *dev)
863 if (!i915_try_reset) 759 if (!i915_try_reset)
864 return 0; 760 return 0;
865 761
866 if (!mutex_trylock(&dev->struct_mutex)) 762 mutex_lock(&dev->struct_mutex);
867 return -EBUSY;
868
869 dev_priv->stop_rings = 0;
870 763
871 i915_gem_reset(dev); 764 i915_gem_reset(dev);
872 765
@@ -909,12 +802,16 @@ int i915_reset(struct drm_device *dev)
909 for_each_ring(ring, dev_priv, i) 802 for_each_ring(ring, dev_priv, i)
910 ring->init(ring); 803 ring->init(ring);
911 804
805 i915_gem_context_init(dev);
912 i915_gem_init_ppgtt(dev); 806 i915_gem_init_ppgtt(dev);
913 807
914 mutex_unlock(&dev->struct_mutex); 808 /*
809 * It would make sense to re-init all the other hw state, at
810 * least the rps/rc6/emon init done within modeset_init_hw. For
811 * some unknown reason, this blows up my ilk, so don't.
812 */
915 813
916 if (drm_core_check_feature(dev, DRIVER_MODESET)) 814 mutex_unlock(&dev->struct_mutex);
917 intel_modeset_init_hw(dev);
918 815
919 drm_irq_uninstall(dev); 816 drm_irq_uninstall(dev);
920 drm_irq_install(dev); 817 drm_irq_install(dev);
@@ -925,10 +822,12 @@ int i915_reset(struct drm_device *dev)
925 return 0; 822 return 0;
926} 823}
927 824
928
929static int __devinit 825static int __devinit
930i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 826i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
931{ 827{
828 struct intel_device_info *intel_info =
829 (struct intel_device_info *) ent->driver_data;
830
932 /* Only bind to function 0 of the device. Early generations 831 /* Only bind to function 0 of the device. Early generations
933 * used function 1 as a placeholder for multi-head. This causes 832 * used function 1 as a placeholder for multi-head. This causes
934 * us confusion instead, especially on the systems where both 833 * us confusion instead, especially on the systems where both
@@ -937,6 +836,18 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
937 if (PCI_FUNC(pdev->devfn)) 836 if (PCI_FUNC(pdev->devfn))
938 return -ENODEV; 837 return -ENODEV;
939 838
839 /* We've managed to ship a kms-enabled ddx that shipped with an XvMC
840 * implementation for gen3 (and only gen3) that used legacy drm maps
841 * (gasp!) to share buffers between X and the client. Hence we need to
842 * keep around the fake agp stuff for gen3, even when kms is enabled. */
843 if (intel_info->gen != 3) {
844 driver.driver_features &=
845 ~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
846 } else if (!intel_agp_enabled) {
847 DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
848 return -ENODEV;
849 }
850
940 return drm_get_pci_dev(pdev, ent, &driver); 851 return drm_get_pci_dev(pdev, ent, &driver);
941} 852}
942 853
@@ -1058,7 +969,6 @@ static struct drm_driver driver = {
1058 .resume = i915_resume, 969 .resume = i915_resume,
1059 970
1060 .device_is_agp = i915_driver_device_is_agp, 971 .device_is_agp = i915_driver_device_is_agp,
1061 .reclaim_buffers = drm_core_reclaim_buffers,
1062 .master_create = i915_master_create, 972 .master_create = i915_master_create,
1063 .master_destroy = i915_master_destroy, 973 .master_destroy = i915_master_destroy,
1064#if defined(CONFIG_DEBUG_FS) 974#if defined(CONFIG_DEBUG_FS)
@@ -1097,11 +1007,6 @@ static struct pci_driver i915_pci_driver = {
1097 1007
1098static int __init i915_init(void) 1008static int __init i915_init(void)
1099{ 1009{
1100 if (!intel_agp_enabled) {
1101 DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
1102 return -ENODEV;
1103 }
1104
1105 driver.num_ioctls = i915_max_ioctl; 1010 driver.num_ioctls = i915_max_ioctl;
1106 1011
1107 /* 1012 /*
@@ -1149,6 +1054,84 @@ MODULE_LICENSE("GPL and additional rights");
1149 ((reg) < 0x40000) && \ 1054 ((reg) < 0x40000) && \
1150 ((reg) != FORCEWAKE)) 1055 ((reg) != FORCEWAKE))
1151 1056
1057static bool IS_DISPLAYREG(u32 reg)
1058{
1059 /*
1060 * This should make it easier to transition modules over to the
1061 * new register block scheme, since we can do it incrementally.
1062 */
1063 if (reg >= 0x180000)
1064 return false;
1065
1066 if (reg >= RENDER_RING_BASE &&
1067 reg < RENDER_RING_BASE + 0xff)
1068 return false;
1069 if (reg >= GEN6_BSD_RING_BASE &&
1070 reg < GEN6_BSD_RING_BASE + 0xff)
1071 return false;
1072 if (reg >= BLT_RING_BASE &&
1073 reg < BLT_RING_BASE + 0xff)
1074 return false;
1075
1076 if (reg == PGTBL_ER)
1077 return false;
1078
1079 if (reg >= IPEIR_I965 &&
1080 reg < HWSTAM)
1081 return false;
1082
1083 if (reg == MI_MODE)
1084 return false;
1085
1086 if (reg == GFX_MODE_GEN7)
1087 return false;
1088
1089 if (reg == RENDER_HWS_PGA_GEN7 ||
1090 reg == BSD_HWS_PGA_GEN7 ||
1091 reg == BLT_HWS_PGA_GEN7)
1092 return false;
1093
1094 if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
1095 reg == GEN6_BSD_RNCID)
1096 return false;
1097
1098 if (reg == GEN6_BLITTER_ECOSKPD)
1099 return false;
1100
1101 if (reg >= 0x4000c &&
1102 reg <= 0x4002c)
1103 return false;
1104
1105 if (reg >= 0x4f000 &&
1106 reg <= 0x4f08f)
1107 return false;
1108
1109 if (reg >= 0x4f100 &&
1110 reg <= 0x4f11f)
1111 return false;
1112
1113 if (reg >= VLV_MASTER_IER &&
1114 reg <= GEN6_PMIER)
1115 return false;
1116
1117 if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
1118 reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
1119 return false;
1120
1121 if (reg >= VLV_IIR_RW &&
1122 reg <= VLV_ISR)
1123 return false;
1124
1125 if (reg == FORCEWAKE_VLV ||
1126 reg == FORCEWAKE_ACK_VLV)
1127 return false;
1128
1129 if (reg == GEN6_GDRST)
1130 return false;
1131
1132 return true;
1133}
1134
1152#define __i915_read(x, y) \ 1135#define __i915_read(x, y) \
1153u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ 1136u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
1154 u##x val = 0; \ 1137 u##x val = 0; \
@@ -1156,11 +1139,13 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
1156 unsigned long irqflags; \ 1139 unsigned long irqflags; \
1157 spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ 1140 spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
1158 if (dev_priv->forcewake_count == 0) \ 1141 if (dev_priv->forcewake_count == 0) \
1159 dev_priv->display.force_wake_get(dev_priv); \ 1142 dev_priv->gt.force_wake_get(dev_priv); \
1160 val = read##y(dev_priv->regs + reg); \ 1143 val = read##y(dev_priv->regs + reg); \
1161 if (dev_priv->forcewake_count == 0) \ 1144 if (dev_priv->forcewake_count == 0) \
1162 dev_priv->display.force_wake_put(dev_priv); \ 1145 dev_priv->gt.force_wake_put(dev_priv); \
1163 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ 1146 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
1147 } else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
1148 val = read##y(dev_priv->regs + reg + 0x180000); \
1164 } else { \ 1149 } else { \
1165 val = read##y(dev_priv->regs + reg); \ 1150 val = read##y(dev_priv->regs + reg); \
1166 } \ 1151 } \
@@ -1181,7 +1166,11 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
1181 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 1166 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1182 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ 1167 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
1183 } \ 1168 } \
1184 write##y(val, dev_priv->regs + reg); \ 1169 if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
1170 write##y(val, dev_priv->regs + reg + 0x180000); \
1171 } else { \
1172 write##y(val, dev_priv->regs + reg); \
1173 } \
1185 if (unlikely(__fifo_ret)) { \ 1174 if (unlikely(__fifo_ret)) { \
1186 gen6_gt_check_fifodbg(dev_priv); \ 1175 gen6_gt_check_fifodbg(dev_priv); \
1187 } \ 1176 } \
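
Two transitions meet in this file: the forcewake callbacks move from dev_priv->display into a dedicated dev_priv->gt ops struct, and Valleyview's relocated display block is handled by displacing whitelisted registers by 0x180000 inside the accessor macros. A sketch of the VLV remap on the read side, assuming IS_DISPLAYREG() from above:

    static inline u32 demo_read32(struct drm_i915_private *dev_priv, u32 reg)
    {
            if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg))
                    reg += 0x180000; /* display block moved on VLV */
            return readl(dev_priv->regs + reg);
    }
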
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b0b676abde0d..627fe35781b4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -79,6 +79,10 @@ enum port {
79 79
80#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) 80#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
81 81
82#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
83 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
84 if ((intel_encoder)->base.crtc == (__crtc))
85
82struct intel_pch_pll { 86struct intel_pch_pll {
83 int refcount; /* count of number of CRTCs sharing this PLL */ 87 int refcount; /* count of number of CRTCs sharing this PLL */
84 int active; /* count of number of active CRTCs (i.e. DPMS on) */ 88 int active; /* count of number of active CRTCs (i.e. DPMS on) */
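
for_each_encoder_on_crtc() wraps the common list walk that filters the global encoder list down to the encoders currently bound to one CRTC. Typical use, sketched as a fragment with dev and crtc assumed in scope:

    struct intel_encoder *intel_encoder;
    int num_encoders = 0;

    for_each_encoder_on_crtc(dev, crtc, intel_encoder)
            num_encoders++; /* visits only encoders whose base.crtc == crtc */
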
@@ -176,6 +180,7 @@ struct drm_i915_error_state {
176 u32 eir; 180 u32 eir;
177 u32 pgtbl_er; 181 u32 pgtbl_er;
178 u32 ier; 182 u32 ier;
183 u32 ccid;
179 bool waiting[I915_NUM_RINGS]; 184 bool waiting[I915_NUM_RINGS];
180 u32 pipestat[I915_MAX_PIPES]; 185 u32 pipestat[I915_MAX_PIPES];
181 u32 tail[I915_NUM_RINGS]; 186 u32 tail[I915_NUM_RINGS];
@@ -185,6 +190,7 @@ struct drm_i915_error_state {
185 u32 instdone[I915_NUM_RINGS]; 190 u32 instdone[I915_NUM_RINGS];
186 u32 acthd[I915_NUM_RINGS]; 191 u32 acthd[I915_NUM_RINGS];
187 u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1]; 192 u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
193 u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
188 /* our own tracking of ring head and tail */ 194 /* our own tracking of ring head and tail */
189 u32 cpu_ring_head[I915_NUM_RINGS]; 195 u32 cpu_ring_head[I915_NUM_RINGS];
190 u32 cpu_ring_tail[I915_NUM_RINGS]; 196 u32 cpu_ring_tail[I915_NUM_RINGS];
@@ -261,8 +267,6 @@ struct drm_i915_display_funcs {
261 struct drm_i915_gem_object *obj); 267 struct drm_i915_gem_object *obj);
262 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, 268 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
263 int x, int y); 269 int x, int y);
264 void (*force_wake_get)(struct drm_i915_private *dev_priv);
265 void (*force_wake_put)(struct drm_i915_private *dev_priv);
266 /* clock updates for mode set */ 270 /* clock updates for mode set */
267 /* cursor updates */ 271 /* cursor updates */
268 /* render clock increase/decrease */ 272 /* render clock increase/decrease */
@@ -270,6 +274,11 @@ struct drm_i915_display_funcs {
270 /* pll clock increase/decrease */ 274 /* pll clock increase/decrease */
271}; 275};
272 276
277struct drm_i915_gt_funcs {
278 void (*force_wake_get)(struct drm_i915_private *dev_priv);
279 void (*force_wake_put)(struct drm_i915_private *dev_priv);
280};
281
273struct intel_device_info { 282struct intel_device_info {
274 u8 gen; 283 u8 gen;
275 u8 is_mobile:1; 284 u8 is_mobile:1;
@@ -284,7 +293,6 @@ struct intel_device_info {
284 u8 is_crestline:1; 293 u8 is_crestline:1;
285 u8 is_ivybridge:1; 294 u8 is_ivybridge:1;
286 u8 is_valleyview:1; 295 u8 is_valleyview:1;
287 u8 has_pch_split:1;
288 u8 has_force_wake:1; 296 u8 has_force_wake:1;
289 u8 is_haswell:1; 297 u8 is_haswell:1;
290 u8 has_fbc:1; 298 u8 has_fbc:1;
@@ -309,6 +317,17 @@ struct i915_hw_ppgtt {
309 dma_addr_t scratch_page_dma_addr; 317 dma_addr_t scratch_page_dma_addr;
310}; 318};
311 319
320
321/* This must match up with the value previously used for execbuf2.rsvd1. */
322#define DEFAULT_CONTEXT_ID 0
323struct i915_hw_context {
324 int id;
325 bool is_initialized;
326 struct drm_i915_file_private *file_priv;
327 struct intel_ring_buffer *ring;
328 struct drm_i915_gem_object *obj;
329};
330
312enum no_fbc_reason { 331enum no_fbc_reason {
313 FBC_NO_OUTPUT, /* no outputs enabled to compress */ 332 FBC_NO_OUTPUT, /* no outputs enabled to compress */
314 FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ 333 FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
@@ -321,6 +340,7 @@ enum no_fbc_reason {
321}; 340};
322 341
323enum intel_pch { 342enum intel_pch {
343 PCH_NONE = 0, /* No PCH present */
324 PCH_IBX, /* Ibexpeak PCH */ 344 PCH_IBX, /* Ibexpeak PCH */
325 PCH_CPT, /* Cougarpoint PCH */ 345 PCH_CPT, /* Cougarpoint PCH */
326 PCH_LPT, /* Lynxpoint PCH */ 346 PCH_LPT, /* Lynxpoint PCH */
@@ -350,6 +370,8 @@ typedef struct drm_i915_private {
350 int relative_constants_mode; 370 int relative_constants_mode;
351 371
352 void __iomem *regs; 372 void __iomem *regs;
373
374 struct drm_i915_gt_funcs gt;
353 /** gt_fifo_count and the subsequent register write are synchronized 375 /** gt_fifo_count and the subsequent register write are synchronized
354 * with dev->struct_mutex. */ 376 * with dev->struct_mutex. */
355 unsigned gt_fifo_count; 377 unsigned gt_fifo_count;
@@ -652,11 +674,14 @@ typedef struct drm_i915_private {
652 unsigned long gtt_end; 674 unsigned long gtt_end;
653 675
654 struct io_mapping *gtt_mapping; 676 struct io_mapping *gtt_mapping;
677 phys_addr_t gtt_base_addr;
655 int gtt_mtrr; 678 int gtt_mtrr;
656 679
657 /** PPGTT used for aliasing the PPGTT with the GTT */ 680 /** PPGTT used for aliasing the PPGTT with the GTT */
658 struct i915_hw_ppgtt *aliasing_ppgtt; 681 struct i915_hw_ppgtt *aliasing_ppgtt;
659 682
683 u32 *l3_remap_info;
684
660 struct shrinker inactive_shrinker; 685 struct shrinker inactive_shrinker;
661 686
662 /** 687 /**
@@ -817,6 +842,10 @@ typedef struct drm_i915_private {
817 842
818 struct drm_property *broadcast_rgb_property; 843 struct drm_property *broadcast_rgb_property;
819 struct drm_property *force_audio_property; 844 struct drm_property *force_audio_property;
845
846 struct work_struct parity_error_work;
847 bool hw_contexts_disabled;
848 uint32_t hw_context_size;
820} drm_i915_private_t; 849} drm_i915_private_t;
821 850
822/* Iterate over initialised rings */ 851/* Iterate over initialised rings */
@@ -1026,6 +1055,7 @@ struct drm_i915_file_private {
1026 struct spinlock lock; 1055 struct spinlock lock;
1027 struct list_head request_list; 1056 struct list_head request_list;
1028 } mm; 1057 } mm;
1058 struct idr context_idr;
1029}; 1059};
1030 1060
1031#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) 1061#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
@@ -1071,7 +1101,8 @@ struct drm_i915_file_private {
1071#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) 1101#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
1072#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 1102#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1073 1103
1074#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6) 1104#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
1105#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev))
1075 1106
1076#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 1107#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
1077#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 1108#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
@@ -1094,13 +1125,13 @@ struct drm_i915_file_private {
1094#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 1125#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1095#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 1126#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1096 1127
1097#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split)
1098#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) 1128#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
1099 1129
1100#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) 1130#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
1101#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) 1131#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
1102#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 1132#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1103#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 1133#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
1134#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
1104 1135
1105#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) 1136#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
1106 1137
@@ -1166,6 +1197,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
1166extern int i915_emit_box(struct drm_device *dev, 1197extern int i915_emit_box(struct drm_device *dev,
1167 struct drm_clip_rect *box, 1198 struct drm_clip_rect *box,
1168 int DR1, int DR4); 1199 int DR1, int DR4);
1200extern int intel_gpu_reset(struct drm_device *dev);
1169extern int i915_reset(struct drm_device *dev); 1201extern int i915_reset(struct drm_device *dev);
1170extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 1202extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
1171extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 1203extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
@@ -1178,6 +1210,7 @@ void i915_hangcheck_elapsed(unsigned long data);
1178void i915_handle_error(struct drm_device *dev, bool wedged); 1210void i915_handle_error(struct drm_device *dev, bool wedged);
1179 1211
1180extern void intel_irq_init(struct drm_device *dev); 1212extern void intel_irq_init(struct drm_device *dev);
1213extern void intel_gt_init(struct drm_device *dev);
1181 1214
1182void i915_error_state_free(struct kref *error_ref); 1215void i915_error_state_free(struct kref *error_ref);
1183 1216
@@ -1237,6 +1270,8 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data,
1237 struct drm_file *file_priv); 1270 struct drm_file *file_priv);
1238int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 1271int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
1239 struct drm_file *file_priv); 1272 struct drm_file *file_priv);
1273int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
1274 struct drm_file *file_priv);
1240void i915_gem_load(struct drm_device *dev); 1275void i915_gem_load(struct drm_device *dev);
1241int i915_gem_init_object(struct drm_gem_object *obj); 1276int i915_gem_init_object(struct drm_gem_object *obj);
1242int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring, 1277int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
@@ -1306,6 +1341,8 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
1306 1341
1307void i915_gem_retire_requests(struct drm_device *dev); 1342void i915_gem_retire_requests(struct drm_device *dev);
1308void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); 1343void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
1344int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
1345 bool interruptible);
1309 1346
1310void i915_gem_reset(struct drm_device *dev); 1347void i915_gem_reset(struct drm_device *dev);
1311void i915_gem_clflush_object(struct drm_i915_gem_object *obj); 1348void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
@@ -1315,6 +1352,7 @@ int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
1315int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); 1352int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
1316int __must_check i915_gem_init(struct drm_device *dev); 1353int __must_check i915_gem_init(struct drm_device *dev);
1317int __must_check i915_gem_init_hw(struct drm_device *dev); 1354int __must_check i915_gem_init_hw(struct drm_device *dev);
1355void i915_gem_l3_remap(struct drm_device *dev);
1318void i915_gem_init_swizzling(struct drm_device *dev); 1356void i915_gem_init_swizzling(struct drm_device *dev);
1319void i915_gem_init_ppgtt(struct drm_device *dev); 1357void i915_gem_init_ppgtt(struct drm_device *dev);
1320void i915_gem_cleanup_ringbuffer(struct drm_device *dev); 1358void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
@@ -1323,8 +1361,8 @@ int __must_check i915_gem_idle(struct drm_device *dev);
1323int __must_check i915_add_request(struct intel_ring_buffer *ring, 1361int __must_check i915_add_request(struct intel_ring_buffer *ring,
1324 struct drm_file *file, 1362 struct drm_file *file,
1325 struct drm_i915_gem_request *request); 1363 struct drm_i915_gem_request *request);
1326int __must_check i915_wait_request(struct intel_ring_buffer *ring, 1364int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
1327 uint32_t seqno); 1365 uint32_t seqno);
1328int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 1366int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
1329int __must_check 1367int __must_check
1330i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, 1368i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -1358,6 +1396,16 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
1358struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 1396struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
1359 struct drm_gem_object *gem_obj, int flags); 1397 struct drm_gem_object *gem_obj, int flags);
1360 1398
1399/* i915_gem_context.c */
1400void i915_gem_context_init(struct drm_device *dev);
1401void i915_gem_context_fini(struct drm_device *dev);
1402void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
1403int i915_switch_context(struct intel_ring_buffer *ring,
1404 struct drm_file *file, int to_id);
1405int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
1406 struct drm_file *file);
1407int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
1408 struct drm_file *file);
1361 1409
1362/* i915_gem_gtt.c */ 1410/* i915_gem_gtt.c */
1363int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev); 1411int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
@@ -1475,20 +1523,12 @@ extern bool intel_fbc_enabled(struct drm_device *dev);
1475extern void intel_disable_fbc(struct drm_device *dev); 1523extern void intel_disable_fbc(struct drm_device *dev);
1476extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 1524extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
1477extern void ironlake_init_pch_refclk(struct drm_device *dev); 1525extern void ironlake_init_pch_refclk(struct drm_device *dev);
1478extern void ironlake_enable_rc6(struct drm_device *dev);
1479extern void gen6_set_rps(struct drm_device *dev, u8 val); 1526extern void gen6_set_rps(struct drm_device *dev, u8 val);
1480extern void intel_detect_pch(struct drm_device *dev); 1527extern void intel_detect_pch(struct drm_device *dev);
1481extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); 1528extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
1482extern int intel_enable_rc6(const struct drm_device *dev); 1529extern int intel_enable_rc6(const struct drm_device *dev);
1483 1530
1484extern bool i915_semaphore_is_enabled(struct drm_device *dev); 1531extern bool i915_semaphore_is_enabled(struct drm_device *dev);
1485extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
1486extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
1487extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
1488extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
1489
1490extern void vlv_force_wake_get(struct drm_i915_private *dev_priv);
1491extern void vlv_force_wake_put(struct drm_i915_private *dev_priv);
1492 1532
1493/* overlay */ 1533/* overlay */
1494#ifdef CONFIG_DEBUG_FS 1534#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 288d7b8f49ae..489e2b162b27 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -96,9 +96,18 @@ i915_gem_wait_for_error(struct drm_device *dev)
96 if (!atomic_read(&dev_priv->mm.wedged)) 96 if (!atomic_read(&dev_priv->mm.wedged))
97 return 0; 97 return 0;
98 98
99 ret = wait_for_completion_interruptible(x); 99 /*
100 if (ret) 100 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
101 * userspace. If it takes that long something really bad is going on and
102 * we should simply try to bail out and fail as gracefully as possible.
103 */
104 ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
105 if (ret == 0) {
106 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
107 return -EIO;
108 } else if (ret < 0) {
101 return ret; 109 return ret;
110 }
102 111
103 if (atomic_read(&dev_priv->mm.wedged)) { 112 if (atomic_read(&dev_priv->mm.wedged)) {
104 /* GPU is hung, bump the completion count to account for 113 /* GPU is hung, bump the completion count to account for
@@ -1122,7 +1131,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1122 1131
1123 obj->fault_mappable = true; 1132 obj->fault_mappable = true;
1124 1133
1125 pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) + 1134 pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) +
1126 page_offset; 1135 page_offset;
1127 1136
1128 /* Finally, remap it using the new GTT offset */ 1137 /* Finally, remap it using the new GTT offset */
@@ -1132,6 +1141,11 @@ unlock:
1132out: 1141out:
1133 switch (ret) { 1142 switch (ret) {
1134 case -EIO: 1143 case -EIO:
1144 /* If this -EIO is due to a gpu hang, give the reset code a
1145 * chance to clean up the mess. Otherwise return the proper
1146 * SIGBUS. */
1147 if (!atomic_read(&dev_priv->mm.wedged))
1148 return VM_FAULT_SIGBUS;
1135 case -EAGAIN: 1149 case -EAGAIN:
1136 /* Give the error handler a chance to run and move the 1150 /* Give the error handler a chance to run and move the
1137 * objects off the GPU active list. Next time we service the 1151 * objects off the GPU active list. Next time we service the
@@ -1568,6 +1582,21 @@ i915_add_request(struct intel_ring_buffer *ring,
1568 int was_empty; 1582 int was_empty;
1569 int ret; 1583 int ret;
1570 1584
1585 /*
1586 * Emit any outstanding flushes - execbuf can fail to emit the flush
1587 * after having emitted the batchbuffer command. Hence we need to fix
1588 * things up similar to emitting the lazy request. The difference here
1589 * is that the flush _must_ happen before the next request, no matter
1590 * what.
1591 */
1592 if (ring->gpu_caches_dirty) {
1593 ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
1594 if (ret)
1595 return ret;
1596
1597 ring->gpu_caches_dirty = false;
1598 }
1599
1571 BUG_ON(request == NULL); 1600 BUG_ON(request == NULL);
1572 seqno = i915_gem_next_request_seqno(ring); 1601 seqno = i915_gem_next_request_seqno(ring);
1573 1602
@@ -1613,6 +1642,9 @@ i915_add_request(struct intel_ring_buffer *ring,
1613 queue_delayed_work(dev_priv->wq, 1642 queue_delayed_work(dev_priv->wq,
1614 &dev_priv->mm.retire_work, HZ); 1643 &dev_priv->mm.retire_work, HZ);
1615 } 1644 }
1645
1646 WARN_ON(!list_empty(&ring->gpu_write_list));
1647
1616 return 0; 1648 return 0;
1617} 1649}
1618 1650
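The hunk above replaces eager flushing with lazy flushing: execbuf merely marks the ring's caches dirty, and whoever emits the next request performs the flush first. A minimal sketch of the pattern, with hypothetical emit_flush()/emit_request() standing in for the real ring callbacks:

    /* Sketch only: deferred GPU cache flushing keyed off a dirty flag. */
    struct ring {
        int gpu_caches_dirty;
    };

    static int emit_flush(struct ring *r)   { return 0; /* stand-in */ }
    static int emit_request(struct ring *r) { return 0; /* stand-in */ }

    static int add_request(struct ring *r)
    {
        if (r->gpu_caches_dirty) {
            /* The flush must land before the breadcrumb, no matter what. */
            int ret = emit_flush(r);
            if (ret)
                return ret;
            r->gpu_caches_dirty = 0;
        }
        return emit_request(r);
    }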
@@ -1827,14 +1859,11 @@ i915_gem_retire_work_handler(struct work_struct *work)
1827 */ 1859 */
1828 idle = true; 1860 idle = true;
1829 for_each_ring(ring, dev_priv, i) { 1861 for_each_ring(ring, dev_priv, i) {
1830 if (!list_empty(&ring->gpu_write_list)) { 1862 if (ring->gpu_caches_dirty) {
1831 struct drm_i915_gem_request *request; 1863 struct drm_i915_gem_request *request;
1832 int ret;
1833 1864
1834 ret = i915_gem_flush_ring(ring,
1835 0, I915_GEM_GPU_DOMAINS);
1836 request = kzalloc(sizeof(*request), GFP_KERNEL); 1865 request = kzalloc(sizeof(*request), GFP_KERNEL);
1837 if (ret || request == NULL || 1866 if (request == NULL ||
1838 i915_add_request(ring, NULL, request)) 1867 i915_add_request(ring, NULL, request))
1839 kfree(request); 1868 kfree(request);
1840 } 1869 }
@@ -1848,11 +1877,10 @@ i915_gem_retire_work_handler(struct work_struct *work)
1848 mutex_unlock(&dev->struct_mutex); 1877 mutex_unlock(&dev->struct_mutex);
1849} 1878}
1850 1879
1851static int 1880int
1852i915_gem_check_wedge(struct drm_i915_private *dev_priv) 1881i915_gem_check_wedge(struct drm_i915_private *dev_priv,
1882 bool interruptible)
1853{ 1883{
1854 BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
1855
1856 if (atomic_read(&dev_priv->mm.wedged)) { 1884 if (atomic_read(&dev_priv->mm.wedged)) {
1857 struct completion *x = &dev_priv->error_completion; 1885 struct completion *x = &dev_priv->error_completion;
1858 bool recovery_complete; 1886 bool recovery_complete;
@@ -1863,7 +1891,16 @@ i915_gem_check_wedge(struct drm_i915_private *dev_priv)
1863 recovery_complete = x->done > 0; 1891 recovery_complete = x->done > 0;
1864 spin_unlock_irqrestore(&x->wait.lock, flags); 1892 spin_unlock_irqrestore(&x->wait.lock, flags);
1865 1893
1866 return recovery_complete ? -EIO : -EAGAIN; 1894 /* Non-interruptible callers can't handle -EAGAIN, hence return
1895 * -EIO unconditionally for these. */
1896 if (!interruptible)
1897 return -EIO;
1898
1899 /* Recovery complete, but still wedged means reset failure. */
1900 if (recovery_complete)
1901 return -EIO;
1902
1903 return -EAGAIN;
1867 } 1904 }
1868 1905
1869 return 0; 1906 return 0;
@@ -1899,34 +1936,85 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
1899 return ret; 1936 return ret;
1900} 1937}
1901 1938
1939/**
1940 * __wait_seqno - wait until execution of seqno has finished
1941 * @ring: the ring expected to report seqno
1942 * @seqno: seqno of the request to wait for
1943 * @interruptible: do an interruptible wait (normally yes)
1944 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1945 *
1946 * Returns 0 if the seqno was found within the allotted time. Else returns the
1947 * errno with remaining time filled in timeout argument.
1948 */
1902static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, 1949static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1903 bool interruptible) 1950 bool interruptible, struct timespec *timeout)
1904{ 1951{
1905 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1952 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1906 int ret = 0; 1953 struct timespec before, now, wait_time={1,0};
1954 unsigned long timeout_jiffies;
1955 long end;
1956 bool wait_forever = true;
1957 int ret;
1907 1958
1908 if (i915_seqno_passed(ring->get_seqno(ring), seqno)) 1959 if (i915_seqno_passed(ring->get_seqno(ring), seqno))
1909 return 0; 1960 return 0;
1910 1961
1911 trace_i915_gem_request_wait_begin(ring, seqno); 1962 trace_i915_gem_request_wait_begin(ring, seqno);
1963
1964 if (timeout != NULL) {
1965 wait_time = *timeout;
1966 wait_forever = false;
1967 }
1968
1969 timeout_jiffies = timespec_to_jiffies(&wait_time);
1970
1912 if (WARN_ON(!ring->irq_get(ring))) 1971 if (WARN_ON(!ring->irq_get(ring)))
1913 return -ENODEV; 1972 return -ENODEV;
1914 1973
1974 /* Record current time in case interrupted by signal or wedged */
1975 getrawmonotonic(&before);
1976
1915#define EXIT_COND \ 1977#define EXIT_COND \
1916 (i915_seqno_passed(ring->get_seqno(ring), seqno) || \ 1978 (i915_seqno_passed(ring->get_seqno(ring), seqno) || \
1917 atomic_read(&dev_priv->mm.wedged)) 1979 atomic_read(&dev_priv->mm.wedged))
1980 do {
1981 if (interruptible)
1982 end = wait_event_interruptible_timeout(ring->irq_queue,
1983 EXIT_COND,
1984 timeout_jiffies);
1985 else
1986 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1987 timeout_jiffies);
1918 1988
1919 if (interruptible) 1989 ret = i915_gem_check_wedge(dev_priv, interruptible);
1920 ret = wait_event_interruptible(ring->irq_queue, 1990 if (ret)
1921 EXIT_COND); 1991 end = ret;
1922 else 1992 } while (end == 0 && wait_forever);
1923 wait_event(ring->irq_queue, EXIT_COND); 1993
1994 getrawmonotonic(&now);
1924 1995
1925 ring->irq_put(ring); 1996 ring->irq_put(ring);
1926 trace_i915_gem_request_wait_end(ring, seqno); 1997 trace_i915_gem_request_wait_end(ring, seqno);
1927#undef EXIT_COND 1998#undef EXIT_COND
1928 1999
1929 return ret; 2000 if (timeout) {
2001 struct timespec sleep_time = timespec_sub(now, before);
2002 *timeout = timespec_sub(*timeout, sleep_time);
2003 }
2004
2005 switch (end) {
2006 case -EIO:
2007 case -EAGAIN: /* Wedged */
2008 case -ERESTARTSYS: /* Signal */
2009 return (int)end;
2010 case 0: /* Timeout */
2011 if (timeout)
2012 set_normalized_timespec(timeout, 0, 0);
2013 return -ETIME;
2014 default: /* Completed */
2015 WARN_ON(end < 0); /* We're not aware of other errors */
2016 return 0;
2017 }
1930} 2018}
1931 2019
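The remaining-time bookkeeping in __wait_seqno samples a raw monotonic clock around the wait and subtracts the elapsed span from the caller's budget, normalizing the result the way set_normalized_timespec() does. The same arithmetic in a standalone userspace sketch (clock_gettime() stands in for getrawmonotonic()):

    /* Sketch only: computing the time remaining after a bounded wait. */
    #include <stdio.h>
    #include <time.h>

    static struct timespec ts_sub(struct timespec a, struct timespec b)
    {
        struct timespec r = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };
        if (r.tv_nsec < 0) {            /* normalize into [0, 1s) */
            r.tv_sec--;
            r.tv_nsec += 1000000000L;
        }
        return r;
    }

    int main(void)
    {
        struct timespec before, now, budget = { 1, 0 };   /* 1 second */

        clock_gettime(CLOCK_MONOTONIC_RAW, &before);
        /* ... the bounded wait would happen here ... */
        clock_gettime(CLOCK_MONOTONIC_RAW, &now);

        budget = ts_sub(budget, ts_sub(now, before));
        printf("remaining: %ld.%09lds\n", (long)budget.tv_sec, budget.tv_nsec);
        return 0;
    }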
1932/** 2020/**
@@ -1934,15 +2022,14 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1934 * request and object lists appropriately for that event. 2022 * request and object lists appropriately for that event.
1935 */ 2023 */
1936int 2024int
1937i915_wait_request(struct intel_ring_buffer *ring, 2025i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1938 uint32_t seqno)
1939{ 2026{
1940 drm_i915_private_t *dev_priv = ring->dev->dev_private; 2027 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1941 int ret = 0; 2028 int ret = 0;
1942 2029
1943 BUG_ON(seqno == 0); 2030 BUG_ON(seqno == 0);
1944 2031
1945 ret = i915_gem_check_wedge(dev_priv); 2032 ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
1946 if (ret) 2033 if (ret)
1947 return ret; 2034 return ret;
1948 2035
@@ -1950,9 +2037,7 @@ i915_wait_request(struct intel_ring_buffer *ring,
1950 if (ret) 2037 if (ret)
1951 return ret; 2038 return ret;
1952 2039
1953 ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible); 2040 ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
1954 if (atomic_read(&dev_priv->mm.wedged))
1955 ret = -EAGAIN;
1956 2041
1957 return ret; 2042 return ret;
1958} 2043}
@@ -1975,7 +2060,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
1975 * it. 2060 * it.
1976 */ 2061 */
1977 if (obj->active) { 2062 if (obj->active) {
1978 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno); 2063 ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
1979 if (ret) 2064 if (ret)
1980 return ret; 2065 return ret;
1981 i915_gem_retire_requests_ring(obj->ring); 2066 i915_gem_retire_requests_ring(obj->ring);
@@ -1985,6 +2070,115 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
1985} 2070}
1986 2071
1987/** 2072/**
2073 * Ensures that an object will eventually get non-busy by flushing any required
2074 * write domains, emitting any outstanding lazy request and retiring any
2075 * completed requests.
2076 */
2077static int
2078i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2079{
2080 int ret;
2081
2082 if (obj->active) {
2083 ret = i915_gem_object_flush_gpu_write_domain(obj);
2084 if (ret)
2085 return ret;
2086
2087 ret = i915_gem_check_olr(obj->ring,
2088 obj->last_rendering_seqno);
2089 if (ret)
2090 return ret;
2091 i915_gem_retire_requests_ring(obj->ring);
2092 }
2093
2094 return 0;
2095}
2096
2097/**
2098 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2099 * @DRM_IOCTL_ARGS: standard ioctl arguments
2100 *
2101 * Returns 0 if successful, else an error is returned with the remaining time in
2102 * the timeout parameter.
2103 * -ETIME: object is still busy after timeout
2104 * -ERESTARTSYS: signal interrupted the wait
2105 * -ENOENT: object doesn't exist
2106 * Also possible, but rare:
2107 * -EAGAIN: GPU wedged
2108 * -ENOMEM: allocation failure
2109 * -ENODEV: Internal IRQ fail
2110 * -E?: The add request failed
2111 *
2112 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2113 * non-zero timeout parameter the wait ioctl will wait for the given number of
2114 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2115 * without holding struct_mutex the object may become re-busied before this
2116 * function completes. A similar but shorter race condition exists in the busy
2117 * ioctl.
2118 */
2119int
2120i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2121{
2122 struct drm_i915_gem_wait *args = data;
2123 struct drm_i915_gem_object *obj;
2124 struct intel_ring_buffer *ring = NULL;
2125 struct timespec timeout_stack, *timeout = NULL;
2126 u32 seqno = 0;
2127 int ret = 0;
2128
2129 if (args->timeout_ns >= 0) {
2130 timeout_stack = ns_to_timespec(args->timeout_ns);
2131 timeout = &timeout_stack;
2132 }
2133
2134 ret = i915_mutex_lock_interruptible(dev);
2135 if (ret)
2136 return ret;
2137
2138 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2139 if (&obj->base == NULL) {
2140 mutex_unlock(&dev->struct_mutex);
2141 return -ENOENT;
2142 }
2143
2144 /* Need to make sure the object gets inactive eventually. */
2145 ret = i915_gem_object_flush_active(obj);
2146 if (ret)
2147 goto out;
2148
2149 if (obj->active) {
2150 seqno = obj->last_rendering_seqno;
2151 ring = obj->ring;
2152 }
2153
2154 if (seqno == 0)
2155 goto out;
2156
2157 /* Do this after OLR check to make sure we make forward progress polling
2158 * on this IOCTL with a 0 timeout (like busy ioctl)
2159 */
2160 if (!args->timeout_ns) {
2161 ret = -ETIME;
2162 goto out;
2163 }
2164
2165 drm_gem_object_unreference(&obj->base);
2166 mutex_unlock(&dev->struct_mutex);
2167
2168 ret = __wait_seqno(ring, seqno, true, timeout);
2169 if (timeout) {
2170 WARN_ON(!timespec_valid(timeout));
2171 args->timeout_ns = timespec_to_ns(timeout);
2172 }
2173 return ret;
2174
2175out:
2176 drm_gem_object_unreference(&obj->base);
2177 mutex_unlock(&dev->struct_mutex);
2178 return ret;
2179}
2180
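Assuming the uapi additions that accompany this series (a struct drm_i915_gem_wait with bo_handle, flags and timeout_ns fields, reachable via DRM_IOCTL_I915_GEM_WAIT), a userspace caller could use the ioctl roughly as follows; treat the names as belonging to this patch series rather than a settled interface:

    /* Sketch only: waiting on a BO with the new wait ioctl. */
    #include <errno.h>
    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    /* Returns 0 once idle; -ETIME if still busy when the budget runs out. */
    static int bo_wait(int fd, uint32_t handle, int64_t timeout_ns)
    {
        struct drm_i915_gem_wait wait;

        memset(&wait, 0, sizeof(wait));
        wait.bo_handle = handle;
        wait.timeout_ns = timeout_ns;   /* 0 behaves like the busy ioctl */

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait))
            return -errno;
        return 0;                       /* wait.timeout_ns now holds the remainder */
    }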
2181/**
1988 * i915_gem_object_sync - sync an object to a ring. 2182 * i915_gem_object_sync - sync an object to a ring.
1989 * 2183 *
1990 * @obj: object which may be in use on another ring. 2184 * @obj: object which may be in use on another ring.
@@ -2160,7 +2354,7 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
2160 return ret; 2354 return ret;
2161 } 2355 }
2162 2356
2163 return i915_wait_request(ring, i915_gem_next_request_seqno(ring)); 2357 return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
2164} 2358}
2165 2359
2166int i915_gpu_idle(struct drm_device *dev) 2360int i915_gpu_idle(struct drm_device *dev)
@@ -2171,6 +2365,10 @@ int i915_gpu_idle(struct drm_device *dev)
2171 2365
2172 /* Flush everything onto the inactive list. */ 2366 /* Flush everything onto the inactive list. */
2173 for_each_ring(ring, dev_priv, i) { 2367 for_each_ring(ring, dev_priv, i) {
2368 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2369 if (ret)
2370 return ret;
2371
2174 ret = i915_ring_idle(ring); 2372 ret = i915_ring_idle(ring);
2175 if (ret) 2373 if (ret)
2176 return ret; 2374 return ret;
@@ -2364,7 +2562,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
2364 } 2562 }
2365 2563
2366 if (obj->last_fenced_seqno) { 2564 if (obj->last_fenced_seqno) {
2367 ret = i915_wait_request(obj->ring, obj->last_fenced_seqno); 2565 ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2368 if (ret) 2566 if (ret)
2369 return ret; 2567 return ret;
2370 2568
@@ -2551,8 +2749,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2551 if (map_and_fenceable) 2749 if (map_and_fenceable)
2552 free_space = 2750 free_space =
2553 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, 2751 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
2554 size, alignment, 0, 2752 size, alignment,
2555 dev_priv->mm.gtt_mappable_end, 2753 0, dev_priv->mm.gtt_mappable_end,
2556 0); 2754 0);
2557 else 2755 else
2558 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, 2756 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
@@ -2563,7 +2761,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2563 obj->gtt_space = 2761 obj->gtt_space =
2564 drm_mm_get_block_range_generic(free_space, 2762 drm_mm_get_block_range_generic(free_space,
2565 size, alignment, 0, 2763 size, alignment, 0,
2566 dev_priv->mm.gtt_mappable_end, 2764 0, dev_priv->mm.gtt_mappable_end,
2567 0); 2765 0);
2568 else 2766 else
2569 obj->gtt_space = 2767 obj->gtt_space =
@@ -3030,7 +3228,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3030 if (seqno == 0) 3228 if (seqno == 0)
3031 return 0; 3229 return 0;
3032 3230
3033 ret = __wait_seqno(ring, seqno, true); 3231 ret = __wait_seqno(ring, seqno, true, NULL);
3034 if (ret == 0) 3232 if (ret == 0)
3035 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); 3233 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3036 3234
@@ -3199,30 +3397,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3199 * become non-busy without any further actions, therefore emit any 3397 * become non-busy without any further actions, therefore emit any
3200 * necessary flushes here. 3398 * necessary flushes here.
3201 */ 3399 */
3202 args->busy = obj->active; 3400 ret = i915_gem_object_flush_active(obj);
3203 if (args->busy) {
3204 /* Unconditionally flush objects, even when the gpu still uses this
3205 * object. Userspace calling this function indicates that it wants to
3206 * use this buffer rather sooner than later, so issuing the required
3207 * flush earlier is beneficial.
3208 */
3209 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3210 ret = i915_gem_flush_ring(obj->ring,
3211 0, obj->base.write_domain);
3212 } else {
3213 ret = i915_gem_check_olr(obj->ring,
3214 obj->last_rendering_seqno);
3215 }
3216 3401
3217 /* Update the active list for the hardware's current position. 3402 args->busy = obj->active;
3218 * Otherwise this only updates on a delayed timer or when irqs
3219 * are actually unmasked, and our working set ends up being
3220 * larger than required.
3221 */
3222 i915_gem_retire_requests_ring(obj->ring);
3223
3224 args->busy = obj->active;
3225 }
3226 3403
3227 drm_gem_object_unreference(&obj->base); 3404 drm_gem_object_unreference(&obj->base);
3228unlock: 3405unlock:
@@ -3435,6 +3612,38 @@ i915_gem_idle(struct drm_device *dev)
3435 return 0; 3612 return 0;
3436} 3613}
3437 3614
3615void i915_gem_l3_remap(struct drm_device *dev)
3616{
3617 drm_i915_private_t *dev_priv = dev->dev_private;
3618 u32 misccpctl;
3619 int i;
3620
3621 if (!IS_IVYBRIDGE(dev))
3622 return;
3623
3624 if (!dev_priv->mm.l3_remap_info)
3625 return;
3626
3627 misccpctl = I915_READ(GEN7_MISCCPCTL);
3628 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
3629 POSTING_READ(GEN7_MISCCPCTL);
3630
3631 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
3632 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
3633 if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
3634 DRM_DEBUG("0x%x was already programmed to %x\n",
3635 GEN7_L3LOG_BASE + i, remap);
3636 if (remap && !dev_priv->mm.l3_remap_info[i/4])
3637 DRM_DEBUG_DRIVER("Clearing remapped register\n");
3638 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
3639 }
3640
3641 /* Make sure all the writes land before disabling dop clock gating */
3642 POSTING_READ(GEN7_L3LOG_BASE);
3643
3644 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
3645}
3646
3438void i915_gem_init_swizzling(struct drm_device *dev) 3647void i915_gem_init_swizzling(struct drm_device *dev)
3439{ 3648{
3440 drm_i915_private_t *dev_priv = dev->dev_private; 3649 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -3518,12 +3727,33 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
3518 } 3727 }
3519} 3728}
3520 3729
3730static bool
3731intel_enable_blt(struct drm_device *dev)
3732{
3733 if (!HAS_BLT(dev))
3734 return false;
3735
3736 /* The blitter was dysfunctional on early prototypes */
3737 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
3738 DRM_INFO("BLT not supported on this pre-production hardware;"
3739 " graphics performance will be degraded.\n");
3740 return false;
3741 }
3742
3743 return true;
3744}
3745
3521int 3746int
3522i915_gem_init_hw(struct drm_device *dev) 3747i915_gem_init_hw(struct drm_device *dev)
3523{ 3748{
3524 drm_i915_private_t *dev_priv = dev->dev_private; 3749 drm_i915_private_t *dev_priv = dev->dev_private;
3525 int ret; 3750 int ret;
3526 3751
3752 if (!intel_enable_gtt())
3753 return -EIO;
3754
3755 i915_gem_l3_remap(dev);
3756
3527 i915_gem_init_swizzling(dev); 3757 i915_gem_init_swizzling(dev);
3528 3758
3529 ret = intel_init_render_ring_buffer(dev); 3759 ret = intel_init_render_ring_buffer(dev);
@@ -3536,7 +3766,7 @@ i915_gem_init_hw(struct drm_device *dev)
3536 goto cleanup_render_ring; 3766 goto cleanup_render_ring;
3537 } 3767 }
3538 3768
3539 if (HAS_BLT(dev)) { 3769 if (intel_enable_blt(dev)) {
3540 ret = intel_init_blt_ring_buffer(dev); 3770 ret = intel_init_blt_ring_buffer(dev);
3541 if (ret) 3771 if (ret)
3542 goto cleanup_bsd_ring; 3772 goto cleanup_bsd_ring;
@@ -3544,6 +3774,11 @@ i915_gem_init_hw(struct drm_device *dev)
3544 3774
3545 dev_priv->next_seqno = 1; 3775 dev_priv->next_seqno = 1;
3546 3776
3777 /*
3778 * XXX: There was some w/a described somewhere suggesting loading
3779 * contexts before PPGTT.
3780 */
3781 i915_gem_context_init(dev);
3547 i915_gem_init_ppgtt(dev); 3782 i915_gem_init_ppgtt(dev);
3548 3783
3549 return 0; 3784 return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
new file mode 100644
index 000000000000..a9d58d72bb4d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -0,0 +1,535 @@
1/*
2 * Copyright © 2011-2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Ben Widawsky <ben@bwidawsk.net>
25 *
26 */
27
28/*
29 * This file implements HW context support. On gen5+ a HW context consists of an
30 * opaque GPU object which is referenced at times of context saves and restores.
31 * With RC6 enabled, the context is also referenced as the GPU enters and exits
32 * from RC6 (the GPU has its own internal power context, except on gen5). Though
33 * something like a context does exist for the media ring, the code only
34 * supports contexts for the render ring.
35 *
36 * In software, there is a distinction between contexts created by the user,
37 * and the default HW context. The default HW context is used by GPU clients
38 * that do not request setup of their own hardware context. The default
39 * context's state is never restored to help prevent programming errors. This
40 * would happen if a client ran and piggy-backed off another client's GPU state.
41 * The default context only exists to give the GPU some offset to load as the
42 * current context, invoking a save of the context we actually care about. In fact, the
43 * code could likely be constructed, albeit in a more complicated fashion, to
44 * never use the default context, though that limits the driver's ability to
45 * swap out, and/or destroy other contexts.
46 *
47 * All other contexts are created as a request by the GPU client. These contexts
48 * store GPU state, and thus allow GPU clients to not re-emit state (and
49 * potentially query certain state) at any time. The kernel driver makes
50 * certain that the appropriate commands are inserted.
51 *
52 * The context life cycle is semi-complicated in that context BOs may live
53 * longer than the context itself because of the way the hardware and object
54 * tracking work. Below is a very crude representation of the state machine
55 * describing the context life.
56 * refcount pincount active
57 * S0: initial state 0 0 0
58 * S1: context created 1 0 0
59 * S2: context is currently running 2 1 X
60 * S3: GPU referenced, but not current 2 0 1
61 * S4: context is current, but destroyed 1 1 0
62 * S5: like S3, but destroyed 1 0 1
63 *
64 * The most common (but not all) transitions:
65 * S0->S1: client creates a context
66 * S1->S2: client submits execbuf with context
67 * S2->S3: another client submits execbuf with context
68 * S3->S1: context object was retired
69 * S3->S2: client submits another execbuf
70 * S2->S4: context destroy called with current context
71 * S3->S5->S0: destroy path
72 * S4->S5->S0: destroy path on current context
73 *
74 * There are two confusing terms used above:
75 * The "current context" means the context which is currently running on the
76 * GPU. The GPU has loaded its state already and has stored away the gtt
77 * offset of the BO. The GPU is not actively referencing the data at this
78 * offset, but it will on the next context switch. The only way to avoid this
79 * is to do a GPU reset.
80 *
81 * An "active context" is one which was previously the "current context" and is
82 * on the active list waiting for the next context switch to occur. Until this
83 * happens, the object must remain at the same gtt offset. It is therefore
84 * possible to destroy a context while it is still active.
85 *
86 */
87
88#include "drmP.h"
89#include "i915_drm.h"
90#include "i915_drv.h"
91
92/* This is a HW constraint. The value below is the largest known requirement
93 * I've seen in a spec to date, and that was a workaround for a non-shipping
95 * part. It should be safe to decrease this, but it's more future-proof as is.
95 */
96#define CONTEXT_ALIGN (64<<10)
97
98static struct i915_hw_context *
99i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
100static int do_switch(struct drm_i915_gem_object *from_obj,
101 struct i915_hw_context *to, u32 seqno);
102
103static int get_context_size(struct drm_device *dev)
104{
105 struct drm_i915_private *dev_priv = dev->dev_private;
106 int ret;
107 u32 reg;
108
109 switch (INTEL_INFO(dev)->gen) {
110 case 6:
111 reg = I915_READ(CXT_SIZE);
112 ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
113 break;
114 case 7:
115 reg = I915_READ(GEN7_CXT_SIZE);
116 ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
117 break;
118 default:
119 BUG();
120 }
121
122 return ret;
123}
124
125static void do_destroy(struct i915_hw_context *ctx)
126{
127 struct drm_device *dev = ctx->obj->base.dev;
128 struct drm_i915_private *dev_priv = dev->dev_private;
129
130 if (ctx->file_priv)
131 idr_remove(&ctx->file_priv->context_idr, ctx->id);
132 else
133 BUG_ON(ctx != dev_priv->ring[RCS].default_context);
134
135 drm_gem_object_unreference(&ctx->obj->base);
136 kfree(ctx);
137}
138
139static struct i915_hw_context *
140create_hw_context(struct drm_device *dev,
141 struct drm_i915_file_private *file_priv)
142{
143 struct drm_i915_private *dev_priv = dev->dev_private;
144 struct i915_hw_context *ctx;
145 int ret, id;
146
147 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
148 if (ctx == NULL)
149 return ERR_PTR(-ENOMEM);
150
151 ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
152 if (ctx->obj == NULL) {
153 kfree(ctx);
154 DRM_DEBUG_DRIVER("Context object allocation failed\n");
155 return ERR_PTR(-ENOMEM);
156 }
157
158 /* The ring associated with the context object is handled by the normal
159 * object tracking code. We give an initial ring value simply to pass an
160 * assertion in the context switch code.
161 */
162 ctx->ring = &dev_priv->ring[RCS];
163
164 /* Default context will never have a file_priv */
165 if (file_priv == NULL)
166 return ctx;
167
168 ctx->file_priv = file_priv;
169
170again:
171 if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) {
172 ret = -ENOMEM;
173 DRM_DEBUG_DRIVER("idr allocation failed\n");
174 goto err_out;
175 }
176
177 ret = idr_get_new_above(&file_priv->context_idr, ctx,
178 DEFAULT_CONTEXT_ID + 1, &id);
179 if (ret == 0)
180 ctx->id = id;
181
182 if (ret == -EAGAIN)
183 goto again;
184 else if (ret)
185 goto err_out;
186
187 return ctx;
188
189err_out:
190 do_destroy(ctx);
191 return ERR_PTR(ret);
192}
193
194static inline bool is_default_context(struct i915_hw_context *ctx)
195{
196 return (ctx == ctx->ring->default_context);
197}
198
199/**
200 * The default context needs to exist per ring that uses contexts. It stores the
201 * context state of the GPU for applications that don't utilize HW contexts, as
202 * well as an idle case.
203 */
204static int create_default_context(struct drm_i915_private *dev_priv)
205{
206 struct i915_hw_context *ctx;
207 int ret;
208
209 BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
210
211 ctx = create_hw_context(dev_priv->dev, NULL);
212 if (IS_ERR(ctx))
213 return PTR_ERR(ctx);
214
215 /* We may need to do things with the shrinker which require us to
216 * immediately switch back to the default context. This can cause a
217 * problem as pinning the default context also requires GTT space which
218 * may not be available. To avoid this we always pin the
219 * default context.
220 */
221 dev_priv->ring[RCS].default_context = ctx;
222 ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
223 if (ret) {
224 do_destroy(ctx);
225 return ret;
226 }
227
228 ret = do_switch(NULL, ctx, 0);
229 if (ret) {
230 i915_gem_object_unpin(ctx->obj);
231 do_destroy(ctx);
232 } else {
233 DRM_DEBUG_DRIVER("Default HW context loaded\n");
234 }
235
236 return ret;
237}
238
239void i915_gem_context_init(struct drm_device *dev)
240{
241 struct drm_i915_private *dev_priv = dev->dev_private;
242 uint32_t ctx_size;
243
244 if (!HAS_HW_CONTEXTS(dev)) {
245 dev_priv->hw_contexts_disabled = true;
246 return;
247 }
248
249 /* If called from reset, or thaw... we've been here already */
250 if (dev_priv->hw_contexts_disabled ||
251 dev_priv->ring[RCS].default_context)
252 return;
253
254 ctx_size = get_context_size(dev);
255 dev_priv->hw_context_size = ctx_size;
256 dev_priv->hw_context_size = round_up(dev_priv->hw_context_size, 4096);
257
258 if (ctx_size <= 0 || ctx_size > (1<<20)) {
259 dev_priv->hw_contexts_disabled = true;
260 return;
261 }
262
263 if (create_default_context(dev_priv)) {
264 dev_priv->hw_contexts_disabled = true;
265 return;
266 }
267
268 DRM_DEBUG_DRIVER("HW context support initialized\n");
269}
270
271void i915_gem_context_fini(struct drm_device *dev)
272{
273 struct drm_i915_private *dev_priv = dev->dev_private;
274
275 if (dev_priv->hw_contexts_disabled)
276 return;
277
278 /* The only known way to stop the gpu from accessing the hw context is
279 * to reset it. Do this as the very last operation to avoid confusing
280 * other code, leading to spurious errors. */
281 intel_gpu_reset(dev);
282
283 i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj);
284
285 do_destroy(dev_priv->ring[RCS].default_context);
286}
287
288static int context_idr_cleanup(int id, void *p, void *data)
289{
290 struct i915_hw_context *ctx = p;
291
292 BUG_ON(id == DEFAULT_CONTEXT_ID);
293
294 do_destroy(ctx);
295
296 return 0;
297}
298
299void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
300{
301 struct drm_i915_file_private *file_priv = file->driver_priv;
302
303 mutex_lock(&dev->struct_mutex);
304 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
305 idr_destroy(&file_priv->context_idr);
306 mutex_unlock(&dev->struct_mutex);
307}
308
309static struct i915_hw_context *
310i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
311{
312 return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
313}
314
315static inline int
316mi_set_context(struct intel_ring_buffer *ring,
317 struct i915_hw_context *new_context,
318 u32 hw_flags)
319{
320 int ret;
321
322 /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
323 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
324 * explicitly, so we rely on the value at ring init, stored in
325 * itlb_before_ctx_switch.
326 */
327 if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
328 ret = ring->flush(ring, 0, 0);
329 if (ret)
330 return ret;
331 }
332
333 ret = intel_ring_begin(ring, 6);
334 if (ret)
335 return ret;
336
337 if (IS_GEN7(ring->dev))
338 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
339 else
340 intel_ring_emit(ring, MI_NOOP);
341
342 intel_ring_emit(ring, MI_NOOP);
343 intel_ring_emit(ring, MI_SET_CONTEXT);
344 intel_ring_emit(ring, new_context->obj->gtt_offset |
345 MI_MM_SPACE_GTT |
346 MI_SAVE_EXT_STATE_EN |
347 MI_RESTORE_EXT_STATE_EN |
348 hw_flags);
349 /* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */
350 intel_ring_emit(ring, MI_NOOP);
351
352 if (IS_GEN7(ring->dev))
353 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
354 else
355 intel_ring_emit(ring, MI_NOOP);
356
357 intel_ring_advance(ring);
358
359 return ret;
360}
361
362static int do_switch(struct drm_i915_gem_object *from_obj,
363 struct i915_hw_context *to,
364 u32 seqno)
365{
366 struct intel_ring_buffer *ring = NULL;
367 u32 hw_flags = 0;
368 int ret;
369
370 BUG_ON(to == NULL);
371 BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
372
373 ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false);
374 if (ret)
375 return ret;
376
377 /* Clear this page out of any CPU caches for coherent swap-in/out. Note
378 * that thanks to write = false in this call and us not setting any gpu
379 * write domains when putting a context object onto the active list
380 * (when switching away from it), this won't block.
381 * XXX: We need a real interface to do this instead of trickery. */
382 ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
383 if (ret) {
384 i915_gem_object_unpin(to->obj);
385 return ret;
386 }
387
388 if (!to->obj->has_global_gtt_mapping)
389 i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
390
391 if (!to->is_initialized || is_default_context(to))
392 hw_flags |= MI_RESTORE_INHIBIT;
393 else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
394 hw_flags |= MI_FORCE_RESTORE;
395
396 ring = to->ring;
397 ret = mi_set_context(ring, to, hw_flags);
398 if (ret) {
399 i915_gem_object_unpin(to->obj);
400 return ret;
401 }
402
403 /* The backing object for the context is done after switching to the
404 * *next* context. Therefore we cannot retire the previous context until
405 * the next context has already started running. In fact, the below code
406 * is a bit suboptimal because the retiring can occur simply after the
407 * MI_SET_CONTEXT instead of when the next seqno has completed.
408 */
409 if (from_obj != NULL) {
410 from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
411 i915_gem_object_move_to_active(from_obj, ring, seqno);
412 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
413 * whole damn pipeline, we don't need to explicitly mark the
414 * object dirty. The only exception is that the context must be
415 * correct in case the object gets swapped out. Ideally we'd be
416 * able to defer doing this until we know the object would be
417 * swapped, but there is no way to do that yet.
418 */
419 from_obj->dirty = 1;
420 BUG_ON(from_obj->ring != to->ring);
421 i915_gem_object_unpin(from_obj);
422
423 drm_gem_object_unreference(&from_obj->base);
424 }
425
426 drm_gem_object_reference(&to->obj->base);
427 ring->last_context_obj = to->obj;
428 to->is_initialized = true;
429
430 return 0;
431}
432
433/**
434 * i915_switch_context() - perform a GPU context switch.
435 * @ring: ring for which we'll execute the context switch
436 * @file: drm file associated with the context, may be NULL
437 * @to_id: id of the context to switch to
438 *
439 * (the seqno at which the outgoing context retires is derived from the ring)
440 *
441 * The context life cycle is simple. The context refcount is incremented and
442 * decremented by 1 on create and destroy. If the context is in use by the GPU,
443 * it will have a refcount > 1. This allows us to destroy the context abstract
444 * object while letting the normal object tracking destroy the backing BO.
445 */
446int i915_switch_context(struct intel_ring_buffer *ring,
447 struct drm_file *file,
448 int to_id)
449{
450 struct drm_i915_private *dev_priv = ring->dev->dev_private;
451 struct drm_i915_file_private *file_priv = NULL;
452 struct i915_hw_context *to;
453 struct drm_i915_gem_object *from_obj = ring->last_context_obj;
454
455 if (dev_priv->hw_contexts_disabled)
456 return 0;
457
458 if (ring != &dev_priv->ring[RCS])
459 return 0;
460
461 if (file)
462 file_priv = file->driver_priv;
463
464 if (to_id == DEFAULT_CONTEXT_ID) {
465 to = ring->default_context;
466 } else {
467 to = i915_gem_context_get(file_priv, to_id);
468 if (to == NULL)
469 return -ENOENT;
470 }
471
472 if (from_obj == to->obj)
473 return 0;
474
475 return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring));
476}
477
478int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
479 struct drm_file *file)
480{
481 struct drm_i915_private *dev_priv = dev->dev_private;
482 struct drm_i915_gem_context_create *args = data;
483 struct drm_i915_file_private *file_priv = file->driver_priv;
484 struct i915_hw_context *ctx;
485 int ret;
486
487 if (!(dev->driver->driver_features & DRIVER_GEM))
488 return -ENODEV;
489
490 if (dev_priv->hw_contexts_disabled)
491 return -ENODEV;
492
493 ret = i915_mutex_lock_interruptible(dev);
494 if (ret)
495 return ret;
496
497 ctx = create_hw_context(dev, file_priv);
498 mutex_unlock(&dev->struct_mutex);
499 if (IS_ERR(ctx))
500 return PTR_ERR(ctx);
501
502 args->ctx_id = ctx->id;
503 DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
504
505 return 0;
506}
507
508int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
509 struct drm_file *file)
510{
511 struct drm_i915_gem_context_destroy *args = data;
512 struct drm_i915_file_private *file_priv = file->driver_priv;
513 struct i915_hw_context *ctx;
514 int ret;
515
516 if (!(dev->driver->driver_features & DRIVER_GEM))
517 return -ENODEV;
518
519 ret = i915_mutex_lock_interruptible(dev);
520 if (ret)
521 return ret;
522
523 ctx = i915_gem_context_get(file_priv, args->ctx_id);
524 if (!ctx) {
525 mutex_unlock(&dev->struct_mutex);
526 return -ENOENT;
527 }
528
529 do_destroy(ctx);
530
531 mutex_unlock(&dev->struct_mutex);
532
533 DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
534 return 0;
535}
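Taken together with the execbuffer2 rsvd1 plumbing further below, userspace drives contexts through the two new ioctls. A rough usage sketch, assuming the DRM_IOCTL_I915_GEM_CONTEXT_CREATE/_DESTROY numbers and the i915_execbuffer2_set_context_id() helper that this series adds to i915_drm.h:

    /* Sketch only: create a HW context, tag submissions with it, destroy it. */
    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    static uint32_t context_create(int fd)
    {
        struct drm_i915_gem_context_create create;

        memset(&create, 0, sizeof(create));
        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
            return 0;       /* 0 == DEFAULT_CONTEXT_ID, never handed to userspace */
        return create.ctx_id;
    }

    static void context_destroy(int fd, uint32_t ctx_id)
    {
        struct drm_i915_gem_context_destroy destroy;

        memset(&destroy, 0, sizeof(destroy));
        destroy.ctx_id = ctx_id;
        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
    }

    static void execbuf_use_context(struct drm_i915_gem_execbuffer2 *eb, uint32_t id)
    {
        /* The context id rides in rsvd1; only the render ring accepts it. */
        i915_execbuffer2_set_context_id(*eb, id);
    }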
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index a4f6aaabca99..bddf7bed183f 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -132,7 +132,8 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
132 __func__, obj, obj->gtt_offset, handle, 132 __func__, obj, obj->gtt_offset, handle,
133 obj->size / 1024); 133 obj->size / 1024);
134 134
135 gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size); 135 gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
136 obj->base.size);
136 if (gtt_mapping == NULL) { 137 if (gtt_mapping == NULL) {
137 DRM_ERROR("failed to map GTT space\n"); 138 DRM_ERROR("failed to map GTT space\n");
138 return; 139 return;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index ae7c24e12e52..eba0308f10e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -78,11 +78,12 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
78 78
79 INIT_LIST_HEAD(&unwind_list); 79 INIT_LIST_HEAD(&unwind_list);
80 if (mappable) 80 if (mappable)
81 drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size, 81 drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
82 alignment, 0, 82 min_size, alignment, 0,
83 dev_priv->mm.gtt_mappable_end); 83 0, dev_priv->mm.gtt_mappable_end);
84 else 84 else
85 drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); 85 drm_mm_init_scan(&dev_priv->mm.gtt_space,
86 min_size, alignment, 0);
86 87
87 /* First see if there is a large enough contiguous idle region... */ 88 /* First see if there is a large enough contiguous idle region... */
88 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { 89 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 974a9f1068a3..ff2819ea0813 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -291,6 +291,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
291 target_i915_obj = to_intel_bo(target_obj); 291 target_i915_obj = to_intel_bo(target_obj);
292 target_offset = target_i915_obj->gtt_offset; 292 target_offset = target_i915_obj->gtt_offset;
293 293
294 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
295 * pipe_control writes because the gpu doesn't properly redirect them
296 * through the ppgtt for non_secure batchbuffers. */
297 if (unlikely(IS_GEN6(dev) &&
298 reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
299 !target_i915_obj->has_global_gtt_mapping)) {
300 i915_gem_gtt_bind_object(target_i915_obj,
301 target_i915_obj->cache_level);
302 }
303
294 /* The target buffer should have appeared before us in the 304 /* The target buffer should have appeared before us in the
295 * exec_object list, so it should have a GTT space bound by now. 305 * exec_object list, so it should have a GTT space bound by now.
296 */ 306 */
@@ -399,16 +409,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
399 io_mapping_unmap_atomic(reloc_page); 409 io_mapping_unmap_atomic(reloc_page);
400 } 410 }
401 411
402 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
403 * pipe_control writes because the gpu doesn't properly redirect them
404 * through the ppgtt for non_secure batchbuffers. */
405 if (unlikely(IS_GEN6(dev) &&
406 reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
407 !target_i915_obj->has_global_gtt_mapping)) {
408 i915_gem_gtt_bind_object(target_i915_obj,
409 target_i915_obj->cache_level);
410 }
411
412 /* and update the user's relocation entry */ 412 /* and update the user's relocation entry */
413 reloc->presumed_offset = target_offset; 413 reloc->presumed_offset = target_offset;
414 414
@@ -810,33 +810,16 @@ err:
810 return ret; 810 return ret;
811} 811}
812 812
813static int 813static void
814i915_gem_execbuffer_flush(struct drm_device *dev, 814i915_gem_execbuffer_flush(struct drm_device *dev,
815 uint32_t invalidate_domains, 815 uint32_t invalidate_domains,
816 uint32_t flush_domains, 816 uint32_t flush_domains)
817 uint32_t flush_rings)
818{ 817{
819 drm_i915_private_t *dev_priv = dev->dev_private;
820 int i, ret;
821
822 if (flush_domains & I915_GEM_DOMAIN_CPU) 818 if (flush_domains & I915_GEM_DOMAIN_CPU)
823 intel_gtt_chipset_flush(); 819 intel_gtt_chipset_flush();
824 820
825 if (flush_domains & I915_GEM_DOMAIN_GTT) 821 if (flush_domains & I915_GEM_DOMAIN_GTT)
826 wmb(); 822 wmb();
827
828 if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
829 for (i = 0; i < I915_NUM_RINGS; i++)
830 if (flush_rings & (1 << i)) {
831 ret = i915_gem_flush_ring(&dev_priv->ring[i],
832 invalidate_domains,
833 flush_domains);
834 if (ret)
835 return ret;
836 }
837 }
838
839 return 0;
840} 823}
841 824
842static int 825static int
@@ -885,12 +868,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
885 i915_gem_object_set_to_gpu_domain(obj, ring, &cd); 868 i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
886 869
887 if (cd.invalidate_domains | cd.flush_domains) { 870 if (cd.invalidate_domains | cd.flush_domains) {
888 ret = i915_gem_execbuffer_flush(ring->dev, 871 i915_gem_execbuffer_flush(ring->dev,
889 cd.invalidate_domains, 872 cd.invalidate_domains,
890 cd.flush_domains, 873 cd.flush_domains);
891 cd.flush_rings);
892 if (ret)
893 return ret;
894 } 874 }
895 875
896 if (cd.flips) { 876 if (cd.flips) {
@@ -905,6 +885,16 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
905 return ret; 885 return ret;
906 } 886 }
907 887
888 /* Unconditionally invalidate gpu caches and ensure that we do flush
889 * any residual writes from the previous batch.
890 */
891 ret = i915_gem_flush_ring(ring,
892 I915_GEM_GPU_DOMAINS,
893 ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0);
894 if (ret)
895 return ret;
896
897 ring->gpu_caches_dirty = false;
908 return 0; 898 return 0;
909} 899}
910 900
@@ -983,26 +973,13 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
983 struct intel_ring_buffer *ring) 973 struct intel_ring_buffer *ring)
984{ 974{
985 struct drm_i915_gem_request *request; 975 struct drm_i915_gem_request *request;
986 u32 invalidate;
987 976
988 /* 977 /* Unconditionally force add_request to emit a full flush. */
989 * Ensure that the commands in the batch buffer are 978 ring->gpu_caches_dirty = true;
990 * finished before the interrupt fires.
991 *
992 * The sampler always gets flushed on i965 (sigh).
993 */
994 invalidate = I915_GEM_DOMAIN_COMMAND;
995 if (INTEL_INFO(dev)->gen >= 4)
996 invalidate |= I915_GEM_DOMAIN_SAMPLER;
997 if (ring->flush(ring, invalidate, 0)) {
998 i915_gem_next_request_seqno(ring);
999 return;
1000 }
1001 979
1002 /* Add a breadcrumb for the completion of the batch buffer */ 980 /* Add a breadcrumb for the completion of the batch buffer */
1003 request = kzalloc(sizeof(*request), GFP_KERNEL); 981 request = kzalloc(sizeof(*request), GFP_KERNEL);
1004 if (request == NULL || i915_add_request(ring, file, request)) { 982 if (request == NULL || i915_add_request(ring, file, request)) {
1005 i915_gem_next_request_seqno(ring);
1006 kfree(request); 983 kfree(request);
1007 } 984 }
1008} 985}
@@ -1044,6 +1021,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1044 struct drm_i915_gem_object *batch_obj; 1021 struct drm_i915_gem_object *batch_obj;
1045 struct drm_clip_rect *cliprects = NULL; 1022 struct drm_clip_rect *cliprects = NULL;
1046 struct intel_ring_buffer *ring; 1023 struct intel_ring_buffer *ring;
1024 u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1047 u32 exec_start, exec_len; 1025 u32 exec_start, exec_len;
1048 u32 seqno; 1026 u32 seqno;
1049 u32 mask; 1027 u32 mask;
@@ -1065,9 +1043,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1065 break; 1043 break;
1066 case I915_EXEC_BSD: 1044 case I915_EXEC_BSD:
1067 ring = &dev_priv->ring[VCS]; 1045 ring = &dev_priv->ring[VCS];
1046 if (ctx_id != 0) {
1047 DRM_DEBUG("Ring %s doesn't support contexts\n",
1048 ring->name);
1049 return -EPERM;
1050 }
1068 break; 1051 break;
1069 case I915_EXEC_BLT: 1052 case I915_EXEC_BLT:
1070 ring = &dev_priv->ring[BCS]; 1053 ring = &dev_priv->ring[BCS];
1054 if (ctx_id != 0) {
1055 DRM_DEBUG("Ring %s doesn't support contexts\n",
1056 ring->name);
1057 return -EPERM;
1058 }
1071 break; 1059 break;
1072 default: 1060 default:
1073 DRM_DEBUG("execbuf with unknown ring: %d\n", 1061 DRM_DEBUG("execbuf with unknown ring: %d\n",
@@ -1240,6 +1228,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1240 } 1228 }
1241 } 1229 }
1242 1230
1231 ret = i915_switch_context(ring, file, ctx_id);
1232 if (ret)
1233 goto err;
1234
1243 if (ring == &dev_priv->ring[RCS] && 1235 if (ring == &dev_priv->ring[RCS] &&
1244 mode != dev_priv->relative_constants_mode) { 1236 mode != dev_priv->relative_constants_mode) {
1245 ret = intel_ring_begin(ring, 4); 1237 ret = intel_ring_begin(ring, 4);
@@ -1367,6 +1359,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1367 exec2.num_cliprects = args->num_cliprects; 1359 exec2.num_cliprects = args->num_cliprects;
1368 exec2.cliprects_ptr = args->cliprects_ptr; 1360 exec2.cliprects_ptr = args->cliprects_ptr;
1369 exec2.flags = I915_EXEC_RENDER; 1361 exec2.flags = I915_EXEC_RENDER;
1362 i915_execbuffer2_set_context_id(exec2, 0);
1370 1363
1371 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); 1364 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1372 if (!ret) { 1365 if (!ret) {
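The context-id accessors used in this file presumably pack the id into a spare field of struct drm_i915_gem_execbuffer2; a sketch under that assumption (the rsvd1 field name and the mask are guesses, not something this diff shows):

#define i915_execbuffer2_set_context_id(eb2, context) \
	(eb2).rsvd1 = (context)
#define i915_execbuffer2_get_context_id(eb2) \
	((eb2).rsvd1 & 0xffffffff)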
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9fd25a435536..60815b861ec2 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -72,7 +72,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
72 /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 72 /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
73 * entries. For aliasing ppgtt support we just steal them at the end for 73 * entries. For aliasing ppgtt support we just steal them at the end for
74 * now. */ 74 * now. */
75 first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES; 75 first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
76 76
77 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); 77 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
78 if (!ppgtt) 78 if (!ppgtt)
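Each global-GTT entry maps a 4 KiB page, so the old constant 512*1024 silently assumed a 2 GiB GTT; on anything smaller, the stolen PDE range started past the end of the table. Computing the index from gtt_total_entries keeps it in range. A worked instance, assuming I915_PPGTT_PD_ENTRIES is 512:

/* 2 GiB GTT: 524288 entries -> first PDE index 523776 (old == new)
 * 1 GiB GTT: 262144 entries -> first PDE index 261632
 * The hardcoded 512*1024 - 512 = 523776 is out of range in the
 * 1 GiB case. */
first_pd_entry_in_global_pt =
	dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;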
@@ -261,7 +261,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
261 pte_flags |= GEN6_PTE_CACHE_LLC; 261 pte_flags |= GEN6_PTE_CACHE_LLC;
262 break; 262 break;
263 case I915_CACHE_NONE: 263 case I915_CACHE_NONE:
264 pte_flags |= GEN6_PTE_UNCACHED; 264 if (IS_HASWELL(dev))
265 pte_flags |= HSW_PTE_UNCACHED;
266 else
267 pte_flags |= GEN6_PTE_UNCACHED;
265 break; 268 break;
266 default: 269 default:
267 BUG(); 270 BUG();
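On Haswell, "uncached" is expressed by the absence of cache bits rather than by a dedicated bit, so reusing GEN6_PTE_UNCACHED (bit 1) would program a wrong PTE. A condensed sketch of the selection, using the defines added in the i915_reg.h hunk further down (the helper itself is hypothetical):

static u32 sketch_pte_cache_bits(struct drm_device *dev,
				 enum i915_cache_level level)
{
	switch (level) {
	case I915_CACHE_LLC_MLC:
		return GEN6_PTE_CACHE_LLC_MLC;	/* 3 << 1 */
	case I915_CACHE_LLC:
		return GEN6_PTE_CACHE_LLC;	/* 2 << 1 */
	case I915_CACHE_NONE:
		return IS_HASWELL(dev) ? HSW_PTE_UNCACHED   /* 0 */
				       : GEN6_PTE_UNCACHED; /* 1 << 1 */
	default:
		BUG();
	}
}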
@@ -361,7 +364,8 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
361 struct drm_device *dev = obj->base.dev; 364 struct drm_device *dev = obj->base.dev;
362 struct drm_i915_private *dev_priv = dev->dev_private; 365 struct drm_i915_private *dev_priv = dev->dev_private;
363 366
364 if (dev_priv->mm.gtt->needs_dmar) 367 /* don't map imported dma-buf objects */
368 if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table)
365 return intel_gtt_map_memory(obj->pages, 369 return intel_gtt_map_memory(obj->pages,
366 obj->base.size >> PAGE_SHIFT, 370 obj->base.size >> PAGE_SHIFT,
367 &obj->sg_list, 371 &obj->sg_list,
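The guard works because the prime-import path attaches the exporter's scatterlist before the object ever reaches this code, so a non-NULL sg_table marks pages that are already DMA-mapped by the exporter. Roughly (a hedged sketch of the import side, which is not part of this diff):

/* In the dma-buf import path (sketch, not this patch): */
obj->sg_table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);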
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b1fe0edda955..8a3828528b9d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -375,6 +375,86 @@ static void gen6_pm_rps_work(struct work_struct *work)
375 mutex_unlock(&dev_priv->dev->struct_mutex); 375 mutex_unlock(&dev_priv->dev->struct_mutex);
376} 376}
377 377
378
379/**
 380 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 381 * occurs.
 382 * @work: workqueue struct
 383 *
 384 * Doesn't actually do anything except notify userspace. As a consequence of
 385 * this event, userspace should try to remap the bad rows, since the same
 386 * row is statistically more likely to go bad again.
387 */
388static void ivybridge_parity_work(struct work_struct *work)
389{
390 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
391 parity_error_work);
392 u32 error_status, row, bank, subbank;
393 char *parity_event[5];
394 uint32_t misccpctl;
395 unsigned long flags;
396
397 /* We must turn off DOP level clock gating to access the L3 registers.
 398 * In order to avoid a get/put style interface, acquire struct mutex
399 * any time we access those registers.
400 */
401 mutex_lock(&dev_priv->dev->struct_mutex);
402
403 misccpctl = I915_READ(GEN7_MISCCPCTL);
404 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
405 POSTING_READ(GEN7_MISCCPCTL);
406
407 error_status = I915_READ(GEN7_L3CDERRST1);
408 row = GEN7_PARITY_ERROR_ROW(error_status);
409 bank = GEN7_PARITY_ERROR_BANK(error_status);
410 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
411
412 I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
413 GEN7_L3CDERRST1_ENABLE);
414 POSTING_READ(GEN7_L3CDERRST1);
415
416 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
417
418 spin_lock_irqsave(&dev_priv->irq_lock, flags);
419 dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
420 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
421 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
422
423 mutex_unlock(&dev_priv->dev->struct_mutex);
424
425 parity_event[0] = "L3_PARITY_ERROR=1";
426 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
427 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
428 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
429 parity_event[4] = NULL;
430
431 kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
432 KOBJ_CHANGE, parity_event);
433
434 DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
435 row, bank, subbank);
436
437 kfree(parity_event[3]);
438 kfree(parity_event[2]);
439 kfree(parity_event[1]);
440}
441
442static void ivybridge_handle_parity_error(struct drm_device *dev)
443{
444 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
445 unsigned long flags;
446
447 if (!IS_IVYBRIDGE(dev))
448 return;
449
450 spin_lock_irqsave(&dev_priv->irq_lock, flags);
451 dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
452 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
453 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
454
455 queue_work(dev_priv->wq, &dev_priv->parity_error_work);
456}
457
378static void snb_gt_irq_handler(struct drm_device *dev, 458static void snb_gt_irq_handler(struct drm_device *dev,
379 struct drm_i915_private *dev_priv, 459 struct drm_i915_private *dev_priv,
380 u32 gt_iir) 460 u32 gt_iir)
@@ -394,6 +474,9 @@ static void snb_gt_irq_handler(struct drm_device *dev,
394 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); 474 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
395 i915_handle_error(dev, false); 475 i915_handle_error(dev, false);
396 } 476 }
477
478 if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
479 ivybridge_handle_parity_error(dev);
397} 480}
398 481
399static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, 482static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
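The split above follows the usual top/bottom-half pattern: the hard-IRQ path only masks GT_GEN7_L3_PARITY_ERROR_INTERRUPT in GTIMR and queues the work; the work item reads row/bank/subbank out of GEN7_L3CDERRST1 (with DOP clock gating briefly disabled), re-arms the interrupt, and reports through a KOBJ_CHANGE uevent. Userspace sees the parity_event[] strings as environment variables; a hypothetical udev-launched consumer could look like this (no such tool ships with the driver):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Variable names match the parity_event[] strings emitted above. */
	const char *row = getenv("ROW");
	const char *bank = getenv("BANK");
	const char *subbank = getenv("SUBBANK");

	if (!getenv("L3_PARITY_ERROR") || !row || !bank || !subbank)
		return 1;

	/* A real tool would remap the row through the driver's L3 remap
	 * interface; here we only log it. */
	printf("L3 parity error: row %s, bank %s, subbank %s\n",
	       row, bank, subbank);
	return 0;
}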
@@ -412,7 +495,6 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
412 */ 495 */
413 496
414 spin_lock_irqsave(&dev_priv->rps_lock, flags); 497 spin_lock_irqsave(&dev_priv->rps_lock, flags);
415 WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
416 dev_priv->pm_iir |= pm_iir; 498 dev_priv->pm_iir |= pm_iir;
417 I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); 499 I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
418 POSTING_READ(GEN6_PMIMR); 500 POSTING_READ(GEN6_PMIMR);
@@ -430,15 +512,10 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
430 unsigned long irqflags; 512 unsigned long irqflags;
431 int pipe; 513 int pipe;
432 u32 pipe_stats[I915_MAX_PIPES]; 514 u32 pipe_stats[I915_MAX_PIPES];
433 u32 vblank_status;
434 int vblank = 0;
435 bool blc_event; 515 bool blc_event;
436 516
437 atomic_inc(&dev_priv->irq_received); 517 atomic_inc(&dev_priv->irq_received);
438 518
439 vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
440 PIPE_VBLANK_INTERRUPT_STATUS;
441
442 while (true) { 519 while (true) {
443 iir = I915_READ(VLV_IIR); 520 iir = I915_READ(VLV_IIR);
444 gt_iir = I915_READ(GTIIR); 521 gt_iir = I915_READ(GTIIR);
@@ -468,6 +545,16 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
468 } 545 }
469 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 546 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
470 547
548 for_each_pipe(pipe) {
549 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
550 drm_handle_vblank(dev, pipe);
551
552 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
553 intel_prepare_page_flip(dev, pipe);
554 intel_finish_page_flip(dev, pipe);
555 }
556 }
557
471 /* Consume port. Then clear IIR or we'll miss events */ 558 /* Consume port. Then clear IIR or we'll miss events */
472 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 559 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
473 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 560 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
@@ -482,19 +569,6 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
482 I915_READ(PORT_HOTPLUG_STAT); 569 I915_READ(PORT_HOTPLUG_STAT);
483 } 570 }
484 571
485
486 if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
487 drm_handle_vblank(dev, 0);
488 vblank++;
489 intel_finish_page_flip(dev, 0);
490 }
491
492 if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
493 drm_handle_vblank(dev, 1);
494 vblank++;
495 intel_finish_page_flip(dev, 0);
496 }
497
498 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 572 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
499 blc_event = true; 573 blc_event = true;
500 574
@@ -992,6 +1066,7 @@ static void i915_record_ring_state(struct drm_device *dev,
992 struct drm_i915_private *dev_priv = dev->dev_private; 1066 struct drm_i915_private *dev_priv = dev->dev_private;
993 1067
994 if (INTEL_INFO(dev)->gen >= 6) { 1068 if (INTEL_INFO(dev)->gen >= 6) {
1069 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
995 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); 1070 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
996 error->semaphore_mboxes[ring->id][0] 1071 error->semaphore_mboxes[ring->id][0]
997 = I915_READ(RING_SYNC_0(ring->mmio_base)); 1072 = I915_READ(RING_SYNC_0(ring->mmio_base));
@@ -1105,6 +1180,7 @@ static void i915_capture_error_state(struct drm_device *dev)
1105 kref_init(&error->ref); 1180 kref_init(&error->ref);
1106 error->eir = I915_READ(EIR); 1181 error->eir = I915_READ(EIR);
1107 error->pgtbl_er = I915_READ(PGTBL_ER); 1182 error->pgtbl_er = I915_READ(PGTBL_ER);
1183 error->ccid = I915_READ(CCID);
1108 1184
1109 if (HAS_PCH_SPLIT(dev)) 1185 if (HAS_PCH_SPLIT(dev))
1110 error->ier = I915_READ(DEIER) | I915_READ(GTIER); 1186 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
@@ -1427,23 +1503,20 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1427{ 1503{
1428 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1504 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1429 unsigned long irqflags; 1505 unsigned long irqflags;
1430 u32 dpfl, imr; 1506 u32 imr;
1431 1507
1432 if (!i915_pipe_enabled(dev, pipe)) 1508 if (!i915_pipe_enabled(dev, pipe))
1433 return -EINVAL; 1509 return -EINVAL;
1434 1510
1435 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1511 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1436 dpfl = I915_READ(VLV_DPFLIPSTAT);
1437 imr = I915_READ(VLV_IMR); 1512 imr = I915_READ(VLV_IMR);
1438 if (pipe == 0) { 1513 if (pipe == 0)
1439 dpfl |= PIPEA_VBLANK_INT_EN;
1440 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 1514 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1441 } else { 1515 else
1442 dpfl |= PIPEA_VBLANK_INT_EN;
1443 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1516 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1444 }
1445 I915_WRITE(VLV_DPFLIPSTAT, dpfl);
1446 I915_WRITE(VLV_IMR, imr); 1517 I915_WRITE(VLV_IMR, imr);
1518 i915_enable_pipestat(dev_priv, pipe,
1519 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1447 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1520 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1448 1521
1449 return 0; 1522 return 0;
@@ -1493,20 +1566,17 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1493{ 1566{
1494 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1567 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1495 unsigned long irqflags; 1568 unsigned long irqflags;
1496 u32 dpfl, imr; 1569 u32 imr;
1497 1570
1498 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1571 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1499 dpfl = I915_READ(VLV_DPFLIPSTAT); 1572 i915_disable_pipestat(dev_priv, pipe,
1573 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1500 imr = I915_READ(VLV_IMR); 1574 imr = I915_READ(VLV_IMR);
1501 if (pipe == 0) { 1575 if (pipe == 0)
1502 dpfl &= ~PIPEA_VBLANK_INT_EN;
1503 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 1576 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1504 } else { 1577 else
1505 dpfl &= ~PIPEB_VBLANK_INT_EN;
1506 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1578 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1507 }
1508 I915_WRITE(VLV_IMR, imr); 1579 I915_WRITE(VLV_IMR, imr);
1509 I915_WRITE(VLV_DPFLIPSTAT, dpfl);
1510 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1580 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1511} 1581}
1512 1582
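Both vblank paths now go through the pipestat helpers instead of poking VLV_DPFLIPSTAT directly. From memory (this diff does not show the helper), i915_enable_pipestat is approximately the following; enable bits live in the high half of PIPESTAT and their status bits sit 16 below, so writing mask >> 16 also clears stale status:

void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status. */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}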
@@ -1649,7 +1719,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
1649 1719
1650 atomic_set(&dev_priv->irq_received, 0); 1720 atomic_set(&dev_priv->irq_received, 0);
1651 1721
1652
1653 I915_WRITE(HWSTAM, 0xeffe); 1722 I915_WRITE(HWSTAM, 0xeffe);
1654 1723
1655 /* XXX hotplug from PCH */ 1724 /* XXX hotplug from PCH */
@@ -1812,13 +1881,13 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
1812 DE_PIPEA_VBLANK_IVB); 1881 DE_PIPEA_VBLANK_IVB);
1813 POSTING_READ(DEIER); 1882 POSTING_READ(DEIER);
1814 1883
1815 dev_priv->gt_irq_mask = ~0; 1884 dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
1816 1885
1817 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1886 I915_WRITE(GTIIR, I915_READ(GTIIR));
1818 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 1887 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1819 1888
1820 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | 1889 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
1821 GEN6_BLITTER_USER_INTERRUPT; 1890 GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
1822 I915_WRITE(GTIER, render_irqs); 1891 I915_WRITE(GTIER, render_irqs);
1823 POSTING_READ(GTIER); 1892 POSTING_READ(GTIER);
1824 1893
@@ -1841,16 +1910,24 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
1841static int valleyview_irq_postinstall(struct drm_device *dev) 1910static int valleyview_irq_postinstall(struct drm_device *dev)
1842{ 1911{
1843 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1912 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1844 u32 render_irqs;
1845 u32 enable_mask; 1913 u32 enable_mask;
1846 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 1914 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1915 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
1847 u16 msid; 1916 u16 msid;
1848 1917
1849 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 1918 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
1850 enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 1919 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1920 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
1921 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1851 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1922 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1852 1923
1853 dev_priv->irq_mask = ~enable_mask; 1924 /*
 1925 * Leave vblank interrupts masked initially; enable/disable will
1926 * toggle them based on usage.
1927 */
1928 dev_priv->irq_mask = (~enable_mask) |
1929 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
1930 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1854 1931
1855 dev_priv->pipestat[0] = 0; 1932 dev_priv->pipestat[0] = 0;
1856 dev_priv->pipestat[1] = 0; 1933 dev_priv->pipestat[1] = 0;
@@ -1869,26 +1946,27 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
1869 I915_WRITE(PIPESTAT(1), 0xffff); 1946 I915_WRITE(PIPESTAT(1), 0xffff);
1870 POSTING_READ(VLV_IER); 1947 POSTING_READ(VLV_IER);
1871 1948
1949 i915_enable_pipestat(dev_priv, 0, pipestat_enable);
1950 i915_enable_pipestat(dev_priv, 1, pipestat_enable);
1951
1872 I915_WRITE(VLV_IIR, 0xffffffff); 1952 I915_WRITE(VLV_IIR, 0xffffffff);
1873 I915_WRITE(VLV_IIR, 0xffffffff); 1953 I915_WRITE(VLV_IIR, 0xffffffff);
1874 1954
1875 render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT | 1955 dev_priv->gt_irq_mask = ~0;
1876 GT_GEN6_BLT_CS_ERROR_INTERRUPT |
1877 GT_GEN6_BLT_USER_INTERRUPT |
1878 GT_GEN6_BSD_USER_INTERRUPT |
1879 GT_GEN6_BSD_CS_ERROR_INTERRUPT |
1880 GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
1881 GT_PIPE_NOTIFY |
1882 GT_RENDER_CS_ERROR_INTERRUPT |
1883 GT_SYNC_STATUS |
1884 GT_USER_INTERRUPT;
1885
1886 dev_priv->gt_irq_mask = ~render_irqs;
1887 1956
1888 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1957 I915_WRITE(GTIIR, I915_READ(GTIIR));
1889 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1958 I915_WRITE(GTIIR, I915_READ(GTIIR));
1890 I915_WRITE(GTIMR, 0); 1959 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1891 I915_WRITE(GTIER, render_irqs); 1960 I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
1961 GT_GEN6_BLT_CS_ERROR_INTERRUPT |
1962 GT_GEN6_BLT_USER_INTERRUPT |
1963 GT_GEN6_BSD_USER_INTERRUPT |
1964 GT_GEN6_BSD_CS_ERROR_INTERRUPT |
1965 GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
1966 GT_PIPE_NOTIFY |
1967 GT_RENDER_CS_ERROR_INTERRUPT |
1968 GT_SYNC_STATUS |
1969 GT_USER_INTERRUPT);
1892 POSTING_READ(GTIER); 1970 POSTING_READ(GTIER);
1893 1971
1894 /* ack & enable invalid PTE error interrupts */ 1972 /* ack & enable invalid PTE error interrupts */
@@ -2167,9 +2245,9 @@ static int i915_irq_postinstall(struct drm_device *dev)
2167 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2245 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2168 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2246 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2169 hotplug_en |= HDMID_HOTPLUG_INT_EN; 2247 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2170 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) 2248 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
2171 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2249 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2172 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) 2250 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
2173 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2251 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2174 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 2252 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2175 hotplug_en |= CRT_HOTPLUG_INT_EN; 2253 hotplug_en |= CRT_HOTPLUG_INT_EN;
@@ -2329,10 +2407,8 @@ static void i965_irq_preinstall(struct drm_device * dev)
2329 2407
2330 atomic_set(&dev_priv->irq_received, 0); 2408 atomic_set(&dev_priv->irq_received, 0);
2331 2409
2332 if (I915_HAS_HOTPLUG(dev)) { 2410 I915_WRITE(PORT_HOTPLUG_EN, 0);
2333 I915_WRITE(PORT_HOTPLUG_EN, 0); 2411 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2334 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2335 }
2336 2412
2337 I915_WRITE(HWSTAM, 0xeffe); 2413 I915_WRITE(HWSTAM, 0xeffe);
2338 for_each_pipe(pipe) 2414 for_each_pipe(pipe)
@@ -2345,11 +2421,13 @@ static void i965_irq_preinstall(struct drm_device * dev)
2345static int i965_irq_postinstall(struct drm_device *dev) 2421static int i965_irq_postinstall(struct drm_device *dev)
2346{ 2422{
2347 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2423 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2424 u32 hotplug_en;
2348 u32 enable_mask; 2425 u32 enable_mask;
2349 u32 error_mask; 2426 u32 error_mask;
2350 2427
2351 /* Unmask the interrupts that we always want on. */ 2428 /* Unmask the interrupts that we always want on. */
2352 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 2429 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2430 I915_DISPLAY_PORT_INTERRUPT |
2353 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2431 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2354 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2432 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2355 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2433 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -2365,13 +2443,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
2365 dev_priv->pipestat[0] = 0; 2443 dev_priv->pipestat[0] = 0;
2366 dev_priv->pipestat[1] = 0; 2444 dev_priv->pipestat[1] = 0;
2367 2445
2368 if (I915_HAS_HOTPLUG(dev)) {
2369 /* Enable in IER... */
2370 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2371 /* and unmask in IMR */
2372 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2373 }
2374
2375 /* 2446 /*
2376 * Enable some error detection, note the instruction error mask 2447 * Enable some error detection, note the instruction error mask
2377 * bit is reserved, so we leave it masked. 2448 * bit is reserved, so we leave it masked.
@@ -2391,36 +2462,40 @@ static int i965_irq_postinstall(struct drm_device *dev)
2391 I915_WRITE(IER, enable_mask); 2462 I915_WRITE(IER, enable_mask);
2392 POSTING_READ(IER); 2463 POSTING_READ(IER);
2393 2464
2394 if (I915_HAS_HOTPLUG(dev)) { 2465 /* Note HDMI and DP share hotplug bits */
2395 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2466 hotplug_en = 0;
2396 2467 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2397 /* Note HDMI and DP share bits */ 2468 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2398 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2469 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2399 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2470 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2400 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 2471 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2401 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2472 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2402 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2473 if (IS_G4X(dev)) {
2403 hotplug_en |= HDMID_HOTPLUG_INT_EN; 2474 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
2404 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
2405 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2475 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2406 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) 2476 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
2407 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2477 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2408 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 2478 } else {
2409 hotplug_en |= CRT_HOTPLUG_INT_EN; 2479 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
2480 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2481 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
2482 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2483 }
2484 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2485 hotplug_en |= CRT_HOTPLUG_INT_EN;
2410 2486
2411 /* Programming the CRT detection parameters tends 2487 /* Programming the CRT detection parameters tends
2412 to generate a spurious hotplug event about three 2488 to generate a spurious hotplug event about three
2413 seconds later. So just do it once. 2489 seconds later. So just do it once.
2414 */ 2490 */
2415 if (IS_G4X(dev)) 2491 if (IS_G4X(dev))
2416 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 2492 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2417 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2493 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2418 } 2494 }
2419 2495
2420 /* Ignore TV since it's buggy */ 2496 /* Ignore TV since it's buggy */
2421 2497
2422 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2498 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2423 }
2424 2499
2425 intel_opregion_enable_asle(dev); 2500 intel_opregion_enable_asle(dev);
2426 2501
@@ -2478,8 +2553,7 @@ static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
2478 ret = IRQ_HANDLED; 2553 ret = IRQ_HANDLED;
2479 2554
2480 /* Consume port. Then clear IIR or we'll miss events */ 2555 /* Consume port. Then clear IIR or we'll miss events */
2481 if ((I915_HAS_HOTPLUG(dev)) && 2556 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
2482 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2483 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2557 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2484 2558
2485 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2559 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
@@ -2552,10 +2626,8 @@ static void i965_irq_uninstall(struct drm_device * dev)
2552 if (!dev_priv) 2626 if (!dev_priv)
2553 return; 2627 return;
2554 2628
2555 if (I915_HAS_HOTPLUG(dev)) { 2629 I915_WRITE(PORT_HOTPLUG_EN, 0);
2556 I915_WRITE(PORT_HOTPLUG_EN, 0); 2630 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2557 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2558 }
2559 2631
2560 I915_WRITE(HWSTAM, 0xffffffff); 2632 I915_WRITE(HWSTAM, 0xffffffff);
2561 for_each_pipe(pipe) 2633 for_each_pipe(pipe)
@@ -2576,6 +2648,7 @@ void intel_irq_init(struct drm_device *dev)
2576 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 2648 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2577 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 2649 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
2578 INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work); 2650 INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
2651 INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
2579 2652
2580 dev->driver->get_vblank_counter = i915_get_vblank_counter; 2653 dev->driver->get_vblank_counter = i915_get_vblank_counter;
2581 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 2654 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 48d5e8e051cf..28725ce5b82c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -115,6 +115,7 @@
115 115
116#define GEN6_PTE_VALID (1 << 0) 116#define GEN6_PTE_VALID (1 << 0)
117#define GEN6_PTE_UNCACHED (1 << 1) 117#define GEN6_PTE_UNCACHED (1 << 1)
118#define HSW_PTE_UNCACHED (0)
118#define GEN6_PTE_CACHE_LLC (2 << 1) 119#define GEN6_PTE_CACHE_LLC (2 << 1)
119#define GEN6_PTE_CACHE_LLC_MLC (3 << 1) 120#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
120#define GEN6_PTE_CACHE_BITS (3 << 1) 121#define GEN6_PTE_CACHE_BITS (3 << 1)
@@ -217,6 +218,9 @@
217#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) 218#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
218#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) 219#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
219#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) 220#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
221#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
222#define MI_ARB_ENABLE (1<<0)
223#define MI_ARB_DISABLE (0<<0)
220 224
221#define MI_SET_CONTEXT MI_INSTR(0x18, 0) 225#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
222#define MI_MM_SPACE_GTT (1<<8) 226#define MI_MM_SPACE_GTT (1<<8)
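MI_ARB_ON_OFF is presumably emitted around MI_SET_CONTEXT so that ring arbitration cannot preempt a context switch mid-sequence; a hedged sketch of such an emit (the operand dword and the overall layout are assumptions, not shown in this diff):

ret = intel_ring_begin(ring, 6);
if (ret)
	return ret;

intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, new_context_desc);	/* assumed operand */
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);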
@@ -299,6 +303,7 @@
299#define DISPLAY_PLANE_B (1<<20) 303#define DISPLAY_PLANE_B (1<<20)
300#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2)) 304#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
301#define PIPE_CONTROL_CS_STALL (1<<20) 305#define PIPE_CONTROL_CS_STALL (1<<20)
306#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
302#define PIPE_CONTROL_QW_WRITE (1<<14) 307#define PIPE_CONTROL_QW_WRITE (1<<14)
303#define PIPE_CONTROL_DEPTH_STALL (1<<13) 308#define PIPE_CONTROL_DEPTH_STALL (1<<13)
304#define PIPE_CONTROL_WRITE_FLUSH (1<<12) 309#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
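PIPE_CONTROL_TLB_INVALIDATE gives the render ring a way to invalidate GPU TLBs from a PIPE_CONTROL; the hardware generally wants a CS stall alongside it so the invalidate happens after preceding work, and a CS stall is commonly paired with a post-sync write. A hedged sketch using only bits visible in this hunk (scratch_addr is an assumed write target):

u32 flags = PIPE_CONTROL_CS_STALL |
	    PIPE_CONTROL_TLB_INVALIDATE |
	    PIPE_CONTROL_QW_WRITE;

ret = intel_ring_begin(ring, 4);
if (ret)
	return ret;

intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
intel_ring_emit(ring, flags);
intel_ring_emit(ring, scratch_addr);	/* assumed QW write target */
intel_ring_emit(ring, 0);
intel_ring_advance(ring);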
@@ -686,10 +691,10 @@
686#define GEN6_BLITTER_FBC_NOTIFY (1<<3) 691#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
687 692
688#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 693#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050