-rw-r--r--Documentation/cachetlb.txt23
-rw-r--r--Documentation/cpu-freq/core.txt2
-rw-r--r--Documentation/feature-removal-schedule.txt31
-rw-r--r--Documentation/filesystems/bfs.txt2
-rw-r--r--Documentation/hwmon/f71805f56
-rw-r--r--Documentation/hwmon/it8715
-rw-r--r--Documentation/hwmon/k8temp2
-rw-r--r--Documentation/hwmon/pc8742738
-rw-r--r--Documentation/hwmon/sysfs-interface4
-rw-r--r--Documentation/hwmon/w83627ehf2
-rw-r--r--Documentation/hwmon/w83791d2
-rw-r--r--Documentation/hwmon/w83793110
-rw-r--r--Documentation/kbuild/kconfig-language.txt8
-rw-r--r--Documentation/kernel-parameters.txt6
-rw-r--r--MAINTAINERS30
-rw-r--r--Makefile18
-rw-r--r--arch/arm/kernel/apm.c2
-rw-r--r--arch/arm/kernel/ecard.c2
-rw-r--r--arch/arm/mach-omap1/Kconfig1
-rw-r--r--arch/arm/mach-omap1/board-osk.c1
-rw-r--r--arch/arm/mach-pxa/Kconfig16
-rw-r--r--arch/arm/plat-omap/Kconfig2
-rw-r--r--arch/arm26/kernel/ecard.c2
-rw-r--r--arch/arm26/kernel/irq.c2
-rw-r--r--arch/cris/arch-v10/drivers/axisflashmap.c2
-rw-r--r--arch/cris/arch-v10/drivers/gpio.c2
-rw-r--r--arch/cris/arch-v32/drivers/axisflashmap.c2
-rw-r--r--arch/cris/arch-v32/drivers/gpio.c2
-rw-r--r--arch/cris/arch-v32/kernel/signal.c2
-rw-r--r--arch/cris/kernel/profile.c2
-rw-r--r--arch/h8300/kernel/ints.c2
-rw-r--r--arch/h8300/platform/h8s/ints.c2
-rw-r--r--arch/i386/kernel/apm.c2
-rw-r--r--arch/i386/kernel/cpu/cpufreq/Kconfig6
-rw-r--r--arch/i386/kernel/cpu/cpufreq/Makefile2
-rw-r--r--arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c813
-rw-r--r--arch/i386/kernel/cpu/cpufreq/gx-suspmod.c4
-rw-r--r--arch/i386/kernel/cpu/cpufreq/longhaul.c8
-rw-r--r--arch/i386/kernel/cpu/cpufreq/p4-clockmod.c38
-rw-r--r--arch/i386/kernel/cpu/cpufreq/sc520_freq.c7
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c15
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-lib.c32
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-lib.h1
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-smi.c3
-rw-r--r--arch/i386/kernel/microcode.c6
-rw-r--r--arch/i386/kernel/smpboot.c2
-rw-r--r--arch/ia64/hp/sim/simserial.c2
-rw-r--r--arch/ia64/kernel/Makefile1
-rw-r--r--arch/ia64/kernel/crash.c22
-rw-r--r--arch/ia64/kernel/crash_dump.c48
-rw-r--r--arch/ia64/kernel/jprobes.S3
-rw-r--r--arch/ia64/kernel/kprobes.c226
-rw-r--r--arch/ia64/kernel/machine_kexec.c7
-rw-r--r--arch/ia64/kernel/mca.c2
-rw-r--r--arch/ia64/kernel/setup.c33
-rw-r--r--arch/ia64/kernel/smp.c4
-rw-r--r--arch/ia64/kernel/traps.c50
-rw-r--r--arch/ia64/mm/contig.c9
-rw-r--r--arch/ia64/mm/init.c9
-rw-r--r--arch/ia64/sn/kernel/setup.c12
-rw-r--r--arch/ia64/sn/kernel/sn2/sn_hwperf.c2
-rw-r--r--arch/ia64/sn/kernel/xpc_channel.c15
-rw-r--r--arch/ia64/sn/kernel/xpc_main.c64
-rw-r--r--arch/m68k/mm/kmap.c2
-rw-r--r--arch/mips/kernel/apm.c2
-rw-r--r--arch/mips/mm/init.c25
-rw-r--r--arch/parisc/hpux/sys_hpux.c2
-rw-r--r--arch/parisc/kernel/unwind.c2
-rw-r--r--arch/powerpc/kernel/nvram_64.c4
-rw-r--r--arch/powerpc/kernel/pci_32.c2
-rw-r--r--arch/powerpc/mm/imalloc.c6
-rw-r--r--arch/powerpc/platforms/4xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/embedded6xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/pseries/eeh_cache.c2
-rw-r--r--arch/ppc/8260_io/fcc_enet.c4
-rw-r--r--arch/ppc/8xx_io/cs4218_tdm.c2
-rw-r--r--arch/ppc/Kconfig4
-rw-r--r--arch/ppc/platforms/4xx/Kconfig2
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/kernel/debug.c8
-rw-r--r--arch/s390/kernel/s390_ext.c2
-rw-r--r--arch/sparc/kernel/irq.c4
-rw-r--r--arch/sparc/kernel/sun4d_irq.c2
-rw-r--r--arch/sparc64/kernel/sys_sunos32.c4
-rw-r--r--arch/um/drivers/net_kern.c18
-rw-r--r--arch/um/include/net_kern.h2
-rw-r--r--arch/um/sys-i386/ldt.c4
-rw-r--r--arch/v850/Kconfig28
-rw-r--r--arch/x86_64/kernel/cpufreq/Kconfig6
-rw-r--r--arch/x86_64/kernel/cpufreq/Makefile2
-rw-r--r--arch/xtensa/kernel/Makefile2
-rw-r--r--arch/xtensa/kernel/syscall.c2
-rw-r--r--block/as-iosched.c15
-rw-r--r--block/cfq-iosched.c18
-rw-r--r--block/ll_rw_blk.c28
-rw-r--r--crypto/blkcipher.c1
-rw-r--r--drivers/acorn/block/fd1772.c4
-rw-r--r--drivers/ata/pdc_adma.c4
-rw-r--r--drivers/atm/eni.c4
-rw-r--r--drivers/atm/he.c2
-rw-r--r--drivers/atm/lanai.c2
-rw-r--r--drivers/atm/nicstar.c4
-rw-r--r--drivers/atm/zatm.c4
-rw-r--r--drivers/base/class.c2
-rw-r--r--drivers/base/dmapool.c2
-rw-r--r--drivers/base/platform.c4
-rw-r--r--drivers/block/Kconfig7
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/cciss.c25
-rw-r--r--drivers/block/cpqarray.c10
-rw-r--r--drivers/block/swim_iop.c578
-rw-r--r--drivers/cdrom/cdrom.c8
-rw-r--r--drivers/cdrom/cm206.c2
-rw-r--r--drivers/char/Kconfig10
-rw-r--r--drivers/char/agp/Kconfig4
-rw-r--r--drivers/char/agp/generic.c25
-rw-r--r--drivers/char/consolemap.c2
-rw-r--r--drivers/char/lcd.c2
-rw-r--r--drivers/char/lp.c2
-rw-r--r--drivers/char/mxser_new.c8
-rw-r--r--drivers/char/n_r3964.c37
-rw-r--r--drivers/char/n_tty.c1
-rw-r--r--drivers/char/pcmcia/synclink_cs.c2
-rw-r--r--drivers/char/rio/riocmd.c2
-rw-r--r--drivers/char/rtc.c38
-rw-r--r--drivers/char/sx.c8
-rw-r--r--drivers/char/synclink.c2
-rw-r--r--drivers/char/synclinkmp.c4
-rw-r--r--drivers/char/sysrq.c37
-rw-r--r--drivers/char/tty_io.c8
-rw-r--r--drivers/char/viocons.c10
-rw-r--r--drivers/char/vt.c2
-rw-r--r--drivers/char/vt_ioctl.c10
-rw-r--r--drivers/char/watchdog/at91rm9200_wdt.c6
-rw-r--r--drivers/char/watchdog/mpcore_wdt.c2
-rw-r--r--drivers/char/watchdog/omap_wdt.c2
-rw-r--r--drivers/char/watchdog/pcwd_usb.c5
-rw-r--r--drivers/char/watchdog/rm9k_wdt.c44
-rw-r--r--drivers/cpufreq/cpufreq.c153
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c33
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c33
-rw-r--r--drivers/cpufreq/cpufreq_performance.c9
-rw-r--r--drivers/cpufreq/cpufreq_powersave.c9
-rw-r--r--drivers/cpufreq/cpufreq_stats.c11
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c11
-rw-r--r--drivers/cpufreq/freq_table.c28
-rw-r--r--drivers/fc4/fc.c10
-rw-r--r--drivers/hwmon/Kconfig56
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/ams/Makefile8
-rw-r--r--drivers/hwmon/ams/ams-core.c265
-rw-r--r--drivers/hwmon/ams/ams-i2c.c299
-rw-r--r--drivers/hwmon/ams/ams-input.c160
-rw-r--r--drivers/hwmon/ams/ams-pmu.c207
-rw-r--r--drivers/hwmon/ams/ams.h72
-rw-r--r--drivers/hwmon/f71805f.c569
-rw-r--r--drivers/hwmon/hdaps.c68
-rw-r--r--drivers/hwmon/hwmon-vid.c4
-rw-r--r--drivers/hwmon/it87.c202
-rw-r--r--drivers/hwmon/k8temp.c4
-rw-r--r--drivers/hwmon/pc87360.c2
-rw-r--r--drivers/hwmon/pc87427.c627
-rw-r--r--drivers/hwmon/w83627ehf.c2
-rw-r--r--drivers/hwmon/w83792d.c2
-rw-r--r--drivers/hwmon/w83793.c1609
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c2
-rw-r--r--drivers/i2c/chips/tps65010.c21
-rw-r--r--drivers/ide/Kconfig2
-rw-r--r--drivers/ide/ide-floppy.c2
-rw-r--r--drivers/ide/ide-tape.c8
-rw-r--r--drivers/ide/pci/hpt366.c886
-rw-r--r--drivers/infiniband/core/Makefile6
-rw-r--r--drivers/infiniband/core/cm.c4
-rw-r--r--drivers/infiniband/core/cma.c416
-rw-r--r--drivers/infiniband/core/fmr_pool.c12
-rw-r--r--drivers/infiniband/core/mad.c90
-rw-r--r--drivers/infiniband/core/mad_priv.h6
-rw-r--r--drivers/infiniband/core/ucma.c874
-rw-r--r--drivers/infiniband/core/uverbs_marshall.c5
-rw-r--r--drivers/infiniband/core/uverbs_mem.c12
-rw-r--r--drivers/infiniband/hw/amso1100/c2_qp.c13
-rw-r--r--drivers/infiniband/hw/ipath/Makefile1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_dma.c189
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c5
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6110.c3
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6120.c8
-rw-r--r--drivers/infiniband/hw/ipath/ipath_init_chip.c3
-rw-r--r--drivers/infiniband/hw/ipath/ipath_intr.c3
-rw-r--r--drivers/infiniband/hw/ipath/ipath_keys.c8
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mr.c7
-rw-r--r--drivers/infiniband/hw/ipath/ipath_sysfs.c3
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.h2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c75
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c3
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c4
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c125
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c81
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h2
-rw-r--r--drivers/input/keyboard/hilkbd.c5
-rw-r--r--drivers/isdn/act2000/act2000_isa.c2
-rw-r--r--drivers/isdn/capi/capidrv.c2
-rw-r--r--drivers/isdn/divert/divert_procfs.c2
-rw-r--r--drivers/isdn/divert/isdn_divert.c6
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c118
-rw-r--r--drivers/isdn/hysdn/hysdn_procconf.c2
-rw-r--r--drivers/isdn/hysdn/hysdn_proclog.c2
-rw-r--r--drivers/isdn/i4l/isdn_audio.c6
-rw-r--r--drivers/isdn/i4l/isdn_net.c2
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c2
-rw-r--r--drivers/isdn/pcbit/layer2.c2
-rw-r--r--drivers/kvm/Kconfig4
-rw-r--r--drivers/kvm/kvm.h8
-rw-r--r--drivers/kvm/kvm_main.c38
-rw-r--r--drivers/kvm/kvm_svm.h2
-rw-r--r--drivers/kvm/kvm_vmx.h2
-rw-r--r--drivers/kvm/mmu.c17
-rw-r--r--drivers/kvm/paging_tmpl.h20
-rw-r--r--drivers/kvm/svm.c78
-rw-r--r--drivers/kvm/vmx.c62
-rw-r--r--drivers/kvm/x86_emulate.c8
-rw-r--r--drivers/kvm/x86_emulate.h2
-rw-r--r--drivers/leds/Kconfig22
-rw-r--r--drivers/macintosh/adb.c2
-rw-r--r--drivers/macintosh/apm_emu.c2
-rw-r--r--drivers/macintosh/smu.c2
-rw-r--r--drivers/macintosh/via-pmu68k.c2
-rw-r--r--drivers/md/faulty.c2
-rw-r--r--drivers/md/raid1.c2
-rw-r--r--drivers/md/raid10.c6
-rw-r--r--drivers/md/raid5.c20
-rw-r--r--drivers/media/dvb/bt8xx/dst_ca.c2
-rw-r--r--drivers/media/dvb/bt8xx/dvb-bt8xx.c2
-rw-r--r--drivers/media/dvb/ttusb-dec/ttusbdecfe.c4
-rw-r--r--drivers/media/video/dabusb.c4
-rw-r--r--drivers/media/video/ov7670.c2
-rw-r--r--drivers/media/video/planb.c2
-rw-r--r--drivers/media/video/usbvideo/usbvideo.c2
-rw-r--r--drivers/media/video/videocodec.c2
-rw-r--r--drivers/message/i2o/core.h4
-rw-r--r--drivers/message/i2o/driver.c2
-rw-r--r--drivers/message/i2o/exec-osm.c2
-rw-r--r--drivers/message/i2o/i2o_config.c4
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c2
-rw-r--r--drivers/mtd/rfd_ftl.c2
-rw-r--r--drivers/net/appletalk/ipddp.c2
-rw-r--r--drivers/net/bsd_comp.c2
-rw-r--r--drivers/net/e100.c3
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/irda-usb.c2
-rw-r--r--drivers/net/irda/irport.c2
-rw-r--r--drivers/net/lp486e.c4
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/ppp_deflate.c4
-rw-r--r--drivers/net/ppp_mppe.c2
-rw-r--r--drivers/net/skge.c2
-rw-r--r--drivers/net/slip.c6
-rw-r--r--drivers/net/wan/hostess_sv11.c2
-rw-r--r--drivers/net/wan/pc300_drv.c2
-rw-r--r--drivers/net/wan/pc300_tty.c2
-rw-r--r--drivers/net/wan/x25_asy.c8
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_download.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c2
-rw-r--r--drivers/net/wireless/ipw2100.c2
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c2
-rw-r--r--drivers/net/wireless/wavelan_cs.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c2
-rw-r--r--drivers/parisc/iosapic.c2
-rw-r--r--drivers/pci/hotplug/cpqphp_nvram.c8
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c2
-rw-r--r--drivers/pcmcia/at91_cf.c2
-rw-r--r--drivers/pcmcia/omap_cf.c2
-rw-r--r--drivers/pnp/isapnp/core.c22
-rw-r--r--drivers/pnp/pnpacpi/core.c6
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c22
-rw-r--r--drivers/pnp/pnpbios/core.c16
-rw-r--r--drivers/pnp/pnpbios/proc.c8
-rw-r--r--drivers/pnp/pnpbios/rsparser.c16
-rw-r--r--drivers/rtc/rtc-at91rm9200.c5
-rw-r--r--drivers/rtc/rtc-dev.c2
-rw-r--r--drivers/rtc/rtc-omap.c3
-rw-r--r--drivers/rtc/rtc-proc.c4
-rw-r--r--drivers/rtc/rtc-s3c.c6
-rw-r--r--drivers/rtc/rtc-sa1100.c4
-rw-r--r--drivers/rtc/rtc-sysfs.c2
-rw-r--r--drivers/s390/char/con3215.c2
-rw-r--r--drivers/s390/char/keyboard.c2
-rw-r--r--drivers/s390/char/sclp_cpi.c2
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c4
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.c4
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c2
-rw-r--r--drivers/s390/net/ctcmain.c6
-rw-r--r--drivers/s390/net/iucv.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c2
-rw-r--r--drivers/sbus/char/vfc_dev.c2
-rw-r--r--drivers/scsi/aacraid/aachba.c2
-rw-r--r--drivers/scsi/aacraid/comminit.c2
-rw-r--r--drivers/scsi/aha1542.c2
-rw-r--r--drivers/scsi/aic7xxx_old.c2
-rw-r--r--drivers/scsi/dc395x.c2
-rw-r--r--drivers/scsi/dpt_i2o.c10
-rw-r--r--drivers/scsi/initio.c2
-rw-r--r--drivers/scsi/osst.c2
-rw-r--r--drivers/scsi/pluto.c2
-rw-r--r--drivers/scsi/sr_ioctl.c2
-rw-r--r--drivers/scsi/sr_vendor.c4
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c2
-rw-r--r--drivers/serial/8250_pci.c24
-rw-r--r--drivers/serial/Kconfig4
-rw-r--r--drivers/serial/icom.c2
-rw-r--r--drivers/usb/gadget/at91_udc.c2
-rw-r--r--drivers/usb/gadget/serial.c2
-rw-r--r--drivers/usb/host/hc_crisv10.c2
-rw-r--r--drivers/usb/misc/auerswald.c4
-rw-r--r--drivers/usb/misc/uss720.c2
-rw-r--r--drivers/usb/net/rndis_host.c2
-rw-r--r--drivers/usb/serial/cypress_m8.c2
-rw-r--r--drivers/usb/serial/digi_acceleport.c4
-rw-r--r--drivers/usb/serial/io_ti.c2
-rw-r--r--drivers/usb/serial/ipaq.c2
-rw-r--r--drivers/usb/serial/kobil_sct.c4
-rw-r--r--drivers/usb/serial/pl2303.c2
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c2
-rw-r--r--drivers/usb/serial/whiteheat.c4
-rw-r--r--drivers/usb/storage/sddr09.c2
-rw-r--r--drivers/video/amba-clcd.c2
-rw-r--r--drivers/video/amifb.c8
-rw-r--r--drivers/video/aty/atyfb_base.c4
-rw-r--r--drivers/video/matrox/i2c-matroxfb.c2
-rw-r--r--drivers/video/matrox/matroxfb_base.c2
-rw-r--r--drivers/video/matrox/matroxfb_crtc2.c2
-rw-r--r--drivers/video/sstfb.c335
-rw-r--r--drivers/w1/slaves/Kconfig4
-rw-r--r--fs/Kconfig14
-rw-r--r--fs/aio.c2
-rw-r--r--fs/autofs4/inode.c2
-rw-r--r--fs/befs/btree.c2
-rw-r--r--fs/befs/debug.c6
-rw-r--r--fs/bfs/inode.c4
-rw-r--r--fs/binfmt_elf_fdpic.c3
-rw-r--r--fs/binfmt_misc.c2
-rw-r--r--fs/bio.c2
-rw-r--r--fs/block_dev.c202
-rw-r--r--fs/cifs/cifssmb.c2
-rw-r--r--fs/debugfs/inode.c39
-rw-r--r--fs/inode.c43
-rw-r--r--fs/jffs/inode-v23.c4
-rw-r--r--fs/jffs/intrep.c14
-rw-r--r--fs/jfs/jfs_dtree.c4
-rw-r--r--fs/jfs/jfs_imap.c2
-rw-r--r--fs/lockd/clntlock.c10
-rw-r--r--fs/lockd/clntproc.c39
-rw-r--r--fs/lockd/svclock.c4
-rw-r--r--fs/lockd/svcshare.c2
-rw-r--r--fs/lockd/xdr.c8
-rw-r--r--fs/lockd/xdr4.c8
-rw-r--r--fs/namespace.c5
-rw-r--r--fs/ncpfs/inode.c34
-rw-r--r--fs/nfs/nfs4proc.c2
-rw-r--r--fs/nfsd/export.c33
-rw-r--r--fs/nfsd/lockd.c2
-rw-r--r--fs/nfsd/nfs4proc.c625
-rw-r--r--fs/nfsd/nfs4state.c91
-rw-r--r--fs/nfsd/nfs4xdr.c14
-rw-r--r--fs/nfsd/nfsfh.c6
-rw-r--r--fs/nfsd/vfs.c2
-rw-r--r--fs/ocfs2/alloc.c2
-rw-r--r--fs/ocfs2/cluster/heartbeat.c4
-rw-r--r--fs/ocfs2/cluster/nodemanager.c6
-rw-r--r--fs/ocfs2/cluster/tcp.c10
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c4
-rw-r--r--fs/ocfs2/dlm/dlmlock.c4
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c2
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c6
-rw-r--r--fs/ocfs2/file.c8
-rw-r--r--fs/ocfs2/localalloc.c2
-rw-r--r--fs/ocfs2/slot_map.c2
-rw-r--r--fs/ocfs2/suballoc.c6
-rw-r--r--fs/ocfs2/super.c6
-rw-r--r--fs/ocfs2/vote.c4
-rw-r--r--fs/pipe.c12
-rw-r--r--fs/read_write.c2
-rw-r--r--fs/reiserfs/xattr_acl.c2
-rw-r--r--fs/smbfs/inode.c5
-rw-r--r--fs/smbfs/proc.c6
-rw-r--r--fs/smbfs/smbiod.c5
-rw-r--r--fs/splice.c8
-rw-r--r--include/asm-alpha/cacheflush.h1
-rw-r--r--include/asm-arm/cacheflush.h2
-rw-r--r--include/asm-arm/thread_info.h2
-rw-r--r--include/asm-arm26/cacheflush.h1
-rw-r--r--include/asm-avr32/cacheflush.h1
-rw-r--r--include/asm-avr32/pgalloc.h2
-rw-r--r--include/asm-cris/cacheflush.h1
-rw-r--r--include/asm-frv/cacheflush.h1
-rw-r--r--include/asm-frv/thread_info.h2
-rw-r--r--include/asm-h8300/cacheflush.h1
-rw-r--r--include/asm-i386/cacheflush.h1
-rw-r--r--include/asm-i386/msr.h5
-rw-r--r--include/asm-i386/thread_info.h2
-rw-r--r--include/asm-ia64/break.h4
-rw-r--r--include/asm-ia64/cacheflush.h1
-rw-r--r--include/asm-ia64/kprobes.h1
-rw-r--r--include/asm-ia64/pci.h3
-rw-r--r--include/asm-ia64/scatterlist.h3
-rw-r--r--include/asm-ia64/sn/xpc.h2
-rw-r--r--include/asm-ia64/thread_info.h2
-rw-r--r--include/asm-m32r/cacheflush.h3
-rw-r--r--include/asm-m68k/cacheflush.h2
-rw-r--r--include/asm-m68k/swim_iop.h221
-rw-r--r--include/asm-m68knommu/cacheflush.h1
-rw-r--r--include/asm-mips/cacheflush.h2
-rw-r--r--include/asm-mips/page.h16
-rw-r--r--include/asm-parisc/cacheflush.h2
-rw-r--r--include/asm-powerpc/cacheflush.h1
-rw-r--r--include/asm-powerpc/thread_info.h2
-rw-r--r--include/asm-s390/cacheflush.h1
-rw-r--r--include/asm-sh/cpu-sh2/cacheflush.h2
-rw-r--r--include/asm-sh/cpu-sh3/cacheflush.h3
-rw-r--r--include/asm-sh/cpu-sh4/cacheflush.h1
-rw-r--r--include/asm-sh/thread_info.h2
-rw-r--r--include/asm-sh64/cacheflush.h2
-rw-r--r--include/asm-sh64/pgalloc.h2
-rw-r--r--include/asm-sparc/cacheflush.h1
-rw-r--r--include/asm-sparc64/cacheflush.h1
-rw-r--r--include/asm-v850/cacheflush.h1
-rw-r--r--include/asm-x86_64/cacheflush.h1
-rw-r--r--include/asm-x86_64/msr.h4
-rw-r--r--include/asm-x86_64/thread_info.h2
-rw-r--r--include/asm-xtensa/cacheflush.h2
-rw-r--r--include/asm-xtensa/termbits.h11
-rw-r--r--include/asm-xtensa/uaccess.h1
-rw-r--r--include/linux/aio.h1
-rw-r--r--include/linux/bio.h1
-rw-r--r--include/linux/coda_linux.h2
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/configfs.h25
-rw-r--r--include/linux/cpufreq.h3
-rw-r--r--include/linux/cpuset.h22
-rw-r--r--include/linux/fb.h2
-rw-r--r--include/linux/freezer.h11
-rw-r--r--include/linux/fs.h1
-rw-r--r--include/linux/gameport.h2
-rw-r--r--include/linux/highmem.h7
-rw-r--r--include/linux/i2c-id.h1
-rw-r--r--include/linux/init_task.h1
-rw-r--r--include/linux/lockd/bind.h2
-rw-r--r--include/linux/lockd/lockd.h2
-rw-r--r--include/linux/lockd/sm_inter.h2
-rw-r--r--include/linux/lockd/xdr.h8
-rw-r--r--include/linux/lockdep.h16
-rw-r--r--include/linux/mount.h1
-rw-r--r--include/linux/n_r3964.h2
-rw-r--r--include/linux/ncp_mount.h2
-rw-r--r--include/linux/nfsd/nfsd.h4
-rw-r--r--include/linux/nfsd/state.h1
-rw-r--r--include/linux/nfsd/xdr4.h40
-rw-r--r--include/linux/nsproxy.h1
-rw-r--r--include/linux/pci_ids.h4
-rw-r--r--include/linux/pipe_fs_i.h5
-rw-r--r--include/linux/platform_device.h2
-rw-r--r--include/linux/reciprocal_div.h32
-rw-r--r--include/linux/sched.h1
-rw-r--r--include/linux/slab.h306
-rw-r--r--include/linux/slab_def.h100
-rw-r--r--include/linux/smb_fs_sb.h2
-rw-r--r--include/linux/sysctl.h2
-rw-r--r--include/linux/sysrq.h22
-rw-r--r--include/net/bluetooth/hci.h4
-rw-r--r--include/rdma/ib_marshall.h5
-rw-r--r--include/rdma/ib_verbs.h253
-rw-r--r--include/rdma/rdma_cm.h62
-rw-r--r--include/rdma/rdma_cm_ib.h3
-rw-r--r--include/rdma/rdma_user_cm.h206
-rw-r--r--include/video/sstfb.h13
-rw-r--r--init/Kconfig2
-rw-r--r--ipc/msgutil.c4
-rw-r--r--kernel/cpuset.c82
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/lockdep.c203
-rw-r--r--kernel/module.c25
-rw-r--r--kernel/nsproxy.c4
-rw-r--r--kernel/power/Kconfig9
-rw-r--r--kernel/power/process.c21
-rw-r--r--kernel/relay.c4
-rw-r--r--kernel/sched.c4
-rw-r--r--kernel/signal.c4
-rw-r--r--kernel/sysctl.c3
-rw-r--r--kernel/timer.c14
-rw-r--r--lib/Kconfig5
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/Makefile5
-rw-r--r--lib/ioremap.c1
-rw-r--r--lib/reciprocal_div.c9
-rw-r--r--mm/hugetlb.c8
-rw-r--r--mm/memory.c10
-rw-r--r--mm/oom_kill.c2
-rw-r--r--mm/page_alloc.c2
-rw-r--r--mm/slab.c28
-rw-r--r--mm/slob.c16
-rw-r--r--mm/vmscan.c8
-rw-r--r--net/bluetooth/hci_sock.c4
-rw-r--r--net/ipv4/ip_fragment.c2
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c11
-rw-r--r--net/sunrpc/cache.c31
-rw-r--r--net/sunrpc/svc.c2
-rw-r--r--net/sunrpc/svcauth_unix.c5
-rw-r--r--net/tipc/config.c2
-rw-r--r--scripts/kconfig/conf.c2
-rw-r--r--scripts/kconfig/confdata.c37
-rw-r--r--scripts/kconfig/gconf.c35
-rw-r--r--scripts/kconfig/gconf.glade4
-rw-r--r--scripts/kconfig/lkc.h2
-rw-r--r--scripts/kconfig/lkc_proto.h3
-rw-r--r--scripts/kconfig/mconf.c21
-rw-r--r--scripts/kconfig/qconf.cc15
-rw-r--r--scripts/kconfig/qconf.h3
-rw-r--r--scripts/kconfig/symbol.c3
-rw-r--r--scripts/kconfig/zconf.tab.c_shipped2
-rw-r--r--scripts/kconfig/zconf.y2
-rw-r--r--scripts/mod/modpost.c1
-rw-r--r--sound/aoa/fabrics/Kconfig2
-rw-r--r--sound/core/oss/mixer_oss.c2
-rw-r--r--sound/oss/ad1848.c2
-rw-r--r--sound/oss/cs4232.c2
-rw-r--r--sound/oss/emu10k1/audio.c6
-rw-r--r--sound/oss/emu10k1/cardmi.c2
-rw-r--r--sound/oss/emu10k1/cardmo.c2
-rw-r--r--sound/oss/emu10k1/midi.c10
-rw-r--r--sound/oss/emu10k1/mixer.c2
-rw-r--r--sound/oss/hal2.c2
-rw-r--r--sound/oss/mpu401.c2
-rw-r--r--sound/oss/opl3.c2
-rw-r--r--sound/oss/sb_common.c2
-rw-r--r--sound/oss/sb_midi.c4
-rw-r--r--sound/oss/sb_mixer.c2
-rw-r--r--sound/oss/v_midi.c2
-rw-r--r--sound/oss/waveartist.c2
545 files changed, 10488 insertions, 4583 deletions
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index 53245c429f7d..73e794f0ff09 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -179,10 +179,21 @@ Here are the routines, one by one:
179 lines associated with 'mm'. 179 lines associated with 'mm'.
180 180
181 This interface is used to handle whole address space 181 This interface is used to handle whole address space
182 page table operations such as what happens during 182 page table operations such as what happens during exit and exec.
183 fork, exit, and exec. 183
1842) void flush_cache_dup_mm(struct mm_struct *mm)
185
186 This interface flushes an entire user address space from
187 the caches. That is, after running, there will be no cache
188 lines associated with 'mm'.
189
190 This interface is used to handle whole address space
191 page table operations such as what happens during fork.
192
193 This option is separate from flush_cache_mm to allow some
194 optimizations for VIPT caches.
184 195
1852) void flush_cache_range(struct vm_area_struct *vma, 1963) void flush_cache_range(struct vm_area_struct *vma,
186 unsigned long start, unsigned long end) 197 unsigned long start, unsigned long end)
187 198
188 Here we are flushing a specific range of (user) virtual 199 Here we are flushing a specific range of (user) virtual
@@ -199,7 +210,7 @@ Here are the routines, one by one:
199 call flush_cache_page (see below) for each entry which may be 210 call flush_cache_page (see below) for each entry which may be
200 modified. 211 modified.
201 212
2023) void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) 2134) void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
203 214
204 This time we need to remove a PAGE_SIZE sized range 215 This time we need to remove a PAGE_SIZE sized range
205 from the cache. The 'vma' is the backing structure used by 216 from the cache. The 'vma' is the backing structure used by
@@ -220,7 +231,7 @@ Here are the routines, one by one:
220 231
221 This is used primarily during fault processing. 232 This is used primarily during fault processing.
222 233
2234) void flush_cache_kmaps(void) 2345) void flush_cache_kmaps(void)
224 235
225 This routine need only be implemented if the platform utilizes 236 This routine need only be implemented if the platform utilizes
226 highmem. It will be called right before all of the kmaps 237 highmem. It will be called right before all of the kmaps
@@ -232,7 +243,7 @@ Here are the routines, one by one:
232 243
233 This routing should be implemented in asm/highmem.h 244 This routing should be implemented in asm/highmem.h
234 245
2355) void flush_cache_vmap(unsigned long start, unsigned long end) 2466) void flush_cache_vmap(unsigned long start, unsigned long end)
236 void flush_cache_vunmap(unsigned long start, unsigned long end) 247 void flush_cache_vunmap(unsigned long start, unsigned long end)
237 248
238 Here in these two interfaces we are flushing a specific range 249 Here in these two interfaces we are flushing a specific range
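The new flush_cache_dup_mm() hook splits the fork case out of flush_cache_mm(). For an architecture that gains nothing from the distinction, a minimal sketch of the hook (presumably what the one-line cacheflush.h updates listed in the diffstat amount to) is simply an alias; only VIPT-cache ports need to provide something smarter:

        /* Minimal sketch: no fork-specific optimization wanted on this port. */
        #define flush_cache_dup_mm(mm)  flush_cache_mm(mm)
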
diff --git a/Documentation/cpu-freq/core.txt b/Documentation/cpu-freq/core.txt
index 29b3f9ffc66c..ce0666e51036 100644
--- a/Documentation/cpu-freq/core.txt
+++ b/Documentation/cpu-freq/core.txt
@@ -24,7 +24,7 @@ Contents:
241. General Information 241. General Information
25======================= 25=======================
26 26
27The CPUFreq core code is located in linux/kernel/cpufreq.c. This 27The CPUFreq core code is located in drivers/cpufreq/cpufreq.c. This
28cpufreq code offers a standardized interface for the CPUFreq 28cpufreq code offers a standardized interface for the CPUFreq
29architecture drivers (those pieces of code that do actual 29architecture drivers (those pieces of code that do actual
30frequency transitions), as well as to "notifiers". These are device 30frequency transitions), as well as to "notifiers". These are device
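As a quick illustration of the notifier interface mentioned above, here is a minimal sketch of a transition notifier. The callback and variable names are made up for the example; cpufreq_register_notifier(), CPUFREQ_TRANSITION_NOTIFIER and CPUFREQ_POSTCHANGE are the real hooks exported by the core.

        #include <linux/kernel.h>
        #include <linux/cpufreq.h>
        #include <linux/notifier.h>

        /* Hypothetical notifier: log every completed frequency transition. */
        static int freq_transition_notify(struct notifier_block *nb,
                                          unsigned long event, void *data)
        {
                struct cpufreq_freqs *freqs = data;

                if (event == CPUFREQ_POSTCHANGE)
                        printk(KERN_INFO "cpu%u: %u kHz -> %u kHz\n",
                               freqs->cpu, freqs->old, freqs->new);
                return NOTIFY_OK;
        }

        static struct notifier_block freq_transition_nb = {
                .notifier_call = freq_transition_notify,
        };

        /* In module init:
         *      cpufreq_register_notifier(&freq_transition_nb,
         *                                CPUFREQ_TRANSITION_NOTIFIER);
         */
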
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 031029e89fd9..040f437c421b 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -151,15 +151,6 @@ Who: Thomas Gleixner <tglx@linutronix.de>
151 151
152--------------------------- 152---------------------------
153 153
154What: I2C interface of the it87 driver
155When: January 2007
156Why: The ISA interface is faster and should be always available. The I2C
157 probing is also known to cause trouble in at least one case (see
158 bug #5889.)
159Who: Jean Delvare <khali@linux-fr.org>
160
161---------------------------
162
163What: Unused EXPORT_SYMBOL/EXPORT_SYMBOL_GPL exports 154What: Unused EXPORT_SYMBOL/EXPORT_SYMBOL_GPL exports
164 (temporary transition config option provided until then) 155 (temporary transition config option provided until then)
165 The transition config option will also be removed at the same time. 156 The transition config option will also be removed at the same time.
@@ -259,3 +250,25 @@ Why: The new layer 3 independant connection tracking replaces the old
259Who: Patrick McHardy <kaber@trash.net> 250Who: Patrick McHardy <kaber@trash.net>
260 251
261--------------------------- 252---------------------------
253
254What: ACPI hooks (X86_SPEEDSTEP_CENTRINO_ACPI) in speedstep-centrino driver
255When: December 2006
256Why: The speedstep-centrino driver with ACPI hooks and the acpi-cpufreq
257	driver are functionally very similar. They talk to ACPI in the
258	same way; the only difference between them is how they perform
259	frequency transitions (one uses MSRs, the other uses I/O ports).
260	The functionality of speedstep-centrino with ACPI hooks is now
261	merged into acpi-cpufreq, so one common driver will support all
262	Intel Enhanced SpeedStep capable CPUs. This means less confusion
263	over the name of the speedstep-centrino driver (with that driver
264	otherwise also used on non-Centrino platforms), less duplication
265	of code, less maintenance effort, and no possibility of the two
266	drivers going out of sync.
267 Current users of speedstep_centrino with ACPI hooks are requested to
268 switch over to acpi-cpufreq driver. speedstep-centrino will continue
269 to work using older non-ACPI static table based scheme even after this
270 date.
271
272Who: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
273
274---------------------------
diff --git a/Documentation/filesystems/bfs.txt b/Documentation/filesystems/bfs.txt
index d2841e0bcf02..ea825e178e79 100644
--- a/Documentation/filesystems/bfs.txt
+++ b/Documentation/filesystems/bfs.txt
@@ -54,4 +54,4 @@ The first 4 bytes should be 0x1badface.
54If you have any patches, questions or suggestions regarding this BFS 54If you have any patches, questions or suggestions regarding this BFS
55implementation please contact the author: 55implementation please contact the author:
56 56
57Tigran A. Aivazian <tigran@veritas.com> 57Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
diff --git a/Documentation/hwmon/f71805f b/Documentation/hwmon/f71805f
index 2ca69df669c3..bfd0f154959c 100644
--- a/Documentation/hwmon/f71805f
+++ b/Documentation/hwmon/f71805f
@@ -6,6 +6,10 @@ Supported chips:
6 Prefix: 'f71805f' 6 Prefix: 'f71805f'
7 Addresses scanned: none, address read from Super I/O config space 7 Addresses scanned: none, address read from Super I/O config space
8 Datasheet: Provided by Fintek on request 8 Datasheet: Provided by Fintek on request
9 * Fintek F71872F/FG
10 Prefix: 'f71872f'
11 Addresses scanned: none, address read from Super I/O config space
12 Datasheet: Provided by Fintek on request
9 13
10Author: Jean Delvare <khali@linux-fr.org> 14Author: Jean Delvare <khali@linux-fr.org>
11 15
@@ -13,8 +17,8 @@ Thanks to Denis Kieft from Barracuda Networks for the donation of a
13test system (custom Jetway K8M8MS motherboard, with CPU and RAM) and 17test system (custom Jetway K8M8MS motherboard, with CPU and RAM) and
14for providing initial documentation. 18for providing initial documentation.
15 19
16Thanks to Kris Chen from Fintek for answering technical questions and 20Thanks to Kris Chen and Aaron Huang from Fintek for answering technical
17providing additional documentation. 21questions and providing additional documentation.
18 22
19Thanks to Chris Lin from Jetway for providing wiring schematics and 23Thanks to Chris Lin from Jetway for providing wiring schematics and
20answering technical questions. 24answering technical questions.
@@ -28,8 +32,11 @@ capabilities. It can monitor up to 9 voltages (counting its own power
28source), 3 fans and 3 temperature sensors. 32source), 3 fans and 3 temperature sensors.
29 33
30This chip also has fan controlling features, using either DC or PWM, in 34This chip also has fan controlling features, using either DC or PWM, in
31three different modes (one manual, two automatic). The driver doesn't 35three different modes (one manual, two automatic).
32support these features yet. 36
37The Fintek F71872F/FG Super I/O chip is almost the same, with two
38additional internal voltages monitored (VSB and battery). It also features
396 VID inputs. The VID inputs are not yet supported by this driver.
33 40
34The driver assumes that no more than one chip is present, which seems 41The driver assumes that no more than one chip is present, which seems
35reasonable. 42reasonable.
@@ -42,7 +49,8 @@ Voltages are sampled by an 8-bit ADC with a LSB of 8 mV. The supported
42range is thus from 0 to 2.040 V. Voltage values outside of this range 49range is thus from 0 to 2.040 V. Voltage values outside of this range
43need external resistors. An exception is in0, which is used to monitor 50need external resistors. An exception is in0, which is used to monitor
44the chip's own power source (+3.3V), and is divided internally by a 51the chip's own power source (+3.3V), and is divided internally by a
45factor 2. 52factor 2. For the F71872F/FG, in9 (VSB) and in10 (battery) are also
53divided internally by a factor 2.
46 54
47The two LSB of the voltage limit registers are not used (always 0), so 55The two LSB of the voltage limit registers are not used (always 0), so
48you can only set the limits in steps of 32 mV (before scaling). 56you can only set the limits in steps of 32 mV (before scaling).
@@ -61,9 +69,12 @@ in5 VIN5 +12V 200K 20K 11.00 1.05 V
61in6 VIN6 VCC1.5V 10K - 1.00 1.50 V 69in6 VIN6 VCC1.5V 10K - 1.00 1.50 V
62in7 VIN7 VCORE 10K - 1.00 ~1.40 V (1) 70in7 VIN7 VCORE 10K - 1.00 ~1.40 V (1)
63in8 VIN8 VSB5V 200K 47K 1.00 0.95 V 71in8 VIN8 VSB5V 200K 47K 1.00 0.95 V
72in10 VSB VSB3.3V int. int. 2.00 1.65 V (3)
73in9 VBAT VBATTERY int. int. 2.00 1.50 V (3)
64 74
65(1) Depends on your hardware setup. 75(1) Depends on your hardware setup.
66(2) Obviously not correct, swapping R1 and R2 would make more sense. 76(2) Obviously not correct, swapping R1 and R2 would make more sense.
77(3) F71872F/FG only.
67 78
68These values can be used as hints at best, as motherboard manufacturers 79These values can be used as hints at best, as motherboard manufacturers
69are free to use a completely different setup. As a matter of fact, the 80are free to use a completely different setup. As a matter of fact, the
@@ -103,3 +114,38 @@ sensor. Each channel can be used for connecting either a thermal diode
103or a thermistor. The driver reports the currently selected mode, but 114or a thermistor. The driver reports the currently selected mode, but
104doesn't allow changing it. In theory, the BIOS should have configured 115doesn't allow changing it. In theory, the BIOS should have configured
105everything properly. 116everything properly.
117
118
119Fan Control
120-----------
121
122Both PWM (pulse-width modulation) and DC fan speed control methods are
123supported. The right one to use depends on external circuitry on the
124motherboard, so the driver assumes that the BIOS set the method
125properly. The driver will report the method, but won't let you change
126it.
127
128When the PWM method is used, you can select the operating frequency,
129from 187.5 kHz (default) to 31 Hz. The best frequency depends on the
130fan model. As a rule of thumb, lower frequencies seem to give better
131control, but may generate annoying high-pitch noise. Fintek recommends
132not going below 1 kHz, as the fan tachometers get confused by lower
133frequencies as well.
134
135When the DC method is used, Fintek recommends not going below 5 V, which
136corresponds to a pwm value of 106 for the driver. The driver doesn't
137enforce this limit though.
138
139Three different fan control modes are supported:
140
141* Manual mode
142 You ask for a specific PWM duty cycle or DC voltage.
143
144* Fan speed mode
145 You ask for a specific fan speed. This mode assumes that pwm1
146 corresponds to fan1, pwm2 to fan2 and pwm3 to fan3.
147
148* Temperature mode
149 You define 3 temperature/fan speed trip points, and the fan speed is
150 adjusted depending on the measured temperature, using interpolation.
151 This mode is not yet supported by the driver.
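The "5 V corresponds to a pwm value of 106" figure quoted above follows from mapping the 0-255 pwm range linearly onto a 0-12 V fan supply; that linear mapping is an assumption made for this sketch, not a statement from the datasheet.

        /* 5 V recommended minimum: 5000 * 255 / 12000 = 106 */
        static inline unsigned int dc_millivolts_to_pwm(unsigned int mv)
        {
                return (mv * 255) / 12000;
        }
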
diff --git a/Documentation/hwmon/it87 b/Documentation/hwmon/it87
index e783fd62e308..74a80992d237 100644
--- a/Documentation/hwmon/it87
+++ b/Documentation/hwmon/it87
@@ -9,8 +9,7 @@ Supported chips:
9 http://www.ite.com.tw/ 9 http://www.ite.com.tw/
10 * IT8712F 10 * IT8712F
11 Prefix: 'it8712' 11 Prefix: 'it8712'
12 Addresses scanned: I2C 0x2d 12 Addresses scanned: from Super I/O config space (8 I/O ports)
13 from Super I/O config space (8 I/O ports)
14 Datasheet: Publicly available at the ITE website 13 Datasheet: Publicly available at the ITE website
15 http://www.ite.com.tw/ 14 http://www.ite.com.tw/
16 * IT8716F 15 * IT8716F
@@ -53,6 +52,18 @@ Module Parameters
53 misconfigured by BIOS - PWM values would be inverted. This option tries 52 misconfigured by BIOS - PWM values would be inverted. This option tries
54 to fix this. Please contact your BIOS manufacturer and ask him for fix. 53 to fix this. Please contact your BIOS manufacturer and ask him for fix.
55 54
55
56Hardware Interfaces
57-------------------
58
59All the chips supported by this driver are LPC Super-I/O chips, accessed
60through the LPC bus (ISA-like I/O ports). The IT8712F additionally has an
61SMBus interface to the hardware monitoring functions. This driver no
62longer supports this interface though, as it is slower and less reliable
63than the ISA access, and was only available on a small number of
64motherboard models.
65
66
56Description 67Description
57----------- 68-----------
58 69
diff --git a/Documentation/hwmon/k8temp b/Documentation/hwmon/k8temp
index 30d123b8d920..0005c7166146 100644
--- a/Documentation/hwmon/k8temp
+++ b/Documentation/hwmon/k8temp
@@ -8,7 +8,7 @@ Supported chips:
8 Datasheet: http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/32559.pdf 8 Datasheet: http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/32559.pdf
9 9
10Author: Rudolf Marek 10Author: Rudolf Marek
11Contact: Rudolf Marek <r.marek@sh.cvut.cz> 11Contact: Rudolf Marek <r.marek@assembler.cz>
12 12
13Description 13Description
14----------- 14-----------
diff --git a/Documentation/hwmon/pc87427 b/Documentation/hwmon/pc87427
new file mode 100644
index 000000000000..9a0708f9f49e
--- /dev/null
+++ b/Documentation/hwmon/pc87427
@@ -0,0 +1,38 @@
1Kernel driver pc87427
2=====================
3
4Supported chips:
5 * National Semiconductor PC87427
6 Prefix: 'pc87427'
7 Addresses scanned: none, address read from Super I/O config space
8 Datasheet: http://www.winbond.com.tw/E-WINBONDHTM/partner/apc_007.html
9
10Author: Jean Delvare <khali@linux-fr.org>
11
12Thanks to Amir Habibi at Candelis for setting up a test system, and to
13Michael Kress for testing several iterations of this driver.
14
15
16Description
17-----------
18
19The National Semiconductor Super I/O chip includes complete hardware
20monitoring capabilities. It can monitor up to 18 voltages, 8 fans and
216 temperature sensors. Only the fans are supported at the moment.
22
23This chip also has fan controlling features, which are not yet supported
24by this driver either.
25
26The driver assumes that no more than one chip is present, which seems
27reasonable.
28
29
30Fan Monitoring
31--------------
32
33Fan rotation speeds are reported as 14-bit values from a gated clock
34signal. Speeds down to 83 RPM can be measured.
35
36An alarm is triggered if the rotation speed drops below a programmable
37limit. Another alarm is triggered if the speed is too low to be measured
38(including stalled or missing fan).
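The fan readings described above are period measurements: the 14-bit register counts pulses of a gated clock, so speed is inversely proportional to the count, and the counter saturating at full scale gives the ~83 RPM floor. A sketch of the conversion, with the clock constant back-derived from those two numbers rather than taken from the datasheet:

        /* 83 RPM * 16383 counts ~= 1360000; the driver's real constant may differ. */
        static inline unsigned long pc87427_count_to_rpm(unsigned int count)
        {
                if (count == 0 || count >= 0x3fff)  /* stalled, missing or saturated */
                        return 0;
                return 1360000UL / count;
        }
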
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index d1d390aaf620..efef3b962cd3 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -208,12 +208,14 @@ temp[1-*]_auto_point[1-*]_temp_hyst
208**************** 208****************
209 209
210temp[1-*]_type Sensor type selection. 210temp[1-*]_type Sensor type selection.
211 Integers 1 to 4 or thermistor Beta value (typically 3435) 211 Integers 1 to 6 or thermistor Beta value (typically 3435)
212 RW 212 RW
213 1: PII/Celeron Diode 213 1: PII/Celeron Diode
214 2: 3904 transistor 214 2: 3904 transistor
215 3: thermal diode 215 3: thermal diode
216 4: thermistor (default/unknown Beta) 216 4: thermistor (default/unknown Beta)
217 5: AMD AMDSI
218 6: Intel PECI
217 Not all types are supported by all chips 219 Not all types are supported by all chips
218 220
219temp[1-*]_max Temperature max value. 221temp[1-*]_max Temperature max value.
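Since temp[1-*]_type is an ordinary sysfs attribute, a userspace tool can read and decode it as below; the hwmon path used here is just an example location.

        #include <stdio.h>

        int main(void)
        {
                FILE *f = fopen("/sys/class/hwmon/hwmon0/device/temp1_type", "r");
                int type;

                if (!f)
                        return 1;
                if (fscanf(f, "%d", &type) != 1) {
                        fclose(f);
                        return 1;
                }
                fclose(f);

                switch (type) {
                case 1: puts("PII/Celeron diode"); break;
                case 2: puts("3904 transistor"); break;
                case 3: puts("thermal diode"); break;
                case 4: puts("thermistor (default/unknown Beta)"); break;
                case 5: puts("AMD AMDSI"); break;
                case 6: puts("Intel PECI"); break;
                default: printf("thermistor, Beta = %d\n", type); break;
                }
                return 0;
        }
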
diff --git a/Documentation/hwmon/w83627ehf b/Documentation/hwmon/w83627ehf
index caa610a297e8..8a15a7408753 100644
--- a/Documentation/hwmon/w83627ehf
+++ b/Documentation/hwmon/w83627ehf
@@ -10,7 +10,7 @@ Supported chips:
10Authors: 10Authors:
11 Jean Delvare <khali@linux-fr.org> 11 Jean Delvare <khali@linux-fr.org>
12 Yuan Mu (Winbond) 12 Yuan Mu (Winbond)
13 Rudolf Marek <r.marek@sh.cvut.cz> 13 Rudolf Marek <r.marek@assembler.cz>
14 14
15Description 15Description
16----------- 16-----------
diff --git a/Documentation/hwmon/w83791d b/Documentation/hwmon/w83791d
index 19b2ed739fa1..db9881df88a5 100644
--- a/Documentation/hwmon/w83791d
+++ b/Documentation/hwmon/w83791d
@@ -18,7 +18,7 @@ Credits:
18 and Mark Studebaker <mdsxyz123@yahoo.com> 18 and Mark Studebaker <mdsxyz123@yahoo.com>
19 w83792d.c: 19 w83792d.c:
20 Chunhao Huang <DZShen@Winbond.com.tw>, 20 Chunhao Huang <DZShen@Winbond.com.tw>,
21 Rudolf Marek <r.marek@sh.cvut.cz> 21 Rudolf Marek <r.marek@assembler.cz>
22 22
23Additional contributors: 23Additional contributors:
24 Sven Anders <anders@anduras.de> 24 Sven Anders <anders@anduras.de>
diff --git a/Documentation/hwmon/w83793 b/Documentation/hwmon/w83793
new file mode 100644
index 000000000000..45e5408340e0
--- /dev/null
+++ b/Documentation/hwmon/w83793
@@ -0,0 +1,110 @@
1Kernel driver w83793
2====================
3
4Supported chips:
5 * Winbond W83793G/W83793R
6 Prefix: 'w83793'
7 Addresses scanned: I2C 0x2c - 0x2f
8 Datasheet: Still not published
9
10Authors:
11 Yuan Mu (Winbond Electronics)
12 Rudolf Marek <r.marek@assembler.cz>
13
14
15Module parameters
16-----------------
17
18* reset int
19 (default 0)
20  Using this parameter is not recommended, as it may lose motherboard-specific
21  settings. Use 'reset=1' to reset the chip when loading this module.
22
23* force_subclients=bus,caddr,saddr1,saddr2
24 This is used to force the i2c addresses for subclients of
25 a certain chip. Typical usage is `force_subclients=0,0x2f,0x4a,0x4b'
26 to force the subclients of chip 0x2f on bus 0 to i2c addresses
27 0x4a and 0x4b.
28
29
30Description
31-----------
32
33This driver implements support for Winbond W83793G/W83793R chips.
34
35* Exported features
36 This driver exports 10 voltage sensors, up to 12 fan tachometer inputs,
37 6 remote temperatures, up to 8 sets of PWM fan controls, SmartFan
38 (automatic fan speed control) on all temperature/PWM combinations, 2
39 sets of 6-pin CPU VID input.
40
41* Sensor resolutions
42 If your motherboard maker used the reference design, the resolution of
43 voltage0-2 is 2mV, resolution of voltage3/4/5 is 16mV, 8mV for voltage6,
44 24mV for voltage7/8. Temp1-4 have a 0.25 degree Celsius resolution,
45  temp5-6 have a 1 degree Celsius resolution.
46
47* Temperature sensor types
48 Temp1-4 have 3 possible types. It can be read from (and written to)
49 temp[1-4]_type.
50  - If the value is 0, the related temperature channel stops
51    monitoring.
52  - If the value is 3, it starts monitoring using a remote thermal diode
53    (default).
54  - If the value is 5, it starts monitoring using the temperature sensor
55    in the AMD CPU, reading the result via AMDSI.
56  - If the value is 6, it starts monitoring using the temperature sensor
57    in the Intel CPU, reading the result via PECI.
58 Temp5-6 can be connected to external thermistors (value of
59 temp[5-6]_type is 4). They can also be disabled (value is 0).
60
61* Alarm mechanism
62 For voltage sensors, an alarm triggers if the measured value is below
63 the low voltage limit or over the high voltage limit.
64 For temperature sensors, an alarm triggers if the measured value goes
65 above the high temperature limit, and wears off only after the measured
66 value drops below the hysteresis value.
67 For fan sensors, an alarm triggers if the measured value is below the
68 low speed limit.
69
70* SmartFan/PWM control
71  If you want to set a pwm fan to manual mode, make sure it is not
72  controlled by any temperature channel. For example, to set fan1 to
73  manual mode, check the value of temp[1-6]_fan_map and make sure bit 0
74  is cleared in all 6 values. Then set the pwm1 value to control the
75  fan.
76
77 Each temperature channel can control all the 8 PWM outputs (by setting the
78 corresponding bit in tempX_fan_map), you can set the temperature channel
79 mode using temp[1-6]_pwm_enable, 2 is Thermal Cruise mode and 3
80 is the SmartFanII mode. Temperature channels will try to speed up or
81 slow down all controlled fans, this means one fan can receive different
82 PWM value requests from different temperature channels, but the chip
83 will always pick the safest (max) PWM value for each fan.
84
85 In Thermal Cruise mode, the chip attempts to keep the temperature at a
86 predefined value, within a tolerance margin. So if tempX_input >
87 thermal_cruiseX + toleranceX, the chip will increase the PWM value,
88 if tempX_input < thermal_cruiseX - toleranceX, the chip will decrease
89 the PWM value. If the temperature is within the tolerance range, the PWM
90 value is left unchanged.
91
92  SmartFanII works differently: you define up to 7 PWM/temperature trip
93  points, describing a PWM/temperature curve which the chip will follow.
94  While not fundamentally different from the Thermal Cruise mode, the
95  implementation is quite different, giving you finer-grained control.
96
97* Chassis
98 If the case open alarm triggers, it will stay in this state unless cleared
99 by any write to the sysfs file "chassis".
100
101* VID and VRM
102  The VRM version is detected automatically; don't modify it unless you
103  *do* know the CPU VRM version and it is not properly detected.
104
105
106Notes
107-----
108
109 Only Fan1-5 and PWM1-3 are guaranteed to always exist, other fan inputs and
110 PWM outputs may or may not exist depending on the chip pin configuration.
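The Thermal Cruise behaviour described above boils down to a simple bang-bang loop around the target temperature. The following is only an illustration of the text, not driver code (the chip performs this regulation itself):

        /* One control step for a single temperature channel driving one fan. */
        static unsigned int thermal_cruise_step(int temp, int target, int tolerance,
                                                unsigned int pwm)
        {
                if (temp > target + tolerance && pwm < 255)
                        return pwm + 1;         /* too hot: speed the fan up */
                if (temp < target - tolerance && pwm > 0)
                        return pwm - 1;         /* too cool: slow the fan down */
                return pwm;                     /* within tolerance: leave unchanged */
        }
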
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index 125093c3ef76..536d5bfbdb8d 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -29,7 +29,7 @@ them. A single configuration option is defined like this:
29 29
30config MODVERSIONS 30config MODVERSIONS
31 bool "Set version information on all module symbols" 31 bool "Set version information on all module symbols"
32 depends MODULES 32 depends on MODULES
33 help 33 help
34 Usually, modules have to be recompiled whenever you switch to a new 34 Usually, modules have to be recompiled whenever you switch to a new
35 kernel. ... 35 kernel. ...
@@ -163,7 +163,7 @@ The position of a menu entry in the tree is determined in two ways. First
163it can be specified explicitly: 163it can be specified explicitly:
164 164
165menu "Network device support" 165menu "Network device support"
166 depends NET 166 depends on NET
167 167
168config NETDEVICES 168config NETDEVICES
169 ... 169 ...
@@ -188,10 +188,10 @@ config MODULES
188 188
189config MODVERSIONS 189config MODVERSIONS
190 bool "Set version information on all module symbols" 190 bool "Set version information on all module symbols"
191 depends MODULES 191 depends on MODULES
192 192
193comment "module support disabled" 193comment "module support disabled"
194 depends !MODULES 194 depends on !MODULES
195 195
196MODVERSIONS directly depends on MODULES, this means it's only visible if 196MODVERSIONS directly depends on MODULES, this means it's only visible if
197MODULES is different from 'n'. The comment on the other hand is always 197MODULES is different from 'n'. The comment on the other hand is always
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index d8323b8893c3..ef69c75780bf 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1656,6 +1656,12 @@ and is between 256 and 4096 characters. It is defined in the file
1656 sym53c416= [HW,SCSI] 1656 sym53c416= [HW,SCSI]
1657 See header of drivers/scsi/sym53c416.c. 1657 See header of drivers/scsi/sym53c416.c.
1658 1658
1659 sysrq_always_enabled
1660 [KNL]
1661 Ignore sysrq setting - this boot parameter will
1662 neutralize any effect of /proc/sys/kernel/sysrq.
1663 Useful for debugging.
1664
1659 t128= [HW,SCSI] 1665 t128= [HW,SCSI]
1660 See header of drivers/scsi/t128.c. 1666 See header of drivers/scsi/t128.c.
1661 1667
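A sketch of how such a boot parameter can be wired up with __setup(); the identifiers below are illustrative and not necessarily those used in drivers/char/sysrq.c.

        #include <linux/init.h>
        #include <linux/kernel.h>

        static int sysrq_always_enabled __read_mostly;

        static int __init sysrq_always_enabled_setup(char *str)
        {
                sysrq_always_enabled = 1;
                printk(KERN_INFO "debug: sysrq always enabled\n");
                return 1;
        }
        __setup("sysrq_always_enabled", sysrq_always_enabled_setup);

        /* Every consumer then checks the override before the sysctl value: */
        static int sysrq_enabled_now(int sysctl_value)
        {
                return sysrq_always_enabled || sysctl_value;
        }
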
diff --git a/MAINTAINERS b/MAINTAINERS
index b2024938adcf..dea5b2a6de0a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -277,7 +277,7 @@ S: Maintained
277 277
278ALI1563 I2C DRIVER 278ALI1563 I2C DRIVER
279P: Rudolf Marek 279P: Rudolf Marek
280M: r.marek@sh.cvut.cz 280M: r.marek@assembler.cz
281L: i2c@lm-sensors.org 281L: i2c@lm-sensors.org
282S: Maintained 282S: Maintained
283 283
@@ -296,6 +296,13 @@ L: info-linux@geode.amd.com
296W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html 296W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
297S: Supported 297S: Supported
298 298
299AMS (Apple Motion Sensor) DRIVER
300P: Stelian Pop
301M: stelian@popies.net
302P: Michael Hanselmann
303M: linux-kernel@hansmi.ch
304S: Supported
305
299AMSO1100 RNIC DRIVER 306AMSO1100 RNIC DRIVER
300P: Tom Tucker 307P: Tom Tucker
301M: tom@opengridcomputing.com 308M: tom@opengridcomputing.com
@@ -740,7 +747,7 @@ P: Dave Jones
740M: davej@codemonkey.org.uk 747M: davej@codemonkey.org.uk
741L: cpufreq@lists.linux.org.uk 748L: cpufreq@lists.linux.org.uk
742W: http://www.codemonkey.org.uk/projects/cpufreq/ 749W: http://www.codemonkey.org.uk/projects/cpufreq/
743T: git kernel.org/pub/scm/linux/kernel/davej/cpufreq.git 750T: git kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git
744S: Maintained 751S: Maintained
745 752
746CPUID/MSR DRIVER 753CPUID/MSR DRIVER
@@ -1504,8 +1511,10 @@ T: git kernel.org:/pub/scm/linux/kernel/git/dtor/input.git
1504S: Maintained 1511S: Maintained
1505 1512
1506INOTIFY 1513INOTIFY
1507P: John McCutchan and Robert Love 1514P: John McCutchan
1508M: ttb@tentacle.dhs.org and rml@novell.com 1515M: ttb@tentacle.dhs.org
1516P: Robert Love
1517M: rml@novell.com
1509L: linux-kernel@vger.kernel.org 1518L: linux-kernel@vger.kernel.org
1510S: Maintained 1519S: Maintained
1511 1520
@@ -1745,6 +1754,13 @@ W: http://nfs.sourceforge.net/
1745W: http://www.cse.unsw.edu.au/~neilb/patches/linux-devel/ 1754W: http://www.cse.unsw.edu.au/~neilb/patches/linux-devel/
1746S: Maintained 1755S: Maintained
1747 1756
1757KERNEL VIRTUAL MACHINE (KVM)
1758P: Avi Kivity
1759M: avi@qumranet.com
1760L: kvm-devel@lists.sourceforge.net
1761W: kvm.sourceforge.net
1762S: Supported
1763
1748KEXEC 1764KEXEC
1749P: Eric Biederman 1765P: Eric Biederman
1750M: ebiederm@xmission.com 1766M: ebiederm@xmission.com
@@ -3427,6 +3443,12 @@ M: bezaur@gmail.com
3427L: lm-sensors@lm-sensors.org 3443L: lm-sensors@lm-sensors.org
3428S: Maintained 3444S: Maintained
3429 3445
3446W83793 HARDWARE MONITORING DRIVER
3447P: Rudolf Marek
3448M: r.marek@assembler.cz
3449L: lm-sensors@lm-sensors.org
3450S: Maintained
3451
3430W83L51xD SD/MMC CARD INTERFACE DRIVER 3452W83L51xD SD/MMC CARD INTERFACE DRIVER
3431P: Pierre Ossman 3453P: Pierre Ossman
3432M: drzeus-wbsd@drzeus.cx 3454M: drzeus-wbsd@drzeus.cx
diff --git a/Makefile b/Makefile
index 73e825b39a08..f732e75be43d 100644
--- a/Makefile
+++ b/Makefile
@@ -1100,9 +1100,9 @@ boards := $(notdir $(boards))
1100 1100
1101help: 1101help:
1102 @echo 'Cleaning targets:' 1102 @echo 'Cleaning targets:'
1103 @echo ' clean - remove most generated files but keep the config and' 1103 @echo ' clean - Remove most generated files but keep the config and'
1104 @echo ' enough build support to build external modules' 1104 @echo ' enough build support to build external modules'
1105 @echo ' mrproper - remove all generated files + config + various backup files' 1105 @echo ' mrproper - Remove all generated files + config + various backup files'
1106 @echo ' distclean - mrproper + remove editor backup and patch files' 1106 @echo ' distclean - mrproper + remove editor backup and patch files'
1107 @echo '' 1107 @echo ''
1108 @echo 'Configuration targets:' 1108 @echo 'Configuration targets:'
@@ -1390,12 +1390,18 @@ endif #ifeq ($(mixed-targets),1)
1390 1390
1391PHONY += checkstack kernelrelease kernelversion 1391PHONY += checkstack kernelrelease kernelversion
1392 1392
1393# Use $(SUBARCH) here instead of $(ARCH) so that this works for UML. 1393# UML needs a little special treatment here. It wants to use the host
1394# In the UML case, $(SUBARCH) is the name of the underlying 1394# toolchain, so needs $(SUBARCH) passed to checkstack.pl. Everyone
1395# architecture, while for all other arches, it is the same as $(ARCH). 1395# else wants $(ARCH), including people doing cross-builds, which means
1396# that $(SUBARCH) doesn't work here.
1397ifeq ($(ARCH), um)
1398CHECKSTACK_ARCH := $(SUBARCH)
1399else
1400CHECKSTACK_ARCH := $(ARCH)
1401endif
1396checkstack: 1402checkstack:
1397 $(OBJDUMP) -d vmlinux $$(find . -name '*.ko') | \ 1403 $(OBJDUMP) -d vmlinux $$(find . -name '*.ko') | \
1398 $(PERL) $(src)/scripts/checkstack.pl $(SUBARCH) 1404 $(PERL) $(src)/scripts/checkstack.pl $(CHECKSTACK_ARCH)
1399 1405
1400kernelrelease: 1406kernelrelease:
1401 $(if $(wildcard include/config/kernel.release), $(Q)echo $(KERNELRELEASE), \ 1407 $(if $(wildcard include/config/kernel.release), $(Q)echo $(KERNELRELEASE), \
diff --git a/arch/arm/kernel/apm.c b/arch/arm/kernel/apm.c
index a11fb9a40c04..2c37b70b17ab 100644
--- a/arch/arm/kernel/apm.c
+++ b/arch/arm/kernel/apm.c
@@ -423,7 +423,7 @@ static int apm_open(struct inode * inode, struct file * filp)
423{ 423{
424 struct apm_user *as; 424 struct apm_user *as;
425 425
426 as = (struct apm_user *)kzalloc(sizeof(*as), GFP_KERNEL); 426 as = kzalloc(sizeof(*as), GFP_KERNEL);
427 if (as) { 427 if (as) {
428 /* 428 /*
429 * XXX - this is a tiny bit broken, when we consider BSD 429 * XXX - this is a tiny bit broken, when we consider BSD
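This hunk is one instance of a cleanup applied throughout the patch: casting the return value of kmalloc()/kzalloc() is unnecessary in C, since void * converts implicitly to any object pointer type, and the cast can hide a missing prototype. A minimal sketch of the preferred form, using a made-up struct for illustration:

        #include <linux/slab.h>

        struct foo {
                int bar;
        };

        static struct foo *alloc_foo(void)
        {
                /* No cast: kzalloc() returns void *.  The sizeof(*f) form
                 * keeps the allocation size tied to the pointer's type. */
                struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

                return f;       /* NULL on allocation failure */
        }
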
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index a786f769035d..71257e3d513f 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -353,7 +353,7 @@ int ecard_readchunk(struct in_chunk_dir *cd, ecard_t *ec, int id, int num)
353 } 353 }
354 if (c_id(&excd) == 0x80) { /* loader */ 354 if (c_id(&excd) == 0x80) { /* loader */
355 if (!ec->loader) { 355 if (!ec->loader) {
356 ec->loader = (loader_t)kmalloc(c_len(&excd), 356 ec->loader = kmalloc(c_len(&excd),
357 GFP_KERNEL); 357 GFP_KERNEL);
358 if (ec->loader) 358 if (ec->loader)
359 ecard_readbytes(ec->loader, ec, 359 ecard_readbytes(ec->loader, ec,
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index d135568dc9e7..8781aaeb576b 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -43,6 +43,7 @@ config MACH_OMAP_H3
43config MACH_OMAP_OSK 43config MACH_OMAP_OSK
44 bool "TI OSK Support" 44 bool "TI OSK Support"
45 depends on ARCH_OMAP1 && ARCH_OMAP16XX 45 depends on ARCH_OMAP1 && ARCH_OMAP16XX
46 select TPS65010
46 help 47 help
47 TI OMAP 5912 OSK (OMAP Starter Kit) board support. Say Y here 48 TI OMAP 5912 OSK (OMAP Starter Kit) board support. Say Y here
48 if you have such a board. 49 if you have such a board.
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index 3a622801d7b0..7d0cf7af88ce 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -30,6 +30,7 @@
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/irq.h> 32#include <linux/irq.h>
33#include <linux/interrupt.h>
33 34
34#include <linux/mtd/mtd.h> 35#include <linux/mtd/mtd.h>
35#include <linux/mtd/partitions.h> 36#include <linux/mtd/partitions.h>
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 9e3d0bdcba07..5c0a10041cd1 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -75,28 +75,28 @@ endmenu
75 75
76config MACH_POODLE 76config MACH_POODLE
77 bool "Enable Sharp SL-5600 (Poodle) Support" 77 bool "Enable Sharp SL-5600 (Poodle) Support"
78 depends PXA_SHARPSL_25x 78 depends on PXA_SHARPSL_25x
79 select SHARP_LOCOMO 79 select SHARP_LOCOMO
80 select PXA_SSP 80 select PXA_SSP
81 81
82config MACH_CORGI 82config MACH_CORGI
83 bool "Enable Sharp SL-C700 (Corgi) Support" 83 bool "Enable Sharp SL-C700 (Corgi) Support"
84 depends PXA_SHARPSL_25x 84 depends on PXA_SHARPSL_25x
85 select PXA_SHARP_C7xx 85 select PXA_SHARP_C7xx
86 86
87config MACH_SHEPHERD 87config MACH_SHEPHERD
88 bool "Enable Sharp SL-C750 (Shepherd) Support" 88 bool "Enable Sharp SL-C750 (Shepherd) Support"
89 depends PXA_SHARPSL_25x 89 depends on PXA_SHARPSL_25x
90 select PXA_SHARP_C7xx 90 select PXA_SHARP_C7xx
91 91
92config MACH_HUSKY 92config MACH_HUSKY
93 bool "Enable Sharp SL-C760 (Husky) Support" 93 bool "Enable Sharp SL-C760 (Husky) Support"
94 depends PXA_SHARPSL_25x 94 depends on PXA_SHARPSL_25x
95 select PXA_SHARP_C7xx 95 select PXA_SHARP_C7xx
96 96
97config MACH_AKITA 97config MACH_AKITA
98 bool "Enable Sharp SL-1000 (Akita) Support" 98 bool "Enable Sharp SL-1000 (Akita) Support"
99 depends PXA_SHARPSL_27x 99 depends on PXA_SHARPSL_27x
100 select PXA_SHARP_Cxx00 100 select PXA_SHARP_Cxx00
101 select MACH_SPITZ 101 select MACH_SPITZ
102 select I2C 102 select I2C
@@ -104,17 +104,17 @@ config MACH_AKITA
104 104
105config MACH_SPITZ 105config MACH_SPITZ
106 bool "Enable Sharp Zaurus SL-3000 (Spitz) Support" 106 bool "Enable Sharp Zaurus SL-3000 (Spitz) Support"
107 depends PXA_SHARPSL_27x 107 depends on PXA_SHARPSL_27x
108 select PXA_SHARP_Cxx00 108 select PXA_SHARP_Cxx00
109 109
110config MACH_BORZOI 110config MACH_BORZOI
111 bool "Enable Sharp Zaurus SL-3100 (Borzoi) Support" 111 bool "Enable Sharp Zaurus SL-3100 (Borzoi) Support"
112 depends PXA_SHARPSL_27x 112 depends on PXA_SHARPSL_27x
113 select PXA_SHARP_Cxx00 113 select PXA_SHARP_Cxx00
114 114
115config MACH_TOSA 115config MACH_TOSA
116 bool "Enable Sharp SL-6000x (Tosa) Support" 116 bool "Enable Sharp SL-6000x (Tosa) Support"
117 depends PXA_SHARPSL_25x 117 depends on PXA_SHARPSL_25x
118 118
119config PXA25x 119config PXA25x
120 bool 120 bool
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index ec752e16d618..f2dc363de66b 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -113,7 +113,7 @@ endchoice
113 113
114config OMAP_SERIAL_WAKE 114config OMAP_SERIAL_WAKE
115 bool "Enable wake-up events for serial ports" 115 bool "Enable wake-up events for serial ports"
116 depends OMAP_MUX 116 depends on OMAP_MUX
117 default y 117 default y
118 help 118 help
119 Select this option if you want to have your system wake up 119 Select this option if you want to have your system wake up
diff --git a/arch/arm26/kernel/ecard.c b/arch/arm26/kernel/ecard.c
index 43dd41be71fb..9dbc17247c6f 100644
--- a/arch/arm26/kernel/ecard.c
+++ b/arch/arm26/kernel/ecard.c
@@ -215,7 +215,7 @@ int ecard_readchunk(struct in_chunk_dir *cd, ecard_t *ec, int id, int num)
215 } 215 }
216 if (c_id(&excd) == 0x80) { /* loader */ 216 if (c_id(&excd) == 0x80) { /* loader */
217 if (!ec->loader) { 217 if (!ec->loader) {
218 ec->loader = (loader_t)kmalloc(c_len(&excd), 218 ec->loader = kmalloc(c_len(&excd),
219 GFP_KERNEL); 219 GFP_KERNEL);
220 if (ec->loader) 220 if (ec->loader)
221 ecard_readbytes(ec->loader, ec, 221 ecard_readbytes(ec->loader, ec,
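
This hunk, like the irq.c, axisflashmap.c, gpio.c, signal.c, profile.c, ints.c and apm.c hunks below, is one instance of the same janitorial change: the cast on kmalloc()'s return value is dropped. A short sketch of why the cast is pure noise (the structure is hypothetical, for illustration only):

        #include <linux/slab.h>

        struct demo_item {              /* hypothetical type */
                int value;
        };

        static struct demo_item *demo_alloc(void)
        {
                /* kmalloc() returns void *, which converts implicitly to any
                 * object pointer type in C, so no cast is needed.  A cast can
                 * also hide the warning that would otherwise flag a missing
                 * <linux/slab.h> include. */
                return kmalloc(sizeof(struct demo_item), GFP_KERNEL);
        }
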
diff --git a/arch/arm26/kernel/irq.c b/arch/arm26/kernel/irq.c
index d87d68b77d66..d53382c83bf9 100644
--- a/arch/arm26/kernel/irq.c
+++ b/arch/arm26/kernel/irq.c
@@ -545,7 +545,7 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
545 (irq_flags & IRQF_SHARED && !dev_id)) 545 (irq_flags & IRQF_SHARED && !dev_id))
546 return -EINVAL; 546 return -EINVAL;
547 547
548 action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL); 548 action = kmalloc(sizeof(struct irqaction), GFP_KERNEL);
549 if (!action) 549 if (!action)
550 return -ENOMEM; 550 return -ENOMEM;
551 551
diff --git a/arch/cris/arch-v10/drivers/axisflashmap.c b/arch/cris/arch-v10/drivers/axisflashmap.c
index 4fa81abab0c7..ffade19a14e6 100644
--- a/arch/cris/arch-v10/drivers/axisflashmap.c
+++ b/arch/cris/arch-v10/drivers/axisflashmap.c
@@ -516,7 +516,7 @@ static int __init init_axis_flash(void)
516#else 516#else
517 struct mtd_info *mtd_ram; 517 struct mtd_info *mtd_ram;
518 518
519 mtd_ram = (struct mtd_info *)kmalloc(sizeof(struct mtd_info), 519 mtd_ram = kmalloc(sizeof(struct mtd_info),
520 GFP_KERNEL); 520 GFP_KERNEL);
521 if (!mtd_ram) { 521 if (!mtd_ram) {
522 panic("axisflashmap couldn't allocate memory for " 522 panic("axisflashmap couldn't allocate memory for "
diff --git a/arch/cris/arch-v10/drivers/gpio.c b/arch/cris/arch-v10/drivers/gpio.c
index fcba6632ed7b..9aba18b931dd 100644
--- a/arch/cris/arch-v10/drivers/gpio.c
+++ b/arch/cris/arch-v10/drivers/gpio.c
@@ -440,7 +440,7 @@ gpio_open(struct inode *inode, struct file *filp)
440 if (p > GPIO_MINOR_LAST) 440 if (p > GPIO_MINOR_LAST)
441 return -EINVAL; 441 return -EINVAL;
442 442
443 priv = (struct gpio_private *)kmalloc(sizeof(struct gpio_private), 443 priv = kmalloc(sizeof(struct gpio_private),
444 GFP_KERNEL); 444 GFP_KERNEL);
445 445
446 if (!priv) 446 if (!priv)
diff --git a/arch/cris/arch-v32/drivers/axisflashmap.c b/arch/cris/arch-v32/drivers/axisflashmap.c
index 41952320e00a..5180d45412fc 100644
--- a/arch/cris/arch-v32/drivers/axisflashmap.c
+++ b/arch/cris/arch-v32/drivers/axisflashmap.c
@@ -427,7 +427,7 @@ static int __init init_axis_flash(void)
427#else 427#else
428 struct mtd_info *mtd_ram; 428 struct mtd_info *mtd_ram;
429 429
430 mtd_ram = (struct mtd_info *)kmalloc(sizeof(struct mtd_info), 430 mtd_ram = kmalloc(sizeof(struct mtd_info),
431 GFP_KERNEL); 431 GFP_KERNEL);
432 if (!mtd_ram) { 432 if (!mtd_ram) {
433 panic("axisflashmap couldn't allocate memory for " 433 panic("axisflashmap couldn't allocate memory for "
diff --git a/arch/cris/arch-v32/drivers/gpio.c b/arch/cris/arch-v32/drivers/gpio.c
index c3f876b4da6b..08d36f0955c6 100644
--- a/arch/cris/arch-v32/drivers/gpio.c
+++ b/arch/cris/arch-v32/drivers/gpio.c
@@ -423,7 +423,7 @@ gpio_open(struct inode *inode, struct file *filp)
423 if (p > GPIO_MINOR_LAST) 423 if (p > GPIO_MINOR_LAST)
424 return -EINVAL; 424 return -EINVAL;
425 425
426 priv = (struct gpio_private *)kmalloc(sizeof(struct gpio_private), 426 priv = kmalloc(sizeof(struct gpio_private),
427 GFP_KERNEL); 427 GFP_KERNEL);
428 428
429 if (!priv) 429 if (!priv)
diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c
index 99e59b3eacf8..7cd6ac803409 100644
--- a/arch/cris/arch-v32/kernel/signal.c
+++ b/arch/cris/arch-v32/kernel/signal.c
@@ -686,7 +686,7 @@ keep_debug_flags(unsigned long oldccs, unsigned long oldspc,
686int __init 686int __init
687cris_init_signal(void) 687cris_init_signal(void)
688{ 688{
689 u16* data = (u16*)kmalloc(PAGE_SIZE, GFP_KERNEL); 689 u16* data = kmalloc(PAGE_SIZE, GFP_KERNEL);
690 690
691 /* This is movu.w __NR_sigreturn, r9; break 13; */ 691 /* This is movu.w __NR_sigreturn, r9; break 13; */
692 data[0] = 0x9c5f; 692 data[0] = 0x9c5f;
diff --git a/arch/cris/kernel/profile.c b/arch/cris/kernel/profile.c
index 69c52189f044..f60ab785f235 100644
--- a/arch/cris/kernel/profile.c
+++ b/arch/cris/kernel/profile.c
@@ -59,7 +59,7 @@ static int
59__init init_cris_profile(void) 59__init init_cris_profile(void)
60{ 60{
61 struct proc_dir_entry *entry; 61 struct proc_dir_entry *entry;
62 sample_buffer = (char*)kmalloc(SAMPLE_BUFFER_SIZE, GFP_KERNEL); 62 sample_buffer = kmalloc(SAMPLE_BUFFER_SIZE, GFP_KERNEL);
63 sample_buffer_pos = sample_buffer; 63 sample_buffer_pos = sample_buffer;
64 entry = create_proc_entry("system_profile", S_IWUSR | S_IRUGO, NULL); 64 entry = create_proc_entry("system_profile", S_IWUSR | S_IRUGO, NULL);
65 if (entry) { 65 if (entry) {
diff --git a/arch/h8300/kernel/ints.c b/arch/h8300/kernel/ints.c
index 1bfc77e391d5..587ef7f4fcc7 100644
--- a/arch/h8300/kernel/ints.c
+++ b/arch/h8300/kernel/ints.c
@@ -141,7 +141,7 @@ int request_irq(unsigned int irq,
141 return -EBUSY; 141 return -EBUSY;
142 142
143 if (use_kmalloc) 143 if (use_kmalloc)
144 irq_handle = (irq_handler_t *)kmalloc(sizeof(irq_handler_t), GFP_ATOMIC); 144 irq_handle = kmalloc(sizeof(irq_handler_t), GFP_ATOMIC);
145 else { 145 else {
146 /* use bootmem allocater */ 146 /* use bootmem allocater */
147 irq_handle = (irq_handler_t *)alloc_bootmem(sizeof(irq_handler_t)); 147 irq_handle = (irq_handler_t *)alloc_bootmem(sizeof(irq_handler_t));
diff --git a/arch/h8300/platform/h8s/ints.c b/arch/h8300/platform/h8s/ints.c
index 270440de4610..567f681ddfec 100644
--- a/arch/h8300/platform/h8s/ints.c
+++ b/arch/h8300/platform/h8s/ints.c
@@ -176,7 +176,7 @@ int request_irq(unsigned int irq,
176 } 176 }
177 177
178 if (use_kmalloc) 178 if (use_kmalloc)
179 irq_handle = (irq_handler_t *)kmalloc(sizeof(irq_handler_t), GFP_ATOMIC); 179 irq_handle = kmalloc(sizeof(irq_handler_t), GFP_ATOMIC);
180 else { 180 else {
181 /* use bootmem allocater */ 181 /* use bootmem allocater */
182 irq_handle = (irq_handler_t *)alloc_bootmem(sizeof(irq_handler_t)); 182 irq_handle = (irq_handler_t *)alloc_bootmem(sizeof(irq_handler_t));
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index a97847da9ed5..b75cff25de4b 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -1604,7 +1604,7 @@ static int do_open(struct inode * inode, struct file * filp)
1604{ 1604{
1605 struct apm_user * as; 1605 struct apm_user * as;
1606 1606
1607 as = (struct apm_user *)kmalloc(sizeof(*as), GFP_KERNEL); 1607 as = kmalloc(sizeof(*as), GFP_KERNEL);
1608 if (as == NULL) { 1608 if (as == NULL) {
1609 printk(KERN_ERR "apm: cannot allocate struct of size %d bytes\n", 1609 printk(KERN_ERR "apm: cannot allocate struct of size %d bytes\n",
1610 sizeof(*as)); 1610 sizeof(*as));
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index ccc1edff5c97..5299c5bf4454 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -17,6 +17,7 @@ config X86_ACPI_CPUFREQ
17 help 17 help
18 This driver adds a CPUFreq driver which utilizes the ACPI 18 This driver adds a CPUFreq driver which utilizes the ACPI
19 Processor Performance States. 19 Processor Performance States.
20 This driver also supports Intel Enhanced Speedstep.
20 21
21 For details, take a look at <file:Documentation/cpu-freq/>. 22 For details, take a look at <file:Documentation/cpu-freq/>.
22 23
@@ -121,11 +122,14 @@ config X86_SPEEDSTEP_CENTRINO
121 If in doubt, say N. 122 If in doubt, say N.
122 123
123config X86_SPEEDSTEP_CENTRINO_ACPI 124config X86_SPEEDSTEP_CENTRINO_ACPI
124 bool "Use ACPI tables to decode valid frequency/voltage pairs" 125 bool "Use ACPI tables to decode valid frequency/voltage (deprecated)"
125 depends on X86_SPEEDSTEP_CENTRINO && ACPI_PROCESSOR 126 depends on X86_SPEEDSTEP_CENTRINO && ACPI_PROCESSOR
126 depends on !(X86_SPEEDSTEP_CENTRINO = y && ACPI_PROCESSOR = m) 127 depends on !(X86_SPEEDSTEP_CENTRINO = y && ACPI_PROCESSOR = m)
127 default y 128 default y
128 help 129 help
130 This is deprecated and this functionality is now merged into
131 acpi_cpufreq (X86_ACPI_CPUFREQ). Use that driver instead of
132 speedstep_centrino.
129 Use primarily the information provided in the BIOS ACPI tables 133 Use primarily the information provided in the BIOS ACPI tables
130 to determine valid CPU frequency and voltage pairings. It is 134 to determine valid CPU frequency and voltage pairings. It is
131 required for the driver to work on non-Banias CPUs. 135 required for the driver to work on non-Banias CPUs.
diff --git a/arch/i386/kernel/cpu/cpufreq/Makefile b/arch/i386/kernel/cpu/cpufreq/Makefile
index 2e894f1c8910..8de3abe322a9 100644
--- a/arch/i386/kernel/cpu/cpufreq/Makefile
+++ b/arch/i386/kernel/cpu/cpufreq/Makefile
@@ -7,9 +7,9 @@ obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o
7obj-$(CONFIG_X86_LONGRUN) += longrun.o 7obj-$(CONFIG_X86_LONGRUN) += longrun.o
8obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o 8obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o
9obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o 9obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o
10obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
11obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o 10obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o
12obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o 11obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
13obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o 12obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
13obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
14obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o 14obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
15obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o 15obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
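
Moving speedstep-centrino.o below acpi-cpufreq.o is likely more than cosmetic when both drivers are built in: for built-in code, link order decides the order of same-level initcalls, so acpi-cpufreq (which now also drives Enhanced SpeedStep CPUs through the MSR interface) registers with the cpufreq core first and the deprecated centrino driver only binds if acpi-cpufreq declined the CPU. That reading is an inference from the link-order rule, not something the hunk itself states.
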
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 57c880bf0bd6..18f4715c655d 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -1,9 +1,10 @@
1/* 1/*
2 * acpi-cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.3 $) 2 * acpi-cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.4 $)
3 * 3 *
4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de> 6 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
7 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
7 * 8 *
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 9 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 * 10 *
@@ -27,202 +28,387 @@
27#include <linux/kernel.h> 28#include <linux/kernel.h>
28#include <linux/module.h> 29#include <linux/module.h>
29#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/smp.h>
32#include <linux/sched.h>
30#include <linux/cpufreq.h> 33#include <linux/cpufreq.h>
31#include <linux/proc_fs.h>
32#include <linux/seq_file.h>
33#include <linux/compiler.h> 34#include <linux/compiler.h>
34#include <linux/sched.h> /* current */
35#include <linux/dmi.h> 35#include <linux/dmi.h>
36#include <asm/io.h>
37#include <asm/delay.h>
38#include <asm/uaccess.h>
39 36
40#include <linux/acpi.h> 37#include <linux/acpi.h>
41#include <acpi/processor.h> 38#include <acpi/processor.h>
42 39
40#include <asm/io.h>
41#include <asm/msr.h>
42#include <asm/processor.h>
43#include <asm/cpufeature.h>
44#include <asm/delay.h>
45#include <asm/uaccess.h>
46
43#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg) 47#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg)
44 48
45MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); 49MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
46MODULE_DESCRIPTION("ACPI Processor P-States Driver"); 50MODULE_DESCRIPTION("ACPI Processor P-States Driver");
47MODULE_LICENSE("GPL"); 51MODULE_LICENSE("GPL");
48 52
53enum {
54 UNDEFINED_CAPABLE = 0,
55 SYSTEM_INTEL_MSR_CAPABLE,
56 SYSTEM_IO_CAPABLE,
57};
58
59#define INTEL_MSR_RANGE (0xffff)
60#define CPUID_6_ECX_APERFMPERF_CAPABILITY (0x1)
49 61
50struct cpufreq_acpi_io { 62struct acpi_cpufreq_data {
51 struct acpi_processor_performance *acpi_data; 63 struct acpi_processor_performance *acpi_data;
52 struct cpufreq_frequency_table *freq_table; 64 struct cpufreq_frequency_table *freq_table;
53 unsigned int resume; 65 unsigned int max_freq;
66 unsigned int resume;
67 unsigned int cpu_feature;
54}; 68};
55 69
56static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS]; 70static struct acpi_cpufreq_data *drv_data[NR_CPUS];
57static struct acpi_processor_performance *acpi_perf_data[NR_CPUS]; 71static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
58 72
59static struct cpufreq_driver acpi_cpufreq_driver; 73static struct cpufreq_driver acpi_cpufreq_driver;
60 74
61static unsigned int acpi_pstate_strict; 75static unsigned int acpi_pstate_strict;
62 76
63static int 77static int check_est_cpu(unsigned int cpuid)
64acpi_processor_write_port( 78{
65 u16 port, 79 struct cpuinfo_x86 *cpu = &cpu_data[cpuid];
66 u8 bit_width, 80
67 u32 value) 81 if (cpu->x86_vendor != X86_VENDOR_INTEL ||
82 !cpu_has(cpu, X86_FEATURE_EST))
83 return 0;
84
85 return 1;
86}
87
88static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
89{
90 struct acpi_processor_performance *perf;
91 int i;
92
93 perf = data->acpi_data;
94
95 for (i=0; i<perf->state_count; i++) {
96 if (value == perf->states[i].status)
97 return data->freq_table[i].frequency;
98 }
99 return 0;
100}
101
102static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
68{ 103{
69 if (bit_width <= 8) { 104 int i;
105 struct acpi_processor_performance *perf;
106
107 msr &= INTEL_MSR_RANGE;
108 perf = data->acpi_data;
109
110 for (i=0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
111 if (msr == perf->states[data->freq_table[i].index].status)
112 return data->freq_table[i].frequency;
113 }
114 return data->freq_table[0].frequency;
115}
116
117static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
118{
119 switch (data->cpu_feature) {
120 case SYSTEM_INTEL_MSR_CAPABLE:
121 return extract_msr(val, data);
122 case SYSTEM_IO_CAPABLE:
123 return extract_io(val, data);
124 default:
125 return 0;
126 }
127}
128
129static void wrport(u16 port, u8 bit_width, u32 value)
130{
131 if (bit_width <= 8)
70 outb(value, port); 132 outb(value, port);
71 } else if (bit_width <= 16) { 133 else if (bit_width <= 16)
72 outw(value, port); 134 outw(value, port);
73 } else if (bit_width <= 32) { 135 else if (bit_width <= 32)
74 outl(value, port); 136 outl(value, port);
75 } else {
76 return -ENODEV;
77 }
78 return 0;
79} 137}
80 138
81static int 139static void rdport(u16 port, u8 bit_width, u32 * ret)
82acpi_processor_read_port(
83 u16 port,
84 u8 bit_width,
85 u32 *ret)
86{ 140{
87 *ret = 0; 141 *ret = 0;
88 if (bit_width <= 8) { 142 if (bit_width <= 8)
89 *ret = inb(port); 143 *ret = inb(port);
90 } else if (bit_width <= 16) { 144 else if (bit_width <= 16)
91 *ret = inw(port); 145 *ret = inw(port);
92 } else if (bit_width <= 32) { 146 else if (bit_width <= 32)
93 *ret = inl(port); 147 *ret = inl(port);
94 } else { 148}
95 return -ENODEV; 149
150struct msr_addr {
151 u32 reg;
152};
153
154struct io_addr {
155 u16 port;
156 u8 bit_width;
157};
158
159typedef union {
160 struct msr_addr msr;
161 struct io_addr io;
162} drv_addr_union;
163
164struct drv_cmd {
165 unsigned int type;
166 cpumask_t mask;
167 drv_addr_union addr;
168 u32 val;
169};
170
171static void do_drv_read(struct drv_cmd *cmd)
172{
173 u32 h;
174
175 switch (cmd->type) {
176 case SYSTEM_INTEL_MSR_CAPABLE:
177 rdmsr(cmd->addr.msr.reg, cmd->val, h);
178 break;
179 case SYSTEM_IO_CAPABLE:
180 rdport(cmd->addr.io.port, cmd->addr.io.bit_width, &cmd->val);
181 break;
182 default:
183 break;
96 } 184 }
97 return 0;
98} 185}
99 186
100static int 187static void do_drv_write(struct drv_cmd *cmd)
101acpi_processor_set_performance (
102 struct cpufreq_acpi_io *data,
103 unsigned int cpu,
104 int state)
105{ 188{
106 u16 port = 0; 189 u32 h = 0;
107 u8 bit_width = 0; 190
108 int i = 0; 191 switch (cmd->type) {
109 int ret = 0; 192 case SYSTEM_INTEL_MSR_CAPABLE:
110 u32 value = 0; 193 wrmsr(cmd->addr.msr.reg, cmd->val, h);
111 int retval; 194 break;
112 struct acpi_processor_performance *perf; 195 case SYSTEM_IO_CAPABLE:
113 196 wrport(cmd->addr.io.port, cmd->addr.io.bit_width, cmd->val);
114 dprintk("acpi_processor_set_performance\n"); 197 break;
115 198 default:
116 retval = 0; 199 break;
117 perf = data->acpi_data;
118 if (state == perf->state) {
119 if (unlikely(data->resume)) {
120 dprintk("Called after resume, resetting to P%d\n", state);
121 data->resume = 0;
122 } else {
123 dprintk("Already at target state (P%d)\n", state);
124 return (retval);
125 }
126 } 200 }
201}
127 202
128 dprintk("Transitioning from P%d to P%d\n", perf->state, state); 203static void drv_read(struct drv_cmd *cmd)
204{
205 cpumask_t saved_mask = current->cpus_allowed;
206 cmd->val = 0;
129 207
130 /* 208 set_cpus_allowed(current, cmd->mask);
131 * First we write the target state's 'control' value to the 209 do_drv_read(cmd);
132 * control_register. 210 set_cpus_allowed(current, saved_mask);
133 */ 211}
212
213static void drv_write(struct drv_cmd *cmd)
214{
215 cpumask_t saved_mask = current->cpus_allowed;
216 unsigned int i;
217
218 for_each_cpu_mask(i, cmd->mask) {
219 set_cpus_allowed(current, cpumask_of_cpu(i));
220 do_drv_write(cmd);
221 }
222
223 set_cpus_allowed(current, saved_mask);
224 return;
225}
226
227static u32 get_cur_val(cpumask_t mask)
228{
229 struct acpi_processor_performance *perf;
230 struct drv_cmd cmd;
231
232 if (unlikely(cpus_empty(mask)))
233 return 0;
234
235 switch (drv_data[first_cpu(mask)]->cpu_feature) {
236 case SYSTEM_INTEL_MSR_CAPABLE:
237 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
238 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
239 break;
240 case SYSTEM_IO_CAPABLE:
241 cmd.type = SYSTEM_IO_CAPABLE;
242 perf = drv_data[first_cpu(mask)]->acpi_data;
243 cmd.addr.io.port = perf->control_register.address;
244 cmd.addr.io.bit_width = perf->control_register.bit_width;
245 break;
246 default:
247 return 0;
248 }
249
250 cmd.mask = mask;
134 251
135 port = perf->control_register.address; 252 drv_read(&cmd);
136 bit_width = perf->control_register.bit_width;
137 value = (u32) perf->states[state].control;
138 253
139 dprintk("Writing 0x%08x to port 0x%04x\n", value, port); 254 dprintk("get_cur_val = %u\n", cmd.val);
140 255
141 ret = acpi_processor_write_port(port, bit_width, value); 256 return cmd.val;
142 if (ret) { 257}
143 dprintk("Invalid port width 0x%04x\n", bit_width); 258
144 return (ret); 259/*
260 * Return the measured active (C0) frequency on this CPU since last call
261 * to this function.
262 * Input: cpu number
263 * Return: Average CPU frequency in terms of max frequency (zero on error)
264 *
265 * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
266 * over a period of time, while CPU is in C0 state.
267 * IA32_MPERF counts at the rate of max advertised frequency
268 * IA32_APERF counts at the rate of actual CPU frequency
269 * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
270 * no meaning should be associated with absolute values of these MSRs.
271 */
272static unsigned int get_measured_perf(unsigned int cpu)
273{
274 union {
275 struct {
276 u32 lo;
277 u32 hi;
278 } split;
279 u64 whole;
280 } aperf_cur, mperf_cur;
281
282 cpumask_t saved_mask;
283 unsigned int perf_percent;
284 unsigned int retval;
285
286 saved_mask = current->cpus_allowed;
287 set_cpus_allowed(current, cpumask_of_cpu(cpu));
288 if (get_cpu() != cpu) {
289 /* We were not able to run on requested processor */
290 put_cpu();
291 return 0;
145 } 292 }
146 293
294 rdmsr(MSR_IA32_APERF, aperf_cur.split.lo, aperf_cur.split.hi);
295 rdmsr(MSR_IA32_MPERF, mperf_cur.split.lo, mperf_cur.split.hi);
296
297 wrmsr(MSR_IA32_APERF, 0,0);
298 wrmsr(MSR_IA32_MPERF, 0,0);
299
300#ifdef __i386__
147 /* 301 /*
148 * Assume the write went through when acpi_pstate_strict is not used. 302 * We dont want to do 64 bit divide with 32 bit kernel
149 * As read status_register is an expensive operation and there 303 * Get an approximate value. Return failure in case we cannot get
150 * are no specific error cases where an IO port write will fail. 304 * an approximate value.
151 */ 305 */
152 if (acpi_pstate_strict) { 306 if (unlikely(aperf_cur.split.hi || mperf_cur.split.hi)) {
153 /* Then we read the 'status_register' and compare the value 307 int shift_count;
154 * with the target state's 'status' to make sure the 308 u32 h;
155 * transition was successful. 309
156 * Note that we'll poll for up to 1ms (100 cycles of 10us) 310 h = max_t(u32, aperf_cur.split.hi, mperf_cur.split.hi);
157 * before giving up. 311 shift_count = fls(h);
158 */ 312
159 313 aperf_cur.whole >>= shift_count;
160 port = perf->status_register.address; 314 mperf_cur.whole >>= shift_count;
161 bit_width = perf->status_register.bit_width; 315 }
162 316
163 dprintk("Looking for 0x%08x from port 0x%04x\n", 317 if (((unsigned long)(-1) / 100) < aperf_cur.split.lo) {
164 (u32) perf->states[state].status, port); 318 int shift_count = 7;
165 319 aperf_cur.split.lo >>= shift_count;
166 for (i = 0; i < 100; i++) { 320 mperf_cur.split.lo >>= shift_count;
167 ret = acpi_processor_read_port(port, bit_width, &value); 321 }
168 if (ret) { 322
169 dprintk("Invalid port width 0x%04x\n", bit_width); 323 if (aperf_cur.split.lo && mperf_cur.split.lo)
170 return (ret); 324 perf_percent = (aperf_cur.split.lo * 100) / mperf_cur.split.lo;
171 } 325 else
172 if (value == (u32) perf->states[state].status) 326 perf_percent = 0;
173 break; 327
174 udelay(10); 328#else
175 } 329 if (unlikely(((unsigned long)(-1) / 100) < aperf_cur.whole)) {
176 } else { 330 int shift_count = 7;
177 value = (u32) perf->states[state].status; 331 aperf_cur.whole >>= shift_count;
332 mperf_cur.whole >>= shift_count;
178 } 333 }
179 334
180 if (unlikely(value != (u32) perf->states[state].status)) { 335 if (aperf_cur.whole && mperf_cur.whole)
181 printk(KERN_WARNING "acpi-cpufreq: Transition failed\n"); 336 perf_percent = (aperf_cur.whole * 100) / mperf_cur.whole;
182 retval = -ENODEV; 337 else
183 return (retval); 338 perf_percent = 0;
339
340#endif
341
342 retval = drv_data[cpu]->max_freq * perf_percent / 100;
343
344 put_cpu();
345 set_cpus_allowed(current, saved_mask);
346
347 dprintk("cpu %d: performance percent %d\n", cpu, perf_percent);
348 return retval;
349}
350
351static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
352{
353 struct acpi_cpufreq_data *data = drv_data[cpu];
354 unsigned int freq;
355
356 dprintk("get_cur_freq_on_cpu (%d)\n", cpu);
357
358 if (unlikely(data == NULL ||
359 data->acpi_data == NULL || data->freq_table == NULL)) {
360 return 0;
184 } 361 }
185 362
186 dprintk("Transition successful after %d microseconds\n", i * 10); 363 freq = extract_freq(get_cur_val(cpumask_of_cpu(cpu)), data);
364 dprintk("cur freq = %u\n", freq);
187 365
188 perf->state = state; 366 return freq;
189 return (retval);
190} 367}
191 368
369static unsigned int check_freqs(cpumask_t mask, unsigned int freq,
370 struct acpi_cpufreq_data *data)
371{
372 unsigned int cur_freq;
373 unsigned int i;
374
375 for (i=0; i<100; i++) {
376 cur_freq = extract_freq(get_cur_val(mask), data);
377 if (cur_freq == freq)
378 return 1;
379 udelay(10);
380 }
381 return 0;
382}
192 383
193static int 384static int acpi_cpufreq_target(struct cpufreq_policy *policy,
194acpi_cpufreq_target ( 385 unsigned int target_freq, unsigned int relation)
195 struct cpufreq_policy *policy,
196 unsigned int target_freq,
197 unsigned int relation)
198{ 386{
199 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; 387 struct acpi_cpufreq_data *data = drv_data[policy->cpu];
200 struct acpi_processor_performance *perf; 388 struct acpi_processor_performance *perf;
201 struct cpufreq_freqs freqs; 389 struct cpufreq_freqs freqs;
202 cpumask_t online_policy_cpus; 390 cpumask_t online_policy_cpus;
203 cpumask_t saved_mask; 391 struct drv_cmd cmd;
204 cpumask_t set_mask; 392 unsigned int msr;
205 cpumask_t covered_cpus;
206 unsigned int cur_state = 0;
207 unsigned int next_state = 0; 393 unsigned int next_state = 0;
208 unsigned int result = 0; 394 unsigned int next_perf_state = 0;
209 unsigned int j; 395 unsigned int i;
210 unsigned int tmp; 396 int result = 0;
211 397
212 dprintk("acpi_cpufreq_setpolicy\n"); 398 dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
213 399
214 result = cpufreq_frequency_table_target(policy, 400 if (unlikely(data == NULL ||
215 data->freq_table, 401 data->acpi_data == NULL || data->freq_table == NULL)) {
216 target_freq, 402 return -ENODEV;
217 relation, 403 }
218 &next_state);
219 if (unlikely(result))
220 return (result);
221 404
222 perf = data->acpi_data; 405 perf = data->acpi_data;
223 cur_state = perf->state; 406 result = cpufreq_frequency_table_target(policy,
224 freqs.old = data->freq_table[cur_state].frequency; 407 data->freq_table,
225 freqs.new = data->freq_table[next_state].frequency; 408 target_freq,
409 relation, &next_state);
410 if (unlikely(result))
411 return -ENODEV;
226 412
227#ifdef CONFIG_HOTPLUG_CPU 413#ifdef CONFIG_HOTPLUG_CPU
228 /* cpufreq holds the hotplug lock, so we are safe from here on */ 414 /* cpufreq holds the hotplug lock, so we are safe from here on */
@@ -231,106 +417,84 @@ acpi_cpufreq_target (
231 online_policy_cpus = policy->cpus; 417 online_policy_cpus = policy->cpus;
232#endif 418#endif
233 419
234 for_each_cpu_mask(j, online_policy_cpus) { 420 next_perf_state = data->freq_table[next_state].index;
235 freqs.cpu = j; 421 if (perf->state == next_perf_state) {
236 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 422 if (unlikely(data->resume)) {
423 dprintk("Called after resume, resetting to P%d\n",
424 next_perf_state);
425 data->resume = 0;
426 } else {
427 dprintk("Already at target state (P%d)\n",
428 next_perf_state);
429 return 0;
430 }
237 } 431 }
238 432
239 /* 433 switch (data->cpu_feature) {
240 * We need to call driver->target() on all or any CPU in 434 case SYSTEM_INTEL_MSR_CAPABLE:
241 * policy->cpus, depending on policy->shared_type. 435 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
242 */ 436 cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
243 saved_mask = current->cpus_allowed; 437 msr =
244 cpus_clear(covered_cpus); 438 (u32) perf->states[next_perf_state].
245 for_each_cpu_mask(j, online_policy_cpus) { 439 control & INTEL_MSR_RANGE;
246 /* 440 cmd.val = (cmd.val & ~INTEL_MSR_RANGE) | msr;
247 * Support for SMP systems. 441 break;
248 * Make sure we are running on CPU that wants to change freq 442 case SYSTEM_IO_CAPABLE:
249 */ 443 cmd.type = SYSTEM_IO_CAPABLE;
250 cpus_clear(set_mask); 444 cmd.addr.io.port = perf->control_register.address;
251 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) 445 cmd.addr.io.bit_width = perf->control_register.bit_width;
252 cpus_or(set_mask, set_mask, online_policy_cpus); 446 cmd.val = (u32) perf->states[next_perf_state].control;
253 else 447 break;
254 cpu_set(j, set_mask); 448 default:
255 449 return -ENODEV;
256 set_cpus_allowed(current, set_mask); 450 }
257 if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
258 dprintk("couldn't limit to CPUs in this domain\n");
259 result = -EAGAIN;
260 break;
261 }
262 451
263 result = acpi_processor_set_performance (data, j, next_state); 452 cpus_clear(cmd.mask);
264 if (result) {
265 result = -EAGAIN;
266 break;
267 }
268 453
269 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) 454 if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
270 break; 455 cmd.mask = online_policy_cpus;
271 456 else
272 cpu_set(j, covered_cpus); 457 cpu_set(policy->cpu, cmd.mask);
273 }
274 458
275 for_each_cpu_mask(j, online_policy_cpus) { 459 freqs.old = data->freq_table[perf->state].frequency;
276 freqs.cpu = j; 460 freqs.new = data->freq_table[next_perf_state].frequency;
277 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 461 for_each_cpu_mask(i, cmd.mask) {
462 freqs.cpu = i;
463 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
278 } 464 }
279 465
280 if (unlikely(result)) { 466 drv_write(&cmd);
281 /*
282 * We have failed halfway through the frequency change.
283 * We have sent callbacks to online_policy_cpus and
284 * acpi_processor_set_performance() has been called on
285 * coverd_cpus. Best effort undo..
286 */
287
288 if (!cpus_empty(covered_cpus)) {
289 for_each_cpu_mask(j, covered_cpus) {
290 policy->cpu = j;
291 acpi_processor_set_performance (data,
292 j,
293 cur_state);
294 }
295 }
296 467
297 tmp = freqs.new; 468 if (acpi_pstate_strict) {
298 freqs.new = freqs.old; 469 if (!check_freqs(cmd.mask, freqs.new, data)) {
299 freqs.old = tmp; 470 dprintk("acpi_cpufreq_target failed (%d)\n",
300 for_each_cpu_mask(j, online_policy_cpus) { 471 policy->cpu);
301 freqs.cpu = j; 472 return -EAGAIN;
302 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
303 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
304 } 473 }
305 } 474 }
306 475
307 set_cpus_allowed(current, saved_mask); 476 for_each_cpu_mask(i, cmd.mask) {
308 return (result); 477 freqs.cpu = i;
309} 478 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
479 }
480 perf->state = next_perf_state;
310 481
482 return result;
483}
311 484
312static int 485static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
313acpi_cpufreq_verify (
314 struct cpufreq_policy *policy)
315{ 486{
316 unsigned int result = 0; 487 struct acpi_cpufreq_data *data = drv_data[policy->cpu];
317 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
318 488
319 dprintk("acpi_cpufreq_verify\n"); 489 dprintk("acpi_cpufreq_verify\n");
320 490
321 result = cpufreq_frequency_table_verify(policy, 491 return cpufreq_frequency_table_verify(policy, data->freq_table);
322 data->freq_table);
323
324 return (result);
325} 492}
326 493
327
328static unsigned long 494static unsigned long
329acpi_cpufreq_guess_freq ( 495acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
330 struct cpufreq_acpi_io *data,
331 unsigned int cpu)
332{ 496{
333 struct acpi_processor_performance *perf = data->acpi_data; 497 struct acpi_processor_performance *perf = data->acpi_data;
334 498
335 if (cpu_khz) { 499 if (cpu_khz) {
336 /* search the closest match to cpu_khz */ 500 /* search the closest match to cpu_khz */
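
For SYSTEM_INTEL_MSR_CAPABLE processors, the target routine above only wants to replace the low 16 bits of IA32_PERF_CTL (the INTEL_MSR_RANGE mask) with the chosen P-state's control value and leave the upper bits untouched. Written as a plain read-modify-write, the intent is roughly this (a sketch only; the driver expresses it through cmd.val and drv_write() instead):

        #include <linux/types.h>
        #include <asm/msr.h>

        #define PSTATE_MASK     0xffff  /* same value as INTEL_MSR_RANGE */

        static void set_pstate_request(u32 control)
        {
                u32 lo, hi;

                rdmsr(MSR_IA32_PERF_CTL, lo, hi);
                lo = (lo & ~PSTATE_MASK) | (control & PSTATE_MASK);
                wrmsr(MSR_IA32_PERF_CTL, lo, hi);
        }

With acpi_pstate_strict set, check_freqs() then polls the status side for up to 1 ms (100 x 10 us) to confirm the hardware actually moved.
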
@@ -338,16 +502,16 @@ acpi_cpufreq_guess_freq (
338 unsigned long freq; 502 unsigned long freq;
339 unsigned long freqn = perf->states[0].core_frequency * 1000; 503 unsigned long freqn = perf->states[0].core_frequency * 1000;
340 504
341 for (i = 0; i < (perf->state_count - 1); i++) { 505 for (i=0; i<(perf->state_count-1); i++) {
342 freq = freqn; 506 freq = freqn;
343 freqn = perf->states[i+1].core_frequency * 1000; 507 freqn = perf->states[i+1].core_frequency * 1000;
344 if ((2 * cpu_khz) > (freqn + freq)) { 508 if ((2 * cpu_khz) > (freqn + freq)) {
345 perf->state = i; 509 perf->state = i;
346 return (freq); 510 return freq;
347 } 511 }
348 } 512 }
349 perf->state = perf->state_count - 1; 513 perf->state = perf->state_count-1;
350 return (freqn); 514 return freqn;
351 } else { 515 } else {
352 /* assume CPU is at P0... */ 516 /* assume CPU is at P0... */
353 perf->state = 0; 517 perf->state = 0;
@@ -355,7 +519,6 @@ acpi_cpufreq_guess_freq (
355 } 519 }
356} 520}
357 521
358
359/* 522/*
360 * acpi_cpufreq_early_init - initialize ACPI P-States library 523 * acpi_cpufreq_early_init - initialize ACPI P-States library
361 * 524 *
@@ -364,30 +527,34 @@ acpi_cpufreq_guess_freq (
364 * do _PDC and _PSD and find out the processor dependency for the 527 * do _PDC and _PSD and find out the processor dependency for the
365 * actual init that will happen later... 528 * actual init that will happen later...
366 */ 529 */
367static int acpi_cpufreq_early_init_acpi(void) 530static int acpi_cpufreq_early_init(void)
368{ 531{
369 struct acpi_processor_performance *data; 532 struct acpi_processor_performance *data;
370 unsigned int i, j; 533 cpumask_t covered;
534 unsigned int i, j;
371 535
372 dprintk("acpi_cpufreq_early_init\n"); 536 dprintk("acpi_cpufreq_early_init\n");
373 537
374 for_each_possible_cpu(i) { 538 for_each_possible_cpu(i) {
375 data = kzalloc(sizeof(struct acpi_processor_performance), 539 data = kzalloc(sizeof(struct acpi_processor_performance),
376 GFP_KERNEL); 540 GFP_KERNEL);
377 if (!data) { 541 if (!data) {
378 for_each_possible_cpu(j) { 542 for_each_cpu_mask(j, covered) {
379 kfree(acpi_perf_data[j]); 543 kfree(acpi_perf_data[j]);
380 acpi_perf_data[j] = NULL; 544 acpi_perf_data[j] = NULL;
381 } 545 }
382 return (-ENOMEM); 546 return -ENOMEM;
383 } 547 }
384 acpi_perf_data[i] = data; 548 acpi_perf_data[i] = data;
549 cpu_set(i, covered);
385 } 550 }
386 551
387 /* Do initialization in ACPI core */ 552 /* Do initialization in ACPI core */
388 return acpi_processor_preregister_performance(acpi_perf_data); 553 acpi_processor_preregister_performance(acpi_perf_data);
554 return 0;
389} 555}
390 556
557#ifdef CONFIG_SMP
391/* 558/*
392 * Some BIOSes do SW_ANY coordination internally, either set it up in hw 559 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
393 * or do it in BIOS firmware and won't inform about it to OS. If not 560 * or do it in BIOS firmware and won't inform about it to OS. If not
@@ -414,39 +581,42 @@ static struct dmi_system_id sw_any_bug_dmi_table[] = {
414 }, 581 },
415 { } 582 { }
416}; 583};
584#endif
417 585
418static int 586static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
419acpi_cpufreq_cpu_init (
420 struct cpufreq_policy *policy)
421{ 587{
422 unsigned int i; 588 unsigned int i;
423 unsigned int cpu = policy->cpu; 589 unsigned int valid_states = 0;
424 struct cpufreq_acpi_io *data; 590 unsigned int cpu = policy->cpu;
425 unsigned int result = 0; 591 struct acpi_cpufreq_data *data;
592 unsigned int result = 0;
426 struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; 593 struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
427 struct acpi_processor_performance *perf; 594 struct acpi_processor_performance *perf;
428 595
429 dprintk("acpi_cpufreq_cpu_init\n"); 596 dprintk("acpi_cpufreq_cpu_init\n");
430 597
431 if (!acpi_perf_data[cpu]) 598 if (!acpi_perf_data[cpu])
432 return (-ENODEV); 599 return -ENODEV;
433 600
434 data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); 601 data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
435 if (!data) 602 if (!data)
436 return (-ENOMEM); 603 return -ENOMEM;
437 604
438 data->acpi_data = acpi_perf_data[cpu]; 605 data->acpi_data = acpi_perf_data[cpu];
439 acpi_io_data[cpu] = data; 606 drv_data[cpu] = data;
440 607
441 result = acpi_processor_register_performance(data->acpi_data, cpu); 608 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
609 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
442 610
611 result = acpi_processor_register_performance(data->acpi_data, cpu);
443 if (result) 612 if (result)
444 goto err_free; 613 goto err_free;
445 614
446 perf = data->acpi_data; 615 perf = data->acpi_data;
447 policy->shared_type = perf->shared_type; 616 policy->shared_type = perf->shared_type;
617
448 /* 618 /*
449 * Will let policy->cpus know about dependency only when software 619 * Will let policy->cpus know about dependency only when software
450 * coordination is required. 620 * coordination is required.
451 */ 621 */
452 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || 622 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
@@ -462,10 +632,6 @@ acpi_cpufreq_cpu_init (
462 } 632 }
463#endif 633#endif
464 634
465 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
466 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
467 }
468
469 /* capability check */ 635 /* capability check */
470 if (perf->state_count <= 1) { 636 if (perf->state_count <= 1) {
471 dprintk("No P-States\n"); 637 dprintk("No P-States\n");
@@ -473,17 +639,33 @@ acpi_cpufreq_cpu_init (
473 goto err_unreg; 639 goto err_unreg;
474 } 640 }
475 641
476 if ((perf->control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) || 642 if (perf->control_register.space_id != perf->status_register.space_id) {
477 (perf->status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) { 643 result = -ENODEV;
478 dprintk("Unsupported address space [%d, %d]\n", 644 goto err_unreg;
479 (u32) (perf->control_register.space_id), 645 }
480 (u32) (perf->status_register.space_id)); 646
647 switch (perf->control_register.space_id) {
648 case ACPI_ADR_SPACE_SYSTEM_IO:
649 dprintk("SYSTEM IO addr space\n");
650 data->cpu_feature = SYSTEM_IO_CAPABLE;
651 break;
652 case ACPI_ADR_SPACE_FIXED_HARDWARE:
653 dprintk("HARDWARE addr space\n");
654 if (!check_est_cpu(cpu)) {
655 result = -ENODEV;
656 goto err_unreg;
657 }
658 data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
659 break;
660 default:
661 dprintk("Unknown addr space %d\n",
662 (u32) (perf->control_register.space_id));
481 result = -ENODEV; 663 result = -ENODEV;
482 goto err_unreg; 664 goto err_unreg;
483 } 665 }
484 666
485 /* alloc freq_table */ 667 data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
486 data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (perf->state_count + 1), GFP_KERNEL); 668 (perf->state_count+1), GFP_KERNEL);
487 if (!data->freq_table) { 669 if (!data->freq_table) {
488 result = -ENOMEM; 670 result = -ENOMEM;
489 goto err_unreg; 671 goto err_unreg;
@@ -492,129 +674,140 @@ acpi_cpufreq_cpu_init (
492 /* detect transition latency */ 674 /* detect transition latency */
493 policy->cpuinfo.transition_latency = 0; 675 policy->cpuinfo.transition_latency = 0;
494 for (i=0; i<perf->state_count; i++) { 676 for (i=0; i<perf->state_count; i++) {
495 if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency) 677 if ((perf->states[i].transition_latency * 1000) >
496 policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000; 678 policy->cpuinfo.transition_latency)
679 policy->cpuinfo.transition_latency =
680 perf->states[i].transition_latency * 1000;
497 } 681 }
498 policy->governor = CPUFREQ_DEFAULT_GOVERNOR; 682 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
499 683
500 /* The current speed is unknown and not detectable by ACPI... */ 684 data->max_freq = perf->states[0].core_frequency * 1000;
501 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
502
503 /* table init */ 685 /* table init */
504 for (i=0; i<=perf->state_count; i++) 686 for (i=0; i<perf->state_count; i++) {
505 { 687 if (i>0 && perf->states[i].core_frequency ==
506 data->freq_table[i].index = i; 688 perf->states[i-1].core_frequency)
507 if (i<perf->state_count) 689 continue;
508 data->freq_table[i].frequency = perf->states[i].core_frequency * 1000; 690
509 else 691 data->freq_table[valid_states].index = i;
510 data->freq_table[i].frequency = CPUFREQ_TABLE_END; 692 data->freq_table[valid_states].frequency =
693 perf->states[i].core_frequency * 1000;
694 valid_states++;
511 } 695 }
696 data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
512 697
513 result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); 698 result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
514 if (result) { 699 if (result)
515 goto err_freqfree; 700 goto err_freqfree;
701
702 switch (data->cpu_feature) {
703 case ACPI_ADR_SPACE_SYSTEM_IO:
704 /* Current speed is unknown and not detectable by IO port */
705 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
706 break;
707 case ACPI_ADR_SPACE_FIXED_HARDWARE:
708 acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
709 get_cur_freq_on_cpu(cpu);
710 break;
711 default:
712 break;
516 } 713 }
517 714
518 /* notify BIOS that we exist */ 715 /* notify BIOS that we exist */
519 acpi_processor_notify_smm(THIS_MODULE); 716 acpi_processor_notify_smm(THIS_MODULE);
520 717
521 printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management activated.\n", 718 /* Check for APERF/MPERF support in hardware */
522 cpu); 719 if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) {
720 unsigned int ecx;
721 ecx = cpuid_ecx(6);
722 if (ecx & CPUID_6_ECX_APERFMPERF_CAPABILITY)
723 acpi_cpufreq_driver.getavg = get_measured_perf;
724 }
725
726 dprintk("CPU%u - ACPI performance management activated.\n", cpu);
523 for (i = 0; i < perf->state_count; i++) 727 for (i = 0; i < perf->state_count; i++)
524 dprintk(" %cP%d: %d MHz, %d mW, %d uS\n", 728 dprintk(" %cP%d: %d MHz, %d mW, %d uS\n",
525 (i == perf->state?'*':' '), i, 729 (i == perf->state ? '*' : ' '), i,
526 (u32) perf->states[i].core_frequency, 730 (u32) perf->states[i].core_frequency,
527 (u32) perf->states[i].power, 731 (u32) perf->states[i].power,
528 (u32) perf->states[i].transition_latency); 732 (u32) perf->states[i].transition_latency);
529 733
530 cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); 734 cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
531 735
532 /* 736 /*
533 * the first call to ->target() should result in us actually 737 * the first call to ->target() should result in us actually
534 * writing something to the appropriate registers. 738 * writing something to the appropriate registers.
535 */ 739 */
536 data->resume = 1; 740 data->resume = 1;
537
538 return (result);
539 741
540 err_freqfree: 742 return result;
743
744err_freqfree:
541 kfree(data->freq_table); 745 kfree(data->freq_table);
542 err_unreg: 746err_unreg:
543 acpi_processor_unregister_performance(perf, cpu); 747 acpi_processor_unregister_performance(perf, cpu);
544 err_free: 748err_free:
545 kfree(data); 749 kfree(data);
546 acpi_io_data[cpu] = NULL; 750 drv_data[cpu] = NULL;
547 751
548 return (result); 752 return result;
549} 753}
550 754
551 755static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
552static int
553acpi_cpufreq_cpu_exit (
554 struct cpufreq_policy *policy)
555{ 756{
556 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; 757 struct acpi_cpufreq_data *data = drv_data[policy->cpu];
557
558 758
559 dprintk("acpi_cpufreq_cpu_exit\n"); 759 dprintk("acpi_cpufreq_cpu_exit\n");
560 760
561 if (data) { 761 if (data) {
562 cpufreq_frequency_table_put_attr(policy->cpu); 762 cpufreq_frequency_table_put_attr(policy->cpu);
563 acpi_io_data[policy->cpu] = NULL; 763 drv_data[policy->cpu] = NULL;
564 acpi_processor_unregister_performance(data->acpi_data, policy->cpu); 764 acpi_processor_unregister_performance(data->acpi_data,
765 policy->cpu);
565 kfree(data); 766 kfree(data);
566 } 767 }
567 768
568 return (0); 769 return 0;
569} 770}
570 771
571static int 772static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
572acpi_cpufreq_resume (
573 struct cpufreq_policy *policy)
574{ 773{
575 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; 774 struct acpi_cpufreq_data *data = drv_data[policy->cpu];
576
577 775
578 dprintk("acpi_cpufreq_resume\n"); 776 dprintk("acpi_cpufreq_resume\n");
579 777
580 data->resume = 1; 778 data->resume = 1;
581 779
582 return (0); 780 return 0;
583} 781}
584 782
585 783static struct freq_attr *acpi_cpufreq_attr[] = {
586static struct freq_attr* acpi_cpufreq_attr[] = {
587 &cpufreq_freq_attr_scaling_available_freqs, 784 &cpufreq_freq_attr_scaling_available_freqs,
588 NULL, 785 NULL,
589}; 786};
590 787
591static struct cpufreq_driver acpi_cpufreq_driver = { 788static struct cpufreq_driver acpi_cpufreq_driver = {
592 .verify = acpi_cpufreq_verify, 789 .verify = acpi_cpufreq_verify,
593 .target = acpi_cpufreq_target, 790 .target = acpi_cpufreq_target,
594 .init = acpi_cpufreq_cpu_init, 791 .init = acpi_cpufreq_cpu_init,
595 .exit = acpi_cpufreq_cpu_exit, 792 .exit = acpi_cpufreq_cpu_exit,
596 .resume = acpi_cpufreq_resume, 793 .resume = acpi_cpufreq_resume,
597 .name = "acpi-cpufreq", 794 .name = "acpi-cpufreq",
598 .owner = THIS_MODULE, 795 .owner = THIS_MODULE,
599 .attr = acpi_cpufreq_attr, 796 .attr = acpi_cpufreq_attr,
600}; 797};
601 798
602 799static int __init acpi_cpufreq_init(void)
603static int __init
604acpi_cpufreq_init (void)
605{ 800{
606 dprintk("acpi_cpufreq_init\n"); 801 dprintk("acpi_cpufreq_init\n");
607 802
608 acpi_cpufreq_early_init_acpi(); 803 acpi_cpufreq_early_init();
609 804
610 return cpufreq_register_driver(&acpi_cpufreq_driver); 805 return cpufreq_register_driver(&acpi_cpufreq_driver);
611} 806}
612 807
613 808static void __exit acpi_cpufreq_exit(void)
614static void __exit
615acpi_cpufreq_exit (void)
616{ 809{
617 unsigned int i; 810 unsigned int i;
618 dprintk("acpi_cpufreq_exit\n"); 811 dprintk("acpi_cpufreq_exit\n");
619 812
620 cpufreq_unregister_driver(&acpi_cpufreq_driver); 813 cpufreq_unregister_driver(&acpi_cpufreq_driver);
@@ -627,7 +820,9 @@ acpi_cpufreq_exit (void)
627} 820}
628 821
629module_param(acpi_pstate_strict, uint, 0644); 822module_param(acpi_pstate_strict, uint, 0644);
630MODULE_PARM_DESC(acpi_pstate_strict, "value 0 or non-zero. non-zero -> strict ACPI checks are performed during frequency changes."); 823MODULE_PARM_DESC(acpi_pstate_strict,
824 "value 0 or non-zero. non-zero -> strict ACPI checks are "
825 "performed during frequency changes.");
631 826
632late_initcall(acpi_cpufreq_init); 827late_initcall(acpi_cpufreq_init);
633module_exit(acpi_cpufreq_exit); 828module_exit(acpi_cpufreq_exit);
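
The get_measured_perf() hook wired up in this file is easiest to see with numbers. Suppose, purely as an illustration, that since the counters were last cleared MPERF advanced by 2,000,000 and APERF by 1,500,000 while the CPU was in C0: the ratio says the core averaged 75% of its maximum advertised frequency, so with max_freq of 2,400,000 kHz the hook reports about 1,800,000 kHz. The shift steps taken on 32-bit kernels only discard low-order bits before the 32-bit multiply by 100, so the result is approximate but cannot overflow, which is good enough for a governor feedback value. Strict post-transition verification remains opt-in, e.g. modprobe acpi-cpufreq acpi_pstate_strict=1.
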
diff --git a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
index 92afa3bc84f1..6667e9cceb9f 100644
--- a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
@@ -447,7 +447,6 @@ static int __init cpufreq_gx_init(void)
447 int ret; 447 int ret;
448 struct gxfreq_params *params; 448 struct gxfreq_params *params;
449 struct pci_dev *gx_pci; 449 struct pci_dev *gx_pci;
450 u32 class_rev;
451 450
452 /* Test if we have the right hardware */ 451 /* Test if we have the right hardware */
453 if ((gx_pci = gx_detect_chipset()) == NULL) 452 if ((gx_pci = gx_detect_chipset()) == NULL)
@@ -472,8 +471,7 @@ static int __init cpufreq_gx_init(void)
472 pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2)); 471 pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2));
473 pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration)); 472 pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration));
474 pci_read_config_byte(params->cs55x0, PCI_MODOFF, &(params->off_duration)); 473 pci_read_config_byte(params->cs55x0, PCI_MODOFF, &(params->off_duration));
475 pci_read_config_dword(params->cs55x0, PCI_CLASS_REVISION, &class_rev); 474 pci_read_config_byte(params->cs55x0, PCI_REVISION_ID, &params->pci_rev);
476 params->pci_rev = class_rev && 0xff;
477 475
478 if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) { 476 if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) {
479 kfree(params); 477 kfree(params);
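
The two deleted lines above also quietly fix a bug: the removed expression used '&&' (logical AND, which can only yield 0 or 1) where '&' was intended, so pci_rev never held the real revision. Reading the revision register directly makes the masking moot; a minimal sketch of the replacement idiom (the function name is illustrative):

        #include <linux/pci.h>

        static u8 chipset_revision(struct pci_dev *dev)
        {
                u8 rev;

                /* PCI_REVISION_ID is the low byte of the class/revision dword,
                 * so one byte read replaces the dword read plus mask. */
                pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
                return rev;
        }
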
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index 7233abe5d695..c548daad3476 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -410,7 +410,7 @@ static int __init longhaul_get_ranges(void)
410 maxmult=longhaul_get_cpu_mult(); 410 maxmult=longhaul_get_cpu_mult();
411 411
412 /* Starting with the 1.2GHz parts, theres a 200MHz bus. */ 412 /* Starting with the 1.2GHz parts, theres a 200MHz bus. */
413 if ((cpu_khz/1000) > 1200) 413 if ((cpu_khz/maxmult) > 13400)
414 fsb = 200; 414 fsb = 200;
415 else 415 else
416 fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB]; 416 fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
@@ -583,6 +583,10 @@ static int enable_arbiter_disable(void)
583 if (dev == NULL) { 583 if (dev == NULL) {
584 reg = 0x76; 584 reg = 0x76;
585 dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_862X_0, NULL); 585 dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_862X_0, NULL);
586 /* Find CN400 V-Link host bridge */
587 if (dev == NULL)
588 dev = pci_find_device(PCI_VENDOR_ID_VIA, 0x7259, NULL);
589
586 } 590 }
587 if (dev != NULL) { 591 if (dev != NULL) {
588 /* Enable access to port 0x22 */ 592 /* Enable access to port 0x22 */
@@ -734,7 +738,7 @@ print_support_type:
734 return 0; 738 return 0;
735 739
736err_acpi: 740err_acpi:
737 printk(KERN_ERR PFX "No ACPI support. No VT8601 or VT8623 northbridge. Aborting.\n"); 741 printk(KERN_ERR PFX "No ACPI support. Unsupported northbridge. Aborting.\n");
738 return -ENODEV; 742 return -ENODEV;
739} 743}
740 744
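
The reworked FSB test above keys off cpu_khz/maxmult > 13400 rather than assuming every part clocked above 1.2 GHz sits on a 200 MHz bus. Longhaul's multiplier tables store the ratio scaled by ten (3.5x is kept as 35), so cpu_khz/maxmult works out to roughly one hundred times the bus clock in MHz: a hypothetical 1.4 GHz part at 10.5x gives 1400000/105, about 13333, i.e. a 133 MHz bus, which the old clock-speed test would have wrongly treated as a 200 MHz bus, while a 2.0 GHz part at 10x gives 20000 and is correctly classified. The scaled-by-ten detail comes from the multiplier tables in longhaul.h, not from this hunk.
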
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index 304d2eaa4a1b..bec50170b75a 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -163,29 +163,27 @@ static int cpufreq_p4_verify(struct cpufreq_policy *policy)
163 163
164static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) 164static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
165{ 165{
166 if ((c->x86 == 0x06) && (c->x86_model == 0x09)) { 166 if (c->x86 == 0x06) {
167 /* Pentium M (Banias) */ 167 if (cpu_has(c, X86_FEATURE_EST))
168 printk(KERN_WARNING PFX "Warning: Pentium M detected. " 168 printk(KERN_WARNING PFX "Warning: EST-capable CPU detected. "
169 "The speedstep_centrino module offers voltage scaling" 169 "The acpi-cpufreq module offers voltage scaling"
170 " in addition of frequency scaling. You should use " 170 " in addition of frequency scaling. You should use "
171 "that instead of p4-clockmod, if possible.\n"); 171 "that instead of p4-clockmod, if possible.\n");
172 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM); 172 switch (c->x86_model) {
173 } 173 case 0x0E: /* Core */
174 174 case 0x0F: /* Core Duo */
175 if ((c->x86 == 0x06) && (c->x86_model == 0x0D)) { 175 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
176 /* Pentium M (Dothan) */ 176 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PCORE);
177 printk(KERN_WARNING PFX "Warning: Pentium M detected. " 177 case 0x0D: /* Pentium M (Dothan) */
178 "The speedstep_centrino module offers voltage scaling" 178 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
179 " in addition of frequency scaling. You should use " 179 /* fall through */
180 "that instead of p4-clockmod, if possible.\n"); 180 case 0x09: /* Pentium M (Banias) */
181 /* on P-4s, the TSC runs with constant frequency independent whether 181 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM);
182 * throttling is active or not. */ 182 }
183 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
184 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM);
185 } 183 }
186 184
187 if (c->x86 != 0xF) { 185 if (c->x86 != 0xF) {
188 printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <linux@brodo.de>\n"); 186 printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <cpufreq@lists.linux.org.uk>\n");
189 return 0; 187 return 0;
190 } 188 }
191 189
diff --git a/arch/i386/kernel/cpu/cpufreq/sc520_freq.c b/arch/i386/kernel/cpu/cpufreq/sc520_freq.c
index ef457d50f4ac..b8fb4b521c62 100644
--- a/arch/i386/kernel/cpu/cpufreq/sc520_freq.c
+++ b/arch/i386/kernel/cpu/cpufreq/sc520_freq.c
@@ -153,6 +153,7 @@ static struct cpufreq_driver sc520_freq_driver = {
153static int __init sc520_freq_init(void) 153static int __init sc520_freq_init(void)
154{ 154{
155 struct cpuinfo_x86 *c = cpu_data; 155 struct cpuinfo_x86 *c = cpu_data;
156 int err;
156 157
157 /* Test if we have the right hardware */ 158 /* Test if we have the right hardware */
158 if(c->x86_vendor != X86_VENDOR_AMD || 159 if(c->x86_vendor != X86_VENDOR_AMD ||
@@ -166,7 +167,11 @@ static int __init sc520_freq_init(void)
166 return -ENOMEM; 167 return -ENOMEM;
167 } 168 }
168 169
169 return cpufreq_register_driver(&sc520_freq_driver); 170 err = cpufreq_register_driver(&sc520_freq_driver);
171 if (err)
172 iounmap(cpuctl);
173
174 return err;
170} 175}
171 176
172 177
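
The sc520 change is the usual unwind-on-failure pattern: the MMCR window is ioremap()ed before cpufreq_register_driver(), so a failed registration has to give the mapping back. In generic form (all names and the address/size below are placeholders, not the driver's):

        #include <linux/init.h>
        #include <linux/errno.h>
        #include <linux/cpufreq.h>
        #include <asm/io.h>

        static void __iomem *regs;
        static struct cpufreq_driver demo_driver;       /* placeholder; ops omitted */

        static int __init demo_init(void)
        {
                int err;

                regs = ioremap(0xfffef000, 32);         /* placeholder address/size */
                if (!regs)
                        return -ENOMEM;

                err = cpufreq_register_driver(&demo_driver);
                if (err)
                        iounmap(regs);  /* undo the mapping on failure */
                return err;
        }
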
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index e8993baf3d14..5113e9231634 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -36,6 +36,7 @@
36 36
37#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg) 37#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
38 38
39#define INTEL_MSR_RANGE (0xffff)
39 40
40struct cpu_id 41struct cpu_id
41{ 42{
@@ -379,6 +380,7 @@ static int centrino_cpu_early_init_acpi(void)
379} 380}
380 381
381 382
383#ifdef CONFIG_SMP
382/* 384/*
383 * Some BIOSes do SW_ANY coordination internally, either set it up in hw 385 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
384 * or do it in BIOS firmware and won't inform about it to OS. If not 386 * or do it in BIOS firmware and won't inform about it to OS. If not
@@ -392,7 +394,6 @@ static int sw_any_bug_found(struct dmi_system_id *d)
392 return 0; 394 return 0;
393} 395}
394 396
395
396static struct dmi_system_id sw_any_bug_dmi_table[] = { 397static struct dmi_system_id sw_any_bug_dmi_table[] = {
397 { 398 {
398 .callback = sw_any_bug_found, 399 .callback = sw_any_bug_found,
@@ -405,7 +406,7 @@ static struct dmi_system_id sw_any_bug_dmi_table[] = {
405 }, 406 },
406 { } 407 { }
407}; 408};
408 409#endif
409 410
410/* 411/*
411 * centrino_cpu_init_acpi - register with ACPI P-States library 412 * centrino_cpu_init_acpi - register with ACPI P-States library
@@ -463,8 +464,9 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
463 } 464 }
464 465
465 for (i=0; i<p->state_count; i++) { 466 for (i=0; i<p->state_count; i++) {
466 if (p->states[i].control != p->states[i].status) { 467 if ((p->states[i].control & INTEL_MSR_RANGE) !=
467 dprintk("Different control (%llu) and status values (%llu)\n", 468 (p->states[i].status & INTEL_MSR_RANGE)) {
469 dprintk("Different MSR bits in control (%llu) and status (%llu)\n",
468 p->states[i].control, p->states[i].status); 470 p->states[i].control, p->states[i].status);
469 result = -EINVAL; 471 result = -EINVAL;
470 goto err_unreg; 472 goto err_unreg;
@@ -500,7 +502,7 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
500 } 502 }
501 503
502 for (i=0; i<p->state_count; i++) { 504 for (i=0; i<p->state_count; i++) {
503 centrino_model[cpu]->op_points[i].index = p->states[i].control; 505 centrino_model[cpu]->op_points[i].index = p->states[i].control & INTEL_MSR_RANGE;
504 centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000; 506 centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000;
505 dprintk("adding state %i with frequency %u and control value %04x\n", 507 dprintk("adding state %i with frequency %u and control value %04x\n",
506 i, centrino_model[cpu]->op_points[i].frequency, centrino_model[cpu]->op_points[i].index); 508 i, centrino_model[cpu]->op_points[i].frequency, centrino_model[cpu]->op_points[i].index);
@@ -531,6 +533,9 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
531 533
532 /* notify BIOS that we exist */ 534 /* notify BIOS that we exist */
533 acpi_processor_notify_smm(THIS_MODULE); 535 acpi_processor_notify_smm(THIS_MODULE);
 536 printk("speedstep-centrino with X86_SPEEDSTEP_CENTRINO_ACPI "
 537 "config is deprecated.\n"
 538 "Use X86_ACPI_CPUFREQ (acpi-cpufreq) instead.\n");
534 539
535 return 0; 540 return 0;
536 541
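
The centrino hunks above relax the control-versus-status sanity check so that only the low 16 bits are compared, since only those bits (the INTEL_MSR_RANGE mask, 0xffff) are ever programmed into the P-state MSR. For a hypothetical state whose _PSS reports control 0x00010a1d and status 0x0a1d, the old strict comparison rejected the whole table even though the programmable part matches; with the mask both sides reduce to 0x0a1d and the state is accepted. The same mask is applied when op_points[].index is taken from the control value.
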
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
index 4f46cac155c4..d59277c00911 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
@@ -123,6 +123,36 @@ static unsigned int pentiumM_get_frequency(void)
123 return (msr_tmp * 100 * 1000); 123 return (msr_tmp * 100 * 1000);
124} 124}
125 125
126static unsigned int pentium_core_get_frequency(void)
127{
128 u32 fsb = 0;
129 u32 msr_lo, msr_tmp;
130
131 rdmsr(MSR_FSB_FREQ, msr_lo, msr_tmp);
132 /* see table B-2 of 25366920.pdf */
133 switch (msr_lo & 0x07) {
134 case 5:
135 fsb = 100000;
136 break;
137 case 1:
138 fsb = 133333;
139 break;
140 case 3:
141 fsb = 166667;
142 break;
143 default:
 144 printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value\n");
145 }
146
147 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
148 dprintk("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
149
150 msr_tmp = (msr_lo >> 22) & 0x1f;
151 dprintk("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * fsb));
152
153 return (msr_tmp * fsb);
154}
155
126 156
127static unsigned int pentium4_get_frequency(void) 157static unsigned int pentium4_get_frequency(void)
128{ 158{
@@ -174,6 +204,8 @@ static unsigned int pentium4_get_frequency(void)
174unsigned int speedstep_get_processor_frequency(unsigned int processor) 204unsigned int speedstep_get_processor_frequency(unsigned int processor)
175{ 205{
176 switch (processor) { 206 switch (processor) {
207 case SPEEDSTEP_PROCESSOR_PCORE:
208 return pentium_core_get_frequency();
177 case SPEEDSTEP_PROCESSOR_PM: 209 case SPEEDSTEP_PROCESSOR_PM:
178 return pentiumM_get_frequency(); 210 return pentiumM_get_frequency();
179 case SPEEDSTEP_PROCESSOR_P4D: 211 case SPEEDSTEP_PROCESSOR_P4D:
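As a rough illustration of the arithmetic in pentium_core_get_frequency() above, a stand-alone sketch that mirrors the FSB decode and the bits 22-26 multiplier (the switch values are taken from the hunk; the MSR contents below are invented):

    #include <stdio.h>

    /* decode the low three bits of MSR_FSB_FREQ into kHz, as in the hunk above */
    static unsigned int decode_fsb_khz(unsigned int msr_lo)
    {
        switch (msr_lo & 0x07) {
        case 5: return 100000;
        case 1: return 133333;
        case 3: return 166667;
        default: return 0;      /* undefined encoding */
        }
    }

    int main(void)
    {
        unsigned int msr_fsb = 0x3;     /* hypothetical: 166 MHz FSB */
        unsigned int ratio = 13;        /* hypothetical bits 22-26 of EBL_CR_POWERON */

        /* 13 * 166667 kHz is roughly a 2.16 GHz core clock */
        printf("core frequency: %u kHz\n", ratio * decode_fsb_khz(msr_fsb));
        return 0;
    }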
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
index b735429c50b4..b11bcc608cac 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
@@ -22,6 +22,7 @@
22 * the speedstep_get_processor_frequency() call. */ 22 * the speedstep_get_processor_frequency() call. */
23#define SPEEDSTEP_PROCESSOR_PM 0xFFFFFF03 /* Pentium M */ 23#define SPEEDSTEP_PROCESSOR_PM 0xFFFFFF03 /* Pentium M */
24#define SPEEDSTEP_PROCESSOR_P4D 0xFFFFFF04 /* desktop P4 */ 24#define SPEEDSTEP_PROCESSOR_P4D 0xFFFFFF04 /* desktop P4 */
25#define SPEEDSTEP_PROCESSOR_PCORE 0xFFFFFF05 /* Core */
25 26
26/* speedstep states -- only two of them */ 27/* speedstep states -- only two of them */
27 28
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
index c28333d53646..ff0d89806114 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
@@ -360,9 +360,6 @@ static int __init speedstep_init(void)
360 case SPEEDSTEP_PROCESSOR_PIII_C: 360 case SPEEDSTEP_PROCESSOR_PIII_C:
361 case SPEEDSTEP_PROCESSOR_PIII_C_EARLY: 361 case SPEEDSTEP_PROCESSOR_PIII_C_EARLY:
362 break; 362 break;
363 case SPEEDSTEP_PROCESSOR_P4M:
364 printk(KERN_INFO "speedstep-smi: you're trying to use this cpufreq driver on a Pentium 4-based CPU. Most likely it will not work.\n");
365 break;
366 default: 363 default:
367 speedstep_processor = 0; 364 speedstep_processor = 0;
368 } 365 }
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index 972346604f9d..47ffec57c0cb 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Intel CPU Microcode Update Driver for Linux 2 * Intel CPU Microcode Update Driver for Linux
3 * 3 *
4 * Copyright (C) 2000-2004 Tigran Aivazian 4 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
5 * 2006 Shaohua Li <shaohua.li@intel.com> 5 * 2006 Shaohua Li <shaohua.li@intel.com>
6 * 6 *
7 * This driver allows to upgrade microcode on Intel processors 7 * This driver allows to upgrade microcode on Intel processors
@@ -92,7 +92,7 @@
92#include <asm/processor.h> 92#include <asm/processor.h>
93 93
94MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver"); 94MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
95MODULE_AUTHOR("Tigran Aivazian <tigran@veritas.com>"); 95MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
96MODULE_LICENSE("GPL"); 96MODULE_LICENSE("GPL");
97 97
98#define MICROCODE_VERSION "1.14a" 98#define MICROCODE_VERSION "1.14a"
@@ -752,7 +752,7 @@ static int __init microcode_init (void)
752 register_hotcpu_notifier(&mc_cpu_notifier); 752 register_hotcpu_notifier(&mc_cpu_notifier);
753 753
754 printk(KERN_INFO 754 printk(KERN_INFO
755 "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran@veritas.com>\n"); 755 "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran@aivazian.fsnet.co.uk>\n");
756 return 0; 756 return 0;
757} 757}
758 758
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index b0f84e5778ad..aef39be81361 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -69,9 +69,7 @@ static int __devinitdata smp_b_stepping;
69 69
70/* Number of siblings per CPU package */ 70/* Number of siblings per CPU package */
71int smp_num_siblings = 1; 71int smp_num_siblings = 1;
72#ifdef CONFIG_SMP
73EXPORT_SYMBOL(smp_num_siblings); 72EXPORT_SYMBOL(smp_num_siblings);
74#endif
75 73
76/* Last level cache ID of each logical CPU */ 74/* Last level cache ID of each logical CPU */
77int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; 75int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 1f16ebb9a800..324ea7565e2c 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -488,7 +488,7 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
488 488
489#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) 489#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
490 490
491static void rs_set_termios(struct tty_struct *tty, struct termios *old_termios) 491static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
492{ 492{
493 unsigned int cflag = tty->termios->c_cflag; 493 unsigned int cflag = tty->termios->c_cflag;
494 494
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 8ae384eb5357..098ee605bf5e 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/
29obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o 29obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
30obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o 30obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
31obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o 31obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
32obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
32obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o 33obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
33obj-$(CONFIG_AUDIT) += audit.o 34obj-$(CONFIG_AUDIT) += audit.o
34obj-$(CONFIG_PCI_MSI) += msi_ia64.o 35obj-$(CONFIG_PCI_MSI) += msi_ia64.o
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index 0aabedf95dad..bc2f64d72244 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -19,29 +19,11 @@
19 19
20#include <asm/kdebug.h> 20#include <asm/kdebug.h>
21#include <asm/mca.h> 21#include <asm/mca.h>
22#include <asm/uaccess.h>
23 22
24int kdump_status[NR_CPUS]; 23int kdump_status[NR_CPUS];
25atomic_t kdump_cpu_freezed; 24atomic_t kdump_cpu_freezed;
26atomic_t kdump_in_progress; 25atomic_t kdump_in_progress;
27int kdump_on_init = 1; 26int kdump_on_init = 1;
28ssize_t
29copy_oldmem_page(unsigned long pfn, char *buf,
30 size_t csize, unsigned long offset, int userbuf)
31{
32 void *vaddr;
33
34 if (!csize)
35 return 0;
36 vaddr = __va(pfn<<PAGE_SHIFT);
37 if (userbuf) {
38 if (copy_to_user(buf, (vaddr + offset), csize)) {
39 return -EFAULT;
40 }
41 } else
42 memcpy(buf, (vaddr + offset), csize);
43 return csize;
44}
45 27
46static inline Elf64_Word 28static inline Elf64_Word
47*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data, 29*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
@@ -225,14 +207,10 @@ static ctl_table sys_table[] = {
225static int 207static int
226machine_crash_setup(void) 208machine_crash_setup(void)
227{ 209{
228 char *from = strstr(saved_command_line, "elfcorehdr=");
229 static struct notifier_block kdump_init_notifier_nb = { 210 static struct notifier_block kdump_init_notifier_nb = {
230 .notifier_call = kdump_init_notifier, 211 .notifier_call = kdump_init_notifier,
231 }; 212 };
232 int ret; 213 int ret;
233 if (from)
234 elfcorehdr_addr = memparse(from+11, &from);
235 saved_max_pfn = (unsigned long)-1;
236 if((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0) 214 if((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
237 return ret; 215 return ret;
238#ifdef CONFIG_SYSCTL 216#ifdef CONFIG_SYSCTL
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
new file mode 100644
index 000000000000..83b8c91c1408
--- /dev/null
+++ b/arch/ia64/kernel/crash_dump.c
@@ -0,0 +1,48 @@
1/*
2 * kernel/crash_dump.c - Memory preserving reboot related code.
3 *
4 * Created by: Simon Horman <horms@verge.net.au>
5 * Original code moved from kernel/crash.c
6 * Original code comment copied from the i386 version of this file
7 */
8
9#include <linux/errno.h>
10#include <linux/types.h>
11
12#include <linux/uaccess.h>
13
14/**
15 * copy_oldmem_page - copy one page from "oldmem"
16 * @pfn: page frame number to be copied
17 * @buf: target memory address for the copy; this can be in kernel address
18 * space or user address space (see @userbuf)
19 * @csize: number of bytes to copy
20 * @offset: offset in bytes into the page (based on pfn) to begin the copy
21 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
22 * otherwise @buf is in kernel address space, use memcpy().
23 *
24 * Copy a page from "oldmem". For this page, there is no pte mapped
25 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
26 *
27 * Calling copy_to_user() in atomic context is not desirable. Hence first
28 * copying the data to a pre-allocated kernel page and then copying to user
29 * space in non-atomic context.
30 */
31ssize_t
32copy_oldmem_page(unsigned long pfn, char *buf,
33 size_t csize, unsigned long offset, int userbuf)
34{
35 void *vaddr;
36
37 if (!csize)
38 return 0;
39 vaddr = __va(pfn<<PAGE_SHIFT);
40 if (userbuf) {
41 if (copy_to_user(buf, (vaddr + offset), csize)) {
42 return -EFAULT;
43 }
44 } else
45 memcpy(buf, (vaddr + offset), csize);
46 return csize;
47}
48
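A hedged sketch of the sort of caller this new file serves: the /proc/vmcore read path walks the old kernel's memory page by page and lets copy_oldmem_page() do the actual user/kernel copy. The helper below is illustrative only and is not the real fs/proc/vmcore.c code:

    #include <linux/types.h>
    #include <asm/page.h>

    extern ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
                                    unsigned long offset, int userbuf);

    /* illustrative: stream a byte range out of the crashed kernel's memory */
    static ssize_t read_oldmem_range(unsigned long pfn, unsigned long offset,
                                     char *buf, size_t count, int userbuf)
    {
        ssize_t copied = 0;

        while (count) {
            size_t chunk = PAGE_SIZE - offset;
            ssize_t ret;

            if (chunk > count)
                chunk = count;
            ret = copy_oldmem_page(pfn, buf, chunk, offset, userbuf);
            if (ret < 0)
                return ret;
            buf += ret;
            copied += ret;
            count -= ret;
            offset = 0;
            pfn++;
        }
        return copied;
    }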
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S
index 5cd6226f44f2..621630256c4a 100644
--- a/arch/ia64/kernel/jprobes.S
+++ b/arch/ia64/kernel/jprobes.S
@@ -45,13 +45,14 @@
45 * to the correct location. 45 * to the correct location.
46 */ 46 */
47#include <asm/asmmacro.h> 47#include <asm/asmmacro.h>
48#include <asm-ia64/break.h>
48 49
49 /* 50 /*
50 * void jprobe_break(void) 51 * void jprobe_break(void)
51 */ 52 */
52 .section .kprobes.text, "ax" 53 .section .kprobes.text, "ax"
53ENTRY(jprobe_break) 54ENTRY(jprobe_break)
54 break.m 0x80300 55 break.m __IA64_BREAK_JPROBE
55END(jprobe_break) 56END(jprobe_break)
56 57
57 /* 58 /*
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 76e778951e20..6cb56dd4056d 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -88,6 +88,7 @@ static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
88{ 88{
89 p->ainsn.inst_flag = 0; 89 p->ainsn.inst_flag = 0;
90 p->ainsn.target_br_reg = 0; 90 p->ainsn.target_br_reg = 0;
91 p->ainsn.slot = slot;
91 92
92 /* Check for Break instruction 93 /* Check for Break instruction
93 * Bits 37:40 Major opcode to be zero 94 * Bits 37:40 Major opcode to be zero
@@ -129,48 +130,6 @@ static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
129 130
130/* 131/*
131 * In this function we check to see if the instruction 132 * In this function we check to see if the instruction
132 * on which we are inserting kprobe is supported.
133 * Returns 0 if supported
134 * Returns -EINVAL if unsupported
135 */
136static int __kprobes unsupported_inst(uint template, uint slot,
137 uint major_opcode,
138 unsigned long kprobe_inst,
139 unsigned long addr)
140{
141 if (bundle_encoding[template][slot] == I) {
142 switch (major_opcode) {
143 case 0x0: //I_UNIT_MISC_OPCODE:
144 /*
145 * Check for Integer speculation instruction
146 * - Bit 33-35 to be equal to 0x1
147 */
148 if (((kprobe_inst >> 33) & 0x7) == 1) {
149 printk(KERN_WARNING
150 "Kprobes on speculation inst at <0x%lx> not supported\n",
151 addr);
152 return -EINVAL;
153 }
154
155 /*
156 * IP relative mov instruction
157 * - Bit 27-35 to be equal to 0x30
158 */
159 if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
160 printk(KERN_WARNING
161 "Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
162 addr);
163 return -EINVAL;
164
165 }
166 }
167 }
168 return 0;
169}
170
171
172/*
173 * In this function we check to see if the instruction
174 * (qp) cmpx.crel.ctype p1,p2=r2,r3 133 * (qp) cmpx.crel.ctype p1,p2=r2,r3
175 * on which we are inserting kprobe is cmp instruction 134 * on which we are inserting kprobe is cmp instruction
176 * with ctype as unc. 135 * with ctype as unc.
@@ -206,26 +165,136 @@ out:
206} 165}
207 166
208/* 167/*
168 * In this function we check to see if the instruction
169 * on which we are inserting kprobe is supported.
170 * Returns qp value if supported
171 * Returns -EINVAL if unsupported
172 */
173static int __kprobes unsupported_inst(uint template, uint slot,
174 uint major_opcode,
175 unsigned long kprobe_inst,
176 unsigned long addr)
177{
178 int qp;
179
180 qp = kprobe_inst & 0x3f;
181 if (is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst)) {
182 if (slot == 1 && qp) {
 183 printk(KERN_WARNING "Kprobes on cmp unc "
 184 "instruction on slot 1 at <0x%lx> "
 185 "is not supported\n", addr);
186 return -EINVAL;
187
188 }
189 qp = 0;
190 }
191 else if (bundle_encoding[template][slot] == I) {
192 if (major_opcode == 0) {
193 /*
194 * Check for Integer speculation instruction
195 * - Bit 33-35 to be equal to 0x1
196 */
197 if (((kprobe_inst >> 33) & 0x7) == 1) {
198 printk(KERN_WARNING
199 "Kprobes on speculation inst at <0x%lx> not supported\n",
200 addr);
201 return -EINVAL;
202 }
203 /*
204 * IP relative mov instruction
205 * - Bit 27-35 to be equal to 0x30
206 */
207 if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
208 printk(KERN_WARNING
209 "Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
210 addr);
211 return -EINVAL;
212
213 }
214 }
215 else if ((major_opcode == 5) && !(kprobe_inst & (0xFUl << 33)) &&
216 (kprobe_inst & (0x1UL << 12))) {
217 /* test bit instructions, tbit,tnat,tf
218 * bit 33-36 to be equal to 0
219 * bit 12 to be equal to 1
220 */
221 if (slot == 1 && qp) {
 222 printk(KERN_WARNING "Kprobes on test bit "
 223 "instruction on slot 1 at <0x%lx> "
 224 "is not supported\n", addr);
225 return -EINVAL;
226 }
227 qp = 0;
228 }
229 }
230 else if (bundle_encoding[template][slot] == B) {
231 if (major_opcode == 7) {
232 /* IP-Relative Predict major code is 7 */
 233 printk(KERN_WARNING "Kprobes on IP-Relative "
 234 "Predict is not supported\n");
235 return -EINVAL;
236 }
237 else if (major_opcode == 2) {
238 /* Indirect Predict, major code is 2
 239 * bits 27-32 to be equal to 0x10 or 0x11
240 */
241 int x6=(kprobe_inst >> 27) & 0x3F;
242 if ((x6 == 0x10) || (x6 == 0x11)) {
 243 printk(KERN_WARNING "Kprobes on "
 244 "Indirect Predict is not supported\n");
245 return -EINVAL;
246 }
247 }
248 }
 249 /* The kernel does not use floating-point instructions; for safety,
 250 * check whether this is an fcmp/fclass/float approximation instruction
251 */
252 else if (unlikely(bundle_encoding[template][slot] == F)) {
253 if ((major_opcode == 4 || major_opcode == 5) &&
254 (kprobe_inst & (0x1 << 12))) {
255 /* fcmp/fclass unc instruction */
256 if (slot == 1 && qp) {
257 printk(KERN_WARNING "Kprobes on fcmp/fclass "
 258 "instruction on slot 1 at <0x%lx> "
259 "is not supported\n", addr);
260 return -EINVAL;
261
262 }
263 qp = 0;
264 }
265 if ((major_opcode == 0 || major_opcode == 1) &&
266 (kprobe_inst & (0x1UL << 33))) {
267 /* float Approximation instruction */
268 if (slot == 1 && qp) {
269 printk(KERN_WARNING "Kprobes on float Approx "
270 "instr at <0x%lx> is not supported\n",
271 addr);
272 return -EINVAL;
273 }
274 qp = 0;
275 }
276 }
277 return qp;
278}
279
280/*
209 * In this function we override the bundle with 281 * In this function we override the bundle with
210 * the break instruction at the given slot. 282 * the break instruction at the given slot.
211 */ 283 */
212static void __kprobes prepare_break_inst(uint template, uint slot, 284static void __kprobes prepare_break_inst(uint template, uint slot,
213 uint major_opcode, 285 uint major_opcode,
214 unsigned long kprobe_inst, 286 unsigned long kprobe_inst,
215 struct kprobe *p) 287 struct kprobe *p,
288 int qp)
216{ 289{
217 unsigned long break_inst = BREAK_INST; 290 unsigned long break_inst = BREAK_INST;
218 bundle_t *bundle = &p->opcode.bundle; 291 bundle_t *bundle = &p->opcode.bundle;
219 292
220 /* 293 /*
221 * Copy the original kprobe_inst qualifying predicate(qp) 294 * Copy the original kprobe_inst qualifying predicate(qp)
222 * to the break instruction iff !is_cmp_ctype_unc_inst 295 * to the break instruction
223 * because for cmp instruction with ctype equal to unc,
224 * which is a special instruction always needs to be
225 * executed regradless of qp
226 */ 296 */
227 if (!is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst)) 297 break_inst |= qp;
228 break_inst |= (0x3f & kprobe_inst);
229 298
230 switch (slot) { 299 switch (slot) {
231 case 0: 300 case 0:
@@ -296,12 +365,6 @@ static int __kprobes valid_kprobe_addr(int template, int slot,
296 return -EINVAL; 365 return -EINVAL;
297 } 366 }
298 367
299 if (slot == 1 && bundle_encoding[template][1] != L) {
300 printk(KERN_WARNING "Inserting kprobes on slot #1 "
301 "is not supported\n");
302 return -EINVAL;
303 }
304
305 return 0; 368 return 0;
306} 369}
307 370
@@ -427,6 +490,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
427 unsigned long kprobe_inst=0; 490 unsigned long kprobe_inst=0;
428 unsigned int slot = addr & 0xf, template, major_opcode = 0; 491 unsigned int slot = addr & 0xf, template, major_opcode = 0;
429 bundle_t *bundle; 492 bundle_t *bundle;
493 int qp;
430 494
431 bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle; 495 bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle;
432 template = bundle->quad0.template; 496 template = bundle->quad0.template;
@@ -441,9 +505,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
441 /* Get kprobe_inst and major_opcode from the bundle */ 505 /* Get kprobe_inst and major_opcode from the bundle */
442 get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode); 506 get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
443 507
444 if (unsupported_inst(template, slot, major_opcode, kprobe_inst, addr)) 508 qp = unsupported_inst(template, slot, major_opcode, kprobe_inst, addr);
445 return -EINVAL; 509 if (qp < 0)
446 510 return -EINVAL;
447 511
448 p->ainsn.insn = get_insn_slot(); 512 p->ainsn.insn = get_insn_slot();
449 if (!p->ainsn.insn) 513 if (!p->ainsn.insn)
@@ -451,30 +515,56 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
451 memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t)); 515 memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t));
452 memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t)); 516 memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t));
453 517
454 prepare_break_inst(template, slot, major_opcode, kprobe_inst, p); 518 prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);
455 519
456 return 0; 520 return 0;
457} 521}
458 522
459void __kprobes arch_arm_kprobe(struct kprobe *p) 523void __kprobes arch_arm_kprobe(struct kprobe *p)
460{ 524{
461 unsigned long addr = (unsigned long)p->addr; 525 unsigned long arm_addr;
462 unsigned long arm_addr = addr & ~0xFULL; 526 bundle_t *src, *dest;
527
528 arm_addr = ((unsigned long)p->addr) & ~0xFUL;
529 dest = &((kprobe_opcode_t *)arm_addr)->bundle;
530 src = &p->opcode.bundle;
463 531
464 flush_icache_range((unsigned long)p->ainsn.insn, 532 flush_icache_range((unsigned long)p->ainsn.insn,
465 (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t)); 533 (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
466 memcpy((char *)arm_addr, &p->opcode, sizeof(kprobe_opcode_t)); 534 switch (p->ainsn.slot) {
535 case 0:
536 dest->quad0.slot0 = src->quad0.slot0;
537 break;
538 case 1:
539 dest->quad1.slot1_p1 = src->quad1.slot1_p1;
540 break;
541 case 2:
542 dest->quad1.slot2 = src->quad1.slot2;
543 break;
544 }
467 flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t)); 545 flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
468} 546}
469 547
470void __kprobes arch_disarm_kprobe(struct kprobe *p) 548void __kprobes arch_disarm_kprobe(struct kprobe *p)
471{ 549{
472 unsigned long addr = (unsigned long)p->addr; 550 unsigned long arm_addr;
473 unsigned long arm_addr = addr & ~0xFULL; 551 bundle_t *src, *dest;
474 552
553 arm_addr = ((unsigned long)p->addr) & ~0xFUL;
554 dest = &((kprobe_opcode_t *)arm_addr)->bundle;
475 /* p->ainsn.insn contains the original unaltered kprobe_opcode_t */ 555 /* p->ainsn.insn contains the original unaltered kprobe_opcode_t */
476 memcpy((char *) arm_addr, (char *) p->ainsn.insn, 556 src = &p->ainsn.insn->bundle;
477 sizeof(kprobe_opcode_t)); 557 switch (p->ainsn.slot) {
558 case 0:
559 dest->quad0.slot0 = src->quad0.slot0;
560 break;
561 case 1:
562 dest->quad1.slot1_p1 = src->quad1.slot1_p1;
563 break;
564 case 2:
565 dest->quad1.slot2 = src->quad1.slot2;
566 break;
567 }
478 flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t)); 568 flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
479} 569}
480 570
@@ -807,7 +897,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
807 switch(val) { 897 switch(val) {
808 case DIE_BREAK: 898 case DIE_BREAK:
809 /* err is break number from ia64_bad_break() */ 899 /* err is break number from ia64_bad_break() */
810 if (args->err == 0x80200 || args->err == 0x80300 || args->err == 0) 900 if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12)
901 || args->err == __IA64_BREAK_JPROBE
902 || args->err == 0)
811 if (pre_kprobes_handler(args)) 903 if (pre_kprobes_handler(args))
812 ret = NOTIFY_STOP; 904 ret = NOTIFY_STOP;
813 break; 905 break;
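The shifted comparison above treats kprobe break immediates as a 4096-value window rather than a single magic number, so per-probe variations in the low bits still reach the kprobes handler. A tiny user-space sketch of the predicate; the two constants are hypothetical stand-ins for the asm/break.h definitions, not their real values:

    #include <stdio.h>

    #define FAKE_BREAK_KPROBE 0x81000UL     /* assumed base of a 0x1000-wide kprobe range */
    #define FAKE_BREAK_JPROBE 0x82000UL     /* assumed jprobe break immediate */

    static int is_probe_break(unsigned long err)
    {
        /* any immediate inside the kprobe window, the jprobe break,
         * or the plain "break 0" accepted by earlier kernels */
        return (err >> 12) == (FAKE_BREAK_KPROBE >> 12) ||
               err == FAKE_BREAK_JPROBE || err == 0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               is_probe_break(0x81004),     /* inside the window -> 1 */
               is_probe_break(0x82000),     /* jprobe break      -> 1 */
               is_probe_break(0x80100));    /* unrelated break   -> 0 */
        return 0;
    }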
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index 468233fa2cee..e2ccc9f660c5 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -19,8 +19,11 @@
19#include <asm/delay.h> 19#include <asm/delay.h>
20#include <asm/meminit.h> 20#include <asm/meminit.h>
21 21
22typedef void (*relocate_new_kernel_t)(unsigned long, unsigned long, 22typedef NORET_TYPE void (*relocate_new_kernel_t)(
23 struct ia64_boot_param *, unsigned long); 23 unsigned long indirection_page,
24 unsigned long start_address,
25 struct ia64_boot_param *boot_param,
26 unsigned long pal_addr) ATTRIB_NORET;
24 27
25struct kimage *ia64_kimage; 28struct kimage *ia64_kimage;
26 29
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 87c1c4f42872..a76add3e76a2 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1239,7 +1239,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
1239 } else { 1239 } else {
1240 /* Dump buffered message to console */ 1240 /* Dump buffered message to console */
1241 ia64_mlogbuf_finish(1); 1241 ia64_mlogbuf_finish(1);
1242#ifdef CONFIG_CRASH_DUMP 1242#ifdef CONFIG_KEXEC
1243 atomic_set(&kdump_in_progress, 1); 1243 atomic_set(&kdump_in_progress, 1);
1244 monarch_cpu = -1; 1244 monarch_cpu = -1;
1245#endif 1245#endif
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 14e1200376a9..ad567b8d432e 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -256,7 +256,7 @@ reserve_memory (void)
256 256
257#ifdef CONFIG_KEXEC 257#ifdef CONFIG_KEXEC
258 /* crashkernel=size@offset specifies the size to reserve for a crash 258 /* crashkernel=size@offset specifies the size to reserve for a crash
259 * kernel.(offset is ingored for keep compatibility with other archs) 259 * kernel. If offset is 0, then it is determined automatically.
260 * By reserving this memory we guarantee that linux never set's it 260 * By reserving this memory we guarantee that linux never set's it
261 * up as a DMA target.Useful for holding code to do something 261 * up as a DMA target.Useful for holding code to do something
262 * appropriate after a kernel panic. 262 * appropriate after a kernel panic.
@@ -266,10 +266,16 @@ reserve_memory (void)
266 unsigned long base, size; 266 unsigned long base, size;
267 if (from) { 267 if (from) {
268 size = memparse(from + 12, &from); 268 size = memparse(from + 12, &from);
269 if (*from == '@')
270 base = memparse(from+1, &from);
271 else
272 base = 0;
269 if (size) { 273 if (size) {
270 sort_regions(rsvd_region, n); 274 if (!base) {
271 base = kdump_find_rsvd_region(size, 275 sort_regions(rsvd_region, n);
272 rsvd_region, n); 276 base = kdump_find_rsvd_region(size,
277 rsvd_region, n);
278 }
273 if (base != ~0UL) { 279 if (base != ~0UL) {
274 rsvd_region[n].start = 280 rsvd_region[n].start =
275 (unsigned long)__va(base); 281 (unsigned long)__va(base);
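A hedged sketch of the two command-line forms the hunk above now accepts. memparse() is the kernel's size parser (number plus an optional K/M/G suffix); the stand-in below only mimics it so the example is self-contained, and the concrete sizes are examples:

    #include <stdio.h>
    #include <stdlib.h>

    /* minimal stand-in for the kernel's memparse(): number + optional K/M/G */
    static unsigned long memparse_sketch(const char *p, char **end)
    {
        unsigned long val = strtoul(p, end, 0);

        switch (**end) {
        case 'G': case 'g': val <<= 10;     /* fall through */
        case 'M': case 'm': val <<= 10;     /* fall through */
        case 'K': case 'k': val <<= 10; (*end)++;
        }
        return val;
    }

    int main(void)
    {
        /* "crashkernel=128M@256M" reserves at a fixed base;
         * "crashkernel=128M" leaves base at 0 and the kernel picks one. */
        char arg[] = "128M@256M";
        char *end;
        unsigned long size = memparse_sketch(arg, &end);
        unsigned long base = (*end == '@') ? memparse_sketch(end + 1, &end) : 0;

        printf("size=%lu base=%lu (0 means auto-placement)\n", size, base);
        return 0;
    }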
@@ -434,6 +440,21 @@ static __init int setup_nomca(char *s)
434} 440}
435early_param("nomca", setup_nomca); 441early_param("nomca", setup_nomca);
436 442
443#ifdef CONFIG_PROC_VMCORE
444/* elfcorehdr= specifies the location of elf core header
445 * stored by the crashed kernel.
446 */
447static int __init parse_elfcorehdr(char *arg)
448{
449 if (!arg)
450 return -EINVAL;
451
452 elfcorehdr_addr = memparse(arg, &arg);
453 return 0;
454}
455early_param("elfcorehdr", parse_elfcorehdr);
456#endif /* CONFIG_PROC_VMCORE */
457
437void __init 458void __init
438setup_arch (char **cmdline_p) 459setup_arch (char **cmdline_p)
439{ 460{
@@ -653,6 +674,7 @@ get_model_name(__u8 family, __u8 model)
653{ 674{
654 char brand[128]; 675 char brand[128];
655 676
677 memcpy(brand, "Unknown", 8);
656 if (ia64_pal_get_brand_info(brand)) { 678 if (ia64_pal_get_brand_info(brand)) {
657 if (family == 0x7) 679 if (family == 0x7)
658 memcpy(brand, "Merced", 7); 680 memcpy(brand, "Merced", 7);
@@ -660,8 +682,7 @@ get_model_name(__u8 family, __u8 model)
660 case 0: memcpy(brand, "McKinley", 9); break; 682 case 0: memcpy(brand, "McKinley", 9); break;
661 case 1: memcpy(brand, "Madison", 8); break; 683 case 1: memcpy(brand, "Madison", 8); break;
662 case 2: memcpy(brand, "Madison up to 9M cache", 23); break; 684 case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
663 } else 685 }
664 memcpy(brand, "Unknown", 8);
665 } 686 }
666 if (brandname[0] == '\0') 687 if (brandname[0] == '\0')
667 return strcpy(brandname, brand); 688 return strcpy(brandname, brand);
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index b1b9aa4364b9..f4c7f7769cf7 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -157,7 +157,7 @@ handle_IPI (int irq, void *dev_id)
157 case IPI_CPU_STOP: 157 case IPI_CPU_STOP:
158 stop_this_cpu(); 158 stop_this_cpu();
159 break; 159 break;
160#ifdef CONFIG_CRASH_DUMP 160#ifdef CONFIG_KEXEC
161 case IPI_KDUMP_CPU_STOP: 161 case IPI_KDUMP_CPU_STOP:
162 unw_init_running(kdump_cpu_freeze, NULL); 162 unw_init_running(kdump_cpu_freeze, NULL);
163 break; 163 break;
@@ -219,7 +219,7 @@ send_IPI_self (int op)
219 send_IPI_single(smp_processor_id(), op); 219 send_IPI_single(smp_processor_id(), op);
220} 220}
221 221
222#ifdef CONFIG_CRASH_DUMP 222#ifdef CONFIG_KEXEC
223void 223void
224kdump_smp_send_stop() 224kdump_smp_send_stop()
225{ 225{
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index fffa9e0826bc..ab684747036f 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -307,6 +307,15 @@ fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long
307 return ret.status; 307 return ret.status;
308} 308}
309 309
310struct fpu_swa_msg {
311 unsigned long count;
312 unsigned long time;
313};
314static DEFINE_PER_CPU(struct fpu_swa_msg, cpulast);
315DECLARE_PER_CPU(struct fpu_swa_msg, cpulast);
316static struct fpu_swa_msg last __cacheline_aligned;
317
318
310/* 319/*
311 * Handle floating-point assist faults and traps. 320 * Handle floating-point assist faults and traps.
312 */ 321 */
@@ -316,8 +325,6 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
316 long exception, bundle[2]; 325 long exception, bundle[2];
317 unsigned long fault_ip; 326 unsigned long fault_ip;
318 struct siginfo siginfo; 327 struct siginfo siginfo;
319 static int fpu_swa_count = 0;
320 static unsigned long last_time;
321 328
322 fault_ip = regs->cr_iip; 329 fault_ip = regs->cr_iip;
323 if (!fp_fault && (ia64_psr(regs)->ri == 0)) 330 if (!fp_fault && (ia64_psr(regs)->ri == 0))
@@ -325,14 +332,37 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
325 if (copy_from_user(bundle, (void __user *) fault_ip, sizeof(bundle))) 332 if (copy_from_user(bundle, (void __user *) fault_ip, sizeof(bundle)))
326 return -1; 333 return -1;
327 334
328 if (jiffies - last_time > 5*HZ) 335 if (!(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) {
329 fpu_swa_count = 0; 336 unsigned long count, current_jiffies = jiffies;
330 if ((fpu_swa_count < 4) && !(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) { 337 struct fpu_swa_msg *cp = &__get_cpu_var(cpulast);
331 last_time = jiffies; 338
332 ++fpu_swa_count; 339 if (unlikely(current_jiffies > cp->time))
333 printk(KERN_WARNING 340 cp->count = 0;
334 "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n", 341 if (unlikely(cp->count < 5)) {
335 current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri, isr); 342 cp->count++;
343 cp->time = current_jiffies + 5 * HZ;
344
345 /* minimize races by grabbing a copy of count BEFORE checking last.time. */
346 count = last.count;
347 barrier();
348
349 /*
350 * Lower 4 bits are used as a count. Upper bits are a sequence
 351 * number that is updated when the count is reset. The cmpxchg will
 352 * fail if the seqno has changed. This minimizes multiple cpus
 353 * resetting the count.
354 */
355 if (current_jiffies > last.time)
356 (void) cmpxchg_acq(&last.count, count, 16 + (count & ~15));
357
 358 /* use fetchadd to atomically update the count */
359 if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) {
360 last.time = current_jiffies + 5 * HZ;
361 printk(KERN_WARNING
362 "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
363 current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri, isr);
364 }
365 }
336 } 366 }
337 367
338 exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr, 368 exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr,
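A simplified sketch of the throttling scheme introduced above: a cheap per-cpu gate sits in front of one shared counter whose low four bits count messages in the current window and whose upper bits act as a reset sequence number. The real code does the reset with cmpxchg_acq and the increment with ia64_fetchadd; plain operations are used here only to show the bit layout:

    #include <stdio.h>

    /* shared counter: bits 0-3 = messages printed in this window,
     * bits 4+   = sequence number bumped each time the window resets */
    static unsigned long last_count;

    static int may_print(unsigned long now, unsigned long *window_end)
    {
        unsigned long count = last_count;

        /* window expired: zero the low bits and bump the seqno
         * (the real code uses cmpxchg so only one cpu wins the reset) */
        if (now > *window_end)
            last_count = 16 + (count & ~15UL);

        /* the real code uses fetchadd; a race here only costs an extra line */
        if ((last_count & 15) < 5) {
            last_count++;
            *window_end = now + 5;      /* "5 jiffies" in this sketch */
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        unsigned long window_end = 0, t;

        for (t = 1; t <= 8; t++)
            printf("t=%lu -> %s\n", t,
                   may_print(t, &window_end) ? "print" : "drop");
        return 0;
    }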
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 82deaa3a7c48..1e79551231b9 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -174,6 +174,12 @@ find_memory (void)
174 reserve_bootmem(bootmap_start, bootmap_size); 174 reserve_bootmem(bootmap_start, bootmap_size);
175 175
176 find_initrd(); 176 find_initrd();
177
178#ifdef CONFIG_CRASH_DUMP
 179 /* If we are doing a crash dump, we still need to know the real mem
 180 * size before the original memory map is reset. */
181 saved_max_pfn = max_pfn;
182#endif
177} 183}
178 184
179#ifdef CONFIG_SMP 185#ifdef CONFIG_SMP
@@ -226,7 +232,6 @@ void __init
226paging_init (void) 232paging_init (void)
227{ 233{
228 unsigned long max_dma; 234 unsigned long max_dma;
229 unsigned long nid = 0;
230 unsigned long max_zone_pfns[MAX_NR_ZONES]; 235 unsigned long max_zone_pfns[MAX_NR_ZONES];
231 236
232 num_physpages = 0; 237 num_physpages = 0;
@@ -238,7 +243,7 @@ paging_init (void)
238 max_zone_pfns[ZONE_NORMAL] = max_low_pfn; 243 max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
239 244
240#ifdef CONFIG_VIRTUAL_MEM_MAP 245#ifdef CONFIG_VIRTUAL_MEM_MAP
241 efi_memmap_walk(register_active_ranges, &nid); 246 efi_memmap_walk(register_active_ranges, NULL);
242 efi_memmap_walk(find_largest_hole, (u64 *)&max_gap); 247 efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
243 if (max_gap < LARGE_GAP) { 248 if (max_gap < LARGE_GAP) {
244 vmem_map = (struct page *) 0; 249 vmem_map = (struct page *) 0;
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 56dc2024220e..1a3d8a2feb94 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -595,14 +595,9 @@ find_largest_hole (u64 start, u64 end, void *arg)
595} 595}
596 596
597int __init 597int __init
598register_active_ranges(u64 start, u64 end, void *nid) 598register_active_ranges(u64 start, u64 end, void *arg)
599{ 599{
600 BUG_ON(nid == NULL); 600 add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT);
601 BUG_ON(*(unsigned long *)nid >= MAX_NUMNODES);
602
603 add_active_range(*(unsigned long *)nid,
604 __pa(start) >> PAGE_SHIFT,
605 __pa(end) >> PAGE_SHIFT);
606 return 0; 601 return 0;
607} 602}
608#endif /* CONFIG_VIRTUAL_MEM_MAP */ 603#endif /* CONFIG_VIRTUAL_MEM_MAP */
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index a934ad069425..8571e52c2efd 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -580,7 +580,7 @@ void __cpuinit sn_cpu_init(void)
580 int slice; 580 int slice;
581 int cnode; 581 int cnode;
582 int i; 582 int i;
583 static int wars_have_been_checked; 583 static int wars_have_been_checked, set_cpu0_number;
584 584
585 cpuid = smp_processor_id(); 585 cpuid = smp_processor_id();
586 if (cpuid == 0 && IS_MEDUSA()) { 586 if (cpuid == 0 && IS_MEDUSA()) {
@@ -605,8 +605,16 @@ void __cpuinit sn_cpu_init(void)
605 /* 605 /*
606 * Don't check status. The SAL call is not supported on all PROMs 606 * Don't check status. The SAL call is not supported on all PROMs
607 * but a failure is harmless. 607 * but a failure is harmless.
 608 * Architecturally, cpu_init is always called twice on cpu 0. We
 609 * should set cpu_number on cpu 0 only once.
608 */ 610 */
609 (void) ia64_sn_set_cpu_number(cpuid); 611 if (cpuid == 0) {
612 if (!set_cpu0_number) {
613 (void) ia64_sn_set_cpu_number(cpuid);
614 set_cpu0_number = 1;
615 }
616 } else
617 (void) ia64_sn_set_cpu_number(cpuid);
610 618
611 /* 619 /*
612 * The boot cpu makes this call again after platform initialization is 620 * The boot cpu makes this call again after platform initialization is
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 462ea178f49a..33367996d72d 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -189,7 +189,7 @@ static void print_pci_topology(struct seq_file *s)
189 int e; 189 int e;
190 190
191 for (sz = PAGE_SIZE; sz < 16 * PAGE_SIZE; sz += PAGE_SIZE) { 191 for (sz = PAGE_SIZE; sz < 16 * PAGE_SIZE; sz += PAGE_SIZE) {
192 if (!(p = (char *)kmalloc(sz, GFP_KERNEL))) 192 if (!(p = kmalloc(sz, GFP_KERNEL)))
193 break; 193 break;
194 e = ia64_sn_ioif_get_pci_topology(__pa(p), sz); 194 e = ia64_sn_ioif_get_pci_topology(__pa(p), sz);
195 if (e == SALRET_OK) 195 if (e == SALRET_OK)
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 1f3540826e68..c08db9c2375d 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -632,7 +632,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
632 ch->number, ch->partid); 632 ch->number, ch->partid);
633 633
634 spin_unlock_irqrestore(&ch->lock, *irq_flags); 634 spin_unlock_irqrestore(&ch->lock, *irq_flags);
635 xpc_create_kthreads(ch, 1); 635 xpc_create_kthreads(ch, 1, 0);
636 spin_lock_irqsave(&ch->lock, *irq_flags); 636 spin_lock_irqsave(&ch->lock, *irq_flags);
637} 637}
638 638
@@ -754,12 +754,12 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
754 754
755 /* make sure all activity has settled down first */ 755 /* make sure all activity has settled down first */
756 756
757 if (atomic_read(&ch->references) > 0 || 757 if (atomic_read(&ch->kthreads_assigned) > 0 ||
758 ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && 758 atomic_read(&ch->references) > 0) {
759 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE))) {
760 return; 759 return;
761 } 760 }
762 DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0); 761 DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
762 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
763 763
764 if (part->act_state == XPC_P_DEACTIVATING) { 764 if (part->act_state == XPC_P_DEACTIVATING) {
765 /* can't proceed until the other side disengages from us */ 765 /* can't proceed until the other side disengages from us */
@@ -1651,6 +1651,11 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
1651 /* wake all idle kthreads so they can exit */ 1651 /* wake all idle kthreads so they can exit */
1652 if (atomic_read(&ch->kthreads_idle) > 0) { 1652 if (atomic_read(&ch->kthreads_idle) > 0) {
1653 wake_up_all(&ch->idle_wq); 1653 wake_up_all(&ch->idle_wq);
1654
1655 } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
1656 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
1657 /* start a kthread that will do the xpcDisconnecting callout */
1658 xpc_create_kthreads(ch, 1, 1);
1654 } 1659 }
1655 1660
1656 /* wake those waiting to allocate an entry from the local msg queue */ 1661 /* wake those waiting to allocate an entry from the local msg queue */
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index fa96dfc0e1aa..7a387d237363 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -681,7 +681,7 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
681 dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n", 681 dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
682 needed, ch->partid, ch->number); 682 needed, ch->partid, ch->number);
683 683
684 xpc_create_kthreads(ch, needed); 684 xpc_create_kthreads(ch, needed, 0);
685} 685}
686 686
687 687
@@ -775,26 +775,28 @@ xpc_daemonize_kthread(void *args)
775 xpc_kthread_waitmsgs(part, ch); 775 xpc_kthread_waitmsgs(part, ch);
776 } 776 }
777 777
778 if (atomic_dec_return(&ch->kthreads_assigned) == 0) { 778 /* let registerer know that connection is disconnecting */
779 spin_lock_irqsave(&ch->lock, irq_flags);
780 if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
781 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
782 ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
783 spin_unlock_irqrestore(&ch->lock, irq_flags);
784 779
785 xpc_disconnect_callout(ch, xpcDisconnecting); 780 spin_lock_irqsave(&ch->lock, irq_flags);
786 781 if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
787 spin_lock_irqsave(&ch->lock, irq_flags); 782 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
788 ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE; 783 ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
789 }
790 spin_unlock_irqrestore(&ch->lock, irq_flags); 784 spin_unlock_irqrestore(&ch->lock, irq_flags);
785
786 xpc_disconnect_callout(ch, xpcDisconnecting);
787
788 spin_lock_irqsave(&ch->lock, irq_flags);
789 ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
790 }
791 spin_unlock_irqrestore(&ch->lock, irq_flags);
792
793 if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
791 if (atomic_dec_return(&part->nchannels_engaged) == 0) { 794 if (atomic_dec_return(&part->nchannels_engaged) == 0) {
792 xpc_mark_partition_disengaged(part); 795 xpc_mark_partition_disengaged(part);
793 xpc_IPI_send_disengage(part); 796 xpc_IPI_send_disengage(part);
794 } 797 }
795 } 798 }
796 799
797
798 xpc_msgqueue_deref(ch); 800 xpc_msgqueue_deref(ch);
799 801
800 dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n", 802 dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
@@ -818,7 +820,8 @@ xpc_daemonize_kthread(void *args)
818 * partition. 820 * partition.
819 */ 821 */
820void 822void
821xpc_create_kthreads(struct xpc_channel *ch, int needed) 823xpc_create_kthreads(struct xpc_channel *ch, int needed,
824 int ignore_disconnecting)
822{ 825{
823 unsigned long irq_flags; 826 unsigned long irq_flags;
824 pid_t pid; 827 pid_t pid;
@@ -833,16 +836,38 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed)
833 * kthread. That kthread is responsible for doing the 836 * kthread. That kthread is responsible for doing the
834 * counterpart to the following before it exits. 837 * counterpart to the following before it exits.
835 */ 838 */
839 if (ignore_disconnecting) {
840 if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
841 /* kthreads assigned had gone to zero */
842 BUG_ON(!(ch->flags &
843 XPC_C_DISCONNECTINGCALLOUT_MADE));
844 break;
845 }
846
847 } else if (ch->flags & XPC_C_DISCONNECTING) {
848 break;
849
850 } else if (atomic_inc_return(&ch->kthreads_assigned) == 1) {
851 if (atomic_inc_return(&part->nchannels_engaged) == 1)
852 xpc_mark_partition_engaged(part);
853 }
836 (void) xpc_part_ref(part); 854 (void) xpc_part_ref(part);
837 xpc_msgqueue_ref(ch); 855 xpc_msgqueue_ref(ch);
838 if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
839 atomic_inc_return(&part->nchannels_engaged) == 1) {
840 xpc_mark_partition_engaged(part);
841 }
842 856
843 pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0); 857 pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0);
844 if (pid < 0) { 858 if (pid < 0) {
845 /* the fork failed */ 859 /* the fork failed */
860
861 /*
862 * NOTE: if (ignore_disconnecting &&
863 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
864 * then we'll deadlock if all other kthreads assigned
865 * to this channel are blocked in the channel's
866 * registerer, because the only thing that will unblock
867 * them is the xpcDisconnecting callout that this
868 * failed kernel_thread would have made.
869 */
870
846 if (atomic_dec_return(&ch->kthreads_assigned) == 0 && 871 if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
847 atomic_dec_return(&part->nchannels_engaged) == 0) { 872 atomic_dec_return(&part->nchannels_engaged) == 0) {
848 xpc_mark_partition_disengaged(part); 873 xpc_mark_partition_disengaged(part);
@@ -857,9 +882,6 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed)
857 * Flag this as an error only if we have an 882 * Flag this as an error only if we have an
858 * insufficient #of kthreads for the channel 883 * insufficient #of kthreads for the channel
859 * to function. 884 * to function.
860 *
861 * No xpc_msgqueue_ref() is needed here since
862 * the channel mgr is doing this.
863 */ 885 */
864 spin_lock_irqsave(&ch->lock, irq_flags); 886 spin_lock_irqsave(&ch->lock, irq_flags);
865 XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources, 887 XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
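The new ignore_disconnecting path above relies on the atomic_inc_not_zero() pattern: take another reference on kthreads_assigned only if it has not already dropped to zero, so the xpcDisconnecting callout thread never resurrects a dead count. A generic user-space sketch of that pattern (the names are illustrative, not the XPC code; the kernel helper does this atomically):

    #include <stdio.h>

    /* stand-in for atomic_inc_not_zero(): increment unless already zero,
     * implemented with a GCC compare-and-swap loop */
    static int inc_not_zero(int *v)
    {
        int old;

        do {
            old = *v;
            if (old == 0)
                return 0;       /* too late: the last user is gone */
        } while (!__sync_bool_compare_and_swap(v, old, old + 1));
        return 1;
    }

    int main(void)
    {
        int kthreads_assigned = 2;

        if (inc_not_zero(&kthreads_assigned))
            printf("joined live channel, count now %d\n", kthreads_assigned);

        kthreads_assigned = 0;
        printf("join on dead channel succeeded? %d\n",
               inc_not_zero(&kthreads_assigned));
        return 0;
    }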
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index b54ef1726c55..46b7d6035aab 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -59,7 +59,7 @@ static struct vm_struct *get_io_area(unsigned long size)
59 unsigned long addr; 59 unsigned long addr;
60 struct vm_struct **p, *tmp, *area; 60 struct vm_struct **p, *tmp, *area;
61 61
62 area = (struct vm_struct *)kmalloc(sizeof(*area), GFP_KERNEL); 62 area = kmalloc(sizeof(*area), GFP_KERNEL);
63 if (!area) 63 if (!area)
64 return NULL; 64 return NULL;
65 addr = KMAP_START; 65 addr = KMAP_START;
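This and the remaining kmalloc/kzalloc hunks below all apply the same C rule: these allocators return void *, which converts implicitly to any object pointer type, so the cast is redundant and can even hide a missing prototype. A trivial sketch with malloc() standing in for kmalloc():

    #include <stdlib.h>

    struct vm_struct { unsigned long addr; unsigned long size; };

    int main(void)
    {
        /* no cast needed: void * converts implicitly in C */
        struct vm_struct *area = malloc(sizeof(*area));

        free(area);
        return 0;
    }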
diff --git a/arch/mips/kernel/apm.c b/arch/mips/kernel/apm.c
index 528e731049c1..ba16d07588cb 100644
--- a/arch/mips/kernel/apm.c
+++ b/arch/mips/kernel/apm.c
@@ -356,7 +356,7 @@ static int apm_open(struct inode * inode, struct file * filp)
356{ 356{
357 struct apm_user *as; 357 struct apm_user *as;
358 358
359 as = (struct apm_user *)kzalloc(sizeof(*as), GFP_KERNEL); 359 as = kzalloc(sizeof(*as), GFP_KERNEL);
360 if (as) { 360 if (as) {
361 /* 361 /*
362 * XXX - this is a tiny bit broken, when we consider BSD 362 * XXX - this is a tiny bit broken, when we consider BSD
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index ea2d15370bb7..30245c09d025 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -203,6 +203,31 @@ static inline void kunmap_coherent(struct page *page)
203 preempt_check_resched(); 203 preempt_check_resched();
204} 204}
205 205
206void copy_user_highpage(struct page *to, struct page *from,
207 unsigned long vaddr, struct vm_area_struct *vma)
208{
209 void *vfrom, *vto;
210
211 vto = kmap_atomic(to, KM_USER1);
212 if (cpu_has_dc_aliases) {
213 vfrom = kmap_coherent(from, vaddr);
214 copy_page(vto, vfrom);
215 kunmap_coherent(from);
216 } else {
217 vfrom = kmap_atomic(from, KM_USER0);
218 copy_page(vto, vfrom);
219 kunmap_atomic(vfrom, KM_USER0);
220 }
221 if (((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) ||
222 pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
223 flush_data_cache_page((unsigned long)vto);
224 kunmap_atomic(vto, KM_USER1);
225 /* Make sure this page is cleared on other CPU's too before using it */
226 smp_wmb();
227}
228
229EXPORT_SYMBOL(copy_user_highpage);
230
206void copy_to_user_page(struct vm_area_struct *vma, 231void copy_to_user_page(struct vm_area_struct *vma,
207 struct page *page, unsigned long vaddr, void *dst, const void *src, 232 struct page *page, unsigned long vaddr, void *dst, const void *src,
208 unsigned long len) 233 unsigned long len)
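For reference, when an architecture does not define its own copy_user_highpage(), the generic fallback is essentially a kmap/copy/kunmap of both pages; the MIPS version above adds the alias-aware cache flushing on top of that. A hedged, from-memory sketch of the generic shape (not verbatim include/linux/highmem.h):

    #include <linux/mm.h>
    #include <linux/highmem.h>

    /* sketch of the generic fallback used when the architecture does not
     * provide its own copy_user_highpage() */
    static inline void copy_user_highpage_sketch(struct page *to, struct page *from,
                                                 unsigned long vaddr,
                                                 struct vm_area_struct *vma)
    {
        char *vfrom, *vto;

        vfrom = kmap_atomic(from, KM_USER0);
        vto = kmap_atomic(to, KM_USER1);
        copy_user_page(vto, vfrom, vaddr, to);
        kunmap_atomic(vfrom, KM_USER0);
        kunmap_atomic(vto, KM_USER1);
    }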
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c
index d88309209f56..04c2ff444396 100644
--- a/arch/parisc/hpux/sys_hpux.c
+++ b/arch/parisc/hpux/sys_hpux.c
@@ -475,7 +475,7 @@ int hpux_sysfs(int opcode, unsigned long arg1, unsigned long arg2)
475 printk(KERN_DEBUG "len of arg1 = %d\n", len); 475 printk(KERN_DEBUG "len of arg1 = %d\n", len);
476 if (len == 0) 476 if (len == 0)
477 return 0; 477 return 0;
478 fsname = (char *) kmalloc(len, GFP_KERNEL); 478 fsname = kmalloc(len, GFP_KERNEL);
479 if ( !fsname ) { 479 if ( !fsname ) {
480 printk(KERN_DEBUG "failed to kmalloc fsname\n"); 480 printk(KERN_DEBUG "failed to kmalloc fsname\n");
481 return 0; 481 return 0;
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 920bdbf8404f..c10ab47d81fa 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -343,7 +343,7 @@ void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct
343 struct pt_regs *r = &t->thread.regs; 343 struct pt_regs *r = &t->thread.regs;
344 struct pt_regs *r2; 344 struct pt_regs *r2;
345 345
346 r2 = (struct pt_regs *)kmalloc(sizeof(struct pt_regs), GFP_KERNEL); 346 r2 = kmalloc(sizeof(struct pt_regs), GFP_KERNEL);
347 if (!r2) 347 if (!r2)
348 return; 348 return;
349 *r2 = *r; 349 *r2 = *r;
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 6960f090991e..869cebbba967 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -505,7 +505,7 @@ static int nvram_scan_partitions(void)
505 return -ENODEV; 505 return -ENODEV;
506 total_size = ppc_md.nvram_size(); 506 total_size = ppc_md.nvram_size();
507 507
508 header = (char *) kmalloc(NVRAM_HEADER_LEN, GFP_KERNEL); 508 header = kmalloc(NVRAM_HEADER_LEN, GFP_KERNEL);
509 if (!header) { 509 if (!header) {
510 printk(KERN_ERR "nvram_scan_partitions: Failed kmalloc\n"); 510 printk(KERN_ERR "nvram_scan_partitions: Failed kmalloc\n");
511 return -ENOMEM; 511 return -ENOMEM;
@@ -574,7 +574,7 @@ static int __init nvram_init(void)
574 } 574 }
575 575
576 /* initialize our anchor for the nvram partition list */ 576 /* initialize our anchor for the nvram partition list */
577 nvram_part = (struct nvram_partition *) kmalloc(sizeof(struct nvram_partition), GFP_KERNEL); 577 nvram_part = kmalloc(sizeof(struct nvram_partition), GFP_KERNEL);
578 if (!nvram_part) { 578 if (!nvram_part) {
579 printk(KERN_ERR "nvram_init: Failed kmalloc\n"); 579 printk(KERN_ERR "nvram_init: Failed kmalloc\n");
580 return -ENOMEM; 580 return -ENOMEM;
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 8336deafc624..2847cd51a2d7 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -670,7 +670,7 @@ pcibios_make_OF_bus_map(void)
670 struct pci_controller* hose; 670 struct pci_controller* hose;
671 struct property *map_prop; 671 struct property *map_prop;
672 672
673 pci_to_OF_bus_map = (u8*)kmalloc(pci_bus_count, GFP_KERNEL); 673 pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL);
674 if (!pci_to_OF_bus_map) { 674 if (!pci_to_OF_bus_map) {
675 printk(KERN_ERR "Can't allocate OF bus map !\n"); 675 printk(KERN_ERR "Can't allocate OF bus map !\n");
676 return; 676 return;
diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
index add8c1a9af68..c831815c31f0 100644
--- a/arch/powerpc/mm/imalloc.c
+++ b/arch/powerpc/mm/imalloc.c
@@ -138,7 +138,7 @@ static struct vm_struct * split_im_region(unsigned long v_addr,
138 struct vm_struct *vm2 = NULL; 138 struct vm_struct *vm2 = NULL;
139 struct vm_struct *new_vm = NULL; 139 struct vm_struct *new_vm = NULL;
140 140
141 vm1 = (struct vm_struct *) kmalloc(sizeof(*vm1), GFP_KERNEL); 141 vm1 = kmalloc(sizeof(*vm1), GFP_KERNEL);
142 if (vm1 == NULL) { 142 if (vm1 == NULL) {
143 printk(KERN_ERR "%s() out of memory\n", __FUNCTION__); 143 printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
144 return NULL; 144 return NULL;
@@ -172,7 +172,7 @@ static struct vm_struct * split_im_region(unsigned long v_addr,
172 * uppermost remainder, and use existing parent one for the 172 * uppermost remainder, and use existing parent one for the
173 * lower remainder of parent range 173 * lower remainder of parent range
174 */ 174 */
175 vm2 = (struct vm_struct *) kmalloc(sizeof(*vm2), GFP_KERNEL); 175 vm2 = kmalloc(sizeof(*vm2), GFP_KERNEL);
176 if (vm2 == NULL) { 176 if (vm2 == NULL) {
177 printk(KERN_ERR "%s() out of memory\n", __FUNCTION__); 177 printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
178 kfree(vm1); 178 kfree(vm1);
@@ -206,7 +206,7 @@ static struct vm_struct * __add_new_im_area(unsigned long req_addr,
206 break; 206 break;
207 } 207 }
208 208
209 area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL); 209 area = kmalloc(sizeof(*area), GFP_KERNEL);
210 if (!area) 210 if (!area)
211 return NULL; 211 return NULL;
212 area->flags = 0; 212 area->flags = 0;
diff --git a/arch/powerpc/platforms/4xx/Kconfig b/arch/powerpc/platforms/4xx/Kconfig
index ed39d6a3d22a..2f2a13ed7667 100644
--- a/arch/powerpc/platforms/4xx/Kconfig
+++ b/arch/powerpc/platforms/4xx/Kconfig
@@ -179,7 +179,7 @@ config BIOS_FIXUP
179# OAK doesn't exist but wanted to keep this around for any future 403GCX boards 179# OAK doesn't exist but wanted to keep this around for any future 403GCX boards
180config 403GCX 180config 403GCX
181 bool 181 bool
182 depends OAK 182 depends on OAK
183 default y 183 default y
184 184
185config 405EP 185config 405EP
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index ddbe398fbd48..b3c2ce4cb7a8 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -35,7 +35,7 @@ config HDPU
35 Select HDPU if configuring a Sky Computers Compute Blade. 35 Select HDPU if configuring a Sky Computers Compute Blade.
36 36
37config HDPU_FEATURES 37config HDPU_FEATURES
38 depends HDPU 38 depends on HDPU
39 tristate "HDPU-Features" 39 tristate "HDPU-Features"
40 help 40 help
41 Select to enable HDPU enhanced features. 41 Select to enable HDPU enhanced features.
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index b6b462d3c604..f2bae04424f8 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -153,7 +153,7 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
153 return piar; 153 return piar;
154 } 154 }
155 } 155 }
156 piar = (struct pci_io_addr_range *)kmalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC); 156 piar = kmalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
157 if (!piar) 157 if (!piar)
158 return NULL; 158 return NULL;
159 159
diff --git a/arch/ppc/8260_io/fcc_enet.c b/arch/ppc/8260_io/fcc_enet.c
index 709952c25f29..06b84c372e58 100644
--- a/arch/ppc/8260_io/fcc_enet.c
+++ b/arch/ppc/8260_io/fcc_enet.c
@@ -1892,10 +1892,10 @@ init_fcc_param(fcc_info_t *fip, struct net_device *dev,
1892 /* Allocate space for the buffer descriptors from regular memory. 1892 /* Allocate space for the buffer descriptors from regular memory.
1893 * Initialize base addresses for the buffer descriptors. 1893 * Initialize base addresses for the buffer descriptors.
1894 */ 1894 */
1895 cep->rx_bd_base = (cbd_t *)kmalloc(sizeof(cbd_t) * RX_RING_SIZE, 1895 cep->rx_bd_base = kmalloc(sizeof(cbd_t) * RX_RING_SIZE,
1896 GFP_KERNEL | GFP_DMA); 1896 GFP_KERNEL | GFP_DMA);
1897 ep->fen_genfcc.fcc_rbase = __pa(cep->rx_bd_base); 1897 ep->fen_genfcc.fcc_rbase = __pa(cep->rx_bd_base);
1898 cep->tx_bd_base = (cbd_t *)kmalloc(sizeof(cbd_t) * TX_RING_SIZE, 1898 cep->tx_bd_base = kmalloc(sizeof(cbd_t) * TX_RING_SIZE,
1899 GFP_KERNEL | GFP_DMA); 1899 GFP_KERNEL | GFP_DMA);
1900 ep->fen_genfcc.fcc_tbase = __pa(cep->tx_bd_base); 1900 ep->fen_genfcc.fcc_tbase = __pa(cep->tx_bd_base);
1901 1901
diff --git a/arch/ppc/8xx_io/cs4218_tdm.c b/arch/ppc/8xx_io/cs4218_tdm.c
index c71ef3c2e7bf..b7bb5f0b3c5f 100644
--- a/arch/ppc/8xx_io/cs4218_tdm.c
+++ b/arch/ppc/8xx_io/cs4218_tdm.c
@@ -2601,7 +2601,7 @@ int __init tdm8xx_sound_init(void)
2601 /* Initialize beep stuff */ 2601 /* Initialize beep stuff */
2602 orig_mksound = kd_mksound; 2602 orig_mksound = kd_mksound;
2603 kd_mksound = cs_mksound; 2603 kd_mksound = cs_mksound;
2604 beep_buf = (short *) kmalloc(BEEP_BUFLEN * 4, GFP_KERNEL); 2604 beep_buf = kmalloc(BEEP_BUFLEN * 4, GFP_KERNEL);
2605 if (beep_buf == NULL) 2605 if (beep_buf == NULL)
2606 printk(KERN_WARNING "dmasound: no memory for " 2606 printk(KERN_WARNING "dmasound: no memory for "
2607 "beep buffer\n"); 2607 "beep buffer\n");
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 692b5ba53209..8eb82efe05a1 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -624,7 +624,7 @@ config HDPU
624 Select HDPU if configuring a Sky Computers Compute Blade. 624 Select HDPU if configuring a Sky Computers Compute Blade.
625 625
626config HDPU_FEATURES 626config HDPU_FEATURES
627 depends HDPU 627 depends on HDPU
628 tristate "HDPU-Features" 628 tristate "HDPU-Features"
629 help 629 help
630 Select to enable HDPU enhanced features. 630 Select to enable HDPU enhanced features.
@@ -735,7 +735,7 @@ config LITE5200
735 735
736config LITE5200B 736config LITE5200B
737 bool "Freescale LITE5200B" 737 bool "Freescale LITE5200B"
738 depends LITE5200 738 depends on LITE5200
739 help 739 help
740 Support for the LITE5200B dev board for the MPC5200 from Freescale. 740 Support for the LITE5200B dev board for the MPC5200 from Freescale.
741 This is the new board with 2 PCI slots. 741 This is the new board with 2 PCI slots.
diff --git a/arch/ppc/platforms/4xx/Kconfig b/arch/ppc/platforms/4xx/Kconfig
index 293bd489e7d9..6980de420e92 100644
--- a/arch/ppc/platforms/4xx/Kconfig
+++ b/arch/ppc/platforms/4xx/Kconfig
@@ -189,7 +189,7 @@ config BIOS_FIXUP
189# OAK doesn't exist but wanted to keep this around for any future 403GCX boards 189# OAK doesn't exist but wanted to keep this around for any future 403GCX boards
190config 403GCX 190config 403GCX
191 bool 191 bool
192 depends OAK 192 depends on OAK
193 default y 193 default y
194 194
195config 405EP 195config 405EP
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index ff690564edbd..12272361c018 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -407,7 +407,7 @@ config APPLDATA_BASE
407 407
408config APPLDATA_MEM 408config APPLDATA_MEM
409 tristate "Monitor memory management statistics" 409 tristate "Monitor memory management statistics"
410 depends on APPLDATA_BASE 410 depends on APPLDATA_BASE && VM_EVENT_COUNTERS
411 help 411 help
412 This provides memory management related data to the Linux - VM Monitor 412 This provides memory management related data to the Linux - VM Monitor
413 Stream, like paging/swapping rate, memory utilisation, etc. 413 Stream, like paging/swapping rate, memory utilisation, etc.
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index ef5266fbce62..bb57bc0e3fc8 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -191,13 +191,13 @@ debug_areas_alloc(int pages_per_area, int nr_areas)
191 debug_entry_t*** areas; 191 debug_entry_t*** areas;
192 int i,j; 192 int i,j;
193 193
194 areas = (debug_entry_t ***) kmalloc(nr_areas * 194 areas = kmalloc(nr_areas *
195 sizeof(debug_entry_t**), 195 sizeof(debug_entry_t**),
196 GFP_KERNEL); 196 GFP_KERNEL);
197 if (!areas) 197 if (!areas)
198 goto fail_malloc_areas; 198 goto fail_malloc_areas;
199 for (i = 0; i < nr_areas; i++) { 199 for (i = 0; i < nr_areas; i++) {
200 areas[i] = (debug_entry_t**) kmalloc(pages_per_area * 200 areas[i] = kmalloc(pages_per_area *
201 sizeof(debug_entry_t*),GFP_KERNEL); 201 sizeof(debug_entry_t*),GFP_KERNEL);
202 if (!areas[i]) { 202 if (!areas[i]) {
203 goto fail_malloc_areas2; 203 goto fail_malloc_areas2;
@@ -242,7 +242,7 @@ debug_info_alloc(char *name, int pages_per_area, int nr_areas, int buf_size,
242 242
243 /* alloc everything */ 243 /* alloc everything */
244 244
245 rc = (debug_info_t*) kmalloc(sizeof(debug_info_t), GFP_KERNEL); 245 rc = kmalloc(sizeof(debug_info_t), GFP_KERNEL);
246 if(!rc) 246 if(!rc)
247 goto fail_malloc_rc; 247 goto fail_malloc_rc;
248 rc->active_entries = kcalloc(nr_areas, sizeof(int), GFP_KERNEL); 248 rc->active_entries = kcalloc(nr_areas, sizeof(int), GFP_KERNEL);
@@ -634,7 +634,7 @@ found:
634 rc = -ENOMEM; 634 rc = -ENOMEM;
635 goto out; 635 goto out;
636 } 636 }
637 p_info = (file_private_info_t *) kmalloc(sizeof(file_private_info_t), 637 p_info = kmalloc(sizeof(file_private_info_t),
638 GFP_KERNEL); 638 GFP_KERNEL);
639 if(!p_info){ 639 if(!p_info){
640 if(debug_info_snapshot) 640 if(debug_info_snapshot)
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index 4faf96f8a834..bc5beaa8f98e 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -37,7 +37,7 @@ int register_external_interrupt(__u16 code, ext_int_handler_t handler)
37 ext_int_info_t *p; 37 ext_int_info_t *p;
38 int index; 38 int index;
39 39
40 p = (ext_int_info_t *) kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC); 40 p = kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC);
41 if (p == NULL) 41 if (p == NULL)
42 return -ENOMEM; 42 return -ENOMEM;
43 p->code = code; 43 p->code = code;
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
index c8cb211b9072..5b4841d067c1 100644
--- a/arch/sparc/kernel/irq.c
+++ b/arch/sparc/kernel/irq.c
@@ -425,7 +425,7 @@ int request_fast_irq(unsigned int irq,
425 } 425 }
426 426
427 if (action == NULL) 427 if (action == NULL)
428 action = (struct irqaction *)kmalloc(sizeof(struct irqaction), 428 action = kmalloc(sizeof(struct irqaction),
429 GFP_ATOMIC); 429 GFP_ATOMIC);
430 430
431 if (!action) { 431 if (!action) {
@@ -528,7 +528,7 @@ int request_irq(unsigned int irq,
528 } 528 }
529 529
530 if (action == NULL) 530 if (action == NULL)
531 action = (struct irqaction *)kmalloc(sizeof(struct irqaction), 531 action = kmalloc(sizeof(struct irqaction),
532 GFP_ATOMIC); 532 GFP_ATOMIC);
533 533
534 if (!action) { 534 if (!action) {
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index cf1b8baa57ea..0e27e226e0e2 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -327,7 +327,7 @@ int sun4d_request_irq(unsigned int irq,
327 } 327 }
328 328
329 if (action == NULL) 329 if (action == NULL)
330 action = (struct irqaction *)kmalloc(sizeof(struct irqaction), 330 action = kmalloc(sizeof(struct irqaction),
331 GFP_ATOMIC); 331 GFP_ATOMIC);
332 332
333 if (!action) { 333 if (!action) {
diff --git a/arch/sparc64/kernel/sys_sunos32.c b/arch/sparc64/kernel/sys_sunos32.c
index 4446f66590fa..2ebc2c051383 100644
--- a/arch/sparc64/kernel/sys_sunos32.c
+++ b/arch/sparc64/kernel/sys_sunos32.c
@@ -1055,7 +1055,7 @@ asmlinkage int sunos_msgsys(int op, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
1055 break; 1055 break;
1056 case 2: 1056 case 2:
1057 rval = -EFAULT; 1057 rval = -EFAULT;
1058 kmbuf = (struct msgbuf *)kmalloc(sizeof(struct msgbuf) + arg3, 1058 kmbuf = kmalloc(sizeof(struct msgbuf) + arg3,
1059 GFP_KERNEL); 1059 GFP_KERNEL);
1060 if (!kmbuf) 1060 if (!kmbuf)
1061 break; 1061 break;
@@ -1078,7 +1078,7 @@ asmlinkage int sunos_msgsys(int op, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
1078 break; 1078 break;
1079 case 3: 1079 case 3:
1080 rval = -EFAULT; 1080 rval = -EFAULT;
1081 kmbuf = (struct msgbuf *)kmalloc(sizeof(struct msgbuf) + arg3, 1081 kmbuf = kmalloc(sizeof(struct msgbuf) + arg3,
1082 GFP_KERNEL); 1082 GFP_KERNEL);
1083 if (!kmbuf || sunos_msgbuf_get((struct msgbuf32 __user *)(unsigned long)arg2, 1083 if (!kmbuf || sunos_msgbuf_get((struct msgbuf32 __user *)(unsigned long)arg2,
1084 kmbuf, arg3)) 1084 kmbuf, arg3))
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 286bc0b3207f..afe3d427ddfa 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -72,9 +72,11 @@ static int uml_net_rx(struct net_device *dev)
72 return pkt_len; 72 return pkt_len;
73} 73}
74 74
75static void uml_dev_close(void* dev) 75static void uml_dev_close(struct work_struct *work)
76{ 76{
77 dev_close( (struct net_device *) dev); 77 struct uml_net_private *lp =
78 container_of(work, struct uml_net_private, work);
79 dev_close(lp->dev);
78} 80}
79 81
80irqreturn_t uml_net_interrupt(int irq, void *dev_id) 82irqreturn_t uml_net_interrupt(int irq, void *dev_id)
@@ -89,7 +91,6 @@ irqreturn_t uml_net_interrupt(int irq, void *dev_id)
89 spin_lock(&lp->lock); 91 spin_lock(&lp->lock);
90 while((err = uml_net_rx(dev)) > 0) ; 92 while((err = uml_net_rx(dev)) > 0) ;
91 if(err < 0) { 93 if(err < 0) {
92 DECLARE_WORK(close_work, uml_dev_close, dev);
93 printk(KERN_ERR 94 printk(KERN_ERR
94 "Device '%s' read returned %d, shutting it down\n", 95 "Device '%s' read returned %d, shutting it down\n",
95 dev->name, err); 96 dev->name, err);
@@ -97,9 +98,10 @@ irqreturn_t uml_net_interrupt(int irq, void *dev_id)
97 * again lp->lock. 98 * again lp->lock.
98 * And dev_close() can be safely called multiple times on the 99 * And dev_close() can be safely called multiple times on the
99 * same device, since it tests for (dev->flags & IFF_UP). So 100 * same device, since it tests for (dev->flags & IFF_UP). So
100 * there's no harm in delaying the device shutdown. */ 101 * there's no harm in delaying the device shutdown.
101 schedule_work(&close_work); 102 * Furthermore, the workqueue will not re-enqueue an already
102#error this is not permitted - close_work will go out of scope 103 * enqueued work item. */
104 schedule_work(&lp->work);
103 goto out; 105 goto out;
104 } 106 }
105 reactivate_fd(lp->fd, UM_ETH_IRQ); 107 reactivate_fd(lp->fd, UM_ETH_IRQ);
@@ -334,13 +336,12 @@ static int eth_configure(int n, void *init, char *mac,
334 size = transport->private_size + sizeof(struct uml_net_private) + 336 size = transport->private_size + sizeof(struct uml_net_private) +
335 sizeof(((struct uml_net_private *) 0)->user); 337 sizeof(((struct uml_net_private *) 0)->user);
336 338
337 device = kmalloc(sizeof(*device), GFP_KERNEL); 339 device = kzalloc(sizeof(*device), GFP_KERNEL);
338 if (device == NULL) { 340 if (device == NULL) {
339 printk(KERN_ERR "eth_configure failed to allocate uml_net\n"); 341 printk(KERN_ERR "eth_configure failed to allocate uml_net\n");
340 return(1); 342 return(1);
341 } 343 }
342 344
343 memset(device, 0, sizeof(*device));
344 INIT_LIST_HEAD(&device->list); 345 INIT_LIST_HEAD(&device->list);
345 device->index = n; 346 device->index = n;
346 347
@@ -366,6 +367,7 @@ static int eth_configure(int n, void *init, char *mac,
366 /* This points to the transport private data. It's still clear, but we 367 /* This points to the transport private data. It's still clear, but we
367 * must memset it to 0 *now*. Let's help the drivers. */ 368 * must memset it to 0 *now*. Let's help the drivers. */
368 memset(lp, 0, size); 369 memset(lp, 0, size);
370 INIT_WORK(&lp->work, uml_dev_close);
369 371
370 /* sysfs register */ 372 /* sysfs register */
371 if (!driver_registered) { 373 if (!driver_registered) {
diff --git a/arch/um/include/net_kern.h b/arch/um/include/net_kern.h
index 280459fb0b26..218f8b47fdcd 100644
--- a/arch/um/include/net_kern.h
+++ b/arch/um/include/net_kern.h
@@ -11,6 +11,7 @@
11#include <linux/skbuff.h> 11#include <linux/skbuff.h>
12#include <linux/socket.h> 12#include <linux/socket.h>
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/workqueue.h>
14 15
15struct uml_net { 16struct uml_net {
16 struct list_head list; 17 struct list_head list;
@@ -26,6 +27,7 @@ struct uml_net_private {
26 struct net_device *dev; 27 struct net_device *dev;
27 struct timer_list tl; 28 struct timer_list tl;
28 struct net_device_stats stats; 29 struct net_device_stats stats;
30 struct work_struct work;
29 int fd; 31 int fd;
30 unsigned char mac[ETH_ALEN]; 32 unsigned char mac[ETH_ALEN];
31 unsigned short (*protocol)(struct sk_buff *); 33 unsigned short (*protocol)(struct sk_buff *);
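The two arch/um hunks above convert the driver to the workqueue calling convention in which the work_struct is embedded in uml_net_private, the handler receives a pointer to that work_struct, and container_of() recovers the enclosing structure; that is why the on-stack DECLARE_WORK pattern (and the #error guarding it) could be removed. A self-contained plain-C sketch of the container_of() idea, with illustrative type and field names rather than UML's:

    #include <stddef.h>
    #include <stdio.h>

    /* Same trick as the kernel's container_of(): step back from a pointer to
     * an embedded member to the structure that contains it. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_item { int pending; };          /* stand-in for struct work_struct */

    struct net_private {                        /* stand-in for struct uml_net_private */
            const char *name;
            struct work_item work;              /* embedded, like lp->work above */
    };

    static void dev_close_handler(struct work_item *w)
    {
            struct net_private *lp = container_of(w, struct net_private, work);
            printf("closing %s\n", lp->name);
    }

    int main(void)
    {
            struct net_private lp = { .name = "eth0", .work = { 0 } };
            dev_close_handler(&lp.work);        /* handler sees only the member */
            return 0;
    }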
diff --git a/arch/um/sys-i386/ldt.c b/arch/um/sys-i386/ldt.c
index 49057d8bc668..5db7737df0ff 100644
--- a/arch/um/sys-i386/ldt.c
+++ b/arch/um/sys-i386/ldt.c
@@ -166,7 +166,7 @@ static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
166 struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) { 166 struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
167 .func = 0, 167 .func = 0,
168 .bytecount = bytecount, 168 .bytecount = bytecount,
169 .ptr = (void *)kmalloc(bytecount, GFP_KERNEL)}; 169 .ptr = kmalloc(bytecount, GFP_KERNEL)};
170 u32 cpu; 170 u32 cpu;
171 171
172 if(ptrace_ldt.ptr == NULL) 172 if(ptrace_ldt.ptr == NULL)
@@ -426,7 +426,7 @@ void ldt_get_host_info(void)
426 host_ldt_entries = dummy_list; 426 host_ldt_entries = dummy_list;
427 else { 427 else {
428 size = (size + 1) * sizeof(dummy_list[0]); 428 size = (size + 1) * sizeof(dummy_list[0]);
429 host_ldt_entries = (short *)kmalloc(size, GFP_KERNEL); 429 host_ldt_entries = kmalloc(size, GFP_KERNEL);
430 if(host_ldt_entries == NULL) { 430 if(host_ldt_entries == NULL) {
431 printk("ldt_get_host_info: couldn't allocate host ldt list\n"); 431 printk("ldt_get_host_info: couldn't allocate host ldt list\n");
432 goto out_free; 432 goto out_free;
diff --git a/arch/v850/Kconfig b/arch/v850/Kconfig
index bcf825875d17..f0d4d72e560f 100644
--- a/arch/v850/Kconfig
+++ b/arch/v850/Kconfig
@@ -105,17 +105,17 @@ menu "Processor type and features"
105 # currently support 105 # currently support
106 config V850E_MA1 106 config V850E_MA1
107 bool 107 bool
108 depends RTE_CB_MA1 108 depends on RTE_CB_MA1
109 default y 109 default y
110 # Similarly for the RTE-V850E/NB85E-CB - V850E/TEG 110 # Similarly for the RTE-V850E/NB85E-CB - V850E/TEG
111 config V850E_TEG 111 config V850E_TEG
112 bool 112 bool
113 depends RTE_CB_NB85E 113 depends on RTE_CB_NB85E
114 default y 114 default y
115 # ... and the RTE-V850E/ME2-CB - V850E/ME2 115 # ... and the RTE-V850E/ME2-CB - V850E/ME2
116 config V850E_ME2 116 config V850E_ME2
117 bool 117 bool
118 depends RTE_CB_ME2 118 depends on RTE_CB_ME2
119 default y 119 default y
120 120
121 121
@@ -123,7 +123,7 @@ menu "Processor type and features"
123 123
124 config V850E2_SIM85E2 124 config V850E2_SIM85E2
125 bool 125 bool
126 depends V850E2_SIM85E2C || V850E2_SIM85E2S 126 depends on V850E2_SIM85E2C || V850E2_SIM85E2S
127 default y 127 default y
128 128
129 129
@@ -132,7 +132,7 @@ menu "Processor type and features"
132 # V850E2 processors 132 # V850E2 processors
133 config V850E2 133 config V850E2
134 bool 134 bool
135 depends V850E2_SIM85E2 || V850E2_FPGA85E2C || V850E2_ANNA 135 depends on V850E2_SIM85E2 || V850E2_FPGA85E2C || V850E2_ANNA
136 default y 136 default y
137 137
138 138
@@ -141,7 +141,7 @@ menu "Processor type and features"
141 # Boards in the RTE-x-CB series 141 # Boards in the RTE-x-CB series
142 config RTE_CB 142 config RTE_CB
143 bool 143 bool
144 depends RTE_CB_MA1 || RTE_CB_NB85E || RTE_CB_ME2 144 depends on RTE_CB_MA1 || RTE_CB_NB85E || RTE_CB_ME2
145 default y 145 default y
146 146
147 config RTE_CB_MULTI 147 config RTE_CB_MULTI
@@ -149,28 +149,28 @@ menu "Processor type and features"
149 # RTE_CB_NB85E can either have multi ROM support or not, but 149 # RTE_CB_NB85E can either have multi ROM support or not, but
150 # other platforms (currently only RTE_CB_MA1) require it. 150 # other platforms (currently only RTE_CB_MA1) require it.
151 prompt "Multi monitor ROM support" if RTE_CB_NB85E 151 prompt "Multi monitor ROM support" if RTE_CB_NB85E
152 depends RTE_CB_MA1 || RTE_CB_NB85E 152 depends on RTE_CB_MA1 || RTE_CB_NB85E
153 default y 153 default y
154 154
155 config RTE_CB_MULTI_DBTRAP 155 config RTE_CB_MULTI_DBTRAP
156 bool "Pass illegal insn trap / dbtrap to kernel" 156 bool "Pass illegal insn trap / dbtrap to kernel"
157 depends RTE_CB_MULTI 157 depends on RTE_CB_MULTI
158 default n 158 default n
159 159
160 config RTE_CB_MA1_KSRAM 160 config RTE_CB_MA1_KSRAM
161 bool "Kernel in SRAM (limits size of kernel)" 161 bool "Kernel in SRAM (limits size of kernel)"
162 depends RTE_CB_MA1 && RTE_CB_MULTI 162 depends on RTE_CB_MA1 && RTE_CB_MULTI
163 default n 163 default n
164 164
165 config RTE_MB_A_PCI 165 config RTE_MB_A_PCI
166 bool "Mother-A PCI support" 166 bool "Mother-A PCI support"
167 depends RTE_CB 167 depends on RTE_CB
168 default y 168 default y
169 169
170 # The GBUS is used to talk to the RTE-MOTHER-A board 170 # The GBUS is used to talk to the RTE-MOTHER-A board
171 config RTE_GBUS_INT 171 config RTE_GBUS_INT
172 bool 172 bool
173 depends RTE_MB_A_PCI 173 depends on RTE_MB_A_PCI
174 default y 174 default y
175 175
176 # The only PCI bus we support is on the RTE-MOTHER-A board 176 # The only PCI bus we support is on the RTE-MOTHER-A board
@@ -209,7 +209,7 @@ menu "Processor type and features"
209 209
210 config ROM_KERNEL 210 config ROM_KERNEL
211 bool "Kernel in ROM" 211 bool "Kernel in ROM"
212 depends V850E2_ANNA || V850E_AS85EP1 || RTE_CB_ME2 212 depends on V850E2_ANNA || V850E_AS85EP1 || RTE_CB_ME2
213 213
214 # Some platforms pre-zero memory, in which case the kernel doesn't need to 214 # Some platforms pre-zero memory, in which case the kernel doesn't need to
215 config ZERO_BSS 215 config ZERO_BSS
@@ -225,10 +225,10 @@ menu "Processor type and features"
225 225
226 config V850E_HIGHRES_TIMER 226 config V850E_HIGHRES_TIMER
227 bool "High resolution timer support" 227 bool "High resolution timer support"
228 depends V850E_TIMER_D 228 depends on V850E_TIMER_D
229 config TIME_BOOTUP 229 config TIME_BOOTUP
230 bool "Time bootup" 230 bool "Time bootup"
231 depends V850E_HIGHRES_TIMER 231 depends on V850E_HIGHRES_TIMER
232 232
233 config RESET_GUARD 233 config RESET_GUARD
234 bool "Reset Guard" 234 bool "Reset Guard"
diff --git a/arch/x86_64/kernel/cpufreq/Kconfig b/arch/x86_64/kernel/cpufreq/Kconfig
index 81f1562e5393..3abcfa3e1ed7 100644
--- a/arch/x86_64/kernel/cpufreq/Kconfig
+++ b/arch/x86_64/kernel/cpufreq/Kconfig
@@ -27,10 +27,13 @@ config X86_POWERNOW_K8_ACPI
27 default y 27 default y
28 28
29config X86_SPEEDSTEP_CENTRINO 29config X86_SPEEDSTEP_CENTRINO
30 tristate "Intel Enhanced SpeedStep" 30 tristate "Intel Enhanced SpeedStep (deprecated)"
31 select CPU_FREQ_TABLE 31 select CPU_FREQ_TABLE
32 depends on ACPI_PROCESSOR 32 depends on ACPI_PROCESSOR
33 help 33 help
34 This is deprecated and this functionality is now merged into
35 acpi_cpufreq (X86_ACPI_CPUFREQ). Use that driver instead of
36 speedstep_centrino.
34 This adds the CPUFreq driver for Enhanced SpeedStep enabled 37 This adds the CPUFreq driver for Enhanced SpeedStep enabled
35 mobile CPUs. This means Intel Pentium M (Centrino) CPUs 38 mobile CPUs. This means Intel Pentium M (Centrino) CPUs
36 or 64bit enabled Intel Xeons. 39 or 64bit enabled Intel Xeons.
@@ -50,6 +53,7 @@ config X86_ACPI_CPUFREQ
50 help 53 help
51 This driver adds a CPUFreq driver which utilizes the ACPI 54 This driver adds a CPUFreq driver which utilizes the ACPI
52 Processor Performance States. 55 Processor Performance States.
56 This driver also supports Intel Enhanced Speedstep.
53 57
54 For details, take a look at <file:Documentation/cpu-freq/>. 58 For details, take a look at <file:Documentation/cpu-freq/>.
55 59
diff --git a/arch/x86_64/kernel/cpufreq/Makefile b/arch/x86_64/kernel/cpufreq/Makefile
index d8b593879224..753ce1dd418e 100644
--- a/arch/x86_64/kernel/cpufreq/Makefile
+++ b/arch/x86_64/kernel/cpufreq/Makefile
@@ -5,8 +5,8 @@
5SRCDIR := ../../../i386/kernel/cpu/cpufreq 5SRCDIR := ../../../i386/kernel/cpu/cpufreq
6 6
7obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o 7obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
8obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
9obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o 8obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
9obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
10obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o 10obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
11obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o 11obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o
12 12
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index d573017a5dde..71f733c4f66d 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -6,7 +6,7 @@ extra-y := head.o vmlinux.lds
6 6
7 7
8obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \ 8obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \
9 setup.o signal.o syscalls.o time.o traps.o vectors.o platform.o \ 9 setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \
10 pci-dma.o 10 pci-dma.o
11 11
12## windowspill.o 12## windowspill.o
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 418268f49766..fe3834bc1dbf 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -16,7 +16,7 @@
16 * 16 *
17 */ 17 */
18#include <asm/uaccess.h> 18#include <asm/uaccess.h>
19#include <asm/syscalls.h> 19#include <asm/syscall.h>
20#include <asm/unistd.h> 20#include <asm/unistd.h>
21#include <linux/linkage.h> 21#include <linux/linkage.h>
22#include <linux/stringify.h> 22#include <linux/stringify.h>
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 5934c4bfd52a..ef126277b4b3 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1462,20 +1462,7 @@ static struct elevator_type iosched_as = {
1462 1462
1463static int __init as_init(void) 1463static int __init as_init(void)
1464{ 1464{
1465 int ret; 1465 return elv_register(&iosched_as);
1466
1467 ret = elv_register(&iosched_as);
1468 if (!ret) {
1469 /*
1470 * don't allow AS to get unregistered, since we would have
1471 * to browse all tasks in the system and release their
1472 * as_io_context first
1473 */
1474 __module_get(THIS_MODULE);
1475 return 0;
1476 }
1477
1478 return ret;
1479} 1466}
1480 1467
1481static void __exit as_exit(void) 1468static void __exit as_exit(void)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 78c6b312bd30..533a2938ffd6 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -219,9 +219,12 @@ static int cfq_queue_empty(request_queue_t *q)
219 return !cfqd->busy_queues; 219 return !cfqd->busy_queues;
220} 220}
221 221
222static inline pid_t cfq_queue_pid(struct task_struct *task, int rw) 222static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
223{ 223{
224 if (rw == READ || rw == WRITE_SYNC) 224 /*
 225 * Use the per-process queue for read requests and synchronous writes
226 */
227 if (!(rw & REQ_RW) || is_sync)
225 return task->pid; 228 return task->pid;
226 229
227 return CFQ_KEY_ASYNC; 230 return CFQ_KEY_ASYNC;
@@ -473,7 +476,7 @@ static struct request *
473cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) 476cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
474{ 477{
475 struct task_struct *tsk = current; 478 struct task_struct *tsk = current;
476 pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio)); 479 pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
477 struct cfq_queue *cfqq; 480 struct cfq_queue *cfqq;
478 481
479 cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio); 482 cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
@@ -1748,6 +1751,9 @@ static int cfq_may_queue(request_queue_t *q, int rw)
1748 struct cfq_data *cfqd = q->elevator->elevator_data; 1751 struct cfq_data *cfqd = q->elevator->elevator_data;
1749 struct task_struct *tsk = current; 1752 struct task_struct *tsk = current;
1750 struct cfq_queue *cfqq; 1753 struct cfq_queue *cfqq;
1754 unsigned int key;
1755
1756 key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
1751 1757
1752 /* 1758 /*
1753 * don't force setup of a queue from here, as a call to may_queue 1759 * don't force setup of a queue from here, as a call to may_queue
@@ -1755,7 +1761,7 @@ static int cfq_may_queue(request_queue_t *q, int rw)
1755 * so just lookup a possibly existing queue, or return 'may queue' 1761 * so just lookup a possibly existing queue, or return 'may queue'
1756 * if that fails 1762 * if that fails
1757 */ 1763 */
1758 cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio); 1764 cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
1759 if (cfqq) { 1765 if (cfqq) {
1760 cfq_init_prio_data(cfqq); 1766 cfq_init_prio_data(cfqq);
1761 cfq_prio_boost(cfqq); 1767 cfq_prio_boost(cfqq);
@@ -1798,10 +1804,10 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
1798 struct task_struct *tsk = current; 1804 struct task_struct *tsk = current;
1799 struct cfq_io_context *cic; 1805 struct cfq_io_context *cic;
1800 const int rw = rq_data_dir(rq); 1806 const int rw = rq_data_dir(rq);
1801 pid_t key = cfq_queue_pid(tsk, rw); 1807 const int is_sync = rq_is_sync(rq);
1808 pid_t key = cfq_queue_pid(tsk, rw, is_sync);
1802 struct cfq_queue *cfqq; 1809 struct cfq_queue *cfqq;
1803 unsigned long flags; 1810 unsigned long flags;
1804 int is_sync = key != CFQ_KEY_ASYNC;
1805 1811
1806 might_sleep_if(gfp_mask & __GFP_WAIT); 1812 might_sleep_if(gfp_mask & __GFP_WAIT);
1807 1813
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index a541b42c08e3..79807dbc306e 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2058,15 +2058,16 @@ static void freed_request(request_queue_t *q, int rw, int priv)
2058 * Returns NULL on failure, with queue_lock held. 2058 * Returns NULL on failure, with queue_lock held.
2059 * Returns !NULL on success, with queue_lock *not held*. 2059 * Returns !NULL on success, with queue_lock *not held*.
2060 */ 2060 */
2061static struct request *get_request(request_queue_t *q, int rw, struct bio *bio, 2061static struct request *get_request(request_queue_t *q, int rw_flags,
2062 gfp_t gfp_mask) 2062 struct bio *bio, gfp_t gfp_mask)
2063{ 2063{
2064 struct request *rq = NULL; 2064 struct request *rq = NULL;
2065 struct request_list *rl = &q->rq; 2065 struct request_list *rl = &q->rq;
2066 struct io_context *ioc = NULL; 2066 struct io_context *ioc = NULL;
2067 const int rw = rw_flags & 0x01;
2067 int may_queue, priv; 2068 int may_queue, priv;
2068 2069
2069 may_queue = elv_may_queue(q, rw); 2070 may_queue = elv_may_queue(q, rw_flags);
2070 if (may_queue == ELV_MQUEUE_NO) 2071 if (may_queue == ELV_MQUEUE_NO)
2071 goto rq_starved; 2072 goto rq_starved;
2072 2073
@@ -2114,7 +2115,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
2114 2115
2115 spin_unlock_irq(q->queue_lock); 2116 spin_unlock_irq(q->queue_lock);
2116 2117
2117 rq = blk_alloc_request(q, rw, priv, gfp_mask); 2118 rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
2118 if (unlikely(!rq)) { 2119 if (unlikely(!rq)) {
2119 /* 2120 /*
2120 * Allocation failed presumably due to memory. Undo anything 2121 * Allocation failed presumably due to memory. Undo anything
@@ -2162,12 +2163,13 @@ out:
2162 * 2163 *
2163 * Called with q->queue_lock held, and returns with it unlocked. 2164 * Called with q->queue_lock held, and returns with it unlocked.
2164 */ 2165 */
2165static struct request *get_request_wait(request_queue_t *q, int rw, 2166static struct request *get_request_wait(request_queue_t *q, int rw_flags,
2166 struct bio *bio) 2167 struct bio *bio)
2167{ 2168{
2169 const int rw = rw_flags & 0x01;
2168 struct request *rq; 2170 struct request *rq;
2169 2171
2170 rq = get_request(q, rw, bio, GFP_NOIO); 2172 rq = get_request(q, rw_flags, bio, GFP_NOIO);
2171 while (!rq) { 2173 while (!rq) {
2172 DEFINE_WAIT(wait); 2174 DEFINE_WAIT(wait);
2173 struct request_list *rl = &q->rq; 2175 struct request_list *rl = &q->rq;
@@ -2175,7 +2177,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
2175 prepare_to_wait_exclusive(&rl->wait[rw], &wait, 2177 prepare_to_wait_exclusive(&rl->wait[rw], &wait,
2176 TASK_UNINTERRUPTIBLE); 2178 TASK_UNINTERRUPTIBLE);
2177 2179
2178 rq = get_request(q, rw, bio, GFP_NOIO); 2180 rq = get_request(q, rw_flags, bio, GFP_NOIO);
2179 2181
2180 if (!rq) { 2182 if (!rq) {
2181 struct io_context *ioc; 2183 struct io_context *ioc;
@@ -2910,6 +2912,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
2910 int el_ret, nr_sectors, barrier, err; 2912 int el_ret, nr_sectors, barrier, err;
2911 const unsigned short prio = bio_prio(bio); 2913 const unsigned short prio = bio_prio(bio);
2912 const int sync = bio_sync(bio); 2914 const int sync = bio_sync(bio);
2915 int rw_flags;
2913 2916
2914 nr_sectors = bio_sectors(bio); 2917 nr_sectors = bio_sectors(bio);
2915 2918
@@ -2984,10 +2987,19 @@ static int __make_request(request_queue_t *q, struct bio *bio)
2984 2987
2985get_rq: 2988get_rq:
2986 /* 2989 /*
2990 * This sync check and mask will be re-done in init_request_from_bio(),
2991 * but we need to set it earlier to expose the sync flag to the
2992 * rq allocator and io schedulers.
2993 */
2994 rw_flags = bio_data_dir(bio);
2995 if (sync)
2996 rw_flags |= REQ_RW_SYNC;
2997
2998 /*
2987 * Grab a free request. This is might sleep but can not fail. 2999 * Grab a free request. This is might sleep but can not fail.
2988 * Returns with the queue unlocked. 3000 * Returns with the queue unlocked.
2989 */ 3001 */
2990 req = get_request_wait(q, bio_data_dir(bio), bio); 3002 req = get_request_wait(q, rw_flags, bio);
2991 3003
2992 /* 3004 /*
2993 * After dropping the lock and possibly sleeping here, our request 3005 * After dropping the lock and possibly sleeping here, our request
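The ll_rw_blk.c hunks above thread the full rw_flags value (direction plus the sync bit) down to elv_may_queue() and blk_alloc_request(), while the wait-queue bookkeeping keeps using only the direction, extracted with rw_flags & 0x01. A small sketch of that compose-then-mask step; the flag values below are illustrative, and only the bit-0-is-direction convention is taken from the code above:

    #include <stdio.h>

    #define DIR_WRITE  0x01         /* bit 0: data direction, as in the hunk */
    #define FLAG_SYNC  0x02         /* illustrative stand-in for REQ_RW_SYNC */

    int main(void)
    {
            int sync = 1;
            int rw_flags = DIR_WRITE;       /* direction from the bio */
            if (sync)
                    rw_flags |= FLAG_SYNC;  /* exposed early, per the comment above */

            int rw = rw_flags & 0x01;       /* direction only, for the wait queues */
            printf("rw_flags=%#x dir=%d\n", rw_flags, rw);
            return 0;
    }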
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 034c939bf91a..6e93004f2181 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -17,7 +17,6 @@
17#include <linux/crypto.h> 17#include <linux/crypto.h>
18#include <linux/errno.h> 18#include <linux/errno.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/io.h>
21#include <linux/module.h> 20#include <linux/module.h>
22#include <linux/scatterlist.h> 21#include <linux/scatterlist.h>
23#include <linux/seq_file.h> 22#include <linux/seq_file.h>
diff --git a/drivers/acorn/block/fd1772.c b/drivers/acorn/block/fd1772.c
index 048542341204..674bf81c6e66 100644
--- a/drivers/acorn/block/fd1772.c
+++ b/drivers/acorn/block/fd1772.c
@@ -1549,12 +1549,12 @@ int fd1772_init(void)
1549#ifdef TRACKBUFFER 1549#ifdef TRACKBUFFER
1550 BufferDrive = BufferSide = BufferTrack = -1; 1550 BufferDrive = BufferSide = BufferTrack = -1;
1551 /* Atari uses 512 - I want to eventually cope with 1K sectors */ 1551 /* Atari uses 512 - I want to eventually cope with 1K sectors */
1552 DMABuffer = (char *)kmalloc((FD1772_MAX_SECTORS+1)*512,GFP_KERNEL); 1552 DMABuffer = kmalloc((FD1772_MAX_SECTORS+1)*512,GFP_KERNEL);
1553 TrackBuffer = DMABuffer + 512; 1553 TrackBuffer = DMABuffer + 512;
1554#else 1554#else
1555 /* Allocate memory for the DMAbuffer - on the Atari this takes it 1555 /* Allocate memory for the DMAbuffer - on the Atari this takes it
1556 out of some special memory... */ 1556 out of some special memory... */
1557 DMABuffer = (char *) kmalloc(2048); /* Copes with pretty large sectors */ 1557 DMABuffer = kmalloc(2048); /* Copes with pretty large sectors */
1558#endif 1558#endif
1559 err = -ENOMEM; 1559 err = -ENOMEM;
1560 if (!DMAbuffer) 1560 if (!DMAbuffer)
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 9021e34d2096..90786d7a20bb 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -551,7 +551,7 @@ static int adma_port_start(struct ata_port *ap)
551 return rc; 551 return rc;
552 adma_enter_reg_mode(ap); 552 adma_enter_reg_mode(ap);
553 rc = -ENOMEM; 553 rc = -ENOMEM;
554 pp = kcalloc(1, sizeof(*pp), GFP_KERNEL); 554 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
555 if (!pp) 555 if (!pp)
556 goto err_out; 556 goto err_out;
557 pp->pkt = dma_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma, 557 pp->pkt = dma_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
@@ -672,7 +672,7 @@ static int adma_ata_init_one(struct pci_dev *pdev,
672 if (rc) 672 if (rc)
673 goto err_out_iounmap; 673 goto err_out_iounmap;
674 674
675 probe_ent = kcalloc(1, sizeof(*probe_ent), GFP_KERNEL); 675 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
676 if (probe_ent == NULL) { 676 if (probe_ent == NULL) {
677 rc = -ENOMEM; 677 rc = -ENOMEM;
678 goto err_out_iounmap; 678 goto err_out_iounmap;
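The pdc_adma.c hunks above swap kcalloc(1, size, flags) for kzalloc(size, flags): both return zeroed memory, but kzalloc reads as one zeroed object rather than an array of one (the same cleanup the earlier eth_configure() hunk makes by replacing kmalloc plus memset with kzalloc). A userspace sketch of the equivalent allocations; the structure here is illustrative only:

    #include <stdlib.h>
    #include <string.h>

    struct port_priv { int flags; void *pkt; };   /* illustrative stand-in */

    int main(void)
    {
            /* calloc(1, n) and malloc followed by memset both give zeroed
             * storage for a single object. */
            struct port_priv *a = calloc(1, sizeof(*a));
            if (a == NULL)
                    return 1;

            struct port_priv *b = malloc(sizeof(*b));
            if (b == NULL) {
                    free(a);
                    return 1;
            }
            memset(b, 0, sizeof(*b));

            free(a);
            free(b);
            return 0;
    }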
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index bc1b13c8f5d7..5aab7bd473ac 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1832,7 +1832,7 @@ static int __devinit eni_start(struct atm_dev *dev)
1832 /* initialize memory management */ 1832 /* initialize memory management */
1833 buffer_mem = eni_dev->mem - (buf - eni_dev->ram); 1833 buffer_mem = eni_dev->mem - (buf - eni_dev->ram);
1834 eni_dev->free_list_size = buffer_mem/MID_MIN_BUF_SIZE/2; 1834 eni_dev->free_list_size = buffer_mem/MID_MIN_BUF_SIZE/2;
1835 eni_dev->free_list = (struct eni_free *) kmalloc( 1835 eni_dev->free_list = kmalloc(
1836 sizeof(struct eni_free)*(eni_dev->free_list_size+1),GFP_KERNEL); 1836 sizeof(struct eni_free)*(eni_dev->free_list_size+1),GFP_KERNEL);
1837 if (!eni_dev->free_list) { 1837 if (!eni_dev->free_list) {
1838 printk(KERN_ERR DEV_LABEL "(itf %d): couldn't get free page\n", 1838 printk(KERN_ERR DEV_LABEL "(itf %d): couldn't get free page\n",
@@ -2232,7 +2232,7 @@ static int __devinit eni_init_one(struct pci_dev *pci_dev,
2232 goto out0; 2232 goto out0;
2233 } 2233 }
2234 2234
2235 eni_dev = (struct eni_dev *) kmalloc(sizeof(struct eni_dev),GFP_KERNEL); 2235 eni_dev = kmalloc(sizeof(struct eni_dev),GFP_KERNEL);
2236 if (!eni_dev) goto out0; 2236 if (!eni_dev) goto out0;
2237 if (!cpu_zeroes) { 2237 if (!cpu_zeroes) {
2238 cpu_zeroes = pci_alloc_consistent(pci_dev,ENI_ZEROES_SIZE, 2238 cpu_zeroes = pci_alloc_consistent(pci_dev,ENI_ZEROES_SIZE,
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 7d9b4e52f0bf..db33f6f4dd2a 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2351,7 +2351,7 @@ he_open(struct atm_vcc *vcc)
2351 2351
2352 cid = he_mkcid(he_dev, vpi, vci); 2352 cid = he_mkcid(he_dev, vpi, vci);
2353 2353
2354 he_vcc = (struct he_vcc *) kmalloc(sizeof(struct he_vcc), GFP_ATOMIC); 2354 he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2355 if (he_vcc == NULL) { 2355 if (he_vcc == NULL) {
2356 hprintk("unable to allocate he_vcc during open\n"); 2356 hprintk("unable to allocate he_vcc during open\n");
2357 return -ENOMEM; 2357 return -ENOMEM;
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 267825501dfe..09f477d4237a 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -2602,7 +2602,7 @@ static int __devinit lanai_init_one(struct pci_dev *pci,
2602 struct atm_dev *atmdev; 2602 struct atm_dev *atmdev;
2603 int result; 2603 int result;
2604 2604
2605 lanai = (struct lanai_dev *) kmalloc(sizeof(*lanai), GFP_KERNEL); 2605 lanai = kmalloc(sizeof(*lanai), GFP_KERNEL);
2606 if (lanai == NULL) { 2606 if (lanai == NULL) {
2607 printk(KERN_ERR DEV_LABEL 2607 printk(KERN_ERR DEV_LABEL
2608 ": couldn't allocate dev_data structure!\n"); 2608 ": couldn't allocate dev_data structure!\n");
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index bd0904594805..aab9b3733d52 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -997,7 +997,7 @@ static scq_info *get_scq(int size, u32 scd)
997 if (size != VBR_SCQSIZE && size != CBR_SCQSIZE) 997 if (size != VBR_SCQSIZE && size != CBR_SCQSIZE)
998 return NULL; 998 return NULL;
999 999
1000 scq = (scq_info *) kmalloc(sizeof(scq_info), GFP_KERNEL); 1000 scq = kmalloc(sizeof(scq_info), GFP_KERNEL);
1001 if (scq == NULL) 1001 if (scq == NULL)
1002 return NULL; 1002 return NULL;
1003 scq->org = kmalloc(2 * size, GFP_KERNEL); 1003 scq->org = kmalloc(2 * size, GFP_KERNEL);
@@ -1006,7 +1006,7 @@ static scq_info *get_scq(int size, u32 scd)
1006 kfree(scq); 1006 kfree(scq);
1007 return NULL; 1007 return NULL;
1008 } 1008 }
1009 scq->skb = (struct sk_buff **) kmalloc(sizeof(struct sk_buff *) * 1009 scq->skb = kmalloc(sizeof(struct sk_buff *) *
1010 (size / NS_SCQE_SIZE), GFP_KERNEL); 1010 (size / NS_SCQE_SIZE), GFP_KERNEL);
1011 if (scq->skb == NULL) 1011 if (scq->skb == NULL)
1012 { 1012 {
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 7df0f373188e..756d4f760da3 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -996,7 +996,7 @@ static int start_tx(struct atm_dev *dev)
996 996
997 DPRINTK("start_tx\n"); 997 DPRINTK("start_tx\n");
998 zatm_dev = ZATM_DEV(dev); 998 zatm_dev = ZATM_DEV(dev);
999 zatm_dev->tx_map = (struct atm_vcc **) kmalloc(sizeof(struct atm_vcc *)* 999 zatm_dev->tx_map = kmalloc(sizeof(struct atm_vcc *)*
1000 zatm_dev->chans,GFP_KERNEL); 1000 zatm_dev->chans,GFP_KERNEL);
1001 if (!zatm_dev->tx_map) return -ENOMEM; 1001 if (!zatm_dev->tx_map) return -ENOMEM;
1002 zatm_dev->tx_bw = ATM_OC3_PCR; 1002 zatm_dev->tx_bw = ATM_OC3_PCR;
@@ -1591,7 +1591,7 @@ static int __devinit zatm_init_one(struct pci_dev *pci_dev,
1591 struct zatm_dev *zatm_dev; 1591 struct zatm_dev *zatm_dev;
1592 int ret = -ENOMEM; 1592 int ret = -ENOMEM;
1593 1593
1594 zatm_dev = (struct zatm_dev *) kmalloc(sizeof(*zatm_dev), GFP_KERNEL); 1594 zatm_dev = kmalloc(sizeof(*zatm_dev), GFP_KERNEL);
1595 if (!zatm_dev) { 1595 if (!zatm_dev) {
1596 printk(KERN_EMERG "%s: memory shortage\n", DEV_LABEL); 1596 printk(KERN_EMERG "%s: memory shortage\n", DEV_LABEL);
1597 goto out; 1597 goto out;
diff --git a/drivers/base/class.c b/drivers/base/class.c
index f098881f45b2..8bf2ca2e56b5 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -163,6 +163,8 @@ int class_register(struct class * cls)
163void class_unregister(struct class * cls) 163void class_unregister(struct class * cls)
164{ 164{
165 pr_debug("device class '%s': unregistering\n", cls->name); 165 pr_debug("device class '%s': unregistering\n", cls->name);
166 if (cls->virtual_dir)
167 kobject_unregister(cls->virtual_dir);
166 remove_class_attrs(cls); 168 remove_class_attrs(cls);
167 subsystem_unregister(&cls->subsys); 169 subsystem_unregister(&cls->subsys);
168} 170}
diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c
index dbe0735f8c9e..f95d50277274 100644
--- a/drivers/base/dmapool.c
+++ b/drivers/base/dmapool.c
@@ -173,7 +173,7 @@ pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags)
173 mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG; 173 mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
174 mapsize *= sizeof (long); 174 mapsize *= sizeof (long);
175 175
176 page = (struct dma_page *) kmalloc (mapsize + sizeof *page, mem_flags); 176 page = kmalloc(mapsize + sizeof *page, mem_flags);
177 if (!page) 177 if (!page)
178 return NULL; 178 return NULL;
179 page->vaddr = dma_alloc_coherent (pool->dev, 179 page->vaddr = dma_alloc_coherent (pool->dev,
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index d1df4a087924..f9c903ba9fcd 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -212,7 +212,7 @@ EXPORT_SYMBOL_GPL(platform_device_add_resources);
212 * pointer. The memory associated with the platform data will be freed 212 * pointer. The memory associated with the platform data will be freed
213 * when the platform device is released. 213 * when the platform device is released.
214 */ 214 */
215int platform_device_add_data(struct platform_device *pdev, void *data, size_t size) 215int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size)
216{ 216{
217 void *d; 217 void *d;
218 218
@@ -473,7 +473,7 @@ EXPORT_SYMBOL_GPL(platform_driver_unregister);
473 * Returns zero if the driver registered and bound to a device, else returns 473 * Returns zero if the driver registered and bound to a device, else returns
474 * a negative error code and with the driver not registered. 474 * a negative error code and with the driver not registered.
475 */ 475 */
476int platform_driver_probe(struct platform_driver *drv, 476int __init_or_module platform_driver_probe(struct platform_driver *drv,
477 int (*probe)(struct platform_device *)) 477 int (*probe)(struct platform_device *))
478{ 478{
479 int retval, code; 479 int retval, code;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ce9cfcb6071c..58c1debf86f1 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -28,13 +28,6 @@ config ATARI_FLOPPY
28 tristate "Atari floppy support" 28 tristate "Atari floppy support"
29 depends on ATARI 29 depends on ATARI
30 30
31config BLK_DEV_SWIM_IOP
32 bool "Macintosh IIfx/Quadra 900/Quadra 950 floppy support (EXPERIMENTAL)"
33 depends on MAC && EXPERIMENTAL && BROKEN
34 help
35 Say Y here to support the SWIM (Super Woz Integrated Machine) IOP
36 floppy controller on the Macintosh IIfx and Quadra 900/950.
37
38config MAC_FLOPPY 31config MAC_FLOPPY
39 tristate "Support for PowerMac floppy" 32 tristate "Support for PowerMac floppy"
40 depends on PPC_PMAC && !PPC_PMAC64 33 depends on PPC_PMAC && !PPC_PMAC64
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 410f259a8031..dd88e33c1eb1 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -9,7 +9,6 @@ obj-$(CONFIG_MAC_FLOPPY) += swim3.o
9obj-$(CONFIG_BLK_DEV_FD) += floppy.o 9obj-$(CONFIG_BLK_DEV_FD) += floppy.o
10obj-$(CONFIG_AMIGA_FLOPPY) += amiflop.o 10obj-$(CONFIG_AMIGA_FLOPPY) += amiflop.o
11obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o 11obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o
12obj-$(CONFIG_BLK_DEV_SWIM_IOP) += swim_iop.o
13obj-$(CONFIG_ATARI_ACSI) += acsi.o 12obj-$(CONFIG_ATARI_ACSI) += acsi.o
14obj-$(CONFIG_ATARI_SLM) += acsi_slm.o 13obj-$(CONFIG_ATARI_SLM) += acsi_slm.o
15obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o 14obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index ee159edb6b88..d719a5d8f435 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1039,7 +1039,7 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
1039 status = -ENOMEM; 1039 status = -ENOMEM;
1040 goto cleanup1; 1040 goto cleanup1;
1041 } 1041 }
1042 buff_size = (int *)kmalloc(MAXSGENTRIES * sizeof(int), 1042 buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
1043 GFP_KERNEL); 1043 GFP_KERNEL);
1044 if (!buff_size) { 1044 if (!buff_size) {
1045 status = -ENOMEM; 1045 status = -ENOMEM;
@@ -2837,7 +2837,7 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2837 if (err) { 2837 if (err) {
2838 printk(KERN_ERR "cciss: Cannot obtain PCI resources, " 2838 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
2839 "aborting\n"); 2839 "aborting\n");
2840 goto err_out_disable_pdev; 2840 return err;
2841 } 2841 }
2842 2842
2843 subsystem_vendor_id = pdev->subsystem_vendor; 2843 subsystem_vendor_id = pdev->subsystem_vendor;
@@ -2865,7 +2865,7 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2865#ifdef CCISS_DEBUG 2865#ifdef CCISS_DEBUG
2866 printk("address 0 = %x\n", c->paddr); 2866 printk("address 0 = %x\n", c->paddr);
2867#endif /* CCISS_DEBUG */ 2867#endif /* CCISS_DEBUG */
2868 c->vaddr = remap_pci_mem(c->paddr, 200); 2868 c->vaddr = remap_pci_mem(c->paddr, 0x250);
2869 2869
2870 /* Wait for the board to become ready. (PCI hotplug needs this.) 2870 /* Wait for the board to become ready. (PCI hotplug needs this.)
2871 * We poll for up to 120 secs, once per 100ms. */ 2871 * We poll for up to 120 secs, once per 100ms. */
@@ -3004,11 +3004,12 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
3004 } 3004 }
3005 return 0; 3005 return 0;
3006 3006
3007 err_out_free_res: 3007err_out_free_res:
3008 /*
3009 * Deliberately omit pci_disable_device(): it does something nasty to
3010 * Smart Array controllers that pci_enable_device does not undo
3011 */
3008 pci_release_regions(pdev); 3012 pci_release_regions(pdev);
3009
3010 err_out_disable_pdev:
3011 pci_disable_device(pdev);
3012 return err; 3013 return err;
3013} 3014}
3014 3015
@@ -3382,8 +3383,11 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3382 if (drv->queue) 3383 if (drv->queue)
3383 blk_cleanup_queue(drv->queue); 3384 blk_cleanup_queue(drv->queue);
3384 } 3385 }
3386 /*
3387 * Deliberately omit pci_disable_device(): it does something nasty to
3388 * Smart Array controllers that pci_enable_device does not undo
3389 */
3385 pci_release_regions(pdev); 3390 pci_release_regions(pdev);
3386 pci_disable_device(pdev);
3387 pci_set_drvdata(pdev, NULL); 3391 pci_set_drvdata(pdev, NULL);
3388 free_hba(i); 3392 free_hba(i);
3389 return -1; 3393 return -1;
@@ -3452,8 +3456,11 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
3452#ifdef CONFIG_CISS_SCSI_TAPE 3456#ifdef CONFIG_CISS_SCSI_TAPE
3453 kfree(hba[i]->scsi_rejects.complete); 3457 kfree(hba[i]->scsi_rejects.complete);
3454#endif 3458#endif
3459 /*
3460 * Deliberately omit pci_disable_device(): it does something nasty to
3461 * Smart Array controllers that pci_enable_device does not undo
3462 */
3455 pci_release_regions(pdev); 3463 pci_release_regions(pdev);
3456 pci_disable_device(pdev);
3457 pci_set_drvdata(pdev, NULL); 3464 pci_set_drvdata(pdev, NULL);
3458 free_hba(i); 3465 free_hba(i);
3459} 3466}
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index d5f519ebbc08..b94cd1c32131 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -1625,7 +1625,7 @@ static void start_fwbk(int ctlr)
1625 " processing\n"); 1625 " processing\n");
1626 /* Command does not return anything, but idasend command needs a 1626 /* Command does not return anything, but idasend command needs a
1627 buffer */ 1627 buffer */
1628 id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL); 1628 id_ctlr_buf = kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1629 if(id_ctlr_buf==NULL) 1629 if(id_ctlr_buf==NULL)
1630 { 1630 {
1631 printk(KERN_WARNING "cpqarray: Out of memory. " 1631 printk(KERN_WARNING "cpqarray: Out of memory. "
@@ -1660,14 +1660,14 @@ static void getgeometry(int ctlr)
1660 1660
1661 info_p->log_drv_map = 0; 1661 info_p->log_drv_map = 0;
1662 1662
1663 id_ldrive = (id_log_drv_t *)kmalloc(sizeof(id_log_drv_t), GFP_KERNEL); 1663 id_ldrive = kmalloc(sizeof(id_log_drv_t), GFP_KERNEL);
1664 if(id_ldrive == NULL) 1664 if(id_ldrive == NULL)
1665 { 1665 {
1666 printk( KERN_ERR "cpqarray: out of memory.\n"); 1666 printk( KERN_ERR "cpqarray: out of memory.\n");
1667 return; 1667 return;
1668 } 1668 }
1669 1669
1670 id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL); 1670 id_ctlr_buf = kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1671 if(id_ctlr_buf == NULL) 1671 if(id_ctlr_buf == NULL)
1672 { 1672 {
1673 kfree(id_ldrive); 1673 kfree(id_ldrive);
@@ -1675,7 +1675,7 @@ static void getgeometry(int ctlr)
1675 return; 1675 return;
1676 } 1676 }
1677 1677
1678 id_lstatus_buf = (sense_log_drv_stat_t *)kmalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL); 1678 id_lstatus_buf = kmalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
1679 if(id_lstatus_buf == NULL) 1679 if(id_lstatus_buf == NULL)
1680 { 1680 {
1681 kfree(id_ctlr_buf); 1681 kfree(id_ctlr_buf);
@@ -1684,7 +1684,7 @@ static void getgeometry(int ctlr)
1684 return; 1684 return;
1685 } 1685 }
1686 1686
1687 sense_config_buf = (config_t *)kmalloc(sizeof(config_t), GFP_KERNEL); 1687 sense_config_buf = kmalloc(sizeof(config_t), GFP_KERNEL);
1688 if(sense_config_buf == NULL) 1688 if(sense_config_buf == NULL)
1689 { 1689 {
1690 kfree(id_lstatus_buf); 1690 kfree(id_lstatus_buf);
diff --git a/drivers/block/swim_iop.c b/drivers/block/swim_iop.c
deleted file mode 100644
index ed7b06cf3e68..000000000000
--- a/drivers/block/swim_iop.c
+++ /dev/null
@@ -1,578 +0,0 @@
1/*
2 * Driver for the SWIM (Super Woz Integrated Machine) IOP
3 * floppy controller on the Macintosh IIfx and Quadra 900/950
4 *
5 * Written by Joshua M. Thompson (funaho@jurai.org)
6 * based on the SWIM3 driver (c) 1996 by Paul Mackerras.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * 1999-06-12 (jmt) - Initial implementation.
14 */
15
16/*
17 * -------------------
18 * Theory of Operation
19 * -------------------
20 *
21 * Since the SWIM IOP is message-driven we implement a simple request queue
22 * system. One outstanding request may be queued at any given time (this is
23 * an IOP limitation); only when that request has completed can a new request
24 * be sent.
25 */
26
27#include <linux/stddef.h>
28#include <linux/kernel.h>
29#include <linux/sched.h>
30#include <linux/timer.h>
31#include <linux/delay.h>
32#include <linux/fd.h>
33#include <linux/ioctl.h>
34#include <linux/blkdev.h>
35#include <asm/io.h>
36#include <asm/uaccess.h>
37#include <asm/mac_iop.h>
38#include <asm/swim_iop.h>
39
40#define DRIVER_VERSION "Version 0.1 (1999-06-12)"
41
42#define MAX_FLOPPIES 4
43
44enum swim_state {
45 idle,
46 available,
47 revalidating,
48 transferring,
49 ejecting
50};
51
52struct floppy_state {
53 enum swim_state state;
54 int drive_num; /* device number */
55 int secpercyl; /* disk geometry information */
56 int secpertrack;
57 int total_secs;
58 int write_prot; /* 1 if write-protected, 0 if not, -1 dunno */
59 int ref_count;
60 struct timer_list timeout;
61 int ejected;
62 struct wait_queue *wait;
63 int wanted;
64 int timeout_pending;
65};
66
67struct swim_iop_req {
68 int sent;
69 int complete;
70 __u8 command[32];
71 struct floppy_state *fs;
72 void (*done)(struct swim_iop_req *);
73};
74
75static struct swim_iop_req *current_req;
76static int floppy_count;
77
78static struct floppy_state floppy_states[MAX_FLOPPIES];
79static DEFINE_SPINLOCK(swim_iop_lock);
80
81#define CURRENT elv_next_request(swim_queue)
82
83static char *drive_names[7] = {
84 "not installed", /* DRV_NONE */
85 "unknown (1)", /* DRV_UNKNOWN */
86 "a 400K drive", /* DRV_400K */
87 "an 800K drive" /* DRV_800K */
88 "unknown (4)", /* ???? */
89 "an FDHD", /* DRV_FDHD */
90 "unknown (6)", /* ???? */
91 "an Apple HD20" /* DRV_HD20 */
92};
93
94int swimiop_init(void);
95static void swimiop_init_request(struct swim_iop_req *);
96static int swimiop_send_request(struct swim_iop_req *);
97static void swimiop_receive(struct iop_msg *);
98static void swimiop_status_update(int, struct swim_drvstatus *);
99static int swimiop_eject(struct floppy_state *fs);
100
101static int floppy_ioctl(struct inode *inode, struct file *filp,
102 unsigned int cmd, unsigned long param);
103static int floppy_open(struct inode *inode, struct file *filp);
104static int floppy_release(struct inode *inode, struct file *filp);
105static int floppy_check_change(struct gendisk *disk);
106static int floppy_revalidate(struct gendisk *disk);
107static int grab_drive(struct floppy_state *fs, enum swim_state state,
108 int interruptible);
109static void release_drive(struct floppy_state *fs);
110static void set_timeout(struct floppy_state *fs, int nticks,
111 void (*proc)(unsigned long));
112static void fd_request_timeout(unsigned long);
113static void do_fd_request(request_queue_t * q);
114static void start_request(struct floppy_state *fs);
115
116static struct block_device_operations floppy_fops = {
117 .open = floppy_open,
118 .release = floppy_release,
119 .ioctl = floppy_ioctl,
120 .media_changed = floppy_check_change,
121 .revalidate_disk= floppy_revalidate,
122};
123
124static struct request_queue *swim_queue;
125/*
126 * SWIM IOP initialization
127 */
128
129int swimiop_init(void)
130{
131 volatile struct swim_iop_req req;
132 struct swimcmd_status *cmd = (struct swimcmd_status *) &req.command[0];
133 struct swim_drvstatus *ds = &cmd->status;
134 struct floppy_state *fs;
135 int i;
136
137 current_req = NULL;
138 floppy_count = 0;
139
140 if (!iop_ism_present)
141 return -ENODEV;
142
143 if (register_blkdev(FLOPPY_MAJOR, "fd"))
144 return -EBUSY;
145
146 swim_queue = blk_init_queue(do_fd_request, &swim_iop_lock);
147 if (!swim_queue) {
148 unregister_blkdev(FLOPPY_MAJOR, "fd");
149 return -ENOMEM;
150 }
151
152 printk("SWIM-IOP: %s by Joshua M. Thompson (funaho@jurai.org)\n",
153 DRIVER_VERSION);
154
155 if (iop_listen(SWIM_IOP, SWIM_CHAN, swimiop_receive, "SWIM") != 0) {
156 printk(KERN_ERR "SWIM-IOP: IOP channel already in use; can't initialize.\n");
157 unregister_blkdev(FLOPPY_MAJOR, "fd");
158 blk_cleanup_queue(swim_queue);
159 return -EBUSY;
160 }
161
162 printk(KERN_ERR "SWIM_IOP: probing for installed drives.\n");
163
164 for (i = 0 ; i < MAX_FLOPPIES ; i++) {
165 memset(&floppy_states[i], 0, sizeof(struct floppy_state));
166 fs = &floppy_states[floppy_count];
167
168 swimiop_init_request(&req);
169 cmd->code = CMD_STATUS;
170 cmd->drive_num = i + 1;
171 if (swimiop_send_request(&req) != 0) continue;
172 while (!req.complete);
173 if (cmd->error != 0) {
174 printk(KERN_ERR "SWIM-IOP: probe on drive %d returned error %d\n", i, (uint) cmd->error);
175 continue;
176 }
177 if (ds->installed != 0x01) continue;
178 printk("SWIM-IOP: drive %d is %s (%s, %s, %s, %s)\n", i,
179 drive_names[ds->info.type],
180 ds->info.external? "ext" : "int",
181 ds->info.scsi? "scsi" : "floppy",
182 ds->info.fixed? "fixed" : "removable",
183 ds->info.secondary? "secondary" : "primary");
184 swimiop_status_update(floppy_count, ds);
185 fs->state = idle;
186
187 init_timer(&fs->timeout);
188 floppy_count++;
189 }
190 printk("SWIM-IOP: detected %d installed drives.\n", floppy_count);
191
192 for (i = 0; i < floppy_count; i++) {
193 struct gendisk *disk = alloc_disk(1);
194 if (!disk)
195 continue;
196 disk->major = FLOPPY_MAJOR;
197 disk->first_minor = i;
198 disk->fops = &floppy_fops;
199 sprintf(disk->disk_name, "fd%d", i);
200 disk->private_data = &floppy_states[i];
201 disk->queue = swim_queue;
202 set_capacity(disk, 2880 * 2);
203 add_disk(disk);
204 }
205
206 return 0;
207}
208
209static void swimiop_init_request(struct swim_iop_req *req)
210{
211 req->sent = 0;
212 req->complete = 0;
213 req->done = NULL;
214}
215
216static int swimiop_send_request(struct swim_iop_req *req)
217{
218 unsigned long flags;
219 int err;
220
221 /* It's doubtful an interrupt routine would try to send */
222 /* a SWIM request, but I'd rather play it safe here. */
223
224 local_irq_save(flags);
225
226 if (current_req != NULL) {
227 local_irq_restore(flags);
228 return -ENOMEM;
229 }
230
231 current_req = req;
232
233 /* Interrupts should be back on for iop_send_message() */
234
235 local_irq_restore(flags);
236
237 err = iop_send_message(SWIM_IOP, SWIM_CHAN, (void *) req,
238 sizeof(req->command), (__u8 *) &req->command[0],
239 swimiop_receive);
240
241 /* No race condition here; we own current_req at this point */
242
243 if (err) {
244 current_req = NULL;
245 } else {
246 req->sent = 1;
247 }
248 return err;
249}
250
251/*
252 * Receive a SWIM message from the IOP.
253 *
254 * This will be called in two cases:
255 *
256 * 1. A message has been successfully sent to the IOP.
257 * 2. An unsolicited message was received from the IOP.
258 */
259
260void swimiop_receive(struct iop_msg *msg)
261{
262 struct swim_iop_req *req;
263 struct swimmsg_status *sm;
264 struct swim_drvstatus *ds;
265
266 req = current_req;
267
268 switch(msg->status) {
269 case IOP_MSGSTATUS_COMPLETE:
270 memcpy(&req->command[0], &msg->reply[0], sizeof(req->command));
271 req->complete = 1;
272 if (req->done) (*req->done)(req);
273 current_req = NULL;
274 break;
275 case IOP_MSGSTATUS_UNSOL:
276 sm = (struct swimmsg_status *) &msg->message[0];
277 ds = &sm->status;
278 swimiop_status_update(sm->drive_num, ds);
279 iop_complete_message(msg);
280 break;
281 }
282}
283
284static void swimiop_status_update(int drive_num, struct swim_drvstatus *ds)
285{
286 struct floppy_state *fs = &floppy_states[drive_num];
287
288 fs->write_prot = (ds->write_prot == 0x80);
289 if ((ds->disk_in_drive != 0x01) && (ds->disk_in_drive != 0x02)) {
290 fs->ejected = 1;
291 } else {
292 fs->ejected = 0;
293 }
294 switch(ds->info.type) {
295 case DRV_400K:
296 fs->secpercyl = 10;
297 fs->secpertrack = 10;
298 fs->total_secs = 800;
299 break;
300 case DRV_800K:
301 fs->secpercyl = 20;
302 fs->secpertrack = 10;
303 fs->total_secs = 1600;
304 break;
305 case DRV_FDHD:
306 fs->secpercyl = 36;
307 fs->secpertrack = 18;
308 fs->total_secs = 2880;
309 break;
310 default:
311 fs->secpercyl = 0;
312 fs->secpertrack = 0;
313 fs->total_secs = 0;
314 break;
315 }
316}
317
318static int swimiop_eject(struct floppy_state *fs)
319{
320 int err, n;
321 struct swim_iop_req req;
322 struct swimcmd_eject *cmd = (struct swimcmd_eject *) &req.command[0];
323
324 err = grab_drive(fs, ejecting, 1);
325 if (err) return err;
326
327 swimiop_init_request(&req);
328 cmd->code = CMD_EJECT;
329 cmd->drive_num = fs->drive_num;
330 err = swimiop_send_request(&req);
331 if (err) {
332 release_drive(fs);
333 return err;
334 }
335 for (n = 2*HZ; n > 0; --n) {
336 if (req.complete) break;
337 if (signal_pending(current)) {
338 err = -EINTR;
339 break;
340 }
341 schedule_timeout_interruptible(1);
342 }
343 release_drive(fs);
344 return cmd->error;
345}
346
347static struct floppy_struct floppy_type =
348 { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */
349
350static int floppy_ioctl(struct inode *inode, struct file *filp,
351 unsigned int cmd, unsigned long param)
352{
353 struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
354 int err;
355
356 if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
357 return -EPERM;
358
359 switch (cmd) {
360 case FDEJECT:
361 if (fs->ref_count != 1)
362 return -EBUSY;
363 err = swimiop_eject(fs);
364 return err;
365 case FDGETPRM:
366 if (copy_to_user((void *) param, (void *) &floppy_type,
367 sizeof(struct floppy_struct)))
368 return -EFAULT;
369 return 0;
370 }
371 return -ENOTTY;
372}
373
374static int floppy_open(struct inode *inode, struct file *filp)
375{
376 struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
377
378 if (fs->ref_count == -1 || filp->f_flags & O_EXCL)
379 return -EBUSY;
380
381 if ((filp->f_flags & O_NDELAY) == 0 && (filp->f_mode & 3)) {
382 check_disk_change(inode->i_bdev);
383 if (fs->ejected)
384 return -ENXIO;
385 }
386
387 if ((filp->f_mode & 2) && fs->write_prot)
388 return -EROFS;
389
390 if (filp->f_flags & O_EXCL)
391 fs->ref_count = -1;
392 else
393 ++fs->ref_count;
394
395 return 0;
396}
397
398static int floppy_release(struct inode *inode, struct file *filp)
399{
400 struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
401 if (fs->ref_count > 0)
402 fs->ref_count--;
403 return 0;
404}
405
406static int floppy_check_change(struct gendisk *disk)
407{
408 struct floppy_state *fs = disk->private_data;
409 return fs->ejected;
410}
411
412static int floppy_revalidate(struct gendisk *disk)
413{
414 struct floppy_state *fs = disk->private_data;
415 grab_drive(fs, revalidating, 0);
416 /* yadda, yadda */
417 release_drive(fs);
418 return 0;
419}
420
421static void floppy_off(unsigned int nr)
422{
423}
424
425static int grab_drive(struct floppy_state *fs, enum swim_state state,
426 int interruptible)
427{
428 unsigned long flags;
429
430 local_irq_save(flags);
431 if (fs->state != idle) {
432 ++fs->wanted;
433 while (fs->state != available) {
434 if (interruptible && signal_pending(current)) {
435 --fs->wanted;
436 local_irq_restore(flags);
437 return -EINTR;
438 }
439 interruptible_sleep_on(&fs->wait);
440 }
441 --fs->wanted;
442 }
443 fs->state = state;
444 local_irq_restore(flags);
445 return 0;
446}
447
448static void release_drive(struct floppy_state *fs)
449{
450 unsigned long flags;
451
452 local_irq_save(flags);
453 fs->state = idle;
454 start_request(fs);
455 local_irq_restore(flags);
456}
457
458static void set_timeout(struct floppy_state *fs, int nticks,
459 void (*proc)(unsigned long))
460{
461 unsigned long flags;
462
463 local_irq_save(flags);
464 if (fs->timeout_pending)
465 del_timer(&fs->timeout);
466 init_timer(&fs->timeout);
467 fs->timeout.expires = jiffies + nticks;
468 fs->timeout.function = proc;
469 fs->timeout.data = (unsigned long) fs;
470 add_timer(&fs->timeout);
471 fs->timeout_pending = 1;
472 local_irq_restore(flags);
473}
474
475static void do_fd_request(request_queue_t * q)
476{
477 int i;
478
479 for (i = 0 ; i < floppy_count ; i++) {
480 start_request(&floppy_states[i]);
481 }
482}
483
484static void fd_request_complete(struct swim_iop_req *req)
485{
486 struct floppy_state *fs = req->fs;
487 struct swimcmd_rw *cmd = (struct swimcmd_rw *) &req->command[0];
488
489 del_timer(&fs->timeout);
490 fs->timeout_pending = 0;
491 fs->state = idle;
492 if (cmd->error) {
493 printk(KERN_ERR "SWIM-IOP: error %d on read/write request.\n", cmd->error);
494 end_request(CURRENT, 0);
495 } else {
496 CURRENT->sector += cmd->num_blocks;
497 CURRENT->current_nr_sectors -= cmd->num_blocks;
498 if (CURRENT->current_nr_sectors <= 0) {
499 end_request(CURRENT, 1);
500 return;
501 }
502 }
503 start_request(fs);
504}
505
506static void fd_request_timeout(unsigned long data)
507{
508 struct floppy_state *fs = (struct floppy_state *) data;
509
510 fs->timeout_pending = 0;
511 end_request(CURRENT, 0);
512 fs->state = idle;
513}
514
515static void start_request(struct floppy_state *fs)
516{
517 volatile struct swim_iop_req req;
518 struct swimcmd_rw *cmd = (struct swimcmd_rw *) &req.command[0];
519
520 if (fs->state == idle && fs->wanted) {
521 fs->state = available;
522 wake_up(&fs->wait);
523 return;
524 }
525 while (CURRENT && fs->state == idle) {
526 if (CURRENT->bh && !buffer_locked(CURRENT->bh))
527 panic("floppy: block not locked");
528#if 0
529 printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n",
530 CURRENT->rq_disk->disk_name, CURRENT->cmd,
531 CURRENT->sector, CURRENT->nr_sectors, CURRENT->buffer);
532 printk(" errors=%d current_nr_sectors=%ld\n",
533 CURRENT->errors, CURRENT->current_nr_sectors);
534#endif
535
536 if (CURRENT->sector < 0 || CURRENT->sector >= fs->total_secs) {
537 end_request(CURRENT, 0);
538 continue;
539 }
540 if (CURRENT->current_nr_sectors == 0) {
541 end_request(CURRENT, 1);
542 continue;
543 }
544 if (fs->ejected) {
545 end_request(CURRENT, 0);
546 continue;
547 }
548
549 swimiop_init_request(&req);
550 req.fs = fs;
551 req.done = fd_request_complete;
552
553 if (CURRENT->cmd == WRITE) {
554 if (fs->write_prot) {
555 end_request(CURRENT, 0);
556 continue;
557 }
558 cmd->code = CMD_WRITE;
559 } else {
560 cmd->code = CMD_READ;
561
562 }
563 cmd->drive_num = fs->drive_num;
564 cmd->buffer = CURRENT->buffer;
565 cmd->first_block = CURRENT->sector;
566 cmd->num_blocks = CURRENT->current_nr_sectors;
567
568 if (swimiop_send_request(&req)) {
569 end_request(CURRENT, 0);
570 continue;
571 }
572
573 set_timeout(fs, HZ*CURRENT->current_nr_sectors,
574 fd_request_timeout);
575
576 fs->state = transferring;
577 }
578}
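
The set_timeout() helper above deletes any pending timer, re-runs init_timer(), and add_timer()s it again on every call. On this kernel generation the same re-arm is more commonly written with mod_timer(), which safely updates a timer whether or not it is already pending. A minimal sketch of that alternative, assuming the floppy_state fields used in the listing above (timeout, timeout_pending); it is an illustration, not the driver's actual code:

#include <linux/timer.h>
#include <linux/jiffies.h>

/* One-time setup, e.g. next to the rest of the floppy_state init. */
static void fd_timer_init(struct floppy_state *fs)
{
        init_timer(&fs->timeout);
        fs->timeout.data = (unsigned long) fs;
}

/* Re-arm the timeout; mod_timer() removes a pending timer itself. */
static void fd_set_timeout(struct floppy_state *fs, int nticks,
                           void (*proc)(unsigned long))
{
        fs->timeout.function = proc;
        mod_timer(&fs->timeout, jiffies + nticks);
        fs->timeout_pending = 1;
}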
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 2df5cf4ec743..e4a2f8f3a1d7 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1810,7 +1810,7 @@ static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s)
1810 1810
1811 size = sizeof(s->disckey.value) + 4; 1811 size = sizeof(s->disckey.value) + 4;
1812 1812
1813 if ((buf = (u_char *) kmalloc(size, GFP_KERNEL)) == NULL) 1813 if ((buf = kmalloc(size, GFP_KERNEL)) == NULL)
1814 return -ENOMEM; 1814 return -ENOMEM;
1815 1815
1816 init_cdrom_command(&cgc, buf, size, CGC_DATA_READ); 1816 init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
@@ -1861,7 +1861,7 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s)
1861 1861
1862 size = sizeof(s->manufact.value) + 4; 1862 size = sizeof(s->manufact.value) + 4;
1863 1863
1864 if ((buf = (u_char *) kmalloc(size, GFP_KERNEL)) == NULL) 1864 if ((buf = kmalloc(size, GFP_KERNEL)) == NULL)
1865 return -ENOMEM; 1865 return -ENOMEM;
1866 1866
1867 init_cdrom_command(&cgc, buf, size, CGC_DATA_READ); 1867 init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
@@ -2849,7 +2849,7 @@ static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
2849 /* FIXME: we need upper bound checking, too!! */ 2849 /* FIXME: we need upper bound checking, too!! */
2850 if (lba < 0) 2850 if (lba < 0)
2851 return -EINVAL; 2851 return -EINVAL;
2852 cgc.buffer = (char *) kmalloc(blocksize, GFP_KERNEL); 2852 cgc.buffer = kmalloc(blocksize, GFP_KERNEL);
2853 if (cgc.buffer == NULL) 2853 if (cgc.buffer == NULL)
2854 return -ENOMEM; 2854 return -ENOMEM;
2855 memset(&sense, 0, sizeof(sense)); 2855 memset(&sense, 0, sizeof(sense));
@@ -3031,7 +3031,7 @@ static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
3031 int size = sizeof(dvd_struct); 3031 int size = sizeof(dvd_struct);
3032 if (!CDROM_CAN(CDC_DVD)) 3032 if (!CDROM_CAN(CDC_DVD))
3033 return -ENOSYS; 3033 return -ENOSYS;
3034 if ((s = (dvd_struct *) kmalloc(size, GFP_KERNEL)) == NULL) 3034 if ((s = kmalloc(size, GFP_KERNEL)) == NULL)
3035 return -ENOMEM; 3035 return -ENOMEM;
3036 cdinfo(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n"); 3036 cdinfo(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n");
3037 if (copy_from_user(s, (dvd_struct __user *)arg, size)) { 3037 if (copy_from_user(s, (dvd_struct __user *)arg, size)) {
diff --git a/drivers/cdrom/cm206.c b/drivers/cdrom/cm206.c
index e6d8e9ededea..b6c61bbb20e1 100644
--- a/drivers/cdrom/cm206.c
+++ b/drivers/cdrom/cm206.c
@@ -1420,7 +1420,7 @@ int __init cm206_init(void)
1420 return -EIO; 1420 return -EIO;
1421 } 1421 }
1422 printk(" adapter at 0x%x", cm206_base); 1422 printk(" adapter at 0x%x", cm206_base);
1423 cd = (struct cm206_struct *) kmalloc(size, GFP_KERNEL); 1423 cd = kmalloc(size, GFP_KERNEL);
1424 if (!cd) 1424 if (!cd)
1425 goto out_base; 1425 goto out_base;
1426 /* Now we have found the adaptor card, try to reset it. As we have 1426 /* Now we have found the adaptor card, try to reset it. As we have
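
Most of the kmalloc() hunks in this series (cdrom.c and cm206.c above, plus several char drivers below) only remove the cast on the return value. kmalloc() returns void *, which converts implicitly to any object pointer type in C, so the cast is noise and can mask a missing prototype. A small self-contained sketch of the idiom; struct foo and alloc_foo() are illustrative names, not kernel code:

#include <linux/slab.h>
#include <linux/string.h>

struct foo {
        int bar;
};

static struct foo *alloc_foo(void)
{
        /* Old style: p = (struct foo *) kmalloc(...); the cast is redundant. */
        struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return NULL;
        memset(p, 0, sizeof(*p));       /* or use kzalloc() and skip the memset */
        return p;
}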
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index b10f4d8fdc7f..9e43e39dc35c 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -97,7 +97,7 @@ config SERIAL_NONSTANDARD
97 97
98config COMPUTONE 98config COMPUTONE
99 tristate "Computone IntelliPort Plus serial support" 99 tristate "Computone IntelliPort Plus serial support"
100 depends on SERIAL_NONSTANDARD 100 depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
101 ---help--- 101 ---help---
102 This driver supports the entire family of Intelliport II/Plus 102 This driver supports the entire family of Intelliport II/Plus
103 controllers with the exception of the MicroChannel controllers and 103 controllers with the exception of the MicroChannel controllers and
@@ -203,7 +203,7 @@ config MOXA_SMARTIO
203 203
204config MOXA_SMARTIO_NEW 204config MOXA_SMARTIO_NEW
205 tristate "Moxa SmartIO support v. 2.0 (EXPERIMENTAL)" 205 tristate "Moxa SmartIO support v. 2.0 (EXPERIMENTAL)"
206 depends on SERIAL_NONSTANDARD 206 depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA)
207 help 207 help
208 Say Y here if you have a Moxa SmartIO multiport serial card and/or 208 Say Y here if you have a Moxa SmartIO multiport serial card and/or
209 want to help develop a new version of this driver. 209 want to help develop a new version of this driver.
@@ -218,7 +218,7 @@ config MOXA_SMARTIO_NEW
218 218
219config ISI 219config ISI
220 tristate "Multi-Tech multiport card support (EXPERIMENTAL)" 220 tristate "Multi-Tech multiport card support (EXPERIMENTAL)"
221 depends on SERIAL_NONSTANDARD 221 depends on SERIAL_NONSTANDARD && PCI
222 select FW_LOADER 222 select FW_LOADER
223 help 223 help
224 This is a driver for the Multi-Tech cards which provide several 224 This is a driver for the Multi-Tech cards which provide several
@@ -312,7 +312,7 @@ config SPECIALIX_RTSCTS
312 312
313config SX 313config SX
314 tristate "Specialix SX (and SI) card support" 314 tristate "Specialix SX (and SI) card support"
315 depends on SERIAL_NONSTANDARD 315 depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA)
316 help 316 help
317 This is a driver for the SX and SI multiport serial cards. 317 This is a driver for the SX and SI multiport serial cards.
318 Please read the file <file:Documentation/sx.txt> for details. 318 Please read the file <file:Documentation/sx.txt> for details.
@@ -867,7 +867,7 @@ config SONYPI
867 867
868config TANBAC_TB0219 868config TANBAC_TB0219
869 tristate "TANBAC TB0219 base board support" 869 tristate "TANBAC TB0219 base board support"
870 depends TANBAC_TB022X 870 depends on TANBAC_TB022X
871 select GPIO_VR41XX 871 select GPIO_VR41XX
872 872
873source "drivers/char/agp/Kconfig" 873source "drivers/char/agp/Kconfig"
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index c603bf291580..a9f9c48c2424 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -86,7 +86,7 @@ config AGP_NVIDIA
86 86
87config AGP_SIS 87config AGP_SIS
88 tristate "SiS chipset support" 88 tristate "SiS chipset support"
89 depends on AGP 89 depends on AGP && X86
90 help 90 help
91 This option gives you AGP support for the GLX component of 91 This option gives you AGP support for the GLX component of
92 X on Silicon Integrated Systems [SiS] chipsets. 92 X on Silicon Integrated Systems [SiS] chipsets.
@@ -103,7 +103,7 @@ config AGP_SWORKS
103 103
104config AGP_VIA 104config AGP_VIA
105 tristate "VIA chipset support" 105 tristate "VIA chipset support"
106 depends on AGP 106 depends on AGP && X86
107 help 107 help
108 This option gives you AGP support for the GLX component of 108 This option gives you AGP support for the GLX component of
109 X on VIA MVP3/Apollo Pro chipsets. 109 X on VIA MVP3/Apollo Pro chipsets.
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 5ff457b41efb..883a36a27833 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -419,6 +419,31 @@ static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
419 *requested_mode &= ~AGP2_RESERVED_MASK; 419 *requested_mode &= ~AGP2_RESERVED_MASK;
420 } 420 }
421 421
422 /*
423 * Some dumb bridges are programmed to disobey the AGP2 spec.
 424 * This is more likely a BIOS misprogramming than a power-on default, or
 425 * it would be a lot more common.
426 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
427 * AGPv2 spec 6.1.9 states:
428 * The RATE field indicates the data transfer rates supported by this
429 * device. A.G.P. devices must report all that apply.
430 * Fix them up as best we can.
431 */
432 switch (*bridge_agpstat & 7) {
433 case 4:
434 *bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
 435 printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
436 "Fixing up support for x2 & x1\n");
437 break;
438 case 2:
439 *bridge_agpstat |= AGPSTAT2_1X;
 440 printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
441 "Fixing up support for x1\n");
442 break;
443 default:
444 break;
445 }
446
422 /* Check the speed bits make sense. Only one should be set. */ 447 /* Check the speed bits make sense. Only one should be set. */
423 tmp = *requested_mode & 7; 448 tmp = *requested_mode & 7;
424 switch (tmp) { 449 switch (tmp) {
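
The generic.c change above works around bridges that advertise only their fastest AGP 2.x rate. The RATE field is the low three bits of the status register, one bit per speed, and the spec requires a device to report every rate it supports, so the fix-up ORs the lower bits back in. A stand-alone sketch of that logic; the AGPSTAT2_* values below mirror the usual RATE bit encoding and are spelled out here only to keep the example self-contained:

#include <linux/types.h>

#define AGPSTAT2_1X     0x1     /* RATE bit 0 */
#define AGPSTAT2_2X     0x2     /* RATE bit 1 */
#define AGPSTAT2_4X     0x4     /* RATE bit 2 */

/* Supporting 4x implies 2x and 1x; supporting 2x implies 1x. */
static u32 agp2_fixup_rates(u32 agpstat)
{
        switch (agpstat & 7) {
        case AGPSTAT2_4X:
                agpstat |= AGPSTAT2_2X | AGPSTAT2_1X;
                break;
        case AGPSTAT2_2X:
                agpstat |= AGPSTAT2_1X;
                break;
        default:
                break;
        }
        return agpstat;
}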
diff --git a/drivers/char/consolemap.c b/drivers/char/consolemap.c
index 04a12027a740..b99b7561260d 100644
--- a/drivers/char/consolemap.c
+++ b/drivers/char/consolemap.c
@@ -443,7 +443,7 @@ int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui)
443 p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc; 443 p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
444 if (p && p->readonly) return -EIO; 444 if (p && p->readonly) return -EIO;
445 if (!p || --p->refcount) { 445 if (!p || --p->refcount) {
446 q = (struct uni_pagedir *)kmalloc(sizeof(*p), GFP_KERNEL); 446 q = kmalloc(sizeof(*p), GFP_KERNEL);
447 if (!q) { 447 if (!q) {
448 if (p) p->refcount++; 448 if (p) p->refcount++;
449 return -ENOMEM; 449 return -ENOMEM;
diff --git a/drivers/char/lcd.c b/drivers/char/lcd.c
index da601fd6c07a..d649abbf0857 100644
--- a/drivers/char/lcd.c
+++ b/drivers/char/lcd.c
@@ -459,7 +459,7 @@ static int lcd_ioctl(struct inode *inode, struct file *file,
459 (&display, (struct lcd_display *) arg, 459 (&display, (struct lcd_display *) arg,
460 sizeof(struct lcd_display))) 460 sizeof(struct lcd_display)))
461 return -EFAULT; 461 return -EFAULT;
462 rom = (unsigned char *) kmalloc((128), GFP_ATOMIC); 462 rom = kmalloc((128), GFP_ATOMIC);
463 if (rom == NULL) { 463 if (rom == NULL) {
464 printk(KERN_ERR LCD "kmalloc() failed in %s\n", 464 printk(KERN_ERR LCD "kmalloc() failed in %s\n",
465 __FUNCTION__); 465 __FUNCTION__);
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index b70b5388b5a8..b51d08be0bcf 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -525,7 +525,7 @@ static int lp_open(struct inode * inode, struct file * file)
525 return -EIO; 525 return -EIO;
526 } 526 }
527 } 527 }
528 lp_table[minor].lp_buffer = (char *) kmalloc(LP_BUFFER_SIZE, GFP_KERNEL); 528 lp_table[minor].lp_buffer = kmalloc(LP_BUFFER_SIZE, GFP_KERNEL);
529 if (!lp_table[minor].lp_buffer) { 529 if (!lp_table[minor].lp_buffer) {
530 LP_F(minor) &= ~LP_BUSY; 530 LP_F(minor) &= ~LP_BUSY;
531 return -ENOMEM; 531 return -ENOMEM;
diff --git a/drivers/char/mxser_new.c b/drivers/char/mxser_new.c
index efa8076c33e0..cd989dce7c53 100644
--- a/drivers/char/mxser_new.c
+++ b/drivers/char/mxser_new.c
@@ -315,6 +315,7 @@ static struct mxser_mon_ext mon_data_ext;
315static int mxser_set_baud_method[MXSER_PORTS + 1]; 315static int mxser_set_baud_method[MXSER_PORTS + 1];
316static spinlock_t gm_lock; 316static spinlock_t gm_lock;
317 317
318#ifdef CONFIG_PCI
318static int CheckIsMoxaMust(int io) 319static int CheckIsMoxaMust(int io)
319{ 320{
320 u8 oldmcr, hwid; 321 u8 oldmcr, hwid;
@@ -337,6 +338,7 @@ static int CheckIsMoxaMust(int io)
337 } 338 }
338 return MOXA_OTHER_UART; 339 return MOXA_OTHER_UART;
339} 340}
341#endif
340 342
341static void process_txrx_fifo(struct mxser_port *info) 343static void process_txrx_fifo(struct mxser_port *info)
342{ 344{
@@ -2380,9 +2382,11 @@ static void mxser_release_res(struct mxser_board *brd, struct pci_dev *pdev,
2380 if (irq) 2382 if (irq)
2381 free_irq(brd->irq, brd); 2383 free_irq(brd->irq, brd);
2382 if (pdev != NULL) { /* PCI */ 2384 if (pdev != NULL) { /* PCI */
2385#ifdef CONFIG_PCI
2383 pci_release_region(pdev, 2); 2386 pci_release_region(pdev, 2);
2384 pci_release_region(pdev, 3); 2387 pci_release_region(pdev, 3);
2385 pci_dev_put(pdev); 2388 pci_dev_put(pdev);
2389#endif
2386 } else { 2390 } else {
2387 release_region(brd->ports[0].ioaddr, 8 * brd->info->nports); 2391 release_region(brd->ports[0].ioaddr, 8 * brd->info->nports);
2388 release_region(brd->vector, 1); 2392 release_region(brd->vector, 1);
@@ -2546,6 +2550,7 @@ static int __init mxser_get_ISA_conf(int cap, struct mxser_board *brd)
2546static int __devinit mxser_probe(struct pci_dev *pdev, 2550static int __devinit mxser_probe(struct pci_dev *pdev,
2547 const struct pci_device_id *ent) 2551 const struct pci_device_id *ent)
2548{ 2552{
2553#ifdef CONFIG_PCI
2549 struct mxser_board *brd; 2554 struct mxser_board *brd;
2550 unsigned int i, j; 2555 unsigned int i, j;
2551 unsigned long ioaddress; 2556 unsigned long ioaddress;
@@ -2644,6 +2649,9 @@ err_relio:
2644 brd->info = NULL; 2649 brd->info = NULL;
2645err: 2650err:
2646 return retval; 2651 return retval;
2652#else
2653 return -ENODEV;
2654#endif
2647} 2655}
2648 2656
2649static void __devexit mxser_remove(struct pci_dev *pdev) 2657static void __devexit mxser_remove(struct pci_dev *pdev)
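
The mxser_new.c hunks above (and the sx.c ones further down) wrap the PCI-only code paths in #ifdef CONFIG_PCI so the driver still builds for ISA/EISA-only configurations, with the PCI probe degrading to -ENODEV. A skeleton of that pattern; my_probe() and the elided body are placeholders rather than the driver's real code:

#include <linux/pci.h>
#include <linux/init.h>

static int __devinit my_probe(struct pci_dev *pdev,
                              const struct pci_device_id *ent)
{
#ifdef CONFIG_PCI
        int retval = pci_enable_device(pdev);

        if (retval)
                return retval;
        /* ... request regions, map BARs, set up the ports ... */
        return 0;
#else
        /* No PCI support configured: this probe can never succeed. */
        return -ENODEV;
#endif
}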
diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c
index 103d338f21e2..dc6d41841457 100644
--- a/drivers/char/n_r3964.c
+++ b/drivers/char/n_r3964.c
@@ -125,8 +125,8 @@ static void transmit_block(struct r3964_info *pInfo);
125static void receive_char(struct r3964_info *pInfo, const unsigned char c); 125static void receive_char(struct r3964_info *pInfo, const unsigned char c);
126static void receive_error(struct r3964_info *pInfo, const char flag); 126static void receive_error(struct r3964_info *pInfo, const char flag);
127static void on_timeout(unsigned long priv); 127static void on_timeout(unsigned long priv);
128static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg); 128static int enable_signals(struct r3964_info *pInfo, struct pid *pid, int arg);
129static int read_telegram(struct r3964_info *pInfo, pid_t pid, unsigned char __user *buf); 129static int read_telegram(struct r3964_info *pInfo, struct pid *pid, unsigned char __user *buf);
130static void add_msg(struct r3964_client_info *pClient, int msg_id, int arg, 130static void add_msg(struct r3964_client_info *pClient, int msg_id, int arg,
131 int error_code, struct r3964_block_header *pBlock); 131 int error_code, struct r3964_block_header *pBlock);
132static struct r3964_message* remove_msg(struct r3964_info *pInfo, 132static struct r3964_message* remove_msg(struct r3964_info *pInfo,
@@ -829,7 +829,7 @@ static void on_timeout(unsigned long priv)
829} 829}
830 830
831static struct r3964_client_info *findClient( 831static struct r3964_client_info *findClient(
832 struct r3964_info *pInfo, pid_t pid) 832 struct r3964_info *pInfo, struct pid *pid)
833{ 833{
834 struct r3964_client_info *pClient; 834 struct r3964_client_info *pClient;
835 835
@@ -843,7 +843,7 @@ static struct r3964_client_info *findClient(
843 return NULL; 843 return NULL;
844} 844}
845 845
846static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg) 846static int enable_signals(struct r3964_info *pInfo, struct pid *pid, int arg)
847{ 847{
848 struct r3964_client_info *pClient; 848 struct r3964_client_info *pClient;
849 struct r3964_client_info **ppClient; 849 struct r3964_client_info **ppClient;
@@ -858,7 +858,7 @@ static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg)
858 858
859 if(pClient->pid == pid) 859 if(pClient->pid == pid)
860 { 860 {
861 TRACE_PS("removing client %d from client list", pid); 861 TRACE_PS("removing client %d from client list", pid_nr(pid));
862 *ppClient = pClient->next; 862 *ppClient = pClient->next;
863 while(pClient->msg_count) 863 while(pClient->msg_count)
864 { 864 {
@@ -869,6 +869,7 @@ static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg)
869 TRACE_M("enable_signals - msg kfree %p",pMsg); 869 TRACE_M("enable_signals - msg kfree %p",pMsg);
870 } 870 }
871 } 871 }
872 put_pid(pClient->pid);
872 kfree(pClient); 873 kfree(pClient);
873 TRACE_M("enable_signals - kfree %p",pClient); 874 TRACE_M("enable_signals - kfree %p",pClient);
874 return 0; 875 return 0;
@@ -892,10 +893,10 @@ static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg)
892 if(pClient==NULL) 893 if(pClient==NULL)
893 return -ENOMEM; 894 return -ENOMEM;
894 895
895 TRACE_PS("add client %d to client list", pid); 896 TRACE_PS("add client %d to client list", pid_nr(pid));
896 spin_lock_init(&pClient->lock); 897 spin_lock_init(&pClient->lock);
897 pClient->sig_flags=arg; 898 pClient->sig_flags=arg;
898 pClient->pid = pid; 899 pClient->pid = get_pid(pid);
899 pClient->next=pInfo->firstClient; 900 pClient->next=pInfo->firstClient;
900 pClient->first_msg = NULL; 901 pClient->first_msg = NULL;
901 pClient->last_msg = NULL; 902 pClient->last_msg = NULL;
@@ -908,7 +909,7 @@ static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg)
908 return 0; 909 return 0;
909} 910}
910 911
911static int read_telegram(struct r3964_info *pInfo, pid_t pid, unsigned char __user *buf) 912static int read_telegram(struct r3964_info *pInfo, struct pid *pid, unsigned char __user *buf)
912{ 913{
913 struct r3964_client_info *pClient; 914 struct r3964_client_info *pClient;
914 struct r3964_block_header *block; 915 struct r3964_block_header *block;
@@ -1005,7 +1006,7 @@ queue_the_message:
1005 /* Send SIGIO signal to client process: */ 1006 /* Send SIGIO signal to client process: */
1006 if(pClient->sig_flags & R3964_USE_SIGIO) 1007 if(pClient->sig_flags & R3964_USE_SIGIO)
1007 { 1008 {
1008 kill_proc(pClient->pid, SIGIO, 1); 1009 kill_pid(pClient->pid, SIGIO, 1);
1009 } 1010 }
1010} 1011}
1011 1012
@@ -1042,7 +1043,7 @@ static void remove_client_block(struct r3964_info *pInfo,
1042{ 1043{
1043 struct r3964_block_header *block; 1044 struct r3964_block_header *block;
1044 1045
1045 TRACE_PS("remove_client_block PID %d", pClient->pid); 1046 TRACE_PS("remove_client_block PID %d", pid_nr(pClient->pid));
1046 1047
1047 block=pClient->next_block_to_read; 1048 block=pClient->next_block_to_read;
1048 if(block) 1049 if(block)
@@ -1157,6 +1158,7 @@ static void r3964_close(struct tty_struct *tty)
1157 TRACE_M("r3964_close - msg kfree %p",pMsg); 1158 TRACE_M("r3964_close - msg kfree %p",pMsg);
1158 } 1159 }
1159 } 1160 }
1161 put_pid(pClient->pid);
1160 kfree(pClient); 1162 kfree(pClient);
1161 TRACE_M("r3964_close - client kfree %p",pClient); 1163 TRACE_M("r3964_close - client kfree %p",pClient);
1162 pClient=pNext; 1164 pClient=pNext;
@@ -1193,12 +1195,11 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
1193 struct r3964_client_message theMsg; 1195 struct r3964_client_message theMsg;
1194 DECLARE_WAITQUEUE (wait, current); 1196 DECLARE_WAITQUEUE (wait, current);
1195 1197
1196 int pid = current->pid;
1197 int count; 1198 int count;
1198 1199
1199 TRACE_L("read()"); 1200 TRACE_L("read()");
1200 1201
1201 pClient=findClient(pInfo, pid); 1202 pClient=findClient(pInfo, task_pid(current));
1202 if(pClient) 1203 if(pClient)
1203 { 1204 {
1204 pMsg = remove_msg(pInfo, pClient); 1205 pMsg = remove_msg(pInfo, pClient);
@@ -1252,7 +1253,6 @@ static ssize_t r3964_write(struct tty_struct * tty, struct file * file,
1252 struct r3964_block_header *pHeader; 1253 struct r3964_block_header *pHeader;
1253 struct r3964_client_info *pClient; 1254 struct r3964_client_info *pClient;
1254 unsigned char *new_data; 1255 unsigned char *new_data;
1255 int pid;
1256 1256
1257 TRACE_L("write request, %d characters", count); 1257 TRACE_L("write request, %d characters", count);
1258/* 1258/*
@@ -1295,9 +1295,7 @@ static ssize_t r3964_write(struct tty_struct * tty, struct file * file,
1295 pHeader->locks = 0; 1295 pHeader->locks = 0;
1296 pHeader->owner = NULL; 1296 pHeader->owner = NULL;
1297 1297
1298 pid=current->pid; 1298 pClient=findClient(pInfo, task_pid(current));
1299
1300 pClient=findClient(pInfo, pid);
1301 if(pClient) 1299 if(pClient)
1302 { 1300 {
1303 pHeader->owner = pClient; 1301 pHeader->owner = pClient;
@@ -1328,7 +1326,7 @@ static int r3964_ioctl(struct tty_struct * tty, struct file * file,
1328 switch(cmd) 1326 switch(cmd)
1329 { 1327 {
1330 case R3964_ENABLE_SIGNALS: 1328 case R3964_ENABLE_SIGNALS:
1331 return enable_signals(pInfo, current->pid, arg); 1329 return enable_signals(pInfo, task_pid(current), arg);
1332 case R3964_SETPRIORITY: 1330 case R3964_SETPRIORITY:
1333 if(arg<R3964_MASTER || arg>R3964_SLAVE) 1331 if(arg<R3964_MASTER || arg>R3964_SLAVE)
1334 return -EINVAL; 1332 return -EINVAL;
@@ -1341,7 +1339,7 @@ static int r3964_ioctl(struct tty_struct * tty, struct file * file,
1341 pInfo->flags &= ~R3964_BCC; 1339 pInfo->flags &= ~R3964_BCC;
1342 return 0; 1340 return 0;
1343 case R3964_READ_TELEGRAM: 1341 case R3964_READ_TELEGRAM:
1344 return read_telegram(pInfo, current->pid, (unsigned char __user *)arg); 1342 return read_telegram(pInfo, task_pid(current), (unsigned char __user *)arg);
1345 default: 1343 default:
1346 return -ENOIOCTLCMD; 1344 return -ENOIOCTLCMD;
1347 } 1345 }
@@ -1357,7 +1355,6 @@ static unsigned int r3964_poll(struct tty_struct * tty, struct file * file,
1357 struct poll_table_struct *wait) 1355 struct poll_table_struct *wait)
1358{ 1356{
1359 struct r3964_info *pInfo=(struct r3964_info*)tty->disc_data; 1357 struct r3964_info *pInfo=(struct r3964_info*)tty->disc_data;
1360 int pid=current->pid;
1361 struct r3964_client_info *pClient; 1358 struct r3964_client_info *pClient;
1362 struct r3964_message *pMsg=NULL; 1359 struct r3964_message *pMsg=NULL;
1363 unsigned long flags; 1360 unsigned long flags;
@@ -1365,7 +1362,7 @@ static unsigned int r3964_poll(struct tty_struct * tty, struct file * file,
1365 1362
1366 TRACE_L("POLL"); 1363 TRACE_L("POLL");
1367 1364
1368 pClient=findClient(pInfo,pid); 1365 pClient=findClient(pInfo, task_pid(current));
1369 if(pClient) 1366 if(pClient)
1370 { 1367 {
1371 poll_wait(file, &pInfo->read_wait, wait); 1368 poll_wait(file, &pInfo->read_wait, wait);
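
The n_r3964 conversion above replaces numeric pid_t values with reference-counted struct pid pointers: a reference is taken with get_pid() when a client registers, the client is signalled through kill_pid(), and the reference is dropped with put_pid() when the client goes away, so a recycled PID number can no longer be signalled by accident. A compressed sketch of that lifetime; struct client and the helper names are illustrative, while the pid calls are the ones used in the diff:

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/signal.h>

struct client {
        struct pid *pid;
};

static void client_register(struct client *c)
{
        c->pid = get_pid(task_pid(current));    /* take a reference */
}

static void client_notify(struct client *c)
{
        kill_pid(c->pid, SIGIO, 1);             /* 1 = send as privileged */
}

static void client_unregister(struct client *c)
{
        put_pid(c->pid);                        /* drop the reference */
        c->pid = NULL;
}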
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index e96a00fe1389..2bdb0144a22e 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -1151,7 +1151,6 @@ static int copy_from_read_buf(struct tty_struct *tty,
1151 n = min(*nr, n); 1151 n = min(*nr, n);
1152 spin_unlock_irqrestore(&tty->read_lock, flags); 1152 spin_unlock_irqrestore(&tty->read_lock, flags);
1153 if (n) { 1153 if (n) {
1154 mb();
1155 retval = copy_to_user(*b, &tty->read_buf[tty->read_tail], n); 1154 retval = copy_to_user(*b, &tty->read_buf[tty->read_tail], n);
1156 n -= retval; 1155 n -= retval;
1157 spin_lock_irqsave(&tty->read_lock, flags); 1156 spin_lock_irqsave(&tty->read_lock, flags);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 5152cedd8878..f108c136800a 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -541,7 +541,7 @@ static int mgslpc_probe(struct pcmcia_device *link)
541 if (debug_level >= DEBUG_LEVEL_INFO) 541 if (debug_level >= DEBUG_LEVEL_INFO)
542 printk("mgslpc_attach\n"); 542 printk("mgslpc_attach\n");
543 543
544 info = (MGSLPC_INFO *)kmalloc(sizeof(MGSLPC_INFO), GFP_KERNEL); 544 info = kmalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
545 if (!info) { 545 if (!info) {
546 printk("Error can't allocate device instance data\n"); 546 printk("Error can't allocate device instance data\n");
547 return -ENOMEM; 547 return -ENOMEM;
diff --git a/drivers/char/rio/riocmd.c b/drivers/char/rio/riocmd.c
index 167ebc84e8d7..245f03195b7c 100644
--- a/drivers/char/rio/riocmd.c
+++ b/drivers/char/rio/riocmd.c
@@ -556,7 +556,7 @@ struct CmdBlk *RIOGetCmdBlk(void)
556{ 556{
557 struct CmdBlk *CmdBlkP; 557 struct CmdBlk *CmdBlkP;
558 558
559 CmdBlkP = (struct CmdBlk *)kmalloc(sizeof(struct CmdBlk), GFP_ATOMIC); 559 CmdBlkP = kmalloc(sizeof(struct CmdBlk), GFP_ATOMIC);
560 if (CmdBlkP) 560 if (CmdBlkP)
561 memset(CmdBlkP, 0, sizeof(struct CmdBlk)); 561 memset(CmdBlkP, 0, sizeof(struct CmdBlk));
562 return CmdBlkP; 562 return CmdBlkP;
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 66a7385bc34a..e1d70e8b6268 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -113,7 +113,7 @@ static int rtc_has_irq = 1;
113#define hpet_set_rtc_irq_bit(arg) 0 113#define hpet_set_rtc_irq_bit(arg) 0
114#define hpet_rtc_timer_init() do { } while (0) 114#define hpet_rtc_timer_init() do { } while (0)
115#define hpet_rtc_dropped_irq() 0 115#define hpet_rtc_dropped_irq() 0
116static inline irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) {return 0;} 116static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) {return 0;}
117#else 117#else
118extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id); 118extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
119#endif 119#endif
@@ -165,7 +165,9 @@ static void mask_rtc_irq_bit(unsigned char bit)
165} 165}
166#endif 166#endif
167 167
168#ifdef CONFIG_PROC_FS
168static int rtc_proc_open(struct inode *inode, struct file *file); 169static int rtc_proc_open(struct inode *inode, struct file *file);
170#endif
169 171
170/* 172/*
171 * Bits in rtc_status. (6 bits of room for future expansion) 173 * Bits in rtc_status. (6 bits of room for future expansion)
@@ -906,6 +908,7 @@ static struct miscdevice rtc_dev = {
906 .fops = &rtc_fops, 908 .fops = &rtc_fops,
907}; 909};
908 910
911#ifdef CONFIG_PROC_FS
909static const struct file_operations rtc_proc_fops = { 912static const struct file_operations rtc_proc_fops = {
910 .owner = THIS_MODULE, 913 .owner = THIS_MODULE,
911 .open = rtc_proc_open, 914 .open = rtc_proc_open,
@@ -913,14 +916,13 @@ static const struct file_operations rtc_proc_fops = {
913 .llseek = seq_lseek, 916 .llseek = seq_lseek,
914 .release = single_release, 917 .release = single_release,
915}; 918};
916
917#if defined(RTC_IRQ) && !defined(__sparc__)
918static irq_handler_t rtc_int_handler_ptr;
919#endif 919#endif
920 920
921static int __init rtc_init(void) 921static int __init rtc_init(void)
922{ 922{
923#ifdef CONFIG_PROC_FS
923 struct proc_dir_entry *ent; 924 struct proc_dir_entry *ent;
925#endif
924#if defined(__alpha__) || defined(__mips__) 926#if defined(__alpha__) || defined(__mips__)
925 unsigned int year, ctrl; 927 unsigned int year, ctrl;
926 char *guess = NULL; 928 char *guess = NULL;
@@ -932,9 +934,11 @@ static int __init rtc_init(void)
932 struct sparc_isa_bridge *isa_br; 934 struct sparc_isa_bridge *isa_br;
933 struct sparc_isa_device *isa_dev; 935 struct sparc_isa_device *isa_dev;
934#endif 936#endif
935#endif 937#else
936#ifndef __sparc__
937 void *r; 938 void *r;
939#ifdef RTC_IRQ
940 irq_handler_t rtc_int_handler_ptr;
941#endif
938#endif 942#endif
939 943
940#ifdef __sparc__ 944#ifdef __sparc__
@@ -958,6 +962,7 @@ static int __init rtc_init(void)
958 } 962 }
959 } 963 }
960#endif 964#endif
965 rtc_has_irq = 0;
961 printk(KERN_ERR "rtc_init: no PC rtc found\n"); 966 printk(KERN_ERR "rtc_init: no PC rtc found\n");
962 return -EIO; 967 return -EIO;
963 968
@@ -972,6 +977,7 @@ found:
972 * PCI Slot 2 INTA# (and some INTx# in Slot 1). 977 * PCI Slot 2 INTA# (and some INTx# in Slot 1).
973 */ 978 */
974 if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc", (void *)&rtc_port)) { 979 if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc", (void *)&rtc_port)) {
980 rtc_has_irq = 0;
975 printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq); 981 printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq);
976 return -EIO; 982 return -EIO;
977 } 983 }
@@ -982,6 +988,9 @@ no_irq:
982 else 988 else
983 r = request_mem_region(RTC_PORT(0), RTC_IO_EXTENT, "rtc"); 989 r = request_mem_region(RTC_PORT(0), RTC_IO_EXTENT, "rtc");
984 if (!r) { 990 if (!r) {
991#ifdef RTC_IRQ
992 rtc_has_irq = 0;
993#endif
985 printk(KERN_ERR "rtc: I/O resource %lx is not free.\n", 994 printk(KERN_ERR "rtc: I/O resource %lx is not free.\n",
986 (long)(RTC_PORT(0))); 995 (long)(RTC_PORT(0)));
987 return -EIO; 996 return -EIO;
@@ -996,6 +1005,7 @@ no_irq:
996 1005
997 if(request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED, "rtc", NULL)) { 1006 if(request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED, "rtc", NULL)) {
998 /* Yeah right, seeing as irq 8 doesn't even hit the bus. */ 1007 /* Yeah right, seeing as irq 8 doesn't even hit the bus. */
1008 rtc_has_irq = 0;
999 printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ); 1009 printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ);
1000 if (RTC_IOMAPPED) 1010 if (RTC_IOMAPPED)
1001 release_region(RTC_PORT(0), RTC_IO_EXTENT); 1011 release_region(RTC_PORT(0), RTC_IO_EXTENT);
@@ -1012,21 +1022,19 @@ no_irq:
1012 if (misc_register(&rtc_dev)) { 1022 if (misc_register(&rtc_dev)) {
1013#ifdef RTC_IRQ 1023#ifdef RTC_IRQ
1014 free_irq(RTC_IRQ, NULL); 1024 free_irq(RTC_IRQ, NULL);
1025 rtc_has_irq = 0;
1015#endif 1026#endif
1016 release_region(RTC_PORT(0), RTC_IO_EXTENT); 1027 release_region(RTC_PORT(0), RTC_IO_EXTENT);
1017 return -ENODEV; 1028 return -ENODEV;
1018 } 1029 }
1019 1030
1031#ifdef CONFIG_PROC_FS
1020 ent = create_proc_entry("driver/rtc", 0, NULL); 1032 ent = create_proc_entry("driver/rtc", 0, NULL);
1021 if (!ent) { 1033 if (ent)
1022#ifdef RTC_IRQ 1034 ent->proc_fops = &rtc_proc_fops;
1023 free_irq(RTC_IRQ, NULL); 1035 else
1036 printk(KERN_WARNING "rtc: Failed to register with procfs.\n");
1024#endif 1037#endif
1025 release_region(RTC_PORT(0), RTC_IO_EXTENT);
1026 misc_deregister(&rtc_dev);
1027 return -ENOMEM;
1028 }
1029 ent->proc_fops = &rtc_proc_fops;
1030 1038
1031#if defined(__alpha__) || defined(__mips__) 1039#if defined(__alpha__) || defined(__mips__)
1032 rtc_freq = HZ; 1040 rtc_freq = HZ;
@@ -1159,6 +1167,7 @@ static void rtc_dropped_irq(unsigned long data)
1159} 1167}
1160#endif 1168#endif
1161 1169
1170#ifdef CONFIG_PROC_FS
1162/* 1171/*
1163 * Info exported via "/proc/driver/rtc". 1172 * Info exported via "/proc/driver/rtc".
1164 */ 1173 */
@@ -1243,6 +1252,7 @@ static int rtc_proc_open(struct inode *inode, struct file *file)
1243{ 1252{
1244 return single_open(file, rtc_proc_show, NULL); 1253 return single_open(file, rtc_proc_show, NULL);
1245} 1254}
1255#endif
1246 1256
1247void rtc_get_rtc_time(struct rtc_time *rtc_tm) 1257void rtc_get_rtc_time(struct rtc_time *rtc_tm)
1248{ 1258{
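
The rtc.c rework above makes the /proc/driver/rtc entry optional twice over: everything proc-related is compiled out without CONFIG_PROC_FS, and a failed create_proc_entry() now only warns instead of tearing the driver down. A sketch of that registration pattern under those assumptions; rtc_proc_register() is an illustrative wrapper, not a function in the driver, and the show routine is reduced to a one-liner:

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>

static int rtc_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "rtc driver state would be printed here\n");
        return 0;
}

static int rtc_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, rtc_proc_show, NULL);
}

static const struct file_operations rtc_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = rtc_proc_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

static void rtc_proc_register(void)
{
        struct proc_dir_entry *ent = create_proc_entry("driver/rtc", 0, NULL);

        if (ent)
                ent->proc_fops = &rtc_proc_fops;
        else    /* procfs is a convenience; keep the char device working */
                printk(KERN_WARNING "rtc: Failed to register with procfs.\n");
}
#else
static inline void rtc_proc_register(void) { }
#endif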
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index a3008ce13015..1da92a689ae4 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -2498,8 +2498,10 @@ static void __devexit sx_remove_card(struct sx_board *board,
2498 /* It is safe/allowed to del_timer a non-active timer */ 2498 /* It is safe/allowed to del_timer a non-active timer */
2499 del_timer(&board->timer); 2499 del_timer(&board->timer);
2500 if (pdev) { 2500 if (pdev) {
2501#ifdef CONFIG_PCI
2501 pci_iounmap(pdev, board->base); 2502 pci_iounmap(pdev, board->base);
2502 pci_release_region(pdev, IS_CF_BOARD(board) ? 3 : 2); 2503 pci_release_region(pdev, IS_CF_BOARD(board) ? 3 : 2);
2504#endif
2503 } else { 2505 } else {
2504 iounmap(board->base); 2506 iounmap(board->base);
2505 release_region(board->hw_base, board->hw_len); 2507 release_region(board->hw_base, board->hw_len);
@@ -2601,6 +2603,7 @@ static struct eisa_driver sx_eisadriver = {
2601 2603
2602#endif 2604#endif
2603 2605
2606#ifdef CONFIG_PCI
2604 /******************************************************** 2607 /********************************************************
2605 * Setting bit 17 in the CNTRL register of the PLX 9050 * 2608 * Setting bit 17 in the CNTRL register of the PLX 9050 *
2606 * chip forces a retry on writes while a read is pending.* 2609 * chip forces a retry on writes while a read is pending.*
@@ -2632,10 +2635,12 @@ static void __devinit fix_sx_pci(struct pci_dev *pdev, struct sx_board *board)
2632 } 2635 }
2633 iounmap(rebase); 2636 iounmap(rebase);
2634} 2637}
2638#endif
2635 2639
2636static int __devinit sx_pci_probe(struct pci_dev *pdev, 2640static int __devinit sx_pci_probe(struct pci_dev *pdev,
2637 const struct pci_device_id *ent) 2641 const struct pci_device_id *ent)
2638{ 2642{
2643#ifdef CONFIG_PCI
2639 struct sx_board *board; 2644 struct sx_board *board;
2640 unsigned int i, reg; 2645 unsigned int i, reg;
2641 int retval = -EIO; 2646 int retval = -EIO;
@@ -2700,6 +2705,9 @@ err_flag:
2700 board->flags &= ~SX_BOARD_PRESENT; 2705 board->flags &= ~SX_BOARD_PRESENT;
2701err: 2706err:
2702 return retval; 2707 return retval;
2708#else
2709 return -ENODEV;
2710#endif
2703} 2711}
2704 2712
2705static void __devexit sx_pci_remove(struct pci_dev *pdev) 2713static void __devexit sx_pci_remove(struct pci_dev *pdev)
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index acc6fab601cc..3fa625db9e4b 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -4332,7 +4332,7 @@ static struct mgsl_struct* mgsl_allocate_device(void)
4332{ 4332{
4333 struct mgsl_struct *info; 4333 struct mgsl_struct *info;
4334 4334
4335 info = (struct mgsl_struct *)kmalloc(sizeof(struct mgsl_struct), 4335 info = kmalloc(sizeof(struct mgsl_struct),
4336 GFP_KERNEL); 4336 GFP_KERNEL);
4337 4337
4338 if (!info) { 4338 if (!info) {
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 53e8ccf94fe3..8f4d67afe5bf 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -2730,7 +2730,7 @@ static int startup(SLMP_INFO * info)
2730 return 0; 2730 return 0;
2731 2731
2732 if (!info->tx_buf) { 2732 if (!info->tx_buf) {
2733 info->tx_buf = (unsigned char *)kmalloc(info->max_frame_size, GFP_KERNEL); 2733 info->tx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
2734 if (!info->tx_buf) { 2734 if (!info->tx_buf) {
2735 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n", 2735 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
2736 __FILE__,__LINE__,info->device_name); 2736 __FILE__,__LINE__,info->device_name);
@@ -3798,7 +3798,7 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
3798{ 3798{
3799 SLMP_INFO *info; 3799 SLMP_INFO *info;
3800 3800
3801 info = (SLMP_INFO *)kmalloc(sizeof(SLMP_INFO), 3801 info = kmalloc(sizeof(SLMP_INFO),
3802 GFP_KERNEL); 3802 GFP_KERNEL);
3803 3803
3804 if (!info) { 3804 if (!info) {
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 05810c8d20bc..13935235e066 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -41,7 +41,34 @@
41#include <asm/irq_regs.h> 41#include <asm/irq_regs.h>
42 42
43/* Whether we react on sysrq keys or just ignore them */ 43/* Whether we react on sysrq keys or just ignore them */
44int sysrq_enabled = 1; 44int __read_mostly __sysrq_enabled = 1;
45
46static int __read_mostly sysrq_always_enabled;
47
48int sysrq_on(void)
49{
50 return __sysrq_enabled || sysrq_always_enabled;
51}
52
53/*
54 * A value of 1 means 'all', other nonzero values are an op mask:
55 */
56static inline int sysrq_on_mask(int mask)
57{
58 return sysrq_always_enabled || __sysrq_enabled == 1 ||
59 (__sysrq_enabled & mask);
60}
61
62static int __init sysrq_always_enabled_setup(char *str)
63{
64 sysrq_always_enabled = 1;
65 printk(KERN_INFO "debug: sysrq always enabled.\n");
66
67 return 1;
68}
69
70__setup("sysrq_always_enabled", sysrq_always_enabled_setup);
71
45 72
46static void sysrq_handle_loglevel(int key, struct tty_struct *tty) 73static void sysrq_handle_loglevel(int key, struct tty_struct *tty)
47{ 74{
@@ -379,8 +406,7 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
379 * Should we check for enabled operations (/proc/sysrq-trigger 406 * Should we check for enabled operations (/proc/sysrq-trigger
380 * should not) and is the invoked operation enabled? 407 * should not) and is the invoked operation enabled?
381 */ 408 */
382 if (!check_mask || sysrq_enabled == 1 || 409 if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
383 (sysrq_enabled & op_p->enable_mask)) {
384 printk("%s\n", op_p->action_msg); 410 printk("%s\n", op_p->action_msg);
385 console_loglevel = orig_log_level; 411 console_loglevel = orig_log_level;
386 op_p->handler(key, tty); 412 op_p->handler(key, tty);
@@ -414,9 +440,8 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
414 */ 440 */
415void handle_sysrq(int key, struct tty_struct *tty) 441void handle_sysrq(int key, struct tty_struct *tty)
416{ 442{
417 if (!sysrq_enabled) 443 if (sysrq_on())
418 return; 444 __handle_sysrq(key, tty, 1);
419 __handle_sysrq(key, tty, 1);
420} 445}
421EXPORT_SYMBOL(handle_sysrq); 446EXPORT_SYMBOL(handle_sysrq);
422 447
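
The sysrq.c change above splits the old sysrq_enabled flag into __sysrq_enabled plus a sysrq_always_enabled boot parameter and centralises the permission test. The value encoding stays the same: 0 disables SysRq, 1 allows every operation, and any other non-zero value is a bitmask matched against each operation's enable_mask. A stand-alone restatement of that check (names shortened, values illustrative):

static int sysrq_enabled = 1;           /* 0 = off, 1 = everything, else a bitmask */
static int sysrq_always_enabled;        /* set by the boot parameter */

static int sysrq_allowed(int enable_mask)
{
        return sysrq_always_enabled ||
               sysrq_enabled == 1 ||
               (sysrq_enabled & enable_mask);
}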
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 4044c864fdd4..47a6eacb10bc 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -3335,18 +3335,13 @@ static void __do_SAK(struct work_struct *work)
3335 int session; 3335 int session;
3336 int i; 3336 int i;
3337 struct file *filp; 3337 struct file *filp;
3338 struct tty_ldisc *disc;
3339 struct fdtable *fdt; 3338 struct fdtable *fdt;
3340 3339
3341 if (!tty) 3340 if (!tty)
3342 return; 3341 return;
3343 session = tty->session; 3342 session = tty->session;
3344 3343
3345 /* We don't want an ldisc switch during this */ 3344 tty_ldisc_flush(tty);
3346 disc = tty_ldisc_ref(tty);
3347 if (disc && disc->flush_buffer)
3348 disc->flush_buffer(tty);
3349 tty_ldisc_deref(disc);
3350 3345
3351 if (tty->driver->flush_buffer) 3346 if (tty->driver->flush_buffer)
3352 tty->driver->flush_buffer(tty); 3347 tty->driver->flush_buffer(tty);
@@ -3821,6 +3816,7 @@ struct tty_struct *get_current_tty(void)
3821 barrier(); 3816 barrier();
3822 return tty; 3817 return tty;
3823} 3818}
3819EXPORT_SYMBOL_GPL(get_current_tty);
3824 3820
3825/* 3821/*
3826 * Initialize the console device. This is called *early*, so 3822 * Initialize the console device. This is called *early*, so
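
The __do_SAK() hunk above folds an open-coded reference/flush/dereference of the line discipline into a single tty_ldisc_flush() call. Judging from the removed lines, the helper presumably encapsulates roughly the following; this is a sketch of the pattern, not the helper's authoritative source:

#include <linux/tty.h>

static void flush_ldisc_sketch(struct tty_struct *tty)
{
        struct tty_ldisc *ld = tty_ldisc_ref(tty);      /* pin the ldisc */

        if (ld) {
                if (ld->flush_buffer)
                        ld->flush_buffer(tty);
                tty_ldisc_deref(ld);                    /* release it */
        }
}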
diff --git a/drivers/char/viocons.c b/drivers/char/viocons.c
index 6d2e314860df..0e0da443cbd5 100644
--- a/drivers/char/viocons.c
+++ b/drivers/char/viocons.c
@@ -61,10 +61,7 @@
61static DEFINE_SPINLOCK(consolelock); 61static DEFINE_SPINLOCK(consolelock);
62static DEFINE_SPINLOCK(consoleloglock); 62static DEFINE_SPINLOCK(consoleloglock);
63 63
64#ifdef CONFIG_MAGIC_SYSRQ
65static int vio_sysrq_pressed; 64static int vio_sysrq_pressed;
66extern int sysrq_enabled;
67#endif
68 65
69#define VIOCHAR_NUM_BUF 16 66#define VIOCHAR_NUM_BUF 16
70 67
@@ -936,8 +933,10 @@ static void vioHandleData(struct HvLpEvent *event)
936 */ 933 */
937 num_pushed = 0; 934 num_pushed = 0;
938 for (index = 0; index < cevent->len; index++) { 935 for (index = 0; index < cevent->len; index++) {
939#ifdef CONFIG_MAGIC_SYSRQ 936 /*
940 if (sysrq_enabled) { 937 * Will be optimized away if !CONFIG_MAGIC_SYSRQ:
938 */
939 if (sysrq_on()) {
941 /* 0x0f is the ascii character for ^O */ 940 /* 0x0f is the ascii character for ^O */
942 if (cevent->data[index] == '\x0f') { 941 if (cevent->data[index] == '\x0f') {
943 vio_sysrq_pressed = 1; 942 vio_sysrq_pressed = 1;
@@ -956,7 +955,6 @@ static void vioHandleData(struct HvLpEvent *event)
956 continue; 955 continue;
957 } 956 }
958 } 957 }
959#endif
960 /* 958 /*
961 * The sysrq sequence isn't included in this check if 959 * The sysrq sequence isn't included in this check if
962 * sysrq is enabled and compiled into the kernel because 960 * sysrq is enabled and compiled into the kernel because
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index a8239dac994f..06c32a3e3ca4 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -784,7 +784,7 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
784 if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) 784 if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
785 return 0; 785 return 0;
786 786
787 newscreen = (unsigned short *) kmalloc(new_screen_size, GFP_USER); 787 newscreen = kmalloc(new_screen_size, GFP_USER);
788 if (!newscreen) 788 if (!newscreen)
789 return -ENOMEM; 789 return -ENOMEM;
790 790
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index ac5d60edbafa..dc8368ebb1ac 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -129,7 +129,7 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
129 !capable(CAP_SYS_RESOURCE)) 129 !capable(CAP_SYS_RESOURCE))
130 return -EPERM; 130 return -EPERM;
131 131
132 key_map = (ushort *) kmalloc(sizeof(plain_map), 132 key_map = kmalloc(sizeof(plain_map),
133 GFP_KERNEL); 133 GFP_KERNEL);
134 if (!key_map) 134 if (!key_map)
135 return -ENOMEM; 135 return -ENOMEM;
@@ -259,7 +259,7 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
259 sz = 256; 259 sz = 256;
260 while (sz < funcbufsize - funcbufleft + delta) 260 while (sz < funcbufsize - funcbufleft + delta)
261 sz <<= 1; 261 sz <<= 1;
262 fnw = (char *) kmalloc(sz, GFP_KERNEL); 262 fnw = kmalloc(sz, GFP_KERNEL);
263 if(!fnw) { 263 if(!fnw) {
264 ret = -ENOMEM; 264 ret = -ENOMEM;
265 goto reterr; 265 goto reterr;
@@ -1087,7 +1087,7 @@ static void complete_change_console(struct vc_data *vc)
1087 switch_screen(vc); 1087 switch_screen(vc);
1088 1088
1089 /* 1089 /*
1090 * This can't appear below a successful kill_proc(). If it did, 1090 * This can't appear below a successful kill_pid(). If it did,
1091 * then the *blank_screen operation could occur while X, having 1091 * then the *blank_screen operation could occur while X, having
1092 * received acqsig, is waking up on another processor. This 1092 * received acqsig, is waking up on another processor. This
1093 * condition can lead to overlapping accesses to the VGA range 1093 * condition can lead to overlapping accesses to the VGA range
@@ -1110,7 +1110,7 @@ static void complete_change_console(struct vc_data *vc)
1110 */ 1110 */
1111 if (vc->vt_mode.mode == VT_PROCESS) { 1111 if (vc->vt_mode.mode == VT_PROCESS) {
1112 /* 1112 /*
1113 * Send the signal as privileged - kill_proc() will 1113 * Send the signal as privileged - kill_pid() will
1114 * tell us if the process has gone or something else 1114 * tell us if the process has gone or something else
1115 * is awry 1115 * is awry
1116 */ 1116 */
@@ -1170,7 +1170,7 @@ void change_console(struct vc_data *new_vc)
1170 vc = vc_cons[fg_console].d; 1170 vc = vc_cons[fg_console].d;
1171 if (vc->vt_mode.mode == VT_PROCESS) { 1171 if (vc->vt_mode.mode == VT_PROCESS) {
1172 /* 1172 /*
1173 * Send the signal as privileged - kill_proc() will 1173 * Send the signal as privileged - kill_pid() will
1174 * tell us if the process has gone or something else 1174 * tell us if the process has gone or something else
1175 * is awry 1175 * is awry
1176 */ 1176 */
diff --git a/drivers/char/watchdog/at91rm9200_wdt.c b/drivers/char/watchdog/at91rm9200_wdt.c
index cb86967e2c5f..38bd37372599 100644
--- a/drivers/char/watchdog/at91rm9200_wdt.c
+++ b/drivers/char/watchdog/at91rm9200_wdt.c
@@ -203,9 +203,9 @@ static int __init at91wdt_probe(struct platform_device *pdev)
203{ 203{
204 int res; 204 int res;
205 205
206 if (at91wdt_miscdev.dev) 206 if (at91wdt_miscdev.parent)
207 return -EBUSY; 207 return -EBUSY;
208 at91wdt_miscdev.dev = &pdev->dev; 208 at91wdt_miscdev.parent = &pdev->dev;
209 209
210 res = misc_register(&at91wdt_miscdev); 210 res = misc_register(&at91wdt_miscdev);
211 if (res) 211 if (res)
@@ -221,7 +221,7 @@ static int __exit at91wdt_remove(struct platform_device *pdev)
221 221
222 res = misc_deregister(&at91wdt_miscdev); 222 res = misc_deregister(&at91wdt_miscdev);
223 if (!res) 223 if (!res)
224 at91wdt_miscdev.dev = NULL; 224 at91wdt_miscdev.parent = NULL;
225 225
226 return res; 226 return res;
227} 227}
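
The at91rm9200 watchdog (and the mpcore and omap watchdogs below) now point miscdevice.parent at the owning platform device instead of the old .dev member, so the misc core can hang the device node off the right parent in sysfs. A minimal probe/remove sketch of that usage; the my_wdt_* names are placeholders, not the driver's real symbols:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>

static const struct file_operations my_wdt_fops = {
        .owner = THIS_MODULE,
        /* .open / .write / .ioctl elided */
};

static struct miscdevice my_wdt_miscdev = {
        .minor = WATCHDOG_MINOR,
        .name  = "watchdog",
        .fops  = &my_wdt_fops,
};

static int __init my_wdt_probe(struct platform_device *pdev)
{
        if (my_wdt_miscdev.parent)
                return -EBUSY;                  /* single instance only */

        my_wdt_miscdev.parent = &pdev->dev;     /* device ancestry for sysfs */
        return misc_register(&my_wdt_miscdev);
}

static int __exit my_wdt_remove(struct platform_device *pdev)
{
        int res = misc_deregister(&my_wdt_miscdev);

        if (!res)
                my_wdt_miscdev.parent = NULL;
        return res;
}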
diff --git a/drivers/char/watchdog/mpcore_wdt.c b/drivers/char/watchdog/mpcore_wdt.c
index 3404a9c67f08..e88947f8fe53 100644
--- a/drivers/char/watchdog/mpcore_wdt.c
+++ b/drivers/char/watchdog/mpcore_wdt.c
@@ -347,7 +347,7 @@ static int __devinit mpcore_wdt_probe(struct platform_device *dev)
347 goto err_free; 347 goto err_free;
348 } 348 }
349 349
350 mpcore_wdt_miscdev.dev = &dev->dev; 350 mpcore_wdt_miscdev.parent = &dev->dev;
351 ret = misc_register(&mpcore_wdt_miscdev); 351 ret = misc_register(&mpcore_wdt_miscdev);
352 if (ret) { 352 if (ret) {
353 dev_printk(KERN_ERR, _dev, "cannot register miscdev on minor=%d (err=%d)\n", 353 dev_printk(KERN_ERR, _dev, "cannot register miscdev on minor=%d (err=%d)\n",
diff --git a/drivers/char/watchdog/omap_wdt.c b/drivers/char/watchdog/omap_wdt.c
index 5dbd7dc2936f..6c6f97332dbb 100644
--- a/drivers/char/watchdog/omap_wdt.c
+++ b/drivers/char/watchdog/omap_wdt.c
@@ -290,7 +290,7 @@ static int __init omap_wdt_probe(struct platform_device *pdev)
290 omap_wdt_disable(); 290 omap_wdt_disable();
291 omap_wdt_adjust_timeout(timer_margin); 291 omap_wdt_adjust_timeout(timer_margin);
292 292
293 omap_wdt_miscdev.dev = &pdev->dev; 293 omap_wdt_miscdev.parent = &pdev->dev;
294 ret = misc_register(&omap_wdt_miscdev); 294 ret = misc_register(&omap_wdt_miscdev);
295 if (ret) 295 if (ret)
296 goto fail; 296 goto fail;
diff --git a/drivers/char/watchdog/pcwd_usb.c b/drivers/char/watchdog/pcwd_usb.c
index 61138726b501..2da5ac99687c 100644
--- a/drivers/char/watchdog/pcwd_usb.c
+++ b/drivers/char/watchdog/pcwd_usb.c
@@ -42,6 +42,7 @@
42#include <asm/uaccess.h> 42#include <asm/uaccess.h>
43#include <linux/usb.h> 43#include <linux/usb.h>
44#include <linux/mutex.h> 44#include <linux/mutex.h>
45#include <linux/hid.h> /* For HID_REQ_SET_REPORT & HID_DT_REPORT */
45 46
46 47
47#ifdef CONFIG_USB_DEBUG 48#ifdef CONFIG_USB_DEBUG
@@ -109,10 +110,6 @@ MODULE_DEVICE_TABLE (usb, usb_pcwd_table);
109#define CMD_ENABLE_WATCHDOG 0x30 /* Enable / Disable Watchdog */ 110#define CMD_ENABLE_WATCHDOG 0x30 /* Enable / Disable Watchdog */
110#define CMD_DISABLE_WATCHDOG CMD_ENABLE_WATCHDOG 111#define CMD_DISABLE_WATCHDOG CMD_ENABLE_WATCHDOG
111 112
112/* Some defines that I like to be somewhere else like include/linux/usb_hid.h */
113#define HID_REQ_SET_REPORT 0x09
114#define HID_DT_REPORT (USB_TYPE_CLASS | 0x02)
115
116/* We can only use 1 card due to the /dev/watchdog restriction */ 113/* We can only use 1 card due to the /dev/watchdog restriction */
117static int cards_found; 114static int cards_found;
118 115
diff --git a/drivers/char/watchdog/rm9k_wdt.c b/drivers/char/watchdog/rm9k_wdt.c
index ec3909371c21..7576a13e86bc 100644
--- a/drivers/char/watchdog/rm9k_wdt.c
+++ b/drivers/char/watchdog/rm9k_wdt.c
@@ -47,7 +47,7 @@
47 47
48 48
49/* Function prototypes */ 49/* Function prototypes */
50static irqreturn_t wdt_gpi_irqhdl(int, void *, struct pt_regs *); 50static irqreturn_t wdt_gpi_irqhdl(int, void *);
51static void wdt_gpi_start(void); 51static void wdt_gpi_start(void);
52static void wdt_gpi_stop(void); 52static void wdt_gpi_stop(void);
53static void wdt_gpi_set_timeout(unsigned int); 53static void wdt_gpi_set_timeout(unsigned int);
@@ -94,8 +94,28 @@ module_param(nowayout, bool, 0444);
94MODULE_PARM_DESC(nowayout, "Watchdog cannot be disabled once started"); 94MODULE_PARM_DESC(nowayout, "Watchdog cannot be disabled once started");
95 95
96 96
97/* Kernel interfaces */
98static struct file_operations fops = {
99 .owner = THIS_MODULE,
100 .open = wdt_gpi_open,
101 .release = wdt_gpi_release,
102 .write = wdt_gpi_write,
103 .unlocked_ioctl = wdt_gpi_ioctl,
104};
105
106static struct miscdevice miscdev = {
107 .minor = WATCHDOG_MINOR,
108 .name = wdt_gpi_name,
109 .fops = &fops,
110};
111
112static struct notifier_block wdt_gpi_shutdown = {
113 .notifier_call = wdt_gpi_notify,
114};
115
116
97/* Interrupt handler */ 117/* Interrupt handler */
98static irqreturn_t wdt_gpi_irqhdl(int irq, void *ctxt, struct pt_regs *regs) 118static irqreturn_t wdt_gpi_irqhdl(int irq, void *ctxt)
99{ 119{
100 if (!unlikely(__raw_readl(wd_regs + 0x0008) & 0x1)) 120 if (!unlikely(__raw_readl(wd_regs + 0x0008) & 0x1))
101 return IRQ_NONE; 121 return IRQ_NONE;
@@ -312,26 +332,6 @@ wdt_gpi_notify(struct notifier_block *this, unsigned long code, void *unused)
312} 332}
313 333
314 334
315/* Kernel interfaces */
316static struct file_operations fops = {
317 .owner = THIS_MODULE,
318 .open = wdt_gpi_open,
319 .release = wdt_gpi_release,
320 .write = wdt_gpi_write,
321 .unlocked_ioctl = wdt_gpi_ioctl,
322};
323
324static struct miscdevice miscdev = {
325 .minor = WATCHDOG_MINOR,
326 .name = wdt_gpi_name,
327 .fops = &fops,
328};
329
330static struct notifier_block wdt_gpi_shutdown = {
331 .notifier_call = wdt_gpi_notify,
332};
333
334
335/* Init & exit procedures */ 335/* Init & exit procedures */
336static const struct resource * 336static const struct resource *
337wdt_gpi_get_resource(struct platform_device *pdv, const char *name, 337wdt_gpi_get_resource(struct platform_device *pdv, const char *name,
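
The rm9k_wdt changes above adapt the interrupt handler to the two-argument prototype used since the genirq update (the struct pt_regs * parameter is gone; handlers that need the register state call get_irq_regs() instead) and move the file_operations/miscdevice/notifier definitions ahead of their first use so no forward declarations are needed. A minimal handler in the new style; my_device_raised_irq() is a placeholder for the driver's own "did our device raise this?" test:

#include <linux/interrupt.h>

/* Placeholder for the device-specific interrupt-source check. */
static int my_device_raised_irq(void *ctxt)
{
        return 1;
}

static irqreturn_t my_wdt_irqhdl(int irq, void *ctxt)
{
        if (!my_device_raised_irq(ctxt))
                return IRQ_NONE;        /* shared line, not ours */

        /* acknowledge the device, pet or stop the watchdog, ... */
        return IRQ_HANDLED;
}

/* Registration is unchanged apart from the handler type:
 *      request_irq(irq, my_wdt_irqhdl, IRQF_SHARED, "my_wdt", dev);
 */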
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 47ab42db122a..9fb2edf36611 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -29,7 +29,8 @@
29#include <linux/completion.h> 29#include <linux/completion.h>
30#include <linux/mutex.h> 30#include <linux/mutex.h>
31 31
32#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg) 32#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
33 "cpufreq-core", msg)
33 34
34/** 35/**
35 * The "cpufreq driver" - the arch- or hardware-dependent low 36 * The "cpufreq driver" - the arch- or hardware-dependent low
@@ -151,7 +152,8 @@ static void cpufreq_debug_disable_ratelimit(void)
151 spin_unlock_irqrestore(&disable_ratelimit_lock, flags); 152 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
152} 153}
153 154
154void cpufreq_debug_printk(unsigned int type, const char *prefix, const char *fmt, ...) 155void cpufreq_debug_printk(unsigned int type, const char *prefix,
156 const char *fmt, ...)
155{ 157{
156 char s[256]; 158 char s[256];
157 va_list args; 159 va_list args;
@@ -161,7 +163,8 @@ void cpufreq_debug_printk(unsigned int type, const char *prefix, const char *fmt
161 WARN_ON(!prefix); 163 WARN_ON(!prefix);
162 if (type & debug) { 164 if (type & debug) {
163 spin_lock_irqsave(&disable_ratelimit_lock, flags); 165 spin_lock_irqsave(&disable_ratelimit_lock, flags);
164 if (!disable_ratelimit && debug_ratelimit && !printk_ratelimit()) { 166 if (!disable_ratelimit && debug_ratelimit
167 && !printk_ratelimit()) {
165 spin_unlock_irqrestore(&disable_ratelimit_lock, flags); 168 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
166 return; 169 return;
167 } 170 }
@@ -182,10 +185,12 @@ EXPORT_SYMBOL(cpufreq_debug_printk);
182 185
183 186
184module_param(debug, uint, 0644); 187module_param(debug, uint, 0644);
185MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core, 2 to debug drivers, and 4 to debug governors."); 188MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
189 " 2 to debug drivers, and 4 to debug governors.");
186 190
187module_param(debug_ratelimit, uint, 0644); 191module_param(debug_ratelimit, uint, 0644);
188MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging: set to 0 to disable ratelimiting."); 192MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
193 " set to 0 to disable ratelimiting.");
189 194
190#else /* !CONFIG_CPU_FREQ_DEBUG */ 195#else /* !CONFIG_CPU_FREQ_DEBUG */
191 196
@@ -219,17 +224,23 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
219 if (!l_p_j_ref_freq) { 224 if (!l_p_j_ref_freq) {
220 l_p_j_ref = loops_per_jiffy; 225 l_p_j_ref = loops_per_jiffy;
221 l_p_j_ref_freq = ci->old; 226 l_p_j_ref_freq = ci->old;
222 dprintk("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); 227 dprintk("saving %lu as reference value for loops_per_jiffy;"
228 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
223 } 229 }
224 if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) || 230 if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
225 (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) || 231 (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
226 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { 232 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
227 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new); 233 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
228 dprintk("scaling loops_per_jiffy to %lu for frequency %u kHz\n", loops_per_jiffy, ci->new); 234 ci->new);
235 dprintk("scaling loops_per_jiffy to %lu"
236 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
229 } 237 }
230} 238}
231#else 239#else
232static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) { return; } 240static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
241{
242 return;
243}
233#endif 244#endif
234 245
235 246
@@ -316,7 +327,8 @@ static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
316 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { 327 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
317 *policy = CPUFREQ_POLICY_PERFORMANCE; 328 *policy = CPUFREQ_POLICY_PERFORMANCE;
318 err = 0; 329 err = 0;
319 } else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) { 330 } else if (!strnicmp(str_governor, "powersave",
331 CPUFREQ_NAME_LEN)) {
320 *policy = CPUFREQ_POLICY_POWERSAVE; 332 *policy = CPUFREQ_POLICY_POWERSAVE;
321 err = 0; 333 err = 0;
322 } 334 }
@@ -328,7 +340,8 @@ static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
328 t = __find_governor(str_governor); 340 t = __find_governor(str_governor);
329 341
330 if (t == NULL) { 342 if (t == NULL) {
331 char *name = kasprintf(GFP_KERNEL, "cpufreq_%s", str_governor); 343 char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
344 str_governor);
332 345
333 if (name) { 346 if (name) {
334 int ret; 347 int ret;
@@ -361,7 +374,8 @@ extern struct sysdev_class cpu_sysdev_class;
361 374
362 375
363/** 376/**
364 * cpufreq_per_cpu_attr_read() / show_##file_name() - print out cpufreq information 377 * cpufreq_per_cpu_attr_read() / show_##file_name() -
378 * print out cpufreq information
365 * 379 *
366 * Write out information from cpufreq_driver->policy[cpu]; object must be 380 * Write out information from cpufreq_driver->policy[cpu]; object must be
367 * "unsigned int". 381 * "unsigned int".
@@ -380,7 +394,8 @@ show_one(scaling_min_freq, min);
380show_one(scaling_max_freq, max); 394show_one(scaling_max_freq, max);
381show_one(scaling_cur_freq, cur); 395show_one(scaling_cur_freq, cur);
382 396
383static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy); 397static int __cpufreq_set_policy(struct cpufreq_policy *data,
398 struct cpufreq_policy *policy);
384 399
385/** 400/**
386 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access 401 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
@@ -416,7 +431,8 @@ store_one(scaling_max_freq,max);
416/** 431/**
417 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware 432 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
418 */ 433 */
419static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, char *buf) 434static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy,
435 char *buf)
420{ 436{
421 unsigned int cur_freq = cpufreq_get(policy->cpu); 437 unsigned int cur_freq = cpufreq_get(policy->cpu);
422 if (!cur_freq) 438 if (!cur_freq)
@@ -428,7 +444,8 @@ static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, char *buf)
428/** 444/**
429 * show_scaling_governor - show the current policy for the specified CPU 445 * show_scaling_governor - show the current policy for the specified CPU
430 */ 446 */
431static ssize_t show_scaling_governor (struct cpufreq_policy * policy, char *buf) 447static ssize_t show_scaling_governor (struct cpufreq_policy * policy,
448 char *buf)
432{ 449{
433 if(policy->policy == CPUFREQ_POLICY_POWERSAVE) 450 if(policy->policy == CPUFREQ_POLICY_POWERSAVE)
434 return sprintf(buf, "powersave\n"); 451 return sprintf(buf, "powersave\n");
@@ -458,7 +475,8 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
458 if (ret != 1) 475 if (ret != 1)
459 return -EINVAL; 476 return -EINVAL;
460 477
461 if (cpufreq_parse_governor(str_governor, &new_policy.policy, &new_policy.governor)) 478 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
479 &new_policy.governor))
462 return -EINVAL; 480 return -EINVAL;
463 481
464 lock_cpu_hotplug(); 482 lock_cpu_hotplug();
@@ -474,7 +492,10 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
474 492
475 unlock_cpu_hotplug(); 493 unlock_cpu_hotplug();
476 494
477 return ret ? ret : count; 495 if (ret)
496 return ret;
497 else
498 return count;
478} 499}
479 500
480/** 501/**
@@ -488,7 +509,7 @@ static ssize_t show_scaling_driver (struct cpufreq_policy * policy, char *buf)
488/** 509/**
489 * show_scaling_available_governors - show the available CPUfreq governors 510 * show_scaling_available_governors - show the available CPUfreq governors
490 */ 511 */
491static ssize_t show_scaling_available_governors (struct cpufreq_policy * policy, 512static ssize_t show_scaling_available_governors (struct cpufreq_policy *policy,
492 char *buf) 513 char *buf)
493{ 514{
494 ssize_t i = 0; 515 ssize_t i = 0;
@@ -574,7 +595,11 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
574 policy = cpufreq_cpu_get(policy->cpu); 595 policy = cpufreq_cpu_get(policy->cpu);
575 if (!policy) 596 if (!policy)
576 return -EINVAL; 597 return -EINVAL;
577 ret = fattr->show ? fattr->show(policy,buf) : -EIO; 598 if (fattr->show)
599 ret = fattr->show(policy, buf);
600 else
601 ret = -EIO;
602
578 cpufreq_cpu_put(policy); 603 cpufreq_cpu_put(policy);
579 return ret; 604 return ret;
580} 605}
@@ -588,7 +613,11 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr,
588 policy = cpufreq_cpu_get(policy->cpu); 613 policy = cpufreq_cpu_get(policy->cpu);
589 if (!policy) 614 if (!policy)
590 return -EINVAL; 615 return -EINVAL;
591 ret = fattr->store ? fattr->store(policy,buf,count) : -EIO; 616 if (fattr->store)
617 ret = fattr->store(policy, buf, count);
618 else
619 ret = -EIO;
620
592 cpufreq_cpu_put(policy); 621 cpufreq_cpu_put(policy);
593 return ret; 622 return ret;
594} 623}
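
Editor's note: the rewritten show()/store() wrappers above only dispatch to the per-attribute callbacks held in struct freq_attr, falling back to -EIO when a callback is absent. A minimal sketch of a read-only attribute that would be served by this dispatch; the attribute name and body are illustrative, not part of the patch:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpufreq.h>

/* Hypothetical read-only attribute, for illustration only. */
static ssize_t show_example(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", policy->cur);       /* kHz */
}

static struct freq_attr example_attr = {
        .attr = { .name = "example", .mode = 0444, .owner = THIS_MODULE },
        .show = show_example,   /* reached through fattr->show() above */
};
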
@@ -913,7 +942,8 @@ static void handle_update(struct work_struct *work)
913 * We adjust to current frequency first, and need to clean up later. So either call 942 * We adjust to current frequency first, and need to clean up later. So either call
914 * to cpufreq_update_policy() or schedule handle_update()). 943 * to cpufreq_update_policy() or schedule handle_update()).
915 */ 944 */
916static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigned int new_freq) 945static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
946 unsigned int new_freq)
917{ 947{
918 struct cpufreq_freqs freqs; 948 struct cpufreq_freqs freqs;
919 949
@@ -938,16 +968,16 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigne
938unsigned int cpufreq_quick_get(unsigned int cpu) 968unsigned int cpufreq_quick_get(unsigned int cpu)
939{ 969{
940 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 970 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
941 unsigned int ret = 0; 971 unsigned int ret_freq = 0;
942 972
943 if (policy) { 973 if (policy) {
944 mutex_lock(&policy->lock); 974 mutex_lock(&policy->lock);
945 ret = policy->cur; 975 ret_freq = policy->cur;
946 mutex_unlock(&policy->lock); 976 mutex_unlock(&policy->lock);
947 cpufreq_cpu_put(policy); 977 cpufreq_cpu_put(policy);
948 } 978 }
949 979
950 return (ret); 980 return (ret_freq);
951} 981}
952EXPORT_SYMBOL(cpufreq_quick_get); 982EXPORT_SYMBOL(cpufreq_quick_get);
953 983
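
Editor's note: the ret -> ret_freq rename makes it clearer that cpufreq_quick_get() returns the cached policy->cur in kHz rather than querying hardware. A hedged usage sketch (the helper name is made up): prefer the cheap cached value and fall back to cpufreq_get(), which goes through the driver's ->get() callback and may sleep.

#include <linux/cpufreq.h>

static unsigned int example_cpu_khz(unsigned int cpu)
{
        unsigned int khz = cpufreq_quick_get(cpu);      /* cached policy->cur */

        if (!khz)
                khz = cpufreq_get(cpu);                 /* driver ->get(), may sleep */
        return khz;
}
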
@@ -961,7 +991,7 @@ EXPORT_SYMBOL(cpufreq_quick_get);
961unsigned int cpufreq_get(unsigned int cpu) 991unsigned int cpufreq_get(unsigned int cpu)
962{ 992{
963 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 993 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
964 unsigned int ret = 0; 994 unsigned int ret_freq = 0;
965 995
966 if (!policy) 996 if (!policy)
967 return 0; 997 return 0;
@@ -971,12 +1001,14 @@ unsigned int cpufreq_get(unsigned int cpu)
971 1001
972 mutex_lock(&policy->lock); 1002 mutex_lock(&policy->lock);
973 1003
974 ret = cpufreq_driver->get(cpu); 1004 ret_freq = cpufreq_driver->get(cpu);
975 1005
976 if (ret && policy->cur && !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 1006 if (ret_freq && policy->cur &&
977 /* verify no discrepancy between actual and saved value exists */ 1007 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
978 if (unlikely(ret != policy->cur)) { 1008 /* verify no discrepancy between actual and
979 cpufreq_out_of_sync(cpu, policy->cur, ret); 1009 saved value exists */
1010 if (unlikely(ret_freq != policy->cur)) {
1011 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
980 schedule_work(&policy->update); 1012 schedule_work(&policy->update);
981 } 1013 }
982 } 1014 }
@@ -986,7 +1018,7 @@ unsigned int cpufreq_get(unsigned int cpu)
986out: 1018out:
987 cpufreq_cpu_put(policy); 1019 cpufreq_cpu_put(policy);
988 1020
989 return (ret); 1021 return (ret_freq);
990} 1022}
991EXPORT_SYMBOL(cpufreq_get); 1023EXPORT_SYMBOL(cpufreq_get);
992 1024
@@ -998,7 +1030,7 @@ EXPORT_SYMBOL(cpufreq_get);
998static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg) 1030static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
999{ 1031{
1000 int cpu = sysdev->id; 1032 int cpu = sysdev->id;
1001 unsigned int ret = 0; 1033 int ret = 0;
1002 unsigned int cur_freq = 0; 1034 unsigned int cur_freq = 0;
1003 struct cpufreq_policy *cpu_policy; 1035 struct cpufreq_policy *cpu_policy;
1004 1036
@@ -1080,7 +1112,7 @@ out:
1080static int cpufreq_resume(struct sys_device * sysdev) 1112static int cpufreq_resume(struct sys_device * sysdev)
1081{ 1113{
1082 int cpu = sysdev->id; 1114 int cpu = sysdev->id;
1083 unsigned int ret = 0; 1115 int ret = 0;
1084 struct cpufreq_policy *cpu_policy; 1116 struct cpufreq_policy *cpu_policy;
1085 1117
1086 dprintk("resuming cpu %u\n", cpu); 1118 dprintk("resuming cpu %u\n", cpu);
@@ -1276,22 +1308,45 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
1276} 1308}
1277EXPORT_SYMBOL_GPL(cpufreq_driver_target); 1309EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1278 1310
1311int cpufreq_driver_getavg(struct cpufreq_policy *policy)
1312{
1313 int ret = 0;
1314
1315 policy = cpufreq_cpu_get(policy->cpu);
1316 if (!policy)
1317 return -EINVAL;
1318
1319 mutex_lock(&policy->lock);
1320
1321 if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
1322 ret = cpufreq_driver->getavg(policy->cpu);
1323
1324 mutex_unlock(&policy->lock);
1325
1326 cpufreq_cpu_put(policy);
1327 return ret;
1328}
1329EXPORT_SYMBOL_GPL(cpufreq_driver_getavg);
1330
1279/* 1331/*
1280 * Locking: Must be called with the lock_cpu_hotplug() lock held 1332 * Locking: Must be called with the lock_cpu_hotplug() lock held
1281 * when "event" is CPUFREQ_GOV_LIMITS 1333 * when "event" is CPUFREQ_GOV_LIMITS
1282 */ 1334 */
1283 1335
1284static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event) 1336static int __cpufreq_governor(struct cpufreq_policy *policy,
1337 unsigned int event)
1285{ 1338{
1286 int ret; 1339 int ret;
1287 1340
1288 if (!try_module_get(policy->governor->owner)) 1341 if (!try_module_get(policy->governor->owner))
1289 return -EINVAL; 1342 return -EINVAL;
1290 1343
1291 dprintk("__cpufreq_governor for CPU %u, event %u\n", policy->cpu, event); 1344 dprintk("__cpufreq_governor for CPU %u, event %u\n",
1345 policy->cpu, event);
1292 ret = policy->governor->governor(policy, event); 1346 ret = policy->governor->governor(policy, event);
1293 1347
1294 /* we keep one module reference alive for each CPU governed by this CPU */ 1348 /* we keep one module reference alive for
1349 each CPU governed by this CPU */
1295 if ((event != CPUFREQ_GOV_START) || ret) 1350 if ((event != CPUFREQ_GOV_START) || ret)
1296 module_put(policy->governor->owner); 1351 module_put(policy->governor->owner);
1297 if ((event == CPUFREQ_GOV_STOP) && !ret) 1352 if ((event == CPUFREQ_GOV_STOP) && !ret)
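
Editor's note: cpufreq_driver_getavg() is new in this patch; it returns the driver's averaged frequency when a ->getavg callback exists and 0 (or a negative error) otherwise. A sketch of the intended caller pattern, which the ondemand hunk further down follows; the function name here is illustrative:

#include <linux/cpufreq.h>

static unsigned int governor_current_khz(struct cpufreq_policy *policy)
{
        int avg = cpufreq_driver_getavg(policy);

        /* no ->getavg callback (0) or error (<0): fall back to policy->cur */
        return (avg > 0) ? (unsigned int)avg : policy->cur;
}
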
@@ -1367,9 +1422,12 @@ EXPORT_SYMBOL(cpufreq_get_policy);
1367 1422
1368 1423
1369/* 1424/*
1425 * data : current policy.
1426 * policy : policy to be set.
1370 * Locking: Must be called with the lock_cpu_hotplug() lock held 1427 * Locking: Must be called with the lock_cpu_hotplug() lock held
1371 */ 1428 */
1372static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy) 1429static int __cpufreq_set_policy(struct cpufreq_policy *data,
1430 struct cpufreq_policy *policy)
1373{ 1431{
1374 int ret = 0; 1432 int ret = 0;
1375 1433
@@ -1377,7 +1435,8 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli
1377 dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, 1435 dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1378 policy->min, policy->max); 1436 policy->min, policy->max);
1379 1437
1380 memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo)); 1438 memcpy(&policy->cpuinfo, &data->cpuinfo,
1439 sizeof(struct cpufreq_cpuinfo));
1381 1440
1382 if (policy->min > data->min && policy->min > policy->max) { 1441 if (policy->min > data->min && policy->min > policy->max) {
1383 ret = -EINVAL; 1442 ret = -EINVAL;
@@ -1410,7 +1469,8 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli
1410 data->min = policy->min; 1469 data->min = policy->min;
1411 data->max = policy->max; 1470 data->max = policy->max;
1412 1471
1413 dprintk("new min and max freqs are %u - %u kHz\n", data->min, data->max); 1472 dprintk("new min and max freqs are %u - %u kHz\n",
1473 data->min, data->max);
1414 1474
1415 if (cpufreq_driver->setpolicy) { 1475 if (cpufreq_driver->setpolicy) {
1416 data->policy = policy->policy; 1476 data->policy = policy->policy;
@@ -1431,10 +1491,12 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli
1431 data->governor = policy->governor; 1491 data->governor = policy->governor;
1432 if (__cpufreq_governor(data, CPUFREQ_GOV_START)) { 1492 if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1433 /* new governor failed, so re-start old one */ 1493 /* new governor failed, so re-start old one */
1434 dprintk("starting governor %s failed\n", data->governor->name); 1494 dprintk("starting governor %s failed\n",
1495 data->governor->name);
1435 if (old_gov) { 1496 if (old_gov) {
1436 data->governor = old_gov; 1497 data->governor = old_gov;
1437 __cpufreq_governor(data, CPUFREQ_GOV_START); 1498 __cpufreq_governor(data,
1499 CPUFREQ_GOV_START);
1438 } 1500 }
1439 ret = -EINVAL; 1501 ret = -EINVAL;
1440 goto error_out; 1502 goto error_out;
@@ -1524,7 +1586,8 @@ int cpufreq_update_policy(unsigned int cpu)
1524 data->cur = policy.cur; 1586 data->cur = policy.cur;
1525 } else { 1587 } else {
1526 if (data->cur != policy.cur) 1588 if (data->cur != policy.cur)
1527 cpufreq_out_of_sync(cpu, data->cur, policy.cur); 1589 cpufreq_out_of_sync(cpu, data->cur,
1590 policy.cur);
1528 } 1591 }
1529 } 1592 }
1530 1593
@@ -1626,8 +1689,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1626 1689
1627 /* if all ->init() calls failed, unregister */ 1690 /* if all ->init() calls failed, unregister */
1628 if (ret) { 1691 if (ret) {
1629 dprintk("no CPU initialized for driver %s\n", driver_data->name); 1692 dprintk("no CPU initialized for driver %s\n",
1630 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); 1693 driver_data->name);
1694 sysdev_driver_unregister(&cpu_sysdev_class,
1695 &cpufreq_sysdev_driver);
1631 1696
1632 spin_lock_irqsave(&cpufreq_driver_lock, flags); 1697 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1633 cpufreq_driver = NULL; 1698 cpufreq_driver = NULL;
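
Editor's note: for context on the error path above, cpufreq_register_driver() undoes the sysdev registration when every per-CPU ->init() call fails. A self-contained skeleton of a driver using it; all names and the trivial callbacks are placeholders, not code from this patch:

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/cpufreq.h>

static int example_verify(struct cpufreq_policy *policy)
{
        cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                     policy->cpuinfo.max_freq);
        return 0;
}

static int example_target(struct cpufreq_policy *policy,
                          unsigned int target_freq, unsigned int relation)
{
        return 0;               /* a real driver would program the hardware */
}

static int example_cpu_init(struct cpufreq_policy *policy)
{
        return -ENODEV;         /* pretend the CPU is not supported */
}

static struct cpufreq_driver example_driver = {
        .name   = "example",
        .owner  = THIS_MODULE,
        .init   = example_cpu_init,
        .verify = example_verify,
        .target = example_target,
};

static int __init example_init(void)
{
        /* every ->init() fails, so registration returns an error and the
         * core unregisters the sysdev driver again, as the hunk above shows */
        return cpufreq_register_driver(&example_driver);
}
module_init(example_init);
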
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 5ef5ede5b884..eef0270c6f3d 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -44,15 +44,17 @@
44 * latency of the processor. The governor will work on any processor with 44 * latency of the processor. The governor will work on any processor with
45 * transition latency <= 10mS, using appropriate sampling 45 * transition latency <= 10mS, using appropriate sampling
46 * rate. 46 * rate.
47 * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) 47 * For CPUs with transition latency > 10mS (mostly drivers
48 * this governor will not work. 48 * with CPUFREQ_ETERNAL), this governor will not work.
49 * All times here are in uS. 49 * All times here are in uS.
50 */ 50 */
51static unsigned int def_sampling_rate; 51static unsigned int def_sampling_rate;
52#define MIN_SAMPLING_RATE_RATIO (2) 52#define MIN_SAMPLING_RATE_RATIO (2)
53/* for correct statistics, we need at least 10 ticks between each measure */ 53/* for correct statistics, we need at least 10 ticks between each measure */
54#define MIN_STAT_SAMPLING_RATE (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10)) 54#define MIN_STAT_SAMPLING_RATE \
55#define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) 55 (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
56#define MIN_SAMPLING_RATE \
57 (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
56#define MAX_SAMPLING_RATE (500 * def_sampling_rate) 58#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
57#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) 59#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
58#define DEF_SAMPLING_DOWN_FACTOR (1) 60#define DEF_SAMPLING_DOWN_FACTOR (1)
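
Editor's note: the reflowed macros compute the same bounds as before. As a worked example, assuming HZ=250 so one tick is 4000 us (the real value is configuration dependent):

/*
 * jiffies_to_usecs(10)   = 10 * 4000             = 40000 us
 * MIN_STAT_SAMPLING_RATE = 2 * 40000             = 80000 us
 * MIN_SAMPLING_RATE      = def_sampling_rate / 2
 * MAX_SAMPLING_RATE      = 500 * def_sampling_rate
 */
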
@@ -103,11 +105,16 @@ static struct dbs_tuners dbs_tuners_ins = {
103 105
104static inline unsigned int get_cpu_idle_time(unsigned int cpu) 106static inline unsigned int get_cpu_idle_time(unsigned int cpu)
105{ 107{
106 return kstat_cpu(cpu).cpustat.idle + 108 unsigned int add_nice = 0, ret;
109
110 if (dbs_tuners_ins.ignore_nice)
111 add_nice = kstat_cpu(cpu).cpustat.nice;
112
113 ret = kstat_cpu(cpu).cpustat.idle +
107 kstat_cpu(cpu).cpustat.iowait + 114 kstat_cpu(cpu).cpustat.iowait +
108 ( dbs_tuners_ins.ignore_nice ? 115 add_nice;
109 kstat_cpu(cpu).cpustat.nice : 116
110 0); 117 return ret;
111} 118}
112 119
113/************************** sysfs interface ************************/ 120/************************** sysfs interface ************************/
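
Editor's note: the rewritten get_cpu_idle_time() computes the same sum as the old ternary, just with an explicit add_nice term. A worked example with made-up tick counts to show the effect of ignore_nice:

/*
 * cpustat.idle = 700, cpustat.iowait = 50, cpustat.nice = 100 (ticks)
 *   ignore_nice = 0:  idle time = 700 + 50       = 750
 *   ignore_nice = 1:  idle time = 700 + 50 + 100 = 850
 * With ignore_nice set, time spent in niced tasks counts as idle, so
 * niced load alone will not push the frequency up.
 */
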
@@ -452,6 +459,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
452 unsigned int cpu = policy->cpu; 459 unsigned int cpu = policy->cpu;
453 struct cpu_dbs_info_s *this_dbs_info; 460 struct cpu_dbs_info_s *this_dbs_info;
454 unsigned int j; 461 unsigned int j;
462 int rc;
455 463
456 this_dbs_info = &per_cpu(cpu_dbs_info, cpu); 464 this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
457 465
@@ -468,6 +476,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
468 break; 476 break;
469 477
470 mutex_lock(&dbs_mutex); 478 mutex_lock(&dbs_mutex);
479
480 rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
481 if (rc) {
482 mutex_unlock(&dbs_mutex);
483 return rc;
484 }
485
471 for_each_cpu_mask(j, policy->cpus) { 486 for_each_cpu_mask(j, policy->cpus) {
472 struct cpu_dbs_info_s *j_dbs_info; 487 struct cpu_dbs_info_s *j_dbs_info;
473 j_dbs_info = &per_cpu(cpu_dbs_info, j); 488 j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -480,7 +495,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
480 this_dbs_info->enable = 1; 495 this_dbs_info->enable = 1;
481 this_dbs_info->down_skip = 0; 496 this_dbs_info->down_skip = 0;
482 this_dbs_info->requested_freq = policy->cur; 497 this_dbs_info->requested_freq = policy->cur;
483 sysfs_create_group(&policy->kobj, &dbs_attr_group); 498
484 dbs_enable++; 499 dbs_enable++;
485 /* 500 /*
486 * Start the timerschedule work, when this governor 501 * Start the timerschedule work, when this governor
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index e1cc5113c2ae..f697449327c6 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -41,8 +41,10 @@
41static unsigned int def_sampling_rate; 41static unsigned int def_sampling_rate;
42#define MIN_SAMPLING_RATE_RATIO (2) 42#define MIN_SAMPLING_RATE_RATIO (2)
43/* for correct statistics, we need at least 10 ticks between each measure */ 43/* for correct statistics, we need at least 10 ticks between each measure */
44#define MIN_STAT_SAMPLING_RATE (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10)) 44#define MIN_STAT_SAMPLING_RATE \
45#define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) 45 (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
46#define MIN_SAMPLING_RATE \
47 (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
46#define MAX_SAMPLING_RATE (500 * def_sampling_rate) 48#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
47#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) 49#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
48#define TRANSITION_LATENCY_LIMIT (10 * 1000) 50#define TRANSITION_LATENCY_LIMIT (10 * 1000)
@@ -206,7 +208,8 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
206 ret = sscanf(buf, "%u", &input); 208 ret = sscanf(buf, "%u", &input);
207 209
208 mutex_lock(&dbs_mutex); 210 mutex_lock(&dbs_mutex);
209 if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) { 211 if (ret != 1 || input > MAX_SAMPLING_RATE
212 || input < MIN_SAMPLING_RATE) {
210 mutex_unlock(&dbs_mutex); 213 mutex_unlock(&dbs_mutex);
211 return -EINVAL; 214 return -EINVAL;
212 } 215 }
@@ -397,8 +400,15 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
397 * policy. To be safe, we focus 10 points under the threshold. 400 * policy. To be safe, we focus 10 points under the threshold.
398 */ 401 */
399 if (load < (dbs_tuners_ins.up_threshold - 10)) { 402 if (load < (dbs_tuners_ins.up_threshold - 10)) {
400 unsigned int freq_next = (policy->cur * load) / 403 unsigned int freq_next, freq_cur;
404
405 freq_cur = cpufreq_driver_getavg(policy);
406 if (!freq_cur)
407 freq_cur = policy->cur;
408
409 freq_next = (freq_cur * load) /
401 (dbs_tuners_ins.up_threshold - 10); 410 (dbs_tuners_ins.up_threshold - 10);
411
402 if (!dbs_tuners_ins.powersave_bias) { 412 if (!dbs_tuners_ins.powersave_bias) {
403 __cpufreq_driver_target(policy, freq_next, 413 __cpufreq_driver_target(policy, freq_next,
404 CPUFREQ_RELATION_L); 414 CPUFREQ_RELATION_L);
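
Editor's note: with this change the downscaling target is computed from the averaged frequency reported by the driver rather than the nominal policy->cur. A worked example with illustrative numbers:

/*
 * up_threshold = 80, measured load = 35 %, cpufreq_driver_getavg() = 1600000 kHz
 *   freq_next = (1600000 * 35) / (80 - 10) = 800000 kHz
 * i.e. the frequency is lowered until the same work would load the CPU to
 * about 70 %, ten points under up_threshold, as the comment above says.
 */
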
@@ -472,6 +482,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
472 unsigned int cpu = policy->cpu; 482 unsigned int cpu = policy->cpu;
473 struct cpu_dbs_info_s *this_dbs_info; 483 struct cpu_dbs_info_s *this_dbs_info;
474 unsigned int j; 484 unsigned int j;
485 int rc;
475 486
476 this_dbs_info = &per_cpu(cpu_dbs_info, cpu); 487 this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
477 488
@@ -494,12 +505,23 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
494 if (dbs_enable == 1) { 505 if (dbs_enable == 1) {
495 kondemand_wq = create_workqueue("kondemand"); 506 kondemand_wq = create_workqueue("kondemand");
496 if (!kondemand_wq) { 507 if (!kondemand_wq) {
497 printk(KERN_ERR "Creation of kondemand failed\n"); 508 printk(KERN_ERR
509 "Creation of kondemand failed\n");
498 dbs_enable--; 510 dbs_enable--;
499 mutex_unlock(&dbs_mutex); 511 mutex_unlock(&dbs_mutex);
500 return -ENOSPC; 512 return -ENOSPC;
501 } 513 }
502 } 514 }
515
516 rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
517 if (rc) {
518 if (dbs_enable == 1)
519 destroy_workqueue(kondemand_wq);
520 dbs_enable--;
521 mutex_unlock(&dbs_mutex);
522 return rc;
523 }
524
503 for_each_cpu_mask(j, policy->cpus) { 525 for_each_cpu_mask(j, policy->cpus) {
504 struct cpu_dbs_info_s *j_dbs_info; 526 struct cpu_dbs_info_s *j_dbs_info;
505 j_dbs_info = &per_cpu(cpu_dbs_info, j); 527 j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -509,7 +531,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
509 j_dbs_info->prev_cpu_wall = get_jiffies_64(); 531 j_dbs_info->prev_cpu_wall = get_jiffies_64();
510 } 532 }
511 this_dbs_info->enable = 1; 533 this_dbs_info->enable = 1;
512 sysfs_create_group(&policy->kobj, &dbs_attr_group);
513 /* 534 /*
514 * Start the timerschedule work, when this governor 535 * Start the timerschedule work, when this governor
515 * is used for first time 536 * is used for first time
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index de91e3371ef8..e8e1451ef1c1 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -15,7 +15,8 @@
15#include <linux/cpufreq.h> 15#include <linux/cpufreq.h>
16#include <linux/init.h> 16#include <linux/init.h>
17 17
18#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "performance", msg) 18#define dprintk(msg...) \
19 cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "performance", msg)
19 20
20 21
21static int cpufreq_governor_performance(struct cpufreq_policy *policy, 22static int cpufreq_governor_performance(struct cpufreq_policy *policy,
@@ -24,8 +25,10 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy,
24 switch (event) { 25 switch (event) {
25 case CPUFREQ_GOV_START: 26 case CPUFREQ_GOV_START:
26 case CPUFREQ_GOV_LIMITS: 27 case CPUFREQ_GOV_LIMITS:
27 dprintk("setting to %u kHz because of event %u\n", policy->max, event); 28 dprintk("setting to %u kHz because of event %u\n",
28 __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); 29 policy->max, event);
30 __cpufreq_driver_target(policy, policy->max,
31 CPUFREQ_RELATION_H);
29 break; 32 break;
30 default: 33 default:
31 break; 34 break;
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index 0a2596044e65..13fe06b94b0a 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -15,7 +15,8 @@
15#include <linux/cpufreq.h> 15#include <linux/cpufreq.h>
16#include <linux/init.h> 16#include <linux/init.h>
17 17
18#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "powersave", msg) 18#define dprintk(msg...) \
19 cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "powersave", msg)
19 20
20static int cpufreq_governor_powersave(struct cpufreq_policy *policy, 21static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
21 unsigned int event) 22 unsigned int event)
@@ -23,8 +24,10 @@ static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
23 switch (event) { 24 switch (event) {
24 case CPUFREQ_GOV_START: 25 case CPUFREQ_GOV_START:
25 case CPUFREQ_GOV_LIMITS: 26 case CPUFREQ_GOV_LIMITS:
26 dprintk("setting to %u kHz because of event %u\n", policy->min, event); 27 dprintk("setting to %u kHz because of event %u\n",
27 __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); 28 policy->min, event);
29 __cpufreq_driver_target(policy, policy->min,
30 CPUFREQ_RELATION_L);
28 break; 31 break;
29 default: 32 default:
30 break; 33 break;
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index c2ecc599dc5f..6742b1adf2c8 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -351,8 +351,8 @@ __init cpufreq_stats_init(void)
351 351
352 register_hotcpu_notifier(&cpufreq_stat_cpu_notifier); 352 register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
353 for_each_online_cpu(cpu) { 353 for_each_online_cpu(cpu) {
354 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE, 354 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier,
355 (void *)(long)cpu); 355 CPU_ONLINE, (void *)(long)cpu);
356 } 356 }
357 return 0; 357 return 0;
358} 358}
@@ -368,14 +368,15 @@ __exit cpufreq_stats_exit(void)
368 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); 368 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
369 lock_cpu_hotplug(); 369 lock_cpu_hotplug();
370 for_each_online_cpu(cpu) { 370 for_each_online_cpu(cpu) {
371 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_DEAD, 371 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier,
372 (void *)(long)cpu); 372 CPU_DEAD, (void *)(long)cpu);
373 } 373 }
374 unlock_cpu_hotplug(); 374 unlock_cpu_hotplug();
375} 375}
376 376
377MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>"); 377MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>");
378MODULE_DESCRIPTION ("'cpufreq_stats' - A driver to export cpufreq stats through sysfs filesystem"); 378MODULE_DESCRIPTION ("'cpufreq_stats' - A driver to export cpufreq stats"
 379        " through sysfs filesystem");
379MODULE_LICENSE ("GPL"); 380MODULE_LICENSE ("GPL");
380 381
381module_init(cpufreq_stats_init); 382module_init(cpufreq_stats_init);
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index a06c204589cd..2a4eb0bfaf30 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -131,19 +131,26 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
131 unsigned int event) 131 unsigned int event)
132{ 132{
133 unsigned int cpu = policy->cpu; 133 unsigned int cpu = policy->cpu;
134 int rc = 0;
135
134 switch (event) { 136 switch (event) {
135 case CPUFREQ_GOV_START: 137 case CPUFREQ_GOV_START:
136 if (!cpu_online(cpu)) 138 if (!cpu_online(cpu))
137 return -EINVAL; 139 return -EINVAL;
138 BUG_ON(!policy->cur); 140 BUG_ON(!policy->cur);
139 mutex_lock(&userspace_mutex); 141 mutex_lock(&userspace_mutex);
142 rc = sysfs_create_file (&policy->kobj,
143 &freq_attr_scaling_setspeed.attr);
144 if (rc)
145 goto start_out;
146
140 cpu_is_managed[cpu] = 1; 147 cpu_is_managed[cpu] = 1;
141 cpu_min_freq[cpu] = policy->min; 148 cpu_min_freq[cpu] = policy->min;
142 cpu_max_freq[cpu] = policy->max; 149 cpu_max_freq[cpu] = policy->max;
143 cpu_cur_freq[cpu] = policy->cur; 150 cpu_cur_freq[cpu] = policy->cur;
144 cpu_set_freq[cpu] = policy->cur; 151 cpu_set_freq[cpu] = policy->cur;
145 sysfs_create_file (&policy->kobj, &freq_attr_scaling_setspeed.attr);
146 dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]); 152 dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]);
153start_out:
147 mutex_unlock(&userspace_mutex); 154 mutex_unlock(&userspace_mutex);
148 break; 155 break;
149 case CPUFREQ_GOV_STOP: 156 case CPUFREQ_GOV_STOP:
@@ -180,7 +187,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
180 mutex_unlock(&userspace_mutex); 187 mutex_unlock(&userspace_mutex);
181 break; 188 break;
182 } 189 }
183 return 0; 190 return rc;
184} 191}
185 192
186 193
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 551f4ccf87fd..e7490925fdcf 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -9,7 +9,8 @@
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/cpufreq.h> 10#include <linux/cpufreq.h>
11 11
12#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "freq-table", msg) 12#define dprintk(msg...) \
13 cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "freq-table", msg)
13 14
14/********************************************************************* 15/*********************************************************************
15 * FREQUENCY TABLE HELPERS * 16 * FREQUENCY TABLE HELPERS *
@@ -29,7 +30,8 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
29 30
30 continue; 31 continue;
31 } 32 }
32 dprintk("table entry %u: %u kHz, %u index\n", i, freq, table[i].index); 33 dprintk("table entry %u: %u kHz, %u index\n",
34 i, freq, table[i].index);
33 if (freq < min_freq) 35 if (freq < min_freq)
34 min_freq = freq; 36 min_freq = freq;
35 if (freq > max_freq) 37 if (freq > max_freq)
@@ -54,13 +56,14 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
54 unsigned int i; 56 unsigned int i;
55 unsigned int count = 0; 57 unsigned int count = 0;
56 58
57 dprintk("request for verification of policy (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu); 59 dprintk("request for verification of policy (%u - %u kHz) for cpu %u\n",
60 policy->min, policy->max, policy->cpu);
58 61
59 if (!cpu_online(policy->cpu)) 62 if (!cpu_online(policy->cpu))
60 return -EINVAL; 63 return -EINVAL;
61 64
62 cpufreq_verify_within_limits(policy, 65 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
63 policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); 66 policy->cpuinfo.max_freq);
64 67
65 for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { 68 for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
66 unsigned int freq = table[i].frequency; 69 unsigned int freq = table[i].frequency;
@@ -75,10 +78,11 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
75 if (!count) 78 if (!count)
76 policy->max = next_larger; 79 policy->max = next_larger;
77 80
78 cpufreq_verify_within_limits(policy, 81 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
79 policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); 82 policy->cpuinfo.max_freq);
80 83
81 dprintk("verification lead to (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu); 84 dprintk("verification lead to (%u - %u kHz) for cpu %u\n",
85 policy->min, policy->max, policy->cpu);
82 86
83 return 0; 87 return 0;
84} 88}
@@ -101,7 +105,8 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
101 }; 105 };
102 unsigned int i; 106 unsigned int i;
103 107
104 dprintk("request for target %u kHz (relation: %u) for cpu %u\n", target_freq, relation, policy->cpu); 108 dprintk("request for target %u kHz (relation: %u) for cpu %u\n",
109 target_freq, relation, policy->cpu);
105 110
106 switch (relation) { 111 switch (relation) {
107 case CPUFREQ_RELATION_H: 112 case CPUFREQ_RELATION_H:
@@ -192,7 +197,10 @@ static ssize_t show_available_freqs (struct cpufreq_policy *policy, char *buf)
192} 197}
193 198
194struct freq_attr cpufreq_freq_attr_scaling_available_freqs = { 199struct freq_attr cpufreq_freq_attr_scaling_available_freqs = {
195 .attr = { .name = "scaling_available_frequencies", .mode = 0444, .owner=THIS_MODULE }, 200 .attr = { .name = "scaling_available_frequencies",
201 .mode = 0444,
202 .owner=THIS_MODULE
203 },
196 .show = show_available_freqs, 204 .show = show_available_freqs,
197}; 205};
198EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); 206EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
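
Editor's note: these helpers all walk a driver-supplied struct cpufreq_frequency_table. An illustrative table follows (frequencies and indexes are made up); entries marked CPUFREQ_ENTRY_INVALID are skipped and CPUFREQ_TABLE_END terminates the walk, matching the loops shown above. A driver would typically hand such a table to cpufreq_frequency_table_cpuinfo() from its ->init() callback.

#include <linux/cpufreq.h>

static struct cpufreq_frequency_table example_table[] = {
        { .index = 0, .frequency = 600000 },                    /* kHz */
        { .index = 1, .frequency = 800000 },
        { .index = 2, .frequency = CPUFREQ_ENTRY_INVALID },     /* skipped */
        { .index = 3, .frequency = 1000000 },
        { .frequency = CPUFREQ_TABLE_END },
};
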
diff --git a/drivers/fc4/fc.c b/drivers/fc4/fc.c
index ca4e67a022d0..22b62b3cd14e 100644
--- a/drivers/fc4/fc.c
+++ b/drivers/fc4/fc.c
@@ -266,7 +266,7 @@ static void fcp_report_map_done(fc_channel *fc, int i, int status)
266 printk ("FC: Bad magic from REPORT_AL_MAP on %s - %08x\n", fc->name, p->magic); 266 printk ("FC: Bad magic from REPORT_AL_MAP on %s - %08x\n", fc->name, p->magic);
267 fc->state = FC_STATE_OFFLINE; 267 fc->state = FC_STATE_OFFLINE;
268 } else { 268 } else {
269 fc->posmap = (fcp_posmap *)kzalloc(sizeof(fcp_posmap)+p->len, GFP_KERNEL); 269 fc->posmap = kzalloc(sizeof(fcp_posmap)+p->len, GFP_KERNEL);
270 if (!fc->posmap) { 270 if (!fc->posmap) {
271 printk("FC: Not enough memory, offlining channel\n"); 271 printk("FC: Not enough memory, offlining channel\n");
272 fc->state = FC_STATE_OFFLINE; 272 fc->state = FC_STATE_OFFLINE;
@@ -355,7 +355,7 @@ void fcp_register(fc_channel *fc, u8 type, int unregister)
355 for (i = fc->can_queue; i < fc->scsi_bitmap_end; i++) 355 for (i = fc->can_queue; i < fc->scsi_bitmap_end; i++)
356 set_bit (i, fc->scsi_bitmap); 356 set_bit (i, fc->scsi_bitmap);
357 fc->scsi_free = fc->can_queue; 357 fc->scsi_free = fc->can_queue;
358 fc->cmd_slots = (fcp_cmnd **)kzalloc(slots * sizeof(fcp_cmnd*), GFP_KERNEL); 358 fc->cmd_slots = kzalloc(slots * sizeof(fcp_cmnd*), GFP_KERNEL);
359 fc->abort_count = 0; 359 fc->abort_count = 0;
360 } else { 360 } else {
361 fc->scsi_name[0] = 0; 361 fc->scsi_name[0] = 0;
@@ -933,7 +933,7 @@ int fcp_scsi_dev_reset(struct scsi_cmnd *SCpnt)
933 DECLARE_MUTEX_LOCKED(sem); 933 DECLARE_MUTEX_LOCKED(sem);
934 934
935 if (!fc->rst_pkt) { 935 if (!fc->rst_pkt) {
936 fc->rst_pkt = (struct scsi_cmnd *) kmalloc(sizeof(SCpnt), GFP_KERNEL); 936 fc->rst_pkt = kmalloc(sizeof(SCpnt), GFP_KERNEL);
937 if (!fc->rst_pkt) return FAILED; 937 if (!fc->rst_pkt) return FAILED;
938 938
939 fcmd = FCP_CMND(fc->rst_pkt); 939 fcmd = FCP_CMND(fc->rst_pkt);
@@ -1107,7 +1107,7 @@ int fc_do_plogi(fc_channel *fc, unsigned char alpa, fc_wwn *node, fc_wwn *nport)
1107 logi *l; 1107 logi *l;
1108 int status; 1108 int status;
1109 1109
1110 l = (logi *)kzalloc(2 * sizeof(logi), GFP_KERNEL); 1110 l = kzalloc(2 * sizeof(logi), GFP_KERNEL);
1111 if (!l) return -ENOMEM; 1111 if (!l) return -ENOMEM;
1112 l->code = LS_PLOGI; 1112 l->code = LS_PLOGI;
1113 memcpy (&l->nport_wwn, &fc->wwn_nport, sizeof(fc_wwn)); 1113 memcpy (&l->nport_wwn, &fc->wwn_nport, sizeof(fc_wwn));
@@ -1141,7 +1141,7 @@ int fc_do_prli(fc_channel *fc, unsigned char alpa)
1141 prli *p; 1141 prli *p;
1142 int status; 1142 int status;
1143 1143
1144 p = (prli *)kzalloc(2 * sizeof(prli), GFP_KERNEL); 1144 p = kzalloc(2 * sizeof(prli), GFP_KERNEL);
1145 if (!p) return -ENOMEM; 1145 if (!p) return -ENOMEM;
1146 p->code = LS_PRLI; 1146 p->code = LS_PRLI;
1147 p->params[0] = 0x08002000; 1147 p->params[0] = 0x08002000;
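
Editor's note: the fc4 hunks only drop redundant casts. kzalloc() and kmalloc() return void *, which converts implicitly to any object pointer type in C, so the casts add noise and can mask a missing prototype. A minimal before/after sketch with a made-up type:

#include <linux/slab.h>

struct foo { int a; };          /* placeholder type, for illustration */

static struct foo *alloc_foo(void)
{
        /* old style:  return (struct foo *)kzalloc(sizeof(struct foo), GFP_KERNEL); */
        /* preferred:  the void * result converts implicitly */
        return kzalloc(sizeof(struct foo), GFP_KERNEL);
}
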
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index e76d91906c99..891ef6d0b1bf 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -106,6 +106,31 @@ config SENSORS_K8TEMP
106 This driver can also be built as a module. If so, the module 106 This driver can also be built as a module. If so, the module
107 will be called k8temp. 107 will be called k8temp.
108 108
109config SENSORS_AMS
110 tristate "Apple Motion Sensor driver"
111 depends on HWMON && PPC_PMAC && !PPC64 && INPUT && ((ADB_PMU && I2C = y) || (ADB_PMU && !I2C) || I2C) && EXPERIMENTAL
112 help
113 Support for the motion sensor included in PowerBooks. Includes
114 implementations for PMU and I2C.
115
116 This driver can also be built as a module. If so, the module
117 will be called ams.
118
119config SENSORS_AMS_PMU
120 bool "PMU variant"
121 depends on SENSORS_AMS && ADB_PMU
122 default y
123 help
124 PMU variant of motion sensor, found in late 2005 PowerBooks.
125
126config SENSORS_AMS_I2C
127 bool "I2C variant"
128 depends on SENSORS_AMS && I2C
129 default y
130 help
131 I2C variant of motion sensor, found in early 2005 PowerBooks and
132 iBooks.
133
109config SENSORS_ASB100 134config SENSORS_ASB100
110 tristate "Asus ASB100 Bach" 135 tristate "Asus ASB100 Bach"
111 depends on HWMON && I2C && EXPERIMENTAL 136 depends on HWMON && I2C && EXPERIMENTAL
@@ -142,11 +167,12 @@ config SENSORS_DS1621
142 will be called ds1621. 167 will be called ds1621.
143 168
144config SENSORS_F71805F 169config SENSORS_F71805F
145 tristate "Fintek F71805F/FG" 170 tristate "Fintek F71805F/FG and F71872F/FG"
146 depends on HWMON && EXPERIMENTAL 171 depends on HWMON && EXPERIMENTAL
147 help 172 help
148 If you say yes here you get support for hardware monitoring 173 If you say yes here you get support for hardware monitoring
149 features of the Fintek F71805F/FG chips. 174 features of the Fintek F71805F/FG and F71872F/FG Super-I/O
175 chips.
150 176
151 This driver can also be built as a module. If so, the module 177 This driver can also be built as a module. If so, the module
152 will be called f71805f. 178 will be called f71805f.
@@ -353,6 +379,19 @@ config SENSORS_PC87360
353 This driver can also be built as a module. If so, the module 379 This driver can also be built as a module. If so, the module
354 will be called pc87360. 380 will be called pc87360.
355 381
382config SENSORS_PC87427
383 tristate "National Semiconductor PC87427"
384 depends on HWMON && EXPERIMENTAL
385 help
386 If you say yes here you get access to the hardware monitoring
387 functions of the National Semiconductor PC87427 Super-I/O chip.
388 The chip has two distinct logical devices, one for fan speed
389 monitoring and control, and one for voltage and temperature
390 monitoring. Only fan speed monitoring is supported right now.
391
392 This driver can also be built as a module. If so, the module
393 will be called pc87427.
394
356config SENSORS_SIS5595 395config SENSORS_SIS5595
357 tristate "Silicon Integrated Systems Corp. SiS5595" 396 tristate "Silicon Integrated Systems Corp. SiS5595"
358 depends on HWMON && I2C && PCI && EXPERIMENTAL 397 depends on HWMON && I2C && PCI && EXPERIMENTAL
@@ -474,6 +513,16 @@ config SENSORS_W83792D
474 This driver can also be built as a module. If so, the module 513 This driver can also be built as a module. If so, the module
475 will be called w83792d. 514 will be called w83792d.
476 515
516config SENSORS_W83793
517 tristate "Winbond W83793"
518 depends on HWMON && I2C && EXPERIMENTAL
519 help
520 If you say yes here you get support for the Winbond W83793
521 hardware monitoring chip.
522
523 This driver can also be built as a module. If so, the module
524 will be called w83793.
525
477config SENSORS_W83L785TS 526config SENSORS_W83L785TS
478 tristate "Winbond W83L785TS-S" 527 tristate "Winbond W83L785TS-S"
479 depends on HWMON && I2C && EXPERIMENTAL 528 depends on HWMON && I2C && EXPERIMENTAL
@@ -527,6 +576,9 @@ config SENSORS_HDAPS
527 This driver also provides an absolute input class device, allowing 576 This driver also provides an absolute input class device, allowing
528 the laptop to act as a pinball machine-esque joystick. 577 the laptop to act as a pinball machine-esque joystick.
529 578
 579	  If your ThinkPad is not recognized by the driver, please update to the
 580	  latest BIOS. This is especially the case for some R52 ThinkPads.
581
530 Say Y here if you have an applicable laptop and want to experience 582 Say Y here if you have an applicable laptop and want to experience
531 the awesome power of hdaps. 583 the awesome power of hdaps.
532 584
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index af01cc64f7d2..31661124271e 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_HWMON_VID) += hwmon-vid.o
9obj-$(CONFIG_SENSORS_ASB100) += asb100.o 9obj-$(CONFIG_SENSORS_ASB100) += asb100.o
10obj-$(CONFIG_SENSORS_W83627HF) += w83627hf.o 10obj-$(CONFIG_SENSORS_W83627HF) += w83627hf.o
11obj-$(CONFIG_SENSORS_W83792D) += w83792d.o 11obj-$(CONFIG_SENSORS_W83792D) += w83792d.o
12obj-$(CONFIG_SENSORS_W83793) += w83793.o
12obj-$(CONFIG_SENSORS_W83781D) += w83781d.o 13obj-$(CONFIG_SENSORS_W83781D) += w83781d.o
13obj-$(CONFIG_SENSORS_W83791D) += w83791d.o 14obj-$(CONFIG_SENSORS_W83791D) += w83791d.o
14 15
@@ -18,6 +19,7 @@ obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o
18obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o 19obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o
19obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o 20obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o
20obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o 21obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o
22obj-$(CONFIG_SENSORS_AMS) += ams/
21obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o 23obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
22obj-$(CONFIG_SENSORS_DS1621) += ds1621.o 24obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
23obj-$(CONFIG_SENSORS_F71805F) += f71805f.o 25obj-$(CONFIG_SENSORS_F71805F) += f71805f.o
@@ -41,6 +43,7 @@ obj-$(CONFIG_SENSORS_LM90) += lm90.o
41obj-$(CONFIG_SENSORS_LM92) += lm92.o 43obj-$(CONFIG_SENSORS_LM92) += lm92.o
42obj-$(CONFIG_SENSORS_MAX1619) += max1619.o 44obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
43obj-$(CONFIG_SENSORS_PC87360) += pc87360.o 45obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
46obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
44obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o 47obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o
45obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o 48obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o
46obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o 49obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o
diff --git a/drivers/hwmon/ams/Makefile b/drivers/hwmon/ams/Makefile
new file mode 100644
index 000000000000..41c95b2089dc
--- /dev/null
+++ b/drivers/hwmon/ams/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for Apple Motion Sensor driver
3#
4
5ams-y := ams-core.o ams-input.o
6ams-$(CONFIG_SENSORS_AMS_PMU) += ams-pmu.o
7ams-$(CONFIG_SENSORS_AMS_I2C) += ams-i2c.o
8obj-$(CONFIG_SENSORS_AMS) += ams.o
diff --git a/drivers/hwmon/ams/ams-core.c b/drivers/hwmon/ams/ams-core.c
new file mode 100644
index 000000000000..f1f0f5d0442c
--- /dev/null
+++ b/drivers/hwmon/ams/ams-core.c
@@ -0,0 +1,265 @@
1/*
2 * Apple Motion Sensor driver
3 *
4 * Copyright (C) 2005 Stelian Pop (stelian@popies.net)
5 * Copyright (C) 2006 Michael Hanselmann (linux-kernel@hansmi.ch)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20 */
21
22#include <linux/module.h>
23#include <linux/types.h>
24#include <linux/errno.h>
25#include <linux/init.h>
26#include <linux/module.h>
27#include <asm/pmac_pfunc.h>
28#include <asm/of_platform.h>
29
30#include "ams.h"
31
32/* There is only one motion sensor per machine */
33struct ams ams_info;
34
35static unsigned int verbose;
36module_param(verbose, bool, 0644);
37MODULE_PARM_DESC(verbose, "Show free falls and shocks in kernel output");
38
39/* Call with ams_info.lock held! */
40void ams_sensors(s8 *x, s8 *y, s8 *z)
41{
42 u32 orient = ams_info.vflag? ams_info.orient1 : ams_info.orient2;
43
44 if (orient & 0x80)
45 /* X and Y swapped */
46 ams_info.get_xyz(y, x, z);
47 else
48 ams_info.get_xyz(x, y, z);
49
50 if (orient & 0x04)
51 *z = ~(*z);
52 if (orient & 0x02)
53 *y = ~(*y);
54 if (orient & 0x01)
55 *x = ~(*x);
56}
57
58static ssize_t ams_show_current(struct device *dev,
59 struct device_attribute *attr, char *buf)
60{
61 s8 x, y, z;
62
63 mutex_lock(&ams_info.lock);
64 ams_sensors(&x, &y, &z);
65 mutex_unlock(&ams_info.lock);
66
67 return snprintf(buf, PAGE_SIZE, "%d %d %d\n", x, y, z);
68}
69
70static DEVICE_ATTR(current, S_IRUGO, ams_show_current, NULL);
71
72static void ams_handle_irq(void *data)
73{
74 enum ams_irq irq = *((enum ams_irq *)data);
75
76 spin_lock(&ams_info.irq_lock);
77
78 ams_info.worker_irqs |= irq;
79 schedule_work(&ams_info.worker);
80
81 spin_unlock(&ams_info.irq_lock);
82}
83
84static enum ams_irq ams_freefall_irq_data = AMS_IRQ_FREEFALL;
85static struct pmf_irq_client ams_freefall_client = {
86 .owner = THIS_MODULE,
87 .handler = ams_handle_irq,
88 .data = &ams_freefall_irq_data,
89};
90
91static enum ams_irq ams_shock_irq_data = AMS_IRQ_SHOCK;
92static struct pmf_irq_client ams_shock_client = {
93 .owner = THIS_MODULE,
94 .handler = ams_handle_irq,
95 .data = &ams_shock_irq_data,
96};
97
98/* Once hard disk parking is implemented in the kernel, this function can
99 * trigger it.
100 */
101static void ams_worker(struct work_struct *work)
102{
103 mutex_lock(&ams_info.lock);
104
105 if (ams_info.has_device) {
106 unsigned long flags;
107
108 spin_lock_irqsave(&ams_info.irq_lock, flags);
109
110 if (ams_info.worker_irqs & AMS_IRQ_FREEFALL) {
111 if (verbose)
112 printk(KERN_INFO "ams: freefall detected!\n");
113
114 ams_info.worker_irqs &= ~AMS_IRQ_FREEFALL;
115
116 /* we must call this with interrupts enabled */
117 spin_unlock_irqrestore(&ams_info.irq_lock, flags);
118 ams_info.clear_irq(AMS_IRQ_FREEFALL);
119 spin_lock_irqsave(&ams_info.irq_lock, flags);
120 }
121
122 if (ams_info.worker_irqs & AMS_IRQ_SHOCK) {
123 if (verbose)
124 printk(KERN_INFO "ams: shock detected!\n");
125
126 ams_info.worker_irqs &= ~AMS_IRQ_SHOCK;
127
128 /* we must call this with interrupts enabled */
129 spin_unlock_irqrestore(&ams_info.irq_lock, flags);
130 ams_info.clear_irq(AMS_IRQ_SHOCK);
131 spin_lock_irqsave(&ams_info.irq_lock, flags);
132 }
133
134 spin_unlock_irqrestore(&ams_info.irq_lock, flags);
135 }
136
137 mutex_unlock(&ams_info.lock);
138}
139
140/* Call with ams_info.lock held! */
141int ams_sensor_attach(void)
142{
143 int result;
144 u32 *prop;
145
146 /* Get orientation */
147 prop = (u32*)get_property(ams_info.of_node, "orientation", NULL);
148 if (!prop)
149 return -ENODEV;
150 ams_info.orient1 = *prop;
151 ams_info.orient2 = *(prop + 1);
152
153 /* Register freefall interrupt handler */
154 result = pmf_register_irq_client(ams_info.of_node,
155 "accel-int-1",
156 &ams_freefall_client);
157 if (result < 0)
158 return -ENODEV;
159
160 /* Reset saved irqs */
161 ams_info.worker_irqs = 0;
162
163 /* Register shock interrupt handler */
164 result = pmf_register_irq_client(ams_info.of_node,
165 "accel-int-2",
166 &ams_shock_client);
167 if (result < 0)
168 goto release_freefall;
169
170 /* Create device */
171 ams_info.of_dev = of_platform_device_create(ams_info.of_node, "ams", NULL);
172 if (!ams_info.of_dev) {
173 result = -ENODEV;
174 goto release_shock;
175 }
176
177 /* Create attributes */
178 result = device_create_file(&ams_info.of_dev->dev, &dev_attr_current);
179 if (result)
180 goto release_of;
181
182 ams_info.vflag = !!(ams_info.get_vendor() & 0x10);
183
184 /* Init input device */
185 result = ams_input_init();
186 if (result)
187 goto release_device_file;
188
189 return result;
190release_device_file:
191 device_remove_file(&ams_info.of_dev->dev, &dev_attr_current);
192release_of:
193 of_device_unregister(ams_info.of_dev);
194release_shock:
195 pmf_unregister_irq_client(&ams_shock_client);
196release_freefall:
197 pmf_unregister_irq_client(&ams_freefall_client);
198 return result;
199}
200
201int __init ams_init(void)
202{
203 struct device_node *np;
204
205 spin_lock_init(&ams_info.irq_lock);
206 mutex_init(&ams_info.lock);
207 INIT_WORK(&ams_info.worker, ams_worker);
208
209#ifdef CONFIG_SENSORS_AMS_I2C
210 np = of_find_node_by_name(NULL, "accelerometer");
211 if (np && device_is_compatible(np, "AAPL,accelerometer_1"))
212 /* Found I2C motion sensor */
213 return ams_i2c_init(np);
214#endif
215
216#ifdef CONFIG_SENSORS_AMS_PMU
217 np = of_find_node_by_name(NULL, "sms");
218 if (np && device_is_compatible(np, "sms"))
219 /* Found PMU motion sensor */
220 return ams_pmu_init(np);
221#endif
222
223 printk(KERN_ERR "ams: No motion sensor found.\n");
224
225 return -ENODEV;
226}
227
228void ams_exit(void)
229{
230 mutex_lock(&ams_info.lock);
231
232 if (ams_info.has_device) {
233 /* Remove input device */
234 ams_input_exit();
235
236 /* Shut down implementation */
237 ams_info.exit();
238
239 /* Flush interrupt worker
240 *
241 * We do this after ams_info.exit(), because an interrupt might
242 * have arrived before disabling them.
243 */
244 flush_scheduled_work();
245
246 /* Remove attributes */
247 device_remove_file(&ams_info.of_dev->dev, &dev_attr_current);
248
249 /* Remove device */
250 of_device_unregister(ams_info.of_dev);
251
252 /* Remove handler */
253 pmf_unregister_irq_client(&ams_shock_client);
254 pmf_unregister_irq_client(&ams_freefall_client);
255 }
256
257 mutex_unlock(&ams_info.lock);
258}
259
260MODULE_AUTHOR("Stelian Pop, Michael Hanselmann");
261MODULE_DESCRIPTION("Apple Motion Sensor driver");
262MODULE_LICENSE("GPL");
263
264module_init(ams_init);
265module_exit(ams_exit);
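
Editor's note: ams_sensors() above decodes the firmware "orientation" word bit by bit. A worked example with an illustrative value (the real value comes from the device tree and differs between machines):

/*
 * orient = 0x83:
 *   bit 7 (0x80) set   -> X and Y are read swapped from the sensor
 *   bit 2 (0x04) clear -> Z passed through unchanged
 *   bit 1 (0x02) set   -> Y bitwise-inverted (~y)
 *   bit 0 (0x01) set   -> X bitwise-inverted (~x)
 */
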
diff --git a/drivers/hwmon/ams/ams-i2c.c b/drivers/hwmon/ams/ams-i2c.c
new file mode 100644
index 000000000000..0d24bdfea53e
--- /dev/null
+++ b/drivers/hwmon/ams/ams-i2c.c
@@ -0,0 +1,299 @@
1/*
2 * Apple Motion Sensor driver (I2C variant)
3 *
4 * Copyright (C) 2005 Stelian Pop (stelian@popies.net)
5 * Copyright (C) 2006 Michael Hanselmann (linux-kernel@hansmi.ch)
6 *
7 * Clean room implementation based on the reverse engineered Mac OS X driver by
8 * Johannes Berg <johannes@sipsolutions.net>, documentation available at
9 * http://johannes.sipsolutions.net/PowerBook/Apple_Motion_Sensor_Specification
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 */
16
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/errno.h>
20#include <linux/init.h>
21#include <linux/delay.h>
22
23#include "ams.h"
24
25/* AMS registers */
26#define AMS_COMMAND 0x00 /* command register */
27#define AMS_STATUS 0x01 /* status register */
28#define AMS_CTRL1 0x02 /* read control 1 (number of values) */
29#define AMS_CTRL2 0x03 /* read control 2 (offset?) */
30#define AMS_CTRL3 0x04 /* read control 3 (size of each value?) */
31#define AMS_DATA1 0x05 /* read data 1 */
32#define AMS_DATA2 0x06 /* read data 2 */
33#define AMS_DATA3 0x07 /* read data 3 */
34#define AMS_DATA4 0x08 /* read data 4 */
35#define AMS_DATAX 0x20 /* data X */
36#define AMS_DATAY 0x21 /* data Y */
37#define AMS_DATAZ 0x22 /* data Z */
38#define AMS_FREEFALL 0x24 /* freefall int control */
39#define AMS_SHOCK 0x25 /* shock int control */
40#define AMS_SENSLOW 0x26 /* sensitivity low limit */
41#define AMS_SENSHIGH 0x27 /* sensitivity high limit */
42#define AMS_CTRLX 0x28 /* control X */
43#define AMS_CTRLY 0x29 /* control Y */
44#define AMS_CTRLZ 0x2A /* control Z */
45#define AMS_UNKNOWN1 0x2B /* unknown 1 */
46#define AMS_UNKNOWN2 0x2C /* unknown 2 */
47#define AMS_UNKNOWN3 0x2D /* unknown 3 */
48#define AMS_VENDOR 0x2E /* vendor */
49
50/* AMS commands - use with the AMS_COMMAND register */
51enum ams_i2c_cmd {
52 AMS_CMD_NOOP = 0,
53 AMS_CMD_VERSION,
54 AMS_CMD_READMEM,
55 AMS_CMD_WRITEMEM,
56 AMS_CMD_ERASEMEM,
57 AMS_CMD_READEE,
58 AMS_CMD_WRITEEE,
59 AMS_CMD_RESET,
60 AMS_CMD_START,
61};
62
63static int ams_i2c_attach(struct i2c_adapter *adapter);
64static int ams_i2c_detach(struct i2c_adapter *adapter);
65
66static struct i2c_driver ams_i2c_driver = {
67 .driver = {
68 .name = "ams",
69 .owner = THIS_MODULE,
70 },
71 .attach_adapter = ams_i2c_attach,
72 .detach_adapter = ams_i2c_detach,
73};
74
75static s32 ams_i2c_read(u8 reg)
76{
77 return i2c_smbus_read_byte_data(&ams_info.i2c_client, reg);
78}
79
80static int ams_i2c_write(u8 reg, u8 value)
81{
82 return i2c_smbus_write_byte_data(&ams_info.i2c_client, reg, value);
83}
84
85static int ams_i2c_cmd(enum ams_i2c_cmd cmd)
86{
87 s32 result;
88 int remaining = HZ / 20;
89
90 ams_i2c_write(AMS_COMMAND, cmd);
91 mdelay(5);
92
93 while (remaining) {
94 result = ams_i2c_read(AMS_COMMAND);
95 if (result == 0 || result & 0x80)
96 return 0;
97
98 remaining = schedule_timeout(remaining);
99 }
100
101 return -1;
102}
103
104static void ams_i2c_set_irq(enum ams_irq reg, char enable)
105{
106 if (reg & AMS_IRQ_FREEFALL) {
107 u8 val = ams_i2c_read(AMS_CTRLX);
108 if (enable)
109 val |= 0x80;
110 else
111 val &= ~0x80;
112 ams_i2c_write(AMS_CTRLX, val);
113 }
114
115 if (reg & AMS_IRQ_SHOCK) {
116 u8 val = ams_i2c_read(AMS_CTRLY);
117 if (enable)
118 val |= 0x80;
119 else
120 val &= ~0x80;
121 ams_i2c_write(AMS_CTRLY, val);
122 }
123
124 if (reg & AMS_IRQ_GLOBAL) {
125 u8 val = ams_i2c_read(AMS_CTRLZ);
126 if (enable)
127 val |= 0x80;
128 else
129 val &= ~0x80;
130 ams_i2c_write(AMS_CTRLZ, val);
131 }
132}
133
134static void ams_i2c_clear_irq(enum ams_irq reg)
135{
136 if (reg & AMS_IRQ_FREEFALL)
137 ams_i2c_write(AMS_FREEFALL, 0);
138
139 if (reg & AMS_IRQ_SHOCK)
140 ams_i2c_write(AMS_SHOCK, 0);
141}
142
143static u8 ams_i2c_get_vendor(void)
144{
145 return ams_i2c_read(AMS_VENDOR);
146}
147
148static void ams_i2c_get_xyz(s8 *x, s8 *y, s8 *z)
149{
150 *x = ams_i2c_read(AMS_DATAX);
151 *y = ams_i2c_read(AMS_DATAY);
152 *z = ams_i2c_read(AMS_DATAZ);
153}
154
155static int ams_i2c_attach(struct i2c_adapter *adapter)
156{
157 unsigned long bus;
158 int vmaj, vmin;
159 int result;
160
161 /* There can be only one */
162 if (unlikely(ams_info.has_device))
163 return -ENODEV;
164
165 if (strncmp(adapter->name, "uni-n", 5))
166 return -ENODEV;
167
168 bus = simple_strtoul(adapter->name + 6, NULL, 10);
169 if (bus != ams_info.i2c_bus)
170 return -ENODEV;
171
172 ams_info.i2c_client.addr = ams_info.i2c_address;
173 ams_info.i2c_client.adapter = adapter;
174 ams_info.i2c_client.driver = &ams_i2c_driver;
175 strcpy(ams_info.i2c_client.name, "Apple Motion Sensor");
176
177 if (ams_i2c_cmd(AMS_CMD_RESET)) {
178 printk(KERN_INFO "ams: Failed to reset the device\n");
179 return -ENODEV;
180 }
181
182 if (ams_i2c_cmd(AMS_CMD_START)) {
183 printk(KERN_INFO "ams: Failed to start the device\n");
184 return -ENODEV;
185 }
186
187 /* get version/vendor information */
188 ams_i2c_write(AMS_CTRL1, 0x02);
189 ams_i2c_write(AMS_CTRL2, 0x85);
190 ams_i2c_write(AMS_CTRL3, 0x01);
191
192 ams_i2c_cmd(AMS_CMD_READMEM);
193
194 vmaj = ams_i2c_read(AMS_DATA1);
195 vmin = ams_i2c_read(AMS_DATA2);
196 if (vmaj != 1 || vmin != 52) {
197 printk(KERN_INFO "ams: Incorrect device version (%d.%d)\n",
198 vmaj, vmin);
199 return -ENODEV;
200 }
201
202 ams_i2c_cmd(AMS_CMD_VERSION);
203
204 vmaj = ams_i2c_read(AMS_DATA1);
205 vmin = ams_i2c_read(AMS_DATA2);
206 if (vmaj != 0 || vmin != 1) {
207 printk(KERN_INFO "ams: Incorrect firmware version (%d.%d)\n",
208 vmaj, vmin);
209 return -ENODEV;
210 }
211
212 /* Disable interrupts */
213 ams_i2c_set_irq(AMS_IRQ_ALL, 0);
214
215 result = ams_sensor_attach();
216 if (result < 0)
217 return result;
218
219 /* Set default values */
220 ams_i2c_write(AMS_SENSLOW, 0x15);
221 ams_i2c_write(AMS_SENSHIGH, 0x60);
222 ams_i2c_write(AMS_CTRLX, 0x08);
223 ams_i2c_write(AMS_CTRLY, 0x0F);
224 ams_i2c_write(AMS_CTRLZ, 0x4F);
225 ams_i2c_write(AMS_UNKNOWN1, 0x14);
226
227 /* Clear interrupts */
228 ams_i2c_clear_irq(AMS_IRQ_ALL);
229
230 ams_info.has_device = 1;
231
232 /* Enable interrupts */
233 ams_i2c_set_irq(AMS_IRQ_ALL, 1);
234
235 printk(KERN_INFO "ams: Found I2C based motion sensor\n");
236
237 return 0;
238}
239
240static int ams_i2c_detach(struct i2c_adapter *adapter)
241{
242 if (ams_info.has_device) {
243 /* Disable interrupts */
244 ams_i2c_set_irq(AMS_IRQ_ALL, 0);
245
246 /* Clear interrupts */
247 ams_i2c_clear_irq(AMS_IRQ_ALL);
248
249 printk(KERN_INFO "ams: Unloading\n");
250
251 ams_info.has_device = 0;
252 }
253
254 return 0;
255}
256
257static void ams_i2c_exit(void)
258{
259 i2c_del_driver(&ams_i2c_driver);
260}
261
262int __init ams_i2c_init(struct device_node *np)
263{
264 char *tmp_bus;
265 int result;
266 u32 *prop;
267
268 mutex_lock(&ams_info.lock);
269
270 /* Set implementation stuff */
271 ams_info.of_node = np;
272 ams_info.exit = ams_i2c_exit;
273 ams_info.get_vendor = ams_i2c_get_vendor;
274 ams_info.get_xyz = ams_i2c_get_xyz;
275 ams_info.clear_irq = ams_i2c_clear_irq;
276 ams_info.bustype = BUS_I2C;
277
278 /* look for bus either using "reg" or by path */
279 prop = (u32*)get_property(ams_info.of_node, "reg", NULL);
280 if (!prop) {
281 result = -ENODEV;
282
283 goto exit;
284 }
285
286 tmp_bus = strstr(ams_info.of_node->full_name, "/i2c-bus@");
287 if (tmp_bus)
288 ams_info.i2c_bus = *(tmp_bus + 9) - '0';
289 else
290 ams_info.i2c_bus = ((*prop) >> 8) & 0x0f;
291 ams_info.i2c_address = ((*prop) & 0xff) >> 1;
292
293 result = i2c_add_driver(&ams_i2c_driver);
294
295exit:
296 mutex_unlock(&ams_info.lock);
297
298 return result;
299}
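
Editor's note: ams_i2c_init() above derives the bus and 7-bit client address from the "reg" property unless an "/i2c-bus@N" path component overrides the bus. A worked example with an illustrative property value:

/*
 * reg = 0x00000140:
 *   i2c_bus     = (0x140 >> 8) & 0x0f = 1
 *   i2c_address = (0x140 & 0xff) >> 1 = 0x20
 * The low bit of the 8-bit value is the read/write bit and is dropped to
 * obtain the 7-bit I2C address.
 */
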
diff --git a/drivers/hwmon/ams/ams-input.c b/drivers/hwmon/ams/ams-input.c
new file mode 100644
index 000000000000..f126aa485134
--- /dev/null
+++ b/drivers/hwmon/ams/ams-input.c
@@ -0,0 +1,160 @@
1/*
2 * Apple Motion Sensor driver (joystick emulation)
3 *
4 * Copyright (C) 2005 Stelian Pop (stelian@popies.net)
5 * Copyright (C) 2006 Michael Hanselmann (linux-kernel@hansmi.ch)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#include <linux/module.h>
14
15#include <linux/types.h>
16#include <linux/errno.h>
17#include <linux/init.h>
18#include <linux/delay.h>
19
20#include "ams.h"
21
22static unsigned int joystick;
23module_param(joystick, bool, 0644);
24MODULE_PARM_DESC(joystick, "Enable the input class device on module load");
25
26static unsigned int invert;
27module_param(invert, bool, 0644);
28MODULE_PARM_DESC(invert, "Invert input data on X and Y axis");
29
30static int ams_input_kthread(void *data)
31{
32 s8 x, y, z;
33
34 while (!kthread_should_stop()) {
35 mutex_lock(&ams_info.lock);
36
37 ams_sensors(&x, &y, &z);
38
39 x -= ams_info.xcalib;
40 y -= ams_info.ycalib;
41 z -= ams_info.zcalib;
42
43 input_report_abs(ams_info.idev, ABS_X, invert ? -x : x);
44 input_report_abs(ams_info.idev, ABS_Y, invert ? -y : y);
45 input_report_abs(ams_info.idev, ABS_Z, z);
46
47 input_sync(ams_info.idev);
48
49 mutex_unlock(&ams_info.lock);
50
51 msleep(25);
52 }
53
54 return 0;
55}
56
57static int ams_input_open(struct input_dev *dev)
58{
59 ams_info.kthread = kthread_run(ams_input_kthread, NULL, "kams");
60 return IS_ERR(ams_info.kthread) ? PTR_ERR(ams_info.kthread) : 0;
61}
62
63static void ams_input_close(struct input_dev *dev)
64{
65 kthread_stop(ams_info.kthread);
66}
67
68/* Call with ams_info.lock held! */
69static void ams_input_enable(void)
70{
71 s8 x, y, z;
72
73 if (ams_info.idev)
74 return;
75
76 ams_sensors(&x, &y, &z);
77 ams_info.xcalib = x;
78 ams_info.ycalib = y;
79 ams_info.zcalib = z;
80
81 ams_info.idev = input_allocate_device();
82 if (!ams_info.idev)
83 return;
84
85 ams_info.idev->name = "Apple Motion Sensor";
86 ams_info.idev->id.bustype = ams_info.bustype;
87 ams_info.idev->id.vendor = 0;
88 ams_info.idev->open = ams_input_open;
89 ams_info.idev->close = ams_input_close;
90 ams_info.idev->cdev.dev = &ams_info.of_dev->dev;
91
92 input_set_abs_params(ams_info.idev, ABS_X, -50, 50, 3, 0);
93 input_set_abs_params(ams_info.idev, ABS_Y, -50, 50, 3, 0);
94 input_set_abs_params(ams_info.idev, ABS_Z, -50, 50, 3, 0);
95
96 set_bit(EV_ABS, ams_info.idev->evbit);
97 set_bit(EV_KEY, ams_info.idev->evbit);
98 set_bit(BTN_TOUCH, ams_info.idev->keybit);
99
100 if (input_register_device(ams_info.idev)) {
101 input_free_device(ams_info.idev);
102 ams_info.idev = NULL;
103 return;
104 }
105}
106
107/* Call with ams_info.lock held! */
108static void ams_input_disable(void)
109{
110 if (ams_info.idev) {
111 input_unregister_device(ams_info.idev);
112 ams_info.idev = NULL;
113 }
114}
115
116static ssize_t ams_input_show_joystick(struct device *dev,
117 struct device_attribute *attr, char *buf)
118{
119 return sprintf(buf, "%d\n", joystick);
120}
121
122static ssize_t ams_input_store_joystick(struct device *dev,
123 struct device_attribute *attr, const char *buf, size_t count)
124{
125 if (sscanf(buf, "%d\n", &joystick) != 1)
126 return -EINVAL;
127
128 mutex_lock(&ams_info.lock);
129
130 if (joystick)
131 ams_input_enable();
132 else
133 ams_input_disable();
134
135 mutex_unlock(&ams_info.lock);
136
137 return count;
138}
139
140static DEVICE_ATTR(joystick, S_IRUGO | S_IWUSR,
141 ams_input_show_joystick, ams_input_store_joystick);
142
143/* Call with ams_info.lock held! */
144int ams_input_init(void)
145{
146 int result;
147
148 result = device_create_file(&ams_info.of_dev->dev, &dev_attr_joystick);
149
150 if (!result && joystick)
151 ams_input_enable();
152 return result;
153}
154
155/* Call with ams_info.lock held! */
156void ams_input_exit(void)
157{
158 ams_input_disable();
159 device_remove_file(&ams_info.of_dev->dev, &dev_attr_joystick);
160}
diff --git a/drivers/hwmon/ams/ams-pmu.c b/drivers/hwmon/ams/ams-pmu.c
new file mode 100644
index 000000000000..4636ae031a53
--- /dev/null
+++ b/drivers/hwmon/ams/ams-pmu.c
@@ -0,0 +1,207 @@
1/*
2 * Apple Motion Sensor driver (PMU variant)
3 *
4 * Copyright (C) 2006 Michael Hanselmann (linux-kernel@hansmi.ch)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/errno.h>
15#include <linux/init.h>
16#include <linux/adb.h>
17#include <linux/pmu.h>
18
19#include "ams.h"
20
21/* Attitude */
22#define AMS_X 0x00
23#define AMS_Y 0x01
24#define AMS_Z 0x02
25
26/* Not exactly known, maybe chip vendor */
27#define AMS_VENDOR 0x03
28
29/* Freefall registers */
30#define AMS_FF_CLEAR 0x04
31#define AMS_FF_ENABLE 0x05
32#define AMS_FF_LOW_LIMIT 0x06
33#define AMS_FF_DEBOUNCE 0x07
34
35/* Shock registers */
36#define AMS_SHOCK_CLEAR 0x08
37#define AMS_SHOCK_ENABLE 0x09
38#define AMS_SHOCK_HIGH_LIMIT 0x0a
39#define AMS_SHOCK_DEBOUNCE 0x0b
40
41/* Global interrupt and power control register */
42#define AMS_CONTROL 0x0c
43
44static u8 ams_pmu_cmd;
45
46static void ams_pmu_req_complete(struct adb_request *req)
47{
48 complete((struct completion *)req->arg);
49}
50
51/* Only call this function from task context */
52static void ams_pmu_set_register(u8 reg, u8 value)
53{
54 static struct adb_request req;
55 DECLARE_COMPLETION(req_complete);
56
57 req.arg = &req_complete;
58 if (pmu_request(&req, ams_pmu_req_complete, 4, ams_pmu_cmd, 0x00, reg, value))
59 return;
60
61 wait_for_completion(&req_complete);
62}
63
64/* Only call this function from task context */
65static u8 ams_pmu_get_register(u8 reg)
66{
67 static struct adb_request req;
68 DECLARE_COMPLETION(req_complete);
69
70 req.arg = &req_complete;
71 if (pmu_request(&req, ams_pmu_req_complete, 3, ams_pmu_cmd, 0x01, reg))
72 return 0;
73
74 wait_for_completion(&req_complete);
75
76 if (req.reply_len > 0)
77 return req.reply[0];
78 else
79 return 0;
80}
81
82/* Enables or disables the specified interrupts */
83static void ams_pmu_set_irq(enum ams_irq reg, char enable)
84{
85 if (reg & AMS_IRQ_FREEFALL) {
86 u8 val = ams_pmu_get_register(AMS_FF_ENABLE);
87 if (enable)
88 val |= 0x80;
89 else
90 val &= ~0x80;
91 ams_pmu_set_register(AMS_FF_ENABLE, val);
92 }
93
94 if (reg & AMS_IRQ_SHOCK) {
95 u8 val = ams_pmu_get_register(AMS_SHOCK_ENABLE);
96 if (enable)
97 val |= 0x80;
98 else
99 val &= ~0x80;
100 ams_pmu_set_register(AMS_SHOCK_ENABLE, val);
101 }
102
103 if (reg & AMS_IRQ_GLOBAL) {
104 u8 val = ams_pmu_get_register(AMS_CONTROL);
105 if (enable)
106 val |= 0x80;
107 else
108 val &= ~0x80;
109 ams_pmu_set_register(AMS_CONTROL, val);
110 }
111}
112
113static void ams_pmu_clear_irq(enum ams_irq reg)
114{
115 if (reg & AMS_IRQ_FREEFALL)
116 ams_pmu_set_register(AMS_FF_CLEAR, 0x00);
117
118 if (reg & AMS_IRQ_SHOCK)
119 ams_pmu_set_register(AMS_SHOCK_CLEAR, 0x00);
120}
121
122static u8 ams_pmu_get_vendor(void)
123{
124 return ams_pmu_get_register(AMS_VENDOR);
125}
126
127static void ams_pmu_get_xyz(s8 *x, s8 *y, s8 *z)
128{
129 *x = ams_pmu_get_register(AMS_X);
130 *y = ams_pmu_get_register(AMS_Y);
131 *z = ams_pmu_get_register(AMS_Z);
132}
133
134static void ams_pmu_exit(void)
135{
136 /* Disable interrupts */
137 ams_pmu_set_irq(AMS_IRQ_ALL, 0);
138
139 /* Clear interrupts */
140 ams_pmu_clear_irq(AMS_IRQ_ALL);
141
142 ams_info.has_device = 0;
143
144 printk(KERN_INFO "ams: Unloading\n");
145}
146
147int __init ams_pmu_init(struct device_node *np)
148{
149 u32 *prop;
150 int result;
151
152 mutex_lock(&ams_info.lock);
153
154 /* Set implementation stuff */
155 ams_info.of_node = np;
156 ams_info.exit = ams_pmu_exit;
157 ams_info.get_vendor = ams_pmu_get_vendor;
158 ams_info.get_xyz = ams_pmu_get_xyz;
159 ams_info.clear_irq = ams_pmu_clear_irq;
160 ams_info.bustype = BUS_HOST;
161
162 /* Get PMU command, should be 0x4e, but we can never know */
163 prop = (u32*)get_property(ams_info.of_node, "reg", NULL);
164 if (!prop) {
165 result = -ENODEV;
166 goto exit;
167 }
168 ams_pmu_cmd = ((*prop) >> 8) & 0xff;
169
170 /* Disable interrupts */
171 ams_pmu_set_irq(AMS_IRQ_ALL, 0);
172
173 /* Clear interrupts */
174 ams_pmu_clear_irq(AMS_IRQ_ALL);
175
176 result = ams_sensor_attach();
177 if (result < 0)
178 goto exit;
179
180 /* Set default values */
181 ams_pmu_set_register(AMS_FF_LOW_LIMIT, 0x15);
182 ams_pmu_set_register(AMS_FF_ENABLE, 0x08);
183 ams_pmu_set_register(AMS_FF_DEBOUNCE, 0x14);
184
185 ams_pmu_set_register(AMS_SHOCK_HIGH_LIMIT, 0x60);
186 ams_pmu_set_register(AMS_SHOCK_ENABLE, 0x0f);
187 ams_pmu_set_register(AMS_SHOCK_DEBOUNCE, 0x14);
188
189 ams_pmu_set_register(AMS_CONTROL, 0x4f);
190
191 /* Clear interrupts */
192 ams_pmu_clear_irq(AMS_IRQ_ALL);
193
194 ams_info.has_device = 1;
195
196 /* Enable interrupts */
197 ams_pmu_set_irq(AMS_IRQ_ALL, 1);
198
199 printk(KERN_INFO "ams: Found PMU based motion sensor\n");
200
201 result = 0;
202
203exit:
204 mutex_unlock(&ams_info.lock);
205
206 return result;
207}
diff --git a/drivers/hwmon/ams/ams.h b/drivers/hwmon/ams/ams.h
new file mode 100644
index 000000000000..240730e6bcde
--- /dev/null
+++ b/drivers/hwmon/ams/ams.h
@@ -0,0 +1,72 @@
1#include <linux/i2c.h>
2#include <linux/input.h>
3#include <linux/kthread.h>
4#include <linux/mutex.h>
5#include <linux/spinlock.h>
6#include <linux/types.h>
7#include <asm/of_device.h>
8
9enum ams_irq {
10 AMS_IRQ_FREEFALL = 0x01,
11 AMS_IRQ_SHOCK = 0x02,
12 AMS_IRQ_GLOBAL = 0x04,
13 AMS_IRQ_ALL =
14 AMS_IRQ_FREEFALL |
15 AMS_IRQ_SHOCK |
16 AMS_IRQ_GLOBAL,
17};
18
19struct ams {
20 /* Locks */
21 spinlock_t irq_lock;
22 struct mutex lock;
23
24 /* General properties */
25 struct device_node *of_node;
26 struct of_device *of_dev;
27 char has_device;
28 char vflag;
29 u32 orient1;
30 u32 orient2;
31
32 /* Interrupt worker */
33 struct work_struct worker;
34 u8 worker_irqs;
35
36 /* Implementation
37 *
38 * Only call these functions with the main lock held.
39 */
40 void (*exit)(void);
41
42 void (*get_xyz)(s8 *x, s8 *y, s8 *z);
43 u8 (*get_vendor)(void);
44
45 void (*clear_irq)(enum ams_irq reg);
46
47#ifdef CONFIG_SENSORS_AMS_I2C
48 /* I2C properties */
49 int i2c_bus;
50 int i2c_address;
51 struct i2c_client i2c_client;
52#endif
53
54 /* Joystick emulation */
55 struct task_struct *kthread;
56 struct input_dev *idev;
57 __u16 bustype;
58
59 /* calibrated null values */
60 int xcalib, ycalib, zcalib;
61};
62
63extern struct ams ams_info;
64
65extern void ams_sensors(s8 *x, s8 *y, s8 *z);
66extern int ams_sensor_attach(void);
67
68extern int ams_pmu_init(struct device_node *np);
69extern int ams_i2c_init(struct device_node *np);
70
71extern int ams_input_init(void);
72extern void ams_input_exit(void);
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index de17a72149d9..a272cae8f60e 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -1,12 +1,15 @@
1/* 1/*
2 * f71805f.c - driver for the Fintek F71805F/FG Super-I/O chip integrated 2 * f71805f.c - driver for the Fintek F71805F/FG and F71872F/FG Super-I/O
3 * hardware monitoring features 3 * chips integrated hardware monitoring features
4 * Copyright (C) 2005-2006 Jean Delvare <khali@linux-fr.org> 4 * Copyright (C) 2005-2006 Jean Delvare <khali@linux-fr.org>
5 * 5 *
6 * The F71805F/FG is a LPC Super-I/O chip made by Fintek. It integrates 6 * The F71805F/FG is a LPC Super-I/O chip made by Fintek. It integrates
7 * complete hardware monitoring features: voltage, fan and temperature 7 * complete hardware monitoring features: voltage, fan and temperature
8 * sensors, and manual and automatic fan speed control. 8 * sensors, and manual and automatic fan speed control.
9 * 9 *
10 * The F71872F/FG is almost the same, with two more voltages monitored,
11 * and 6 VID inputs.
12 *
10 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 14 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or 15 * the Free Software Foundation; either version 2 of the License, or
@@ -37,6 +40,7 @@
37static struct platform_device *pdev; 40static struct platform_device *pdev;
38 41
39#define DRVNAME "f71805f" 42#define DRVNAME "f71805f"
43enum kinds { f71805f, f71872f };
40 44
41/* 45/*
42 * Super-I/O constants and functions 46 * Super-I/O constants and functions
@@ -48,11 +52,13 @@ static struct platform_device *pdev;
48#define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */ 52#define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */
49#define SIO_REG_DEVREV 0x22 /* Device revision */ 53#define SIO_REG_DEVREV 0x22 /* Device revision */
50#define SIO_REG_MANID 0x23 /* Fintek ID (2 bytes) */ 54#define SIO_REG_MANID 0x23 /* Fintek ID (2 bytes) */
55#define SIO_REG_FNSEL1 0x29 /* Multi Function Select 1 (F71872F) */
51#define SIO_REG_ENABLE 0x30 /* Logical device enable */ 56#define SIO_REG_ENABLE 0x30 /* Logical device enable */
52#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */ 57#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */
53 58
54#define SIO_FINTEK_ID 0x1934 59#define SIO_FINTEK_ID 0x1934
55#define SIO_F71805F_ID 0x0406 60#define SIO_F71805F_ID 0x0406
61#define SIO_F71872F_ID 0x0341
56 62
57static inline int 63static inline int
58superio_inb(int base, int reg) 64superio_inb(int base, int reg)
@@ -96,22 +102,25 @@ superio_exit(int base)
96 * ISA constants 102 * ISA constants
97 */ 103 */
98 104
99#define REGION_LENGTH 2 105#define REGION_LENGTH 8
100#define ADDR_REG_OFFSET 0 106#define ADDR_REG_OFFSET 5
101#define DATA_REG_OFFSET 1 107#define DATA_REG_OFFSET 6
102 108
103/* 109/*
104 * Registers 110 * Registers
105 */ 111 */
106 112
107/* in nr from 0 to 8 (8-bit values) */ 113/* in nr from 0 to 10 (8-bit values) */
108#define F71805F_REG_IN(nr) (0x10 + (nr)) 114#define F71805F_REG_IN(nr) (0x10 + (nr))
109#define F71805F_REG_IN_HIGH(nr) (0x40 + 2 * (nr)) 115#define F71805F_REG_IN_HIGH(nr) ((nr) < 10 ? 0x40 + 2 * (nr) : 0x2E)
110#define F71805F_REG_IN_LOW(nr) (0x41 + 2 * (nr)) 116#define F71805F_REG_IN_LOW(nr) ((nr) < 10 ? 0x41 + 2 * (nr) : 0x2F)
111/* fan nr from 0 to 2 (12-bit values, two registers) */ 117/* fan nr from 0 to 2 (12-bit values, two registers) */
112#define F71805F_REG_FAN(nr) (0x20 + 2 * (nr)) 118#define F71805F_REG_FAN(nr) (0x20 + 2 * (nr))
113#define F71805F_REG_FAN_LOW(nr) (0x28 + 2 * (nr)) 119#define F71805F_REG_FAN_LOW(nr) (0x28 + 2 * (nr))
120#define F71805F_REG_FAN_TARGET(nr) (0x69 + 16 * (nr))
114#define F71805F_REG_FAN_CTRL(nr) (0x60 + 16 * (nr)) 121#define F71805F_REG_FAN_CTRL(nr) (0x60 + 16 * (nr))
122#define F71805F_REG_PWM_FREQ(nr) (0x63 + 16 * (nr))
123#define F71805F_REG_PWM_DUTY(nr) (0x6B + 16 * (nr))
115/* temp nr from 0 to 2 (8-bit values) */ 124/* temp nr from 0 to 2 (8-bit values) */
116#define F71805F_REG_TEMP(nr) (0x1B + (nr)) 125#define F71805F_REG_TEMP(nr) (0x1B + (nr))
117#define F71805F_REG_TEMP_HIGH(nr) (0x54 + 2 * (nr)) 126#define F71805F_REG_TEMP_HIGH(nr) (0x54 + 2 * (nr))
@@ -122,6 +131,14 @@ superio_exit(int base)
122/* status nr from 0 to 2 */ 131/* status nr from 0 to 2 */
123#define F71805F_REG_STATUS(nr) (0x36 + (nr)) 132#define F71805F_REG_STATUS(nr) (0x36 + (nr))
124 133
134/* individual register bits */
135#define FAN_CTRL_DC_MODE 0x10
136#define FAN_CTRL_LATCH_FULL 0x08
137#define FAN_CTRL_MODE_MASK 0x03
138#define FAN_CTRL_MODE_SPEED 0x00
139#define FAN_CTRL_MODE_TEMPERATURE 0x01
140#define FAN_CTRL_MODE_MANUAL 0x02
141
125/* 142/*
126 * Data structures and manipulation thereof 143 * Data structures and manipulation thereof
127 */ 144 */
@@ -138,12 +155,16 @@ struct f71805f_data {
138 unsigned long last_limits; /* In jiffies */ 155 unsigned long last_limits; /* In jiffies */
139 156
140 /* Register values */ 157 /* Register values */
141 u8 in[9]; 158 u8 in[11];
142 u8 in_high[9]; 159 u8 in_high[11];
143 u8 in_low[9]; 160 u8 in_low[11];
161 u16 has_in;
144 u16 fan[3]; 162 u16 fan[3];
145 u16 fan_low[3]; 163 u16 fan_low[3];
146 u8 fan_enabled; /* Read once at init time */ 164 u16 fan_target[3];
165 u8 fan_ctrl[3];
166 u8 pwm[3];
167 u8 pwm_freq[3];
147 u8 temp[3]; 168 u8 temp[3];
148 u8 temp_high[3]; 169 u8 temp_high[3];
149 u8 temp_hyst[3]; 170 u8 temp_hyst[3];
@@ -151,6 +172,11 @@ struct f71805f_data {
151 unsigned long alarms; 172 unsigned long alarms;
152}; 173};
153 174
175struct f71805f_sio_data {
176 enum kinds kind;
177 u8 fnsel1;
178};
179
154static inline long in_from_reg(u8 reg) 180static inline long in_from_reg(u8 reg)
155{ 181{
156 return (reg * 8); 182 return (reg * 8);
@@ -200,6 +226,33 @@ static inline u16 fan_to_reg(long rpm)
200 return (1500000 / rpm); 226 return (1500000 / rpm);
201} 227}
202 228
229static inline unsigned long pwm_freq_from_reg(u8 reg)
230{
231 unsigned long clock = (reg & 0x80) ? 48000000UL : 1000000UL;
232
233 reg &= 0x7f;
234 if (reg == 0)
235 reg++;
236 return clock / (reg << 8);
237}
238
239static inline u8 pwm_freq_to_reg(unsigned long val)
240{
241 if (val >= 187500) /* The highest we can do */
242 return 0x80;
243 if (val >= 1475) /* Use 48 MHz clock */
244 return 0x80 | (48000000UL / (val << 8));
245 if (val < 31) /* The lowest we can do */
246 return 0x7f;
247 else /* Use 1 MHz clock */
248 return 1000000UL / (val << 8);
249}
250
251static inline int pwm_mode_from_reg(u8 reg)
252{
253 return !(reg & FAN_CTRL_DC_MODE);
254}
255
203static inline long temp_from_reg(u8 reg) 256static inline long temp_from_reg(u8 reg)
204{ 257{
205 return (reg * 1000); 258 return (reg * 1000);
@@ -274,16 +327,21 @@ static struct f71805f_data *f71805f_update_device(struct device *dev)
274 /* Limit registers cache is refreshed after 60 seconds */ 327 /* Limit registers cache is refreshed after 60 seconds */
275 if (time_after(jiffies, data->last_updated + 60 * HZ) 328 if (time_after(jiffies, data->last_updated + 60 * HZ)
276 || !data->valid) { 329 || !data->valid) {
277 for (nr = 0; nr < 9; nr++) { 330 for (nr = 0; nr < 11; nr++) {
331 if (!(data->has_in & (1 << nr)))
332 continue;
278 data->in_high[nr] = f71805f_read8(data, 333 data->in_high[nr] = f71805f_read8(data,
279 F71805F_REG_IN_HIGH(nr)); 334 F71805F_REG_IN_HIGH(nr));
280 data->in_low[nr] = f71805f_read8(data, 335 data->in_low[nr] = f71805f_read8(data,
281 F71805F_REG_IN_LOW(nr)); 336 F71805F_REG_IN_LOW(nr));
282 } 337 }
283 for (nr = 0; nr < 3; nr++) { 338 for (nr = 0; nr < 3; nr++) {
284 if (data->fan_enabled & (1 << nr)) 339 data->fan_low[nr] = f71805f_read16(data,
285 data->fan_low[nr] = f71805f_read16(data, 340 F71805F_REG_FAN_LOW(nr));
286 F71805F_REG_FAN_LOW(nr)); 341 data->fan_target[nr] = f71805f_read16(data,
342 F71805F_REG_FAN_TARGET(nr));
343 data->pwm_freq[nr] = f71805f_read8(data,
344 F71805F_REG_PWM_FREQ(nr));
287 } 345 }
288 for (nr = 0; nr < 3; nr++) { 346 for (nr = 0; nr < 3; nr++) {
289 data->temp_high[nr] = f71805f_read8(data, 347 data->temp_high[nr] = f71805f_read8(data,
@@ -299,14 +357,19 @@ static struct f71805f_data *f71805f_update_device(struct device *dev)
299 /* Measurement registers cache is refreshed after 1 second */ 357 /* Measurement registers cache is refreshed after 1 second */
300 if (time_after(jiffies, data->last_updated + HZ) 358 if (time_after(jiffies, data->last_updated + HZ)
301 || !data->valid) { 359 || !data->valid) {
302 for (nr = 0; nr < 9; nr++) { 360 for (nr = 0; nr < 11; nr++) {
361 if (!(data->has_in & (1 << nr)))
362 continue;
303 data->in[nr] = f71805f_read8(data, 363 data->in[nr] = f71805f_read8(data,
304 F71805F_REG_IN(nr)); 364 F71805F_REG_IN(nr));
305 } 365 }
306 for (nr = 0; nr < 3; nr++) { 366 for (nr = 0; nr < 3; nr++) {
307 if (data->fan_enabled & (1 << nr)) 367 data->fan[nr] = f71805f_read16(data,
308 data->fan[nr] = f71805f_read16(data, 368 F71805F_REG_FAN(nr));
309 F71805F_REG_FAN(nr)); 369 data->fan_ctrl[nr] = f71805f_read8(data,
370 F71805F_REG_FAN_CTRL(nr));
371 data->pwm[nr] = f71805f_read8(data,
372 F71805F_REG_PWM_DUTY(nr));
310 } 373 }
311 for (nr = 0; nr < 3; nr++) { 374 for (nr = 0; nr < 3; nr++) {
312 data->temp[nr] = f71805f_read8(data, 375 data->temp[nr] = f71805f_read8(data,
@@ -333,35 +396,43 @@ static ssize_t show_in0(struct device *dev, struct device_attribute *devattr,
333 char *buf) 396 char *buf)
334{ 397{
335 struct f71805f_data *data = f71805f_update_device(dev); 398 struct f71805f_data *data = f71805f_update_device(dev);
399 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
400 int nr = attr->index;
336 401
337 return sprintf(buf, "%ld\n", in0_from_reg(data->in[0])); 402 return sprintf(buf, "%ld\n", in0_from_reg(data->in[nr]));
338} 403}
339 404
340static ssize_t show_in0_max(struct device *dev, struct device_attribute 405static ssize_t show_in0_max(struct device *dev, struct device_attribute
341 *devattr, char *buf) 406 *devattr, char *buf)
342{ 407{
343 struct f71805f_data *data = f71805f_update_device(dev); 408 struct f71805f_data *data = f71805f_update_device(dev);
409 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
410 int nr = attr->index;
344 411
345 return sprintf(buf, "%ld\n", in0_from_reg(data->in_high[0])); 412 return sprintf(buf, "%ld\n", in0_from_reg(data->in_high[nr]));
346} 413}
347 414
348static ssize_t show_in0_min(struct device *dev, struct device_attribute 415static ssize_t show_in0_min(struct device *dev, struct device_attribute
349 *devattr, char *buf) 416 *devattr, char *buf)
350{ 417{
351 struct f71805f_data *data = f71805f_update_device(dev); 418 struct f71805f_data *data = f71805f_update_device(dev);
419 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
420 int nr = attr->index;
352 421
353 return sprintf(buf, "%ld\n", in0_from_reg(data->in_low[0])); 422 return sprintf(buf, "%ld\n", in0_from_reg(data->in_low[nr]));
354} 423}
355 424
356static ssize_t set_in0_max(struct device *dev, struct device_attribute 425static ssize_t set_in0_max(struct device *dev, struct device_attribute
357 *devattr, const char *buf, size_t count) 426 *devattr, const char *buf, size_t count)
358{ 427{
359 struct f71805f_data *data = dev_get_drvdata(dev); 428 struct f71805f_data *data = dev_get_drvdata(dev);
429 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
430 int nr = attr->index;
360 long val = simple_strtol(buf, NULL, 10); 431 long val = simple_strtol(buf, NULL, 10);
361 432
362 mutex_lock(&data->update_lock); 433 mutex_lock(&data->update_lock);
363 data->in_high[0] = in0_to_reg(val); 434 data->in_high[nr] = in0_to_reg(val);
364 f71805f_write8(data, F71805F_REG_IN_HIGH(0), data->in_high[0]); 435 f71805f_write8(data, F71805F_REG_IN_HIGH(nr), data->in_high[nr]);
365 mutex_unlock(&data->update_lock); 436 mutex_unlock(&data->update_lock);
366 437
367 return count; 438 return count;
@@ -371,11 +442,13 @@ static ssize_t set_in0_min(struct device *dev, struct device_attribute
371 *devattr, const char *buf, size_t count) 442 *devattr, const char *buf, size_t count)
372{ 443{
373 struct f71805f_data *data = dev_get_drvdata(dev); 444 struct f71805f_data *data = dev_get_drvdata(dev);
445 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
446 int nr = attr->index;
374 long val = simple_strtol(buf, NULL, 10); 447 long val = simple_strtol(buf, NULL, 10);
375 448
376 mutex_lock(&data->update_lock); 449 mutex_lock(&data->update_lock);
377 data->in_low[0] = in0_to_reg(val); 450 data->in_low[nr] = in0_to_reg(val);
378 f71805f_write8(data, F71805F_REG_IN_LOW(0), data->in_low[0]); 451 f71805f_write8(data, F71805F_REG_IN_LOW(nr), data->in_low[nr]);
379 mutex_unlock(&data->update_lock); 452 mutex_unlock(&data->update_lock);
380 453
381 return count; 454 return count;
@@ -463,6 +536,16 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute
463 return sprintf(buf, "%ld\n", fan_from_reg(data->fan_low[nr])); 536 return sprintf(buf, "%ld\n", fan_from_reg(data->fan_low[nr]));
464} 537}
465 538
539static ssize_t show_fan_target(struct device *dev, struct device_attribute
540 *devattr, char *buf)
541{
542 struct f71805f_data *data = f71805f_update_device(dev);
543 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
544 int nr = attr->index;
545
546 return sprintf(buf, "%ld\n", fan_from_reg(data->fan_target[nr]));
547}
548
466static ssize_t set_fan_min(struct device *dev, struct device_attribute 549static ssize_t set_fan_min(struct device *dev, struct device_attribute
467 *devattr, const char *buf, size_t count) 550 *devattr, const char *buf, size_t count)
468{ 551{
@@ -479,6 +562,157 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute
479 return count; 562 return count;
480} 563}
481 564
565static ssize_t set_fan_target(struct device *dev, struct device_attribute
566 *devattr, const char *buf, size_t count)
567{
568 struct f71805f_data *data = dev_get_drvdata(dev);
569 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
570 int nr = attr->index;
571 long val = simple_strtol(buf, NULL, 10);
572
573 mutex_lock(&data->update_lock);
574 data->fan_target[nr] = fan_to_reg(val);
575 f71805f_write16(data, F71805F_REG_FAN_TARGET(nr),
576 data->fan_target[nr]);
577 mutex_unlock(&data->update_lock);
578
579 return count;
580}
581
582static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr,
583 char *buf)
584{
585 struct f71805f_data *data = f71805f_update_device(dev);
586 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
587 int nr = attr->index;
588
589 return sprintf(buf, "%d\n", (int)data->pwm[nr]);
590}
591
592static ssize_t show_pwm_enable(struct device *dev, struct device_attribute
593 *devattr, char *buf)
594{
595 struct f71805f_data *data = f71805f_update_device(dev);
596 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
597 int nr = attr->index;
598 int mode;
599
600 switch (data->fan_ctrl[nr] & FAN_CTRL_MODE_MASK) {
601 case FAN_CTRL_MODE_SPEED:
602 mode = 3;
603 break;
604 case FAN_CTRL_MODE_TEMPERATURE:
605 mode = 2;
606 break;
607 default: /* MANUAL */
608 mode = 1;
609 }
610
611 return sprintf(buf, "%d\n", mode);
612}
613
614static ssize_t show_pwm_freq(struct device *dev, struct device_attribute
615 *devattr, char *buf)
616{
617 struct f71805f_data *data = f71805f_update_device(dev);
618 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
619 int nr = attr->index;
620
621 return sprintf(buf, "%lu\n", pwm_freq_from_reg(data->pwm_freq[nr]));
622}
623
624static ssize_t show_pwm_mode(struct device *dev, struct device_attribute
625 *devattr, char *buf)
626{
627 struct f71805f_data *data = f71805f_update_device(dev);
628 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
629 int nr = attr->index;
630
631 return sprintf(buf, "%d\n", pwm_mode_from_reg(data->fan_ctrl[nr]));
632}
633
634static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
635 const char *buf, size_t count)
636{
637 struct f71805f_data *data = dev_get_drvdata(dev);
638 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
639 int nr = attr->index;
640 unsigned long val = simple_strtoul(buf, NULL, 10);
641
642 if (val > 255)
643 return -EINVAL;
644
645 mutex_lock(&data->update_lock);
646 data->pwm[nr] = val;
647 f71805f_write8(data, F71805F_REG_PWM_DUTY(nr), data->pwm[nr]);
648 mutex_unlock(&data->update_lock);
649
650 return count;
651}
652
653static struct attribute *f71805f_attr_pwm[];
654
655static ssize_t set_pwm_enable(struct device *dev, struct device_attribute
656 *devattr, const char *buf, size_t count)
657{
658 struct f71805f_data *data = dev_get_drvdata(dev);
659 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
660 int nr = attr->index;
661 unsigned long val = simple_strtoul(buf, NULL, 10);
662 u8 reg;
663
664 if (val < 1 || val > 3)
665 return -EINVAL;
666
667 if (val > 1) { /* Automatic mode, user can't set PWM value */
668 if (sysfs_chmod_file(&dev->kobj, f71805f_attr_pwm[nr],
669 S_IRUGO))
670 dev_dbg(dev, "chmod -w pwm%d failed\n", nr + 1);
671 }
672
673 mutex_lock(&data->update_lock);
674 reg = f71805f_read8(data, F71805F_REG_FAN_CTRL(nr))
675 & ~FAN_CTRL_MODE_MASK;
676 switch (val) {
677 case 1:
678 reg |= FAN_CTRL_MODE_MANUAL;
679 break;
680 case 2:
681 reg |= FAN_CTRL_MODE_TEMPERATURE;
682 break;
683 case 3:
684 reg |= FAN_CTRL_MODE_SPEED;
685 break;
686 }
687 data->fan_ctrl[nr] = reg;
688 f71805f_write8(data, F71805F_REG_FAN_CTRL(nr), reg);
689 mutex_unlock(&data->update_lock);
690
691 if (val == 1) { /* Manual mode, user can set PWM value */
692 if (sysfs_chmod_file(&dev->kobj, f71805f_attr_pwm[nr],
693 S_IRUGO | S_IWUSR))
694 dev_dbg(dev, "chmod +w pwm%d failed\n", nr + 1);
695 }
696
697 return count;
698}
699
700static ssize_t set_pwm_freq(struct device *dev, struct device_attribute
701 *devattr, const char *buf, size_t count)
702{
703 struct f71805f_data *data = dev_get_drvdata(dev);
704 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
705 int nr = attr->index;
706 unsigned long val = simple_strtoul(buf, NULL, 10);
707
708 mutex_lock(&data->update_lock);
709 data->pwm_freq[nr] = pwm_freq_to_reg(val);
710 f71805f_write8(data, F71805F_REG_PWM_FREQ(nr), data->pwm_freq[nr]);
711 mutex_unlock(&data->update_lock);
712
713 return count;
714}
715
482static ssize_t show_temp(struct device *dev, struct device_attribute *devattr, 716static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
483 char *buf) 717 char *buf)
484{ 718{
@@ -557,7 +791,7 @@ static ssize_t show_alarms_in(struct device *dev, struct device_attribute
557{ 791{
558 struct f71805f_data *data = f71805f_update_device(dev); 792 struct f71805f_data *data = f71805f_update_device(dev);
559 793
560 return sprintf(buf, "%lu\n", data->alarms & 0x1ff); 794 return sprintf(buf, "%lu\n", data->alarms & 0x7ff);
561} 795}
562 796
563static ssize_t show_alarms_fan(struct device *dev, struct device_attribute 797static ssize_t show_alarms_fan(struct device *dev, struct device_attribute
@@ -594,9 +828,11 @@ static ssize_t show_name(struct device *dev, struct device_attribute
594 return sprintf(buf, "%s\n", data->name); 828 return sprintf(buf, "%s\n", data->name);
595} 829}
596 830
597static DEVICE_ATTR(in0_input, S_IRUGO, show_in0, NULL); 831static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_in0, NULL, 0);
598static DEVICE_ATTR(in0_max, S_IRUGO| S_IWUSR, show_in0_max, set_in0_max); 832static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO| S_IWUSR,
599static DEVICE_ATTR(in0_min, S_IRUGO| S_IWUSR, show_in0_min, set_in0_min); 833 show_in0_max, set_in0_max, 0);
834static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO| S_IWUSR,
835 show_in0_min, set_in0_min, 0);
600static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in, NULL, 1); 836static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in, NULL, 1);
601static SENSOR_DEVICE_ATTR(in1_max, S_IRUGO | S_IWUSR, 837static SENSOR_DEVICE_ATTR(in1_max, S_IRUGO | S_IWUSR,
602 show_in_max, set_in_max, 1); 838 show_in_max, set_in_max, 1);
@@ -637,16 +873,32 @@ static SENSOR_DEVICE_ATTR(in8_max, S_IRUGO | S_IWUSR,
637 show_in_max, set_in_max, 8); 873 show_in_max, set_in_max, 8);
638static SENSOR_DEVICE_ATTR(in8_min, S_IRUGO | S_IWUSR, 874static SENSOR_DEVICE_ATTR(in8_min, S_IRUGO | S_IWUSR,
639 show_in_min, set_in_min, 8); 875 show_in_min, set_in_min, 8);
876static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, show_in0, NULL, 9);
877static SENSOR_DEVICE_ATTR(in9_max, S_IRUGO | S_IWUSR,
878 show_in0_max, set_in0_max, 9);
879static SENSOR_DEVICE_ATTR(in9_min, S_IRUGO | S_IWUSR,
880 show_in0_min, set_in0_min, 9);
881static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, show_in0, NULL, 10);
882static SENSOR_DEVICE_ATTR(in10_max, S_IRUGO | S_IWUSR,
883 show_in0_max, set_in0_max, 10);
884static SENSOR_DEVICE_ATTR(in10_min, S_IRUGO | S_IWUSR,
885 show_in0_min, set_in0_min, 10);
640 886
641static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0); 887static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
642static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO | S_IWUSR, 888static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO | S_IWUSR,
643 show_fan_min, set_fan_min, 0); 889 show_fan_min, set_fan_min, 0);
890static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR,
891 show_fan_target, set_fan_target, 0);
644static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1); 892static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
645static SENSOR_DEVICE_ATTR(fan2_min, S_IRUGO | S_IWUSR, 893static SENSOR_DEVICE_ATTR(fan2_min, S_IRUGO | S_IWUSR,
646 show_fan_min, set_fan_min, 1); 894 show_fan_min, set_fan_min, 1);
895static SENSOR_DEVICE_ATTR(fan2_target, S_IRUGO | S_IWUSR,
896 show_fan_target, set_fan_target, 1);
647static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2); 897static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2);
648static SENSOR_DEVICE_ATTR(fan3_min, S_IRUGO | S_IWUSR, 898static SENSOR_DEVICE_ATTR(fan3_min, S_IRUGO | S_IWUSR,
649 show_fan_min, set_fan_min, 2); 899 show_fan_min, set_fan_min, 2);
900static SENSOR_DEVICE_ATTR(fan3_target, S_IRUGO | S_IWUSR,
901 show_fan_target, set_fan_target, 2);
650 902
651static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0); 903static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
652static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, 904static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR,
@@ -667,6 +919,27 @@ static SENSOR_DEVICE_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR,
667 show_temp_hyst, set_temp_hyst, 2); 919 show_temp_hyst, set_temp_hyst, 2);
668static SENSOR_DEVICE_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2); 920static SENSOR_DEVICE_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2);
669 921
922/* pwm (value) files are created read-only, write permission is
923 then added or removed dynamically as needed */
924static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO, show_pwm, set_pwm, 0);
925static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
926 show_pwm_enable, set_pwm_enable, 0);
927static SENSOR_DEVICE_ATTR(pwm1_freq, S_IRUGO | S_IWUSR,
928 show_pwm_freq, set_pwm_freq, 0);
929static SENSOR_DEVICE_ATTR(pwm1_mode, S_IRUGO, show_pwm_mode, NULL, 0);
930static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO, show_pwm, set_pwm, 1);
931static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
932 show_pwm_enable, set_pwm_enable, 1);
933static SENSOR_DEVICE_ATTR(pwm2_freq, S_IRUGO | S_IWUSR,
934 show_pwm_freq, set_pwm_freq, 1);
935static SENSOR_DEVICE_ATTR(pwm2_mode, S_IRUGO, show_pwm_mode, NULL, 1);
936static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO, show_pwm, set_pwm, 2);
937static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
938 show_pwm_enable, set_pwm_enable, 2);
939static SENSOR_DEVICE_ATTR(pwm3_freq, S_IRUGO | S_IWUSR,
940 show_pwm_freq, set_pwm_freq, 2);
941static SENSOR_DEVICE_ATTR(pwm3_mode, S_IRUGO, show_pwm_mode, NULL, 2);
942
670static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0); 943static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
671static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1); 944static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
672static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2); 945static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
@@ -676,6 +949,8 @@ static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 5);
676static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 6); 949static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 6);
677static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 7); 950static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 7);
678static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 8); 951static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 8);
952static SENSOR_DEVICE_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 9);
953static SENSOR_DEVICE_ATTR(in10_alarm, S_IRUGO, show_alarm, NULL, 10);
679static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 11); 954static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 11);
680static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 12); 955static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 12);
681static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13); 956static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13);
@@ -689,9 +964,9 @@ static DEVICE_ATTR(alarms_temp, S_IRUGO, show_alarms_temp, NULL);
689static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); 964static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
690 965
691static struct attribute *f71805f_attributes[] = { 966static struct attribute *f71805f_attributes[] = {
692 &dev_attr_in0_input.attr, 967 &sensor_dev_attr_in0_input.dev_attr.attr,
693 &dev_attr_in0_max.attr, 968 &sensor_dev_attr_in0_max.dev_attr.attr,
694 &dev_attr_in0_min.attr, 969 &sensor_dev_attr_in0_min.dev_attr.attr,
695 &sensor_dev_attr_in1_input.dev_attr.attr, 970 &sensor_dev_attr_in1_input.dev_attr.attr,
696 &sensor_dev_attr_in1_max.dev_attr.attr, 971 &sensor_dev_attr_in1_max.dev_attr.attr,
697 &sensor_dev_attr_in1_min.dev_attr.attr, 972 &sensor_dev_attr_in1_min.dev_attr.attr,
@@ -701,9 +976,6 @@ static struct attribute *f71805f_attributes[] = {
701 &sensor_dev_attr_in3_input.dev_attr.attr, 976 &sensor_dev_attr_in3_input.dev_attr.attr,
702 &sensor_dev_attr_in3_max.dev_attr.attr, 977 &sensor_dev_attr_in3_max.dev_attr.attr,
703 &sensor_dev_attr_in3_min.dev_attr.attr, 978 &sensor_dev_attr_in3_min.dev_attr.attr,
704 &sensor_dev_attr_in4_input.dev_attr.attr,
705 &sensor_dev_attr_in4_max.dev_attr.attr,
706 &sensor_dev_attr_in4_min.dev_attr.attr,
707 &sensor_dev_attr_in5_input.dev_attr.attr, 979 &sensor_dev_attr_in5_input.dev_attr.attr,
708 &sensor_dev_attr_in5_max.dev_attr.attr, 980 &sensor_dev_attr_in5_max.dev_attr.attr,
709 &sensor_dev_attr_in5_min.dev_attr.attr, 981 &sensor_dev_attr_in5_min.dev_attr.attr,
@@ -713,9 +985,29 @@ static struct attribute *f71805f_attributes[] = {
713 &sensor_dev_attr_in7_input.dev_attr.attr, 985 &sensor_dev_attr_in7_input.dev_attr.attr,
714 &sensor_dev_attr_in7_max.dev_attr.attr, 986 &sensor_dev_attr_in7_max.dev_attr.attr,
715 &sensor_dev_attr_in7_min.dev_attr.attr, 987 &sensor_dev_attr_in7_min.dev_attr.attr,
716 &sensor_dev_attr_in8_input.dev_attr.attr, 988
717 &sensor_dev_attr_in8_max.dev_attr.attr, 989 &sensor_dev_attr_fan1_input.dev_attr.attr,
718 &sensor_dev_attr_in8_min.dev_attr.attr, 990 &sensor_dev_attr_fan1_min.dev_attr.attr,
991 &sensor_dev_attr_fan1_alarm.dev_attr.attr,
992 &sensor_dev_attr_fan1_target.dev_attr.attr,
993 &sensor_dev_attr_fan2_input.dev_attr.attr,
994 &sensor_dev_attr_fan2_min.dev_attr.attr,
995 &sensor_dev_attr_fan2_alarm.dev_attr.attr,
996 &sensor_dev_attr_fan2_target.dev_attr.attr,
997 &sensor_dev_attr_fan3_input.dev_attr.attr,
998 &sensor_dev_attr_fan3_min.dev_attr.attr,
999 &sensor_dev_attr_fan3_alarm.dev_attr.attr,
1000 &sensor_dev_attr_fan3_target.dev_attr.attr,
1001
1002 &sensor_dev_attr_pwm1.dev_attr.attr,
1003 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
1004 &sensor_dev_attr_pwm1_mode.dev_attr.attr,
1005 &sensor_dev_attr_pwm2.dev_attr.attr,
1006 &sensor_dev_attr_pwm2_enable.dev_attr.attr,
1007 &sensor_dev_attr_pwm2_mode.dev_attr.attr,
1008 &sensor_dev_attr_pwm3.dev_attr.attr,
1009 &sensor_dev_attr_pwm3_enable.dev_attr.attr,
1010 &sensor_dev_attr_pwm3_mode.dev_attr.attr,
719 1011
720 &sensor_dev_attr_temp1_input.dev_attr.attr, 1012 &sensor_dev_attr_temp1_input.dev_attr.attr,
721 &sensor_dev_attr_temp1_max.dev_attr.attr, 1013 &sensor_dev_attr_temp1_max.dev_attr.attr,
@@ -734,11 +1026,9 @@ static struct attribute *f71805f_attributes[] = {
734 &sensor_dev_attr_in1_alarm.dev_attr.attr, 1026 &sensor_dev_attr_in1_alarm.dev_attr.attr,
735 &sensor_dev_attr_in2_alarm.dev_attr.attr, 1027 &sensor_dev_attr_in2_alarm.dev_attr.attr,
736 &sensor_dev_attr_in3_alarm.dev_attr.attr, 1028 &sensor_dev_attr_in3_alarm.dev_attr.attr,
737 &sensor_dev_attr_in4_alarm.dev_attr.attr,
738 &sensor_dev_attr_in5_alarm.dev_attr.attr, 1029 &sensor_dev_attr_in5_alarm.dev_attr.attr,
739 &sensor_dev_attr_in6_alarm.dev_attr.attr, 1030 &sensor_dev_attr_in6_alarm.dev_attr.attr,
740 &sensor_dev_attr_in7_alarm.dev_attr.attr, 1031 &sensor_dev_attr_in7_alarm.dev_attr.attr,
741 &sensor_dev_attr_in8_alarm.dev_attr.attr,
742 &dev_attr_alarms_in.attr, 1032 &dev_attr_alarms_in.attr,
743 &sensor_dev_attr_temp1_alarm.dev_attr.attr, 1033 &sensor_dev_attr_temp1_alarm.dev_attr.attr,
744 &sensor_dev_attr_temp2_alarm.dev_attr.attr, 1034 &sensor_dev_attr_temp2_alarm.dev_attr.attr,
@@ -754,29 +1044,59 @@ static const struct attribute_group f71805f_group = {
754 .attrs = f71805f_attributes, 1044 .attrs = f71805f_attributes,
755}; 1045};
756 1046
757static struct attribute *f71805f_attributes_fan[3][4] = { 1047static struct attribute *f71805f_attributes_optin[4][5] = {
758 { 1048 {
759 &sensor_dev_attr_fan1_input.dev_attr.attr, 1049 &sensor_dev_attr_in4_input.dev_attr.attr,
760 &sensor_dev_attr_fan1_min.dev_attr.attr, 1050 &sensor_dev_attr_in4_max.dev_attr.attr,
761 &sensor_dev_attr_fan1_alarm.dev_attr.attr, 1051 &sensor_dev_attr_in4_min.dev_attr.attr,
1052 &sensor_dev_attr_in4_alarm.dev_attr.attr,
1053 NULL
1054 }, {
1055 &sensor_dev_attr_in8_input.dev_attr.attr,
1056 &sensor_dev_attr_in8_max.dev_attr.attr,
1057 &sensor_dev_attr_in8_min.dev_attr.attr,
1058 &sensor_dev_attr_in8_alarm.dev_attr.attr,
762 NULL 1059 NULL
763 }, { 1060 }, {
764 &sensor_dev_attr_fan2_input.dev_attr.attr, 1061 &sensor_dev_attr_in9_input.dev_attr.attr,
765 &sensor_dev_attr_fan2_min.dev_attr.attr, 1062 &sensor_dev_attr_in9_max.dev_attr.attr,
766 &sensor_dev_attr_fan2_alarm.dev_attr.attr, 1063 &sensor_dev_attr_in9_min.dev_attr.attr,
1064 &sensor_dev_attr_in9_alarm.dev_attr.attr,
767 NULL 1065 NULL
768 }, { 1066 }, {
769 &sensor_dev_attr_fan3_input.dev_attr.attr, 1067 &sensor_dev_attr_in10_input.dev_attr.attr,
770 &sensor_dev_attr_fan3_min.dev_attr.attr, 1068 &sensor_dev_attr_in10_max.dev_attr.attr,
771 &sensor_dev_attr_fan3_alarm.dev_attr.attr, 1069 &sensor_dev_attr_in10_min.dev_attr.attr,
1070 &sensor_dev_attr_in10_alarm.dev_attr.attr,
772 NULL 1071 NULL
773 } 1072 }
774}; 1073};
775 1074
776static const struct attribute_group f71805f_group_fan[3] = { 1075static const struct attribute_group f71805f_group_optin[4] = {
777 { .attrs = f71805f_attributes_fan[0] }, 1076 { .attrs = f71805f_attributes_optin[0] },
778 { .attrs = f71805f_attributes_fan[1] }, 1077 { .attrs = f71805f_attributes_optin[1] },
779 { .attrs = f71805f_attributes_fan[2] }, 1078 { .attrs = f71805f_attributes_optin[2] },
1079 { .attrs = f71805f_attributes_optin[3] },
1080};
1081
1082/* We don't include pwm_freq files in the arrays above, because they must be
1083 created conditionally (only if pwm_mode is 1 == PWM) */
1084static struct attribute *f71805f_attributes_pwm_freq[] = {
1085 &sensor_dev_attr_pwm1_freq.dev_attr.attr,
1086 &sensor_dev_attr_pwm2_freq.dev_attr.attr,
1087 &sensor_dev_attr_pwm3_freq.dev_attr.attr,
1088 NULL
1089};
1090
1091static const struct attribute_group f71805f_group_pwm_freq = {
1092 .attrs = f71805f_attributes_pwm_freq,
1093};
1094
1095/* We also need an indexed access to pwmN files to toggle writability */
1096static struct attribute *f71805f_attr_pwm[] = {
1097 &sensor_dev_attr_pwm1.dev_attr.attr,
1098 &sensor_dev_attr_pwm2.dev_attr.attr,
1099 &sensor_dev_attr_pwm3.dev_attr.attr,
780}; 1100};
781 1101
782/* 1102/*
@@ -798,18 +1118,30 @@ static void __devinit f71805f_init_device(struct f71805f_data *data)
798 /* Fan monitoring can be disabled. If it is, we won't be polling 1118 /* Fan monitoring can be disabled. If it is, we won't be polling
799 the register values, and won't create the related sysfs files. */ 1119 the register values, and won't create the related sysfs files. */
800 for (i = 0; i < 3; i++) { 1120 for (i = 0; i < 3; i++) {
801 reg = f71805f_read8(data, F71805F_REG_FAN_CTRL(i)); 1121 data->fan_ctrl[i] = f71805f_read8(data,
802 if (!(reg & 0x80)) 1122 F71805F_REG_FAN_CTRL(i));
803 data->fan_enabled |= (1 << i); 1123 /* Clear latch full bit, else "speed mode" fan speed control
1124 doesn't work */
1125 if (data->fan_ctrl[i] & FAN_CTRL_LATCH_FULL) {
1126 data->fan_ctrl[i] &= ~FAN_CTRL_LATCH_FULL;
1127 f71805f_write8(data, F71805F_REG_FAN_CTRL(i),
1128 data->fan_ctrl[i]);
1129 }
804 } 1130 }
805} 1131}
806 1132
807static int __devinit f71805f_probe(struct platform_device *pdev) 1133static int __devinit f71805f_probe(struct platform_device *pdev)
808{ 1134{
1135 struct f71805f_sio_data *sio_data = pdev->dev.platform_data;
809 struct f71805f_data *data; 1136 struct f71805f_data *data;
810 struct resource *res; 1137 struct resource *res;
811 int i, err; 1138 int i, err;
812 1139
1140 static const char *names[] = {
1141 "f71805f",
1142 "f71872f",
1143 };
1144
813 if (!(data = kzalloc(sizeof(struct f71805f_data), GFP_KERNEL))) { 1145 if (!(data = kzalloc(sizeof(struct f71805f_data), GFP_KERNEL))) {
814 err = -ENOMEM; 1146 err = -ENOMEM;
815 printk(KERN_ERR DRVNAME ": Out of memory\n"); 1147 printk(KERN_ERR DRVNAME ": Out of memory\n");
@@ -819,24 +1151,69 @@ static int __devinit f71805f_probe(struct platform_device *pdev)
819 res = platform_get_resource(pdev, IORESOURCE_IO, 0); 1151 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
820 data->addr = res->start; 1152 data->addr = res->start;
821 mutex_init(&data->lock); 1153 mutex_init(&data->lock);
822 data->name = "f71805f"; 1154 data->name = names[sio_data->kind];
823 mutex_init(&data->update_lock); 1155 mutex_init(&data->update_lock);
824 1156
825 platform_set_drvdata(pdev, data); 1157 platform_set_drvdata(pdev, data);
826 1158
1159 /* Some voltage inputs depend on chip model and configuration */
1160 switch (sio_data->kind) {
1161 case f71805f:
1162 data->has_in = 0x1ff;
1163 break;
1164 case f71872f:
1165 data->has_in = 0x6ef;
1166 if (sio_data->fnsel1 & 0x01)
1167 data->has_in |= (1 << 4); /* in4 */
1168 if (sio_data->fnsel1 & 0x02)
1169 data->has_in |= (1 << 8); /* in8 */
1170 break;
1171 }
1172
827 /* Initialize the F71805F chip */ 1173 /* Initialize the F71805F chip */
828 f71805f_init_device(data); 1174 f71805f_init_device(data);
829 1175
830 /* Register sysfs interface files */ 1176 /* Register sysfs interface files */
831 if ((err = sysfs_create_group(&pdev->dev.kobj, &f71805f_group))) 1177 if ((err = sysfs_create_group(&pdev->dev.kobj, &f71805f_group)))
832 goto exit_free; 1178 goto exit_free;
833 for (i = 0; i < 3; i++) { 1179 if (data->has_in & (1 << 4)) { /* in4 */
834 if (!(data->fan_enabled & (1 << i))) 1180 if ((err = sysfs_create_group(&pdev->dev.kobj,
835 continue; 1181 &f71805f_group_optin[0])))
1182 goto exit_remove_files;
1183 }
1184 if (data->has_in & (1 << 8)) { /* in8 */
1185 if ((err = sysfs_create_group(&pdev->dev.kobj,
1186 &f71805f_group_optin[1])))
1187 goto exit_remove_files;
1188 }
1189 if (data->has_in & (1 << 9)) { /* in9 (F71872F/FG only) */
836 if ((err = sysfs_create_group(&pdev->dev.kobj, 1190 if ((err = sysfs_create_group(&pdev->dev.kobj,
837 &f71805f_group_fan[i]))) 1191 &f71805f_group_optin[2])))
838 goto exit_remove_files; 1192 goto exit_remove_files;
839 } 1193 }
 1194 if (data->has_in & (1 << 10)) { /* in10 (F71872F/FG only) */
1195 if ((err = sysfs_create_group(&pdev->dev.kobj,
1196 &f71805f_group_optin[3])))
1197 goto exit_remove_files;
1198 }
1199 for (i = 0; i < 3; i++) {
1200 /* If control mode is PWM, create pwm_freq file */
1201 if (!(data->fan_ctrl[i] & FAN_CTRL_DC_MODE)) {
1202 if ((err = sysfs_create_file(&pdev->dev.kobj,
1203 f71805f_attributes_pwm_freq[i])))
1204 goto exit_remove_files;
1205 }
1206 /* If PWM is in manual mode, add write permission */
1207 if (data->fan_ctrl[i] & FAN_CTRL_MODE_MANUAL) {
1208 if ((err = sysfs_chmod_file(&pdev->dev.kobj,
1209 f71805f_attr_pwm[i],
1210 S_IRUGO | S_IWUSR))) {
1211 dev_err(&pdev->dev, "chmod +w pwm%d failed\n",
1212 i + 1);
1213 goto exit_remove_files;
1214 }
1215 }
1216 }
840 1217
841 data->class_dev = hwmon_device_register(&pdev->dev); 1218 data->class_dev = hwmon_device_register(&pdev->dev);
842 if (IS_ERR(data->class_dev)) { 1219 if (IS_ERR(data->class_dev)) {
@@ -849,8 +1226,9 @@ static int __devinit f71805f_probe(struct platform_device *pdev)
849 1226
850exit_remove_files: 1227exit_remove_files:
851 sysfs_remove_group(&pdev->dev.kobj, &f71805f_group); 1228 sysfs_remove_group(&pdev->dev.kobj, &f71805f_group);
852 for (i = 0; i < 3; i++) 1229 for (i = 0; i < 4; i++)
853 sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_fan[i]); 1230 sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_optin[i]);
1231 sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_pwm_freq);
854exit_free: 1232exit_free:
855 platform_set_drvdata(pdev, NULL); 1233 platform_set_drvdata(pdev, NULL);
856 kfree(data); 1234 kfree(data);
@@ -866,8 +1244,9 @@ static int __devexit f71805f_remove(struct platform_device *pdev)
866 platform_set_drvdata(pdev, NULL); 1244 platform_set_drvdata(pdev, NULL);
867 hwmon_device_unregister(data->class_dev); 1245 hwmon_device_unregister(data->class_dev);
868 sysfs_remove_group(&pdev->dev.kobj, &f71805f_group); 1246 sysfs_remove_group(&pdev->dev.kobj, &f71805f_group);
869 for (i = 0; i < 3; i++) 1247 for (i = 0; i < 4; i++)
870 sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_fan[i]); 1248 sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_optin[i]);
1249 sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_pwm_freq);
871 kfree(data); 1250 kfree(data);
872 1251
873 return 0; 1252 return 0;
@@ -882,7 +1261,8 @@ static struct platform_driver f71805f_driver = {
882 .remove = __devexit_p(f71805f_remove), 1261 .remove = __devexit_p(f71805f_remove),
883}; 1262};
884 1263
885static int __init f71805f_device_add(unsigned short address) 1264static int __init f71805f_device_add(unsigned short address,
1265 const struct f71805f_sio_data *sio_data)
886{ 1266{
887 struct resource res = { 1267 struct resource res = {
888 .start = address, 1268 .start = address,
@@ -906,26 +1286,45 @@ static int __init f71805f_device_add(unsigned short address)
906 goto exit_device_put; 1286 goto exit_device_put;
907 } 1287 }
908 1288
1289 pdev->dev.platform_data = kmalloc(sizeof(struct f71805f_sio_data),
1290 GFP_KERNEL);
1291 if (!pdev->dev.platform_data) {
1292 err = -ENOMEM;
1293 printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
1294 goto exit_device_put;
1295 }
1296 memcpy(pdev->dev.platform_data, sio_data,
1297 sizeof(struct f71805f_sio_data));
1298
909 err = platform_device_add(pdev); 1299 err = platform_device_add(pdev);
910 if (err) { 1300 if (err) {
911 printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n", 1301 printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
912 err); 1302 err);
913 goto exit_device_put; 1303 goto exit_kfree_data;
914 } 1304 }
915 1305
916 return 0; 1306 return 0;
917 1307
1308exit_kfree_data:
1309 kfree(pdev->dev.platform_data);
1310 pdev->dev.platform_data = NULL;
918exit_device_put: 1311exit_device_put:
919 platform_device_put(pdev); 1312 platform_device_put(pdev);
920exit: 1313exit:
921 return err; 1314 return err;
922} 1315}
923 1316
924static int __init f71805f_find(int sioaddr, unsigned short *address) 1317static int __init f71805f_find(int sioaddr, unsigned short *address,
1318 struct f71805f_sio_data *sio_data)
925{ 1319{
926 int err = -ENODEV; 1320 int err = -ENODEV;
927 u16 devid; 1321 u16 devid;
928 1322
1323 static const char *names[] = {
1324 "F71805F/FG",
1325 "F71872F/FG",
1326 };
1327
929 superio_enter(sioaddr); 1328 superio_enter(sioaddr);
930 1329
931 devid = superio_inw(sioaddr, SIO_REG_MANID); 1330 devid = superio_inw(sioaddr, SIO_REG_MANID);
@@ -933,7 +1332,15 @@ static int __init f71805f_find(int sioaddr, unsigned short *address)
933 goto exit; 1332 goto exit;
934 1333
935 devid = superio_inw(sioaddr, SIO_REG_DEVID); 1334 devid = superio_inw(sioaddr, SIO_REG_DEVID);
936 if (devid != SIO_F71805F_ID) { 1335 switch (devid) {
1336 case SIO_F71805F_ID:
1337 sio_data->kind = f71805f;
1338 break;
1339 case SIO_F71872F_ID:
1340 sio_data->kind = f71872f;
1341 sio_data->fnsel1 = superio_inb(sioaddr, SIO_REG_FNSEL1);
1342 break;
1343 default:
937 printk(KERN_INFO DRVNAME ": Unsupported Fintek device, " 1344 printk(KERN_INFO DRVNAME ": Unsupported Fintek device, "
938 "skipping\n"); 1345 "skipping\n");
939 goto exit; 1346 goto exit;
@@ -952,10 +1359,12 @@ static int __init f71805f_find(int sioaddr, unsigned short *address)
952 "skipping\n"); 1359 "skipping\n");
953 goto exit; 1360 goto exit;
954 } 1361 }
1362 *address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */
955 1363
956 err = 0; 1364 err = 0;
957 printk(KERN_INFO DRVNAME ": Found F71805F chip at %#x, revision %u\n", 1365 printk(KERN_INFO DRVNAME ": Found %s chip at %#x, revision %u\n",
958 *address, superio_inb(sioaddr, SIO_REG_DEVREV)); 1366 names[sio_data->kind], *address,
1367 superio_inb(sioaddr, SIO_REG_DEVREV));
959 1368
960exit: 1369exit:
961 superio_exit(sioaddr); 1370 superio_exit(sioaddr);
@@ -966,9 +1375,10 @@ static int __init f71805f_init(void)
966{ 1375{
967 int err; 1376 int err;
968 unsigned short address; 1377 unsigned short address;
1378 struct f71805f_sio_data sio_data;
969 1379
970 if (f71805f_find(0x2e, &address) 1380 if (f71805f_find(0x2e, &address, &sio_data)
971 && f71805f_find(0x4e, &address)) 1381 && f71805f_find(0x4e, &address, &sio_data))
972 return -ENODEV; 1382 return -ENODEV;
973 1383
974 err = platform_driver_register(&f71805f_driver); 1384 err = platform_driver_register(&f71805f_driver);
@@ -976,7 +1386,7 @@ static int __init f71805f_init(void)
976 goto exit; 1386 goto exit;
977 1387
978 /* Sets global pdev as a side effect */ 1388 /* Sets global pdev as a side effect */
979 err = f71805f_device_add(address); 1389 err = f71805f_device_add(address, &sio_data);
980 if (err) 1390 if (err)
981 goto exit_driver; 1391 goto exit_driver;
982 1392
@@ -990,13 +1400,16 @@ exit:
990 1400
991static void __exit f71805f_exit(void) 1401static void __exit f71805f_exit(void)
992{ 1402{
1403 kfree(pdev->dev.platform_data);
1404 pdev->dev.platform_data = NULL;
993 platform_device_unregister(pdev); 1405 platform_device_unregister(pdev);
1406
994 platform_driver_unregister(&f71805f_driver); 1407 platform_driver_unregister(&f71805f_driver);
995} 1408}
996 1409
997MODULE_AUTHOR("Jean Delvare <khali@linux-fr>"); 1410MODULE_AUTHOR("Jean Delvare <khali@linux-fr>");
998MODULE_LICENSE("GPL"); 1411MODULE_LICENSE("GPL");
999MODULE_DESCRIPTION("F71805F hardware monitoring driver"); 1412MODULE_DESCRIPTION("F71805F/F71872F hardware monitoring driver");
1000 1413
1001module_init(f71805f_init); 1414module_init(f71805f_init);
1002module_exit(f71805f_exit); 1415module_exit(f71805f_exit);
diff --git a/drivers/hwmon/hdaps.c b/drivers/hwmon/hdaps.c
index e8ef62b83d6b..bf759ea545ac 100644
--- a/drivers/hwmon/hdaps.c
+++ b/drivers/hwmon/hdaps.c
@@ -478,74 +478,64 @@ static struct attribute_group hdaps_attribute_group = {
478/* Module stuff */ 478/* Module stuff */
479 479
480/* hdaps_dmi_match - found a match. return one, short-circuiting the hunt. */ 480/* hdaps_dmi_match - found a match. return one, short-circuiting the hunt. */
481static int hdaps_dmi_match(struct dmi_system_id *id) 481static int __init hdaps_dmi_match(struct dmi_system_id *id)
482{ 482{
483 printk(KERN_INFO "hdaps: %s detected.\n", id->ident); 483 printk(KERN_INFO "hdaps: %s detected.\n", id->ident);
484 return 1; 484 return 1;
485} 485}
486 486
487/* hdaps_dmi_match_invert - found an inverted match. */ 487/* hdaps_dmi_match_invert - found an inverted match. */
488static int hdaps_dmi_match_invert(struct dmi_system_id *id) 488static int __init hdaps_dmi_match_invert(struct dmi_system_id *id)
489{ 489{
490 hdaps_invert = 1; 490 hdaps_invert = 1;
491 printk(KERN_INFO "hdaps: inverting axis readings.\n"); 491 printk(KERN_INFO "hdaps: inverting axis readings.\n");
492 return hdaps_dmi_match(id); 492 return hdaps_dmi_match(id);
493} 493}
494 494
495#define HDAPS_DMI_MATCH_NORMAL(model) { \ 495#define HDAPS_DMI_MATCH_NORMAL(vendor, model) { \
496 .ident = "IBM " model, \ 496 .ident = vendor " " model, \
497 .callback = hdaps_dmi_match, \ 497 .callback = hdaps_dmi_match, \
498 .matches = { \ 498 .matches = { \
499 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), \ 499 DMI_MATCH(DMI_BOARD_VENDOR, vendor), \
500 DMI_MATCH(DMI_PRODUCT_VERSION, model) \ 500 DMI_MATCH(DMI_PRODUCT_VERSION, model) \
501 } \ 501 } \
502} 502}
503 503
504#define HDAPS_DMI_MATCH_INVERT(model) { \ 504#define HDAPS_DMI_MATCH_INVERT(vendor, model) { \
505 .ident = "IBM " model, \ 505 .ident = vendor " " model, \
506 .callback = hdaps_dmi_match_invert, \ 506 .callback = hdaps_dmi_match_invert, \
507 .matches = { \ 507 .matches = { \
508 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), \ 508 DMI_MATCH(DMI_BOARD_VENDOR, vendor), \
509 DMI_MATCH(DMI_PRODUCT_VERSION, model) \ 509 DMI_MATCH(DMI_PRODUCT_VERSION, model) \
510 } \ 510 } \
511} 511}
512 512
513#define HDAPS_DMI_MATCH_LENOVO(model) { \ 513/* Note that HDAPS_DMI_MATCH_NORMAL("ThinkPad T42") would match
514 .ident = "Lenovo " model, \ 514 "ThinkPad T42p", so the order of the entries matters.
515 .callback = hdaps_dmi_match_invert, \ 515 If your ThinkPad is not recognized, please update to latest
516 .matches = { \ 516 BIOS. This is especially the case for some R52 ThinkPads. */
517 DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), \ 517static struct dmi_system_id __initdata hdaps_whitelist[] = {
518 DMI_MATCH(DMI_PRODUCT_VERSION, model) \ 518 HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad R50p"),
519 } \ 519 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"),
520} 520 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"),
521 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R52"),
522 HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad T41p"),
523 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad T41"),
524 HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad T42p"),
525 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad T42"),
526 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad T43"),
527 HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T60"),
528 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad X40"),
529 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad X41"),
530 HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad X60"),
531 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad Z60m"),
532 { .ident = NULL }
533};
521 534
522static int __init hdaps_init(void) 535static int __init hdaps_init(void)
523{ 536{
524 int ret; 537 int ret;
525 538
526 /* Note that HDAPS_DMI_MATCH_NORMAL("ThinkPad T42") would match
527 "ThinkPad T42p", so the order of the entries matters */
528 struct dmi_system_id hdaps_whitelist[] = {
529 HDAPS_DMI_MATCH_NORMAL("ThinkPad H"),
530 HDAPS_DMI_MATCH_INVERT("ThinkPad R50p"),
531 HDAPS_DMI_MATCH_NORMAL("ThinkPad R50"),
532 HDAPS_DMI_MATCH_NORMAL("ThinkPad R51"),
533 HDAPS_DMI_MATCH_NORMAL("ThinkPad R52"),
534 HDAPS_DMI_MATCH_NORMAL("ThinkPad H"), /* R52 (1846AQG) */
535 HDAPS_DMI_MATCH_INVERT("ThinkPad T41p"),
536 HDAPS_DMI_MATCH_NORMAL("ThinkPad T41"),
537 HDAPS_DMI_MATCH_INVERT("ThinkPad T42p"),
538 HDAPS_DMI_MATCH_NORMAL("ThinkPad T42"),
539 HDAPS_DMI_MATCH_NORMAL("ThinkPad T43"),
540 HDAPS_DMI_MATCH_LENOVO("ThinkPad T60p"),
541 HDAPS_DMI_MATCH_LENOVO("ThinkPad T60"),
542 HDAPS_DMI_MATCH_NORMAL("ThinkPad X40"),
543 HDAPS_DMI_MATCH_NORMAL("ThinkPad X41"),
544 HDAPS_DMI_MATCH_LENOVO("ThinkPad X60"),
545 HDAPS_DMI_MATCH_NORMAL("ThinkPad Z60m"),
546 { .ident = NULL }
547 };
548
549 if (!dmi_check_system(hdaps_whitelist)) { 539 if (!dmi_check_system(hdaps_whitelist)) {
550 printk(KERN_WARNING "hdaps: supported laptop not found!\n"); 540 printk(KERN_WARNING "hdaps: supported laptop not found!\n");
551 ret = -ENODEV; 541 ret = -ENODEV;
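The whitelist moved above relies on DMI_MATCH(), which matches by substring, so an entry for "ThinkPad T42" would also accept a "ThinkPad T42p" product string; that is why the inverted T42p entry has to come first. A minimal standalone sketch of that first-match-wins behaviour, with plain strstr() standing in for the kernel's dmi_check_system() and made-up table strings:

#include <stdio.h>
#include <string.h>

/* First-match-wins scan over substring patterns, standing in for the
 * kernel's dmi_check_system(); entries and strings are illustrative only. */
struct entry {
	const char *pattern;
	const char *action;
};

static const struct entry whitelist[] = {
	{ "ThinkPad T42p", "invert axis readings" },	/* more specific first */
	{ "ThinkPad T42",  "normal axis readings" },
	{ NULL, NULL }
};

static const char *classify(const char *product)
{
	int i;

	for (i = 0; whitelist[i].pattern; i++)
		if (strstr(product, whitelist[i].pattern))
			return whitelist[i].action;
	return "not supported";
}

int main(void)
{
	printf("ThinkPad T42p -> %s\n", classify("ThinkPad T42p"));
	printf("ThinkPad T42  -> %s\n", classify("ThinkPad T42"));
	return 0;
}

Swapping the two T42 entries would silently route a T42p through the "normal" branch, which is exactly the ordering pitfall the comment warns about.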
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 9d67320e6840..31c42002708f 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -1,7 +1,7 @@
1/* 1/*
2 hwmon-vid.c - VID/VRM/VRD voltage conversions 2 hwmon-vid.c - VID/VRM/VRD voltage conversions
3 3
4 Copyright (c) 2004 Rudolf Marek <r.marek@sh.cvut.cz> 4 Copyright (c) 2004 Rudolf Marek <r.marek@assembler.cz>
5 5
6 Partly imported from i2c-vid.h of the lm_sensors project 6 Partly imported from i2c-vid.h of the lm_sensors project
7 Copyright (c) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com> 7 Copyright (c) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
@@ -232,7 +232,7 @@ u8 vid_which_vrm(void)
232EXPORT_SYMBOL(vid_from_reg); 232EXPORT_SYMBOL(vid_from_reg);
233EXPORT_SYMBOL(vid_which_vrm); 233EXPORT_SYMBOL(vid_which_vrm);
234 234
235MODULE_AUTHOR("Rudolf Marek <r.marek@sh.cvut.cz>"); 235MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>");
236 236
237MODULE_DESCRIPTION("hwmon-vid driver"); 237MODULE_DESCRIPTION("hwmon-vid driver");
238MODULE_LICENSE("GPL"); 238MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 323ef06719c1..1ed8b7e2c35d 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -3,7 +3,7 @@
3 monitoring. 3 monitoring.
4 4
5 Supports: IT8705F Super I/O chip w/LPC interface 5 Supports: IT8705F Super I/O chip w/LPC interface
6 IT8712F Super I/O chip w/LPC interface & SMBus 6 IT8712F Super I/O chip w/LPC interface
7 IT8716F Super I/O chip w/LPC interface 7 IT8716F Super I/O chip w/LPC interface
8 IT8718F Super I/O chip w/LPC interface 8 IT8718F Super I/O chip w/LPC interface
9 Sis950 A clone of the IT8705F 9 Sis950 A clone of the IT8705F
@@ -41,12 +41,8 @@
41#include <asm/io.h> 41#include <asm/io.h>
42 42
43 43
44/* Addresses to scan */
45static unsigned short normal_i2c[] = { 0x2d, I2C_CLIENT_END };
46static unsigned short isa_address; 44static unsigned short isa_address;
47 45enum chips { it87, it8712, it8716, it8718 };
48/* Insmod parameters */
49I2C_CLIENT_INSMOD_4(it87, it8712, it8716, it8718);
50 46
51#define REG 0x2e /* The register to read/write */ 47#define REG 0x2e /* The register to read/write */
52#define DEV 0x07 /* Register: Logical device select */ 48#define DEV 0x07 /* Register: Logical device select */
@@ -162,8 +158,6 @@ static u8 vid_value;
162#define IT87_REG_TEMP_HIGH(nr) (0x40 + (nr) * 2) 158#define IT87_REG_TEMP_HIGH(nr) (0x40 + (nr) * 2)
163#define IT87_REG_TEMP_LOW(nr) (0x41 + (nr) * 2) 159#define IT87_REG_TEMP_LOW(nr) (0x41 + (nr) * 2)
164 160
165#define IT87_REG_I2C_ADDR 0x48
166
167#define IT87_REG_VIN_ENABLE 0x50 161#define IT87_REG_VIN_ENABLE 0x50
168#define IT87_REG_TEMP_ENABLE 0x51 162#define IT87_REG_TEMP_ENABLE 0x51
169 163
@@ -242,33 +236,22 @@ struct it87_data {
242}; 236};
243 237
244 238
245static int it87_attach_adapter(struct i2c_adapter *adapter); 239static int it87_detect(struct i2c_adapter *adapter);
246static int it87_isa_attach_adapter(struct i2c_adapter *adapter);
247static int it87_detect(struct i2c_adapter *adapter, int address, int kind);
248static int it87_detach_client(struct i2c_client *client); 240static int it87_detach_client(struct i2c_client *client);
249 241
250static int it87_read_value(struct i2c_client *client, u8 reg); 242static int it87_read_value(struct i2c_client *client, u8 reg);
251static int it87_write_value(struct i2c_client *client, u8 reg, u8 value); 243static void it87_write_value(struct i2c_client *client, u8 reg, u8 value);
252static struct it87_data *it87_update_device(struct device *dev); 244static struct it87_data *it87_update_device(struct device *dev);
253static int it87_check_pwm(struct i2c_client *client); 245static int it87_check_pwm(struct i2c_client *client);
254static void it87_init_client(struct i2c_client *client, struct it87_data *data); 246static void it87_init_client(struct i2c_client *client, struct it87_data *data);
255 247
256 248
257static struct i2c_driver it87_driver = {
258 .driver = {
259 .name = "it87",
260 },
261 .id = I2C_DRIVERID_IT87,
262 .attach_adapter = it87_attach_adapter,
263 .detach_client = it87_detach_client,
264};
265
266static struct i2c_driver it87_isa_driver = { 249static struct i2c_driver it87_isa_driver = {
267 .driver = { 250 .driver = {
268 .owner = THIS_MODULE, 251 .owner = THIS_MODULE,
269 .name = "it87-isa", 252 .name = "it87-isa",
270 }, 253 },
271 .attach_adapter = it87_isa_attach_adapter, 254 .attach_adapter = it87_detect,
272 .detach_client = it87_detach_client, 255 .detach_client = it87_detach_client,
273}; 256};
274 257
@@ -850,22 +833,6 @@ static const struct attribute_group it87_group_opt = {
850 .attrs = it87_attributes_opt, 833 .attrs = it87_attributes_opt,
851}; 834};
852 835
853/* This function is called when:
854 * it87_driver is inserted (when this module is loaded), for each
855 available adapter
856 * when a new adapter is inserted (and it87_driver is still present) */
857static int it87_attach_adapter(struct i2c_adapter *adapter)
858{
859 if (!(adapter->class & I2C_CLASS_HWMON))
860 return 0;
861 return i2c_probe(adapter, &addr_data, it87_detect);
862}
863
864static int it87_isa_attach_adapter(struct i2c_adapter *adapter)
865{
866 return it87_detect(adapter, isa_address, -1);
867}
868
869/* SuperIO detection - will change isa_address if a chip is found */ 836/* SuperIO detection - will change isa_address if a chip is found */
870static int __init it87_find(unsigned short *address) 837static int __init it87_find(unsigned short *address)
871{ 838{
@@ -916,29 +883,20 @@ exit:
916} 883}
917 884
918/* This function is called by i2c_probe */ 885/* This function is called by i2c_probe */
919static int it87_detect(struct i2c_adapter *adapter, int address, int kind) 886static int it87_detect(struct i2c_adapter *adapter)
920{ 887{
921 int i;
922 struct i2c_client *new_client; 888 struct i2c_client *new_client;
923 struct it87_data *data; 889 struct it87_data *data;
924 int err = 0; 890 int err = 0;
925 const char *name = ""; 891 const char *name;
926 int is_isa = i2c_is_isa_adapter(adapter);
927 int enable_pwm_interface; 892 int enable_pwm_interface;
928 893
929 if (!is_isa &&
930 !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
931 goto ERROR0;
932
933 /* Reserve the ISA region */ 894 /* Reserve the ISA region */
934 if (is_isa) 895 if (!request_region(isa_address, IT87_EXTENT,
935 if (!request_region(address, IT87_EXTENT, 896 it87_isa_driver.driver.name)){
936 it87_isa_driver.driver.name)) 897 err = -EBUSY;
937 goto ERROR0; 898 goto ERROR0;
938 899 }
939 /* For now, we presume we have a valid client. We create the
940 client structure, even though we cannot fill it completely yet.
941 But it allows us to access it87_{read,write}_value. */
942 900
943 if (!(data = kzalloc(sizeof(struct it87_data), GFP_KERNEL))) { 901 if (!(data = kzalloc(sizeof(struct it87_data), GFP_KERNEL))) {
944 err = -ENOMEM; 902 err = -ENOMEM;
@@ -946,80 +904,46 @@ static int it87_detect(struct i2c_adapter *adapter, int address, int kind)
946 } 904 }
947 905
948 new_client = &data->client; 906 new_client = &data->client;
949 if (is_isa) 907 mutex_init(&data->lock);
950 mutex_init(&data->lock);
951 i2c_set_clientdata(new_client, data); 908 i2c_set_clientdata(new_client, data);
952 new_client->addr = address; 909 new_client->addr = isa_address;
953 new_client->adapter = adapter; 910 new_client->adapter = adapter;
954 new_client->driver = is_isa ? &it87_isa_driver : &it87_driver; 911 new_client->driver = &it87_isa_driver;
955 new_client->flags = 0;
956 912
957 /* Now, we do the remaining detection. */ 913 /* Now, we do the remaining detection. */
958 914 if ((it87_read_value(new_client, IT87_REG_CONFIG) & 0x80)
959 if (kind < 0) { 915 || it87_read_value(new_client, IT87_REG_CHIPID) != 0x90) {
960 if ((it87_read_value(new_client, IT87_REG_CONFIG) & 0x80) 916 err = -ENODEV;
961 || (!is_isa 917 goto ERROR2;
962 && it87_read_value(new_client, IT87_REG_I2C_ADDR) != address)) {
963 err = -ENODEV;
964 goto ERROR2;
965 }
966 } 918 }
967 919
968 /* Determine the chip type. */ 920 /* Determine the chip type. */
969 if (kind <= 0) { 921 switch (chip_type) {
970 i = it87_read_value(new_client, IT87_REG_CHIPID); 922 case IT8712F_DEVID:
971 if (i == 0x90) { 923 data->type = it8712;
972 kind = it87;
973 if (is_isa) {
974 switch (chip_type) {
975 case IT8712F_DEVID:
976 kind = it8712;
977 break;
978 case IT8716F_DEVID:
979 kind = it8716;
980 break;
981 case IT8718F_DEVID:
982 kind = it8718;
983 break;
984 }
985 }
986 }
987 else {
988 if (kind == 0)
989 dev_info(&adapter->dev,
990 "Ignoring 'force' parameter for unknown chip at "
991 "adapter %d, address 0x%02x\n",
992 i2c_adapter_id(adapter), address);
993 err = -ENODEV;
994 goto ERROR2;
995 }
996 }
997
998 if (kind == it87) {
999 name = "it87";
1000 } else if (kind == it8712) {
1001 name = "it8712"; 924 name = "it8712";
1002 } else if (kind == it8716) { 925 break;
926 case IT8716F_DEVID:
927 data->type = it8716;
1003 name = "it8716"; 928 name = "it8716";
1004 } else if (kind == it8718) { 929 break;
930 case IT8718F_DEVID:
931 data->type = it8718;
1005 name = "it8718"; 932 name = "it8718";
933 break;
934 default:
935 data->type = it87;
936 name = "it87";
1006 } 937 }
1007 938
1008 /* Fill in the remaining client fields and put it into the global list */ 939 /* Fill in the remaining client fields and put it into the global list */
1009 strlcpy(new_client->name, name, I2C_NAME_SIZE); 940 strlcpy(new_client->name, name, I2C_NAME_SIZE);
1010 data->type = kind;
1011 data->valid = 0;
1012 mutex_init(&data->update_lock); 941 mutex_init(&data->update_lock);
1013 942
1014 /* Tell the I2C layer a new client has arrived */ 943 /* Tell the I2C layer a new client has arrived */
1015 if ((err = i2c_attach_client(new_client))) 944 if ((err = i2c_attach_client(new_client)))
1016 goto ERROR2; 945 goto ERROR2;
1017 946
1018 if (!is_isa)
1019 dev_info(&new_client->dev, "The I2C interface to IT87xxF "
1020 "hardware monitoring chips is deprecated. Please "
1021 "report if you still rely on it.\n");
1022
1023 /* Check PWM configuration */ 947 /* Check PWM configuration */
1024 enable_pwm_interface = it87_check_pwm(new_client); 948 enable_pwm_interface = it87_check_pwm(new_client);
1025 949
@@ -1129,8 +1053,7 @@ ERROR3:
1129ERROR2: 1053ERROR2:
1130 kfree(data); 1054 kfree(data);
1131ERROR1: 1055ERROR1:
1132 if (is_isa) 1056 release_region(isa_address, IT87_EXTENT);
1133 release_region(address, IT87_EXTENT);
1134ERROR0: 1057ERROR0:
1135 return err; 1058 return err;
1136} 1059}
@@ -1147,50 +1070,39 @@ static int it87_detach_client(struct i2c_client *client)
1147 if ((err = i2c_detach_client(client))) 1070 if ((err = i2c_detach_client(client)))
1148 return err; 1071 return err;
1149 1072
1150 if(i2c_is_isa_client(client)) 1073 release_region(client->addr, IT87_EXTENT);
1151 release_region(client->addr, IT87_EXTENT);
1152 kfree(data); 1074 kfree(data);
1153 1075
1154 return 0; 1076 return 0;
1155} 1077}
1156 1078
1157/* The SMBus locks itself, but ISA access must be locked explicitly! 1079/* ISA access must be locked explicitly!
1158 We don't want to lock the whole ISA bus, so we lock each client
1159 separately.
1160 We ignore the IT87 BUSY flag at this moment - it could lead to deadlocks, 1080 We ignore the IT87 BUSY flag at this moment - it could lead to deadlocks,
1161 would slow down the IT87 access and should not be necessary. */ 1081 would slow down the IT87 access and should not be necessary. */
1162static int it87_read_value(struct i2c_client *client, u8 reg) 1082static int it87_read_value(struct i2c_client *client, u8 reg)
1163{ 1083{
1164 struct it87_data *data = i2c_get_clientdata(client); 1084 struct it87_data *data = i2c_get_clientdata(client);
1165
1166 int res; 1085 int res;
1167 if (i2c_is_isa_client(client)) { 1086
1168 mutex_lock(&data->lock); 1087 mutex_lock(&data->lock);
1169 outb_p(reg, client->addr + IT87_ADDR_REG_OFFSET); 1088 outb_p(reg, client->addr + IT87_ADDR_REG_OFFSET);
1170 res = inb_p(client->addr + IT87_DATA_REG_OFFSET); 1089 res = inb_p(client->addr + IT87_DATA_REG_OFFSET);
1171 mutex_unlock(&data->lock); 1090 mutex_unlock(&data->lock);
1172 return res; 1091
1173 } else 1092 return res;
1174 return i2c_smbus_read_byte_data(client, reg);
1175} 1093}
1176 1094
1177/* The SMBus locks itself, but ISA access muse be locked explicitly! 1095/* ISA access must be locked explicitly!
1178 We don't want to lock the whole ISA bus, so we lock each client
1179 separately.
1180 We ignore the IT87 BUSY flag at this moment - it could lead to deadlocks, 1096 We ignore the IT87 BUSY flag at this moment - it could lead to deadlocks,
1181 would slow down the IT87 access and should not be necessary. */ 1097 would slow down the IT87 access and should not be necessary. */
1182static int it87_write_value(struct i2c_client *client, u8 reg, u8 value) 1098static void it87_write_value(struct i2c_client *client, u8 reg, u8 value)
1183{ 1099{
1184 struct it87_data *data = i2c_get_clientdata(client); 1100 struct it87_data *data = i2c_get_clientdata(client);
1185 1101
1186 if (i2c_is_isa_client(client)) { 1102 mutex_lock(&data->lock);
1187 mutex_lock(&data->lock); 1103 outb_p(reg, client->addr + IT87_ADDR_REG_OFFSET);
1188 outb_p(reg, client->addr + IT87_ADDR_REG_OFFSET); 1104 outb_p(value, client->addr + IT87_DATA_REG_OFFSET);
1189 outb_p(value, client->addr + IT87_DATA_REG_OFFSET); 1105 mutex_unlock(&data->lock);
1190 mutex_unlock(&data->lock);
1191 return 0;
1192 } else
1193 return i2c_smbus_write_byte_data(client, reg, value);
1194} 1106}
1195 1107
1196/* Return 1 if and only if the PWM interface is safe to use */ 1108/* Return 1 if and only if the PWM interface is safe to use */
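With the SMBus path gone, every access goes through the chip's address/data register pair at isa_address + IT87_ADDR_REG_OFFSET and + IT87_DATA_REG_OFFSET, and data->lock has to cover both steps so a concurrent caller cannot change the selected register in between. A rough userspace sketch of that pattern follows; the register file and port helpers are simulated, and a pthread mutex stands in for the kernel mutex:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Simulated indexed register access: write the register index to the
 * "address" port, then read or write the "data" port. The mutex plays the
 * role of data->lock in it87_read_value()/it87_write_value(). */
static uint8_t regs[256];		/* pretend chip register file */
static uint8_t addr_latch;		/* pretend address register */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void out_addr(uint8_t reg) { addr_latch = reg; }
static void out_data(uint8_t val) { regs[addr_latch] = val; }
static uint8_t in_data(void) { return regs[addr_latch]; }

static uint8_t chip_read(uint8_t reg)
{
	uint8_t res;

	pthread_mutex_lock(&lock);
	out_addr(reg);			/* select register */
	res = in_data();		/* read its value */
	pthread_mutex_unlock(&lock);
	return res;
}

static void chip_write(uint8_t reg, uint8_t val)
{
	pthread_mutex_lock(&lock);
	out_addr(reg);
	out_data(val);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	chip_write(0x58, 0x90);
	printf("reg 0x58 = 0x%02x\n", chip_read(0x58));
	return 0;
}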
@@ -1426,26 +1338,14 @@ static int __init sm_it87_init(void)
1426{ 1338{
1427 int res; 1339 int res;
1428 1340
1429 res = i2c_add_driver(&it87_driver); 1341 if ((res = it87_find(&isa_address)))
1430 if (res)
1431 return res; 1342 return res;
1432 1343 return i2c_isa_add_driver(&it87_isa_driver);
1433 if (!it87_find(&isa_address)) {
1434 res = i2c_isa_add_driver(&it87_isa_driver);
1435 if (res) {
1436 i2c_del_driver(&it87_driver);
1437 return res;
1438 }
1439 }
1440
1441 return 0;
1442} 1344}
1443 1345
1444static void __exit sm_it87_exit(void) 1346static void __exit sm_it87_exit(void)
1445{ 1347{
1446 if (isa_address) 1348 i2c_isa_del_driver(&it87_isa_driver);
1447 i2c_isa_del_driver(&it87_isa_driver);
1448 i2c_del_driver(&it87_driver);
1449} 1349}
1450 1350
1451 1351
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index f58b64ed09e3..5d8d0ca08fa9 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * k8temp.c - Linux kernel module for hardware monitoring 2 * k8temp.c - Linux kernel module for hardware monitoring
3 * 3 *
4 * Copyright (C) 2006 Rudolf Marek <r.marek@sh.cvut.cz> 4 * Copyright (C) 2006 Rudolf Marek <r.marek@assembler.cz>
5 * 5 *
6 * Inspired from the w83785 and amd756 drivers. 6 * Inspired from the w83785 and amd756 drivers.
7 * 7 *
@@ -286,7 +286,7 @@ static void __exit k8temp_exit(void)
286 pci_unregister_driver(&k8temp_driver); 286 pci_unregister_driver(&k8temp_driver);
287} 287}
288 288
289MODULE_AUTHOR("Rudolf Marek <r.marek@sh.cvut.cz>"); 289MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>");
290MODULE_DESCRIPTION("AMD K8 core temperature monitor"); 290MODULE_DESCRIPTION("AMD K8 core temperature monitor");
291MODULE_LICENSE("GPL"); 291MODULE_LICENSE("GPL");
292 292
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 3b8b81984ad4..c8a21be09d87 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -1000,7 +1000,7 @@ static int pc87360_detect(struct i2c_adapter *adapter)
1000 (i&0x02) ? "external" : "internal"); 1000 (i&0x02) ? "external" : "internal");
1001 1001
1002 data->vid_conf = confreg[3]; 1002 data->vid_conf = confreg[3];
1003 data->vrm = 90; 1003 data->vrm = vid_which_vrm();
1004 } 1004 }
1005 1005
1006 /* Fan clock dividers may be needed before any data is read */ 1006 /* Fan clock dividers may be needed before any data is read */
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
new file mode 100644
index 000000000000..affa21a5ccfd
--- /dev/null
+++ b/drivers/hwmon/pc87427.c
@@ -0,0 +1,627 @@
1/*
2 * pc87427.c - hardware monitoring driver for the
3 * National Semiconductor PC87427 Super-I/O chip
4 * Copyright (C) 2006 Jean Delvare <khali@linux-fr.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * Supports the following chips:
16 *
17 * Chip #vin #fan #pwm #temp devid
18 * PC87427 - 8 - - 0xF2
19 *
20 * This driver assumes that no more than one chip is present.
21 * Only fan inputs are supported so far, although the chip can do much more.
22 */
23
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/jiffies.h>
28#include <linux/platform_device.h>
29#include <linux/hwmon.h>
30#include <linux/hwmon-sysfs.h>
31#include <linux/err.h>
32#include <linux/mutex.h>
33#include <linux/sysfs.h>
34#include <asm/io.h>
35
36static struct platform_device *pdev;
37
38#define DRVNAME "pc87427"
39
40/* The lock mutex protects both the I/O accesses (needed because the
41 device is using banked registers) and the register cache (needed to keep
42 the data in the registers and the cache in sync at any time). */
43struct pc87427_data {
44 struct class_device *class_dev;
45 struct mutex lock;
46 int address[2];
47 const char *name;
48
49 unsigned long last_updated; /* in jiffies */
50 u8 fan_enabled; /* bit vector */
51 u16 fan[8]; /* register values */
52 u16 fan_min[8]; /* register values */
53 u8 fan_status[8]; /* register values */
54};
55
56/*
57 * Super-I/O registers and operations
58 */
59
60#define SIOREG_LDSEL 0x07 /* Logical device select */
61#define SIOREG_DEVID 0x20 /* Device ID */
62#define SIOREG_ACT 0x30 /* Device activation */
63#define SIOREG_MAP 0x50 /* I/O or memory mapping */
64#define SIOREG_IOBASE 0x60 /* I/O base address */
65
66static const u8 logdev[2] = { 0x09, 0x14 };
67static const char *logdev_str[2] = { DRVNAME " FMC", DRVNAME " HMC" };
68#define LD_FAN 0
69#define LD_IN 1
70#define LD_TEMP 1
71
72static inline void superio_outb(int sioaddr, int reg, int val)
73{
74 outb(reg, sioaddr);
75 outb(val, sioaddr + 1);
76}
77
78static inline int superio_inb(int sioaddr, int reg)
79{
80 outb(reg, sioaddr);
81 return inb(sioaddr + 1);
82}
83
84static inline void superio_exit(int sioaddr)
85{
86 outb(0x02, sioaddr);
87 outb(0x02, sioaddr + 1);
88}
89
90/*
91 * Logical devices
92 */
93
94#define REGION_LENGTH 32
95#define PC87427_REG_BANK 0x0f
96#define BANK_FM(nr) (nr)
97#define BANK_FT(nr) (0x08 + (nr))
98#define BANK_FC(nr) (0x10 + (nr) * 2)
99
100/*
101 * I/O access functions
102 */
103
104/* ldi is the logical device index */
105static inline int pc87427_read8(struct pc87427_data *data, u8 ldi, u8 reg)
106{
107 return inb(data->address[ldi] + reg);
108}
109
110/* Must be called with data->lock held, except during init */
111static inline int pc87427_read8_bank(struct pc87427_data *data, u8 ldi,
112 u8 bank, u8 reg)
113{
114 outb(bank, data->address[ldi] + PC87427_REG_BANK);
115 return inb(data->address[ldi] + reg);
116}
117
118/* Must be called with data->lock held, except during init */
119static inline void pc87427_write8_bank(struct pc87427_data *data, u8 ldi,
120 u8 bank, u8 reg, u8 value)
121{
122 outb(bank, data->address[ldi] + PC87427_REG_BANK);
123 outb(value, data->address[ldi] + reg);
124}
125
126/*
127 * Fan registers and conversions
128 */
129
130/* fan data registers are 16-bit wide */
131#define PC87427_REG_FAN 0x12
132#define PC87427_REG_FAN_MIN 0x14
133#define PC87427_REG_FAN_STATUS 0x10
134
135#define FAN_STATUS_STALL (1 << 3)
136#define FAN_STATUS_LOSPD (1 << 1)
137#define FAN_STATUS_MONEN (1 << 0)
138
139/* Dedicated function to read all registers related to a given fan input.
140 This saves us quite a few locks and bank selections.
141 Must be called with data->lock held.
142 nr is from 0 to 7 */
143static void pc87427_readall_fan(struct pc87427_data *data, u8 nr)
144{
145 int iobase = data->address[LD_FAN];
146
147 outb(BANK_FM(nr), iobase + PC87427_REG_BANK);
148 data->fan[nr] = inw(iobase + PC87427_REG_FAN);
149 data->fan_min[nr] = inw(iobase + PC87427_REG_FAN_MIN);
150 data->fan_status[nr] = inb(iobase + PC87427_REG_FAN_STATUS);
151 /* Clear fan alarm bits */
152 outb(data->fan_status[nr], iobase + PC87427_REG_FAN_STATUS);
153}
154
155/* The 2 LSB of fan speed registers are used for something different.
156 The actual 2 LSB of the measurements are not available. */
157static inline unsigned long fan_from_reg(u16 reg)
158{
159 reg &= 0xfffc;
160 if (reg == 0x0000 || reg == 0xfffc)
161 return 0;
162 return 5400000UL / reg;
163}
164
165/* The 2 LSB of the fan speed limit registers are not significant. */
166static inline u16 fan_to_reg(unsigned long val)
167{
168 if (val < 83UL)
169 return 0xffff;
170 if (val >= 1350000UL)
171 return 0x0004;
172 return ((1350000UL + val / 2) / val) << 2;
173}
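In other words, the register holds a period count and RPM = 5400000 / reg once the two reused LSBs are masked off, with 0x0000 and 0xfffc treated as "no reading". A standalone re-implementation of the two helpers, just to show the round trip on one value:

#include <stdio.h>

/* Re-implementation of fan_from_reg()/fan_to_reg() above, outside the
 * kernel, to check one conversion round trip. */
static unsigned long fan_from_reg(unsigned int reg)
{
	reg &= 0xfffc;
	if (reg == 0x0000 || reg == 0xfffc)
		return 0;
	return 5400000UL / reg;
}

static unsigned int fan_to_reg(unsigned long val)
{
	if (val < 83UL)
		return 0xffff;
	if (val >= 1350000UL)
		return 0x0004;
	return (unsigned int)(((1350000UL + val / 2) / val) << 2);
}

int main(void)
{
	printf("reg 0x1348 -> %lu RPM\n", fan_from_reg(0x1348));	/* 1094 */
	printf("1094 RPM   -> reg 0x%04x\n", fan_to_reg(1094));		/* 0x1348 */
	return 0;
}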
174
175/*
176 * Data interface
177 */
178
179static struct pc87427_data *pc87427_update_device(struct device *dev)
180{
181 struct pc87427_data *data = dev_get_drvdata(dev);
182 int i;
183
184 mutex_lock(&data->lock);
185 if (!time_after(jiffies, data->last_updated + HZ)
186 && data->last_updated)
187 goto done;
188
189 /* Fans */
190 for (i = 0; i < 8; i++) {
191 if (!(data->fan_enabled & (1 << i)))
192 continue;
193 pc87427_readall_fan(data, i);
194 }
195 data->last_updated = jiffies;
196
197done:
198 mutex_unlock(&data->lock);
199 return data;
200}
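The update function is the usual hwmon caching pattern: readings are refreshed at most about once per second, and the "&& data->last_updated" term forces a refresh on the very first call. A rough standalone sketch of the same idea, with time() standing in for jiffies and one second for HZ:

#include <stdio.h>
#include <time.h>

/* Rate-limited cache refresh: re-read the "hardware" at most once per
 * second, and always on the first call (last_updated still zero). */
struct cache {
	time_t last_updated;
	unsigned long fan[8];
};

static void read_hw(struct cache *c)
{
	int i;

	for (i = 0; i < 8; i++)		/* pretend register reads */
		c->fan[i] = 1000 + i;
}

static struct cache *update(struct cache *c)
{
	time_t now = time(NULL);

	if (c->last_updated == 0 || now - c->last_updated >= 1) {
		read_hw(c);
		c->last_updated = now;
	}
	return c;
}

int main(void)
{
	static struct cache c;

	printf("fan1 = %lu RPM\n", update(&c)->fan[0]);
	return 0;
}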
201
202static ssize_t show_fan_input(struct device *dev, struct device_attribute
203 *devattr, char *buf)
204{
205 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
206 struct pc87427_data *data = pc87427_update_device(dev);
207 int nr = attr->index;
208
209 return sprintf(buf, "%lu\n", fan_from_reg(data->fan[nr]));
210}
211
212static ssize_t show_fan_min(struct device *dev, struct device_attribute
213 *devattr, char *buf)
214{
215 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
216 struct pc87427_data *data = pc87427_update_device(dev);
217 int nr = attr->index;
218
219 return sprintf(buf, "%lu\n", fan_from_reg(data->fan_min[nr]));
220}
221
222static ssize_t show_fan_alarm(struct device *dev, struct device_attribute
223 *devattr, char *buf)
224{
225 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
226 struct pc87427_data *data = pc87427_update_device(dev);
227 int nr = attr->index;
228
229 return sprintf(buf, "%d\n", !!(data->fan_status[nr]
230 & FAN_STATUS_LOSPD));
231}
232
233static ssize_t show_fan_fault(struct device *dev, struct device_attribute
234 *devattr, char *buf)
235{
236 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
237 struct pc87427_data *data = pc87427_update_device(dev);
238 int nr = attr->index;
239
240 return sprintf(buf, "%d\n", !!(data->fan_status[nr]
241 & FAN_STATUS_STALL));
242}
243
244static ssize_t set_fan_min(struct device *dev, struct device_attribute
245 *devattr, const char *buf, size_t count)
246{
247 struct pc87427_data *data = dev_get_drvdata(dev);
248 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
249 int nr = attr->index;
250 unsigned long val = simple_strtoul(buf, NULL, 10);
251 int iobase = data->address[LD_FAN];
252
253 mutex_lock(&data->lock);
254 outb(BANK_FM(nr), iobase + PC87427_REG_BANK);
255 /* The low speed limit registers are read-only while monitoring
256 is enabled, so we have to disable monitoring, then change the
257 limit, and finally enable monitoring again. */
258 outb(0, iobase + PC87427_REG_FAN_STATUS);
259 data->fan_min[nr] = fan_to_reg(val);
260 outw(data->fan_min[nr], iobase + PC87427_REG_FAN_MIN);
261 outb(FAN_STATUS_MONEN, iobase + PC87427_REG_FAN_STATUS);
262 mutex_unlock(&data->lock);
263
264 return count;
265}
266
267static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0);
268static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1);
269static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan_input, NULL, 2);
270static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan_input, NULL, 3);
271static SENSOR_DEVICE_ATTR(fan5_input, S_IRUGO, show_fan_input, NULL, 4);
272static SENSOR_DEVICE_ATTR(fan6_input, S_IRUGO, show_fan_input, NULL, 5);
273static SENSOR_DEVICE_ATTR(fan7_input, S_IRUGO, show_fan_input, NULL, 6);
274static SENSOR_DEVICE_ATTR(fan8_input, S_IRUGO, show_fan_input, NULL, 7);
275
276static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO,
277 show_fan_min, set_fan_min, 0);
278static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO,
279 show_fan_min, set_fan_min, 1);
280static SENSOR_DEVICE_ATTR(fan3_min, S_IWUSR | S_IRUGO,
281 show_fan_min, set_fan_min, 2);
282static SENSOR_DEVICE_ATTR(fan4_min, S_IWUSR | S_IRUGO,
283 show_fan_min, set_fan_min, 3);
284static SENSOR_DEVICE_ATTR(fan5_min, S_IWUSR | S_IRUGO,
285 show_fan_min, set_fan_min, 4);
286static SENSOR_DEVICE_ATTR(fan6_min, S_IWUSR | S_IRUGO,
287 show_fan_min, set_fan_min, 5);
288static SENSOR_DEVICE_ATTR(fan7_min, S_IWUSR | S_IRUGO,
289 show_fan_min, set_fan_min, 6);
290static SENSOR_DEVICE_ATTR(fan8_min, S_IWUSR | S_IRUGO,
291 show_fan_min, set_fan_min, 7);
292
293static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_fan_alarm, NULL, 0);
294static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_fan_alarm, NULL, 1);
295static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_fan_alarm, NULL, 2);
296static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO, show_fan_alarm, NULL, 3);
297static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO, show_fan_alarm, NULL, 4);
298static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO, show_fan_alarm, NULL, 5);
299static SENSOR_DEVICE_ATTR(fan7_alarm, S_IRUGO, show_fan_alarm, NULL, 6);
300static SENSOR_DEVICE_ATTR(fan8_alarm, S_IRUGO, show_fan_alarm, NULL, 7);
301
302static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, show_fan_fault, NULL, 0);
303static SENSOR_DEVICE_ATTR(fan2_fault, S_IRUGO, show_fan_fault, NULL, 1);
304static SENSOR_DEVICE_ATTR(fan3_fault, S_IRUGO, show_fan_fault, NULL, 2);
305static SENSOR_DEVICE_ATTR(fan4_fault, S_IRUGO, show_fan_fault, NULL, 3);
306static SENSOR_DEVICE_ATTR(fan5_fault, S_IRUGO, show_fan_fault, NULL, 4);
307static SENSOR_DEVICE_ATTR(fan6_fault, S_IRUGO, show_fan_fault, NULL, 5);
308static SENSOR_DEVICE_ATTR(fan7_fault, S_IRUGO, show_fan_fault, NULL, 6);
309static SENSOR_DEVICE_ATTR(fan8_fault, S_IRUGO, show_fan_fault, NULL, 7);
310
311static struct attribute *pc87427_attributes_fan[8][5] = {
312 {
313 &sensor_dev_attr_fan1_input.dev_attr.attr,
314 &sensor_dev_attr_fan1_min.dev_attr.attr,
315 &sensor_dev_attr_fan1_alarm.dev_attr.attr,
316 &sensor_dev_attr_fan1_fault.dev_attr.attr,
317 NULL
318 }, {
319 &sensor_dev_attr_fan2_input.dev_attr.attr,
320 &sensor_dev_attr_fan2_min.dev_attr.attr,
321 &sensor_dev_attr_fan2_alarm.dev_attr.attr,
322 &sensor_dev_attr_fan2_fault.dev_attr.attr,
323 NULL
324 }, {
325 &sensor_dev_attr_fan3_input.dev_attr.attr,
326 &sensor_dev_attr_fan3_min.dev_attr.attr,
327 &sensor_dev_attr_fan3_alarm.dev_attr.attr,
328 &sensor_dev_attr_fan3_fault.dev_attr.attr,
329 NULL
330 }, {
331 &sensor_dev_attr_fan4_input.dev_attr.attr,
332 &sensor_dev_attr_fan4_min.dev_attr.attr,
333 &sensor_dev_attr_fan4_alarm.dev_attr.attr,
334 &sensor_dev_attr_fan4_fault.dev_attr.attr,
335 NULL
336 }, {
337 &sensor_dev_attr_fan5_input.dev_attr.attr,
338 &sensor_dev_attr_fan5_min.dev_attr.attr,
339 &sensor_dev_attr_fan5_alarm.dev_attr.attr,
340 &sensor_dev_attr_fan5_fault.dev_attr.attr,
341 NULL
342 }, {
343 &sensor_dev_attr_fan6_input.dev_attr.attr,
344 &sensor_dev_attr_fan6_min.dev_attr.attr,
345 &sensor_dev_attr_fan6_alarm.dev_attr.attr,
346 &sensor_dev_attr_fan6_fault.dev_attr.attr,
347 NULL
348 }, {
349 &sensor_dev_attr_fan7_input.dev_attr.attr,
350 &sensor_dev_attr_fan7_min.dev_attr.attr,
351 &sensor_dev_attr_fan7_alarm.dev_attr.attr,
352 &sensor_dev_attr_fan7_fault.dev_attr.attr,
353 NULL
354 }, {
355 &sensor_dev_attr_fan8_input.dev_attr.attr,
356 &sensor_dev_attr_fan8_min.dev_attr.attr,
357 &sensor_dev_attr_fan8_alarm.dev_attr.attr,
358 &sensor_dev_attr_fan8_fault.dev_attr.attr,
359 NULL
360 }
361};
362
363static const struct attribute_group pc87427_group_fan[8] = {
364 { .attrs = pc87427_attributes_fan[0] },
365 { .attrs = pc87427_attributes_fan[1] },
366 { .attrs = pc87427_attributes_fan[2] },
367 { .attrs = pc87427_attributes_fan[3] },
368 { .attrs = pc87427_attributes_fan[4] },
369 { .attrs = pc87427_attributes_fan[5] },
370 { .attrs = pc87427_attributes_fan[6] },
371 { .attrs = pc87427_attributes_fan[7] },
372};
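Each enabled fan thus gets fanN_input, fanN_min, fanN_alarm and fanN_fault files under the hwmon class device. A minimal userspace reader is shown below; the exact sysfs path is an assumption, since the hwmonN number (and whether a device/ component is needed) depends on the system:

#include <stdio.h>

/* Read one of the attributes created above. The path below is only an
 * example; adjust hwmon0 (and the device/ component) to the local setup. */
int main(void)
{
	const char *path = "/sys/class/hwmon/hwmon0/device/fan1_input";
	FILE *f = fopen(path, "r");
	unsigned long rpm;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%lu", &rpm) == 1)
		printf("fan1: %lu RPM\n", rpm);
	fclose(f);
	return 0;
}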
373
374static ssize_t show_name(struct device *dev, struct device_attribute
375 *devattr, char *buf)
376{
377 struct pc87427_data *data = dev_get_drvdata(dev);
378
379 return sprintf(buf, "%s\n", data->name);
380}
381static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
382
383
384/*
385 * Device detection, attach and detach
386 */
387
388static void __devinit pc87427_init_device(struct device *dev)
389{
390 struct pc87427_data *data = dev_get_drvdata(dev);
391 int i;
392 u8 reg;
393
394 /* The FMC module should be ready */
395 reg = pc87427_read8(data, LD_FAN, PC87427_REG_BANK);
396 if (!(reg & 0x80))
397 dev_warn(dev, "FMC module not ready!\n");
398
399 /* Check which fans are enabled */
400 for (i = 0; i < 8; i++) {
401 reg = pc87427_read8_bank(data, LD_FAN, BANK_FM(i),
402 PC87427_REG_FAN_STATUS);
403 if (reg & FAN_STATUS_MONEN)
404 data->fan_enabled |= (1 << i);
405 }
406
407 if (!data->fan_enabled) {
408 dev_dbg(dev, "Enabling all fan inputs\n");
409 for (i = 0; i < 8; i++)
410 pc87427_write8_bank(data, LD_FAN, BANK_FM(i),
411 PC87427_REG_FAN_STATUS,
412 FAN_STATUS_MONEN);
413 data->fan_enabled = 0xff;
414 }
415}
416
417static int __devinit pc87427_probe(struct platform_device *pdev)
418{
419 struct pc87427_data *data;
420 struct resource *res;
421 int i, err;
422
423 if (!(data = kzalloc(sizeof(struct pc87427_data), GFP_KERNEL))) {
424 err = -ENOMEM;
425 printk(KERN_ERR DRVNAME ": Out of memory\n");
426 goto exit;
427 }
428
429 /* This will need to be revisited when we add support for
430 temperature and voltage monitoring. */
431 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
432 data->address[0] = res->start;
433
434 mutex_init(&data->lock);
435 data->name = "pc87427";
436 platform_set_drvdata(pdev, data);
437 pc87427_init_device(&pdev->dev);
438
439 /* Register sysfs hooks */
440 if ((err = device_create_file(&pdev->dev, &dev_attr_name)))
441 goto exit_kfree;
442 for (i = 0; i < 8; i++) {
443 if (!(data->fan_enabled & (1 << i)))
444 continue;
445 if ((err = sysfs_create_group(&pdev->dev.kobj,
446 &pc87427_group_fan[i])))
447 goto exit_remove_files;
448 }
449
450 data->class_dev = hwmon_device_register(&pdev->dev);
451 if (IS_ERR(data->class_dev)) {
452 err = PTR_ERR(data->class_dev);
453 dev_err(&pdev->dev, "Class registration failed (%d)\n", err);
454 goto exit_remove_files;
455 }
456
457 return 0;
458
459exit_remove_files:
460 for (i = 0; i < 8; i++) {
461 if (!(data->fan_enabled & (1 << i)))
462 continue;
463 sysfs_remove_group(&pdev->dev.kobj, &pc87427_group_fan[i]);
464 }
465exit_kfree:
466 platform_set_drvdata(pdev, NULL);
467 kfree(data);
468exit:
469 return err;
470}
471
472static int __devexit pc87427_remove(struct platform_device *pdev)
473{
474 struct pc87427_data *data = platform_get_drvdata(pdev);
475 int i;
476
477 platform_set_drvdata(pdev, NULL);
478 hwmon_device_unregister(data->class_dev);
479 device_remove_file(&pdev->dev, &dev_attr_name);
480 for (i = 0; i < 8; i++) {
481 if (!(data->fan_enabled & (1 << i)))
482 continue;
483 sysfs_remove_group(&pdev->dev.kobj, &pc87427_group_fan[i]);
484 }
485 kfree(data);
486
487 return 0;
488}
489
490
491static struct platform_driver pc87427_driver = {
492 .driver = {
493 .owner = THIS_MODULE,
494 .name = DRVNAME,
495 },
496 .probe = pc87427_probe,
497 .remove = __devexit_p(pc87427_remove),
498};
499
500static int __init pc87427_device_add(unsigned short address)
501{
502 struct resource res = {
503 .start = address,
504 .end = address + REGION_LENGTH - 1,
505 .name = logdev_str[0],
506 .flags = IORESOURCE_IO,
507 };
508 int err;
509
510 pdev = platform_device_alloc(DRVNAME, address);
511 if (!pdev) {
512 err = -ENOMEM;
513 printk(KERN_ERR DRVNAME ": Device allocation failed\n");
514 goto exit;
515 }
516
517 err = platform_device_add_resources(pdev, &res, 1);
518 if (err) {
519 printk(KERN_ERR DRVNAME ": Device resource addition failed "
520 "(%d)\n", err);
521 goto exit_device_put;
522 }
523
524 err = platform_device_add(pdev);
525 if (err) {
526 printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
527 err);
528 goto exit_device_put;
529 }
530
531 return 0;
532
533exit_device_put:
534 platform_device_put(pdev);
535exit:
536 return err;
537}
538
539static int __init pc87427_find(int sioaddr, unsigned short *address)
540{
541 u16 val;
542 int i, err = 0;
543
544 /* Identify device */
545 val = superio_inb(sioaddr, SIOREG_DEVID);
546 if (val != 0xf2) { /* PC87427 */
547 err = -ENODEV;
548 goto exit;
549 }
550
551 for (i = 0; i < 2; i++) {
552 address[i] = 0;
553 /* Select logical device */
554 superio_outb(sioaddr, SIOREG_LDSEL, logdev[i]);
555
556 val = superio_inb(sioaddr, SIOREG_ACT);
557 if (!(val & 0x01)) {
558 printk(KERN_INFO DRVNAME ": Logical device 0x%02x "
559 "not activated\n", logdev[i]);
560 continue;
561 }
562
563 val = superio_inb(sioaddr, SIOREG_MAP);
564 if (val & 0x01) {
565 printk(KERN_WARNING DRVNAME ": Logical device 0x%02x "
566 "is memory-mapped, can't use\n", logdev[i]);
567 continue;
568 }
569
570 val = (superio_inb(sioaddr, SIOREG_IOBASE) << 8)
571 | superio_inb(sioaddr, SIOREG_IOBASE + 1);
572 if (!val) {
573 printk(KERN_INFO DRVNAME ": I/O base address not set "
574 "for logical device 0x%02x\n", logdev[i]);
575 continue;
576 }
577 address[i] = val;
578 }
579
580exit:
581 superio_exit(sioaddr);
582 return err;
583}
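Two details of the probe are easy to miss: a logical device is only usable if bit 0 of SIOREG_ACT is set, and its I/O base is assembled from two 8-bit Super-I/O registers, high byte first. A tiny illustration with made-up register values:

#include <stdio.h>

/* Standalone illustration of two checks in pc87427_find(): the logical
 * device must be activated (bit 0 of SIOREG_ACT), and its I/O base is the
 * 16-bit value formed from two 8-bit registers, high byte first.
 * The register values below are made up for the example. */
int main(void)
{
	unsigned char act = 0x01;	/* SIOREG_ACT: bit 0 = activated */
	unsigned char base_hi = 0x0a;	/* SIOREG_IOBASE */
	unsigned char base_lo = 0x00;	/* SIOREG_IOBASE + 1 */
	unsigned short address = (base_hi << 8) | base_lo;

	if (!(act & 0x01)) {
		printf("logical device not activated\n");
		return 0;
	}
	printf("logical device mapped at 0x%04x\n", address);	/* 0x0a00 */
	return 0;
}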
584
585static int __init pc87427_init(void)
586{
587 int err;
588 unsigned short address[2];
589
590 if (pc87427_find(0x2e, address)
591 && pc87427_find(0x4e, address))
592 return -ENODEV;
593
594 /* For now the driver only handles fans so we only care about the
595 first address. */
596 if (!address[0])
597 return -ENODEV;
598
599 err = platform_driver_register(&pc87427_driver);
600 if (err)
601 goto exit;
602
603 /* Sets global pdev as a side effect */
604 err = pc87427_device_add(address[0]);
605 if (err)
606 goto exit_driver;
607
608 return 0;
609
610exit_driver:
611 platform_driver_unregister(&pc87427_driver);
612exit:
613 return err;
614}
615
616static void __exit pc87427_exit(void)
617{
618 platform_device_unregister(pdev);
619 platform_driver_unregister(&pc87427_driver);
620}
621
622MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
623MODULE_DESCRIPTION("PC87427 hardware monitoring driver");
624MODULE_LICENSE("GPL");
625
626module_init(pc87427_init);
627module_exit(pc87427_exit);
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 2257806d0102..212a1558c63b 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -3,7 +3,7 @@
3 the Winbond W83627EHF Super-I/O chip 3 the Winbond W83627EHF Super-I/O chip
4 Copyright (C) 2005 Jean Delvare <khali@linux-fr.org> 4 Copyright (C) 2005 Jean Delvare <khali@linux-fr.org>
5 Copyright (C) 2006 Yuan Mu (Winbond), 5 Copyright (C) 2006 Yuan Mu (Winbond),
6 Rudolf Marek <r.marek@sh.cvut.cz> 6 Rudolf Marek <r.marek@assembler.cz>
7 David Hubbard <david.c.hubbard@gmail.com> 7 David Hubbard <david.c.hubbard@gmail.com>
8 8
9 Shamelessly ripped from the w83627hf driver 9 Shamelessly ripped from the w83627hf driver
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 4e108262576f..b0fa296740d1 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -3,7 +3,7 @@
3 monitoring 3 monitoring
4 Copyright (C) 2004, 2005 Winbond Electronics Corp. 4 Copyright (C) 2004, 2005 Winbond Electronics Corp.
5 Chunhao Huang <DZShen@Winbond.com.tw>, 5 Chunhao Huang <DZShen@Winbond.com.tw>,
6 Rudolf Marek <r.marek@sh.cvut.cz> 6 Rudolf Marek <r.marek@assembler.cz>
7 7
8 This program is free software; you can redistribute it and/or modify 8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by 9 it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
new file mode 100644
index 000000000000..c12ac5abc2bb
--- /dev/null
+++ b/drivers/hwmon/w83793.c
@@ -0,0 +1,1609 @@
1/*
2 w83793.c - Linux kernel driver for hardware monitoring
3 Copyright (C) 2006 Winbond Electronics Corp.
4 Yuan Mu
5 Rudolf Marek <r.marek@assembler.cz>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation - version 2.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 02110-1301 USA.
20*/
21
22/*
23 Supports following chips:
24
25 Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
26 w83793 10 12 8 6 0x7b 0x5ca3 yes no
27*/
28
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/slab.h>
32#include <linux/i2c.h>
33#include <linux/hwmon.h>
34#include <linux/hwmon-vid.h>
35#include <linux/hwmon-sysfs.h>
36#include <linux/err.h>
37#include <linux/mutex.h>
38
39/* Addresses to scan */
40static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END };
41
42/* Insmod parameters */
43I2C_CLIENT_INSMOD_1(w83793);
44I2C_CLIENT_MODULE_PARM(force_subclients, "List of subclient addresses: "
45 "{bus, clientaddr, subclientaddr1, subclientaddr2}");
46
47static int reset;
48module_param(reset, bool, 0);
49MODULE_PARM_DESC(reset, "Set to 1 to reset chip, not recommended");
50
51/*
52 Address 0x00, 0x0d, 0x0e, 0x0f in all three banks are reserved
53 as ID, Bank Select registers
54*/
55#define W83793_REG_BANKSEL 0x00
56#define W83793_REG_VENDORID 0x0d
57#define W83793_REG_CHIPID 0x0e
58#define W83793_REG_DEVICEID 0x0f
59
60#define W83793_REG_CONFIG 0x40
61#define W83793_REG_MFC 0x58
62#define W83793_REG_FANIN_CTRL 0x5c
63#define W83793_REG_FANIN_SEL 0x5d
64#define W83793_REG_I2C_ADDR 0x0b
65#define W83793_REG_I2C_SUBADDR 0x0c
66#define W83793_REG_VID_INA 0x05
67#define W83793_REG_VID_INB 0x06
68#define W83793_REG_VID_LATCHA 0x07
69#define W83793_REG_VID_LATCHB 0x08
70#define W83793_REG_VID_CTRL 0x59
71
72static u16 W83793_REG_TEMP_MODE[2] = { 0x5e, 0x5f };
73
74#define TEMP_READ 0
75#define TEMP_CRIT 1
76#define TEMP_CRIT_HYST 2
77#define TEMP_WARN 3
78#define TEMP_WARN_HYST 4
79/* only crit and crit_hyst affect real-time alarm status
80 current crit crit_hyst warn warn_hyst */
81static u16 W83793_REG_TEMP[][5] = {
82 {0x1c, 0x78, 0x79, 0x7a, 0x7b},
83 {0x1d, 0x7c, 0x7d, 0x7e, 0x7f},
84 {0x1e, 0x80, 0x81, 0x82, 0x83},
85 {0x1f, 0x84, 0x85, 0x86, 0x87},
86 {0x20, 0x88, 0x89, 0x8a, 0x8b},
87 {0x21, 0x8c, 0x8d, 0x8e, 0x8f},
88};
89
90#define W83793_REG_TEMP_LOW_BITS 0x22
91
92#define W83793_REG_BEEP(index) (0x53 + (index))
93#define W83793_REG_ALARM(index) (0x4b + (index))
94
95#define W83793_REG_CLR_CHASSIS 0x4a /* SMI MASK4 */
96#define W83793_REG_IRQ_CTRL 0x50
97#define W83793_REG_OVT_CTRL 0x51
98#define W83793_REG_OVT_BEEP 0x52
99
100#define IN_READ 0
101#define IN_MAX 1
102#define IN_LOW 2
103static const u16 W83793_REG_IN[][3] = {
104 /* Current, High, Low */
105 {0x10, 0x60, 0x61}, /* Vcore A */
106 {0x11, 0x62, 0x63}, /* Vcore B */
107 {0x12, 0x64, 0x65}, /* Vtt */
108 {0x14, 0x6a, 0x6b}, /* VSEN1 */
109 {0x15, 0x6c, 0x6d}, /* VSEN2 */
110 {0x16, 0x6e, 0x6f}, /* +3VSEN */
111 {0x17, 0x70, 0x71}, /* +12VSEN */
112 {0x18, 0x72, 0x73}, /* 5VDD */
113 {0x19, 0x74, 0x75}, /* 5VSB */
114 {0x1a, 0x76, 0x77}, /* VBAT */
115};
116
117/* Low Bits of Vcore A/B Vtt Read/High/Low */
118static const u16 W83793_REG_IN_LOW_BITS[] = { 0x1b, 0x68, 0x69 };
119static u8 scale_in[] = { 2, 2, 2, 16, 16, 16, 8, 24, 24, 16 };
120
121#define W83793_REG_FAN(index) (0x23 + 2 * (index)) /* High byte */
122#define W83793_REG_FAN_MIN(index) (0x90 + 2 * (index)) /* High byte */
123
124#define W83793_REG_PWM_DEFAULT 0xb2
125#define W83793_REG_PWM_ENABLE 0x207
126#define W83793_REG_PWM_UPTIME 0xc3 /* Unit in 0.1 second */
127#define W83793_REG_PWM_DOWNTIME 0xc4 /* Unit in 0.1 second */
128#define W83793_REG_TEMP_CRITICAL 0xc5
129
130#define PWM_DUTY 0
131#define PWM_START 1
132#define PWM_NONSTOP 2
133#define W83793_REG_PWM(index, nr) (((nr) == 0 ? 0xb3 : \
134 (nr) == 1 ? 0x220 : 0x218) + (index))
135
136/* bit field, fan1 is bit0, fan2 is bit1 ... */
137#define W83793_REG_TEMP_FAN_MAP(index) (0x201 + (index))
138#define W83793_REG_TEMP_TOL(index) (0x208 + (index))
139#define W83793_REG_TEMP_CRUISE(index) (0x210 + (index))
140#define W83793_REG_PWM_STOP_TIME(index) (0x228 + (index))
141#define W83793_REG_SF2_TEMP(index, nr) (0x230 + ((index) << 4) + (nr))
142#define W83793_REG_SF2_PWM(index, nr) (0x238 + ((index) << 4) + (nr))
143
144static inline unsigned long FAN_FROM_REG(u16 val)
145{
146 if ((val >= 0xfff) || (val == 0))
147 return 0;
148 return (1350000UL / val);
149}
150
151static inline u16 FAN_TO_REG(long rpm)
152{
153 if (rpm <= 0)
154 return 0x0fff;
155 return SENSORS_LIMIT((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
156}
157
158static inline unsigned long TIME_FROM_REG(u8 reg)
159{
160 return (reg * 100);
161}
162
163static inline u8 TIME_TO_REG(unsigned long val)
164{
165 return SENSORS_LIMIT((val + 50) / 100, 0, 0xff);
166}
167
168static inline long TEMP_FROM_REG(s8 reg)
169{
170 return (reg * 1000);
171}
172
173static inline s8 TEMP_TO_REG(long val, s8 min, s8 max)
174{
175 return SENSORS_LIMIT((val + (val < 0 ? -500 : 500)) / 1000, min, max);
176}
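Worked numbers for these helpers may help: a fan register of 675 corresponds to 1350000 / 675 = 2000 RPM and converts back to 675, and TEMP_TO_REG() rounds millidegrees to the nearest degree. The sketch below re-implements them standalone; clamp() mirrors what SENSORS_LIMIT() does in kernels of this era (an assumption stated here rather than taken from the patch):

#include <stdio.h>

/* Standalone check of the w83793 conversion helpers above. clamp() plays
 * the role of SENSORS_LIMIT(). */
static long clamp(long v, long lo, long hi)
{
	return v > hi ? hi : (v < lo ? lo : v);
}

static unsigned long fan_from_reg(unsigned int val)
{
	if (val >= 0xfff || val == 0)
		return 0;
	return 1350000UL / val;
}

static unsigned int fan_to_reg(long rpm)
{
	if (rpm <= 0)
		return 0x0fff;
	return (unsigned int)clamp((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
}

static signed char temp_to_reg(long mdeg)
{
	return (signed char)clamp((mdeg + (mdeg < 0 ? -500 : 500)) / 1000,
				  -128, 127);
}

int main(void)
{
	printf("fan reg 675 -> %lu RPM\n", fan_from_reg(675));	/* 2000 */
	printf("2000 RPM    -> reg %u\n", fan_to_reg(2000));	/* 675 */
	printf("45499 mC    -> reg %d\n", temp_to_reg(45499));	/* 45 */
	printf("45500 mC    -> reg %d\n", temp_to_reg(45500));	/* 46 */
	return 0;
}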
177
178struct w83793_data {
179 struct i2c_client client;
180 struct i2c_client *lm75[2];
181 struct class_device *class_dev;
182 struct mutex update_lock;
183 char valid; /* !=0 if following fields are valid */
184 unsigned long last_updated; /* In jiffies */
185 unsigned long last_nonvolatile; /* In jiffies, last time we update the
186 nonvolatile registers */
187
188 u8 bank;
189 u8 vrm;
190 u8 vid[2];
191 u8 in[10][3]; /* Register value, read/high/low */
192 u8 in_low_bits[3]; /* Additional resolution for VCore A/B Vtt */
193
194 u16 has_fan; /* Only fan1- fan5 has own pins */
195 u16 fan[12]; /* Register value combine */
196 u16 fan_min[12]; /* Register value combine */
197
198 s8 temp[6][5]; /* current, crit, crit_hyst,warn, warn_hyst */
199 u8 temp_low_bits; /* Additional resolution TD1-TD4 */
200 u8 temp_mode[2]; /* byte 0: Temp D1-D4 mode each has 2 bits
201 byte 1: Temp R1,R2 mode, each has 1 bit */
202 u8 temp_critical; /* If reached all fan will be at full speed */
203 u8 temp_fan_map[6]; /* Temp controls which pwm fan, bit field */
204
205 u8 has_pwm;
206 u8 pwm_enable; /* Register value, each Temp has 1 bit */
207 u8 pwm_uptime; /* Register value */
208 u8 pwm_downtime; /* Register value */
209 u8 pwm_default; /* All fan default pwm, next poweron valid */
210 u8 pwm[8][3]; /* Register value */
211 u8 pwm_stop_time[8];
212 u8 temp_cruise[6];
213
214 u8 alarms[5]; /* realtime status registers */
215 u8 beeps[5];
216 u8 beep_enable;
217 u8 tolerance[3]; /* Temp tolerance(Smart Fan I/II) */
218 u8 sf2_pwm[6][7]; /* Smart FanII: Fan duty cycle */
219 u8 sf2_temp[6][7]; /* Smart FanII: Temp level point */
220};
221
222static u8 w83793_read_value(struct i2c_client *client, u16 reg);
223static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value);
224static int w83793_attach_adapter(struct i2c_adapter *adapter);
225static int w83793_detect(struct i2c_adapter *adapter, int address, int kind);
226static int w83793_detach_client(struct i2c_client *client);
227static void w83793_init_client(struct i2c_client *client);
228static void w83793_update_nonvolatile(struct device *dev);
229static struct w83793_data *w83793_update_device(struct device *dev);
230
231static struct i2c_driver w83793_driver = {
232 .driver = {
233 .name = "w83793",
234 },
235 .attach_adapter = w83793_attach_adapter,
236 .detach_client = w83793_detach_client,
237};
238
239static ssize_t
240show_vrm(struct device *dev, struct device_attribute *attr, char *buf)
241{
242 struct i2c_client *client = to_i2c_client(dev);
243 struct w83793_data *data = i2c_get_clientdata(client);
244
245 return sprintf(buf, "%d\n", data->vrm);
246}
247
248static ssize_t
249show_vid(struct device *dev, struct device_attribute *attr, char *buf)
250{
251 struct w83793_data *data = w83793_update_device(dev);
252 struct sensor_device_attribute_2 *sensor_attr =
253 to_sensor_dev_attr_2(attr);
254 int index = sensor_attr->index;
255
256 return sprintf(buf, "%d\n", vid_from_reg(data->vid[index], data->vrm));
257}
258
259static ssize_t
260store_vrm(struct device *dev, struct device_attribute *attr,
261 const char *buf, size_t count)
262{
263 struct i2c_client *client = to_i2c_client(dev);
264 struct w83793_data *data = i2c_get_clientdata(client);
265
266 data->vrm = simple_strtoul(buf, NULL, 10);
267 return count;
268}
269
270#define ALARM_STATUS 0
271#define BEEP_ENABLE 1
272static ssize_t
273show_alarm_beep(struct device *dev, struct device_attribute *attr, char *buf)
274{
275 struct w83793_data *data = w83793_update_device(dev);
276 struct sensor_device_attribute_2 *sensor_attr =
277 to_sensor_dev_attr_2(attr);
278 int nr = sensor_attr->nr;
279 int index = sensor_attr->index >> 3;
280 int bit = sensor_attr->index & 0x07;
281 u8 val;
282
283 if (ALARM_STATUS == nr) {
284 val = (data->alarms[index] >> (bit)) & 1;
285 } else { /* BEEP_ENABLE */
286 val = (data->beeps[index] >> (bit)) & 1;
287 }
288
289 return sprintf(buf, "%u\n", val);
290}
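The attribute index used here is packed: the upper bits select one of the five status/beep registers and the low three bits select the bit inside it, so index 13 means register 1, bit 5. A one-line check of that split:

#include <stdio.h>

/* How the packed sensor index used by show_alarm_beep()/store_beep()
 * splits into a register number and a bit position. */
int main(void)
{
	int packed = 13;	/* e.g. a SENSOR_DEVICE_ATTR_2 index value */

	printf("register %d, bit %d\n", packed >> 3, packed & 0x07);
	/* prints: register 1, bit 5 */
	return 0;
}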
291
292static ssize_t
293store_beep(struct device *dev, struct device_attribute *attr,
294 const char *buf, size_t count)
295{
296 struct i2c_client *client = to_i2c_client(dev);
297 struct w83793_data *data = i2c_get_clientdata(client);
298 struct sensor_device_attribute_2 *sensor_attr =
299 to_sensor_dev_attr_2(attr);
300 int index = sensor_attr->index >> 3;
301 int shift = sensor_attr->index & 0x07;
302 u8 beep_bit = 1 << shift;
303 u8 val;
304
305 val = simple_strtoul(buf, NULL, 10);
306 if (val != 0 && val != 1)
307 return -EINVAL;
308
309 mutex_lock(&data->update_lock);
310 data->beeps[index] = w83793_read_value(client, W83793_REG_BEEP(index));
311 data->beeps[index] &= ~beep_bit;
312 data->beeps[index] |= val << shift;
313 w83793_write_value(client, W83793_REG_BEEP(index), data->beeps[index]);
314 mutex_unlock(&data->update_lock);
315
316 return count;
317}
318
319static ssize_t
320show_beep_enable(struct device *dev, struct device_attribute *attr, char *buf)
321{
322 struct w83793_data *data = w83793_update_device(dev);
323 return sprintf(buf, "%u\n", (data->beep_enable >> 1) & 0x01);
324}
325
326static ssize_t
327store_beep_enable(struct device *dev, struct device_attribute *attr,
328 const char *buf, size_t count)
329{
330 struct i2c_client *client = to_i2c_client(dev);
331 struct w83793_data *data = i2c_get_clientdata(client);
332 u8 val = simple_strtoul(buf, NULL, 10);
333
334 if (val != 0 && val != 1)
335 return -EINVAL;
336
337 mutex_lock(&data->update_lock);
338 data->beep_enable = w83793_read_value(client, W83793_REG_OVT_BEEP)
339 & 0xfd;
340 data->beep_enable |= val << 1;
341 w83793_write_value(client, W83793_REG_OVT_BEEP, data->beep_enable);
342 mutex_unlock(&data->update_lock);
343
344 return count;
345}
346
347/* Write any value to clear chassis alarm */
348static ssize_t
349store_chassis_clear(struct device *dev,
350 struct device_attribute *attr, const char *buf,
351 size_t count)
352{
353 struct i2c_client *client = to_i2c_client(dev);
354 struct w83793_data *data = i2c_get_clientdata(client);
355 u8 val;
356
357 mutex_lock(&data->update_lock);
358 val = w83793_read_value(client, W83793_REG_CLR_CHASSIS);
359 val |= 0x80;
360 w83793_write_value(client, W83793_REG_CLR_CHASSIS, val);
361 mutex_unlock(&data->update_lock);
362 return count;
363}
364
365#define FAN_INPUT 0
366#define FAN_MIN 1
367static ssize_t
368show_fan(struct device *dev, struct device_attribute *attr, char *buf)
369{
370 struct sensor_device_attribute_2 *sensor_attr =
371 to_sensor_dev_attr_2(attr);
372 int nr = sensor_attr->nr;
373 int index = sensor_attr->index;
374 struct w83793_data *data = w83793_update_device(dev);
375 u16 val;
376
377 if (FAN_INPUT == nr) {
378 val = data->fan[index] & 0x0fff;
379 } else {
380 val = data->fan_min[index] & 0x0fff;
381 }
382
383 return sprintf(buf, "%lu\n", FAN_FROM_REG(val));
384}
385
386static ssize_t
387store_fan_min(struct device *dev, struct device_attribute *attr,
388 const char *buf, size_t count)
389{
390 struct sensor_device_attribute_2 *sensor_attr =
391 to_sensor_dev_attr_2(attr);
392 int index = sensor_attr->index;
393 struct i2c_client *client = to_i2c_client(dev);
394 struct w83793_data *data = i2c_get_clientdata(client);
395 u16 val = FAN_TO_REG(simple_strtoul(buf, NULL, 10));
396
397 mutex_lock(&data->update_lock);
398 data->fan_min[index] = val;
399 w83793_write_value(client, W83793_REG_FAN_MIN(index),
400 (val >> 8) & 0xff);
401 w83793_write_value(client, W83793_REG_FAN_MIN(index) + 1, val & 0xff);
402 mutex_unlock(&data->update_lock);
403
404 return count;
405}
406
407#define PWM_DUTY 0
408#define PWM_START 1
409#define PWM_NONSTOP 2
410#define PWM_STOP_TIME 3
411static ssize_t
412show_pwm(struct device *dev, struct device_attribute *attr, char *buf)
413{
414 struct sensor_device_attribute_2 *sensor_attr =
415 to_sensor_dev_attr_2(attr);
416 struct w83793_data *data = w83793_update_device(dev);
417 u16 val;
418 int nr = sensor_attr->nr;
419 int index = sensor_attr->index;
420
421 if (PWM_STOP_TIME == nr)
422 val = TIME_FROM_REG(data->pwm_stop_time[index]);
423 else
424 val = (data->pwm[index][nr] & 0x3f) << 2;
425
426 return sprintf(buf, "%d\n", val);
427}
428
429static ssize_t
430store_pwm(struct device *dev, struct device_attribute *attr,
431 const char *buf, size_t count)
432{
433 struct i2c_client *client = to_i2c_client(dev);
434 struct w83793_data *data = i2c_get_clientdata(client);
435 struct sensor_device_attribute_2 *sensor_attr =
436 to_sensor_dev_attr_2(attr);
437 int nr = sensor_attr->nr;
438 int index = sensor_attr->index;
439 u8 val;
440
441 mutex_lock(&data->update_lock);
442 if (PWM_STOP_TIME == nr) {
443 val = TIME_TO_REG(simple_strtoul(buf, NULL, 10));
444 data->pwm_stop_time[index] = val;
445 w83793_write_value(client, W83793_REG_PWM_STOP_TIME(index),
446 val);
447 } else {
448 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 0, 0xff)
449 >> 2;
450 data->pwm[index][nr] =
451 w83793_read_value(client, W83793_REG_PWM(index, nr)) & 0xc0;
452 data->pwm[index][nr] |= val;
453 w83793_write_value(client, W83793_REG_PWM(index, nr),
454 data->pwm[index][nr]);
455 }
456
457 mutex_unlock(&data->update_lock);
458 return count;
459}
460
461static ssize_t
462show_temp(struct device *dev, struct device_attribute *attr, char *buf)
463{
464 struct sensor_device_attribute_2 *sensor_attr =
465 to_sensor_dev_attr_2(attr);
466 int nr = sensor_attr->nr;
467 int index = sensor_attr->index;
468 struct w83793_data *data = w83793_update_device(dev);
469 long temp = TEMP_FROM_REG(data->temp[index][nr]);
470
471 if (TEMP_READ == nr && index < 4) { /* Only TD1-TD4 have low bits */
472 int low = ((data->temp_low_bits >> (index * 2)) & 0x03) * 250;
473 temp += temp > 0 ? low : -low;
474 }
475 return sprintf(buf, "%ld\n", temp);
476}
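TD1-TD4 carry two extra bits each in a shared low-bits register, giving 0.25 degree steps that are added toward the sign of the reading. A worked example with made-up register contents:

#include <stdio.h>

/* Worked example of the extra resolution handled in show_temp(): TD1-TD4
 * keep two additional bits (0.25 degC steps) in a shared low-bits register. */
int main(void)
{
	signed char reg = 45;		/* whole degrees for TD2 */
	unsigned char low_bits = 0x24;	/* packed 2-bit fields for TD1-TD4 */
	int index = 1;			/* TD2 */
	long temp = reg * 1000;
	int low = ((low_bits >> (index * 2)) & 0x03) * 250;

	temp += temp > 0 ? low : -low;
	printf("TD2 = %ld millidegrees C\n", temp);	/* 45250 */
	return 0;
}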
477
478static ssize_t
479store_temp(struct device *dev, struct device_attribute *attr,
480 const char *buf, size_t count)
481{
482 struct sensor_device_attribute_2 *sensor_attr =
483 to_sensor_dev_attr_2(attr);
484 int nr = sensor_attr->nr;
485 int index = sensor_attr->index;
486 struct i2c_client *client = to_i2c_client(dev);
487 struct w83793_data *data = i2c_get_clientdata(client);
488 long tmp = simple_strtol(buf, NULL, 10);
489
490 mutex_lock(&data->update_lock);
491 data->temp[index][nr] = TEMP_TO_REG(tmp, -128, 127);
492 w83793_write_value(client, W83793_REG_TEMP[index][nr],
493 data->temp[index][nr]);
494 mutex_unlock(&data->update_lock);
495 return count;
496}
497
498/*
499 TD1-TD4
500 each has 4 modes (2 bits):
501 0: Stop monitor
502 1: Use internal temp sensor(default)
503 2: Use sensor in AMD CPU and get result by AMDSI
504 3: Use sensor in Intel CPU and get result by PECI
505
506 TR1-TR2
507 each has 2 modes (1 bit):
508 0: Disable temp sensor monitoring
509 1: Enable temp sensor monitoring
510*/
511
512/* 0 disable, 5 AMDSI, 6 PECI */
513static u8 TO_TEMP_MODE[] = { 0, 0, 5, 6 };
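For the TD1-TD4 inputs the 2-bit register field and the value reported through sysfs relate as follows; the TR1-TR2 inputs use a single bit and report 4 (thermistor) instead of 3. A small table dump of that mapping as implemented by show_temp_mode() below:

#include <stdio.h>

/* Register field (2 bits, TD1-TD4) -> value shown in sysfs:
 * 0 = monitoring stopped, 1 = internal sensor (reported as 3, thermal
 * diode), 2 = AMDSI (5), 3 = PECI (6). TR1-TR2 report 4 instead of 3. */
static const int td_reg_to_sysfs[4] = { 0, 3, 5, 6 };

int main(void)
{
	int field;

	for (field = 0; field < 4; field++)
		printf("TD register field %d -> temp type %d\n",
		       field, td_reg_to_sysfs[field]);
	return 0;
}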
514
515static ssize_t
516show_temp_mode(struct device *dev, struct device_attribute *attr, char *buf)
517{
518 struct w83793_data *data = w83793_update_device(dev);
519 struct sensor_device_attribute_2 *sensor_attr =
520 to_sensor_dev_attr_2(attr);
521 int index = sensor_attr->index;
522 u8 mask = (index < 4) ? 0x03 : 0x01;
523 u8 shift = (index < 4) ? (2 * index) : (index - 4);
524 u8 tmp;
525 index = (index < 4) ? 0 : 1;
526
527 tmp = (data->temp_mode[index] >> shift) & mask;
528
529 /* for the internal sensor, find out whether it is a diode or a thermistor */
530 if (tmp == 1) {
531 tmp = index == 0 ? 3 : 4;
532 } else {
533 tmp = TO_TEMP_MODE[tmp];
534 }
535
536 return sprintf(buf, "%d\n", tmp);
537}
538
539static ssize_t
540store_temp_mode(struct device *dev, struct device_attribute *attr,
541 const char *buf, size_t count)
542{
543 struct i2c_client *client = to_i2c_client(dev);
544 struct w83793_data *data = i2c_get_clientdata(client);
545 struct sensor_device_attribute_2 *sensor_attr =
546 to_sensor_dev_attr_2(attr);
547 int index = sensor_attr->index;
548 u8 mask = (index < 4) ? 0x03 : 0x01;
549 u8 shift = (index < 4) ? (2 * index) : (index - 4);
550 u8 val = simple_strtoul(buf, NULL, 10);
551
552 /* transform the sysfs interface values into table above */
553 if ((val == 5 || val == 6) && (index < 4)) {
554 val -= 3;
555 } else if ((val == 3 && index < 4)
556 || (val == 4 && index >= 4)
557 || val == 0) {
558 /* transform diode or thermistor into internal enable */
559 val = !!val;
560 } else {
561 return -EINVAL;
562 }
563
564 index = (index < 4) ? 0 : 1;
565 mutex_lock(&data->update_lock);
566 data->temp_mode[index] =
567 w83793_read_value(client, W83793_REG_TEMP_MODE[index]);
568 data->temp_mode[index] &= ~(mask << shift);
569 data->temp_mode[index] |= val << shift;
570 w83793_write_value(client, W83793_REG_TEMP_MODE[index],
571 data->temp_mode[index]);
572 mutex_unlock(&data->update_lock);
573
574 return count;
575}
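
/*
 * A sketch of the bit layout handled by show_temp_mode()/store_temp_mode()
 * above: TD1-TD4 occupy four 2-bit fields in W83793_REG_TEMP_MODE[0],
 * TR1-TR2 occupy two 1-bit fields in W83793_REG_TEMP_MODE[1].  The helper
 * below only recomputes the register index, mask and shift for a channel;
 * it is illustrative and not used by the driver.
 */
static void sketch_temp_mode_field(int channel, int *reg_idx, u8 *mask, u8 *shift)
{
	if (channel < 4) {		/* TD1-TD4: 2 bits each */
		*reg_idx = 0;
		*mask = 0x03;
		*shift = 2 * channel;
	} else {			/* TR1-TR2: 1 bit each */
		*reg_idx = 1;
		*mask = 0x01;
		*shift = channel - 4;
	}
}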
576
577#define SETUP_PWM_DEFAULT 0
578#define SETUP_PWM_UPTIME 1 /* Unit in 0.1s */
579#define SETUP_PWM_DOWNTIME 2 /* Unit in 0.1s */
580#define SETUP_TEMP_CRITICAL 3
581static ssize_t
582show_sf_setup(struct device *dev, struct device_attribute *attr, char *buf)
583{
584 struct sensor_device_attribute_2 *sensor_attr =
585 to_sensor_dev_attr_2(attr);
586 int nr = sensor_attr->nr;
587 struct w83793_data *data = w83793_update_device(dev);
588 u32 val = 0;
589
590 if (SETUP_PWM_DEFAULT == nr) {
591 val = (data->pwm_default & 0x3f) << 2;
592 } else if (SETUP_PWM_UPTIME == nr) {
593 val = TIME_FROM_REG(data->pwm_uptime);
594 } else if (SETUP_PWM_DOWNTIME == nr) {
595 val = TIME_FROM_REG(data->pwm_downtime);
596 } else if (SETUP_TEMP_CRITICAL == nr) {
597 val = TEMP_FROM_REG(data->temp_critical & 0x7f);
598 }
599
600 return sprintf(buf, "%d\n", val);
601}
602
603static ssize_t
604store_sf_setup(struct device *dev, struct device_attribute *attr,
605 const char *buf, size_t count)
606{
607 struct sensor_device_attribute_2 *sensor_attr =
608 to_sensor_dev_attr_2(attr);
609 int nr = sensor_attr->nr;
610 struct i2c_client *client = to_i2c_client(dev);
611 struct w83793_data *data = i2c_get_clientdata(client);
612
613 mutex_lock(&data->update_lock);
614 if (SETUP_PWM_DEFAULT == nr) {
615 data->pwm_default =
616 w83793_read_value(client, W83793_REG_PWM_DEFAULT) & 0xc0;
617 data->pwm_default |= SENSORS_LIMIT(simple_strtoul(buf, NULL,
618 10),
619 0, 0xff) >> 2;
620 w83793_write_value(client, W83793_REG_PWM_DEFAULT,
621 data->pwm_default);
622 } else if (SETUP_PWM_UPTIME == nr) {
623 data->pwm_uptime = TIME_TO_REG(simple_strtoul(buf, NULL, 10));
624 data->pwm_uptime += data->pwm_uptime == 0 ? 1 : 0;
625 w83793_write_value(client, W83793_REG_PWM_UPTIME,
626 data->pwm_uptime);
627 } else if (SETUP_PWM_DOWNTIME == nr) {
628 data->pwm_downtime = TIME_TO_REG(simple_strtoul(buf, NULL, 10));
629 data->pwm_downtime += data->pwm_downtime == 0 ? 1 : 0;
630 w83793_write_value(client, W83793_REG_PWM_DOWNTIME,
631 data->pwm_downtime);
632 } else { /* SETUP_TEMP_CRITICAL */
633 data->temp_critical =
634 w83793_read_value(client, W83793_REG_TEMP_CRITICAL) & 0x80;
635 data->temp_critical |= TEMP_TO_REG(simple_strtol(buf, NULL, 10),
636 0, 0x7f);
637 w83793_write_value(client, W83793_REG_TEMP_CRITICAL,
638 data->temp_critical);
639 }
640
641 mutex_unlock(&data->update_lock);
642 return count;
643}
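
/*
 * The pwm_default register keeps only 6 data bits; bits 7:6 are preserved
 * by the read-modify-write in store_sf_setup() above.  A sketch of the
 * sysfs <-> register conversion used for such 6-bit PWM values throughout
 * this driver (illustrative only):
 */
static u8 sketch_pwm_to_reg(unsigned long sysfs_val)	/* 0-255 -> 0-63 */
{
	return (sysfs_val > 0xff ? 0xff : sysfs_val) >> 2;
}

static unsigned int sketch_pwm_from_reg(u8 reg)		/* 0-63 -> 0-252 */
{
	return (reg & 0x3f) << 2;
}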
644
645/*
 646	Temp SmartFan control
 647	TEMP_FAN_MAP
 648	Controls which pwm fan a temp channel drives; bitfield, bit 0 selects pwm1...
 649	Two or more temp channels may control the same fan; the w83793
 650	always picks the most critical request and applies it to
 651	the related fan.
 652	A fan that appears in none of the 6 temp channel mappings
 653	stays in manual mode.
 654
 655	TEMP_PWM_ENABLE
 656	Each temp channel has its own SmartFan mode and controls the
 657	fans selected by TEMP_FAN_MAP
 658	0:	SmartFanII mode
 659	1:	Thermal Cruise mode
 660
 661	TEMP_CRUISE
 662	Target temperature in Thermal Cruise mode; the w83793 adjusts the
 663	fan speed to keep the temperature of the target device around this
 664	value.
 665
 666	TEMP_TOLERANCE
 667	If the temperature is higher or lower than the target by more than
 668	this tolerance, the w83793 speeds up or slows down the fan to bring
 669	the temperature back within the tolerance range.
 670*/
671
672#define TEMP_FAN_MAP 0
673#define TEMP_PWM_ENABLE 1
674#define TEMP_CRUISE 2
675#define TEMP_TOLERANCE 3
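
/*
 * Illustration of the TEMP_FAN_MAP bitfield described above (not used by
 * the driver): bit N of a channel's map selects pwmN+1, so a map value of
 * 0x05 means the temp channel drives pwm1 and pwm3, while a fan whose bit
 * is clear in every channel's map stays in manual mode.
 */
static int sketch_temp_drives_pwm(u8 temp_fan_map, int pwm_index /* 0-based */)
{
	return (temp_fan_map >> pwm_index) & 0x01;
}
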
676static ssize_t
677show_sf_ctrl(struct device *dev, struct device_attribute *attr, char *buf)
678{
679 struct sensor_device_attribute_2 *sensor_attr =
680 to_sensor_dev_attr_2(attr);
681 int nr = sensor_attr->nr;
682 int index = sensor_attr->index;
683 struct w83793_data *data = w83793_update_device(dev);
684 u32 val;
685
686 if (TEMP_FAN_MAP == nr) {
687 val = data->temp_fan_map[index];
688 } else if (TEMP_PWM_ENABLE == nr) {
 689		/* +2 to transform into 2 and 3 to conform with the sysfs intf */
690 val = ((data->pwm_enable >> index) & 0x01) + 2;
691 } else if (TEMP_CRUISE == nr) {
692 val = TEMP_FROM_REG(data->temp_cruise[index] & 0x7f);
693 } else { /* TEMP_TOLERANCE */
694 val = data->tolerance[index >> 1] >> ((index & 0x01) ? 4 : 0);
695 val = TEMP_FROM_REG(val & 0x0f);
696 }
697 return sprintf(buf, "%d\n", val);
698}
699
700static ssize_t
701store_sf_ctrl(struct device *dev, struct device_attribute *attr,
702 const char *buf, size_t count)
703{
704 struct sensor_device_attribute_2 *sensor_attr =
705 to_sensor_dev_attr_2(attr);
706 int nr = sensor_attr->nr;
707 int index = sensor_attr->index;
708 struct i2c_client *client = to_i2c_client(dev);
709 struct w83793_data *data = i2c_get_clientdata(client);
710 u32 val;
711
712 mutex_lock(&data->update_lock);
713 if (TEMP_FAN_MAP == nr) {
714 val = simple_strtoul(buf, NULL, 10) & 0xff;
715 w83793_write_value(client, W83793_REG_TEMP_FAN_MAP(index), val);
716 data->temp_fan_map[index] = val;
717 } else if (TEMP_PWM_ENABLE == nr) {
718 val = simple_strtoul(buf, NULL, 10);
719 if (2 == val || 3 == val) {
720 data->pwm_enable =
721 w83793_read_value(client, W83793_REG_PWM_ENABLE);
722 if (val - 2)
723 data->pwm_enable |= 1 << index;
724 else
725 data->pwm_enable &= ~(1 << index);
726 w83793_write_value(client, W83793_REG_PWM_ENABLE,
727 data->pwm_enable);
728 } else {
729 mutex_unlock(&data->update_lock);
730 return -EINVAL;
731 }
732 } else if (TEMP_CRUISE == nr) {
733 data->temp_cruise[index] =
734 w83793_read_value(client, W83793_REG_TEMP_CRUISE(index));
735 val = TEMP_TO_REG(simple_strtol(buf, NULL, 10), 0, 0x7f);
736 data->temp_cruise[index] &= 0x80;
737 data->temp_cruise[index] |= val;
738
739 w83793_write_value(client, W83793_REG_TEMP_CRUISE(index),
740 data->temp_cruise[index]);
741 } else { /* TEMP_TOLERANCE */
742 int i = index >> 1;
743 u8 shift = (index & 0x01) ? 4 : 0;
744 data->tolerance[i] =
745 w83793_read_value(client, W83793_REG_TEMP_TOL(i));
746
747 val = TEMP_TO_REG(simple_strtol(buf, NULL, 10), 0, 0x0f);
748 data->tolerance[i] &= ~(0x0f << shift);
749 data->tolerance[i] |= val << shift;
750 w83793_write_value(client, W83793_REG_TEMP_TOL(i),
751 data->tolerance[i]);
752 }
753
754 mutex_unlock(&data->update_lock);
755 return count;
756}
757
758static ssize_t
759show_sf2_pwm(struct device *dev, struct device_attribute *attr, char *buf)
760{
761 struct sensor_device_attribute_2 *sensor_attr =
762 to_sensor_dev_attr_2(attr);
763 int nr = sensor_attr->nr;
764 int index = sensor_attr->index;
765 struct w83793_data *data = w83793_update_device(dev);
766
767 return sprintf(buf, "%d\n", (data->sf2_pwm[index][nr] & 0x3f) << 2);
768}
769
770static ssize_t
771store_sf2_pwm(struct device *dev, struct device_attribute *attr,
772 const char *buf, size_t count)
773{
774 struct i2c_client *client = to_i2c_client(dev);
775 struct w83793_data *data = i2c_get_clientdata(client);
776 struct sensor_device_attribute_2 *sensor_attr =
777 to_sensor_dev_attr_2(attr);
778 int nr = sensor_attr->nr;
779 int index = sensor_attr->index;
780 u8 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 0, 0xff) >> 2;
781
782 mutex_lock(&data->update_lock);
783 data->sf2_pwm[index][nr] =
784 w83793_read_value(client, W83793_REG_SF2_PWM(index, nr)) & 0xc0;
785 data->sf2_pwm[index][nr] |= val;
786 w83793_write_value(client, W83793_REG_SF2_PWM(index, nr),
787 data->sf2_pwm[index][nr]);
788 mutex_unlock(&data->update_lock);
789 return count;
790}
791
792static ssize_t
793show_sf2_temp(struct device *dev, struct device_attribute *attr, char *buf)
794{
795 struct sensor_device_attribute_2 *sensor_attr =
796 to_sensor_dev_attr_2(attr);
797 int nr = sensor_attr->nr;
798 int index = sensor_attr->index;
799 struct w83793_data *data = w83793_update_device(dev);
800
801 return sprintf(buf, "%ld\n",
802 TEMP_FROM_REG(data->sf2_temp[index][nr] & 0x7f));
803}
804
805static ssize_t
806store_sf2_temp(struct device *dev, struct device_attribute *attr,
807 const char *buf, size_t count)
808{
809 struct i2c_client *client = to_i2c_client(dev);
810 struct w83793_data *data = i2c_get_clientdata(client);
811 struct sensor_device_attribute_2 *sensor_attr =
812 to_sensor_dev_attr_2(attr);
813 int nr = sensor_attr->nr;
814 int index = sensor_attr->index;
815 u8 val = TEMP_TO_REG(simple_strtol(buf, NULL, 10), 0, 0x7f);
816
817 mutex_lock(&data->update_lock);
818 data->sf2_temp[index][nr] =
819 w83793_read_value(client, W83793_REG_SF2_TEMP(index, nr)) & 0x80;
820 data->sf2_temp[index][nr] |= val;
821 w83793_write_value(client, W83793_REG_SF2_TEMP(index, nr),
822 data->sf2_temp[index][nr]);
823 mutex_unlock(&data->update_lock);
824 return count;
825}
826
827/* only Vcore A/B and Vtt have 2 additional bits of precision */
828static ssize_t
829show_in(struct device *dev, struct device_attribute *attr, char *buf)
830{
831 struct sensor_device_attribute_2 *sensor_attr =
832 to_sensor_dev_attr_2(attr);
833 int nr = sensor_attr->nr;
834 int index = sensor_attr->index;
835 struct w83793_data *data = w83793_update_device(dev);
836 u16 val = data->in[index][nr];
837
838 if (index < 3) {
839 val <<= 2;
840 val += (data->in_low_bits[nr] >> (index * 2)) & 0x3;
841 }
842 return sprintf(buf, "%d\n", val * scale_in[index]);
843}
844
845static ssize_t
846store_in(struct device *dev, struct device_attribute *attr,
847 const char *buf, size_t count)
848{
849 struct sensor_device_attribute_2 *sensor_attr =
850 to_sensor_dev_attr_2(attr);
851 int nr = sensor_attr->nr;
852 int index = sensor_attr->index;
853 struct i2c_client *client = to_i2c_client(dev);
854 struct w83793_data *data = i2c_get_clientdata(client);
855 u32 val;
856
857 val =
858 (simple_strtoul(buf, NULL, 10) +
859 scale_in[index] / 2) / scale_in[index];
860 mutex_lock(&data->update_lock);
861 if (index > 2) {
862 val = SENSORS_LIMIT(val, 0, 255);
863 } else {
864 val = SENSORS_LIMIT(val, 0, 0x3FF);
865 data->in_low_bits[nr] =
866 w83793_read_value(client, W83793_REG_IN_LOW_BITS[nr]);
867 data->in_low_bits[nr] &= ~(0x03 << (2 * index));
868 data->in_low_bits[nr] |= (val & 0x03) << (2 * index);
869 w83793_write_value(client, W83793_REG_IN_LOW_BITS[nr],
870 data->in_low_bits[nr]);
871 val >>= 2;
872 }
873 data->in[index][nr] = val;
874 w83793_write_value(client, W83793_REG_IN[index][nr],
875 data->in[index][nr]);
876 mutex_unlock(&data->update_lock);
877 return count;
878}
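
/*
 * A sketch of the 10-bit reconstruction done in show_in() above for
 * in0-in2 (Vcore A/B and Vtt): the main register holds the upper 8 bits
 * and the two extra LSBs come from W83793_REG_IN_LOW_BITS.  The result is
 * still a raw count; scale_in[index] (defined earlier in this file)
 * converts it to the value reported via sysfs.  Illustrative only.
 */
static unsigned int sketch_in_raw(u8 in_reg, u8 in_low_bits, int index)
{
	unsigned int val = in_reg;

	if (index < 3) {			/* 10-bit channels */
		val <<= 2;
		val |= (in_low_bits >> (index * 2)) & 0x03;
	}
	return val;
}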
879
880#define NOT_USED -1
881
882#define SENSOR_ATTR_IN(index) \
883 SENSOR_ATTR_2(in##index##_input, S_IRUGO, show_in, NULL, \
884 IN_READ, index), \
885 SENSOR_ATTR_2(in##index##_max, S_IRUGO | S_IWUSR, show_in, \
886 store_in, IN_MAX, index), \
887 SENSOR_ATTR_2(in##index##_min, S_IRUGO | S_IWUSR, show_in, \
888 store_in, IN_LOW, index), \
889 SENSOR_ATTR_2(in##index##_alarm, S_IRUGO, show_alarm_beep, \
890 NULL, ALARM_STATUS, index + ((index > 2) ? 1 : 0)), \
891 SENSOR_ATTR_2(in##index##_beep, S_IWUSR | S_IRUGO, \
892 show_alarm_beep, store_beep, BEEP_ENABLE, \
893 index + ((index > 2) ? 1 : 0))
894
895#define SENSOR_ATTR_FAN(index) \
896 SENSOR_ATTR_2(fan##index##_alarm, S_IRUGO, show_alarm_beep, \
897 NULL, ALARM_STATUS, index + 17), \
898 SENSOR_ATTR_2(fan##index##_beep, S_IWUSR | S_IRUGO, \
899 show_alarm_beep, store_beep, BEEP_ENABLE, index + 17), \
900 SENSOR_ATTR_2(fan##index##_input, S_IRUGO, show_fan, \
901 NULL, FAN_INPUT, index - 1), \
902 SENSOR_ATTR_2(fan##index##_min, S_IWUSR | S_IRUGO, \
903 show_fan, store_fan_min, FAN_MIN, index - 1)
904
905#define SENSOR_ATTR_PWM(index) \
906 SENSOR_ATTR_2(pwm##index, S_IWUSR | S_IRUGO, show_pwm, \
907 store_pwm, PWM_DUTY, index - 1), \
908 SENSOR_ATTR_2(pwm##index##_nonstop, S_IWUSR | S_IRUGO, \
909 show_pwm, store_pwm, PWM_NONSTOP, index - 1), \
910 SENSOR_ATTR_2(pwm##index##_start, S_IWUSR | S_IRUGO, \
911 show_pwm, store_pwm, PWM_START, index - 1), \
912 SENSOR_ATTR_2(pwm##index##_stop_time, S_IWUSR | S_IRUGO, \
913 show_pwm, store_pwm, PWM_STOP_TIME, index - 1)
914
915#define SENSOR_ATTR_TEMP(index) \
916 SENSOR_ATTR_2(temp##index##_type, S_IRUGO | S_IWUSR, \
917 show_temp_mode, store_temp_mode, NOT_USED, index - 1), \
918 SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp, \
919 NULL, TEMP_READ, index - 1), \
920 SENSOR_ATTR_2(temp##index##_max, S_IRUGO | S_IWUSR, show_temp, \
921 store_temp, TEMP_CRIT, index - 1), \
922 SENSOR_ATTR_2(temp##index##_max_hyst, S_IRUGO | S_IWUSR, \
923 show_temp, store_temp, TEMP_CRIT_HYST, index - 1), \
924 SENSOR_ATTR_2(temp##index##_warn, S_IRUGO | S_IWUSR, show_temp, \
925 store_temp, TEMP_WARN, index - 1), \
926 SENSOR_ATTR_2(temp##index##_warn_hyst, S_IRUGO | S_IWUSR, \
927 show_temp, store_temp, TEMP_WARN_HYST, index - 1), \
928 SENSOR_ATTR_2(temp##index##_alarm, S_IRUGO, \
929 show_alarm_beep, NULL, ALARM_STATUS, index + 11), \
930 SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \
931 show_alarm_beep, store_beep, BEEP_ENABLE, index + 11), \
932 SENSOR_ATTR_2(temp##index##_auto_channels_pwm, \
933 S_IRUGO | S_IWUSR, show_sf_ctrl, store_sf_ctrl, \
934 TEMP_FAN_MAP, index - 1), \
935 SENSOR_ATTR_2(temp##index##_pwm_enable, S_IWUSR | S_IRUGO, \
936 show_sf_ctrl, store_sf_ctrl, TEMP_PWM_ENABLE, \
937 index - 1), \
938 SENSOR_ATTR_2(thermal_cruise##index, S_IRUGO | S_IWUSR, \
939 show_sf_ctrl, store_sf_ctrl, TEMP_CRUISE, index - 1), \
940 SENSOR_ATTR_2(tolerance##index, S_IRUGO | S_IWUSR, show_sf_ctrl,\
941 store_sf_ctrl, TEMP_TOLERANCE, index - 1), \
942 SENSOR_ATTR_2(temp##index##_auto_point1_pwm, S_IRUGO | S_IWUSR, \
943 show_sf2_pwm, store_sf2_pwm, 0, index - 1), \
944 SENSOR_ATTR_2(temp##index##_auto_point2_pwm, S_IRUGO | S_IWUSR, \
945 show_sf2_pwm, store_sf2_pwm, 1, index - 1), \
946 SENSOR_ATTR_2(temp##index##_auto_point3_pwm, S_IRUGO | S_IWUSR, \
947 show_sf2_pwm, store_sf2_pwm, 2, index - 1), \
948 SENSOR_ATTR_2(temp##index##_auto_point4_pwm, S_IRUGO | S_IWUSR, \
949 show_sf2_pwm, store_sf2_pwm, 3, index - 1), \
950 SENSOR_ATTR_2(temp##index##_auto_point5_pwm, S_IRUGO | S_IWUSR, \
951 show_sf2_pwm, store_sf2_pwm, 4, index - 1), \
952 SENSOR_ATTR_2(temp##index##_auto_point6_pwm, S_IRUGO | S_IWUSR, \
953 show_sf2_pwm, store_sf2_pwm, 5, index - 1), \
954 SENSOR_ATTR_2(temp##index##_auto_point7_pwm, S_IRUGO | S_IWUSR, \
955 show_sf2_pwm, store_sf2_pwm, 6, index - 1), \
956 SENSOR_ATTR_2(temp##index##_auto_point1_temp, S_IRUGO | S_IWUSR,\
957 show_sf2_temp, store_sf2_temp, 0, index - 1), \
958 SENSOR_ATTR_2(temp##index##_auto_point2_temp, S_IRUGO | S_IWUSR,\
959 show_sf2_temp, store_sf2_temp, 1, index - 1), \
960 SENSOR_ATTR_2(temp##index##_auto_point3_temp, S_IRUGO | S_IWUSR,\
961 show_sf2_temp, store_sf2_temp, 2, index - 1), \
962 SENSOR_ATTR_2(temp##index##_auto_point4_temp, S_IRUGO | S_IWUSR,\
963 show_sf2_temp, store_sf2_temp, 3, index - 1), \
964 SENSOR_ATTR_2(temp##index##_auto_point5_temp, S_IRUGO | S_IWUSR,\
965 show_sf2_temp, store_sf2_temp, 4, index - 1), \
966 SENSOR_ATTR_2(temp##index##_auto_point6_temp, S_IRUGO | S_IWUSR,\
967 show_sf2_temp, store_sf2_temp, 5, index - 1), \
968 SENSOR_ATTR_2(temp##index##_auto_point7_temp, S_IRUGO | S_IWUSR,\
969 show_sf2_temp, store_sf2_temp, 6, index - 1)
970
971static struct sensor_device_attribute_2 w83793_sensor_attr_2[] = {
972 SENSOR_ATTR_IN(0),
973 SENSOR_ATTR_IN(1),
974 SENSOR_ATTR_IN(2),
975 SENSOR_ATTR_IN(3),
976 SENSOR_ATTR_IN(4),
977 SENSOR_ATTR_IN(5),
978 SENSOR_ATTR_IN(6),
979 SENSOR_ATTR_IN(7),
980 SENSOR_ATTR_IN(8),
981 SENSOR_ATTR_IN(9),
982 SENSOR_ATTR_TEMP(1),
983 SENSOR_ATTR_TEMP(2),
984 SENSOR_ATTR_TEMP(3),
985 SENSOR_ATTR_TEMP(4),
986 SENSOR_ATTR_TEMP(5),
987 SENSOR_ATTR_TEMP(6),
988 SENSOR_ATTR_FAN(1),
989 SENSOR_ATTR_FAN(2),
990 SENSOR_ATTR_FAN(3),
991 SENSOR_ATTR_FAN(4),
992 SENSOR_ATTR_FAN(5),
993 SENSOR_ATTR_PWM(1),
994 SENSOR_ATTR_PWM(2),
995 SENSOR_ATTR_PWM(3),
996};
997
998/* Fan6-Fan12 */
999static struct sensor_device_attribute_2 w83793_left_fan[] = {
1000 SENSOR_ATTR_FAN(6),
1001 SENSOR_ATTR_FAN(7),
1002 SENSOR_ATTR_FAN(8),
1003 SENSOR_ATTR_FAN(9),
1004 SENSOR_ATTR_FAN(10),
1005 SENSOR_ATTR_FAN(11),
1006 SENSOR_ATTR_FAN(12),
1007};
1008
1009/* Pwm4-Pwm8 */
1010static struct sensor_device_attribute_2 w83793_left_pwm[] = {
1011 SENSOR_ATTR_PWM(4),
1012 SENSOR_ATTR_PWM(5),
1013 SENSOR_ATTR_PWM(6),
1014 SENSOR_ATTR_PWM(7),
1015 SENSOR_ATTR_PWM(8),
1016};
1017
1018static struct sensor_device_attribute_2 sda_single_files[] = {
1019 SENSOR_ATTR_2(cpu0_vid, S_IRUGO, show_vid, NULL, NOT_USED, 0),
1020 SENSOR_ATTR_2(cpu1_vid, S_IRUGO, show_vid, NULL, NOT_USED, 1),
1021 SENSOR_ATTR_2(vrm, S_IWUSR | S_IRUGO, show_vrm, store_vrm,
1022 NOT_USED, NOT_USED),
1023 SENSOR_ATTR_2(chassis, S_IWUSR | S_IRUGO, show_alarm_beep,
1024 store_chassis_clear, ALARM_STATUS, 30),
1025 SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_beep_enable,
1026 store_beep_enable, NOT_USED, NOT_USED),
1027 SENSOR_ATTR_2(pwm_default, S_IWUSR | S_IRUGO, show_sf_setup,
1028 store_sf_setup, SETUP_PWM_DEFAULT, NOT_USED),
1029 SENSOR_ATTR_2(pwm_uptime, S_IWUSR | S_IRUGO, show_sf_setup,
1030 store_sf_setup, SETUP_PWM_UPTIME, NOT_USED),
1031 SENSOR_ATTR_2(pwm_downtime, S_IWUSR | S_IRUGO, show_sf_setup,
1032 store_sf_setup, SETUP_PWM_DOWNTIME, NOT_USED),
1033 SENSOR_ATTR_2(temp_critical, S_IWUSR | S_IRUGO, show_sf_setup,
1034 store_sf_setup, SETUP_TEMP_CRITICAL, NOT_USED),
1035};
1036
1037static void w83793_init_client(struct i2c_client *client)
1038{
1039 if (reset) {
1040 w83793_write_value(client, W83793_REG_CONFIG, 0x80);
1041 }
1042
1043 /* Start monitoring */
1044 w83793_write_value(client, W83793_REG_CONFIG,
1045 w83793_read_value(client, W83793_REG_CONFIG) | 0x01);
1046
1047}
1048
1049static int w83793_attach_adapter(struct i2c_adapter *adapter)
1050{
1051 if (!(adapter->class & I2C_CLASS_HWMON))
1052 return 0;
1053 return i2c_probe(adapter, &addr_data, w83793_detect);
1054}
1055
1056static int w83793_detach_client(struct i2c_client *client)
1057{
1058 struct w83793_data *data = i2c_get_clientdata(client);
1059 struct device *dev = &client->dev;
1060 int err, i;
1061
1062 /* main client */
1063 if (data) {
1064 hwmon_device_unregister(data->class_dev);
1065
1066 for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++)
1067 device_remove_file(dev,
1068 &w83793_sensor_attr_2[i].dev_attr);
1069
1070 for (i = 0; i < ARRAY_SIZE(sda_single_files); i++)
1071 device_remove_file(dev, &sda_single_files[i].dev_attr);
1072
1073 for (i = 0; i < ARRAY_SIZE(w83793_left_fan); i++)
1074 device_remove_file(dev, &w83793_left_fan[i].dev_attr);
1075
1076 for (i = 0; i < ARRAY_SIZE(w83793_left_pwm); i++)
1077 device_remove_file(dev, &w83793_left_pwm[i].dev_attr);
1078 }
1079
1080 if ((err = i2c_detach_client(client)))
1081 return err;
1082
1083 /* main client */
1084 if (data)
1085 kfree(data);
1086 /* subclient */
1087 else
1088 kfree(client);
1089
1090 return 0;
1091}
1092
1093static int
1094w83793_create_subclient(struct i2c_adapter *adapter,
1095 struct i2c_client *client, int addr,
1096 struct i2c_client **sub_cli)
1097{
1098 int err = 0;
1099 struct i2c_client *sub_client;
1100
1101 (*sub_cli) = sub_client =
1102 kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
1103 if (!(sub_client)) {
1104 return -ENOMEM;
1105 }
1106 sub_client->addr = 0x48 + addr;
1107 i2c_set_clientdata(sub_client, NULL);
1108 sub_client->adapter = adapter;
1109 sub_client->driver = &w83793_driver;
1110 strlcpy(sub_client->name, "w83793 subclient", I2C_NAME_SIZE);
1111 if ((err = i2c_attach_client(sub_client))) {
1112 dev_err(&client->dev, "subclient registration "
1113 "at address 0x%x failed\n", sub_client->addr);
1114 kfree(sub_client);
1115 }
1116 return err;
1117}
1118
1119static int
1120w83793_detect_subclients(struct i2c_adapter *adapter, int address,
1121 int kind, struct i2c_client *client)
1122{
1123 int i, id, err;
1124 u8 tmp;
1125 struct w83793_data *data = i2c_get_clientdata(client);
1126
1127 id = i2c_adapter_id(adapter);
1128 if (force_subclients[0] == id && force_subclients[1] == address) {
1129 for (i = 2; i <= 3; i++) {
1130 if (force_subclients[i] < 0x48
1131 || force_subclients[i] > 0x4f) {
1132 dev_err(&client->dev,
1133 "invalid subclient "
1134 "address %d; must be 0x48-0x4f\n",
1135 force_subclients[i]);
1136 err = -EINVAL;
1137 goto ERROR_SC_0;
1138 }
1139 }
1140 w83793_write_value(client, W83793_REG_I2C_SUBADDR,
1141 (force_subclients[2] & 0x07) |
1142 ((force_subclients[3] & 0x07) << 4));
1143 }
1144
1145 tmp = w83793_read_value(client, W83793_REG_I2C_SUBADDR);
1146 if (!(tmp & 0x08)) {
1147 err =
1148 w83793_create_subclient(adapter, client, tmp & 0x7,
1149 &data->lm75[0]);
1150 if (err < 0)
1151 goto ERROR_SC_0;
1152 }
1153 if (!(tmp & 0x80)) {
1154 if ((data->lm75[0] != NULL)
1155 && ((tmp & 0x7) == ((tmp >> 4) & 0x7))) {
1156 dev_err(&client->dev,
1157 "duplicate addresses 0x%x, "
1158 "use force_subclients\n", data->lm75[0]->addr);
1159 err = -ENODEV;
1160 goto ERROR_SC_1;
1161 }
1162 err = w83793_create_subclient(adapter, client,
1163 (tmp >> 4) & 0x7, &data->lm75[1]);
1164 if (err < 0)
1165 goto ERROR_SC_1;
1166 }
1167
1168 return 0;
1169
1170 /* Undo inits in case of errors */
1171
1172ERROR_SC_1:
1173 if (data->lm75[0] != NULL) {
1174 i2c_detach_client(data->lm75[0]);
1175 kfree(data->lm75[0]);
1176 }
1177ERROR_SC_0:
1178 return err;
1179}
1180
1181static int w83793_detect(struct i2c_adapter *adapter, int address, int kind)
1182{
1183 int i;
1184 u8 tmp, val;
1185 struct i2c_client *client;
1186 struct device *dev;
1187 struct w83793_data *data;
1188 int files_fan = ARRAY_SIZE(w83793_left_fan) / 7;
1189 int files_pwm = ARRAY_SIZE(w83793_left_pwm) / 5;
1190 int err = 0;
1191
1192 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
1193 goto exit;
1194 }
1195
1196 /* OK. For now, we presume we have a valid client. We now create the
1197 client structure, even though we cannot fill it completely yet.
1198 But it allows us to access w83793_{read,write}_value. */
1199
1200 if (!(data = kzalloc(sizeof(struct w83793_data), GFP_KERNEL))) {
1201 err = -ENOMEM;
1202 goto exit;
1203 }
1204
1205 client = &data->client;
1206 dev = &client->dev;
1207 i2c_set_clientdata(client, data);
1208 client->addr = address;
1209 client->adapter = adapter;
1210 client->driver = &w83793_driver;
1211
1212 data->bank = i2c_smbus_read_byte_data(client, W83793_REG_BANKSEL);
1213
1214 /* Now, we do the remaining detection. */
1215 if (kind < 0) {
1216 tmp = data->bank & 0x80 ? 0x5c : 0xa3;
1217 /* Check Winbond vendor ID */
1218 if (tmp != i2c_smbus_read_byte_data(client,
1219 W83793_REG_VENDORID)) {
1220 pr_debug("w83793: Detection failed at check "
1221 "vendor id\n");
1222 err = -ENODEV;
1223 goto free_mem;
1224 }
1225
 1226		/* For a Winbond chip, the chip address and W83793_REG_I2C_ADDR
 1227		   should match */
1228 if ((data->bank & 0x07) == 0
1229 && i2c_smbus_read_byte_data(client, W83793_REG_I2C_ADDR) !=
1230 (address << 1)) {
1231 pr_debug("w83793: Detection failed at check "
1232 "i2c addr\n");
1233 err = -ENODEV;
1234 goto free_mem;
1235 }
1236
1237 }
1238
 1239	/* Either a force parameter was given, or we have already detected the
 1240	   Winbond chip. Determine the chip type now */
1241
1242 if (kind <= 0) {
1243 if (0x7b == w83793_read_value(client, W83793_REG_CHIPID)) {
1244 kind = w83793;
1245 } else {
1246 if (kind == 0)
1247 dev_warn(&adapter->dev, "w83793: Ignoring "
1248 "'force' parameter for unknown chip "
1249 "at address 0x%02x\n", address);
1250 err = -ENODEV;
1251 goto free_mem;
1252 }
1253 }
1254
 1255	/* Fill in the remaining client fields and put it into the global list */
1256 strlcpy(client->name, "w83793", I2C_NAME_SIZE);
1257
1258 mutex_init(&data->update_lock);
1259
1260 /* Tell the I2C layer a new client has arrived */
1261 if ((err = i2c_attach_client(client)))
1262 goto free_mem;
1263
1264 if ((err = w83793_detect_subclients(adapter, address, kind, client)))
1265 goto detach_client;
1266
1267 /* Initialize the chip */
1268 w83793_init_client(client);
1269
1270 data->vrm = vid_which_vrm();
 1271	/*
 1272	   Only fans 1-5 have their own input pins;
 1273	   pwm 1-3 have their own pins
 1274	 */
1275 data->has_fan = 0x1f;
1276 data->has_pwm = 0x07;
1277 tmp = w83793_read_value(client, W83793_REG_MFC);
1278 val = w83793_read_value(client, W83793_REG_FANIN_CTRL);
1279
1280 /* check the function of pins 49-56 */
1281 if (!(tmp & 0x80)) {
1282 data->has_pwm |= 0x18; /* pwm 4,5 */
1283 if (val & 0x01) { /* fan 6 */
1284 data->has_fan |= 0x20;
1285 data->has_pwm |= 0x20;
1286 }
1287 if (val & 0x02) { /* fan 7 */
1288 data->has_fan |= 0x40;
1289 data->has_pwm |= 0x40;
1290 }
1291 if (!(tmp & 0x40) && (val & 0x04)) { /* fan 8 */
1292 data->has_fan |= 0x80;
1293 data->has_pwm |= 0x80;
1294 }
1295 }
1296
1297 if (0x08 == (tmp & 0x0c)) {
1298 if (val & 0x08) /* fan 9 */
1299 data->has_fan |= 0x100;
1300 if (val & 0x10) /* fan 10 */
1301 data->has_fan |= 0x200;
1302 }
1303
1304 if (0x20 == (tmp & 0x30)) {
1305 if (val & 0x20) /* fan 11 */
1306 data->has_fan |= 0x400;
1307 if (val & 0x40) /* fan 12 */
1308 data->has_fan |= 0x800;
1309 }
1310
1311 if ((tmp & 0x01) && (val & 0x04)) { /* fan 8, second location */
1312 data->has_fan |= 0x80;
1313 data->has_pwm |= 0x80;
1314 }
1315
1316 /* Register sysfs hooks */
1317 for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++) {
1318 err = device_create_file(dev,
1319 &w83793_sensor_attr_2[i].dev_attr);
1320 if (err)
1321 goto exit_remove;
1322 }
1323
1324 for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) {
1325 err = device_create_file(dev, &sda_single_files[i].dev_attr);
1326 if (err)
1327 goto exit_remove;
1328
1329 }
1330
1331 for (i = 5; i < 12; i++) {
1332 int j;
1333 if (!(data->has_fan & (1 << i)))
1334 continue;
1335 for (j = 0; j < files_fan; j++) {
1336 err = device_create_file(dev,
1337 &w83793_left_fan[(i - 5) * files_fan
1338 + j].dev_attr);
1339 if (err)
1340 goto exit_remove;
1341 }
1342 }
1343
1344 for (i = 3; i < 8; i++) {
1345 int j;
1346 if (!(data->has_pwm & (1 << i)))
1347 continue;
1348 for (j = 0; j < files_pwm; j++) {
1349 err = device_create_file(dev,
1350 &w83793_left_pwm[(i - 3) * files_pwm
1351 + j].dev_attr);
1352 if (err)
1353 goto exit_remove;
1354 }
1355 }
1356
1357 data->class_dev = hwmon_device_register(dev);
1358 if (IS_ERR(data->class_dev)) {
1359 err = PTR_ERR(data->class_dev);
1360 goto exit_remove;
1361 }
1362
1363 return 0;
1364
1365 /* Unregister sysfs hooks */
1366
1367exit_remove:
1368 for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++)
1369 device_remove_file(dev, &w83793_sensor_attr_2[i].dev_attr);
1370
1371 for (i = 0; i < ARRAY_SIZE(sda_single_files); i++)
1372 device_remove_file(dev, &sda_single_files[i].dev_attr);
1373
1374 for (i = 0; i < ARRAY_SIZE(w83793_left_fan); i++)
1375 device_remove_file(dev, &w83793_left_fan[i].dev_attr);
1376
1377 for (i = 0; i < ARRAY_SIZE(w83793_left_pwm); i++)
1378 device_remove_file(dev, &w83793_left_pwm[i].dev_attr);
1379
1380 if (data->lm75[0] != NULL) {
1381 i2c_detach_client(data->lm75[0]);
1382 kfree(data->lm75[0]);
1383 }
1384 if (data->lm75[1] != NULL) {
1385 i2c_detach_client(data->lm75[1]);
1386 kfree(data->lm75[1]);
1387 }
1388detach_client:
1389 i2c_detach_client(client);
1390free_mem:
1391 kfree(data);
1392exit:
1393 return err;
1394}
1395
1396static void w83793_update_nonvolatile(struct device *dev)
1397{
1398 struct i2c_client *client = to_i2c_client(dev);
1399 struct w83793_data *data = i2c_get_clientdata(client);
1400 int i, j;
 1401	/*
 1402	   These are somewhat "stable" registers; updating them every time
 1403	   takes so much time that it's just not worth it. Refresh them only
 1404	   at a long interval to keep the overhead down.
 1405	 */
1406 if (!(time_after(jiffies, data->last_nonvolatile + HZ * 300)
1407 || !data->valid))
1408 return;
1409 /* update voltage limits */
1410 for (i = 1; i < 3; i++) {
1411 for (j = 0; j < ARRAY_SIZE(data->in); j++) {
1412 data->in[j][i] =
1413 w83793_read_value(client, W83793_REG_IN[j][i]);
1414 }
1415 data->in_low_bits[i] =
1416 w83793_read_value(client, W83793_REG_IN_LOW_BITS[i]);
1417 }
1418
1419 for (i = 0; i < ARRAY_SIZE(data->fan_min); i++) {
 1420		/* Update the fan min limits */
1421 if (!(data->has_fan & (1 << i))) {
1422 continue;
1423 }
1424 data->fan_min[i] =
1425 w83793_read_value(client, W83793_REG_FAN_MIN(i)) << 8;
1426 data->fan_min[i] |=
1427 w83793_read_value(client, W83793_REG_FAN_MIN(i) + 1);
1428 }
1429
1430 for (i = 0; i < ARRAY_SIZE(data->temp_fan_map); i++) {
1431 data->temp_fan_map[i] =
1432 w83793_read_value(client, W83793_REG_TEMP_FAN_MAP(i));
1433 for (j = 1; j < 5; j++) {
1434 data->temp[i][j] =
1435 w83793_read_value(client, W83793_REG_TEMP[i][j]);
1436 }
1437 data->temp_cruise[i] =
1438 w83793_read_value(client, W83793_REG_TEMP_CRUISE(i));
1439 for (j = 0; j < 7; j++) {
1440 data->sf2_pwm[i][j] =
1441 w83793_read_value(client, W83793_REG_SF2_PWM(i, j));
1442 data->sf2_temp[i][j] =
1443 w83793_read_value(client,
1444 W83793_REG_SF2_TEMP(i, j));
1445 }
1446 }
1447
1448 for (i = 0; i < ARRAY_SIZE(data->temp_mode); i++)
1449 data->temp_mode[i] =
1450 w83793_read_value(client, W83793_REG_TEMP_MODE[i]);
1451
1452 for (i = 0; i < ARRAY_SIZE(data->tolerance); i++) {
1453 data->tolerance[i] =
1454 w83793_read_value(client, W83793_REG_TEMP_TOL(i));
1455 }
1456
1457 for (i = 0; i < ARRAY_SIZE(data->pwm); i++) {
1458 if (!(data->has_pwm & (1 << i)))
1459 continue;
1460 data->pwm[i][PWM_NONSTOP] =
1461 w83793_read_value(client, W83793_REG_PWM(i, PWM_NONSTOP));
1462 data->pwm[i][PWM_START] =
1463 w83793_read_value(client, W83793_REG_PWM(i, PWM_START));
1464 data->pwm_stop_time[i] =
1465 w83793_read_value(client, W83793_REG_PWM_STOP_TIME(i));
1466 }
1467
1468 data->pwm_default = w83793_read_value(client, W83793_REG_PWM_DEFAULT);
1469 data->pwm_enable = w83793_read_value(client, W83793_REG_PWM_ENABLE);
1470 data->pwm_uptime = w83793_read_value(client, W83793_REG_PWM_UPTIME);
1471 data->pwm_downtime = w83793_read_value(client, W83793_REG_PWM_DOWNTIME);
1472 data->temp_critical =
1473 w83793_read_value(client, W83793_REG_TEMP_CRITICAL);
1474 data->beep_enable = w83793_read_value(client, W83793_REG_OVT_BEEP);
1475
1476 for (i = 0; i < ARRAY_SIZE(data->beeps); i++) {
1477 data->beeps[i] = w83793_read_value(client, W83793_REG_BEEP(i));
1478 }
1479
1480 data->last_nonvolatile = jiffies;
1481}
1482
1483static struct w83793_data *w83793_update_device(struct device *dev)
1484{
1485 struct i2c_client *client = to_i2c_client(dev);
1486 struct w83793_data *data = i2c_get_clientdata(client);
1487 int i;
1488
1489 mutex_lock(&data->update_lock);
1490
1491 if (!(time_after(jiffies, data->last_updated + HZ * 2)
1492 || !data->valid))
1493 goto END;
1494
 1495	/* Update the measured voltage values */
1496 for (i = 0; i < ARRAY_SIZE(data->in); i++)
1497 data->in[i][IN_READ] =
1498 w83793_read_value(client, W83793_REG_IN[i][IN_READ]);
1499
1500 data->in_low_bits[IN_READ] =
1501 w83793_read_value(client, W83793_REG_IN_LOW_BITS[IN_READ]);
1502
1503 for (i = 0; i < ARRAY_SIZE(data->fan); i++) {
1504 if (!(data->has_fan & (1 << i))) {
1505 continue;
1506 }
1507 data->fan[i] =
1508 w83793_read_value(client, W83793_REG_FAN(i)) << 8;
1509 data->fan[i] |=
1510 w83793_read_value(client, W83793_REG_FAN(i) + 1);
1511 }
1512
1513 for (i = 0; i < ARRAY_SIZE(data->temp); i++)
1514 data->temp[i][TEMP_READ] =
1515 w83793_read_value(client, W83793_REG_TEMP[i][TEMP_READ]);
1516
1517 data->temp_low_bits =
1518 w83793_read_value(client, W83793_REG_TEMP_LOW_BITS);
1519
1520 for (i = 0; i < ARRAY_SIZE(data->pwm); i++) {
1521 if (data->has_pwm & (1 << i))
1522 data->pwm[i][PWM_DUTY] =
1523 w83793_read_value(client,
1524 W83793_REG_PWM(i, PWM_DUTY));
1525 }
1526
1527 for (i = 0; i < ARRAY_SIZE(data->alarms); i++)
1528 data->alarms[i] =
1529 w83793_read_value(client, W83793_REG_ALARM(i));
1530 data->vid[0] = w83793_read_value(client, W83793_REG_VID_INA);
1531 data->vid[1] = w83793_read_value(client, W83793_REG_VID_INB);
1532 w83793_update_nonvolatile(dev);
1533 data->last_updated = jiffies;
1534 data->valid = 1;
1535
1536END:
1537 mutex_unlock(&data->update_lock);
1538 return data;
1539}
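
/*
 * The update path above uses a two-level cache: volatile readings are
 * refreshed at most every 2 seconds, while the "stable" registers (see
 * w83793_update_nonvolatile()) are refreshed at most every 300 seconds.
 * The staleness test boils down to the sketch below (illustrative only).
 */
static int sketch_cache_expired(unsigned long last, unsigned long interval_s, char valid)
{
	return time_after(jiffies, last + HZ * interval_s) || !valid;
}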
1540
1541/* Ignore the possibility that somebody changes the bank outside the driver.
 1542   Must be called with data->update_lock held, except during initialization */
1543static u8 w83793_read_value(struct i2c_client *client, u16 reg)
1544{
1545 struct w83793_data *data = i2c_get_clientdata(client);
1546 u8 res = 0xff;
1547 u8 new_bank = reg >> 8;
1548
1549 new_bank |= data->bank & 0xfc;
1550 if (data->bank != new_bank) {
1551 if (i2c_smbus_write_byte_data
1552 (client, W83793_REG_BANKSEL, new_bank) >= 0)
1553 data->bank = new_bank;
1554 else {
1555 dev_err(&client->dev,
1556 "set bank to %d failed, fall back "
1557 "to bank %d, read reg 0x%x error\n",
1558 new_bank, data->bank, reg);
1559 res = 0x0; /* read 0x0 from the chip */
1560 goto END;
1561 }
1562 }
1563 res = i2c_smbus_read_byte_data(client, reg & 0xff);
1564END:
1565 return res;
1566}
1567
1568/* Must be called with data->update_lock held, except during initialization */
1569static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value)
1570{
1571 struct w83793_data *data = i2c_get_clientdata(client);
1572 int res;
1573 u8 new_bank = reg >> 8;
1574
1575 new_bank |= data->bank & 0xfc;
1576 if (data->bank != new_bank) {
1577 if ((res = i2c_smbus_write_byte_data
1578 (client, W83793_REG_BANKSEL, new_bank)) >= 0)
1579 data->bank = new_bank;
1580 else {
1581 dev_err(&client->dev,
1582 "set bank to %d failed, fall back "
1583 "to bank %d, write reg 0x%x error\n",
1584 new_bank, data->bank, reg);
1585 goto END;
1586 }
1587 }
1588
1589 res = i2c_smbus_write_byte_data(client, reg & 0xff, value);
1590END:
1591 return res;
1592}
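
/*
 * Register addresses in this driver are 16 bits wide: the high byte is the
 * bank (merged into W83793_REG_BANKSEL while keeping the upper bits of the
 * current bank value) and the low byte is the offset within that bank.
 * A sketch of the split performed by the two accessors above:
 */
static void sketch_split_reg(u16 reg, u8 cur_bank, u8 *new_bank, u8 *offset)
{
	*new_bank = (reg >> 8) | (cur_bank & 0xfc);	/* bank bits come from reg */
	*offset = reg & 0xff;				/* SMBus command byte */
}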
1593
1594static int __init sensors_w83793_init(void)
1595{
1596 return i2c_add_driver(&w83793_driver);
1597}
1598
1599static void __exit sensors_w83793_exit(void)
1600{
1601 i2c_del_driver(&w83793_driver);
1602}
1603
1604MODULE_AUTHOR("Yuan Mu");
1605MODULE_DESCRIPTION("w83793 driver");
1606MODULE_LICENSE("GPL");
1607
1608module_init(sensors_w83793_init);
1609module_exit(sensors_w83793_exit);
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index 33fbb47100a3..8e1e3f8e40a4 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -2,7 +2,7 @@
2 * i2c-ali1563.c - i2c driver for the ALi 1563 Southbridge 2 * i2c-ali1563.c - i2c driver for the ALi 1563 Southbridge
3 * 3 *
4 * Copyright (C) 2004 Patrick Mochel 4 * Copyright (C) 2004 Patrick Mochel
5 * 2005 Rudolf Marek <r.marek@sh.cvut.cz> 5 * 2005 Rudolf Marek <r.marek@assembler.cz>
6 * 6 *
7 * The 1563 southbridge is deceptively similar to the 1533, with a 7 * The 1563 southbridge is deceptively similar to the 1533, with a
8 * few notable exceptions. One of those happens to be the fact they 8 * few notable exceptions. One of those happens to be the fact they
diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c
index 60bef94cd25f..4ee56def61f2 100644
--- a/drivers/i2c/chips/tps65010.c
+++ b/drivers/i2c/chips/tps65010.c
@@ -82,7 +82,7 @@ struct tps65010 {
82 struct i2c_client client; 82 struct i2c_client client;
83 struct mutex lock; 83 struct mutex lock;
84 int irq; 84 int irq;
85 struct work_struct work; 85 struct delayed_work work;
86 struct dentry *file; 86 struct dentry *file;
87 unsigned charging:1; 87 unsigned charging:1;
88 unsigned por:1; 88 unsigned por:1;
@@ -328,7 +328,7 @@ static void tps65010_interrupt(struct tps65010 *tps)
328{ 328{
329 u8 tmp = 0, mask, poll; 329 u8 tmp = 0, mask, poll;
330 330
331 /* IRQs won't trigger irqs for certain events, but we can get 331 /* IRQs won't trigger for certain events, but we can get
332 * others by polling (normally, with external power applied). 332 * others by polling (normally, with external power applied).
333 */ 333 */
334 poll = 0; 334 poll = 0;
@@ -411,10 +411,11 @@ static void tps65010_interrupt(struct tps65010 *tps)
411} 411}
412 412
413/* handle IRQs and polling using keventd for now */ 413/* handle IRQs and polling using keventd for now */
414static void tps65010_work(void *_tps) 414static void tps65010_work(struct work_struct *work)
415{ 415{
416 struct tps65010 *tps = _tps; 416 struct tps65010 *tps;
417 417
418 tps = container_of(work, struct tps65010, work.work);
418 mutex_lock(&tps->lock); 419 mutex_lock(&tps->lock);
419 420
420 tps65010_interrupt(tps); 421 tps65010_interrupt(tps);
@@ -452,7 +453,7 @@ static irqreturn_t tps65010_irq(int irq, void *_tps)
452 453
453 disable_irq_nosync(irq); 454 disable_irq_nosync(irq);
454 set_bit(FLAG_IRQ_ENABLE, &tps->flags); 455 set_bit(FLAG_IRQ_ENABLE, &tps->flags);
455 (void) schedule_work(&tps->work); 456 (void) schedule_work(&tps->work.work);
456 return IRQ_HANDLED; 457 return IRQ_HANDLED;
457} 458}
458 459
@@ -465,13 +466,15 @@ static int __exit tps65010_detach_client(struct i2c_client *client)
465 struct tps65010 *tps; 466 struct tps65010 *tps;
466 467
467 tps = container_of(client, struct tps65010, client); 468 tps = container_of(client, struct tps65010, client);
469 free_irq(tps->irq, tps);
468#ifdef CONFIG_ARM 470#ifdef CONFIG_ARM
469 if (machine_is_omap_h2()) 471 if (machine_is_omap_h2())
470 omap_free_gpio(58); 472 omap_free_gpio(58);
471 if (machine_is_omap_osk()) 473 if (machine_is_omap_osk())
472 omap_free_gpio(OMAP_MPUIO(1)); 474 omap_free_gpio(OMAP_MPUIO(1));
473#endif 475#endif
474 free_irq(tps->irq, tps); 476 cancel_delayed_work(&tps->work);
477 flush_scheduled_work();
475 debugfs_remove(tps->file); 478 debugfs_remove(tps->file);
476 if (i2c_detach_client(client) == 0) 479 if (i2c_detach_client(client) == 0)
477 kfree(tps); 480 kfree(tps);
@@ -505,7 +508,7 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
505 return 0; 508 return 0;
506 509
507 mutex_init(&tps->lock); 510 mutex_init(&tps->lock);
508 INIT_WORK(&tps->work, tps65010_work, tps); 511 INIT_DELAYED_WORK(&tps->work, tps65010_work);
509 tps->irq = -1; 512 tps->irq = -1;
510 tps->client.addr = address; 513 tps->client.addr = address;
511 tps->client.adapter = bus; 514 tps->client.adapter = bus;
@@ -620,7 +623,7 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
620 (void) i2c_smbus_write_byte_data(&tps->client, TPS_MASK3, 0x0f 623 (void) i2c_smbus_write_byte_data(&tps->client, TPS_MASK3, 0x0f
621 | i2c_smbus_read_byte_data(&tps->client, TPS_MASK3)); 624 | i2c_smbus_read_byte_data(&tps->client, TPS_MASK3));
622 625
623 tps65010_work(tps); 626 tps65010_work(&tps->work.work);
624 627
625 tps->file = debugfs_create_file(DRIVER_NAME, S_IRUGO, NULL, 628 tps->file = debugfs_create_file(DRIVER_NAME, S_IRUGO, NULL,
626 tps, DEBUG_FOPS); 629 tps, DEBUG_FOPS);
@@ -672,7 +675,7 @@ int tps65010_set_vbus_draw(unsigned mA)
672 && test_and_set_bit( 675 && test_and_set_bit(
673 FLAG_VBUS_CHANGED, &the_tps->flags)) { 676 FLAG_VBUS_CHANGED, &the_tps->flags)) {
674 /* gadget drivers call this in_irq() */ 677 /* gadget drivers call this in_irq() */
675 (void) schedule_work(&the_tps->work); 678 (void) schedule_work(&the_tps->work.work);
676 } 679 }
677 local_irq_restore(flags); 680 local_irq_restore(flags);
678 681
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index e23bc0d62159..3f828052f8d2 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -796,7 +796,7 @@ endchoice
796config BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ 796config BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ
797 int "Maximum transfer size (KB) per request (up to 128)" 797 int "Maximum transfer size (KB) per request (up to 128)"
798 default "128" 798 default "128"
799 depends BLK_DEV_IDE_AU1XXX 799 depends on BLK_DEV_IDE_AU1XXX
800 800
801config IDE_ARM 801config IDE_ARM
802 def_bool ARM && (ARCH_A5K || ARCH_CLPS7500 || ARCH_RPC || ARCH_SHARK) 802 def_bool ARM && (ARCH_A5K || ARCH_CLPS7500 || ARCH_RPC || ARCH_SHARK)
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index e3a267622bb6..d33717c8afd4 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -2147,7 +2147,7 @@ static int ide_floppy_probe(ide_drive_t *drive)
2147 printk("ide-floppy: passing drive %s to ide-scsi emulation.\n", drive->name); 2147 printk("ide-floppy: passing drive %s to ide-scsi emulation.\n", drive->name);
2148 goto failed; 2148 goto failed;
2149 } 2149 }
2150 if ((floppy = (idefloppy_floppy_t *) kzalloc (sizeof (idefloppy_floppy_t), GFP_KERNEL)) == NULL) { 2150 if ((floppy = kzalloc(sizeof (idefloppy_floppy_t), GFP_KERNEL)) == NULL) {
2151 printk (KERN_ERR "ide-floppy: %s: Can't allocate a floppy structure\n", drive->name); 2151 printk (KERN_ERR "ide-floppy: %s: Can't allocate a floppy structure\n", drive->name);
2152 goto failed; 2152 goto failed;
2153 } 2153 }
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index e2f4bb549063..b3bcd1d7315e 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -2573,11 +2573,11 @@ static idetape_stage_t *__idetape_kmalloc_stage (idetape_tape_t *tape, int full,
2573 int pages = tape->pages_per_stage; 2573 int pages = tape->pages_per_stage;
2574 char *b_data = NULL; 2574 char *b_data = NULL;
2575 2575
2576 if ((stage = (idetape_stage_t *) kmalloc (sizeof (idetape_stage_t),GFP_KERNEL)) == NULL) 2576 if ((stage = kmalloc(sizeof (idetape_stage_t),GFP_KERNEL)) == NULL)
2577 return NULL; 2577 return NULL;
2578 stage->next = NULL; 2578 stage->next = NULL;
2579 2579
2580 bh = stage->bh = (struct idetape_bh *)kmalloc(sizeof(struct idetape_bh), GFP_KERNEL); 2580 bh = stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
2581 if (bh == NULL) 2581 if (bh == NULL)
2582 goto abort; 2582 goto abort;
2583 bh->b_reqnext = NULL; 2583 bh->b_reqnext = NULL;
@@ -2607,7 +2607,7 @@ static idetape_stage_t *__idetape_kmalloc_stage (idetape_tape_t *tape, int full,
2607 continue; 2607 continue;
2608 } 2608 }
2609 prev_bh = bh; 2609 prev_bh = bh;
2610 if ((bh = (struct idetape_bh *)kmalloc(sizeof(struct idetape_bh), GFP_KERNEL)) == NULL) { 2610 if ((bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL)) == NULL) {
2611 free_page((unsigned long) b_data); 2611 free_page((unsigned long) b_data);
2612 goto abort; 2612 goto abort;
2613 } 2613 }
@@ -4860,7 +4860,7 @@ static int ide_tape_probe(ide_drive_t *drive)
4860 printk(KERN_WARNING "ide-tape: Use drive %s with ide-scsi emulation and osst.\n", drive->name); 4860 printk(KERN_WARNING "ide-tape: Use drive %s with ide-scsi emulation and osst.\n", drive->name);
4861 printk(KERN_WARNING "ide-tape: OnStream support will be removed soon from ide-tape!\n"); 4861 printk(KERN_WARNING "ide-tape: OnStream support will be removed soon from ide-tape!\n");
4862 } 4862 }
4863 tape = (idetape_tape_t *) kzalloc (sizeof (idetape_tape_t), GFP_KERNEL); 4863 tape = kzalloc(sizeof (idetape_tape_t), GFP_KERNEL);
4864 if (tape == NULL) { 4864 if (tape == NULL) {
4865 printk(KERN_ERR "ide-tape: %s: Can't allocate a tape structure\n", drive->name); 4865 printk(KERN_ERR "ide-tape: %s: Can't allocate a tape structure\n", drive->name);
4866 goto failed; 4866 goto failed;
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index e993a51f250e..08119da06d54 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -4,6 +4,7 @@
4 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> 4 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
5 * Portions Copyright (C) 2001 Sun Microsystems, Inc. 5 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
6 * Portions Copyright (C) 2003 Red Hat Inc 6 * Portions Copyright (C) 2003 Red Hat Inc
7 * Portions Copyright (C) 2005-2006 MontaVista Software, Inc.
7 * 8 *
8 * Thanks to HighPoint Technologies for their assistance, and hardware. 9 * Thanks to HighPoint Technologies for their assistance, and hardware.
9 * Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his 10 * Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his
@@ -11,9 +12,11 @@
11 * development and support. 12 * development and support.
12 * 13 *
13 * 14 *
14 * Highpoint have their own driver (source except for the raid part) 15 * HighPoint has its own drivers (open source except for the RAID part)
15 * available from http://www.highpoint-tech.com/hpt3xx-opensource-v131.tgz 16 * available from http://www.highpoint-tech.com/BIOS%20+%20Driver/.
16 * This may be useful to anyone wanting to work on the mainstream hpt IDE. 17 * This may be useful to anyone wanting to work on this driver, however do not
18 * trust them too much since the code tends to become less and less meaningful
19 * as the time passes... :-/
17 * 20 *
18 * Note that final HPT370 support was done by force extraction of GPL. 21 * Note that final HPT370 support was done by force extraction of GPL.
19 * 22 *
@@ -52,6 +55,29 @@
52 * keeping me sane. 55 * keeping me sane.
53 * Alan Cox <alan@redhat.com> 56 * Alan Cox <alan@redhat.com>
54 * 57 *
58 * - fix the clock turnaround code: it was writing to the wrong ports when
59 * called for the secondary channel, caching the current clock mode per-
60 * channel caused the cached register value to get out of sync with the
61 * actual one, the channels weren't serialized, the turnaround shouldn't
62 * be done on 66 MHz PCI bus
63 * - avoid calibrating PLL twice as the second time results in a wrong PCI
64 * frequency and thus in the wrong timings for the secondary channel
65 * - disable UltraATA/133 for HPT372 by default (50 MHz DPLL clock do not
66 * allow for this speed anyway)
67 * - add support for HPT302N and HPT371N clocking (the same as for HPT372N)
68 * - HPT371/N are single channel chips, so avoid touching the primary channel
69 * which exists only virtually (there's no pins for it)
70 * - fix/remove bad/unused timing tables and use one set of tables for the whole
71 * HPT37x chip family; save space by introducing the separate transfer mode
72 * table in which the mode lookup is done
73 * - use f_CNT value saved by the HighPoint BIOS as reading it directly gives
74 * the wrong PCI frequency since DPLL has already been calibrated by BIOS
75 * - fix the hotswap code: it caused RESET- to glitch when tristating the bus,
76 * and for HPT36x the obsolete HDIO_TRISTATE_HWIF handler was called instead
77 * - pass to init_chipset() handlers a copy of the IDE PCI device structure as
78 * they tamper with its fields
79 * <source@mvista.com>
80 *
55 */ 81 */
56 82
57 83
@@ -76,8 +102,8 @@
76 102
77/* various tuning parameters */ 103/* various tuning parameters */
78#define HPT_RESET_STATE_ENGINE 104#define HPT_RESET_STATE_ENGINE
79#undef HPT_DELAY_INTERRUPT 105#undef HPT_DELAY_INTERRUPT
80#undef HPT_SERIALIZE_IO 106#define HPT_SERIALIZE_IO 0
81 107
82static const char *quirk_drives[] = { 108static const char *quirk_drives[] = {
83 "QUANTUM FIREBALLlct08 08", 109 "QUANTUM FIREBALLlct08 08",
@@ -141,305 +167,175 @@ static const char *bad_ata33[] = {
141 NULL 167 NULL
142}; 168};
143 169
144struct chipset_bus_clock_list_entry { 170static u8 xfer_speeds[] = {
145 u8 xfer_speed; 171 XFER_UDMA_6,
146 unsigned int chipset_settings; 172 XFER_UDMA_5,
173 XFER_UDMA_4,
174 XFER_UDMA_3,
175 XFER_UDMA_2,
176 XFER_UDMA_1,
177 XFER_UDMA_0,
178
179 XFER_MW_DMA_2,
180 XFER_MW_DMA_1,
181 XFER_MW_DMA_0,
182
183 XFER_PIO_4,
184 XFER_PIO_3,
185 XFER_PIO_2,
186 XFER_PIO_1,
187 XFER_PIO_0
147}; 188};
148 189
149/* key for bus clock timings 190/* Key for bus clock timings
150 * bit 191 * 36x 37x
151 * 0:3 data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW 192 * bits bits
152 * DMA. cycles = value + 1 193 * 0:3 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA.
153 * 4:8 data_low_time. active time of DIOW_/DIOR_ for PIO and MW 194 * cycles = value + 1
154 * DMA. cycles = value + 1 195 * 4:7 4:8 data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA.
155 * 9:12 cmd_high_time. inactive time of DIOW_/DIOR_ during task file 196 * cycles = value + 1
156 * register access. 197 * 8:11 9:12 cmd_high_time. Inactive time of DIOW_/DIOR_ during task file
157 * 13:17 cmd_low_time. active time of DIOW_/DIOR_ during task file 198 * register access.
158 * register access. 199 * 12:15 13:17 cmd_low_time. Active time of DIOW_/DIOR_ during task file
159 * 18:21 udma_cycle_time. clock freq and clock cycles for UDMA xfer. 200 * register access.
160 * during task file register access. 201 * 16:18 18:20 udma_cycle_time. Clock cycles for UDMA xfer.
161 * 22:24 pre_high_time. time to initialize 1st cycle for PIO and MW DMA 202 * - 21 CLK frequency: 0=ATA clock, 1=dual ATA clock.
162 * xfer. 203 * 19:21 22:24 pre_high_time. Time to initialize the 1st cycle for PIO and
163 * 25:27 cmd_pre_high_time. time to initialize 1st PIO cycle for task 204 * MW DMA xfer.
164 * register access. 205 * 22:24 25:27 cmd_pre_high_time. Time to initialize the 1st PIO cycle for
165 * 28 UDMA enable 206 * task file register access.
166 * 29 DMA enable 207 * 28 28 UDMA enable.
167 * 30 PIO_MST enable. if set, the chip is in bus master mode during 208 * 29 29 DMA enable.
168 * PIO. 209 * 30 30 PIO MST enable. If set, the chip is in bus master mode during
169 * 31 FIFO enable. 210 * PIO xfer.
211 * 31 31 FIFO enable.
170 */ 212 */
171static struct chipset_bus_clock_list_entry forty_base_hpt366[] = {
172 { XFER_UDMA_4, 0x900fd943 },
173 { XFER_UDMA_3, 0x900ad943 },
174 { XFER_UDMA_2, 0x900bd943 },
175 { XFER_UDMA_1, 0x9008d943 },
176 { XFER_UDMA_0, 0x9008d943 },
177
178 { XFER_MW_DMA_2, 0xa008d943 },
179 { XFER_MW_DMA_1, 0xa010d955 },
180 { XFER_MW_DMA_0, 0xa010d9fc },
181
182 { XFER_PIO_4, 0xc008d963 },
183 { XFER_PIO_3, 0xc010d974 },
184 { XFER_PIO_2, 0xc010d997 },
185 { XFER_PIO_1, 0xc010d9c7 },
186 { XFER_PIO_0, 0xc018d9d9 },
187 { 0, 0x0120d9d9 }
188};
189
190static struct chipset_bus_clock_list_entry thirty_three_base_hpt366[] = {
191 { XFER_UDMA_4, 0x90c9a731 },
192 { XFER_UDMA_3, 0x90cfa731 },
193 { XFER_UDMA_2, 0x90caa731 },
194 { XFER_UDMA_1, 0x90cba731 },
195 { XFER_UDMA_0, 0x90c8a731 },
196
197 { XFER_MW_DMA_2, 0xa0c8a731 },
198 { XFER_MW_DMA_1, 0xa0c8a732 }, /* 0xa0c8a733 */
199 { XFER_MW_DMA_0, 0xa0c8a797 },
200
201 { XFER_PIO_4, 0xc0c8a731 },
202 { XFER_PIO_3, 0xc0c8a742 },
203 { XFER_PIO_2, 0xc0d0a753 },
204 { XFER_PIO_1, 0xc0d0a7a3 }, /* 0xc0d0a793 */
205 { XFER_PIO_0, 0xc0d0a7aa }, /* 0xc0d0a7a7 */
206 { 0, 0x0120a7a7 }
207};
208
209static struct chipset_bus_clock_list_entry twenty_five_base_hpt366[] = {
210 { XFER_UDMA_4, 0x90c98521 },
211 { XFER_UDMA_3, 0x90cf8521 },
212 { XFER_UDMA_2, 0x90cf8521 },
213 { XFER_UDMA_1, 0x90cb8521 },
214 { XFER_UDMA_0, 0x90cb8521 },
215
216 { XFER_MW_DMA_2, 0xa0ca8521 },
217 { XFER_MW_DMA_1, 0xa0ca8532 },
218 { XFER_MW_DMA_0, 0xa0ca8575 },
219
220 { XFER_PIO_4, 0xc0ca8521 },
221 { XFER_PIO_3, 0xc0ca8532 },
222 { XFER_PIO_2, 0xc0ca8542 },
223 { XFER_PIO_1, 0xc0d08572 },
224 { XFER_PIO_0, 0xc0d08585 },
225 { 0, 0x01208585 }
226};
227
228/* from highpoint documentation. these are old values */
229static struct chipset_bus_clock_list_entry thirty_three_base_hpt370[] = {
230/* { XFER_UDMA_5, 0x1A85F442, 0x16454e31 }, */
231 { XFER_UDMA_5, 0x16454e31 },
232 { XFER_UDMA_4, 0x16454e31 },
233 { XFER_UDMA_3, 0x166d4e31 },
234 { XFER_UDMA_2, 0x16494e31 },
235 { XFER_UDMA_1, 0x164d4e31 },
236 { XFER_UDMA_0, 0x16514e31 },
237
238 { XFER_MW_DMA_2, 0x26514e21 },
239 { XFER_MW_DMA_1, 0x26514e33 },
240 { XFER_MW_DMA_0, 0x26514e97 },
241
242 { XFER_PIO_4, 0x06514e21 },
243 { XFER_PIO_3, 0x06514e22 },
244 { XFER_PIO_2, 0x06514e33 },
245 { XFER_PIO_1, 0x06914e43 },
246 { XFER_PIO_0, 0x06914e57 },
247 { 0, 0x06514e57 }
248};
249
250static struct chipset_bus_clock_list_entry sixty_six_base_hpt370[] = {
251 { XFER_UDMA_5, 0x14846231 },
252 { XFER_UDMA_4, 0x14886231 },
253 { XFER_UDMA_3, 0x148c6231 },
254 { XFER_UDMA_2, 0x148c6231 },
255 { XFER_UDMA_1, 0x14906231 },
256 { XFER_UDMA_0, 0x14986231 },
257
258 { XFER_MW_DMA_2, 0x26514e21 },
259 { XFER_MW_DMA_1, 0x26514e33 },
260 { XFER_MW_DMA_0, 0x26514e97 },
261
262 { XFER_PIO_4, 0x06514e21 },
263 { XFER_PIO_3, 0x06514e22 },
264 { XFER_PIO_2, 0x06514e33 },
265 { XFER_PIO_1, 0x06914e43 },
266 { XFER_PIO_0, 0x06914e57 },
267 { 0, 0x06514e57 }
268};
269
270/* these are the current (4 sep 2001) timings from highpoint */
271static struct chipset_bus_clock_list_entry thirty_three_base_hpt370a[] = {
272 { XFER_UDMA_5, 0x12446231 },
273 { XFER_UDMA_4, 0x12446231 },
274 { XFER_UDMA_3, 0x126c6231 },
275 { XFER_UDMA_2, 0x12486231 },
276 { XFER_UDMA_1, 0x124c6233 },
277 { XFER_UDMA_0, 0x12506297 },
278
279 { XFER_MW_DMA_2, 0x22406c31 },
280 { XFER_MW_DMA_1, 0x22406c33 },
281 { XFER_MW_DMA_0, 0x22406c97 },
282
283 { XFER_PIO_4, 0x06414e31 },
284 { XFER_PIO_3, 0x06414e42 },
285 { XFER_PIO_2, 0x06414e53 },
286 { XFER_PIO_1, 0x06814e93 },
287 { XFER_PIO_0, 0x06814ea7 },
288 { 0, 0x06814ea7 }
289};
290
291/* 2x 33MHz timings */
292static struct chipset_bus_clock_list_entry sixty_six_base_hpt370a[] = {
293 { XFER_UDMA_5, 0x1488e673 },
294 { XFER_UDMA_4, 0x1488e673 },
295 { XFER_UDMA_3, 0x1498e673 },
296 { XFER_UDMA_2, 0x1490e673 },
297 { XFER_UDMA_1, 0x1498e677 },
298 { XFER_UDMA_0, 0x14a0e73f },
299
300 { XFER_MW_DMA_2, 0x2480fa73 },
301 { XFER_MW_DMA_1, 0x2480fa77 },
302 { XFER_MW_DMA_0, 0x2480fb3f },
303
304 { XFER_PIO_4, 0x0c82be73 },
305 { XFER_PIO_3, 0x0c82be95 },
306 { XFER_PIO_2, 0x0c82beb7 },
307 { XFER_PIO_1, 0x0d02bf37 },
308 { XFER_PIO_0, 0x0d02bf5f },
309 { 0, 0x0d02bf5f }
310};
311 213
312static struct chipset_bus_clock_list_entry fifty_base_hpt370a[] = { 214static u32 forty_base_hpt36x[] = {
313 { XFER_UDMA_5, 0x12848242 }, 215 /* XFER_UDMA_6 */ 0x900fd943,
314 { XFER_UDMA_4, 0x12ac8242 }, 216 /* XFER_UDMA_5 */ 0x900fd943,
315 { XFER_UDMA_3, 0x128c8242 }, 217 /* XFER_UDMA_4 */ 0x900fd943,
316 { XFER_UDMA_2, 0x120c8242 }, 218 /* XFER_UDMA_3 */ 0x900ad943,
317 { XFER_UDMA_1, 0x12148254 }, 219 /* XFER_UDMA_2 */ 0x900bd943,
318 { XFER_UDMA_0, 0x121882ea }, 220 /* XFER_UDMA_1 */ 0x9008d943,
319 221 /* XFER_UDMA_0 */ 0x9008d943,
320 { XFER_MW_DMA_2, 0x22808242 }, 222
321 { XFER_MW_DMA_1, 0x22808254 }, 223 /* XFER_MW_DMA_2 */ 0xa008d943,
322 { XFER_MW_DMA_0, 0x228082ea }, 224 /* XFER_MW_DMA_1 */ 0xa010d955,
323 225 /* XFER_MW_DMA_0 */ 0xa010d9fc,
324 { XFER_PIO_4, 0x0a81f442 }, 226
325 { XFER_PIO_3, 0x0a81f443 }, 227 /* XFER_PIO_4 */ 0xc008d963,
326 { XFER_PIO_2, 0x0a81f454 }, 228 /* XFER_PIO_3 */ 0xc010d974,
327 { XFER_PIO_1, 0x0ac1f465 }, 229 /* XFER_PIO_2 */ 0xc010d997,
328 { XFER_PIO_0, 0x0ac1f48a }, 230 /* XFER_PIO_1 */ 0xc010d9c7,
329 { 0, 0x0ac1f48a } 231 /* XFER_PIO_0 */ 0xc018d9d9
330}; 232};
331 233
332static struct chipset_bus_clock_list_entry thirty_three_base_hpt372[] = { 234static u32 thirty_three_base_hpt36x[] = {
333 { XFER_UDMA_6, 0x1c81dc62 }, 235 /* XFER_UDMA_6 */ 0x90c9a731,
334 { XFER_UDMA_5, 0x1c6ddc62 }, 236 /* XFER_UDMA_5 */ 0x90c9a731,
335 { XFER_UDMA_4, 0x1c8ddc62 }, 237 /* XFER_UDMA_4 */ 0x90c9a731,
336 { XFER_UDMA_3, 0x1c8edc62 }, /* checkme */ 238 /* XFER_UDMA_3 */ 0x90cfa731,
337 { XFER_UDMA_2, 0x1c91dc62 }, 239 /* XFER_UDMA_2 */ 0x90caa731,
338 { XFER_UDMA_1, 0x1c9adc62 }, /* checkme */ 240 /* XFER_UDMA_1 */ 0x90cba731,
339 { XFER_UDMA_0, 0x1c82dc62 }, /* checkme */ 241 /* XFER_UDMA_0 */ 0x90c8a731,
340 242
341 { XFER_MW_DMA_2, 0x2c829262 }, 243 /* XFER_MW_DMA_2 */ 0xa0c8a731,
342 { XFER_MW_DMA_1, 0x2c829266 }, /* checkme */ 244 /* XFER_MW_DMA_1 */ 0xa0c8a732, /* 0xa0c8a733 */
343 { XFER_MW_DMA_0, 0x2c82922e }, /* checkme */ 245 /* XFER_MW_DMA_0 */ 0xa0c8a797,
344 246
345 { XFER_PIO_4, 0x0c829c62 }, 247 /* XFER_PIO_4 */ 0xc0c8a731,
346 { XFER_PIO_3, 0x0c829c84 }, 248 /* XFER_PIO_3 */ 0xc0c8a742,
347 { XFER_PIO_2, 0x0c829ca6 }, 249 /* XFER_PIO_2 */ 0xc0d0a753,
348 { XFER_PIO_1, 0x0d029d26 }, 250 /* XFER_PIO_1 */ 0xc0d0a7a3, /* 0xc0d0a793 */
349 { XFER_PIO_0, 0x0d029d5e }, 251 /* XFER_PIO_0 */ 0xc0d0a7aa /* 0xc0d0a7a7 */
350 { 0, 0x0d029d5e }
351}; 252};
352 253
353static struct chipset_bus_clock_list_entry fifty_base_hpt372[] = { 254static u32 twenty_five_base_hpt36x[] = {
354 { XFER_UDMA_5, 0x12848242 }, 255 /* XFER_UDMA_6 */ 0x90c98521,
355 { XFER_UDMA_4, 0x12ac8242 }, 256 /* XFER_UDMA_5 */ 0x90c98521,
356 { XFER_UDMA_3, 0x128c8242 }, 257 /* XFER_UDMA_4 */ 0x90c98521,
357 { XFER_UDMA_2, 0x120c8242 }, 258 /* XFER_UDMA_3 */ 0x90cf8521,
358 { XFER_UDMA_1, 0x12148254 }, 259 /* XFER_UDMA_2 */ 0x90cf8521,
359 { XFER_UDMA_0, 0x121882ea }, 260 /* XFER_UDMA_1 */ 0x90cb8521,
360 261 /* XFER_UDMA_0 */ 0x90cb8521,
361 { XFER_MW_DMA_2, 0x22808242 }, 262
362 { XFER_MW_DMA_1, 0x22808254 }, 263 /* XFER_MW_DMA_2 */ 0xa0ca8521,
363 { XFER_MW_DMA_0, 0x228082ea }, 264 /* XFER_MW_DMA_1 */ 0xa0ca8532,
364 265 /* XFER_MW_DMA_0 */ 0xa0ca8575,
365 { XFER_PIO_4, 0x0a81f442 }, 266
366 { XFER_PIO_3, 0x0a81f443 }, 267 /* XFER_PIO_4 */ 0xc0ca8521,
367 { XFER_PIO_2, 0x0a81f454 }, 268 /* XFER_PIO_3 */ 0xc0ca8532,
368 { XFER_PIO_1, 0x0ac1f465 }, 269 /* XFER_PIO_2 */ 0xc0ca8542,
369 { XFER_PIO_0, 0x0ac1f48a }, 270 /* XFER_PIO_1 */ 0xc0d08572,
370 { 0, 0x0a81f443 } 271 /* XFER_PIO_0 */ 0xc0d08585
371}; 272};
372 273
373static struct chipset_bus_clock_list_entry sixty_six_base_hpt372[] = { 274static u32 thirty_three_base_hpt37x[] = {
374 { XFER_UDMA_6, 0x1c869c62 }, 275 /* XFER_UDMA_6 */ 0x12446231, /* 0x12646231 ?? */
375 { XFER_UDMA_5, 0x1cae9c62 }, 276 /* XFER_UDMA_5 */ 0x12446231,
376 { XFER_UDMA_4, 0x1c8a9c62 }, 277 /* XFER_UDMA_4 */ 0x12446231,
377 { XFER_UDMA_3, 0x1c8e9c62 }, 278 /* XFER_UDMA_3 */ 0x126c6231,
378 { XFER_UDMA_2, 0x1c929c62 }, 279 /* XFER_UDMA_2 */ 0x12486231,
379 { XFER_UDMA_1, 0x1c9a9c62 }, 280 /* XFER_UDMA_1 */ 0x124c6233,
380 { XFER_UDMA_0, 0x1c829c62 }, 281 /* XFER_UDMA_0 */ 0x12506297,
381 282
382 { XFER_MW_DMA_2, 0x2c829c62 }, 283 /* XFER_MW_DMA_2 */ 0x22406c31,
383 { XFER_MW_DMA_1, 0x2c829c66 }, 284 /* XFER_MW_DMA_1 */ 0x22406c33,
384 { XFER_MW_DMA_0, 0x2c829d2e }, 285 /* XFER_MW_DMA_0 */ 0x22406c97,
385 286
386 { XFER_PIO_4, 0x0c829c62 }, 287 /* XFER_PIO_4 */ 0x06414e31,
387 { XFER_PIO_3, 0x0c829c84 }, 288 /* XFER_PIO_3 */ 0x06414e42,
388 { XFER_PIO_2, 0x0c829ca6 }, 289 /* XFER_PIO_2 */ 0x06414e53,
389 { XFER_PIO_1, 0x0d029d26 }, 290 /* XFER_PIO_1 */ 0x06814e93,
390 { XFER_PIO_0, 0x0d029d5e }, 291 /* XFER_PIO_0 */ 0x06814ea7
391 { 0, 0x0d029d26 }
392}; 292};
393 293
394static struct chipset_bus_clock_list_entry thirty_three_base_hpt374[] = { 294static u32 fifty_base_hpt37x[] = {
395 { XFER_UDMA_6, 0x12808242 }, 295 /* XFER_UDMA_6 */ 0x12848242,
396 { XFER_UDMA_5, 0x12848242 }, 296 /* XFER_UDMA_5 */ 0x12848242,
397 { XFER_UDMA_4, 0x12ac8242 }, 297 /* XFER_UDMA_4 */ 0x12ac8242,
398 { XFER_UDMA_3, 0x128c8242 }, 298 /* XFER_UDMA_3 */ 0x128c8242,
399 { XFER_UDMA_2, 0x120c8242 }, 299 /* XFER_UDMA_2 */ 0x120c8242,
400 { XFER_UDMA_1, 0x12148254 }, 300 /* XFER_UDMA_1 */ 0x12148254,
401 { XFER_UDMA_0, 0x121882ea }, 301 /* XFER_UDMA_0 */ 0x121882ea,
402 302
403 { XFER_MW_DMA_2, 0x22808242 }, 303 /* XFER_MW_DMA_2 */ 0x22808242,
404 { XFER_MW_DMA_1, 0x22808254 }, 304 /* XFER_MW_DMA_1 */ 0x22808254,
405 { XFER_MW_DMA_0, 0x228082ea }, 305 /* XFER_MW_DMA_0 */ 0x228082ea,
406 306
407 { XFER_PIO_4, 0x0a81f442 }, 307 /* XFER_PIO_4 */ 0x0a81f442,
408 { XFER_PIO_3, 0x0a81f443 }, 308 /* XFER_PIO_3 */ 0x0a81f443,
409 { XFER_PIO_2, 0x0a81f454 }, 309 /* XFER_PIO_2 */ 0x0a81f454,
410 { XFER_PIO_1, 0x0ac1f465 }, 310 /* XFER_PIO_1 */ 0x0ac1f465,
411 { XFER_PIO_0, 0x0ac1f48a }, 311 /* XFER_PIO_0 */ 0x0ac1f48a
412 { 0, 0x06814e93 }
413}; 312};
414 313
415/* FIXME: 50MHz timings for HPT374 */ 314static u32 sixty_six_base_hpt37x[] = {
416 315 /* XFER_UDMA_6 */ 0x1c869c62,
417#if 0 316 /* XFER_UDMA_5 */ 0x1cae9c62, /* 0x1c8a9c62 */
418static struct chipset_bus_clock_list_entry sixty_six_base_hpt374[] = { 317 /* XFER_UDMA_4 */ 0x1c8a9c62,
419 { XFER_UDMA_6, 0x12406231 }, /* checkme */ 318 /* XFER_UDMA_3 */ 0x1c8e9c62,
420 { XFER_UDMA_5, 0x12446231 }, /* 0x14846231 */ 319 /* XFER_UDMA_2 */ 0x1c929c62,
421 { XFER_UDMA_4, 0x16814ea7 }, /* 0x14886231 */ 320 /* XFER_UDMA_1 */ 0x1c9a9c62,
422 { XFER_UDMA_3, 0x16814ea7 }, /* 0x148c6231 */ 321 /* XFER_UDMA_0 */ 0x1c829c62,
423 { XFER_UDMA_2, 0x16814ea7 }, /* 0x148c6231 */ 322
424 { XFER_UDMA_1, 0x16814ea7 }, /* 0x14906231 */ 323 /* XFER_MW_DMA_2 */ 0x2c829c62,
425 { XFER_UDMA_0, 0x16814ea7 }, /* 0x14986231 */ 324 /* XFER_MW_DMA_1 */ 0x2c829c66,
426 { XFER_MW_DMA_2, 0x16814ea7 }, /* 0x26514e21 */ 325 /* XFER_MW_DMA_0 */ 0x2c829d2e,
427 { XFER_MW_DMA_1, 0x16814ea7 }, /* 0x26514e97 */ 326
428 { XFER_MW_DMA_0, 0x16814ea7 }, /* 0x26514e97 */ 327 /* XFER_PIO_4 */ 0x0c829c62,
429 { XFER_PIO_4, 0x06814ea7 }, /* 0x06514e21 */ 328 /* XFER_PIO_3 */ 0x0c829c84,
430 { XFER_PIO_3, 0x06814ea7 }, /* 0x06514e22 */ 329 /* XFER_PIO_2 */ 0x0c829ca6,
431 { XFER_PIO_2, 0x06814ea7 }, /* 0x06514e33 */ 330 /* XFER_PIO_1 */ 0x0d029d26,
432 { XFER_PIO_1, 0x06814ea7 }, /* 0x06914e43 */ 331 /* XFER_PIO_0 */ 0x0d029d5e
433 { XFER_PIO_0, 0x06814ea7 }, /* 0x06914e57 */
434 { 0, 0x06814ea7 }
435}; 332};
436#endif
437 333
438#define HPT366_DEBUG_DRIVE_INFO 0 334#define HPT366_DEBUG_DRIVE_INFO 0
439#define HPT374_ALLOW_ATA133_6 0 335#define HPT374_ALLOW_ATA133_6 0
440#define HPT371_ALLOW_ATA133_6 0 336#define HPT371_ALLOW_ATA133_6 0
441#define HPT302_ALLOW_ATA133_6 0 337#define HPT302_ALLOW_ATA133_6 0
442#define HPT372_ALLOW_ATA133_6 1 338#define HPT372_ALLOW_ATA133_6 0
443#define HPT370_ALLOW_ATA100_5 1 339#define HPT370_ALLOW_ATA100_5 1
444#define HPT366_ALLOW_ATA66_4 1 340#define HPT366_ALLOW_ATA66_4 1
445#define HPT366_ALLOW_ATA66_3 1 341#define HPT366_ALLOW_ATA66_3 1
@@ -461,9 +357,10 @@ struct hpt_info
461 int revision; /* Chipset revision */ 357 int revision; /* Chipset revision */
462 int flags; /* Chipset properties */ 358 int flags; /* Chipset properties */
463#define PLL_MODE 1 359#define PLL_MODE 1
464#define IS_372N 2 360#define IS_3xxN 2
361#define PCI_66MHZ 4
465 /* Speed table */ 362 /* Speed table */
466 struct chipset_bus_clock_list_entry *speed; 363 u32 *speed;
467}; 364};
468 365
469/* 366/*
@@ -600,12 +497,20 @@ static int check_in_drive_lists (ide_drive_t *drive, const char **list)
600 return 0; 497 return 0;
601} 498}
602 499
603static unsigned int pci_bus_clock_list (u8 speed, struct chipset_bus_clock_list_entry * chipset_table) 500static u32 pci_bus_clock_list(u8 speed, u32 *chipset_table)
604{ 501{
605 for ( ; chipset_table->xfer_speed ; chipset_table++) 502 int i;
606 if (chipset_table->xfer_speed == speed) 503
607 return chipset_table->chipset_settings; 504 /*
608 return chipset_table->chipset_settings; 505 * Lookup the transfer mode table to get the index into
506 * the timing table.
507 *
508 * NOTE: For XFER_PIO_SLOW, PIO mode 0 timings will be used.
509 */
510 for (i = 0; i < ARRAY_SIZE(xfer_speeds) - 1; i++)
511 if (xfer_speeds[i] == speed)
512 break;
513 return chipset_table[i];
609} 514}
610 515
611static int hpt36x_tune_chipset(ide_drive_t *drive, u8 xferspeed) 516static int hpt36x_tune_chipset(ide_drive_t *drive, u8 xferspeed)
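
For reference, a self-contained user-space sketch of the lookup above: the transfer-mode code is searched in a small xfer_speeds[] table whose ordering mirrors the flat u32 timing arrays, and unknown codes fall through to the last (PIO mode 0) slot. The mode codes below are illustrative stand-ins; the timing values are copied from thirty_three_base_hpt36x purely for the example.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative transfer-mode codes, laid out UDMA6..0, MWDMA2..0, PIO4..0
 * in the same order as the flat timing arrays. */
static const uint8_t xfer_speeds[] = {
    0x46, 0x45, 0x44, 0x43, 0x42, 0x41, 0x40,
    0x22, 0x21, 0x20,
    0x0c, 0x0b, 0x0a, 0x09, 0x08
};

static const uint32_t thirty_three_base[] = {   /* one entry per mode above */
    0x90c9a731, 0x90c9a731, 0x90c9a731, 0x90cfa731, 0x90caa731, 0x90cba731, 0x90c8a731,
    0xa0c8a731, 0xa0c8a732, 0xa0c8a797,
    0xc0c8a731, 0xc0c8a742, 0xc0d0a753, 0xc0d0a7a3, 0xc0d0a7aa
};

static uint32_t clock_for(uint8_t speed, const uint32_t *table)
{
    size_t i;

    /* Unknown speeds fall through to the last (slowest, PIO 0) entry. */
    for (i = 0; i < sizeof(xfer_speeds) - 1; i++)
        if (xfer_speeds[i] == speed)
            break;
    return table[i];
}

int main(void)
{
    printf("0x%08x\n", (unsigned)clock_for(0x42, thirty_three_base)); /* UDMA2 timing */
    return 0;
}
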
@@ -956,156 +861,127 @@ static int hpt374_ide_dma_end (ide_drive_t *drive)
956} 861}
957 862
958/** 863/**
959 * hpt372n_set_clock - perform clock switching dance 864 * hpt3xxn_set_clock - perform clock switching dance
960 * @drive: Drive to switch 865 * @hwif: hwif to switch
961 * @mode: Switching mode (0x21 for write, 0x23 otherwise) 866 * @mode: clocking mode (0x21 for write, 0x23 otherwise)
962 * 867 *
963 * Switch the DPLL clock on the HPT372N devices. This is a 868 * Switch the DPLL clock on the HPT3xxN devices. This is a right mess.
964 * right mess. 869 * NOTE: avoid touching the disabled primary channel on HPT371N -- it
870 * doesn't physically exist anyway...
965 */ 871 */
966 872
967static void hpt372n_set_clock(ide_drive_t *drive, int mode) 873static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode)
968{ 874{
969 ide_hwif_t *hwif = HWIF(drive); 875 u8 mcr1, scr2 = hwif->INB(hwif->dma_master + 0x7b);
970 876
971 /* FIXME: should we check for DMA active and BUG() */ 877 if ((scr2 & 0x7f) == mode)
878 return;
879
880 /* MISC. control register 1 has the channel enable bit... */
881 mcr1 = hwif->INB(hwif->dma_master + 0x70);
882
972 /* Tristate the bus */ 883 /* Tristate the bus */
973 outb(0x80, hwif->dma_base+0x73); 884 if (mcr1 & 0x04)
974 outb(0x80, hwif->dma_base+0x77); 885 hwif->OUTB(0x80, hwif->dma_master + 0x73);
975 886 hwif->OUTB(0x80, hwif->dma_master + 0x77);
887
976 /* Switch clock and reset channels */ 888 /* Switch clock and reset channels */
977 outb(mode, hwif->dma_base+0x7B); 889 hwif->OUTB(mode, hwif->dma_master + 0x7b);
978 outb(0xC0, hwif->dma_base+0x79); 890 hwif->OUTB(0xc0, hwif->dma_master + 0x79);
979 891
980 /* Reset state machines */ 892 /* Reset state machines */
981 outb(0x37, hwif->dma_base+0x70); 893 if (mcr1 & 0x04)
982 outb(0x37, hwif->dma_base+0x74); 894 hwif->OUTB(0x37, hwif->dma_master + 0x70);
983 895 hwif->OUTB(0x37, hwif->dma_master + 0x74);
896
984 /* Complete reset */ 897 /* Complete reset */
985 outb(0x00, hwif->dma_base+0x79); 898 hwif->OUTB(0x00, hwif->dma_master + 0x79);
986 899
987 /* Reconnect channels to bus */ 900 /* Reconnect channels to bus */
988 outb(0x00, hwif->dma_base+0x73); 901 if (mcr1 & 0x04)
989 outb(0x00, hwif->dma_base+0x77); 902 hwif->OUTB(0x00, hwif->dma_master + 0x73);
903 hwif->OUTB(0x00, hwif->dma_master + 0x77);
990} 904}
991 905
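
A user-space sketch of the clock-switch sequence above, with the INB/OUTB accessors on the bus-master base replaced by a dummy register array so the order of writes can be traced; the offsets and values follow the code, everything else is scaffolding.

#include <stdio.h>
#include <stdint.h>

static uint8_t regs[0x80];

static uint8_t rd(unsigned off)              { return regs[off]; }
static void    wr(uint8_t val, unsigned off) { regs[off] = val;
                                               printf("reg 0x%02x <- 0x%02x\n", off, (unsigned)val); }

static void set_clock(uint8_t mode)
{
    uint8_t scr2 = rd(0x7b), mcr1;

    if ((scr2 & 0x7f) == mode)          /* already on the wanted clock */
        return;
    mcr1 = rd(0x70);                    /* bit 2: primary channel enabled */

    if (mcr1 & 0x04) wr(0x80, 0x73);    /* tristate the bus */
    wr(0x80, 0x77);
    wr(mode, 0x7b);                     /* switch clock, reset channels */
    wr(0xc0, 0x79);
    if (mcr1 & 0x04) wr(0x37, 0x70);    /* reset state machines */
    wr(0x37, 0x74);
    wr(0x00, 0x79);                     /* complete reset */
    if (mcr1 & 0x04) wr(0x00, 0x73);    /* reconnect channels to the bus */
    wr(0x00, 0x77);
}

int main(void)
{
    regs[0x70] = 0x04;                  /* pretend the primary channel exists */
    set_clock(0x23);                    /* select the read clock */
    return 0;
}
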
992/** 906/**
993 * hpt372n_rw_disk - prepare for I/O 907 * hpt3xxn_rw_disk - prepare for I/O
994 * @drive: drive for command 908 * @drive: drive for command
995 * @rq: block request structure 909 * @rq: block request structure
996 * 910 *
997 * This is called when a disk I/O is issued to the 372N. 911 * This is called when a disk I/O is issued to HPT3xxN.
998 * We need it because of the clock switching. 912 * We need it because of the clock switching.
999 */ 913 */
1000 914
1001static void hpt372n_rw_disk(ide_drive_t *drive, struct request *rq) 915static void hpt3xxn_rw_disk(ide_drive_t *drive, struct request *rq)
1002{
1003 ide_hwif_t *hwif = drive->hwif;
1004 int wantclock;
1005
1006 wantclock = rq_data_dir(rq) ? 0x23 : 0x21;
1007
1008 if (hwif->config_data != wantclock) {
1009 hpt372n_set_clock(drive, wantclock);
1010 hwif->config_data = wantclock;
1011 }
1012}
1013
1014/*
1015 * Since SUN Cobalt is attempting to do this operation, I should disclose
1016 * this has been a long time ago Thu Jul 27 16:40:57 2000 was the patch date
1017 * HOTSWAP ATA Infrastructure.
1018 */
1019
1020static void hpt3xx_reset (ide_drive_t *drive)
1021{
1022}
1023
1024static int hpt3xx_tristate (ide_drive_t * drive, int state)
1025{ 916{
1026 ide_hwif_t *hwif = HWIF(drive); 917 ide_hwif_t *hwif = HWIF(drive);
1027 struct pci_dev *dev = hwif->pci_dev; 918 u8 wantclock = rq_data_dir(rq) ? 0x23 : 0x21;
1028 u8 reg59h = 0, reset = (hwif->channel) ? 0x80 : 0x40;
1029 u8 regXXh = 0, state_reg= (hwif->channel) ? 0x57 : 0x53;
1030
1031 pci_read_config_byte(dev, 0x59, &reg59h);
1032 pci_read_config_byte(dev, state_reg, &regXXh);
1033 919
1034 if (state) { 920 hpt3xxn_set_clock(hwif, wantclock);
1035 (void) ide_do_reset(drive);
1036 pci_write_config_byte(dev, state_reg, regXXh|0x80);
1037 pci_write_config_byte(dev, 0x59, reg59h|reset);
1038 } else {
1039 pci_write_config_byte(dev, 0x59, reg59h & ~(reset));
1040 pci_write_config_byte(dev, state_reg, regXXh & ~(0x80));
1041 (void) ide_do_reset(drive);
1042 }
1043 return 0;
1044} 921}
1045 922
1046/* 923/*
1047 * set/get power state for a drive. 924 * Set/get power state for a drive.
1048 * turning the power off does the following things:
1049 * 1) soft-reset the drive
1050 * 2) tri-states the ide bus
1051 * 925 *
1052 * when we turn things back on, we need to re-initialize things. 926 * When we turn the power back on, we need to re-initialize things.
1053 */ 927 */
1054#define TRISTATE_BIT 0x8000 928#define TRISTATE_BIT 0x8000
1055static int hpt370_busproc(ide_drive_t * drive, int state) 929
930static int hpt3xx_busproc(ide_drive_t *drive, int state)
1056{ 931{
1057 ide_hwif_t *hwif = drive->hwif; 932 ide_hwif_t *hwif = drive->hwif;
1058 struct pci_dev *dev = hwif->pci_dev; 933 struct pci_dev *dev = hwif->pci_dev;
1059 u8 tristate = 0, resetmask = 0, bus_reg = 0; 934 u8 tristate, resetmask, bus_reg = 0;
1060 u16 tri_reg; 935 u16 tri_reg = 0;
1061 936
1062 hwif->bus_state = state; 937 hwif->bus_state = state;
1063 938
1064 if (hwif->channel) { 939 if (hwif->channel) {
1065 /* secondary channel */ 940 /* secondary channel */
1066 tristate = 0x56; 941 tristate = 0x56;
1067 resetmask = 0x80; 942 resetmask = 0x80;
1068 } else { 943 } else {
1069 /* primary channel */ 944 /* primary channel */
1070 tristate = 0x52; 945 tristate = 0x52;
1071 resetmask = 0x40; 946 resetmask = 0x40;
1072 } 947 }
1073 948
1074 /* grab status */ 949 /* Grab the status. */
1075 pci_read_config_word(dev, tristate, &tri_reg); 950 pci_read_config_word(dev, tristate, &tri_reg);
1076 pci_read_config_byte(dev, 0x59, &bus_reg); 951 pci_read_config_byte(dev, 0x59, &bus_reg);
1077 952
1078 /* set the state. we don't set it if we don't need to do so. 953 /*
1079 * make sure that the drive knows that it has failed if it's off */ 954 * Set the state. We don't set it if we don't need to do so.
955 * Make sure that the drive knows that it has failed if it's off.
956 */
1080 switch (state) { 957 switch (state) {
1081 case BUSSTATE_ON: 958 case BUSSTATE_ON:
1082 hwif->drives[0].failures = 0; 959 if (!(bus_reg & resetmask))
1083 hwif->drives[1].failures = 0;
1084 if ((bus_reg & resetmask) == 0)
1085 return 0; 960 return 0;
1086 tri_reg &= ~TRISTATE_BIT; 961 hwif->drives[0].failures = hwif->drives[1].failures = 0;
1087 bus_reg &= ~resetmask; 962
1088 break; 963 pci_write_config_byte(dev, 0x59, bus_reg & ~resetmask);
964 pci_write_config_word(dev, tristate, tri_reg & ~TRISTATE_BIT);
965 return 0;
1089 case BUSSTATE_OFF: 966 case BUSSTATE_OFF:
1090 hwif->drives[0].failures = hwif->drives[0].max_failures + 1; 967 if ((bus_reg & resetmask) && !(tri_reg & TRISTATE_BIT))
1091 hwif->drives[1].failures = hwif->drives[1].max_failures + 1;
1092 if ((tri_reg & TRISTATE_BIT) == 0 && (bus_reg & resetmask))
1093 return 0; 968 return 0;
1094 tri_reg &= ~TRISTATE_BIT; 969 tri_reg &= ~TRISTATE_BIT;
1095 bus_reg |= resetmask;
1096 break; 970 break;
1097 case BUSSTATE_TRISTATE: 971 case BUSSTATE_TRISTATE:
1098 hwif->drives[0].failures = hwif->drives[0].max_failures + 1; 972 if ((bus_reg & resetmask) && (tri_reg & TRISTATE_BIT))
1099 hwif->drives[1].failures = hwif->drives[1].max_failures + 1;
1100 if ((tri_reg & TRISTATE_BIT) && (bus_reg & resetmask))
1101 return 0; 973 return 0;
1102 tri_reg |= TRISTATE_BIT; 974 tri_reg |= TRISTATE_BIT;
1103 bus_reg |= resetmask;
1104 break; 975 break;
976 default:
977 return -EINVAL;
1105 } 978 }
1106 pci_write_config_byte(dev, 0x59, bus_reg);
1107 pci_write_config_word(dev, tristate, tri_reg);
1108 979
980 hwif->drives[0].failures = hwif->drives[0].max_failures + 1;
981 hwif->drives[1].failures = hwif->drives[1].max_failures + 1;
982
983 pci_write_config_word(dev, tristate, tri_reg);
984 pci_write_config_byte(dev, 0x59, bus_reg | resetmask);
1109 return 0; 985 return 0;
1110} 986}
1111 987
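
The bus-state handling can be condensed into a small standalone helper, sketched below with plain in-memory values standing in for the PCI config accesses; the per-channel tristate register (0x52 primary / 0x56 secondary) and reset bit (0x40 / 0x80 in register 0x59) follow the code above.

#include <stdint.h>
#include <stdio.h>

enum bus_state { BUS_ON, BUS_OFF, BUS_TRISTATE };
#define TRISTATE_BIT 0x8000

static void busproc(int channel, enum bus_state state,
                    uint8_t *bus_reg, uint16_t *tri_reg)
{
    uint8_t resetmask = channel ? 0x80 : 0x40;

    switch (state) {
    case BUS_ON:                        /* take the channel out of reset */
        *bus_reg &= ~resetmask;
        *tri_reg &= ~TRISTATE_BIT;
        break;
    case BUS_OFF:                       /* hold it in reset, bus still driven */
        *bus_reg |= resetmask;
        *tri_reg &= ~TRISTATE_BIT;
        break;
    case BUS_TRISTATE:                  /* hold it in reset, bus released */
        *bus_reg |= resetmask;
        *tri_reg |= TRISTATE_BIT;
        break;
    }
}

int main(void)
{
    uint8_t bus = 0; uint16_t tri = 0;
    busproc(1, BUS_TRISTATE, &bus, &tri);
    printf("0x59=0x%02x 0x56=0x%04x\n", (unsigned)bus, (unsigned)tri);
    return 0;
}
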
@@ -1119,14 +995,14 @@ static void __devinit hpt366_clocking(ide_hwif_t *hwif)
1119 /* detect bus speed by looking at control reg timing: */ 995 /* detect bus speed by looking at control reg timing: */
1120 switch((reg1 >> 8) & 7) { 996 switch((reg1 >> 8) & 7) {
1121 case 5: 997 case 5:
1122 info->speed = forty_base_hpt366; 998 info->speed = forty_base_hpt36x;
1123 break; 999 break;
1124 case 9: 1000 case 9:
1125 info->speed = twenty_five_base_hpt366; 1001 info->speed = twenty_five_base_hpt36x;
1126 break; 1002 break;
1127 case 7: 1003 case 7:
1128 default: 1004 default:
1129 info->speed = thirty_three_base_hpt366; 1005 info->speed = thirty_three_base_hpt36x;
1130 break; 1006 break;
1131 } 1007 }
1132} 1008}
@@ -1136,9 +1012,9 @@ static void __devinit hpt37x_clocking(ide_hwif_t *hwif)
1136 struct hpt_info *info = ide_get_hwifdata(hwif); 1012 struct hpt_info *info = ide_get_hwifdata(hwif);
1137 struct pci_dev *dev = hwif->pci_dev; 1013 struct pci_dev *dev = hwif->pci_dev;
1138 int adjust, i; 1014 int adjust, i;
1139 u16 freq; 1015 u16 freq = 0;
1140 u32 pll; 1016 u32 pll, temp = 0;
1141 u8 reg5bh; 1017 u8 reg5bh = 0, mcr1 = 0;
1142 1018
1143 /* 1019 /*
1144 * default to pci clock. make sure MA15/16 are set to output 1020 * default to pci clock. make sure MA15/16 are set to output
@@ -1151,27 +1027,40 @@ static void __devinit hpt37x_clocking(ide_hwif_t *hwif)
1151 pci_write_config_byte(dev, 0x5b, 0x23); 1027 pci_write_config_byte(dev, 0x5b, 0x23);
1152 1028
1153 /* 1029 /*
1154 * set up the PLL. we need to adjust it so that it's stable. 1030 * We'll have to read f_CNT value in order to determine
1155 * freq = Tpll * 192 / Tpci 1031 * the PCI clock frequency according to the following ratio:
1032 *
1033 * f_CNT = Fpci * 192 / Fdpll
1034 *
1035 * First try reading the register in which the HighPoint BIOS
1036 * saves f_CNT value before reprogramming the DPLL from its
1037 * default setting (which differs for the various chips).
1038 * NOTE: This register is only accessible via I/O space.
1156 * 1039 *
1157 * Todo. For non x86 should probably check the dword is 1040 * In case the signature check fails, we'll have to resort to
1158 * set to 0xABCDExxx indicating the BIOS saved f_CNT 1041 * reading the f_CNT register itself in hopes that nobody has
1042 * touched the DPLL yet...
1159 */ 1043 */
1160 pci_read_config_word(dev, 0x78, &freq); 1044 temp = inl(pci_resource_start(dev, 4) + 0x90);
1161 freq &= 0x1FF; 1045 if ((temp & 0xFFFFF000) != 0xABCDE000) {
1162 1046 printk(KERN_WARNING "HPT37X: no clock data saved by BIOS\n");
1047
1048 /* Calculate the average value of f_CNT */
1049 for (temp = i = 0; i < 128; i++) {
1050 pci_read_config_word(dev, 0x78, &freq);
1051 temp += freq & 0x1ff;
1052 mdelay(1);
1053 }
1054 freq = temp / 128;
1055 } else
1056 freq = temp & 0x1ff;
1057
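
A sketch of the f_CNT handling in isolation: prefer the BIOS-saved value when its 0xABCDExxx signature checks out, otherwise average 128 reads of the raw counter. The register accessors and the 48 MHz DPLL figure used to turn f_CNT back into a PCI clock estimate are illustrative assumptions, not driver facts.

#include <stdint.h>
#include <stdio.h>

static uint32_t read_bios_scratch(void) { return 0x00000000; }  /* no BIOS data saved */
static uint16_t read_fcnt_reg(void)     { return 0x0085; }      /* pretend ~33 MHz PCI */

static unsigned get_fcnt(void)
{
    uint32_t temp = read_bios_scratch();
    uint32_t sum = 0;
    int i;

    if ((temp & 0xfffff000) == 0xabcde000)      /* BIOS-saved value is valid */
        return temp & 0x1ff;

    for (i = 0; i < 128; i++)                   /* otherwise, average it out */
        sum += read_fcnt_reg() & 0x1ff;
    return sum / 128;
}

int main(void)
{
    unsigned fcnt = get_fcnt();

    /* f_CNT = Fpci * 192 / Fdpll, so with an assumed 48 MHz DPLL:
     * Fpci ~= f_CNT * 48 / 192 (illustration only). */
    printf("f_CNT = 0x%x, PCI clock ~ %u MHz\n", fcnt, fcnt * 48 / 192);
    return 0;
}
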
1163 /* 1058 /*
1164 * The 372N uses different PCI clock information and has 1059 * HPT3xxN chips use different PCI clock information.
1165 * some other complications 1060 * Currently we always set up the PLL for them.
1166 * On PCI33 timing we must clock switch
1167 * On PCI66 timing we must NOT use the PCI clock
1168 *
1169 * Currently we always set up the PLL for the 372N
1170 */ 1061 */
1171 1062
1172 if(info->flags & IS_372N) 1063 if (info->flags & IS_3xxN) {
1173 {
1174 printk(KERN_INFO "hpt: HPT372N detected, using 372N timing.\n");
1175 if(freq < 0x55) 1064 if(freq < 0x55)
1176 pll = F_LOW_PCI_33; 1065 pll = F_LOW_PCI_33;
1177 else if(freq < 0x70) 1066 else if(freq < 0x70)
@@ -1180,10 +1069,8 @@ static void __devinit hpt37x_clocking(ide_hwif_t *hwif)
1180 pll = F_LOW_PCI_50; 1069 pll = F_LOW_PCI_50;
1181 else 1070 else
1182 pll = F_LOW_PCI_66; 1071 pll = F_LOW_PCI_66;
1183 1072
1184 printk(KERN_INFO "FREQ: %d PLL: %d\n", freq, pll); 1073 printk(KERN_INFO "HPT3xxN detected, FREQ: %d, PLL: %d\n", freq, pll);
1185
1186 /* We always use the pll not the PCI clock on 372N */
1187 } 1074 }
1188 else 1075 else
1189 { 1076 {
@@ -1197,41 +1084,22 @@ static void __devinit hpt37x_clocking(ide_hwif_t *hwif)
1197 pll = F_LOW_PCI_66; 1084 pll = F_LOW_PCI_66;
1198 1085
1199 if (pll == F_LOW_PCI_33) { 1086 if (pll == F_LOW_PCI_33) {
1200 if (info->revision >= 8) 1087 info->speed = thirty_three_base_hpt37x;
1201 info->speed = thirty_three_base_hpt374;
1202 else if (info->revision >= 5)
1203 info->speed = thirty_three_base_hpt372;
1204 else if (info->revision >= 4)
1205 info->speed = thirty_three_base_hpt370a;
1206 else
1207 info->speed = thirty_three_base_hpt370;
1208 printk(KERN_DEBUG "HPT37X: using 33MHz PCI clock\n"); 1088 printk(KERN_DEBUG "HPT37X: using 33MHz PCI clock\n");
1209 } else if (pll == F_LOW_PCI_40) { 1089 } else if (pll == F_LOW_PCI_40) {
1210 /* Unsupported */ 1090 /* Unsupported */
1211 } else if (pll == F_LOW_PCI_50) { 1091 } else if (pll == F_LOW_PCI_50) {
1212 if (info->revision >= 8) 1092 info->speed = fifty_base_hpt37x;
1213 info->speed = fifty_base_hpt370a;
1214 else if (info->revision >= 5)
1215 info->speed = fifty_base_hpt372;
1216 else if (info->revision >= 4)
1217 info->speed = fifty_base_hpt370a;
1218 else
1219 info->speed = fifty_base_hpt370a;
1220 printk(KERN_DEBUG "HPT37X: using 50MHz PCI clock\n"); 1093 printk(KERN_DEBUG "HPT37X: using 50MHz PCI clock\n");
1221 } else { 1094 } else {
1222 if (info->revision >= 8) { 1095 info->speed = sixty_six_base_hpt37x;
1223 printk(KERN_ERR "HPT37x: 66MHz timings are not supported.\n");
1224 }
1225 else if (info->revision >= 5)
1226 info->speed = sixty_six_base_hpt372;
1227 else if (info->revision >= 4)
1228 info->speed = sixty_six_base_hpt370a;
1229 else
1230 info->speed = sixty_six_base_hpt370;
1231 printk(KERN_DEBUG "HPT37X: using 66MHz PCI clock\n"); 1096 printk(KERN_DEBUG "HPT37X: using 66MHz PCI clock\n");
1232 } 1097 }
1233 } 1098 }
1234 1099
1100 if (pll == F_LOW_PCI_66)
1101 info->flags |= PCI_66MHZ;
1102
1235 /* 1103 /*
1236 * only try the pll if we don't have a table for the clock 1104 * only try the pll if we don't have a table for the clock
1237 * speed that we're running at. NOTE: the internal PLL will 1105 * speed that we're running at. NOTE: the internal PLL will
@@ -1248,11 +1116,8 @@ static void __devinit hpt37x_clocking(ide_hwif_t *hwif)
1248 info->flags |= PLL_MODE; 1116 info->flags |= PLL_MODE;
1249 1117
1250 /* 1118 /*
1251 * FIXME: make this work correctly, esp with 372N as per 1119 * Adjust the PLL based upon the PCI clock, enable it, and
1252 * reference driver code. 1120 * wait for stabilization...
1253 *
1254 * adjust PLL based upon PCI clock, enable it, and wait for
1255 * stabilization.
1256 */ 1121 */
1257 adjust = 0; 1122 adjust = 0;
1258 freq = (pll < F_LOW_PCI_50) ? 2 : 4; 1123 freq = (pll < F_LOW_PCI_50) ? 2 : 4;
@@ -1275,22 +1140,12 @@ static void __devinit hpt37x_clocking(ide_hwif_t *hwif)
1275 pci_write_config_dword(dev, 0x5c, 1140 pci_write_config_dword(dev, 0x5c,
1276 pll & ~0x100); 1141 pll & ~0x100);
1277 pci_write_config_byte(dev, 0x5b, 0x21); 1142 pci_write_config_byte(dev, 0x5b, 0x21);
1278 if (info->revision >= 8) 1143
1279 info->speed = fifty_base_hpt370a; 1144 info->speed = fifty_base_hpt37x;
1280 else if (info->revision >= 5)
1281 info->speed = fifty_base_hpt372;
1282 else if (info->revision >= 4)
1283 info->speed = fifty_base_hpt370a;
1284 else
1285 info->speed = fifty_base_hpt370a;
1286 printk("HPT37X: using 50MHz internal PLL\n"); 1145 printk("HPT37X: using 50MHz internal PLL\n");
1287 goto init_hpt37X_done; 1146 goto init_hpt37X_done;
1288 } 1147 }
1289 } 1148 }
1290 if (!pci_get_drvdata(dev)) {
1291 printk("No Clock Stabilization!!!\n");
1292 return;
1293 }
1294pll_recal: 1149pll_recal:
1295 if (adjust & 1) 1150 if (adjust & 1)
1296 pll -= (adjust >> 1); 1151 pll -= (adjust >> 1);
@@ -1300,11 +1155,16 @@ pll_recal:
1300 1155
1301init_hpt37X_done: 1156init_hpt37X_done:
1302 if (!info->speed) 1157 if (!info->speed)
1303 printk(KERN_ERR "HPT37X%s: unknown bus timing [%d %d].\n", 1158 printk(KERN_ERR "HPT37x%s: unknown bus timing [%d %d].\n",
1304 (info->flags & IS_372N)?"N":"", pll, freq); 1159 (info->flags & IS_3xxN) ? "N" : "", pll, freq);
1305 /* reset state engine */ 1160 /*
1306 pci_write_config_byte(dev, 0x50, 0x37); 1161 * Reset the state engines.
1307 pci_write_config_byte(dev, 0x54, 0x37); 1162 * NOTE: avoid accidentally enabling the primary channel on HPT371N.
1163 */
1164 pci_read_config_byte(dev, 0x50, &mcr1);
1165 if (mcr1 & 0x04)
1166 pci_write_config_byte(dev, 0x50, 0x37);
1167 pci_write_config_byte(dev, 0x54, 0x37);
1308 udelay(100); 1168 udelay(100);
1309} 1169}
1310 1170
@@ -1367,6 +1227,7 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1367 struct pci_dev *dev = hwif->pci_dev; 1227 struct pci_dev *dev = hwif->pci_dev;
1368 struct hpt_info *info = ide_get_hwifdata(hwif); 1228 struct hpt_info *info = ide_get_hwifdata(hwif);
1369 u8 ata66 = 0, regmask = (hwif->channel) ? 0x01 : 0x02; 1229 u8 ata66 = 0, regmask = (hwif->channel) ? 0x01 : 0x02;
1230 int serialize = HPT_SERIALIZE_IO;
1370 1231
1371 hwif->tuneproc = &hpt3xx_tune_drive; 1232 hwif->tuneproc = &hpt3xx_tune_drive;
1372 hwif->speedproc = &hpt3xx_tune_chipset; 1233 hwif->speedproc = &hpt3xx_tune_chipset;
@@ -1374,8 +1235,20 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1374 hwif->intrproc = &hpt3xx_intrproc; 1235 hwif->intrproc = &hpt3xx_intrproc;
1375 hwif->maskproc = &hpt3xx_maskproc; 1236 hwif->maskproc = &hpt3xx_maskproc;
1376 1237
1377 if(info->flags & IS_372N) 1238 /*
1378 hwif->rw_disk = &hpt372n_rw_disk; 1239 * HPT3xxN chips have some complications:
1240 *
1241 * - on 33 MHz PCI we must clock switch
1242 * - on 66 MHz PCI we must NOT use the PCI clock
1243 */
1244 if ((info->flags & (IS_3xxN | PCI_66MHZ)) == IS_3xxN) {
1245 /*
1246 * Clock is shared between the channels,
1247 * so we'll have to serialize them... :-(
1248 */
1249 serialize = 1;
1250 hwif->rw_disk = &hpt3xxn_rw_disk;
1251 }
1379 1252
1380 /* 1253 /*
1381 * The HPT37x uses the CBLID pins as outputs for MA15/MA16 1254 * The HPT37x uses the CBLID pins as outputs for MA15/MA16
@@ -1418,29 +1291,15 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1418 PCI_FUNC(hwif->pci_dev->devfn)); 1291 PCI_FUNC(hwif->pci_dev->devfn));
1419#endif /* DEBUG */ 1292#endif /* DEBUG */
1420 1293
1421#ifdef HPT_SERIALIZE_IO 1294 /* Serialize access to this device */
1422 /* serialize access to this device */ 1295 if (serialize && hwif->mate)
1423 if (hwif->mate)
1424 hwif->serialized = hwif->mate->serialized = 1; 1296 hwif->serialized = hwif->mate->serialized = 1;
1425#endif
1426 1297
1427 if (info->revision >= 3) { 1298 /*
1428 u8 reg5ah = 0; 1299 * Set up ioctl for power status.
1429 pci_write_config_byte(dev, 0x5a, reg5ah & ~0x10); 1300 * NOTE: power affects both drives on each channel.
1430 /* 1301 */
1431 * set up ioctl for power status. 1302 hwif->busproc = &hpt3xx_busproc;
1432 * note: power affects both
1433 * drives on each channel
1434 */
1435 hwif->resetproc = &hpt3xx_reset;
1436 hwif->busproc = &hpt370_busproc;
1437 } else if (info->revision >= 2) {
1438 hwif->resetproc = &hpt3xx_reset;
1439 hwif->busproc = &hpt3xx_tristate;
1440 } else {
1441 hwif->resetproc = &hpt3xx_reset;
1442 hwif->busproc = &hpt3xx_tristate;
1443 }
1444 1303
1445 if (!hwif->dma_base) { 1304 if (!hwif->dma_base) {
1446 hwif->drives[0].autotune = 1; 1305 hwif->drives[0].autotune = 1;
@@ -1490,7 +1349,7 @@ static void __devinit init_dma_hpt366(ide_hwif_t *hwif, unsigned long dmabase)
1490 return; 1349 return;
1491 1350
1492 if(info->speed == NULL) { 1351 if(info->speed == NULL) {
1493 printk(KERN_WARNING "hpt: no known IDE timings, disabling DMA.\n"); 1352 printk(KERN_WARNING "hpt366: no known IDE timings, disabling DMA.\n");
1494 return; 1353 return;
1495 } 1354 }
1496 1355
@@ -1519,9 +1378,10 @@ static void __devinit init_dma_hpt366(ide_hwif_t *hwif, unsigned long dmabase)
1519 1378
1520static void __devinit init_iops_hpt366(ide_hwif_t *hwif) 1379static void __devinit init_iops_hpt366(ide_hwif_t *hwif)
1521{ 1380{
1522 struct hpt_info *info = kzalloc(sizeof(struct hpt_info), GFP_KERNEL); 1381 struct hpt_info *info = kzalloc(sizeof(struct hpt_info), GFP_KERNEL);
1523 unsigned long dmabase = pci_resource_start(hwif->pci_dev, 4); 1382 struct pci_dev *dev = hwif->pci_dev;
1524 u8 did, rid; 1383 u16 did = dev->device;
1384 u8 rid = 0;
1525 1385
1526 if(info == NULL) { 1386 if(info == NULL) {
1527 printk(KERN_WARNING "hpt366: out of memory.\n"); 1387 printk(KERN_WARNING "hpt366: out of memory.\n");
@@ -1529,15 +1389,22 @@ static void __devinit init_iops_hpt366(ide_hwif_t *hwif)
1529 } 1389 }
1530 ide_set_hwifdata(hwif, info); 1390 ide_set_hwifdata(hwif, info);
1531 1391
1532 if(dmabase) { 1392 /* Avoid doing the same thing twice. */
1533 did = inb(dmabase + 0x22); 1393 if (hwif->channel && hwif->mate) {
1534 rid = inb(dmabase + 0x28); 1394 memcpy(info, ide_get_hwifdata(hwif->mate), sizeof(struct hpt_info));
1535 1395 return;
1536 if((did == 4 && rid == 6) || (did == 5 && rid > 1))
1537 info->flags |= IS_372N;
1538 } 1396 }
1539 1397
1540 info->revision = hpt_revision(hwif->pci_dev); 1398 pci_read_config_byte(dev, PCI_CLASS_REVISION, &rid);
1399
1400 if (( did == PCI_DEVICE_ID_TTI_HPT366 && rid == 6) ||
1401 ((did == PCI_DEVICE_ID_TTI_HPT372 ||
1402 did == PCI_DEVICE_ID_TTI_HPT302 ||
1403 did == PCI_DEVICE_ID_TTI_HPT371) && rid > 1) ||
1404 did == PCI_DEVICE_ID_TTI_HPT372N)
1405 info->flags |= IS_3xxN;
1406
1407 info->revision = hpt_revision(dev);
1541 1408
1542 if (info->revision >= 3) 1409 if (info->revision >= 3)
1543 hpt37x_clocking(hwif); 1410 hpt37x_clocking(hwif);
@@ -1574,6 +1441,23 @@ static int __devinit init_setup_hpt37x(struct pci_dev *dev, ide_pci_device_t *d)
1574 return ide_setup_pci_device(dev, d); 1441 return ide_setup_pci_device(dev, d);
1575} 1442}
1576 1443
1444static int __devinit init_setup_hpt371(struct pci_dev *dev, ide_pci_device_t *d)
1445{
1446 u8 mcr1 = 0;
1447
1448 /*
1449 * HPT371 chips physically have only one channel, the secondary one,
1450 * but the primary channel registers do exist! Go figure...
1451 * So, we manually disable the non-existing channel here
1452 * (if the BIOS hasn't done this already).
1453 */
1454 pci_read_config_byte(dev, 0x50, &mcr1);
1455 if (mcr1 & 0x04)
1456 pci_write_config_byte(dev, 0x50, (mcr1 & ~0x04));
1457
1458 return ide_setup_pci_device(dev, d);
1459}
1460
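
A minimal sketch of the HPT371 quirk in isolation, with an in-memory byte standing in for PCI config register 0x50: bit 2 (the phantom primary channel's enable) is cleared if the BIOS left it set.

#include <stdint.h>
#include <stdio.h>

static uint8_t cfg[0x100] = { [0x50] = 0x37 };   /* BIOS left bit 2 set */

static void disable_phantom_primary(void)
{
    uint8_t mcr1 = cfg[0x50];

    if (mcr1 & 0x04)
        cfg[0x50] = mcr1 & ~0x04;
}

int main(void)
{
    disable_phantom_primary();
    printf("MCR1 = 0x%02x\n", (unsigned)cfg[0x50]);   /* prints 0x33 */
    return 0;
}
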
1577static int __devinit init_setup_hpt366(struct pci_dev *dev, ide_pci_device_t *d) 1461static int __devinit init_setup_hpt366(struct pci_dev *dev, ide_pci_device_t *d)
1578{ 1462{
1579 struct pci_dev *findev = NULL; 1463 struct pci_dev *findev = NULL;
@@ -1661,13 +1545,14 @@ static ide_pci_device_t hpt366_chipsets[] __devinitdata = {
1661 .bootable = OFF_BOARD, 1545 .bootable = OFF_BOARD,
1662 },{ /* 3 */ 1546 },{ /* 3 */
1663 .name = "HPT371", 1547 .name = "HPT371",
1664 .init_setup = init_setup_hpt37x, 1548 .init_setup = init_setup_hpt371,
1665 .init_chipset = init_chipset_hpt366, 1549 .init_chipset = init_chipset_hpt366,
1666 .init_iops = init_iops_hpt366, 1550 .init_iops = init_iops_hpt366,
1667 .init_hwif = init_hwif_hpt366, 1551 .init_hwif = init_hwif_hpt366,
1668 .init_dma = init_dma_hpt366, 1552 .init_dma = init_dma_hpt366,
1669 .channels = 2, 1553 .channels = 2,
1670 .autodma = AUTODMA, 1554 .autodma = AUTODMA,
1555 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1671 .bootable = OFF_BOARD, 1556 .bootable = OFF_BOARD,
1672 },{ /* 4 */ 1557 },{ /* 4 */
1673 .name = "HPT374", 1558 .name = "HPT374",
@@ -1699,13 +1584,16 @@ static ide_pci_device_t hpt366_chipsets[] __devinitdata = {
1699 * 1584 *
1700 * Called when the PCI registration layer (or the IDE initialization) 1585 * Called when the PCI registration layer (or the IDE initialization)
1701 * finds a device matching our IDE device tables. 1586 * finds a device matching our IDE device tables.
1587 *
1588 * NOTE: since we'll have to modify some fields of the ide_pci_device_t
1589 * structure depending on the chip's revision, we'd better pass a local
1590 * copy down the call chain...
1702 */ 1591 */
1703
1704static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_device_id *id) 1592static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_device_id *id)
1705{ 1593{
1706 ide_pci_device_t *d = &hpt366_chipsets[id->driver_data]; 1594 ide_pci_device_t d = hpt366_chipsets[id->driver_data];
1707 1595
1708 return d->init_setup(dev, d); 1596 return d.init_setup(dev, &d);
1709} 1597}
1710 1598
1711static struct pci_device_id hpt366_pci_tbl[] = { 1599static struct pci_device_id hpt366_pci_tbl[] = {
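
A generic sketch of the "pass a local copy" pattern used by hpt366_init_one() above: the static chipset descriptor is copied by value, so per-revision tweaks never leak into the shared template. The struct and field names below are made up for illustration.

#include <stdio.h>

struct chip_desc {
    const char *name;
    int udma_mask;               /* hypothetical per-revision field */
};

static const struct chip_desc hpt372_template = { "HPT372", 0x3f };

static int probe_one(int revision)
{
    struct chip_desc d = hpt372_template;     /* copy, don't alias */

    if (revision < 2)
        d.udma_mask = 0x1f;                   /* trim ATA/133 on old steppings */
    printf("%s: udma_mask=0x%02x\n", d.name, d.udma_mask);
    return 0;
}

int main(void)
{
    probe_one(1);
    probe_one(2);
    return 0;
}
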
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 163d991eb8c9..50fb1cd447b7 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,9 +1,11 @@
1infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o 1infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o
2user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o
2 3
3obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \ 4obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
4 ib_cm.o iw_cm.o $(infiniband-y) 5 ib_cm.o iw_cm.o $(infiniband-y)
5obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o 6obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
6obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o 7obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
8 $(user_access-y)
7 9
8ib_core-y := packer.o ud_header.o verbs.o sysfs.o \ 10ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
9 device.o fmr_pool.o cache.o 11 device.o fmr_pool.o cache.o
@@ -18,6 +20,8 @@ iw_cm-y := iwcm.o
18 20
19rdma_cm-y := cma.o 21rdma_cm-y := cma.o
20 22
23rdma_ucm-y := ucma.o
24
21ib_addr-y := addr.o 25ib_addr-y := addr.o
22 26
23ib_umad-y := user_mad.o 27ib_umad-y := user_mad.o
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 79c937bf6962..d446998b12a4 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3289,6 +3289,10 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3289 3289
3290 spin_lock_irqsave(&cm_id_priv->lock, flags); 3290 spin_lock_irqsave(&cm_id_priv->lock, flags);
3291 switch (cm_id_priv->id.state) { 3291 switch (cm_id_priv->id.state) {
3292 /* Allow transition to RTS before sending REP */
3293 case IB_CM_REQ_RCVD:
3294 case IB_CM_MRA_REQ_SENT:
3295
3292 case IB_CM_REP_RCVD: 3296 case IB_CM_REP_RCVD:
3293 case IB_CM_MRA_REP_SENT: 3297 case IB_CM_MRA_REP_SENT:
3294 case IB_CM_REP_SENT: 3298 case IB_CM_REP_SENT:
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 985a6b564d8f..533193d4e5df 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -70,6 +70,7 @@ static DEFINE_MUTEX(lock);
70static struct workqueue_struct *cma_wq; 70static struct workqueue_struct *cma_wq;
71static DEFINE_IDR(sdp_ps); 71static DEFINE_IDR(sdp_ps);
72static DEFINE_IDR(tcp_ps); 72static DEFINE_IDR(tcp_ps);
73static DEFINE_IDR(udp_ps);
73 74
74struct cma_device { 75struct cma_device {
75 struct list_head list; 76 struct list_head list;
@@ -133,7 +134,6 @@ struct rdma_id_private {
133 134
134 u32 seq_num; 135 u32 seq_num;
135 u32 qp_num; 136 u32 qp_num;
136 enum ib_qp_type qp_type;
137 u8 srq; 137 u8 srq;
138}; 138};
139 139
@@ -392,7 +392,6 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
392 392
393 id->qp = qp; 393 id->qp = qp;
394 id_priv->qp_num = qp->qp_num; 394 id_priv->qp_num = qp->qp_num;
395 id_priv->qp_type = qp->qp_type;
396 id_priv->srq = (qp->srq != NULL); 395 id_priv->srq = (qp->srq != NULL);
397 return 0; 396 return 0;
398err: 397err:
@@ -510,9 +509,17 @@ static inline int cma_any_addr(struct sockaddr *addr)
510 return cma_zero_addr(addr) || cma_loopback_addr(addr); 509 return cma_zero_addr(addr) || cma_loopback_addr(addr);
511} 510}
512 511
512static inline __be16 cma_port(struct sockaddr *addr)
513{
514 if (addr->sa_family == AF_INET)
515 return ((struct sockaddr_in *) addr)->sin_port;
516 else
517 return ((struct sockaddr_in6 *) addr)->sin6_port;
518}
519
513static inline int cma_any_port(struct sockaddr *addr) 520static inline int cma_any_port(struct sockaddr *addr)
514{ 521{
515 return !((struct sockaddr_in *) addr)->sin_port; 522 return !cma_port(addr);
516} 523}
517 524
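
A standalone sketch of the family-aware port helper: the port sits at different offsets in sockaddr_in and sockaddr_in6, so the address is dispatched on sa_family before reading it. This mirrors the cma_port()/cma_any_port() pair but builds as an ordinary user-space program.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>

static uint16_t addr_port(const struct sockaddr *addr)   /* network byte order */
{
    if (addr->sa_family == AF_INET)
        return ((const struct sockaddr_in *)addr)->sin_port;
    return ((const struct sockaddr_in6 *)addr)->sin6_port;
}

int main(void)
{
    struct sockaddr_in6 a6;

    memset(&a6, 0, sizeof a6);
    a6.sin6_family = AF_INET6;
    a6.sin6_port = htons(18515);
    printf("port %u, any=%d\n",
           ntohs(addr_port((struct sockaddr *)&a6)),
           addr_port((struct sockaddr *)&a6) == 0);
    return 0;
}
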
518static int cma_get_net_info(void *hdr, enum rdma_port_space ps, 525static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
@@ -594,20 +601,6 @@ static inline int cma_user_data_offset(enum rdma_port_space ps)
594 } 601 }
595} 602}
596 603
597static int cma_notify_user(struct rdma_id_private *id_priv,
598 enum rdma_cm_event_type type, int status,
599 void *data, u8 data_len)
600{
601 struct rdma_cm_event event;
602
603 event.event = type;
604 event.status = status;
605 event.private_data = data;
606 event.private_data_len = data_len;
607
608 return id_priv->id.event_handler(&id_priv->id, &event);
609}
610
611static void cma_cancel_route(struct rdma_id_private *id_priv) 604static void cma_cancel_route(struct rdma_id_private *id_priv)
612{ 605{
613 switch (rdma_node_get_transport(id_priv->id.device->node_type)) { 606 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
@@ -776,63 +769,61 @@ static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
776 return 0; 769 return 0;
777} 770}
778 771
779static int cma_rtu_recv(struct rdma_id_private *id_priv) 772static void cma_set_rep_event_data(struct rdma_cm_event *event,
773 struct ib_cm_rep_event_param *rep_data,
774 void *private_data)
780{ 775{
781 int ret; 776 event->param.conn.private_data = private_data;
782 777 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
783 ret = cma_modify_qp_rts(&id_priv->id); 778 event->param.conn.responder_resources = rep_data->responder_resources;
784 if (ret) 779 event->param.conn.initiator_depth = rep_data->initiator_depth;
785 goto reject; 780 event->param.conn.flow_control = rep_data->flow_control;
786 781 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
787 return 0; 782 event->param.conn.srq = rep_data->srq;
788reject: 783 event->param.conn.qp_num = rep_data->remote_qpn;
789 cma_modify_qp_err(&id_priv->id);
790 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
791 NULL, 0, NULL, 0);
792 return ret;
793} 784}
794 785
795static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) 786static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
796{ 787{
797 struct rdma_id_private *id_priv = cm_id->context; 788 struct rdma_id_private *id_priv = cm_id->context;
798 enum rdma_cm_event_type event; 789 struct rdma_cm_event event;
799 u8 private_data_len = 0; 790 int ret = 0;
800 int ret = 0, status = 0;
801 791
802 atomic_inc(&id_priv->dev_remove); 792 atomic_inc(&id_priv->dev_remove);
803 if (!cma_comp(id_priv, CMA_CONNECT)) 793 if (!cma_comp(id_priv, CMA_CONNECT))
804 goto out; 794 goto out;
805 795
796 memset(&event, 0, sizeof event);
806 switch (ib_event->event) { 797 switch (ib_event->event) {
807 case IB_CM_REQ_ERROR: 798 case IB_CM_REQ_ERROR:
808 case IB_CM_REP_ERROR: 799 case IB_CM_REP_ERROR:
809 event = RDMA_CM_EVENT_UNREACHABLE; 800 event.event = RDMA_CM_EVENT_UNREACHABLE;
810 status = -ETIMEDOUT; 801 event.status = -ETIMEDOUT;
811 break; 802 break;
812 case IB_CM_REP_RECEIVED: 803 case IB_CM_REP_RECEIVED:
813 status = cma_verify_rep(id_priv, ib_event->private_data); 804 event.status = cma_verify_rep(id_priv, ib_event->private_data);
814 if (status) 805 if (event.status)
815 event = RDMA_CM_EVENT_CONNECT_ERROR; 806 event.event = RDMA_CM_EVENT_CONNECT_ERROR;
816 else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) { 807 else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
817 status = cma_rep_recv(id_priv); 808 event.status = cma_rep_recv(id_priv);
818 event = status ? RDMA_CM_EVENT_CONNECT_ERROR : 809 event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
819 RDMA_CM_EVENT_ESTABLISHED; 810 RDMA_CM_EVENT_ESTABLISHED;
820 } else 811 } else
821 event = RDMA_CM_EVENT_CONNECT_RESPONSE; 812 event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
822 private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE; 813 cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
814 ib_event->private_data);
823 break; 815 break;
824 case IB_CM_RTU_RECEIVED: 816 case IB_CM_RTU_RECEIVED:
825 status = cma_rtu_recv(id_priv); 817 case IB_CM_USER_ESTABLISHED:
826 event = status ? RDMA_CM_EVENT_CONNECT_ERROR : 818 event.event = RDMA_CM_EVENT_ESTABLISHED;
827 RDMA_CM_EVENT_ESTABLISHED;
828 break; 819 break;
829 case IB_CM_DREQ_ERROR: 820 case IB_CM_DREQ_ERROR:
830 status = -ETIMEDOUT; /* fall through */ 821 event.status = -ETIMEDOUT; /* fall through */
831 case IB_CM_DREQ_RECEIVED: 822 case IB_CM_DREQ_RECEIVED:
832 case IB_CM_DREP_RECEIVED: 823 case IB_CM_DREP_RECEIVED:
833 if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT)) 824 if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
834 goto out; 825 goto out;
835 event = RDMA_CM_EVENT_DISCONNECTED; 826 event.event = RDMA_CM_EVENT_DISCONNECTED;
836 break; 827 break;
837 case IB_CM_TIMEWAIT_EXIT: 828 case IB_CM_TIMEWAIT_EXIT:
838 case IB_CM_MRA_RECEIVED: 829 case IB_CM_MRA_RECEIVED:
@@ -840,9 +831,10 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
840 goto out; 831 goto out;
841 case IB_CM_REJ_RECEIVED: 832 case IB_CM_REJ_RECEIVED:
842 cma_modify_qp_err(&id_priv->id); 833 cma_modify_qp_err(&id_priv->id);
843 status = ib_event->param.rej_rcvd.reason; 834 event.status = ib_event->param.rej_rcvd.reason;
844 event = RDMA_CM_EVENT_REJECTED; 835 event.event = RDMA_CM_EVENT_REJECTED;
845 private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; 836 event.param.conn.private_data = ib_event->private_data;
837 event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
846 break; 838 break;
847 default: 839 default:
848 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d", 840 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
@@ -850,8 +842,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
850 goto out; 842 goto out;
851 } 843 }
852 844
853 ret = cma_notify_user(id_priv, event, status, ib_event->private_data, 845 ret = id_priv->id.event_handler(&id_priv->id, &event);
854 private_data_len);
855 if (ret) { 846 if (ret) {
856 /* Destroy the CM ID by returning a non-zero value. */ 847 /* Destroy the CM ID by returning a non-zero value. */
857 id_priv->cm_id.ib = NULL; 848 id_priv->cm_id.ib = NULL;
@@ -865,8 +856,8 @@ out:
865 return ret; 856 return ret;
866} 857}
867 858
868static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id, 859static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
869 struct ib_cm_event *ib_event) 860 struct ib_cm_event *ib_event)
870{ 861{
871 struct rdma_id_private *id_priv; 862 struct rdma_id_private *id_priv;
872 struct rdma_cm_id *id; 863 struct rdma_cm_id *id;
@@ -913,9 +904,61 @@ err:
913 return NULL; 904 return NULL;
914} 905}
915 906
907static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
908 struct ib_cm_event *ib_event)
909{
910 struct rdma_id_private *id_priv;
911 struct rdma_cm_id *id;
912 union cma_ip_addr *src, *dst;
913 __u16 port;
914 u8 ip_ver;
915 int ret;
916
917 id = rdma_create_id(listen_id->event_handler, listen_id->context,
918 listen_id->ps);
919 if (IS_ERR(id))
920 return NULL;
921
922
923 if (cma_get_net_info(ib_event->private_data, listen_id->ps,
924 &ip_ver, &port, &src, &dst))
925 goto err;
926
927 cma_save_net_info(&id->route.addr, &listen_id->route.addr,
928 ip_ver, port, src, dst);
929
930 ret = rdma_translate_ip(&id->route.addr.src_addr,
931 &id->route.addr.dev_addr);
932 if (ret)
933 goto err;
934
935 id_priv = container_of(id, struct rdma_id_private, id);
936 id_priv->state = CMA_CONNECT;
937 return id_priv;
938err:
939 rdma_destroy_id(id);
940 return NULL;
941}
942
943static void cma_set_req_event_data(struct rdma_cm_event *event,
944 struct ib_cm_req_event_param *req_data,
945 void *private_data, int offset)
946{
947 event->param.conn.private_data = private_data + offset;
948 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
949 event->param.conn.responder_resources = req_data->responder_resources;
950 event->param.conn.initiator_depth = req_data->initiator_depth;
951 event->param.conn.flow_control = req_data->flow_control;
952 event->param.conn.retry_count = req_data->retry_count;
953 event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
954 event->param.conn.srq = req_data->srq;
955 event->param.conn.qp_num = req_data->remote_qpn;
956}
957
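
A trimmed-down sketch of the reworked event path: the event structure is zeroed on the stack, filled per CM event, and passed straight to the registered handler, whose non-zero return asks for teardown. The types below are stand-ins, not the rdma_cm ones.

#include <stdio.h>
#include <string.h>

enum cm_event_type { EV_ESTABLISHED, EV_REJECTED };

struct cm_event {
    enum cm_event_type event;
    int status;
    struct { const void *private_data; int private_data_len; } conn;
};

typedef int (*event_handler_t)(const struct cm_event *ev);

static int dispatch_reject(event_handler_t handler,
                           const void *pdata, int len, int reason)
{
    struct cm_event event;

    memset(&event, 0, sizeof event);        /* unset fields stay zero */
    event.event = EV_REJECTED;
    event.status = reason;
    event.conn.private_data = pdata;
    event.conn.private_data_len = len;
    return handler(&event);                 /* non-zero means "destroy the id" */
}

static int my_handler(const struct cm_event *ev)
{
    printf("event %d status %d len %d\n", ev->event, ev->status,
           ev->conn.private_data_len);
    return 0;
}

int main(void)
{
    return dispatch_reject(my_handler, "rej", 3, 28);
}
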
916static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) 958static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
917{ 959{
918 struct rdma_id_private *listen_id, *conn_id; 960 struct rdma_id_private *listen_id, *conn_id;
961 struct rdma_cm_event event;
919 int offset, ret; 962 int offset, ret;
920 963
921 listen_id = cm_id->context; 964 listen_id = cm_id->context;
@@ -925,7 +968,19 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
925 goto out; 968 goto out;
926 } 969 }
927 970
928 conn_id = cma_new_id(&listen_id->id, ib_event); 971 memset(&event, 0, sizeof event);
972 offset = cma_user_data_offset(listen_id->id.ps);
973 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
974 if (listen_id->id.ps == RDMA_PS_UDP) {
975 conn_id = cma_new_udp_id(&listen_id->id, ib_event);
976 event.param.ud.private_data = ib_event->private_data + offset;
977 event.param.ud.private_data_len =
978 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
979 } else {
980 conn_id = cma_new_conn_id(&listen_id->id, ib_event);
981 cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
982 ib_event->private_data, offset);
983 }
929 if (!conn_id) { 984 if (!conn_id) {
930 ret = -ENOMEM; 985 ret = -ENOMEM;
931 goto out; 986 goto out;
@@ -942,10 +997,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
942 cm_id->context = conn_id; 997 cm_id->context = conn_id;
943 cm_id->cm_handler = cma_ib_handler; 998 cm_id->cm_handler = cma_ib_handler;
944 999
945 offset = cma_user_data_offset(listen_id->id.ps); 1000 ret = conn_id->id.event_handler(&conn_id->id, &event);
946 ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
947 ib_event->private_data + offset,
948 IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
949 if (!ret) 1001 if (!ret)
950 goto out; 1002 goto out;
951 1003
@@ -964,8 +1016,7 @@ out:
964 1016
965static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr) 1017static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
966{ 1018{
967 return cpu_to_be64(((u64)ps << 16) + 1019 return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
968 be16_to_cpu(((struct sockaddr_in *) addr)->sin_port));
969} 1020}
970 1021
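
A small sketch of the service-id packing: the 16-bit port space is placed above the 16-bit port, and the kernel then byte-swaps the result with cpu_to_be64() for the wire. The port-space value in the example is hypothetical.

#include <stdint.h>
#include <stdio.h>

static uint64_t service_id(uint16_t port_space, uint16_t port_host_order)
{
    /* host-order value; the driver converts this to big-endian afterwards */
    return ((uint64_t)port_space << 16) + port_host_order;
}

int main(void)
{
    /* e.g. a hypothetical port space 0x0106 listening on port 18515 */
    printf("service id = 0x%016llx\n",
           (unsigned long long)service_id(0x0106, 18515));
    return 0;
}
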
971static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr, 1022static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
@@ -1021,15 +1072,16 @@ static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
1021static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) 1072static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
1022{ 1073{
1023 struct rdma_id_private *id_priv = iw_id->context; 1074 struct rdma_id_private *id_priv = iw_id->context;
1024 enum rdma_cm_event_type event = 0; 1075 struct rdma_cm_event event;
1025 struct sockaddr_in *sin; 1076 struct sockaddr_in *sin;
1026 int ret = 0; 1077 int ret = 0;
1027 1078
1079 memset(&event, 0, sizeof event);
1028 atomic_inc(&id_priv->dev_remove); 1080 atomic_inc(&id_priv->dev_remove);
1029 1081
1030 switch (iw_event->event) { 1082 switch (iw_event->event) {
1031 case IW_CM_EVENT_CLOSE: 1083 case IW_CM_EVENT_CLOSE:
1032 event = RDMA_CM_EVENT_DISCONNECTED; 1084 event.event = RDMA_CM_EVENT_DISCONNECTED;
1033 break; 1085 break;
1034 case IW_CM_EVENT_CONNECT_REPLY: 1086 case IW_CM_EVENT_CONNECT_REPLY:
1035 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; 1087 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
@@ -1037,20 +1089,21 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
1037 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr; 1089 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
1038 *sin = iw_event->remote_addr; 1090 *sin = iw_event->remote_addr;
1039 if (iw_event->status) 1091 if (iw_event->status)
1040 event = RDMA_CM_EVENT_REJECTED; 1092 event.event = RDMA_CM_EVENT_REJECTED;
1041 else 1093 else
1042 event = RDMA_CM_EVENT_ESTABLISHED; 1094 event.event = RDMA_CM_EVENT_ESTABLISHED;
1043 break; 1095 break;
1044 case IW_CM_EVENT_ESTABLISHED: 1096 case IW_CM_EVENT_ESTABLISHED:
1045 event = RDMA_CM_EVENT_ESTABLISHED; 1097 event.event = RDMA_CM_EVENT_ESTABLISHED;
1046 break; 1098 break;
1047 default: 1099 default:
1048 BUG_ON(1); 1100 BUG_ON(1);
1049 } 1101 }
1050 1102
1051 ret = cma_notify_user(id_priv, event, iw_event->status, 1103 event.status = iw_event->status;
1052 iw_event->private_data, 1104 event.param.conn.private_data = iw_event->private_data;
1053 iw_event->private_data_len); 1105 event.param.conn.private_data_len = iw_event->private_data_len;
1106 ret = id_priv->id.event_handler(&id_priv->id, &event);
1054 if (ret) { 1107 if (ret) {
1055 /* Destroy the CM ID by returning a non-zero value. */ 1108 /* Destroy the CM ID by returning a non-zero value. */
1056 id_priv->cm_id.iw = NULL; 1109 id_priv->cm_id.iw = NULL;
@@ -1071,6 +1124,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1071 struct rdma_id_private *listen_id, *conn_id; 1124 struct rdma_id_private *listen_id, *conn_id;
1072 struct sockaddr_in *sin; 1125 struct sockaddr_in *sin;
1073 struct net_device *dev = NULL; 1126 struct net_device *dev = NULL;
1127 struct rdma_cm_event event;
1074 int ret; 1128 int ret;
1075 1129
1076 listen_id = cm_id->context; 1130 listen_id = cm_id->context;
@@ -1124,9 +1178,11 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1124 sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr; 1178 sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
1125 *sin = iw_event->remote_addr; 1179 *sin = iw_event->remote_addr;
1126 1180
1127 ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0, 1181 memset(&event, 0, sizeof event);
1128 iw_event->private_data, 1182 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
1129 iw_event->private_data_len); 1183 event.param.conn.private_data = iw_event->private_data;
1184 event.param.conn.private_data_len = iw_event->private_data_len;
1185 ret = conn_id->id.event_handler(&conn_id->id, &event);
1130 if (ret) { 1186 if (ret) {
1131 /* User wants to destroy the CM ID */ 1187 /* User wants to destroy the CM ID */
1132 conn_id->cm_id.iw = NULL; 1188 conn_id->cm_id.iw = NULL;
@@ -1515,8 +1571,9 @@ static void addr_handler(int status, struct sockaddr *src_addr,
1515 struct rdma_dev_addr *dev_addr, void *context) 1571 struct rdma_dev_addr *dev_addr, void *context)
1516{ 1572{
1517 struct rdma_id_private *id_priv = context; 1573 struct rdma_id_private *id_priv = context;
1518 enum rdma_cm_event_type event; 1574 struct rdma_cm_event event;
1519 1575
1576 memset(&event, 0, sizeof event);
1520 atomic_inc(&id_priv->dev_remove); 1577 atomic_inc(&id_priv->dev_remove);
1521 1578
1522 /* 1579 /*
@@ -1536,14 +1593,15 @@ static void addr_handler(int status, struct sockaddr *src_addr,
1536 if (status) { 1593 if (status) {
1537 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND)) 1594 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
1538 goto out; 1595 goto out;
1539 event = RDMA_CM_EVENT_ADDR_ERROR; 1596 event.event = RDMA_CM_EVENT_ADDR_ERROR;
1597 event.status = status;
1540 } else { 1598 } else {
1541 memcpy(&id_priv->id.route.addr.src_addr, src_addr, 1599 memcpy(&id_priv->id.route.addr.src_addr, src_addr,
1542 ip_addr_size(src_addr)); 1600 ip_addr_size(src_addr));
1543 event = RDMA_CM_EVENT_ADDR_RESOLVED; 1601 event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
1544 } 1602 }
1545 1603
1546 if (cma_notify_user(id_priv, event, status, NULL, 0)) { 1604 if (id_priv->id.event_handler(&id_priv->id, &event)) {
1547 cma_exch(id_priv, CMA_DESTROYING); 1605 cma_exch(id_priv, CMA_DESTROYING);
1548 cma_release_remove(id_priv); 1606 cma_release_remove(id_priv);
1549 cma_deref_id(id_priv); 1607 cma_deref_id(id_priv);
@@ -1733,6 +1791,9 @@ static int cma_get_port(struct rdma_id_private *id_priv)
1733 case RDMA_PS_TCP: 1791 case RDMA_PS_TCP:
1734 ps = &tcp_ps; 1792 ps = &tcp_ps;
1735 break; 1793 break;
1794 case RDMA_PS_UDP:
1795 ps = &udp_ps;
1796 break;
1736 default: 1797 default:
1737 return -EPROTONOSUPPORT; 1798 return -EPROTONOSUPPORT;
1738 } 1799 }
@@ -1821,6 +1882,110 @@ static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
1821 return 0; 1882 return 0;
1822} 1883}
1823 1884
1885static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
1886 struct ib_cm_event *ib_event)
1887{
1888 struct rdma_id_private *id_priv = cm_id->context;
1889 struct rdma_cm_event event;
1890 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
1891 int ret = 0;
1892
1893 memset(&event, 0, sizeof event);
1894 atomic_inc(&id_priv->dev_remove);
1895 if (!cma_comp(id_priv, CMA_CONNECT))
1896 goto out;
1897
1898 switch (ib_event->event) {
1899 case IB_CM_SIDR_REQ_ERROR:
1900 event.event = RDMA_CM_EVENT_UNREACHABLE;
1901 event.status = -ETIMEDOUT;
1902 break;
1903 case IB_CM_SIDR_REP_RECEIVED:
1904 event.param.ud.private_data = ib_event->private_data;
1905 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
1906 if (rep->status != IB_SIDR_SUCCESS) {
1907 event.event = RDMA_CM_EVENT_UNREACHABLE;
1908 event.status = ib_event->param.sidr_rep_rcvd.status;
1909 break;
1910 }
1911 if (rep->qkey != RDMA_UD_QKEY) {
1912 event.event = RDMA_CM_EVENT_UNREACHABLE;
1913 event.status = -EINVAL;
1914 break;
1915 }
1916 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
1917 id_priv->id.route.path_rec,
1918 &event.param.ud.ah_attr);
1919 event.param.ud.qp_num = rep->qpn;
1920 event.param.ud.qkey = rep->qkey;
1921 event.event = RDMA_CM_EVENT_ESTABLISHED;
1922 event.status = 0;
1923 break;
1924 default:
1925 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
1926 ib_event->event);
1927 goto out;
1928 }
1929
1930 ret = id_priv->id.event_handler(&id_priv->id, &event);
1931 if (ret) {
1932 /* Destroy the CM ID by returning a non-zero value. */
1933 id_priv->cm_id.ib = NULL;
1934 cma_exch(id_priv, CMA_DESTROYING);
1935 cma_release_remove(id_priv);
1936 rdma_destroy_id(&id_priv->id);
1937 return ret;
1938 }
1939out:
1940 cma_release_remove(id_priv);
1941 return ret;
1942}
1943
1944static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
1945 struct rdma_conn_param *conn_param)
1946{
1947 struct ib_cm_sidr_req_param req;
1948 struct rdma_route *route;
1949 int ret;
1950
1951 req.private_data_len = sizeof(struct cma_hdr) +
1952 conn_param->private_data_len;
1953 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
1954 if (!req.private_data)
1955 return -ENOMEM;
1956
1957 if (conn_param->private_data && conn_param->private_data_len)
1958 memcpy((void *) req.private_data + sizeof(struct cma_hdr),
1959 conn_param->private_data, conn_param->private_data_len);
1960
1961 route = &id_priv->id.route;
1962 ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
1963 if (ret)
1964 goto out;
1965
1966 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
1967 cma_sidr_rep_handler, id_priv);
1968 if (IS_ERR(id_priv->cm_id.ib)) {
1969 ret = PTR_ERR(id_priv->cm_id.ib);
1970 goto out;
1971 }
1972
1973 req.path = route->path_rec;
1974 req.service_id = cma_get_service_id(id_priv->id.ps,
1975 &route->addr.dst_addr);
1976 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
1977 req.max_cm_retries = CMA_MAX_CM_RETRIES;
1978
1979 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
1980 if (ret) {
1981 ib_destroy_cm_id(id_priv->cm_id.ib);
1982 id_priv->cm_id.ib = NULL;
1983 }
1984out:
1985 kfree(req.private_data);
1986 return ret;
1987}
1988
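
A user-space sketch of the private-data layout used for the SIDR (UDP) request above: a routing header goes first and the caller's payload is appended after it, so private_data_len is sizeof(header) plus the payload length. The header struct below is a stand-in, not the real struct cma_hdr.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_hdr {                 /* illustrative only */
    uint8_t  version;
    uint8_t  ip_version;
    uint16_t port;
};

static void *build_private_data(const void *payload, size_t payload_len,
                                size_t *total_len)
{
    size_t len = sizeof(struct fake_hdr) + payload_len;
    uint8_t *buf = calloc(1, len);

    if (!buf)
        return NULL;
    /* format the header in place, then append the caller's bytes */
    ((struct fake_hdr *)buf)->version = 1;
    if (payload && payload_len)
        memcpy(buf + sizeof(struct fake_hdr), payload, payload_len);
    *total_len = len;
    return buf;
}

int main(void)
{
    size_t len;
    void *pd = build_private_data("hello", 5, &len);

    printf("private_data_len = %zu\n", len);
    free(pd);
    return 0;
}
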
1824static int cma_connect_ib(struct rdma_id_private *id_priv, 1989static int cma_connect_ib(struct rdma_id_private *id_priv,
1825 struct rdma_conn_param *conn_param) 1990 struct rdma_conn_param *conn_param)
1826{ 1991{
@@ -1860,7 +2025,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
1860 req.service_id = cma_get_service_id(id_priv->id.ps, 2025 req.service_id = cma_get_service_id(id_priv->id.ps,
1861 &route->addr.dst_addr); 2026 &route->addr.dst_addr);
1862 req.qp_num = id_priv->qp_num; 2027 req.qp_num = id_priv->qp_num;
1863 req.qp_type = id_priv->qp_type; 2028 req.qp_type = IB_QPT_RC;
1864 req.starting_psn = id_priv->seq_num; 2029 req.starting_psn = id_priv->seq_num;
1865 req.responder_resources = conn_param->responder_resources; 2030 req.responder_resources = conn_param->responder_resources;
1866 req.initiator_depth = conn_param->initiator_depth; 2031 req.initiator_depth = conn_param->initiator_depth;
@@ -1937,13 +2102,15 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
1937 2102
1938 if (!id->qp) { 2103 if (!id->qp) {
1939 id_priv->qp_num = conn_param->qp_num; 2104 id_priv->qp_num = conn_param->qp_num;
1940 id_priv->qp_type = conn_param->qp_type;
1941 id_priv->srq = conn_param->srq; 2105 id_priv->srq = conn_param->srq;
1942 } 2106 }
1943 2107
1944 switch (rdma_node_get_transport(id->device->node_type)) { 2108 switch (rdma_node_get_transport(id->device->node_type)) {
1945 case RDMA_TRANSPORT_IB: 2109 case RDMA_TRANSPORT_IB:
1946 ret = cma_connect_ib(id_priv, conn_param); 2110 if (id->ps == RDMA_PS_UDP)
2111 ret = cma_resolve_ib_udp(id_priv, conn_param);
2112 else
2113 ret = cma_connect_ib(id_priv, conn_param);
1947 break; 2114 break;
1948 case RDMA_TRANSPORT_IWARP: 2115 case RDMA_TRANSPORT_IWARP:
1949 ret = cma_connect_iw(id_priv, conn_param); 2116 ret = cma_connect_iw(id_priv, conn_param);
@@ -1966,11 +2133,25 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
1966 struct rdma_conn_param *conn_param) 2133 struct rdma_conn_param *conn_param)
1967{ 2134{
1968 struct ib_cm_rep_param rep; 2135 struct ib_cm_rep_param rep;
1969 int ret; 2136 struct ib_qp_attr qp_attr;
2137 int qp_attr_mask, ret;
1970 2138
1971 ret = cma_modify_qp_rtr(&id_priv->id); 2139 if (id_priv->id.qp) {
1972 if (ret) 2140 ret = cma_modify_qp_rtr(&id_priv->id);
1973 return ret; 2141 if (ret)
2142 goto out;
2143
2144 qp_attr.qp_state = IB_QPS_RTS;
2145 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr,
2146 &qp_attr_mask);
2147 if (ret)
2148 goto out;
2149
2150 qp_attr.max_rd_atomic = conn_param->initiator_depth;
2151 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
2152 if (ret)
2153 goto out;
2154 }
1974 2155
1975 memset(&rep, 0, sizeof rep); 2156 memset(&rep, 0, sizeof rep);
1976 rep.qp_num = id_priv->qp_num; 2157 rep.qp_num = id_priv->qp_num;
@@ -1985,7 +2166,9 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
1985 rep.rnr_retry_count = conn_param->rnr_retry_count; 2166 rep.rnr_retry_count = conn_param->rnr_retry_count;
1986 rep.srq = id_priv->srq ? 1 : 0; 2167 rep.srq = id_priv->srq ? 1 : 0;
1987 2168
1988 return ib_send_cm_rep(id_priv->cm_id.ib, &rep); 2169 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
2170out:
2171 return ret;
1989} 2172}
1990 2173
1991static int cma_accept_iw(struct rdma_id_private *id_priv, 2174static int cma_accept_iw(struct rdma_id_private *id_priv,
@@ -2010,6 +2193,24 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
2010 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); 2193 return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
2011} 2194}
2012 2195
2196static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
2197 enum ib_cm_sidr_status status,
2198 const void *private_data, int private_data_len)
2199{
2200 struct ib_cm_sidr_rep_param rep;
2201
2202 memset(&rep, 0, sizeof rep);
2203 rep.status = status;
2204 if (status == IB_SIDR_SUCCESS) {
2205 rep.qp_num = id_priv->qp_num;
2206 rep.qkey = RDMA_UD_QKEY;
2207 }
2208 rep.private_data = private_data;
2209 rep.private_data_len = private_data_len;
2210
2211 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
2212}
2213
2013int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 2214int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2014{ 2215{
2015 struct rdma_id_private *id_priv; 2216 struct rdma_id_private *id_priv;
@@ -2021,13 +2222,16 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2021 2222
2022 if (!id->qp && conn_param) { 2223 if (!id->qp && conn_param) {
2023 id_priv->qp_num = conn_param->qp_num; 2224 id_priv->qp_num = conn_param->qp_num;
2024 id_priv->qp_type = conn_param->qp_type;
2025 id_priv->srq = conn_param->srq; 2225 id_priv->srq = conn_param->srq;
2026 } 2226 }
2027 2227
2028 switch (rdma_node_get_transport(id->device->node_type)) { 2228 switch (rdma_node_get_transport(id->device->node_type)) {
2029 case RDMA_TRANSPORT_IB: 2229 case RDMA_TRANSPORT_IB:
2030 if (conn_param) 2230 if (id->ps == RDMA_PS_UDP)
2231 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
2232 conn_param->private_data,
2233 conn_param->private_data_len);
2234 else if (conn_param)
2031 ret = cma_accept_ib(id_priv, conn_param); 2235 ret = cma_accept_ib(id_priv, conn_param);
2032 else 2236 else
2033 ret = cma_rep_recv(id_priv); 2237 ret = cma_rep_recv(id_priv);
@@ -2051,6 +2255,27 @@ reject:
2051} 2255}
2052EXPORT_SYMBOL(rdma_accept); 2256EXPORT_SYMBOL(rdma_accept);
2053 2257
2258int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
2259{
2260 struct rdma_id_private *id_priv;
2261 int ret;
2262
2263 id_priv = container_of(id, struct rdma_id_private, id);
2264 if (!cma_comp(id_priv, CMA_CONNECT))
2265 return -EINVAL;
2266
2267 switch (id->device->node_type) {
2268 case RDMA_NODE_IB_CA:
2269 ret = ib_cm_notify(id_priv->cm_id.ib, event);
2270 break;
2271 default:
2272 ret = 0;
2273 break;
2274 }
2275 return ret;
2276}
2277EXPORT_SYMBOL(rdma_notify);
2278
2054int rdma_reject(struct rdma_cm_id *id, const void *private_data, 2279int rdma_reject(struct rdma_cm_id *id, const void *private_data,
2055 u8 private_data_len) 2280 u8 private_data_len)
2056{ 2281{
@@ -2063,9 +2288,13 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
2063 2288
2064 switch (rdma_node_get_transport(id->device->node_type)) { 2289 switch (rdma_node_get_transport(id->device->node_type)) {
2065 case RDMA_TRANSPORT_IB: 2290 case RDMA_TRANSPORT_IB:
2066 ret = ib_send_cm_rej(id_priv->cm_id.ib, 2291 if (id->ps == RDMA_PS_UDP)
2067 IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, 2292 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
2068 private_data, private_data_len); 2293 private_data, private_data_len);
2294 else
2295 ret = ib_send_cm_rej(id_priv->cm_id.ib,
2296 IB_CM_REJ_CONSUMER_DEFINED, NULL,
2297 0, private_data, private_data_len);
2069 break; 2298 break;
2070 case RDMA_TRANSPORT_IWARP: 2299 case RDMA_TRANSPORT_IWARP:
2071 ret = iw_cm_reject(id_priv->cm_id.iw, 2300 ret = iw_cm_reject(id_priv->cm_id.iw,
@@ -2136,6 +2365,7 @@ static void cma_add_one(struct ib_device *device)
2136 2365
2137static int cma_remove_id_dev(struct rdma_id_private *id_priv) 2366static int cma_remove_id_dev(struct rdma_id_private *id_priv)
2138{ 2367{
2368 struct rdma_cm_event event;
2139 enum cma_state state; 2369 enum cma_state state;
2140 2370
2141 /* Record that we want to remove the device */ 2371 /* Record that we want to remove the device */
@@ -2150,8 +2380,9 @@ static int cma_remove_id_dev(struct rdma_id_private *id_priv)
2150 if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL)) 2380 if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
2151 return 0; 2381 return 0;
2152 2382
2153 return cma_notify_user(id_priv, RDMA_CM_EVENT_DEVICE_REMOVAL, 2383 memset(&event, 0, sizeof event);
2154 0, NULL, 0); 2384 event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
2385 return id_priv->id.event_handler(&id_priv->id, &event);
2155} 2386}
2156 2387
2157static void cma_process_remove(struct cma_device *cma_dev) 2388static void cma_process_remove(struct cma_device *cma_dev)
@@ -2233,6 +2464,7 @@ static void cma_cleanup(void)
2233 destroy_workqueue(cma_wq); 2464 destroy_workqueue(cma_wq);
2234 idr_destroy(&sdp_ps); 2465 idr_destroy(&sdp_ps);
2235 idr_destroy(&tcp_ps); 2466 idr_destroy(&tcp_ps);
2467 idr_destroy(&udp_ps);
2236} 2468}
2237 2469
2238module_init(cma_init); 2470module_init(cma_init);
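The cma.c changes above route RDMA_PS_UDP identifiers through the IB CM's service ID resolution (SIDR) exchange rather than the connection-oriented REQ/REP path, and they size the SIDR request timeout as 1 << (CMA_CM_RESPONSE_TIMEOUT - 8) milliseconds. Below is a standalone userspace sketch, assuming CMA_CM_RESPONSE_TIMEOUT is the usual IB CM 5-bit timeout exponent (timeout = 4.096 us * 2^exponent), showing why that shift is a close millisecond approximation; it is an illustration, not kernel code.

#include <stdio.h>

int main(void)
{
        /* 4.096 us * 2^exp, expressed in milliseconds, versus the shift used above. */
        for (unsigned int exp = 14; exp <= 22; exp++) {
                double exact_ms = 4.096e-3 * (double)(1u << exp);
                unsigned int approx_ms = 1u << (exp - 8);

                printf("exp=%2u  exact=%9.1f ms  approx=%6u ms\n",
                       exp, exact_ms, approx_ms);
        }
        return 0;
}

The shift undershoots by roughly five percent (2^-8 ms = 3.906 us-per-unit versus the spec's 4.096 us), which is close enough for a response timeout.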
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 86a3b2d401db..8926a2bd4a87 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -394,20 +394,12 @@ EXPORT_SYMBOL(ib_destroy_fmr_pool);
394 */ 394 */
395int ib_flush_fmr_pool(struct ib_fmr_pool *pool) 395int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
396{ 396{
397 int serial; 397 int serial = atomic_inc_return(&pool->req_ser);
398
399 atomic_inc(&pool->req_ser);
400 /*
401 * It's OK if someone else bumps req_ser again here -- we'll
402 * just wait a little longer.
403 */
404 serial = atomic_read(&pool->req_ser);
405 398
406 wake_up_process(pool->thread); 399 wake_up_process(pool->thread);
407 400
408 if (wait_event_interruptible(pool->force_wait, 401 if (wait_event_interruptible(pool->force_wait,
409 atomic_read(&pool->flush_ser) - 402 atomic_read(&pool->flush_ser) - serial >= 0))
410 atomic_read(&pool->req_ser) >= 0))
411 return -EINTR; 403 return -EINTR;
412 404
413 return 0; 405 return 0;
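The fmr_pool.c hunk folds the increment-then-read pair into atomic_inc_return() and, more importantly, snapshots the request serial so the wait condition compares the flush counter against the caller's own request rather than against whatever req_ser has grown to since. A minimal userspace sketch of that snapshot-and-compare idea follows; every name in it is invented for illustration, and the subtraction-then-compare form is the same wrap-tolerant idiom the kernel uses for jiffies comparisons.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int req_ser;    /* bumped by every flush request  */
static atomic_int flush_ser;  /* bumped when a flush completes  */

static int request_flush(void)
{
        return atomic_fetch_add(&req_ser, 1) + 1;   /* like atomic_inc_return() */
}

static int flush_done(int my_serial)
{
        /* Compare against the snapshot taken at request time. */
        return atomic_load(&flush_ser) - my_serial >= 0;
}

int main(void)
{
        int serial = request_flush();

        printf("done yet? %d\n", flush_done(serial));   /* 0: nothing flushed yet */
        atomic_fetch_add(&flush_ser, 1);                /* pretend the worker ran */
        printf("done yet? %d\n", flush_done(serial));   /* 1: our request is done */
        return 0;
}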
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 15f38d94b3a8..5ed141ebd1c8 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -998,17 +998,17 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
998 998
999 mad_agent = mad_send_wr->send_buf.mad_agent; 999 mad_agent = mad_send_wr->send_buf.mad_agent;
1000 sge = mad_send_wr->sg_list; 1000 sge = mad_send_wr->sg_list;
1001 sge[0].addr = dma_map_single(mad_agent->device->dma_device, 1001 sge[0].addr = ib_dma_map_single(mad_agent->device,
1002 mad_send_wr->send_buf.mad, 1002 mad_send_wr->send_buf.mad,
1003 sge[0].length, 1003 sge[0].length,
1004 DMA_TO_DEVICE); 1004 DMA_TO_DEVICE);
1005 pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr); 1005 mad_send_wr->header_mapping = sge[0].addr;
1006 1006
1007 sge[1].addr = dma_map_single(mad_agent->device->dma_device, 1007 sge[1].addr = ib_dma_map_single(mad_agent->device,
1008 ib_get_payload(mad_send_wr), 1008 ib_get_payload(mad_send_wr),
1009 sge[1].length, 1009 sge[1].length,
1010 DMA_TO_DEVICE); 1010 DMA_TO_DEVICE);
1011 pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr); 1011 mad_send_wr->payload_mapping = sge[1].addr;
1012 1012
1013 spin_lock_irqsave(&qp_info->send_queue.lock, flags); 1013 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1014 if (qp_info->send_queue.count < qp_info->send_queue.max_active) { 1014 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
@@ -1026,12 +1026,12 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1026 } 1026 }
1027 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); 1027 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1028 if (ret) { 1028 if (ret) {
1029 dma_unmap_single(mad_agent->device->dma_device, 1029 ib_dma_unmap_single(mad_agent->device,
1030 pci_unmap_addr(mad_send_wr, header_mapping), 1030 mad_send_wr->header_mapping,
1031 sge[0].length, DMA_TO_DEVICE); 1031 sge[0].length, DMA_TO_DEVICE);
1032 dma_unmap_single(mad_agent->device->dma_device, 1032 ib_dma_unmap_single(mad_agent->device,
1033 pci_unmap_addr(mad_send_wr, payload_mapping), 1033 mad_send_wr->payload_mapping,
1034 sge[1].length, DMA_TO_DEVICE); 1034 sge[1].length, DMA_TO_DEVICE);
1035 } 1035 }
1036 return ret; 1036 return ret;
1037} 1037}
@@ -1850,11 +1850,11 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1850 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, 1850 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1851 mad_list); 1851 mad_list);
1852 recv = container_of(mad_priv_hdr, struct ib_mad_private, header); 1852 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1853 dma_unmap_single(port_priv->device->dma_device, 1853 ib_dma_unmap_single(port_priv->device,
1854 pci_unmap_addr(&recv->header, mapping), 1854 recv->header.mapping,
1855 sizeof(struct ib_mad_private) - 1855 sizeof(struct ib_mad_private) -
1856 sizeof(struct ib_mad_private_header), 1856 sizeof(struct ib_mad_private_header),
1857 DMA_FROM_DEVICE); 1857 DMA_FROM_DEVICE);
1858 1858
1859 /* Setup MAD receive work completion from "normal" work completion */ 1859 /* Setup MAD receive work completion from "normal" work completion */
1860 recv->header.wc = *wc; 1860 recv->header.wc = *wc;
@@ -2080,12 +2080,12 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2080 qp_info = send_queue->qp_info; 2080 qp_info = send_queue->qp_info;
2081 2081
2082retry: 2082retry:
2083 dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device, 2083 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2084 pci_unmap_addr(mad_send_wr, header_mapping), 2084 mad_send_wr->header_mapping,
2085 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); 2085 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2086 dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device, 2086 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2087 pci_unmap_addr(mad_send_wr, payload_mapping), 2087 mad_send_wr->payload_mapping,
2088 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); 2088 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2089 queued_send_wr = NULL; 2089 queued_send_wr = NULL;
2090 spin_lock_irqsave(&send_queue->lock, flags); 2090 spin_lock_irqsave(&send_queue->lock, flags);
2091 list_del(&mad_list->list); 2091 list_del(&mad_list->list);
@@ -2528,13 +2528,12 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2528 break; 2528 break;
2529 } 2529 }
2530 } 2530 }
2531 sg_list.addr = dma_map_single(qp_info->port_priv-> 2531 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2532 device->dma_device, 2532 &mad_priv->grh,
2533 &mad_priv->grh, 2533 sizeof *mad_priv -
2534 sizeof *mad_priv - 2534 sizeof mad_priv->header,
2535 sizeof mad_priv->header, 2535 DMA_FROM_DEVICE);
2536 DMA_FROM_DEVICE); 2536 mad_priv->header.mapping = sg_list.addr;
2537 pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
2538 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list; 2537 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2539 mad_priv->header.mad_list.mad_queue = recv_queue; 2538 mad_priv->header.mad_list.mad_queue = recv_queue;
2540 2539
@@ -2549,12 +2548,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2549 list_del(&mad_priv->header.mad_list.list); 2548 list_del(&mad_priv->header.mad_list.list);
2550 recv_queue->count--; 2549 recv_queue->count--;
2551 spin_unlock_irqrestore(&recv_queue->lock, flags); 2550 spin_unlock_irqrestore(&recv_queue->lock, flags);
2552 dma_unmap_single(qp_info->port_priv->device->dma_device, 2551 ib_dma_unmap_single(qp_info->port_priv->device,
2553 pci_unmap_addr(&mad_priv->header, 2552 mad_priv->header.mapping,
2554 mapping), 2553 sizeof *mad_priv -
2555 sizeof *mad_priv - 2554 sizeof mad_priv->header,
2556 sizeof mad_priv->header, 2555 DMA_FROM_DEVICE);
2557 DMA_FROM_DEVICE);
2558 kmem_cache_free(ib_mad_cache, mad_priv); 2556 kmem_cache_free(ib_mad_cache, mad_priv);
2559 printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret); 2557 printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
2560 break; 2558 break;
@@ -2586,11 +2584,11 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2586 /* Remove from posted receive MAD list */ 2584 /* Remove from posted receive MAD list */
2587 list_del(&mad_list->list); 2585 list_del(&mad_list->list);
2588 2586
2589 dma_unmap_single(qp_info->port_priv->device->dma_device, 2587 ib_dma_unmap_single(qp_info->port_priv->device,
2590 pci_unmap_addr(&recv->header, mapping), 2588 recv->header.mapping,
2591 sizeof(struct ib_mad_private) - 2589 sizeof(struct ib_mad_private) -
2592 sizeof(struct ib_mad_private_header), 2590 sizeof(struct ib_mad_private_header),
2593 DMA_FROM_DEVICE); 2591 DMA_FROM_DEVICE);
2594 kmem_cache_free(ib_mad_cache, recv); 2592 kmem_cache_free(ib_mad_cache, recv);
2595 } 2593 }
2596 2594
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d5548e73e068..de89717f49fe 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -73,7 +73,7 @@ struct ib_mad_private_header {
73 struct ib_mad_list_head mad_list; 73 struct ib_mad_list_head mad_list;
74 struct ib_mad_recv_wc recv_wc; 74 struct ib_mad_recv_wc recv_wc;
75 struct ib_wc wc; 75 struct ib_wc wc;
76 DECLARE_PCI_UNMAP_ADDR(mapping) 76 u64 mapping;
77} __attribute__ ((packed)); 77} __attribute__ ((packed));
78 78
79struct ib_mad_private { 79struct ib_mad_private {
@@ -126,8 +126,8 @@ struct ib_mad_send_wr_private {
126 struct list_head agent_list; 126 struct list_head agent_list;
127 struct ib_mad_agent_private *mad_agent_priv; 127 struct ib_mad_agent_private *mad_agent_priv;
128 struct ib_mad_send_buf send_buf; 128 struct ib_mad_send_buf send_buf;
129 DECLARE_PCI_UNMAP_ADDR(header_mapping) 129 u64 header_mapping;
130 DECLARE_PCI_UNMAP_ADDR(payload_mapping) 130 u64 payload_mapping;
131 struct ib_send_wr send_wr; 131 struct ib_send_wr send_wr;
132 struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; 132 struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
133 __be64 tid; 133 __be64 tid;
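The mad.c and mad_priv.h hunks above stop calling dma_map_single() on device->dma_device directly and go through the new ib_dma_*() wrappers, storing the returned handle in a plain u64 because it may not be a bus address at all. The sketch below shows, in spirit, the dispatch those wrappers rely on: prefer per-device mapping ops when a driver supplies them, otherwise fall back to the generic path. The types here are simplified stand-ins, not the real ib_verbs.h definitions.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct fake_dev;

struct fake_dma_ops {
        uint64_t (*map_single)(struct fake_dev *dev, void *addr, size_t size);
};

struct fake_dev {
        const struct fake_dma_ops *dma_ops;   /* NULL means "use the default path" */
};

/* Default path: stand-in for a real bus-address mapping. */
static uint64_t default_map_single(struct fake_dev *dev, void *addr, size_t size)
{
        (void)dev; (void)size;
        return (uint64_t)(uintptr_t)addr ^ 0xf0f0f0f0u;
}

/* Wrapper: what ib_dma_map_single() does in spirit. */
static uint64_t fake_dma_map_single(struct fake_dev *dev, void *addr, size_t size)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_single(dev, addr, size);
        return default_map_single(dev, addr, size);
}

/* A software device hands back the CPU address unchanged. */
static uint64_t sw_map_single(struct fake_dev *dev, void *addr, size_t size)
{
        (void)dev; (void)size;
        return (uint64_t)(uintptr_t)addr;
}

static const struct fake_dma_ops sw_ops = { sw_map_single };

int main(void)
{
        int buf;
        struct fake_dev hw = { NULL }, sw = { &sw_ops };

        /* Either way the caller stores the result in a plain 64-bit handle. */
        printf("hw handle %#llx\n",
               (unsigned long long)fake_dma_map_single(&hw, &buf, sizeof buf));
        printf("sw handle %#llx\n",
               (unsigned long long)fake_dma_map_single(&sw, &buf, sizeof buf));
        return 0;
}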
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
new file mode 100644
index 000000000000..81a5cdc5733a
--- /dev/null
+++ b/drivers/infiniband/core/ucma.c
@@ -0,0 +1,874 @@
1/*
2 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/completion.h>
34#include <linux/mutex.h>
35#include <linux/poll.h>
36#include <linux/idr.h>
37#include <linux/in.h>
38#include <linux/in6.h>
39#include <linux/miscdevice.h>
40
41#include <rdma/rdma_user_cm.h>
42#include <rdma/ib_marshall.h>
43#include <rdma/rdma_cm.h>
44
45MODULE_AUTHOR("Sean Hefty");
46MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
47MODULE_LICENSE("Dual BSD/GPL");
48
49enum {
50 UCMA_MAX_BACKLOG = 128
51};
52
53struct ucma_file {
54 struct mutex mut;
55 struct file *filp;
56 struct list_head ctx_list;
57 struct list_head event_list;
58 wait_queue_head_t poll_wait;
59};
60
61struct ucma_context {
62 int id;
63 struct completion comp;
64 atomic_t ref;
65 int events_reported;
66 int backlog;
67
68 struct ucma_file *file;
69 struct rdma_cm_id *cm_id;
70 u64 uid;
71
72 struct list_head list;
73};
74
75struct ucma_event {
76 struct ucma_context *ctx;
77 struct list_head list;
78 struct rdma_cm_id *cm_id;
79 struct rdma_ucm_event_resp resp;
80};
81
82static DEFINE_MUTEX(mut);
83static DEFINE_IDR(ctx_idr);
84
85static inline struct ucma_context *_ucma_find_context(int id,
86 struct ucma_file *file)
87{
88 struct ucma_context *ctx;
89
90 ctx = idr_find(&ctx_idr, id);
91 if (!ctx)
92 ctx = ERR_PTR(-ENOENT);
93 else if (ctx->file != file)
94 ctx = ERR_PTR(-EINVAL);
95 return ctx;
96}
97
98static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
99{
100 struct ucma_context *ctx;
101
102 mutex_lock(&mut);
103 ctx = _ucma_find_context(id, file);
104 if (!IS_ERR(ctx))
105 atomic_inc(&ctx->ref);
106 mutex_unlock(&mut);
107 return ctx;
108}
109
110static void ucma_put_ctx(struct ucma_context *ctx)
111{
112 if (atomic_dec_and_test(&ctx->ref))
113 complete(&ctx->comp);
114}
115
116static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
117{
118 struct ucma_context *ctx;
119 int ret;
120
121 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
122 if (!ctx)
123 return NULL;
124
125 atomic_set(&ctx->ref, 1);
126 init_completion(&ctx->comp);
127 ctx->file = file;
128
129 do {
130 ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
131 if (!ret)
132 goto error;
133
134 mutex_lock(&mut);
135 ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
136 mutex_unlock(&mut);
137 } while (ret == -EAGAIN);
138
139 if (ret)
140 goto error;
141
142 list_add_tail(&ctx->list, &file->ctx_list);
143 return ctx;
144
145error:
146 kfree(ctx);
147 return NULL;
148}
149
150static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
151 struct rdma_conn_param *src)
152{
153 if (src->private_data_len)
154 memcpy(dst->private_data, src->private_data,
155 src->private_data_len);
156 dst->private_data_len = src->private_data_len;
157 dst->responder_resources =src->responder_resources;
158 dst->initiator_depth = src->initiator_depth;
159 dst->flow_control = src->flow_control;
160 dst->retry_count = src->retry_count;
161 dst->rnr_retry_count = src->rnr_retry_count;
162 dst->srq = src->srq;
163 dst->qp_num = src->qp_num;
164}
165
166static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
167 struct rdma_ud_param *src)
168{
169 if (src->private_data_len)
170 memcpy(dst->private_data, src->private_data,
171 src->private_data_len);
172 dst->private_data_len = src->private_data_len;
173 ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
174 dst->qp_num = src->qp_num;
175 dst->qkey = src->qkey;
176}
177
178static void ucma_set_event_context(struct ucma_context *ctx,
179 struct rdma_cm_event *event,
180 struct ucma_event *uevent)
181{
182 uevent->ctx = ctx;
183 uevent->resp.uid = ctx->uid;
184 uevent->resp.id = ctx->id;
185}
186
187static int ucma_event_handler(struct rdma_cm_id *cm_id,
188 struct rdma_cm_event *event)
189{
190 struct ucma_event *uevent;
191 struct ucma_context *ctx = cm_id->context;
192 int ret = 0;
193
194 uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
195 if (!uevent)
196 return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;
197
198 uevent->cm_id = cm_id;
199 ucma_set_event_context(ctx, event, uevent);
200 uevent->resp.event = event->event;
201 uevent->resp.status = event->status;
202 if (cm_id->ps == RDMA_PS_UDP)
203 ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
204 else
205 ucma_copy_conn_event(&uevent->resp.param.conn,
206 &event->param.conn);
207
208 mutex_lock(&ctx->file->mut);
209 if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
210 if (!ctx->backlog) {
211 ret = -EDQUOT;
212 goto out;
213 }
214 ctx->backlog--;
215 }
216 list_add_tail(&uevent->list, &ctx->file->event_list);
217 wake_up_interruptible(&ctx->file->poll_wait);
218out:
219 mutex_unlock(&ctx->file->mut);
220 return ret;
221}
222
223static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
224 int in_len, int out_len)
225{
226 struct ucma_context *ctx;
227 struct rdma_ucm_get_event cmd;
228 struct ucma_event *uevent;
229 int ret = 0;
230 DEFINE_WAIT(wait);
231
232 if (out_len < sizeof uevent->resp)
233 return -ENOSPC;
234
235 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
236 return -EFAULT;
237
238 mutex_lock(&file->mut);
239 while (list_empty(&file->event_list)) {
240 if (file->filp->f_flags & O_NONBLOCK) {
241 ret = -EAGAIN;
242 break;
243 }
244
245 if (signal_pending(current)) {
246 ret = -ERESTARTSYS;
247 break;
248 }
249
250 prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE);
251 mutex_unlock(&file->mut);
252 schedule();
253 mutex_lock(&file->mut);
254 finish_wait(&file->poll_wait, &wait);
255 }
256
257 if (ret)
258 goto done;
259
260 uevent = list_entry(file->event_list.next, struct ucma_event, list);
261
262 if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
263 ctx = ucma_alloc_ctx(file);
264 if (!ctx) {
265 ret = -ENOMEM;
266 goto done;
267 }
268 uevent->ctx->backlog++;
269 ctx->cm_id = uevent->cm_id;
270 ctx->cm_id->context = ctx;
271 uevent->resp.id = ctx->id;
272 }
273
274 if (copy_to_user((void __user *)(unsigned long)cmd.response,
275 &uevent->resp, sizeof uevent->resp)) {
276 ret = -EFAULT;
277 goto done;
278 }
279
280 list_del(&uevent->list);
281 uevent->ctx->events_reported++;
282 kfree(uevent);
283done:
284 mutex_unlock(&file->mut);
285 return ret;
286}
287
288static ssize_t ucma_create_id(struct ucma_file *file,
289 const char __user *inbuf,
290 int in_len, int out_len)
291{
292 struct rdma_ucm_create_id cmd;
293 struct rdma_ucm_create_id_resp resp;
294 struct ucma_context *ctx;
295 int ret;
296
297 if (out_len < sizeof(resp))
298 return -ENOSPC;
299
300 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
301 return -EFAULT;
302
303 mutex_lock(&file->mut);
304 ctx = ucma_alloc_ctx(file);
305 mutex_unlock(&file->mut);
306 if (!ctx)
307 return -ENOMEM;
308
309 ctx->uid = cmd.uid;
310 ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
311 if (IS_ERR(ctx->cm_id)) {
312 ret = PTR_ERR(ctx->cm_id);
313 goto err1;
314 }
315
316 resp.id = ctx->id;
317 if (copy_to_user((void __user *)(unsigned long)cmd.response,
318 &resp, sizeof(resp))) {
319 ret = -EFAULT;
320 goto err2;
321 }
322 return 0;
323
324err2:
325 rdma_destroy_id(ctx->cm_id);
326err1:
327 mutex_lock(&mut);
328 idr_remove(&ctx_idr, ctx->id);
329 mutex_unlock(&mut);
330 kfree(ctx);
331 return ret;
332}
333
334static void ucma_cleanup_events(struct ucma_context *ctx)
335{
336 struct ucma_event *uevent, *tmp;
337
338 list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
339 if (uevent->ctx != ctx)
340 continue;
341
342 list_del(&uevent->list);
343
344 /* clear incoming connections. */
345 if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
346 rdma_destroy_id(uevent->cm_id);
347
348 kfree(uevent);
349 }
350}
351
352static int ucma_free_ctx(struct ucma_context *ctx)
353{
354 int events_reported;
355
356 /* No new events will be generated after destroying the id. */
357 rdma_destroy_id(ctx->cm_id);
358
359 /* Cleanup events not yet reported to the user. */
360 mutex_lock(&ctx->file->mut);
361 ucma_cleanup_events(ctx);
362 list_del(&ctx->list);
363 mutex_unlock(&ctx->file->mut);
364
365 events_reported = ctx->events_reported;
366 kfree(ctx);
367 return events_reported;
368}
369
370static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
371 int in_len, int out_len)
372{
373 struct rdma_ucm_destroy_id cmd;
374 struct rdma_ucm_destroy_id_resp resp;
375 struct ucma_context *ctx;
376 int ret = 0;
377
378 if (out_len < sizeof(resp))
379 return -ENOSPC;
380
381 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
382 return -EFAULT;
383
384 mutex_lock(&mut);
385 ctx = _ucma_find_context(cmd.id, file);
386 if (!IS_ERR(ctx))
387 idr_remove(&ctx_idr, ctx->id);
388 mutex_unlock(&mut);
389
390 if (IS_ERR(ctx))
391 return PTR_ERR(ctx);
392
393 ucma_put_ctx(ctx);
394 wait_for_completion(&ctx->comp);
395 resp.events_reported = ucma_free_ctx(ctx);
396
397 if (copy_to_user((void __user *)(unsigned long)cmd.response,
398 &resp, sizeof(resp)))
399 ret = -EFAULT;
400
401 return ret;
402}
403
404static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
405 int in_len, int out_len)
406{
407 struct rdma_ucm_bind_addr cmd;
408 struct ucma_context *ctx;
409 int ret;
410
411 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
412 return -EFAULT;
413
414 ctx = ucma_get_ctx(file, cmd.id);
415 if (IS_ERR(ctx))
416 return PTR_ERR(ctx);
417
418 ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
419 ucma_put_ctx(ctx);
420 return ret;
421}
422
423static ssize_t ucma_resolve_addr(struct ucma_file *file,
424 const char __user *inbuf,
425 int in_len, int out_len)
426{
427 struct rdma_ucm_resolve_addr cmd;
428 struct ucma_context *ctx;
429 int ret;
430
431 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
432 return -EFAULT;
433
434 ctx = ucma_get_ctx(file, cmd.id);
435 if (IS_ERR(ctx))
436 return PTR_ERR(ctx);
437
438 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
439 (struct sockaddr *) &cmd.dst_addr,
440 cmd.timeout_ms);
441 ucma_put_ctx(ctx);
442 return ret;
443}
444
445static ssize_t ucma_resolve_route(struct ucma_file *file,
446 const char __user *inbuf,
447 int in_len, int out_len)
448{
449 struct rdma_ucm_resolve_route cmd;
450 struct ucma_context *ctx;
451 int ret;
452
453 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
454 return -EFAULT;
455
456 ctx = ucma_get_ctx(file, cmd.id);
457 if (IS_ERR(ctx))
458 return PTR_ERR(ctx);
459
460 ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
461 ucma_put_ctx(ctx);
462 return ret;
463}
464
465static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
466 struct rdma_route *route)
467{
468 struct rdma_dev_addr *dev_addr;
469
470 resp->num_paths = route->num_paths;
471 switch (route->num_paths) {
472 case 0:
473 dev_addr = &route->addr.dev_addr;
474 ib_addr_get_dgid(dev_addr,
475 (union ib_gid *) &resp->ib_route[0].dgid);
476 ib_addr_get_sgid(dev_addr,
477 (union ib_gid *) &resp->ib_route[0].sgid);
478 resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
479 break;
480 case 2:
481 ib_copy_path_rec_to_user(&resp->ib_route[1],
482 &route->path_rec[1]);
483 /* fall through */
484 case 1:
485 ib_copy_path_rec_to_user(&resp->ib_route[0],
486 &route->path_rec[0]);
487 break;
488 default:
489 break;
490 }
491}
492
493static ssize_t ucma_query_route(struct ucma_file *file,
494 const char __user *inbuf,
495 int in_len, int out_len)
496{
497 struct rdma_ucm_query_route cmd;
498 struct rdma_ucm_query_route_resp resp;
499 struct ucma_context *ctx;
500 struct sockaddr *addr;
501 int ret = 0;
502
503 if (out_len < sizeof(resp))
504 return -ENOSPC;
505
506 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
507 return -EFAULT;
508
509 ctx = ucma_get_ctx(file, cmd.id);
510 if (IS_ERR(ctx))
511 return PTR_ERR(ctx);
512
513 memset(&resp, 0, sizeof resp);
514 addr = &ctx->cm_id->route.addr.src_addr;
515 memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
516 sizeof(struct sockaddr_in) :
517 sizeof(struct sockaddr_in6));
518 addr = &ctx->cm_id->route.addr.dst_addr;
519 memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
520 sizeof(struct sockaddr_in) :
521 sizeof(struct sockaddr_in6));
522 if (!ctx->cm_id->device)
523 goto out;
524
525 resp.node_guid = ctx->cm_id->device->node_guid;
526 resp.port_num = ctx->cm_id->port_num;
527 switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
528 case RDMA_TRANSPORT_IB:
529 ucma_copy_ib_route(&resp, &ctx->cm_id->route);
530 break;
531 default:
532 break;
533 }
534
535out:
536 if (copy_to_user((void __user *)(unsigned long)cmd.response,
537 &resp, sizeof(resp)))
538 ret = -EFAULT;
539
540 ucma_put_ctx(ctx);
541 return ret;
542}
543
544static void ucma_copy_conn_param(struct rdma_conn_param *dst,
545 struct rdma_ucm_conn_param *src)
546{
547 dst->private_data = src->private_data;
548 dst->private_data_len = src->private_data_len;
549 dst->responder_resources =src->responder_resources;
550 dst->initiator_depth = src->initiator_depth;
551 dst->flow_control = src->flow_control;
552 dst->retry_count = src->retry_count;
553 dst->rnr_retry_count = src->rnr_retry_count;
554 dst->srq = src->srq;
555 dst->qp_num = src->qp_num;
556}
557
558static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
559 int in_len, int out_len)
560{
561 struct rdma_ucm_connect cmd;
562 struct rdma_conn_param conn_param;
563 struct ucma_context *ctx;
564 int ret;
565
566 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
567 return -EFAULT;
568
569 if (!cmd.conn_param.valid)
570 return -EINVAL;
571
572 ctx = ucma_get_ctx(file, cmd.id);
573 if (IS_ERR(ctx))
574 return PTR_ERR(ctx);
575
576 ucma_copy_conn_param(&conn_param, &cmd.conn_param);
577 ret = rdma_connect(ctx->cm_id, &conn_param);
578 ucma_put_ctx(ctx);
579 return ret;
580}
581
582static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
583 int in_len, int out_len)
584{
585 struct rdma_ucm_listen cmd;
586 struct ucma_context *ctx;
587 int ret;
588
589 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
590 return -EFAULT;
591
592 ctx = ucma_get_ctx(file, cmd.id);
593 if (IS_ERR(ctx))
594 return PTR_ERR(ctx);
595
596 ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
597 cmd.backlog : UCMA_MAX_BACKLOG;
598 ret = rdma_listen(ctx->cm_id, ctx->backlog);
599 ucma_put_ctx(ctx);
600 return ret;
601}
602
603static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
604 int in_len, int out_len)
605{
606 struct rdma_ucm_accept cmd;
607 struct rdma_conn_param conn_param;
608 struct ucma_context *ctx;
609 int ret;
610
611 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
612 return -EFAULT;
613
614 ctx = ucma_get_ctx(file, cmd.id);
615 if (IS_ERR(ctx))
616 return PTR_ERR(ctx);
617
618 if (cmd.conn_param.valid) {
619 ctx->uid = cmd.uid;
620 ucma_copy_conn_param(&conn_param, &cmd.conn_param);
621 ret = rdma_accept(ctx->cm_id, &conn_param);
622 } else
623 ret = rdma_accept(ctx->cm_id, NULL);
624
625 ucma_put_ctx(ctx);
626 return ret;
627}
628
629static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
630 int in_len, int out_len)
631{
632 struct rdma_ucm_reject cmd;
633 struct ucma_context *ctx;
634 int ret;
635
636 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
637 return -EFAULT;
638
639 ctx = ucma_get_ctx(file, cmd.id);
640 if (IS_ERR(ctx))
641 return PTR_ERR(ctx);
642
643 ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
644 ucma_put_ctx(ctx);
645 return ret;
646}
647
648static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
649 int in_len, int out_len)
650{
651 struct rdma_ucm_disconnect cmd;
652 struct ucma_context *ctx;
653 int ret;
654
655 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
656 return -EFAULT;
657
658 ctx = ucma_get_ctx(file, cmd.id);
659 if (IS_ERR(ctx))
660 return PTR_ERR(ctx);
661
662 ret = rdma_disconnect(ctx->cm_id);
663 ucma_put_ctx(ctx);
664 return ret;
665}
666
667static ssize_t ucma_init_qp_attr(struct ucma_file *file,
668 const char __user *inbuf,
669 int in_len, int out_len)
670{
671 struct rdma_ucm_init_qp_attr cmd;
672 struct ib_uverbs_qp_attr resp;
673 struct ucma_context *ctx;
674 struct ib_qp_attr qp_attr;
675 int ret;
676
677 if (out_len < sizeof(resp))
678 return -ENOSPC;
679
680 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
681 return -EFAULT;
682
683 ctx = ucma_get_ctx(file, cmd.id);
684 if (IS_ERR(ctx))
685 return PTR_ERR(ctx);
686
687 resp.qp_attr_mask = 0;
688 memset(&qp_attr, 0, sizeof qp_attr);
689 qp_attr.qp_state = cmd.qp_state;
690 ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
691 if (ret)
692 goto out;
693
694 ib_copy_qp_attr_to_user(&resp, &qp_attr);
695 if (copy_to_user((void __user *)(unsigned long)cmd.response,
696 &resp, sizeof(resp)))
697 ret = -EFAULT;
698
699out:
700 ucma_put_ctx(ctx);
701 return ret;
702}
703
704static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
705 int in_len, int out_len)
706{
707 struct rdma_ucm_notify cmd;
708 struct ucma_context *ctx;
709 int ret;
710
711 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
712 return -EFAULT;
713
714 ctx = ucma_get_ctx(file, cmd.id);
715 if (IS_ERR(ctx))
716 return PTR_ERR(ctx);
717
718 ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
719 ucma_put_ctx(ctx);
720 return ret;
721}
722
723static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
724 const char __user *inbuf,
725 int in_len, int out_len) = {
726 [RDMA_USER_CM_CMD_CREATE_ID] = ucma_create_id,
727 [RDMA_USER_CM_CMD_DESTROY_ID] = ucma_destroy_id,
728 [RDMA_USER_CM_CMD_BIND_ADDR] = ucma_bind_addr,
729 [RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr,
730 [RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
731 [RDMA_USER_CM_CMD_QUERY_ROUTE] = ucma_query_route,
732 [RDMA_USER_CM_CMD_CONNECT] = ucma_connect,
733 [RDMA_USER_CM_CMD_LISTEN] = ucma_listen,
734 [RDMA_USER_CM_CMD_ACCEPT] = ucma_accept,
735 [RDMA_USER_CM_CMD_REJECT] = ucma_reject,
736 [RDMA_USER_CM_CMD_DISCONNECT] = ucma_disconnect,
737 [RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr,
738 [RDMA_USER_CM_CMD_GET_EVENT] = ucma_get_event,
739 [RDMA_USER_CM_CMD_GET_OPTION] = NULL,
740 [RDMA_USER_CM_CMD_SET_OPTION] = NULL,
741 [RDMA_USER_CM_CMD_NOTIFY] = ucma_notify,
742};
743
744static ssize_t ucma_write(struct file *filp, const char __user *buf,
745 size_t len, loff_t *pos)
746{
747 struct ucma_file *file = filp->private_data;
748 struct rdma_ucm_cmd_hdr hdr;
749 ssize_t ret;
750
751 if (len < sizeof(hdr))
752 return -EINVAL;
753
754 if (copy_from_user(&hdr, buf, sizeof(hdr)))
755 return -EFAULT;
756
757 if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
758 return -EINVAL;
759
760 if (hdr.in + sizeof(hdr) > len)
761 return -EINVAL;
762
763 if (!ucma_cmd_table[hdr.cmd])
764 return -ENOSYS;
765
766 ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
767 if (!ret)
768 ret = len;
769
770 return ret;
771}
772
773static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
774{
775 struct ucma_file *file = filp->private_data;
776 unsigned int mask = 0;
777
778 poll_wait(filp, &file->poll_wait, wait);
779
780 if (!list_empty(&file->event_list))
781 mask = POLLIN | POLLRDNORM;
782
783 return mask;
784}
785
786static int ucma_open(struct inode *inode, struct file *filp)
787{
788 struct ucma_file *file;
789
790 file = kmalloc(sizeof *file, GFP_KERNEL);
791 if (!file)
792 return -ENOMEM;
793
794 INIT_LIST_HEAD(&file->event_list);
795 INIT_LIST_HEAD(&file->ctx_list);
796 init_waitqueue_head(&file->poll_wait);
797 mutex_init(&file->mut);
798
799 filp->private_data = file;
800 file->filp = filp;
801 return 0;
802}
803
804static int ucma_close(struct inode *inode, struct file *filp)
805{
806 struct ucma_file *file = filp->private_data;
807 struct ucma_context *ctx, *tmp;
808
809 mutex_lock(&file->mut);
810 list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
811 mutex_unlock(&file->mut);
812
813 mutex_lock(&mut);
814 idr_remove(&ctx_idr, ctx->id);
815 mutex_unlock(&mut);
816
817 ucma_free_ctx(ctx);
818 mutex_lock(&file->mut);
819 }
820 mutex_unlock(&file->mut);
821 kfree(file);
822 return 0;
823}
824
825static struct file_operations ucma_fops = {
826 .owner = THIS_MODULE,
827 .open = ucma_open,
828 .release = ucma_close,
829 .write = ucma_write,
830 .poll = ucma_poll,
831};
832
833static struct miscdevice ucma_misc = {
834 .minor = MISC_DYNAMIC_MINOR,
835 .name = "rdma_cm",
836 .fops = &ucma_fops,
837};
838
839static ssize_t show_abi_version(struct device *dev,
840 struct device_attribute *attr,
841 char *buf)
842{
843 return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
844}
845static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
846
847static int __init ucma_init(void)
848{
849 int ret;
850
851 ret = misc_register(&ucma_misc);
852 if (ret)
853 return ret;
854
855 ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
856 if (ret) {
857 printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
858 goto err;
859 }
860 return 0;
861err:
862 misc_deregister(&ucma_misc);
863 return ret;
864}
865
866static void __exit ucma_cleanup(void)
867{
868 device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
869 misc_deregister(&ucma_misc);
870 idr_destroy(&ctx_idr);
871}
872
873module_init(ucma_init);
874module_exit(ucma_cleanup);
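ucma.c above exposes the RDMA CM to user space through a misc character device whose write() handler reads a small command header, bounds-checks the command number, and dispatches through ucma_cmd_table. Here is a self-contained sketch of that table-dispatch shape; every name below is invented for illustration.

#include <stdio.h>
#include <stddef.h>

struct cmd_hdr {
        unsigned int cmd;       /* index into the handler table */
};

static int do_ping(const struct cmd_hdr *hdr) { (void)hdr; puts("ping"); return 0; }
static int do_echo(const struct cmd_hdr *hdr) { (void)hdr; puts("echo"); return 0; }

static int (*cmd_table[])(const struct cmd_hdr *) = {
        [0] = do_ping,
        [1] = do_echo,
        [2] = NULL,             /* reserved, like the GET/SET_OPTION slots above */
};

static int dispatch(const struct cmd_hdr *hdr)
{
        if (hdr->cmd >= sizeof(cmd_table) / sizeof(cmd_table[0]))
                return -1;      /* the driver returns -EINVAL here */
        if (!cmd_table[hdr->cmd])
                return -2;      /* the driver returns -ENOSYS here */
        return cmd_table[hdr->cmd](hdr);
}

int main(void)
{
        struct cmd_hdr hdr = { .cmd = 1 };

        return dispatch(&hdr);
}

Keeping NULL slots in the table lets the ABI reserve command numbers without renumbering later additions, which is why the unimplemented GET_OPTION and SET_OPTION entries stay in place.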
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index ce46b13ae02b..5440da0e59b4 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -32,8 +32,8 @@
32 32
33#include <rdma/ib_marshall.h> 33#include <rdma/ib_marshall.h>
34 34
35static void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst, 35void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
36 struct ib_ah_attr *src) 36 struct ib_ah_attr *src)
37{ 37{
38 memcpy(dst->grh.dgid, src->grh.dgid.raw, sizeof src->grh.dgid); 38 memcpy(dst->grh.dgid, src->grh.dgid.raw, sizeof src->grh.dgid);
39 dst->grh.flow_label = src->grh.flow_label; 39 dst->grh.flow_label = src->grh.flow_label;
@@ -47,6 +47,7 @@ static void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
47 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0; 47 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
48 dst->port_num = src->port_num; 48 dst->port_num = src->port_num;
49} 49}
50EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
50 51
51void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, 52void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
52 struct ib_qp_attr *src) 53 struct ib_qp_attr *src)
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index db12cc0841df..c95fe952abd5 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -52,8 +52,8 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
52 int i; 52 int i;
53 53
54 list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) { 54 list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
55 dma_unmap_sg(dev->dma_device, chunk->page_list, 55 ib_dma_unmap_sg(dev, chunk->page_list,
56 chunk->nents, DMA_BIDIRECTIONAL); 56 chunk->nents, DMA_BIDIRECTIONAL);
57 for (i = 0; i < chunk->nents; ++i) { 57 for (i = 0; i < chunk->nents; ++i) {
58 if (umem->writable && dirty) 58 if (umem->writable && dirty)
59 set_page_dirty_lock(chunk->page_list[i].page); 59 set_page_dirty_lock(chunk->page_list[i].page);
@@ -136,10 +136,10 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
136 chunk->page_list[i].length = PAGE_SIZE; 136 chunk->page_list[i].length = PAGE_SIZE;
137 } 137 }
138 138
139 chunk->nmap = dma_map_sg(dev->dma_device, 139 chunk->nmap = ib_dma_map_sg(dev,
140 &chunk->page_list[0], 140 &chunk->page_list[0],
141 chunk->nents, 141 chunk->nents,
142 DMA_BIDIRECTIONAL); 142 DMA_BIDIRECTIONAL);
143 if (chunk->nmap <= 0) { 143 if (chunk->nmap <= 0) {
144 for (i = 0; i < chunk->nents; ++i) 144 for (i = 0; i < chunk->nents; ++i)
145 put_page(chunk->page_list[i].page); 145 put_page(chunk->page_list[i].page);
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 179d005ed4a5..420c1380f5c3 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -161,8 +161,10 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
161 161
162 if (attr_mask & IB_QP_STATE) { 162 if (attr_mask & IB_QP_STATE) {
163 /* Ensure the state is valid */ 163 /* Ensure the state is valid */
164 if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) 164 if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) {
165 return -EINVAL; 165 err = -EINVAL;
166 goto bail0;
167 }
166 168
167 wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state)); 169 wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));
168 170
@@ -184,9 +186,10 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
184 if (attr->cur_qp_state != IB_QPS_RTR && 186 if (attr->cur_qp_state != IB_QPS_RTR &&
185 attr->cur_qp_state != IB_QPS_RTS && 187 attr->cur_qp_state != IB_QPS_RTS &&
186 attr->cur_qp_state != IB_QPS_SQD && 188 attr->cur_qp_state != IB_QPS_SQD &&
187 attr->cur_qp_state != IB_QPS_SQE) 189 attr->cur_qp_state != IB_QPS_SQE) {
188 return -EINVAL; 190 err = -EINVAL;
189 else 191 goto bail0;
192 } else
190 wr.next_qp_state = 193 wr.next_qp_state =
191 cpu_to_be32(to_c2_state(attr->cur_qp_state)); 194 cpu_to_be32(to_c2_state(attr->cur_qp_state));
192 195
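The c2_qp.c hunks convert two early returns in c2_qp_modify() into err = -EINVAL; goto bail0;, so the failure paths fall through the function's common cleanup instead of skipping it, presumably releasing a request structure allocated earlier in the function. A standalone sketch of that goto-unwind idiom follows; the malloc/free pair merely stands in for whatever the real bail0 label releases.

#include <stdlib.h>
#include <errno.h>

static int do_work(int state)
{
        int err = 0;
        char *scratch = malloc(64);

        if (!scratch)
                return -ENOMEM;

        if (state < 0 || state > 7) {        /* validate, but don't leak scratch */
                err = -EINVAL;
                goto bail0;
        }

        scratch[0] = (char)state;            /* ... real work would go here ... */

bail0:
        free(scratch);
        return err;
}

int main(void)
{
        return do_work(3) ? 1 : 0;
}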
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index 7dc10551cf18..ec2e603ea241 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
6ib_ipath-y := \ 6ib_ipath-y := \
7 ipath_cq.o \ 7 ipath_cq.o \
8 ipath_diag.o \ 8 ipath_diag.o \
9 ipath_dma.o \
9 ipath_driver.o \ 10 ipath_driver.o \
10 ipath_eeprom.o \ 11 ipath_eeprom.o \
11 ipath_file_ops.o \ 12 ipath_file_ops.o \
diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
new file mode 100644
index 000000000000..6e0f2b8918ce
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
@@ -0,0 +1,189 @@
1/*
2 * Copyright (c) 2006 QLogic, Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <rdma/ib_verbs.h>
34
35#include "ipath_verbs.h"
36
37#define BAD_DMA_ADDRESS ((u64) 0)
38
39/*
40 * The following functions implement driver specific replacements
41 * for the ib_dma_*() functions.
42 *
43 * These functions return kernel virtual addresses instead of
44 * device bus addresses since the driver uses the CPU to copy
45 * data instead of using hardware DMA.
46 */
47
48static int ipath_mapping_error(struct ib_device *dev, u64 dma_addr)
49{
50 return dma_addr == BAD_DMA_ADDRESS;
51}
52
53static u64 ipath_dma_map_single(struct ib_device *dev,
54 void *cpu_addr, size_t size,
55 enum dma_data_direction direction)
56{
57 BUG_ON(!valid_dma_direction(direction));
58 return (u64) cpu_addr;
59}
60
61static void ipath_dma_unmap_single(struct ib_device *dev,
62 u64 addr, size_t size,
63 enum dma_data_direction direction)
64{
65 BUG_ON(!valid_dma_direction(direction));
66}
67
68static u64 ipath_dma_map_page(struct ib_device *dev,
69 struct page *page,
70 unsigned long offset,
71 size_t size,
72 enum dma_data_direction direction)
73{
74 u64 addr;
75
76 BUG_ON(!valid_dma_direction(direction));
77
78 if (offset + size > PAGE_SIZE) {
79 addr = BAD_DMA_ADDRESS;
80 goto done;
81 }
82
83 addr = (u64) page_address(page);
84 if (addr)
85 addr += offset;
86 /* TODO: handle highmem pages */
87
88done:
89 return addr;
90}
91
92static void ipath_dma_unmap_page(struct ib_device *dev,
93 u64 addr, size_t size,
94 enum dma_data_direction direction)
95{
96 BUG_ON(!valid_dma_direction(direction));
97}
98
99int ipath_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents,
100 enum dma_data_direction direction)
101{
102 u64 addr;
103 int i;
104 int ret = nents;
105
106 BUG_ON(!valid_dma_direction(direction));
107
108 for (i = 0; i < nents; i++) {
109 addr = (u64) page_address(sg[i].page);
110 /* TODO: handle highmem pages */
111 if (!addr) {
112 ret = 0;
113 break;
114 }
115 }
116 return ret;
117}
118
119static void ipath_unmap_sg(struct ib_device *dev,
120 struct scatterlist *sg, int nents,
121 enum dma_data_direction direction)
122{
123 BUG_ON(!valid_dma_direction(direction));
124}
125
126static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
127{
128 u64 addr = (u64) page_address(sg->page);
129
130 if (addr)
131 addr += sg->offset;
132 return addr;
133}
134
135static unsigned int ipath_sg_dma_len(struct ib_device *dev,
136 struct scatterlist *sg)
137{
138 return sg->length;
139}
140
141static void ipath_sync_single_for_cpu(struct ib_device *dev,
142 u64 addr,
143 size_t size,
144 enum dma_data_direction dir)
145{
146}
147
148static void ipath_sync_single_for_device(struct ib_device *dev,
149 u64 addr,
150 size_t size,
151 enum dma_data_direction dir)
152{
153}
154
155static void *ipath_dma_alloc_coherent(struct ib_device *dev, size_t size,
156 u64 *dma_handle, gfp_t flag)
157{
158 struct page *p;
159 void *addr = NULL;
160
161 p = alloc_pages(flag, get_order(size));
162 if (p)
163 addr = page_address(p);
164 if (dma_handle)
165 *dma_handle = (u64) addr;
166 return addr;
167}
168
169static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
170 void *cpu_addr, dma_addr_t dma_handle)
171{
172 free_pages((unsigned long) cpu_addr, get_order(size));
173}
174
175struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
176 ipath_mapping_error,
177 ipath_dma_map_single,
178 ipath_dma_unmap_single,
179 ipath_dma_map_page,
180 ipath_dma_unmap_page,
181 ipath_map_sg,
182 ipath_unmap_sg,
183 ipath_sg_dma_address,
184 ipath_sg_dma_len,
185 ipath_sync_single_for_cpu,
186 ipath_sync_single_for_device,
187 ipath_dma_alloc_coherent,
188 ipath_dma_free_coherent
189};
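ipath_dma.c above registers a struct ib_dma_mapping_ops whose handlers return kernel virtual addresses and treat unmap and sync as no-ops, since this hardware moves data with the CPU; its alloc_coherent path simply grabs pages with alloc_pages(flag, get_order(size)). The sketch below walks through the get_order() rounding that sizing relies on, reimplemented here purely for illustration (the real helper lives in the kernel headers) and assuming 4 KiB pages.

#include <stdio.h>

#define FAKE_PAGE_SHIFT 12u    /* assume 4 KiB pages for the illustration */

static unsigned int fake_get_order(unsigned long size)
{
        unsigned int order = 0;

        size = (size - 1) >> FAKE_PAGE_SHIFT;
        while (size) {
                order++;
                size >>= 1;
        }
        return order;
}

int main(void)
{
        unsigned long sizes[] = { 1, 4096, 4097, 65536, 65537 };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("size %6lu -> order %u (%lu pages)\n",
                       sizes[i], fake_get_order(sizes[i]),
                       1ul << fake_get_order(sizes[i]));
        return 0;
}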
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 1aeddb48e355..ae7f21a0cdc0 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -1825,8 +1825,6 @@ void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
1825 */ 1825 */
1826void ipath_shutdown_device(struct ipath_devdata *dd) 1826void ipath_shutdown_device(struct ipath_devdata *dd)
1827{ 1827{
1828 u64 val;
1829
1830 ipath_dbg("Shutting down the device\n"); 1828 ipath_dbg("Shutting down the device\n");
1831 1829
1832 dd->ipath_flags |= IPATH_LINKUNK; 1830 dd->ipath_flags |= IPATH_LINKUNK;
@@ -1849,7 +1847,7 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
1849 */ 1847 */
1850 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL); 1848 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL);
1851 /* flush it */ 1849 /* flush it */
1852 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 1850 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1853 /* 1851 /*
1854 * enough for anything that's going to trickle out to have actually 1852 * enough for anything that's going to trickle out to have actually
1855 * done so. 1853 * done so.
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 340f27e3ebff..b932bcb67a5e 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -699,7 +699,6 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
699 int start_stop) 699 int start_stop)
700{ 700{
701 struct ipath_devdata *dd = pd->port_dd; 701 struct ipath_devdata *dd = pd->port_dd;
702 u64 tval;
703 702
704 ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n", 703 ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n",
705 start_stop ? "en" : "dis", dd->ipath_unit, 704 start_stop ? "en" : "dis", dd->ipath_unit,
@@ -729,7 +728,7 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
729 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 728 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
730 dd->ipath_rcvctrl); 729 dd->ipath_rcvctrl);
731 /* now be sure chip saw it before we return */ 730 /* now be sure chip saw it before we return */
732 tval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 731 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
733 if (start_stop) { 732 if (start_stop) {
734 /* 733 /*
735 * And try to be sure that tail reg update has happened too. 734 * And try to be sure that tail reg update has happened too.
@@ -738,7 +737,7 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
738 * in memory copy, since we could overwrite an update by the 737 * in memory copy, since we could overwrite an update by the
739 * chip if we did. 738 * chip if we did.
740 */ 739 */
741 tval = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); 740 ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
742 } 741 }
743 /* always; new head should be equal to new tail; see above */ 742 /* always; new head should be equal to new tail; see above */
744bail: 743bail:
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index e57c7a351cb5..7468477ba837 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -1447,7 +1447,7 @@ static void ipath_ht_tidtemplate(struct ipath_devdata *dd)
1447static int ipath_ht_early_init(struct ipath_devdata *dd) 1447static int ipath_ht_early_init(struct ipath_devdata *dd)
1448{ 1448{
1449 u32 __iomem *piobuf; 1449 u32 __iomem *piobuf;
1450 u32 pioincr, val32, egrsize; 1450 u32 pioincr, val32;
1451 int i; 1451 int i;
1452 1452
1453 /* 1453 /*
@@ -1467,7 +1467,6 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
1467 * errors interrupts if we ever see one). 1467 * errors interrupts if we ever see one).
1468 */ 1468 */
1469 dd->ipath_rcvegrbufsize = dd->ipath_piosize2k; 1469 dd->ipath_rcvegrbufsize = dd->ipath_piosize2k;
1470 egrsize = dd->ipath_rcvegrbufsize;
1471 1470
1472 /* 1471 /*
1473 * the min() check here is currently a nop, but it may not 1472 * the min() check here is currently a nop, but it may not
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
index 6af89683f710..ae8bf9950c6d 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -602,7 +602,7 @@ static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
602 */ 602 */
603static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) 603static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
604{ 604{
605 u64 val, tmp, config1, prev_val; 605 u64 val, config1, prev_val;
606 int ret = 0; 606 int ret = 0;
607 607
608 ipath_dbg("Trying to bringup serdes\n"); 608 ipath_dbg("Trying to bringup serdes\n");
@@ -633,7 +633,7 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
633 | INFINIPATH_SERDC0_L1PWR_DN; 633 | INFINIPATH_SERDC0_L1PWR_DN;
634 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); 634 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
635 /* be sure chip saw it */ 635 /* be sure chip saw it */
636 tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 636 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
637 udelay(5); /* need pll reset set at least for a bit */ 637 udelay(5); /* need pll reset set at least for a bit */
638 /* 638 /*
639 * after PLL is reset, set the per-lane Resets and TxIdle and 639 * after PLL is reset, set the per-lane Resets and TxIdle and
@@ -647,7 +647,7 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
647 "and txidle (%llx)\n", (unsigned long long) val); 647 "and txidle (%llx)\n", (unsigned long long) val);
648 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); 648 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
649 /* be sure chip saw it */ 649 /* be sure chip saw it */
650 tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 650 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
651 /* need PLL reset clear for at least 11 usec before lane 651 /* need PLL reset clear for at least 11 usec before lane
652 * resets cleared; give it a few more to be sure */ 652 * resets cleared; give it a few more to be sure */
653 udelay(15); 653 udelay(15);
@@ -851,12 +851,12 @@ static int ipath_setup_pe_config(struct ipath_devdata *dd,
851 int pos, ret; 851 int pos, ret;
852 852
853 dd->ipath_msi_lo = 0; /* used as a flag during reset processing */ 853 dd->ipath_msi_lo = 0; /* used as a flag during reset processing */
854 dd->ipath_irq = pdev->irq;
855 ret = pci_enable_msi(dd->pcidev); 854 ret = pci_enable_msi(dd->pcidev);
856 if (ret) 855 if (ret)
857 ipath_dev_err(dd, "pci_enable_msi failed: %d, " 856 ipath_dev_err(dd, "pci_enable_msi failed: %d, "
858 "interrupts may not work\n", ret); 857 "interrupts may not work\n", ret);
859 /* continue even if it fails, we may still be OK... */ 858 /* continue even if it fails, we may still be OK... */
859 dd->ipath_irq = pdev->irq;
860 860
861 if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) { 861 if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
862 u16 control; 862 u16 control;
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index d819cca524cd..d4f6b5239ef8 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -347,10 +347,9 @@ done:
347static int init_chip_reset(struct ipath_devdata *dd, 347static int init_chip_reset(struct ipath_devdata *dd,
348 struct ipath_portdata **pdp) 348 struct ipath_portdata **pdp)
349{ 349{
350 struct ipath_portdata *pd;
351 u32 rtmp; 350 u32 rtmp;
352 351
353 *pdp = pd = dd->ipath_pd[0]; 352 *pdp = dd->ipath_pd[0];
354 /* ensure chip does no sends or receives while we re-initialize */ 353 /* ensure chip does no sends or receives while we re-initialize */
355 dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U; 354 dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
356 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0); 355 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0);
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 5652a550d442..72b9e279d19d 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -598,10 +598,9 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
598 * on close 598 * on close
599 */ 599 */
600 if (errs & INFINIPATH_E_RRCVHDRFULL) { 600 if (errs & INFINIPATH_E_RRCVHDRFULL) {
601 int any;
602 u32 hd, tl; 601 u32 hd, tl;
603 ipath_stats.sps_hdrqfull++; 602 ipath_stats.sps_hdrqfull++;
604 for (any = i = 0; i < dd->ipath_cfgports; i++) { 603 for (i = 0; i < dd->ipath_cfgports; i++) {
605 struct ipath_portdata *pd = dd->ipath_pd[i]; 604 struct ipath_portdata *pd = dd->ipath_pd[i];
606 if (i == 0) { 605 if (i == 0) {
607 hd = dd->ipath_port0head; 606 hd = dd->ipath_port0head;
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
index 9a6cbd05adcd..851763d7d2db 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -134,7 +134,7 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
134 */ 134 */
135 if (sge->lkey == 0) { 135 if (sge->lkey == 0) {
136 isge->mr = NULL; 136 isge->mr = NULL;
137 isge->vaddr = bus_to_virt(sge->addr); 137 isge->vaddr = (void *) sge->addr;
138 isge->length = sge->length; 138 isge->length = sge->length;
139 isge->sge_length = sge->length; 139 isge->sge_length = sge->length;
140 ret = 1; 140 ret = 1;
@@ -202,12 +202,12 @@ int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
202 int ret; 202 int ret;
203 203
204 /* 204 /*
205 * We use RKEY == zero for physical addresses 205 * We use RKEY == zero for kernel virtual addresses
206 * (see ipath_get_dma_mr). 206 * (see ipath_get_dma_mr and ipath_dma.c).
207 */ 207 */
208 if (rkey == 0) { 208 if (rkey == 0) {
209 sge->mr = NULL; 209 sge->mr = NULL;
210 sge->vaddr = phys_to_virt(vaddr); 210 sge->vaddr = (void *) vaddr;
211 sge->length = len; 211 sge->length = len;
212 sge->sge_length = len; 212 sge->sge_length = len;
213 ss->sg_list = NULL; 213 ss->sg_list = NULL;
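The ipath_keys.c change follows the new mapping convention: with lkey or rkey 0, the address carried in the SGE is no longer a bus or physical address but the u64 handle produced by the driver's software DMA-mapping ops, which for ipath is simply the kernel virtual address. A hedged sketch of the conversion the code now relies on (my_sge_to_vaddr is an illustrative helper, not part of the driver):

#include <linux/types.h>

/* Assumes the software mapping ops return kernel virtual addresses as
 * u64 handles, so recovering the pointer is a cast, not phys_to_virt(). */
static inline void *my_sge_to_vaddr(u64 addr)
{
        return (void *) (unsigned long) addr;
}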
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index a0673c1eef71..8cc8598d6c69 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -54,6 +54,8 @@ static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
54 * @acc: access flags 54 * @acc: access flags
55 * 55 *
56 * Returns the memory region on success, otherwise returns an errno. 56 * Returns the memory region on success, otherwise returns an errno.
57 * Note that all DMA addresses should be created via the
58 * struct ib_dma_mapping_ops functions (see ipath_dma.c).
57 */ 59 */
58struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc) 60struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
59{ 61{
@@ -149,8 +151,7 @@ struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
149 m = 0; 151 m = 0;
150 n = 0; 152 n = 0;
151 for (i = 0; i < num_phys_buf; i++) { 153 for (i = 0; i < num_phys_buf; i++) {
152 mr->mr.map[m]->segs[n].vaddr = 154 mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
153 phys_to_virt(buffer_list[i].addr);
154 mr->mr.map[m]->segs[n].length = buffer_list[i].size; 155 mr->mr.map[m]->segs[n].length = buffer_list[i].size;
155 mr->mr.length += buffer_list[i].size; 156 mr->mr.length += buffer_list[i].size;
156 n++; 157 n++;
@@ -347,7 +348,7 @@ int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
347 n = 0; 348 n = 0;
348 ps = 1 << fmr->page_shift; 349 ps = 1 << fmr->page_shift;
349 for (i = 0; i < list_len; i++) { 350 for (i = 0; i < list_len; i++) {
350 fmr->mr.map[m]->segs[n].vaddr = phys_to_virt(page_list[i]); 351 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
351 fmr->mr.map[m]->segs[n].length = ps; 352 fmr->mr.map[m]->segs[n].length = ps;
352 if (++n == IPATH_SEGSZ) { 353 if (++n == IPATH_SEGSZ) {
353 m++; 354 m++;
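The memory-registration paths drop phys_to_virt() for the same reason: the addresses reaching them were produced by the ib_dma_* layer. The new comment points at ipath_dma.c, which is not part of this section; presumably its map_single is along these lines, handing back the buffer's kernel virtual address as the u64 handle (a sketch under that assumption, with a made-up name):

#include <linux/types.h>
#include <linux/dma-mapping.h>

/* Assumed shape of the software map_single: no IOMMU and no bounce
 * buffering, so the kernel virtual address itself is the handle. */
static u64 my_sw_map_single(void *cpu_addr, size_t size,
                            enum dma_data_direction direction)
{
        return (u64) (unsigned long) cpu_addr;
}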
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index 182de34f9f47..ffa6318ad0cc 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -215,7 +215,6 @@ static ssize_t store_mlid(struct device *dev,
215 size_t count) 215 size_t count)
216{ 216{
217 struct ipath_devdata *dd = dev_get_drvdata(dev); 217 struct ipath_devdata *dd = dev_get_drvdata(dev);
218 int unit;
219 u16 mlid; 218 u16 mlid;
220 int ret; 219 int ret;
221 220
@@ -223,8 +222,6 @@ static ssize_t store_mlid(struct device *dev,
223 if (ret < 0 || mlid < IPATH_MULTICAST_LID_BASE) 222 if (ret < 0 || mlid < IPATH_MULTICAST_LID_BASE)
224 goto invalid; 223 goto invalid;
225 224
226 unit = dd->ipath_unit;
227
228 dd->ipath_mlid = mlid; 225 dd->ipath_mlid = mlid;
229 226
230 goto bail; 227 goto bail;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index acdee33ee1f8..2aaacdb7e52a 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1599,6 +1599,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
1599 dev->detach_mcast = ipath_multicast_detach; 1599 dev->detach_mcast = ipath_multicast_detach;
1600 dev->process_mad = ipath_process_mad; 1600 dev->process_mad = ipath_process_mad;
1601 dev->mmap = ipath_mmap; 1601 dev->mmap = ipath_mmap;
1602 dev->dma_ops = &ipath_dma_mapping_ops;
1602 1603
1603 snprintf(dev->node_desc, sizeof(dev->node_desc), 1604 snprintf(dev->node_desc, sizeof(dev->node_desc),
1604 IPATH_IDSTR " %s", init_utsname()->nodename); 1605 IPATH_IDSTR " %s", init_utsname()->nodename);
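Setting dev->dma_ops is what routes the ib_dma_* calls used by the ULP conversions below through the driver's software implementation instead of the platform DMA API. The real wrappers are added to the verbs headers elsewhere in this series (not shown here); their dispatch presumably looks roughly like this sketch (my_ib_dma_map_single is a stand-in name):

#include <rdma/ib_verbs.h>
#include <linux/dma-mapping.h>

/* Devices with software DMA (such as ipath) supply dma_ops; everyone
 * else falls through to the normal DMA API on dma_device. */
static inline u64 my_ib_dma_map_single(struct ib_device *dev, void *cpu_addr,
                                       size_t size,
                                       enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}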
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 8039f6e5f0c8..c0c8d5b24a7d 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -812,4 +812,6 @@ extern unsigned int ib_ipath_max_srq_wrs;
812 812
813extern const u32 ib_ipath_rnr_table[]; 813extern const u32 ib_ipath_rnr_table[];
814 814
815extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;
816
815#endif /* IPATH_VERBS_H */ 817#endif /* IPATH_VERBS_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 99547996aba2..07deee8f81ce 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -105,12 +105,12 @@ struct ipoib_mcast;
105 105
106struct ipoib_rx_buf { 106struct ipoib_rx_buf {
107 struct sk_buff *skb; 107 struct sk_buff *skb;
108 dma_addr_t mapping; 108 u64 mapping;
109}; 109};
110 110
111struct ipoib_tx_buf { 111struct ipoib_tx_buf {
112 struct sk_buff *skb; 112 struct sk_buff *skb;
113 DECLARE_PCI_UNMAP_ADDR(mapping) 113 u64 mapping;
114}; 114};
115 115
116/* 116/*
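Switching the ring-buffer fields from dma_addr_t and DECLARE_PCI_UNMAP_ADDR (which can compile away to nothing on some configurations) to a plain u64 matches what ib_dma_map_single() returns: the handle need not be a platform DMA address at all once the device supplies its own mapping ops, so it always has to be stored in full. A minimal illustrative layout (names hypothetical):

#include <linux/skbuff.h>
#include <linux/types.h>

struct my_rx_buf {
        struct sk_buff *skb;
        u64             mapping;        /* handle from ib_dma_map_single() */
};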
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index f10fba5d3265..59d9594ed6d9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -109,9 +109,8 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
109 ret = ib_post_recv(priv->qp, &param, &bad_wr); 109 ret = ib_post_recv(priv->qp, &param, &bad_wr);
110 if (unlikely(ret)) { 110 if (unlikely(ret)) {
111 ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret); 111 ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
112 dma_unmap_single(priv->ca->dma_device, 112 ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
113 priv->rx_ring[id].mapping, 113 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
114 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
115 dev_kfree_skb_any(priv->rx_ring[id].skb); 114 dev_kfree_skb_any(priv->rx_ring[id].skb);
116 priv->rx_ring[id].skb = NULL; 115 priv->rx_ring[id].skb = NULL;
117 } 116 }
@@ -123,7 +122,7 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
123{ 122{
124 struct ipoib_dev_priv *priv = netdev_priv(dev); 123 struct ipoib_dev_priv *priv = netdev_priv(dev);
125 struct sk_buff *skb; 124 struct sk_buff *skb;
126 dma_addr_t addr; 125 u64 addr;
127 126
128 skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4); 127 skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
129 if (!skb) 128 if (!skb)
@@ -136,10 +135,9 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
136 */ 135 */
137 skb_reserve(skb, 4); 136 skb_reserve(skb, 4);
138 137
139 addr = dma_map_single(priv->ca->dma_device, 138 addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
140 skb->data, IPOIB_BUF_SIZE, 139 DMA_FROM_DEVICE);
141 DMA_FROM_DEVICE); 140 if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
142 if (unlikely(dma_mapping_error(addr))) {
143 dev_kfree_skb_any(skb); 141 dev_kfree_skb_any(skb);
144 return -EIO; 142 return -EIO;
145 } 143 }
@@ -174,7 +172,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
174 struct ipoib_dev_priv *priv = netdev_priv(dev); 172 struct ipoib_dev_priv *priv = netdev_priv(dev);
175 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; 173 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
176 struct sk_buff *skb; 174 struct sk_buff *skb;
177 dma_addr_t addr; 175 u64 addr;
178 176
179 ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n", 177 ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n",
180 wr_id, wc->opcode, wc->status); 178 wr_id, wc->opcode, wc->status);
@@ -193,8 +191,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
193 ipoib_warn(priv, "failed recv event " 191 ipoib_warn(priv, "failed recv event "
194 "(status=%d, wrid=%d vend_err %x)\n", 192 "(status=%d, wrid=%d vend_err %x)\n",
195 wc->status, wr_id, wc->vendor_err); 193 wc->status, wr_id, wc->vendor_err);
196 dma_unmap_single(priv->ca->dma_device, addr, 194 ib_dma_unmap_single(priv->ca, addr,
197 IPOIB_BUF_SIZE, DMA_FROM_DEVICE); 195 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
198 dev_kfree_skb_any(skb); 196 dev_kfree_skb_any(skb);
199 priv->rx_ring[wr_id].skb = NULL; 197 priv->rx_ring[wr_id].skb = NULL;
200 return; 198 return;
@@ -212,8 +210,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
212 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", 210 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
213 wc->byte_len, wc->slid); 211 wc->byte_len, wc->slid);
214 212
215 dma_unmap_single(priv->ca->dma_device, addr, 213 ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
216 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
217 214
218 skb_put(skb, wc->byte_len); 215 skb_put(skb, wc->byte_len);
219 skb_pull(skb, IB_GRH_BYTES); 216 skb_pull(skb, IB_GRH_BYTES);
@@ -261,10 +258,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
261 258
262 tx_req = &priv->tx_ring[wr_id]; 259 tx_req = &priv->tx_ring[wr_id];
263 260
264 dma_unmap_single(priv->ca->dma_device, 261 ib_dma_unmap_single(priv->ca, tx_req->mapping,
265 pci_unmap_addr(tx_req, mapping), 262 tx_req->skb->len, DMA_TO_DEVICE);
266 tx_req->skb->len,
267 DMA_TO_DEVICE);
268 263
269 ++priv->stats.tx_packets; 264 ++priv->stats.tx_packets;
270 priv->stats.tx_bytes += tx_req->skb->len; 265 priv->stats.tx_bytes += tx_req->skb->len;
@@ -311,7 +306,7 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
311static inline int post_send(struct ipoib_dev_priv *priv, 306static inline int post_send(struct ipoib_dev_priv *priv,
312 unsigned int wr_id, 307 unsigned int wr_id,
313 struct ib_ah *address, u32 qpn, 308 struct ib_ah *address, u32 qpn,
314 dma_addr_t addr, int len) 309 u64 addr, int len)
315{ 310{
316 struct ib_send_wr *bad_wr; 311 struct ib_send_wr *bad_wr;
317 312
@@ -330,7 +325,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
330{ 325{
331 struct ipoib_dev_priv *priv = netdev_priv(dev); 326 struct ipoib_dev_priv *priv = netdev_priv(dev);
332 struct ipoib_tx_buf *tx_req; 327 struct ipoib_tx_buf *tx_req;
333 dma_addr_t addr; 328 u64 addr;
334 329
335 if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) { 330 if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) {
336 ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", 331 ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -353,21 +348,20 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
353 */ 348 */
354 tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)]; 349 tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
355 tx_req->skb = skb; 350 tx_req->skb = skb;
356 addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len, 351 addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
357 DMA_TO_DEVICE); 352 DMA_TO_DEVICE);
358 if (unlikely(dma_mapping_error(addr))) { 353 if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
359 ++priv->stats.tx_errors; 354 ++priv->stats.tx_errors;
360 dev_kfree_skb_any(skb); 355 dev_kfree_skb_any(skb);
361 return; 356 return;
362 } 357 }
363 pci_unmap_addr_set(tx_req, mapping, addr); 358 tx_req->mapping = addr;
364 359
365 if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), 360 if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
366 address->ah, qpn, addr, skb->len))) { 361 address->ah, qpn, addr, skb->len))) {
367 ipoib_warn(priv, "post_send failed\n"); 362 ipoib_warn(priv, "post_send failed\n");
368 ++priv->stats.tx_errors; 363 ++priv->stats.tx_errors;
369 dma_unmap_single(priv->ca->dma_device, addr, skb->len, 364 ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
370 DMA_TO_DEVICE);
371 dev_kfree_skb_any(skb); 365 dev_kfree_skb_any(skb);
372 } else { 366 } else {
373 dev->trans_start = jiffies; 367 dev->trans_start = jiffies;
@@ -538,24 +532,27 @@ int ipoib_ib_dev_stop(struct net_device *dev)
538 while ((int) priv->tx_tail - (int) priv->tx_head < 0) { 532 while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
539 tx_req = &priv->tx_ring[priv->tx_tail & 533 tx_req = &priv->tx_ring[priv->tx_tail &
540 (ipoib_sendq_size - 1)]; 534 (ipoib_sendq_size - 1)];
541 dma_unmap_single(priv->ca->dma_device, 535 ib_dma_unmap_single(priv->ca,
542 pci_unmap_addr(tx_req, mapping), 536 tx_req->mapping,
543 tx_req->skb->len, 537 tx_req->skb->len,
544 DMA_TO_DEVICE); 538 DMA_TO_DEVICE);
545 dev_kfree_skb_any(tx_req->skb); 539 dev_kfree_skb_any(tx_req->skb);
546 ++priv->tx_tail; 540 ++priv->tx_tail;
547 } 541 }
548 542
549 for (i = 0; i < ipoib_recvq_size; ++i) 543 for (i = 0; i < ipoib_recvq_size; ++i) {
550 if (priv->rx_ring[i].skb) { 544 struct ipoib_rx_buf *rx_req;
551 dma_unmap_single(priv->ca->dma_device, 545
552 pci_unmap_addr(&priv->rx_ring[i], 546 rx_req = &priv->rx_ring[i];
553 mapping), 547 if (!rx_req->skb)
554 IPOIB_BUF_SIZE, 548 continue;
555 DMA_FROM_DEVICE); 549 ib_dma_unmap_single(priv->ca,
556 dev_kfree_skb_any(priv->rx_ring[i].skb); 550 rx_req->mapping,
557 priv->rx_ring[i].skb = NULL; 551 IPOIB_BUF_SIZE,
558 } 552 DMA_FROM_DEVICE);
553 dev_kfree_skb_any(rx_req->skb);
554 rx_req->skb = NULL;
555 }
559 556
560 goto timeout; 557 goto timeout;
561 } 558 }
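The ipoib_ib.c conversion is mechanical but shows the full life cycle of the new API: map with ib_dma_map_single(), check with ib_dma_mapping_error(), keep the u64 handle next to the skb, and unmap with that handle on completion. A condensed sketch of the pattern (helper and field names are illustrative; locking and stats omitted):

#include <rdma/ib_verbs.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

struct my_tx_buf {
        struct sk_buff *skb;
        u64             mapping;
};

/* Map an skb for transmit and remember the handle for the completion path. */
static int my_map_tx(struct ib_device *ca, struct my_tx_buf *tx,
                     struct sk_buff *skb)
{
        u64 addr = ib_dma_map_single(ca, skb->data, skb->len, DMA_TO_DEVICE);

        if (unlikely(ib_dma_mapping_error(ca, addr)))
                return -EIO;
        tx->skb = skb;
        tx->mapping = addr;
        return 0;
}

/* Completion side: unmap with the stored handle, then release the skb. */
static void my_unmap_tx(struct ib_device *ca, struct my_tx_buf *tx)
{
        ib_dma_unmap_single(ca, tx->mapping, tx->skb->len, DMA_TO_DEVICE);
        dev_kfree_skb_any(tx->skb);
        tx->skb = NULL;
}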
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index c09280243726..705eb1d0e554 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -497,8 +497,6 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
497 return; 497 return;
498 } 498 }
499 499
500 skb_queue_head_init(&neigh->queue);
501
502 /* 500 /*
503 * We can only be called from ipoib_start_xmit, so we're 501 * We can only be called from ipoib_start_xmit, so we're
504 * inside tx_lock -- no need to save/restore flags. 502 * inside tx_lock -- no need to save/restore flags.
@@ -806,6 +804,7 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
806 804
807 neigh->neighbour = neighbour; 805 neigh->neighbour = neighbour;
808 *to_ipoib_neigh(neighbour) = neigh; 806 *to_ipoib_neigh(neighbour) = neigh;
807 skb_queue_head_init(&neigh->queue);
809 808
810 return neigh; 809 return neigh;
811} 810}
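The ipoib_main.c hunk is a correctness fix rather than a DMA change: skb_queue_head_init() moves from one caller (neigh_add_path) into ipoib_neigh_alloc() itself, so every allocation path hands out a neighbour with an initialized queue before any skb can be queued to it. The shape of the allocator after the move, as a sketch (structure trimmed to the relevant fields, names hypothetical):

#include <linux/skbuff.h>
#include <linux/slab.h>

struct my_neigh {
        void                   *neighbour;      /* stand-in for struct neighbour * */
        struct sk_buff_head     queue;
};

static struct my_neigh *my_neigh_alloc(void *neighbour)
{
        struct my_neigh *neigh = kmalloc(sizeof(*neigh), GFP_ATOMIC);

        if (!neigh)
                return NULL;
        neigh->neighbour = neighbour;
        /* initialize here so no caller can forget it */
        skb_queue_head_init(&neigh->queue);
        return neigh;
}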
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 234e5b061a75..cae8c96a55f8 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -182,7 +182,7 @@ struct iser_regd_buf {
182 struct iser_mem_reg reg; /* memory registration info */ 182 struct iser_mem_reg reg; /* memory registration info */
183 void *virt_addr; 183 void *virt_addr;
184 struct iser_device *device; /* device->device for dma_unmap */ 184 struct iser_device *device; /* device->device for dma_unmap */
185 dma_addr_t dma_addr; /* if non zero, addr for dma_unmap */ 185 u64 dma_addr; /* if non zero, addr for dma_unmap */
186 enum dma_data_direction direction; /* direction for dma_unmap */ 186 enum dma_data_direction direction; /* direction for dma_unmap */
187 unsigned int data_size; 187 unsigned int data_size;
188 atomic_t ref_count; /* refcount, freed when dec to 0 */ 188 atomic_t ref_count; /* refcount, freed when dec to 0 */
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 9b3d79c796c8..e73c87b9be43 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -487,10 +487,8 @@ int iser_send_control(struct iscsi_conn *conn,
487 struct iscsi_iser_conn *iser_conn = conn->dd_data; 487 struct iscsi_iser_conn *iser_conn = conn->dd_data;
488 struct iser_desc *mdesc = mtask->dd_data; 488 struct iser_desc *mdesc = mtask->dd_data;
489 struct iser_dto *send_dto = NULL; 489 struct iser_dto *send_dto = NULL;
490 unsigned int itt;
491 unsigned long data_seg_len; 490 unsigned long data_seg_len;
492 int err = 0; 491 int err = 0;
493 unsigned char opcode;
494 struct iser_regd_buf *regd_buf; 492 struct iser_regd_buf *regd_buf;
495 struct iser_device *device; 493 struct iser_device *device;
496 494
@@ -512,8 +510,6 @@ int iser_send_control(struct iscsi_conn *conn,
512 510
513 iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE); 511 iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
514 512
515 itt = ntohl(mtask->hdr->itt);
516 opcode = mtask->hdr->opcode & ISCSI_OPCODE_MASK;
517 data_seg_len = ntoh24(mtask->hdr->dlength); 513 data_seg_len = ntoh24(mtask->hdr->dlength);
518 514
519 if (data_seg_len > 0) { 515 if (data_seg_len > 0) {
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 3aedd59b8a84..fc9f1fd0ae54 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -52,7 +52,7 @@
52 */ 52 */
53int iser_regd_buff_release(struct iser_regd_buf *regd_buf) 53int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
54{ 54{
55 struct device *dma_device; 55 struct ib_device *dev;
56 56
57 if ((atomic_read(&regd_buf->ref_count) == 0) || 57 if ((atomic_read(&regd_buf->ref_count) == 0) ||
58 atomic_dec_and_test(&regd_buf->ref_count)) { 58 atomic_dec_and_test(&regd_buf->ref_count)) {
@@ -61,8 +61,8 @@ int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
61 iser_unreg_mem(&regd_buf->reg); 61 iser_unreg_mem(&regd_buf->reg);
62 62
63 if (regd_buf->dma_addr) { 63 if (regd_buf->dma_addr) {
64 dma_device = regd_buf->device->ib_device->dma_device; 64 dev = regd_buf->device->ib_device;
65 dma_unmap_single(dma_device, 65 ib_dma_unmap_single(dev,
66 regd_buf->dma_addr, 66 regd_buf->dma_addr,
67 regd_buf->data_size, 67 regd_buf->data_size,
68 regd_buf->direction); 68 regd_buf->direction);
@@ -84,12 +84,12 @@ void iser_reg_single(struct iser_device *device,
84 struct iser_regd_buf *regd_buf, 84 struct iser_regd_buf *regd_buf,
85 enum dma_data_direction direction) 85 enum dma_data_direction direction)
86{ 86{
87 dma_addr_t dma_addr; 87 u64 dma_addr;
88 88
89 dma_addr = dma_map_single(device->ib_device->dma_device, 89 dma_addr = ib_dma_map_single(device->ib_device,
90 regd_buf->virt_addr, 90 regd_buf->virt_addr,
91 regd_buf->data_size, direction); 91 regd_buf->data_size, direction);
92 BUG_ON(dma_mapping_error(dma_addr)); 92 BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));
93 93
94 regd_buf->reg.lkey = device->mr->lkey; 94 regd_buf->reg.lkey = device->mr->lkey;
95 regd_buf->reg.len = regd_buf->data_size; 95 regd_buf->reg.len = regd_buf->data_size;
@@ -107,7 +107,7 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
107 enum iser_data_dir cmd_dir) 107 enum iser_data_dir cmd_dir)
108{ 108{
109 int dma_nents; 109 int dma_nents;
110 struct device *dma_device; 110 struct ib_device *dev;
111 char *mem = NULL; 111 char *mem = NULL;
112 struct iser_data_buf *data = &iser_ctask->data[cmd_dir]; 112 struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
113 unsigned long cmd_data_len = data->data_len; 113 unsigned long cmd_data_len = data->data_len;
@@ -147,17 +147,12 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
147 147
148 iser_ctask->data_copy[cmd_dir].copy_buf = mem; 148 iser_ctask->data_copy[cmd_dir].copy_buf = mem;
149 149
150 dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device; 150 dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
151 151 dma_nents = ib_dma_map_sg(dev,
152 if (cmd_dir == ISER_DIR_OUT) 152 &iser_ctask->data_copy[cmd_dir].sg_single,
153 dma_nents = dma_map_sg(dma_device, 153 1,
154 &iser_ctask->data_copy[cmd_dir].sg_single, 154 (cmd_dir == ISER_DIR_OUT) ?
155 1, DMA_TO_DEVICE); 155 DMA_TO_DEVICE : DMA_FROM_DEVICE);
156 else
157 dma_nents = dma_map_sg(dma_device,
158 &iser_ctask->data_copy[cmd_dir].sg_single,
159 1, DMA_FROM_DEVICE);
160
161 BUG_ON(dma_nents == 0); 156 BUG_ON(dma_nents == 0);
162 157
163 iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents; 158 iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
@@ -170,19 +165,16 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
170void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, 165void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
171 enum iser_data_dir cmd_dir) 166 enum iser_data_dir cmd_dir)
172{ 167{
173 struct device *dma_device; 168 struct ib_device *dev;
174 struct iser_data_buf *mem_copy; 169 struct iser_data_buf *mem_copy;
175 unsigned long cmd_data_len; 170 unsigned long cmd_data_len;
176 171
177 dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device; 172 dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
178 mem_copy = &iser_ctask->data_copy[cmd_dir]; 173 mem_copy = &iser_ctask->data_copy[cmd_dir];
179 174
180 if (cmd_dir == ISER_DIR_OUT) 175 ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
181 dma_unmap_sg(dma_device, &mem_copy->sg_single, 1, 176 (cmd_dir == ISER_DIR_OUT) ?
182 DMA_TO_DEVICE); 177 DMA_TO_DEVICE : DMA_FROM_DEVICE);
183 else
184 dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
185 DMA_FROM_DEVICE);
186 178
187 if (cmd_dir == ISER_DIR_IN) { 179 if (cmd_dir == ISER_DIR_IN) {
188 char *mem; 180 char *mem;
@@ -231,11 +223,12 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
231 * consecutive elements. Also, it handles one entry SG. 223 * consecutive elements. Also, it handles one entry SG.
232 */ 224 */
233static int iser_sg_to_page_vec(struct iser_data_buf *data, 225static int iser_sg_to_page_vec(struct iser_data_buf *data,
234 struct iser_page_vec *page_vec) 226 struct iser_page_vec *page_vec,
227 struct ib_device *ibdev)
235{ 228{
236 struct scatterlist *sg = (struct scatterlist *)data->buf; 229 struct scatterlist *sg = (struct scatterlist *)data->buf;
237 dma_addr_t first_addr, last_addr, page; 230 u64 first_addr, last_addr, page;
238 int start_aligned, end_aligned; 231 int end_aligned;
239 unsigned int cur_page = 0; 232 unsigned int cur_page = 0;
240 unsigned long total_sz = 0; 233 unsigned long total_sz = 0;
241 int i; 234 int i;
@@ -244,19 +237,21 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
244 page_vec->offset = (u64) sg[0].offset & ~MASK_4K; 237 page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
245 238
246 for (i = 0; i < data->dma_nents; i++) { 239 for (i = 0; i < data->dma_nents; i++) {
247 total_sz += sg_dma_len(&sg[i]); 240 unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]);
241
242 total_sz += dma_len;
248 243
249 first_addr = sg_dma_address(&sg[i]); 244 first_addr = ib_sg_dma_address(ibdev, &sg[i]);
250 last_addr = first_addr + sg_dma_len(&sg[i]); 245 last_addr = first_addr + dma_len;
251 246
252 start_aligned = !(first_addr & ~MASK_4K);
253 end_aligned = !(last_addr & ~MASK_4K); 247 end_aligned = !(last_addr & ~MASK_4K);
254 248
255 /* continue to collect page fragments till aligned or SG ends */ 249 /* continue to collect page fragments till aligned or SG ends */
256 while (!end_aligned && (i + 1 < data->dma_nents)) { 250 while (!end_aligned && (i + 1 < data->dma_nents)) {
257 i++; 251 i++;
258 total_sz += sg_dma_len(&sg[i]); 252 dma_len = ib_sg_dma_len(ibdev, &sg[i]);
259 last_addr = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]); 253 total_sz += dma_len;
254 last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len;
260 end_aligned = !(last_addr & ~MASK_4K); 255 end_aligned = !(last_addr & ~MASK_4K);
261 } 256 }
262 257
@@ -288,10 +283,11 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
288 * the number of entries which are aligned correctly. Supports the case where 283 * the number of entries which are aligned correctly. Supports the case where
289 * consecutive SG elements are actually fragments of the same physical page. 284 * consecutive SG elements are actually fragments of the same physical page.
290 */ 285 */
291static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data) 286static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
287 struct ib_device *ibdev)
292{ 288{
293 struct scatterlist *sg; 289 struct scatterlist *sg;
294 dma_addr_t end_addr, next_addr; 290 u64 end_addr, next_addr;
295 int i, cnt; 291 int i, cnt;
296 unsigned int ret_len = 0; 292 unsigned int ret_len = 0;
297 293
@@ -303,12 +299,12 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
303 (unsigned long)page_to_phys(sg[i].page), 299 (unsigned long)page_to_phys(sg[i].page),
304 (unsigned long)sg[i].offset, 300 (unsigned long)sg[i].offset,
305 (unsigned long)sg[i].length); */ 301 (unsigned long)sg[i].length); */
306 end_addr = sg_dma_address(&sg[i]) + 302 end_addr = ib_sg_dma_address(ibdev, &sg[i]) +
307 sg_dma_len(&sg[i]); 303 ib_sg_dma_len(ibdev, &sg[i]);
308 /* iser_dbg("Checking sg iobuf end address " 304 /* iser_dbg("Checking sg iobuf end address "
309 "0x%08lX\n", end_addr); */ 305 "0x%08lX\n", end_addr); */
310 if (i + 1 < data->dma_nents) { 306 if (i + 1 < data->dma_nents) {
311 next_addr = sg_dma_address(&sg[i+1]); 307 next_addr = ib_sg_dma_address(ibdev, &sg[i+1]);
312 /* are i, i+1 fragments of the same page? */ 308 /* are i, i+1 fragments of the same page? */
313 if (end_addr == next_addr) 309 if (end_addr == next_addr)
314 continue; 310 continue;
@@ -325,7 +321,8 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
325 return ret_len; 321 return ret_len;
326} 322}
327 323
328static void iser_data_buf_dump(struct iser_data_buf *data) 324static void iser_data_buf_dump(struct iser_data_buf *data,
325 struct ib_device *ibdev)
329{ 326{
330 struct scatterlist *sg = (struct scatterlist *)data->buf; 327 struct scatterlist *sg = (struct scatterlist *)data->buf;
331 int i; 328 int i;
@@ -333,9 +330,9 @@ static void iser_data_buf_dump(struct iser_data_buf *data)
333 for (i = 0; i < data->dma_nents; i++) 330 for (i = 0; i < data->dma_nents; i++)
334 iser_err("sg[%d] dma_addr:0x%lX page:0x%p " 331 iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
335 "off:0x%x sz:0x%x dma_len:0x%x\n", 332 "off:0x%x sz:0x%x dma_len:0x%x\n",
336 i, (unsigned long)sg_dma_address(&sg[i]), 333 i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]),
337 sg[i].page, sg[i].offset, 334 sg[i].page, sg[i].offset,
338 sg[i].length,sg_dma_len(&sg[i])); 335 sg[i].length, ib_sg_dma_len(ibdev, &sg[i]));
339} 336}
340 337
341static void iser_dump_page_vec(struct iser_page_vec *page_vec) 338static void iser_dump_page_vec(struct iser_page_vec *page_vec)
@@ -349,7 +346,8 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec)
349} 346}
350 347
351static void iser_page_vec_build(struct iser_data_buf *data, 348static void iser_page_vec_build(struct iser_data_buf *data,
352 struct iser_page_vec *page_vec) 349 struct iser_page_vec *page_vec,
350 struct ib_device *ibdev)
353{ 351{
354 int page_vec_len = 0; 352 int page_vec_len = 0;
355 353
@@ -357,14 +355,14 @@ static void iser_page_vec_build(struct iser_data_buf *data,
357 page_vec->offset = 0; 355 page_vec->offset = 0;
358 356
359 iser_dbg("Translating sg sz: %d\n", data->dma_nents); 357 iser_dbg("Translating sg sz: %d\n", data->dma_nents);
360 page_vec_len = iser_sg_to_page_vec(data,page_vec); 358 page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
361 iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,page_vec_len); 359 iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,page_vec_len);
362 360
363 page_vec->length = page_vec_len; 361 page_vec->length = page_vec_len;
364 362
365 if (page_vec_len * SIZE_4K < page_vec->data_size) { 363 if (page_vec_len * SIZE_4K < page_vec->data_size) {
366 iser_err("page_vec too short to hold this SG\n"); 364 iser_err("page_vec too short to hold this SG\n");
367 iser_data_buf_dump(data); 365 iser_data_buf_dump(data, ibdev);
368 iser_dump_page_vec(page_vec); 366 iser_dump_page_vec(page_vec);
369 BUG(); 367 BUG();
370 } 368 }
@@ -375,13 +373,12 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
375 enum iser_data_dir iser_dir, 373 enum iser_data_dir iser_dir,
376 enum dma_data_direction dma_dir) 374 enum dma_data_direction dma_dir)
377{ 375{
378 struct device *dma_device; 376 struct ib_device *dev;
379 377
380 iser_ctask->dir[iser_dir] = 1; 378 iser_ctask->dir[iser_dir] = 1;
381 dma_device = 379 dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
382 iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
383 380
384 data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir); 381 data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
385 if (data->dma_nents == 0) { 382 if (data->dma_nents == 0) {
386 iser_err("dma_map_sg failed!!!\n"); 383 iser_err("dma_map_sg failed!!!\n");
387 return -EINVAL; 384 return -EINVAL;
@@ -391,20 +388,19 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
391 388
392void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask) 389void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
393{ 390{
394 struct device *dma_device; 391 struct ib_device *dev;
395 struct iser_data_buf *data; 392 struct iser_data_buf *data;
396 393
397 dma_device = 394 dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
398 iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
399 395
400 if (iser_ctask->dir[ISER_DIR_IN]) { 396 if (iser_ctask->dir[ISER_DIR_IN]) {
401 data = &iser_ctask->data[ISER_DIR_IN]; 397 data = &iser_ctask->data[ISER_DIR_IN];
402 dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE); 398 ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
403 } 399 }
404 400
405 if (iser_ctask->dir[ISER_DIR_OUT]) { 401 if (iser_ctask->dir[ISER_DIR_OUT]) {
406 data = &iser_ctask->data[ISER_DIR_OUT]; 402 data = &iser_ctask->data[ISER_DIR_OUT];
407 dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE); 403 ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
408 } 404 }
409} 405}
410 406
@@ -419,6 +415,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
419{ 415{
420 struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn; 416 struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
421 struct iser_device *device = ib_conn->device; 417 struct iser_device *device = ib_conn->device;
418 struct ib_device *ibdev = device->ib_device;
422 struct iser_data_buf *mem = &iser_ctask->data[cmd_dir]; 419 struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
423 struct iser_regd_buf *regd_buf; 420 struct iser_regd_buf *regd_buf;
424 int aligned_len; 421 int aligned_len;
@@ -428,11 +425,11 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
428 425
429 regd_buf = &iser_ctask->rdma_regd[cmd_dir]; 426 regd_buf = &iser_ctask->rdma_regd[cmd_dir];
430 427
431 aligned_len = iser_data_buf_aligned_len(mem); 428 aligned_len = iser_data_buf_aligned_len(mem, ibdev);
432 if (aligned_len != mem->dma_nents) { 429 if (aligned_len != mem->dma_nents) {
433 iser_err("rdma alignment violation %d/%d aligned\n", 430 iser_err("rdma alignment violation %d/%d aligned\n",
434 aligned_len, mem->size); 431 aligned_len, mem->size);
435 iser_data_buf_dump(mem); 432 iser_data_buf_dump(mem, ibdev);
436 433
437 /* unmap the command data before accessing it */ 434 /* unmap the command data before accessing it */
438 iser_dma_unmap_task_data(iser_ctask); 435 iser_dma_unmap_task_data(iser_ctask);
@@ -450,8 +447,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
450 447
451 regd_buf->reg.lkey = device->mr->lkey; 448 regd_buf->reg.lkey = device->mr->lkey;
452 regd_buf->reg.rkey = device->mr->rkey; 449 regd_buf->reg.rkey = device->mr->rkey;
453 regd_buf->reg.len = sg_dma_len(&sg[0]); 450 regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
454 regd_buf->reg.va = sg_dma_address(&sg[0]); 451 regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
455 regd_buf->reg.is_fmr = 0; 452 regd_buf->reg.is_fmr = 0;
456 453
457 iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X " 454 iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
@@ -461,10 +458,10 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
461 (unsigned long)regd_buf->reg.va, 458 (unsigned long)regd_buf->reg.va,
462 (unsigned long)regd_buf->reg.len); 459 (unsigned long)regd_buf->reg.len);
463 } else { /* use FMR for multiple dma entries */ 460 } else { /* use FMR for multiple dma entries */
464 iser_page_vec_build(mem, ib_conn->page_vec); 461 iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
465 err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg); 462 err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
466 if (err) { 463 if (err) {
467 iser_data_buf_dump(mem); 464 iser_data_buf_dump(mem, ibdev);
468 iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents, 465 iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
469 ntoh24(iser_ctask->desc.iscsi_header.dlength)); 466 ntoh24(iser_ctask->desc.iscsi_header.dlength));
470 iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n", 467 iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index a6289595557b..e9b6a6f07dd7 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -122,9 +122,8 @@ static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
122 if (!iu->buf) 122 if (!iu->buf)
123 goto out_free_iu; 123 goto out_free_iu;
124 124
125 iu->dma = dma_map_single(host->dev->dev->dma_device, 125 iu->dma = ib_dma_map_single(host->dev->dev, iu->buf, size, direction);
126 iu->buf, size, direction); 126 if (ib_dma_mapping_error(host->dev->dev, iu->dma))
127 if (dma_mapping_error(iu->dma))
128 goto out_free_buf; 127 goto out_free_buf;
129 128
130 iu->size = size; 129 iu->size = size;
@@ -145,8 +144,7 @@ static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
145 if (!iu) 144 if (!iu)
146 return; 145 return;
147 146
148 dma_unmap_single(host->dev->dev->dma_device, 147 ib_dma_unmap_single(host->dev->dev, iu->dma, iu->size, iu->direction);
149 iu->dma, iu->size, iu->direction);
150 kfree(iu->buf); 148 kfree(iu->buf);
151 kfree(iu); 149 kfree(iu);
152} 150}
@@ -482,8 +480,8 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
482 scat = &req->fake_sg; 480 scat = &req->fake_sg;
483 } 481 }
484 482
485 dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents, 483 ib_dma_unmap_sg(target->srp_host->dev->dev, scat, nents,
486 scmnd->sc_data_direction); 484 scmnd->sc_data_direction);
487} 485}
488 486
489static void srp_remove_req(struct srp_target_port *target, struct srp_request *req) 487static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
@@ -595,23 +593,26 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
595 int i, j; 593 int i, j;
596 int ret; 594 int ret;
597 struct srp_device *dev = target->srp_host->dev; 595 struct srp_device *dev = target->srp_host->dev;
596 struct ib_device *ibdev = dev->dev;
598 597
599 if (!dev->fmr_pool) 598 if (!dev->fmr_pool)
600 return -ENODEV; 599 return -ENODEV;
601 600
602 if ((sg_dma_address(&scat[0]) & ~dev->fmr_page_mask) && 601 if ((ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask) &&
603 mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3)) 602 mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3))
604 return -EINVAL; 603 return -EINVAL;
605 604
606 len = page_cnt = 0; 605 len = page_cnt = 0;
607 for (i = 0; i < sg_cnt; ++i) { 606 for (i = 0; i < sg_cnt; ++i) {
608 if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) { 607 unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
608
609 if (ib_sg_dma_address(ibdev, &scat[i]) & ~dev->fmr_page_mask) {
609 if (i > 0) 610 if (i > 0)
610 return -EINVAL; 611 return -EINVAL;
611 else 612 else
612 ++page_cnt; 613 ++page_cnt;
613 } 614 }
614 if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) & 615 if ((ib_sg_dma_address(ibdev, &scat[i]) + dma_len) &
615 ~dev->fmr_page_mask) { 616 ~dev->fmr_page_mask) {
616 if (i < sg_cnt - 1) 617 if (i < sg_cnt - 1)
617 return -EINVAL; 618 return -EINVAL;
@@ -619,7 +620,7 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
619 ++page_cnt; 620 ++page_cnt;
620 } 621 }
621 622
622 len += sg_dma_len(&scat[i]); 623 len += dma_len;
623 } 624 }
624 625
625 page_cnt += len >> dev->fmr_page_shift; 626 page_cnt += len >> dev->fmr_page_shift;
@@ -631,10 +632,14 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
631 return -ENOMEM; 632 return -ENOMEM;
632 633
633 page_cnt = 0; 634 page_cnt = 0;
634 for (i = 0; i < sg_cnt; ++i) 635 for (i = 0; i < sg_cnt; ++i) {
635 for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size) 636 unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
637
638 for (j = 0; j < dma_len; j += dev->fmr_page_size)
636 dma_pages[page_cnt++] = 639 dma_pages[page_cnt++] =
637 (sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j; 640 (ib_sg_dma_address(ibdev, &scat[i]) &
641 dev->fmr_page_mask) + j;
642 }
638 643
639 req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool, 644 req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
640 dma_pages, page_cnt, io_addr); 645 dma_pages, page_cnt, io_addr);
@@ -644,7 +649,8 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
644 goto out; 649 goto out;
645 } 650 }
646 651
647 buf->va = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask); 652 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &
653 ~dev->fmr_page_mask);
648 buf->key = cpu_to_be32(req->fmr->fmr->rkey); 654 buf->key = cpu_to_be32(req->fmr->fmr->rkey);
649 buf->len = cpu_to_be32(len); 655 buf->len = cpu_to_be32(len);
650 656
@@ -663,6 +669,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
663 struct srp_cmd *cmd = req->cmd->buf; 669 struct srp_cmd *cmd = req->cmd->buf;
664 int len, nents, count; 670 int len, nents, count;
665 u8 fmt = SRP_DATA_DESC_DIRECT; 671 u8 fmt = SRP_DATA_DESC_DIRECT;
672 struct srp_device *dev;
673 struct ib_device *ibdev;
666 674
667 if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE) 675 if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
668 return sizeof (struct srp_cmd); 676 return sizeof (struct srp_cmd);
@@ -687,8 +695,10 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
687 sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen); 695 sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
688 } 696 }
689 697
690 count = dma_map_sg(target->srp_host->dev->dev->dma_device, 698 dev = target->srp_host->dev;
691 scat, nents, scmnd->sc_data_direction); 699 ibdev = dev->dev;
700
701 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
692 702
693 fmt = SRP_DATA_DESC_DIRECT; 703 fmt = SRP_DATA_DESC_DIRECT;
694 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); 704 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
@@ -702,9 +712,9 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
702 */ 712 */
703 struct srp_direct_buf *buf = (void *) cmd->add_data; 713 struct srp_direct_buf *buf = (void *) cmd->add_data;
704 714
705 buf->va = cpu_to_be64(sg_dma_address(scat)); 715 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
706 buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey); 716 buf->key = cpu_to_be32(dev->mr->rkey);
707 buf->len = cpu_to_be32(sg_dma_len(scat)); 717 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
708 } else if (srp_map_fmr(target, scat, count, req, 718 } else if (srp_map_fmr(target, scat, count, req,
709 (void *) cmd->add_data)) { 719 (void *) cmd->add_data)) {
710 /* 720 /*
@@ -722,13 +732,14 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
722 count * sizeof (struct srp_direct_buf); 732 count * sizeof (struct srp_direct_buf);
723 733
724 for (i = 0; i < count; ++i) { 734 for (i = 0; i < count; ++i) {
735 unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
736
725 buf->desc_list[i].va = 737 buf->desc_list[i].va =
726 cpu_to_be64(sg_dma_address(&scat[i])); 738 cpu_to_be64(ib_sg_dma_address(ibdev, &scat[i]));
727 buf->desc_list[i].key = 739 buf->desc_list[i].key =
728 cpu_to_be32(target->srp_host->dev->mr->rkey); 740 cpu_to_be32(dev->mr->rkey);
729 buf->desc_list[i].len = 741 buf->desc_list[i].len = cpu_to_be32(dma_len);
730 cpu_to_be32(sg_dma_len(&scat[i])); 742 datalen += dma_len;
731 datalen += sg_dma_len(&scat[i]);
732 } 743 }
733 744
734 if (scmnd->sc_data_direction == DMA_TO_DEVICE) 745 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
@@ -808,13 +819,15 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
808 819
809static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) 820static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
810{ 821{
822 struct ib_device *dev;
811 struct srp_iu *iu; 823 struct srp_iu *iu;
812 u8 opcode; 824 u8 opcode;
813 825
814 iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV]; 826 iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];
815 827
816 dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma, 828 dev = target->srp_host->dev->dev;
817 target->max_ti_iu_len, DMA_FROM_DEVICE); 829 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
830 DMA_FROM_DEVICE);
818 831
819 opcode = *(u8 *) iu->buf; 832 opcode = *(u8 *) iu->buf;
820 833
@@ -850,8 +863,8 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
850 break; 863 break;
851 } 864 }
852 865
853 dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma, 866 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
854 target->max_ti_iu_len, DMA_FROM_DEVICE); 867 DMA_FROM_DEVICE);
855} 868}
856 869
857static void srp_completion(struct ib_cq *cq, void *target_ptr) 870static void srp_completion(struct ib_cq *cq, void *target_ptr)
@@ -969,6 +982,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
969 struct srp_request *req; 982 struct srp_request *req;
970 struct srp_iu *iu; 983 struct srp_iu *iu;
971 struct srp_cmd *cmd; 984 struct srp_cmd *cmd;
985 struct ib_device *dev;
972 int len; 986 int len;
973 987
974 if (target->state == SRP_TARGET_CONNECTING) 988 if (target->state == SRP_TARGET_CONNECTING)
@@ -985,8 +999,9 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
985 if (!iu) 999 if (!iu)
986 goto err; 1000 goto err;
987 1001
988 dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma, 1002 dev = target->srp_host->dev->dev;
989 srp_max_iu_len, DMA_TO_DEVICE); 1003 ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
1004 DMA_TO_DEVICE);
990 1005
991 req = list_entry(target->free_reqs.next, struct srp_request, list); 1006 req = list_entry(target->free_reqs.next, struct srp_request, list);
992 1007
@@ -1018,8 +1033,8 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
1018 goto err_unmap; 1033 goto err_unmap;
1019 } 1034 }
1020 1035
1021 dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma, 1036 ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
1022 srp_max_iu_len, DMA_TO_DEVICE); 1037 DMA_TO_DEVICE);
1023 1038
1024 if (__srp_post_send(target, iu, len)) { 1039 if (__srp_post_send(target, iu, len)) {
1025 printk(KERN_ERR PFX "Send failed\n"); 1040 printk(KERN_ERR PFX "Send failed\n");
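SRP keeps its information units mapped for the lifetime of the target, so the dma_sync_single_for_{cpu,device} calls become ib_dma_sync_single_for_{cpu,device}: ownership of a persistently mapped buffer is handed to the CPU before it is read or filled, and back to the device before posting. Roughly (hypothetical names, error handling omitted):

#include <rdma/ib_verbs.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>

struct my_iu {
        u64     dma;
        void   *buf;
        size_t  size;
};

/* Build a request in a persistently mapped buffer: sync to the CPU,
 * write it, then sync back to the device before posting the send. */
static void my_fill_iu(struct ib_device *ibdev, struct my_iu *iu,
                       const void *req, size_t len)
{
        ib_dma_sync_single_for_cpu(ibdev, iu->dma, iu->size, DMA_TO_DEVICE);
        memcpy(iu->buf, req, len);
        ib_dma_sync_single_for_device(ibdev, iu->dma, iu->size, DMA_TO_DEVICE);
}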
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index d4e35ef51374..868a540ef7cd 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -161,7 +161,7 @@ struct srp_target_port {
161}; 161};
162 162
163struct srp_iu { 163struct srp_iu {
164 dma_addr_t dma; 164 u64 dma;
165 void *buf; 165 void *buf;
166 size_t size; 166 size_t size;
167 enum dma_data_direction direction; 167 enum dma_data_direction direction;
diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c
index 54bc569db4b0..35461eab2faf 100644
--- a/drivers/input/keyboard/hilkbd.c
+++ b/drivers/input/keyboard/hilkbd.c
@@ -23,7 +23,12 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/hil.h> 25#include <linux/hil.h>
26#include <linux/io.h>
26#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <asm/irq.h>
29#ifdef CONFIG_HP300
30#include <asm/hwtest.h>
31#endif
27 32
28 33
29MODULE_AUTHOR("Philip Blundell, Matthew Wilcox, Helge Deller"); 34MODULE_AUTHOR("Philip Blundell, Matthew Wilcox, Helge Deller");
diff --git a/drivers/isdn/act2000/act2000_isa.c b/drivers/isdn/act2000/act2000_isa.c
index 3cac23739344..09ea50dd3459 100644
--- a/drivers/isdn/act2000/act2000_isa.c
+++ b/drivers/isdn/act2000/act2000_isa.c
@@ -408,7 +408,7 @@ act2000_isa_download(act2000_card * card, act2000_ddef __user * cb)
408 p = cblock.buffer; 408 p = cblock.buffer;
409 if (!access_ok(VERIFY_READ, p, length)) 409 if (!access_ok(VERIFY_READ, p, length))
410 return -EFAULT; 410 return -EFAULT;
411 buf = (u_char *) kmalloc(1024, GFP_KERNEL); 411 buf = kmalloc(1024, GFP_KERNEL);
412 if (!buf) 412 if (!buf)
413 return -ENOMEM; 413 return -ENOMEM;
414 timeout = 0; 414 timeout = 0;
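This and the following ISDN hunks are one cleanup repeated across files: kmalloc() returns void *, which converts implicitly to any object pointer in C, so casting its result is redundant and can hide a missing prototype. In isolation the pattern after the cleanup is simply:

#include <linux/slab.h>

static unsigned char *my_alloc_download_buf(void)
{
        /* no (u_char *) cast needed: void * converts implicitly in C */
        return kmalloc(1024, GFP_KERNEL);
}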
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 097bfa7bc323..c4d438c17dab 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -2013,7 +2013,7 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
2013 strcpy(card->name, id); 2013 strcpy(card->name, id);
2014 card->contrnr = contr; 2014 card->contrnr = contr;
2015 card->nbchan = profp->nbchannel; 2015 card->nbchan = profp->nbchannel;
2016 card->bchans = (capidrv_bchan *) kmalloc(sizeof(capidrv_bchan) * card->nbchan, GFP_ATOMIC); 2016 card->bchans = kmalloc(sizeof(capidrv_bchan) * card->nbchan, GFP_ATOMIC);
2017 if (!card->bchans) { 2017 if (!card->bchans) {
2018 printk(KERN_WARNING 2018 printk(KERN_WARNING
2019 "capidrv: (%s) Could not allocate bchan-structs.\n", id); 2019 "capidrv: (%s) Could not allocate bchan-structs.\n", id);
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 399b316111f7..06967da7c4a8 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -45,7 +45,7 @@ put_info_buffer(char *cp)
45 return; 45 return;
46 if (!*cp) 46 if (!*cp)
47 return; 47 return;
48 if (!(ib = (struct divert_info *) kmalloc(sizeof(struct divert_info) + strlen(cp), GFP_ATOMIC))) 48 if (!(ib = kmalloc(sizeof(struct divert_info) + strlen(cp), GFP_ATOMIC)))
49 return; /* no memory */ 49 return; /* no memory */
50 strcpy(ib->info_start, cp); /* set output string */ 50 strcpy(ib->info_start, cp); /* set output string */
51 ib->next = NULL; 51 ib->next = NULL;
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index 03319ea5aa0c..7d97d54588d9 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -153,7 +153,7 @@ int cf_command(int drvid, int mode,
153 *ielenp = p - ielenp - 1; /* set total IE length */ 153 *ielenp = p - ielenp - 1; /* set total IE length */
154 154
155 /* allocate mem for information struct */ 155 /* allocate mem for information struct */
156 if (!(cs = (struct call_struc *) kmalloc(sizeof(struct call_struc), GFP_ATOMIC))) 156 if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
157 return(-ENOMEM); /* no memory */ 157 return(-ENOMEM); /* no memory */
158 init_timer(&cs->timer); 158 init_timer(&cs->timer);
159 cs->info[0] = '\0'; 159 cs->info[0] = '\0';
@@ -276,7 +276,7 @@ int insertrule(int idx, divert_rule *newrule)
276{ struct deflect_struc *ds,*ds1=NULL; 276{ struct deflect_struc *ds,*ds1=NULL;
277 unsigned long flags; 277 unsigned long flags;
278 278
279 if (!(ds = (struct deflect_struc *) kmalloc(sizeof(struct deflect_struc), 279 if (!(ds = kmalloc(sizeof(struct deflect_struc),
280 GFP_KERNEL))) 280 GFP_KERNEL)))
281 return(-ENOMEM); /* no memory */ 281 return(-ENOMEM); /* no memory */
282 282
@@ -451,7 +451,7 @@ static int isdn_divert_icall(isdn_ctrl *ic)
451 if (dv->rule.action == DEFLECT_PROCEED) 451 if (dv->rule.action == DEFLECT_PROCEED)
452 if ((!if_used) || ((!extern_wait_max) && (!dv->rule.waittime))) 452 if ((!if_used) || ((!extern_wait_max) && (!dv->rule.waittime)))
453 return(0); /* no external deflection needed */ 453 return(0); /* no external deflection needed */
454 if (!(cs = (struct call_struc *) kmalloc(sizeof(struct call_struc), GFP_ATOMIC))) 454 if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
455 return(0); /* no memory */ 455 return(0); /* no memory */
456 init_timer(&cs->timer); 456 init_timer(&cs->timer);
457 cs->info[0] = '\0'; 457 cs->info[0] = '\0';
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 63b629b1cdb2..b5e7f9c7d74e 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -1853,20 +1853,24 @@ static int gigaset_write_cmd(struct cardstate *cs,
1853{ 1853{
1854 struct cmdbuf_t *cb; 1854 struct cmdbuf_t *cb;
1855 unsigned long flags; 1855 unsigned long flags;
1856 int status; 1856 int rc;
1857 1857
1858 gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ? 1858 gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ?
1859 DEBUG_TRANSCMD : DEBUG_LOCKCMD, 1859 DEBUG_TRANSCMD : DEBUG_LOCKCMD,
1860 "CMD Transmit", len, buf); 1860 "CMD Transmit", len, buf);
1861 1861
1862 if (len <= 0) 1862 if (len <= 0) {
1863 return 0; /* nothing to do */ 1863 /* nothing to do */
1864 rc = 0;
1865 goto notqueued;
1866 }
1864 1867
1865 if (len > IF_WRITEBUF) 1868 if (len > IF_WRITEBUF)
1866 len = IF_WRITEBUF; 1869 len = IF_WRITEBUF;
1867 if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { 1870 if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) {
1868 dev_err(cs->dev, "%s: out of memory\n", __func__); 1871 dev_err(cs->dev, "%s: out of memory\n", __func__);
1869 return -ENOMEM; 1872 rc = -ENOMEM;
1873 goto notqueued;
1870 } 1874 }
1871 1875
1872 memcpy(cb->buf, buf, len); 1876 memcpy(cb->buf, buf, len);
@@ -1891,11 +1895,21 @@ static int gigaset_write_cmd(struct cardstate *cs,
1891 if (unlikely(!cs->connected)) { 1895 if (unlikely(!cs->connected)) {
1892 spin_unlock_irqrestore(&cs->lock, flags); 1896 spin_unlock_irqrestore(&cs->lock, flags);
1893 gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__); 1897 gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__);
1898 /* flush command queue */
1899 spin_lock_irqsave(&cs->cmdlock, flags);
1900 while (cs->cmdbuf != NULL)
1901 complete_cb(cs);
1902 spin_unlock_irqrestore(&cs->cmdlock, flags);
1894 return -ENODEV; 1903 return -ENODEV;
1895 } 1904 }
1896 status = start_cbsend(cs); 1905 rc = start_cbsend(cs);
1897 spin_unlock_irqrestore(&cs->lock, flags); 1906 spin_unlock_irqrestore(&cs->lock, flags);
1898 return status < 0 ? status : len; 1907 return rc < 0 ? rc : len;
1908
1909notqueued: /* request handled without queuing */
1910 if (wake_tasklet)
1911 tasklet_schedule(wake_tasklet);
1912 return rc;
1899} 1913}
1900 1914
1901/* gigaset_write_room 1915/* gigaset_write_room
@@ -1964,20 +1978,15 @@ static int gigaset_freebcshw(struct bc_state *bcs)
1964 1978
1965 /* kill URBs and tasklets before freeing - better safe than sorry */ 1979 /* kill URBs and tasklets before freeing - better safe than sorry */
1966 atomic_set(&ubc->running, 0); 1980 atomic_set(&ubc->running, 0);
1967 for (i = 0; i < BAS_OUTURBS; ++i) 1981 gig_dbg(DEBUG_INIT, "%s: killing iso URBs", __func__);
1968 if (ubc->isoouturbs[i].urb) { 1982 for (i = 0; i < BAS_OUTURBS; ++i) {
1969 gig_dbg(DEBUG_INIT, "%s: killing iso out URB %d", 1983 usb_kill_urb(ubc->isoouturbs[i].urb);
1970 __func__, i); 1984 usb_free_urb(ubc->isoouturbs[i].urb);
1971 usb_kill_urb(ubc->isoouturbs[i].urb); 1985 }
1972 usb_free_urb(ubc->isoouturbs[i].urb); 1986 for (i = 0; i < BAS_INURBS; ++i) {
1973 } 1987 usb_kill_urb(ubc->isoinurbs[i]);
1974 for (i = 0; i < BAS_INURBS; ++i) 1988 usb_free_urb(ubc->isoinurbs[i]);
1975 if (ubc->isoinurbs[i]) { 1989 }
1976 gig_dbg(DEBUG_INIT, "%s: killing iso in URB %d",
1977 __func__, i);
1978 usb_kill_urb(ubc->isoinurbs[i]);
1979 usb_free_urb(ubc->isoinurbs[i]);
1980 }
1981 tasklet_kill(&ubc->sent_tasklet); 1990 tasklet_kill(&ubc->sent_tasklet);
1982 tasklet_kill(&ubc->rcvd_tasklet); 1991 tasklet_kill(&ubc->rcvd_tasklet);
1983 kfree(ubc->isooutbuf); 1992 kfree(ubc->isooutbuf);
@@ -2099,55 +2108,32 @@ static void freeurbs(struct cardstate *cs)
2099 struct bas_bc_state *ubc; 2108 struct bas_bc_state *ubc;
2100 int i, j; 2109 int i, j;
2101 2110
2111 gig_dbg(DEBUG_INIT, "%s: killing URBs", __func__);
2102 for (j = 0; j < 2; ++j) { 2112 for (j = 0; j < 2; ++j) {
2103 ubc = cs->bcs[j].hw.bas; 2113 ubc = cs->bcs[j].hw.bas;
2104 for (i = 0; i < BAS_OUTURBS; ++i) 2114 for (i = 0; i < BAS_OUTURBS; ++i) {
2105 if (ubc->isoouturbs[i].urb) { 2115 usb_kill_urb(ubc->isoouturbs[i].urb);
2106 usb_kill_urb(ubc->isoouturbs[i].urb); 2116 usb_free_urb(ubc->isoouturbs[i].urb);
2107 gig_dbg(DEBUG_INIT, 2117 ubc->isoouturbs[i].urb = NULL;
2108 "%s: isoc output URB %d/%d unlinked", 2118 }
2109 __func__, j, i); 2119 for (i = 0; i < BAS_INURBS; ++i) {
2110 usb_free_urb(ubc->isoouturbs[i].urb); 2120 usb_kill_urb(ubc->isoinurbs[i]);
2111 ubc->isoouturbs[i].urb = NULL; 2121 usb_free_urb(ubc->isoinurbs[i]);
2112 } 2122 ubc->isoinurbs[i] = NULL;
2113 for (i = 0; i < BAS_INURBS; ++i) 2123 }
2114 if (ubc->isoinurbs[i]) {
2115 usb_kill_urb(ubc->isoinurbs[i]);
2116 gig_dbg(DEBUG_INIT,
2117 "%s: isoc input URB %d/%d unlinked",
2118 __func__, j, i);
2119 usb_free_urb(ubc->isoinurbs[i]);
2120 ubc->isoinurbs[i] = NULL;
2121 }
2122 }
2123 if (ucs->urb_int_in) {
2124 usb_kill_urb(ucs->urb_int_in);
2125 gig_dbg(DEBUG_INIT, "%s: interrupt input URB unlinked",
2126 __func__);
2127 usb_free_urb(ucs->urb_int_in);
2128 ucs->urb_int_in = NULL;
2129 }
2130 if (ucs->urb_cmd_out) {
2131 usb_kill_urb(ucs->urb_cmd_out);
2132 gig_dbg(DEBUG_INIT, "%s: command output URB unlinked",
2133 __func__);
2134 usb_free_urb(ucs->urb_cmd_out);
2135 ucs->urb_cmd_out = NULL;
2136 }
2137 if (ucs->urb_cmd_in) {
2138 usb_kill_urb(ucs->urb_cmd_in);
2139 gig_dbg(DEBUG_INIT, "%s: command input URB unlinked",
2140 __func__);
2141 usb_free_urb(ucs->urb_cmd_in);
2142 ucs->urb_cmd_in = NULL;
2143 }
2144 if (ucs->urb_ctrl) {
2145 usb_kill_urb(ucs->urb_ctrl);
2146 gig_dbg(DEBUG_INIT, "%s: control output URB unlinked",
2147 __func__);
2148 usb_free_urb(ucs->urb_ctrl);
2149 ucs->urb_ctrl = NULL;
2150 } 2124 }
2125 usb_kill_urb(ucs->urb_int_in);
2126 usb_free_urb(ucs->urb_int_in);
2127 ucs->urb_int_in = NULL;
2128 usb_kill_urb(ucs->urb_cmd_out);
2129 usb_free_urb(ucs->urb_cmd_out);
2130 ucs->urb_cmd_out = NULL;
2131 usb_kill_urb(ucs->urb_cmd_in);
2132 usb_free_urb(ucs->urb_cmd_in);
2133 ucs->urb_cmd_in = NULL;
2134 usb_kill_urb(ucs->urb_ctrl);
2135 usb_free_urb(ucs->urb_ctrl);
2136 ucs->urb_ctrl = NULL;
2151} 2137}
2152 2138
2153/* gigaset_probe 2139/* gigaset_probe
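The bulk of the bas-gigaset diff shrinks the URB teardown paths: usb_kill_urb() and usb_free_urb() both tolerate a NULL pointer, so the per-URB NULL checks and per-URB debug messages go away in favour of one log line per teardown, and gigaset_write_cmd() gains a common "notqueued" exit that still schedules the wake tasklet. The teardown idiom, as a standalone sketch (my_kill_and_free_urbs is an illustrative name):

#include <linux/usb.h>

/* Both helpers accept NULL, so the loop needs no per-entry checks. */
static void my_kill_and_free_urbs(struct urb *urbs[], int n)
{
        int i;

        for (i = 0; i < n; i++) {
                usb_kill_urb(urbs[i]);
                usb_free_urb(urbs[i]);
                urbs[i] = NULL;
        }
}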
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c
index 8e2b03889f3c..94a935089119 100644
--- a/drivers/isdn/hysdn/hysdn_procconf.c
+++ b/drivers/isdn/hysdn/hysdn_procconf.c
@@ -275,7 +275,7 @@ hysdn_conf_open(struct inode *ino, struct file *filep)
275 } else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) { 275 } else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
276 /* read access -> output card info data */ 276 /* read access -> output card info data */
277 277
278 if (!(tmp = (char *) kmalloc(INFO_OUT_LEN * 2 + 2, GFP_KERNEL))) { 278 if (!(tmp = kmalloc(INFO_OUT_LEN * 2 + 2, GFP_KERNEL))) {
279 unlock_kernel(); 279 unlock_kernel();
280 return (-EFAULT); /* out of memory */ 280 return (-EFAULT); /* out of memory */
281 } 281 }
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index f241f5e551cb..375d956884d7 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -111,7 +111,7 @@ put_log_buffer(hysdn_card * card, char *cp)
111 if (pd->if_used <= 0) 111 if (pd->if_used <= 0)
112 return; /* no open file for read */ 112 return; /* no open file for read */
113 113
114 if (!(ib = (struct log_data *) kmalloc(sizeof(struct log_data) + strlen(cp), GFP_ATOMIC))) 114 if (!(ib = kmalloc(sizeof(struct log_data) + strlen(cp), GFP_ATOMIC)))
115 return; /* no memory */ 115 return; /* no memory */
116 strcpy(ib->log_start, cp); /* set output string */ 116 strcpy(ib->log_start, cp); /* set output string */
117 ib->next = NULL; 117 ib->next = NULL;
diff --git a/drivers/isdn/i4l/isdn_audio.c b/drivers/isdn/i4l/isdn_audio.c
index 2cc56d6a9fae..fb350c567c6b 100644
--- a/drivers/isdn/i4l/isdn_audio.c
+++ b/drivers/isdn/i4l/isdn_audio.c
@@ -328,7 +328,7 @@ adpcm_state *
328isdn_audio_adpcm_init(adpcm_state * s, int nbits) 328isdn_audio_adpcm_init(adpcm_state * s, int nbits)
329{ 329{
330 if (!s) 330 if (!s)
331 s = (adpcm_state *) kmalloc(sizeof(adpcm_state), GFP_ATOMIC); 331 s = kmalloc(sizeof(adpcm_state), GFP_ATOMIC);
332 if (s) { 332 if (s) {
333 s->a = 0; 333 s->a = 0;
334 s->d = 5; 334 s->d = 5;
@@ -343,7 +343,7 @@ dtmf_state *
343isdn_audio_dtmf_init(dtmf_state * s) 343isdn_audio_dtmf_init(dtmf_state * s)
344{ 344{
345 if (!s) 345 if (!s)
346 s = (dtmf_state *) kmalloc(sizeof(dtmf_state), GFP_ATOMIC); 346 s = kmalloc(sizeof(dtmf_state), GFP_ATOMIC);
347 if (s) { 347 if (s) {
348 s->idx = 0; 348 s->idx = 0;
349 s->last = ' '; 349 s->last = ' ';
@@ -621,7 +621,7 @@ silence_state *
621isdn_audio_silence_init(silence_state * s) 621isdn_audio_silence_init(silence_state * s)
622{ 622{
623 if (!s) 623 if (!s)
624 s = (silence_state *) kmalloc(sizeof(silence_state), GFP_ATOMIC); 624 s = kmalloc(sizeof(silence_state), GFP_ATOMIC);
625 if (s) { 625 if (s) {
626 s->idx = 0; 626 s->idx = 0;
627 s->state = 0; 627 s->state = 0;
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index c36c817578cb..838b3734e2b6 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -2948,7 +2948,7 @@ isdn_net_addphone(isdn_net_ioctl_phone * phone)
2948 isdn_net_phone *n; 2948 isdn_net_phone *n;
2949 2949
2950 if (p) { 2950 if (p) {
2951 if (!(n = (isdn_net_phone *) kmalloc(sizeof(isdn_net_phone), GFP_KERNEL))) 2951 if (!(n = kmalloc(sizeof(isdn_net_phone), GFP_KERNEL)))
2952 return -ENOMEM; 2952 return -ENOMEM;
2953 strcpy(n->num, phone->phone); 2953 strcpy(n->num, phone->phone);
2954 n->next = p->local->phone[phone->outgoing & 1]; 2954 n->next = p->local->phone[phone->outgoing & 1];
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 43811795b46b..1726131b20be 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -717,7 +717,7 @@ isdn_ppp_fill_rq(unsigned char *buf, int len, int proto, int slot)
717 printk(KERN_DEBUG "ippp: device not activated.\n"); 717 printk(KERN_DEBUG "ippp: device not activated.\n");
718 return 0; 718 return 0;
719 } 719 }
720 nbuf = (unsigned char *) kmalloc(len + 4, GFP_ATOMIC); 720 nbuf = kmalloc(len + 4, GFP_ATOMIC);
721 if (!nbuf) { 721 if (!nbuf) {
722 printk(KERN_WARNING "ippp: Can't alloc buf\n"); 722 printk(KERN_WARNING "ippp: Can't alloc buf\n");
723 return 0; 723 return 0;
diff --git a/drivers/isdn/pcbit/layer2.c b/drivers/isdn/pcbit/layer2.c
index 6ff85574e941..eafcce5e656a 100644
--- a/drivers/isdn/pcbit/layer2.c
+++ b/drivers/isdn/pcbit/layer2.c
@@ -100,7 +100,7 @@ pcbit_l2_write(struct pcbit_dev *dev, ulong msg, ushort refnum,
100 dev_kfree_skb(skb); 100 dev_kfree_skb(skb);
101 return -1; 101 return -1;
102 } 102 }
103 if ((frame = (struct frame_buf *) kmalloc(sizeof(struct frame_buf), 103 if ((frame = kmalloc(sizeof(struct frame_buf),
104 GFP_ATOMIC)) == NULL) { 104 GFP_ATOMIC)) == NULL) {
105 printk(KERN_WARNING "pcbit_2_write: kmalloc failed\n"); 105 printk(KERN_WARNING "pcbit_2_write: kmalloc failed\n");
106 dev_kfree_skb(skb); 106 dev_kfree_skb(skb);
diff --git a/drivers/kvm/Kconfig b/drivers/kvm/Kconfig
index 36412e90f09b..703cc88d1ef9 100644
--- a/drivers/kvm/Kconfig
+++ b/drivers/kvm/Kconfig
@@ -1,6 +1,8 @@
1# 1#
2# KVM configuration 2# KVM configuration
3# 3#
4menu "Virtualization"
5
4config KVM 6config KVM
5 tristate "Kernel-based Virtual Machine (KVM) support" 7 tristate "Kernel-based Virtual Machine (KVM) support"
6 depends on X86 && EXPERIMENTAL 8 depends on X86 && EXPERIMENTAL
@@ -31,3 +33,5 @@ config KVM_AMD
31 ---help--- 33 ---help---
32 Provides support for KVM on AMD processors equipped with the AMD-V 34 Provides support for KVM on AMD processors equipped with the AMD-V
33 (SVM) extensions. 35 (SVM) extensions.
36
37endmenu
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 5785d0870ab6..930e04ce1af6 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -140,7 +140,7 @@ enum {
140 VCPU_REGS_RBP = 5, 140 VCPU_REGS_RBP = 5,
141 VCPU_REGS_RSI = 6, 141 VCPU_REGS_RSI = 6,
142 VCPU_REGS_RDI = 7, 142 VCPU_REGS_RDI = 7,
143#ifdef __x86_64__ 143#ifdef CONFIG_X86_64
144 VCPU_REGS_R8 = 8, 144 VCPU_REGS_R8 = 8,
145 VCPU_REGS_R9 = 9, 145 VCPU_REGS_R9 = 9,
146 VCPU_REGS_R10 = 10, 146 VCPU_REGS_R10 = 10,
@@ -375,7 +375,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0);
375void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0); 375void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0);
376void lmsw(struct kvm_vcpu *vcpu, unsigned long msw); 376void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
377 377
378#ifdef __x86_64__ 378#ifdef CONFIG_X86_64
379void set_efer(struct kvm_vcpu *vcpu, u64 efer); 379void set_efer(struct kvm_vcpu *vcpu, u64 efer);
380#endif 380#endif
381 381
@@ -485,7 +485,7 @@ static inline unsigned long read_tr_base(void)
485 return segment_base(tr); 485 return segment_base(tr);
486} 486}
487 487
488#ifdef __x86_64__ 488#ifdef CONFIG_X86_64
489static inline unsigned long read_msr(unsigned long msr) 489static inline unsigned long read_msr(unsigned long msr)
490{ 490{
491 u64 value; 491 u64 value;
@@ -533,7 +533,7 @@ static inline u32 get_rdx_init_val(void)
533#define TSS_REDIRECTION_SIZE (256 / 8) 533#define TSS_REDIRECTION_SIZE (256 / 8)
534#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) 534#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
535 535
536#ifdef __x86_64__ 536#ifdef CONFIG_X86_64
537 537
538/* 538/*
539 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64. Therefore 539 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64. Therefore
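The kvm.h hunks above, and most of the kvm_main.c, kvm_svm.h, kvm_vmx.h, svm.c, vmx.c and x86_emulate changes that follow, are a single rename: the compiler-defined __x86_64__ macro gives way to CONFIG_X86_64, the symbol the kernel configuration system sets for 64-bit builds. Both are defined when compiling a 64-bit kernel; the CONFIG_ form is simply the kernel's convention for conditional code. An illustrative guard in that style (the constant is an example, not part of the patch):

    #ifdef CONFIG_X86_64
    #define NR_GP_REGS 16   /* r8-r15 exist only in long mode */
    #else
    #define NR_GP_REGS 8
    #endif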
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index b6b8a41b5ec8..fd1bb870545c 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -72,18 +72,7 @@ static struct dentry *debugfs_dir;
72#define CR8_RESEVED_BITS (~0x0fULL) 72#define CR8_RESEVED_BITS (~0x0fULL)
73#define EFER_RESERVED_BITS 0xfffffffffffff2fe 73#define EFER_RESERVED_BITS 0xfffffffffffff2fe
74 74
75struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr) 75#ifdef CONFIG_X86_64
76{
77 int i;
78
79 for (i = 0; i < vcpu->nmsrs; ++i)
80 if (vcpu->guest_msrs[i].index == msr)
81 return &vcpu->guest_msrs[i];
82 return 0;
83}
84EXPORT_SYMBOL_GPL(find_msr_entry);
85
86#ifdef __x86_64__
87// LDT or TSS descriptor in the GDT. 16 bytes. 76// LDT or TSS descriptor in the GDT. 16 bytes.
88struct segment_descriptor_64 { 77struct segment_descriptor_64 {
89 struct segment_descriptor s; 78 struct segment_descriptor s;
@@ -115,7 +104,7 @@ unsigned long segment_base(u16 selector)
115 } 104 }
116 d = (struct segment_descriptor *)(table_base + (selector & ~7)); 105 d = (struct segment_descriptor *)(table_base + (selector & ~7));
117 v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24); 106 v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
118#ifdef __x86_64__ 107#ifdef CONFIG_X86_64
119 if (d->system == 0 108 if (d->system == 0
120 && (d->type == 2 || d->type == 9 || d->type == 11)) 109 && (d->type == 2 || d->type == 9 || d->type == 11))
121 v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32; 110 v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
@@ -216,7 +205,6 @@ static struct kvm_vcpu *vcpu_load(struct kvm *kvm, int vcpu_slot)
216static void vcpu_put(struct kvm_vcpu *vcpu) 205static void vcpu_put(struct kvm_vcpu *vcpu)
217{ 206{
218 kvm_arch_ops->vcpu_put(vcpu); 207 kvm_arch_ops->vcpu_put(vcpu);
219 put_cpu();
220 mutex_unlock(&vcpu->mutex); 208 mutex_unlock(&vcpu->mutex);
221} 209}
222 210
@@ -351,7 +339,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
351 } 339 }
352 340
353 if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) { 341 if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
354#ifdef __x86_64__ 342#ifdef CONFIG_X86_64
355 if ((vcpu->shadow_efer & EFER_LME)) { 343 if ((vcpu->shadow_efer & EFER_LME)) {
356 int cs_db, cs_l; 344 int cs_db, cs_l;
357 345
@@ -1120,12 +1108,10 @@ static int get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
1120 return kvm_arch_ops->get_msr(vcpu, msr_index, pdata); 1108 return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
1121} 1109}
1122 1110
1123#ifdef __x86_64__ 1111#ifdef CONFIG_X86_64
1124 1112
1125void set_efer(struct kvm_vcpu *vcpu, u64 efer) 1113void set_efer(struct kvm_vcpu *vcpu, u64 efer)
1126{ 1114{
1127 struct vmx_msr_entry *msr;
1128
1129 if (efer & EFER_RESERVED_BITS) { 1115 if (efer & EFER_RESERVED_BITS) {
1130 printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n", 1116 printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
1131 efer); 1117 efer);
@@ -1140,16 +1126,12 @@ void set_efer(struct kvm_vcpu *vcpu, u64 efer)
1140 return; 1126 return;
1141 } 1127 }
1142 1128
1129 kvm_arch_ops->set_efer(vcpu, efer);
1130
1143 efer &= ~EFER_LMA; 1131 efer &= ~EFER_LMA;
1144 efer |= vcpu->shadow_efer & EFER_LMA; 1132 efer |= vcpu->shadow_efer & EFER_LMA;
1145 1133
1146 vcpu->shadow_efer = efer; 1134 vcpu->shadow_efer = efer;
1147
1148 msr = find_msr_entry(vcpu, MSR_EFER);
1149
1150 if (!(efer & EFER_LMA))
1151 efer &= ~EFER_LME;
1152 msr->data = efer;
1153} 1135}
1154EXPORT_SYMBOL_GPL(set_efer); 1136EXPORT_SYMBOL_GPL(set_efer);
1155 1137
@@ -1243,7 +1225,7 @@ static int kvm_dev_ioctl_get_regs(struct kvm *kvm, struct kvm_regs *regs)
1243 regs->rdi = vcpu->regs[VCPU_REGS_RDI]; 1225 regs->rdi = vcpu->regs[VCPU_REGS_RDI];
1244 regs->rsp = vcpu->regs[VCPU_REGS_RSP]; 1226 regs->rsp = vcpu->regs[VCPU_REGS_RSP];
1245 regs->rbp = vcpu->regs[VCPU_REGS_RBP]; 1227 regs->rbp = vcpu->regs[VCPU_REGS_RBP];
1246#ifdef __x86_64__ 1228#ifdef CONFIG_X86_64
1247 regs->r8 = vcpu->regs[VCPU_REGS_R8]; 1229 regs->r8 = vcpu->regs[VCPU_REGS_R8];
1248 regs->r9 = vcpu->regs[VCPU_REGS_R9]; 1230 regs->r9 = vcpu->regs[VCPU_REGS_R9];
1249 regs->r10 = vcpu->regs[VCPU_REGS_R10]; 1231 regs->r10 = vcpu->regs[VCPU_REGS_R10];
@@ -1287,7 +1269,7 @@ static int kvm_dev_ioctl_set_regs(struct kvm *kvm, struct kvm_regs *regs)
1287 vcpu->regs[VCPU_REGS_RDI] = regs->rdi; 1269 vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
1288 vcpu->regs[VCPU_REGS_RSP] = regs->rsp; 1270 vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
1289 vcpu->regs[VCPU_REGS_RBP] = regs->rbp; 1271 vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
1290#ifdef __x86_64__ 1272#ifdef CONFIG_X86_64
1291 vcpu->regs[VCPU_REGS_R8] = regs->r8; 1273 vcpu->regs[VCPU_REGS_R8] = regs->r8;
1292 vcpu->regs[VCPU_REGS_R9] = regs->r9; 1274 vcpu->regs[VCPU_REGS_R9] = regs->r9;
1293 vcpu->regs[VCPU_REGS_R10] = regs->r10; 1275 vcpu->regs[VCPU_REGS_R10] = regs->r10;
@@ -1401,7 +1383,7 @@ static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
1401 vcpu->cr8 = sregs->cr8; 1383 vcpu->cr8 = sregs->cr8;
1402 1384
1403 mmu_reset_needed |= vcpu->shadow_efer != sregs->efer; 1385 mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
1404#ifdef __x86_64__ 1386#ifdef CONFIG_X86_64
1405 kvm_arch_ops->set_efer(vcpu, sregs->efer); 1387 kvm_arch_ops->set_efer(vcpu, sregs->efer);
1406#endif 1388#endif
1407 vcpu->apic_base = sregs->apic_base; 1389 vcpu->apic_base = sregs->apic_base;
@@ -1434,7 +1416,7 @@ static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
1434static u32 msrs_to_save[] = { 1416static u32 msrs_to_save[] = {
1435 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, 1417 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
1436 MSR_K6_STAR, 1418 MSR_K6_STAR,
1437#ifdef __x86_64__ 1419#ifdef CONFIG_X86_64
1438 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, 1420 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
1439#endif 1421#endif
1440 MSR_IA32_TIME_STAMP_COUNTER, 1422 MSR_IA32_TIME_STAMP_COUNTER,
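Beyond the rename, these kvm_main.c hunks drop put_cpu() from vcpu_put(), remove find_msr_entry() from common code (it reappears below as a static helper inside vmx.c), and have set_efer() defer the MSR bookkeeping to the architecture backend instead of touching the VMX guest-MSR array directly. A rough, hypothetical sketch of the resulting split, trimmed down to the one callback involved:

    /* Pared-down illustration of the indirection: common code keeps the
     * shadow EFER consistent and leaves anything VMX- or SVM-specific
     * to a callback supplied by the backend module. */
    struct efer_ops {
            void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
    };

    static void set_efer_common(struct kvm_vcpu *vcpu, u64 efer,
                                const struct efer_ops *ops)
    {
            ops->set_efer(vcpu, efer);              /* backend-specific work */
            efer &= ~EFER_LMA;                      /* LMA follows hardware state */
            efer |= vcpu->shadow_efer & EFER_LMA;
            vcpu->shadow_efer = efer;
    }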
diff --git a/drivers/kvm/kvm_svm.h b/drivers/kvm/kvm_svm.h
index 7d7f2aa10960..74cc862f4935 100644
--- a/drivers/kvm/kvm_svm.h
+++ b/drivers/kvm/kvm_svm.h
@@ -9,7 +9,7 @@
9#include "kvm.h" 9#include "kvm.h"
10 10
11static const u32 host_save_msrs[] = { 11static const u32 host_save_msrs[] = {
12#ifdef __x86_64__ 12#ifdef CONFIG_X86_64
13 MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE, 13 MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
14 MSR_FS_BASE, MSR_GS_BASE, 14 MSR_FS_BASE, MSR_GS_BASE,
15#endif 15#endif
diff --git a/drivers/kvm/kvm_vmx.h b/drivers/kvm/kvm_vmx.h
index 87e12d2bfa16..d139f73fb6e1 100644
--- a/drivers/kvm/kvm_vmx.h
+++ b/drivers/kvm/kvm_vmx.h
@@ -1,7 +1,7 @@
1#ifndef __KVM_VMX_H 1#ifndef __KVM_VMX_H
2#define __KVM_VMX_H 2#define __KVM_VMX_H
3 3
4#ifdef __x86_64__ 4#ifdef CONFIG_X86_64
5/* 5/*
6 * avoid save/load MSR_SYSCALL_MASK and MSR_LSTAR by std vt 6 * avoid save/load MSR_SYSCALL_MASK and MSR_LSTAR by std vt
7 * mechanism (cpu bug AA24) 7 * mechanism (cpu bug AA24)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 4e29d9b7211c..3d367cbfe1f9 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -61,22 +61,9 @@
61 61
62 62
63#define PT32_PTE_COPY_MASK \ 63#define PT32_PTE_COPY_MASK \
64 (PT_PRESENT_MASK | PT_PWT_MASK | PT_PCD_MASK | \ 64 (PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_GLOBAL_MASK)
65 PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_PAT_MASK | \
66 PT_GLOBAL_MASK )
67
68#define PT32_NON_PTE_COPY_MASK \
69 (PT_PRESENT_MASK | PT_PWT_MASK | PT_PCD_MASK | \
70 PT_ACCESSED_MASK | PT_DIRTY_MASK)
71
72
73#define PT64_PTE_COPY_MASK \
74 (PT64_NX_MASK | PT32_PTE_COPY_MASK)
75
76#define PT64_NON_PTE_COPY_MASK \
77 (PT64_NX_MASK | PT32_NON_PTE_COPY_MASK)
78
79 65
66#define PT64_PTE_COPY_MASK (PT64_NX_MASK | PT32_PTE_COPY_MASK)
80 67
81#define PT_FIRST_AVAIL_BITS_SHIFT 9 68#define PT_FIRST_AVAIL_BITS_SHIFT 9
82#define PT64_SECOND_AVAIL_BITS_SHIFT 52 69#define PT64_SECOND_AVAIL_BITS_SHIFT 52
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 765c2e1a048e..a9771b4c5bb8 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -32,7 +32,6 @@
32 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) 32 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
33 #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level) 33 #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
34 #define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK 34 #define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
35 #define PT_NON_PTE_COPY_MASK PT64_NON_PTE_COPY_MASK
36#elif PTTYPE == 32 35#elif PTTYPE == 32
37 #define pt_element_t u32 36 #define pt_element_t u32
38 #define guest_walker guest_walker32 37 #define guest_walker guest_walker32
@@ -43,7 +42,6 @@
43 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) 42 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
44 #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level) 43 #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
45 #define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK 44 #define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
46 #define PT_NON_PTE_COPY_MASK PT32_NON_PTE_COPY_MASK
47#else 45#else
48 #error Invalid PTTYPE value 46 #error Invalid PTTYPE value
49#endif 47#endif
@@ -105,9 +103,7 @@ static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
105 if (PTTYPE == 32 && is_cpuid_PSE36()) 103 if (PTTYPE == 32 && is_cpuid_PSE36())
106 gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) << 104 gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
107 (32 - PT32_DIR_PSE36_SHIFT); 105 (32 - PT32_DIR_PSE36_SHIFT);
108 *shadow_pte = (guest_pde & (PT_NON_PTE_COPY_MASK | PT_GLOBAL_MASK)) | 106 *shadow_pte = guest_pde & PT_PTE_COPY_MASK;
109 ((guest_pde & PT_DIR_PAT_MASK) >>
110 (PT_DIR_PAT_SHIFT - PT_PAT_SHIFT));
111 set_pte_common(vcpu, shadow_pte, gaddr, 107 set_pte_common(vcpu, shadow_pte, gaddr,
112 guest_pde & PT_DIRTY_MASK, access_bits); 108 guest_pde & PT_DIRTY_MASK, access_bits);
113} 109}
@@ -162,6 +158,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
162 u32 index = SHADOW_PT_INDEX(addr, level); 158 u32 index = SHADOW_PT_INDEX(addr, level);
163 u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index; 159 u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
164 pt_element_t *guest_ent; 160 pt_element_t *guest_ent;
161 u64 shadow_pte;
165 162
166 if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) { 163 if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
167 if (level == PT_PAGE_TABLE_LEVEL) 164 if (level == PT_PAGE_TABLE_LEVEL)
@@ -204,14 +201,11 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
204 shadow_addr = kvm_mmu_alloc_page(vcpu, shadow_ent); 201 shadow_addr = kvm_mmu_alloc_page(vcpu, shadow_ent);
205 if (!VALID_PAGE(shadow_addr)) 202 if (!VALID_PAGE(shadow_addr))
206 return ERR_PTR(-ENOMEM); 203 return ERR_PTR(-ENOMEM);
207 if (!kvm_arch_ops->is_long_mode(vcpu) && level == 3) 204 shadow_pte = shadow_addr | PT_PRESENT_MASK;
208 *shadow_ent = shadow_addr | 205 if (vcpu->mmu.root_level > 3 || level != 3)
209 (*guest_ent & (PT_PRESENT_MASK | PT_PWT_MASK | PT_PCD_MASK)); 206 shadow_pte |= PT_ACCESSED_MASK
210 else { 207 | PT_WRITABLE_MASK | PT_USER_MASK;
211 *shadow_ent = shadow_addr | 208 *shadow_ent = shadow_pte;
212 (*guest_ent & PT_NON_PTE_COPY_MASK);
213 *shadow_ent |= (PT_WRITABLE_MASK | PT_USER_MASK);
214 }
215 prev_shadow_ent = shadow_ent; 209 prev_shadow_ent = shadow_ent;
216 } 210 }
217} 211}
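paging_tmpl.h is a macro template: mmu.c includes it twice, once with PTTYPE defined as 64 and once as 32, so a per-mode alias such as PT_NON_PTE_COPY_MASK has to be deleted from both branches when the underlying macros go away. The instantiation pattern, shown with nothing but the template machinery itself:

    /* Each pass compiles the same function bodies against a different
     * page-table layout (this mirrors what mmu.c does). */
    #define PTTYPE 64
    #include "paging_tmpl.h"
    #undef PTTYPE

    #define PTTYPE 32
    #include "paging_tmpl.h"
    #undef PTTYPE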
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index a33a89c68138..0e6bc8c649ce 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -287,7 +287,7 @@ static void svm_hardware_enable(void *garbage)
287 287
288 struct svm_cpu_data *svm_data; 288 struct svm_cpu_data *svm_data;
289 uint64_t efer; 289 uint64_t efer;
290#ifdef __x86_64__ 290#ifdef CONFIG_X86_64
291 struct desc_ptr gdt_descr; 291 struct desc_ptr gdt_descr;
292#else 292#else
293 struct Xgt_desc_struct gdt_descr; 293 struct Xgt_desc_struct gdt_descr;
@@ -377,6 +377,7 @@ static __init int svm_hardware_setup(void)
377 void *msrpm_va; 377 void *msrpm_va;
378 int r; 378 int r;
379 379
380 kvm_emulator_want_group7_invlpg();
380 381
381 iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER); 382 iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
382 383
@@ -397,7 +398,7 @@ static __init int svm_hardware_setup(void)
397 memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER)); 398 memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
398 msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT; 399 msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;
399 400
400#ifdef __x86_64__ 401#ifdef CONFIG_X86_64
401 set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1); 402 set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
402 set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1); 403 set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
403 set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1); 404 set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
@@ -704,7 +705,7 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
704 705
705static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 706static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
706{ 707{
707#ifdef __x86_64__ 708#ifdef CONFIG_X86_64
708 if (vcpu->shadow_efer & KVM_EFER_LME) { 709 if (vcpu->shadow_efer & KVM_EFER_LME) {
709 if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) { 710 if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
710 vcpu->shadow_efer |= KVM_EFER_LMA; 711 vcpu->shadow_efer |= KVM_EFER_LMA;
@@ -1097,7 +1098,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
1097 case MSR_IA32_APICBASE: 1098 case MSR_IA32_APICBASE:
1098 *data = vcpu->apic_base; 1099 *data = vcpu->apic_base;
1099 break; 1100 break;
1100#ifdef __x86_64__ 1101#ifdef CONFIG_X86_64
1101 case MSR_STAR: 1102 case MSR_STAR:
1102 *data = vcpu->svm->vmcb->save.star; 1103 *data = vcpu->svm->vmcb->save.star;
1103 break; 1104 break;
@@ -1149,7 +1150,7 @@ static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1149static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) 1150static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
1150{ 1151{
1151 switch (ecx) { 1152 switch (ecx) {
1152#ifdef __x86_64__ 1153#ifdef CONFIG_X86_64
1153 case MSR_EFER: 1154 case MSR_EFER:
1154 set_efer(vcpu, data); 1155 set_efer(vcpu, data);
1155 break; 1156 break;
@@ -1172,7 +1173,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
1172 case MSR_IA32_APICBASE: 1173 case MSR_IA32_APICBASE:
1173 vcpu->apic_base = data; 1174 vcpu->apic_base = data;
1174 break; 1175 break;
1175#ifdef __x86_64___ 1176#ifdef CONFIG_X86_64
1176 case MSR_STAR: 1177 case MSR_STAR:
1177 vcpu->svm->vmcb->save.star = data; 1178 vcpu->svm->vmcb->save.star = data;
1178 break; 1179 break;
@@ -1345,53 +1346,18 @@ static void kvm_reput_irq(struct kvm_vcpu *vcpu)
1345 1346
1346static void save_db_regs(unsigned long *db_regs) 1347static void save_db_regs(unsigned long *db_regs)
1347{ 1348{
1348#ifdef __x86_64__ 1349 asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
1349 asm ("mov %%dr0, %%rax \n\t" 1350 asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
1350 "mov %%rax, %[dr0] \n\t" 1351 asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
1351 "mov %%dr1, %%rax \n\t" 1352 asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
1352 "mov %%rax, %[dr1] \n\t"
1353 "mov %%dr2, %%rax \n\t"
1354 "mov %%rax, %[dr2] \n\t"
1355 "mov %%dr3, %%rax \n\t"
1356 "mov %%rax, %[dr3] \n\t"
1357 : [dr0] "=m"(db_regs[0]),
1358 [dr1] "=m"(db_regs[1]),
1359 [dr2] "=m"(db_regs[2]),
1360 [dr3] "=m"(db_regs[3])
1361 : : "rax");
1362#else
1363 asm ("mov %%dr0, %%eax \n\t"
1364 "mov %%eax, %[dr0] \n\t"
1365 "mov %%dr1, %%eax \n\t"
1366 "mov %%eax, %[dr1] \n\t"
1367 "mov %%dr2, %%eax \n\t"
1368 "mov %%eax, %[dr2] \n\t"
1369 "mov %%dr3, %%eax \n\t"
1370 "mov %%eax, %[dr3] \n\t"
1371 : [dr0] "=m"(db_regs[0]),
1372 [dr1] "=m"(db_regs[1]),
1373 [dr2] "=m"(db_regs[2]),
1374 [dr3] "=m"(db_regs[3])
1375 : : "eax");
1376#endif
1377} 1353}
1378 1354
1379static void load_db_regs(unsigned long *db_regs) 1355static void load_db_regs(unsigned long *db_regs)
1380{ 1356{
1381 asm volatile ("mov %[dr0], %%dr0 \n\t" 1357 asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
1382 "mov %[dr1], %%dr1 \n\t" 1358 asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
1383 "mov %[dr2], %%dr2 \n\t" 1359 asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
1384 "mov %[dr3], %%dr3 \n\t" 1360 asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
1385 :
1386 : [dr0] "r"(db_regs[0]),
1387 [dr1] "r"(db_regs[1]),
1388 [dr2] "r"(db_regs[2]),
1389 [dr3] "r"(db_regs[3])
1390#ifdef __x86_64__
1391 : "rax");
1392#else
1393 : "eax");
1394#endif
1395} 1361}
1396 1362
1397static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1363static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -1422,7 +1388,7 @@ again:
1422 load_db_regs(vcpu->svm->db_regs); 1388 load_db_regs(vcpu->svm->db_regs);
1423 } 1389 }
1424 asm volatile ( 1390 asm volatile (
1425#ifdef __x86_64__ 1391#ifdef CONFIG_X86_64
1426 "push %%rbx; push %%rcx; push %%rdx;" 1392 "push %%rbx; push %%rcx; push %%rdx;"
1427 "push %%rsi; push %%rdi; push %%rbp;" 1393 "push %%rsi; push %%rdi; push %%rbp;"
1428 "push %%r8; push %%r9; push %%r10; push %%r11;" 1394 "push %%r8; push %%r9; push %%r10; push %%r11;"
@@ -1432,7 +1398,7 @@ again:
1432 "push %%esi; push %%edi; push %%ebp;" 1398 "push %%esi; push %%edi; push %%ebp;"
1433#endif 1399#endif
1434 1400
1435#ifdef __x86_64__ 1401#ifdef CONFIG_X86_64
1436 "mov %c[rbx](%[vcpu]), %%rbx \n\t" 1402 "mov %c[rbx](%[vcpu]), %%rbx \n\t"
1437 "mov %c[rcx](%[vcpu]), %%rcx \n\t" 1403 "mov %c[rcx](%[vcpu]), %%rcx \n\t"
1438 "mov %c[rdx](%[vcpu]), %%rdx \n\t" 1404 "mov %c[rdx](%[vcpu]), %%rdx \n\t"
@@ -1456,7 +1422,7 @@ again:
1456 "mov %c[rbp](%[vcpu]), %%ebp \n\t" 1422 "mov %c[rbp](%[vcpu]), %%ebp \n\t"
1457#endif 1423#endif
1458 1424
1459#ifdef __x86_64__ 1425#ifdef CONFIG_X86_64
1460 /* Enter guest mode */ 1426 /* Enter guest mode */
1461 "push %%rax \n\t" 1427 "push %%rax \n\t"
1462 "mov %c[svm](%[vcpu]), %%rax \n\t" 1428 "mov %c[svm](%[vcpu]), %%rax \n\t"
@@ -1477,7 +1443,7 @@ again:
1477#endif 1443#endif
1478 1444
1479 /* Save guest registers, load host registers */ 1445 /* Save guest registers, load host registers */
1480#ifdef __x86_64__ 1446#ifdef CONFIG_X86_64
1481 "mov %%rbx, %c[rbx](%[vcpu]) \n\t" 1447 "mov %%rbx, %c[rbx](%[vcpu]) \n\t"
1482 "mov %%rcx, %c[rcx](%[vcpu]) \n\t" 1448 "mov %%rcx, %c[rcx](%[vcpu]) \n\t"
1483 "mov %%rdx, %c[rdx](%[vcpu]) \n\t" 1449 "mov %%rdx, %c[rdx](%[vcpu]) \n\t"
@@ -1518,7 +1484,7 @@ again:
1518 [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])), 1484 [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
1519 [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])), 1485 [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
1520 [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])) 1486 [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP]))
1521#ifdef __x86_64__ 1487#ifdef CONFIG_X86_64
1522 ,[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])), 1488 ,[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
1523 [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])), 1489 [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
1524 [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])), 1490 [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
@@ -1663,9 +1629,7 @@ static struct kvm_arch_ops svm_arch_ops = {
1663 1629
1664static int __init svm_init(void) 1630static int __init svm_init(void)
1665{ 1631{
1666 kvm_emulator_want_group7_invlpg(); 1632 return kvm_init_arch(&svm_arch_ops, THIS_MODULE);
1667 kvm_init_arch(&svm_arch_ops, THIS_MODULE);
1668 return 0;
1669} 1633}
1670 1634
1671static void __exit svm_exit(void) 1635static void __exit svm_exit(void)
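Two smaller points in the svm.c hunks besides the rename: the debug-register save/restore drops the hand-written rax/eax shuffling in favour of one asm statement per register with "=r"/"r" constraints, so the compiler picks a scratch register of the right width on either architecture and the #ifdef duplication vanishes; and svm_init() now returns the result of kvm_init_arch() instead of discarding it, with kvm_emulator_want_group7_invlpg() moving into svm_hardware_setup(). A compact sketch of the constraint-based style; the macro names are invented here:

    /* Works on i386 and x86_64 alike, since unsigned long matches the
     * register width the compiler chooses for "r". */
    #define read_dr(n, val)         asm volatile("mov %%dr" #n ", %0" : "=r" (val))
    #define write_dr(n, val)        asm volatile("mov %0, %%dr" #n : : "r" (val))

    static void save_db_regs_sketch(unsigned long *db_regs)
    {
            read_dr(0, db_regs[0]);
            read_dr(1, db_regs[1]);
            read_dr(2, db_regs[2]);
            read_dr(3, db_regs[3]);
    }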
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index bda7a7ae2167..f0f0b1a781f8 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -22,6 +22,7 @@
22#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/highmem.h> 23#include <linux/highmem.h>
24#include <asm/io.h> 24#include <asm/io.h>
25#include <asm/desc.h>
25 26
26#include "segment_descriptor.h" 27#include "segment_descriptor.h"
27 28
@@ -33,7 +34,7 @@ MODULE_LICENSE("GPL");
33static DEFINE_PER_CPU(struct vmcs *, vmxarea); 34static DEFINE_PER_CPU(struct vmcs *, vmxarea);
34static DEFINE_PER_CPU(struct vmcs *, current_vmcs); 35static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
35 36
36#ifdef __x86_64__ 37#ifdef CONFIG_X86_64
37#define HOST_IS_64 1 38#define HOST_IS_64 1
38#else 39#else
39#define HOST_IS_64 0 40#define HOST_IS_64 0
@@ -70,15 +71,13 @@ static struct kvm_vmx_segment_field {
70}; 71};
71 72
72static const u32 vmx_msr_index[] = { 73static const u32 vmx_msr_index[] = {
73#ifdef __x86_64__ 74#ifdef CONFIG_X86_64
74 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE, 75 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
75#endif 76#endif
76 MSR_EFER, MSR_K6_STAR, 77 MSR_EFER, MSR_K6_STAR,
77}; 78};
78#define NR_VMX_MSR (sizeof(vmx_msr_index) / sizeof(*vmx_msr_index)) 79#define NR_VMX_MSR (sizeof(vmx_msr_index) / sizeof(*vmx_msr_index))
79 80
80struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr);
81
82static inline int is_page_fault(u32 intr_info) 81static inline int is_page_fault(u32 intr_info)
83{ 82{
84 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | 83 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
@@ -92,6 +91,16 @@ static inline int is_external_interrupt(u32 intr_info)
92 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); 91 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
93} 92}
94 93
94static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
95{
96 int i;
97
98 for (i = 0; i < vcpu->nmsrs; ++i)
99 if (vcpu->guest_msrs[i].index == msr)
100 return &vcpu->guest_msrs[i];
101 return 0;
102}
103
95static void vmcs_clear(struct vmcs *vmcs) 104static void vmcs_clear(struct vmcs *vmcs)
96{ 105{
97 u64 phys_addr = __pa(vmcs); 106 u64 phys_addr = __pa(vmcs);
@@ -137,7 +146,7 @@ static u32 vmcs_read32(unsigned long field)
137 146
138static u64 vmcs_read64(unsigned long field) 147static u64 vmcs_read64(unsigned long field)
139{ 148{
140#ifdef __x86_64__ 149#ifdef CONFIG_X86_64
141 return vmcs_readl(field); 150 return vmcs_readl(field);
142#else 151#else
143 return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32); 152 return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
@@ -167,7 +176,7 @@ static void vmcs_write32(unsigned long field, u32 value)
167 176
168static void vmcs_write64(unsigned long field, u64 value) 177static void vmcs_write64(unsigned long field, u64 value)
169{ 178{
170#ifdef __x86_64__ 179#ifdef CONFIG_X86_64
171 vmcs_writel(field, value); 180 vmcs_writel(field, value);
172#else 181#else
173 vmcs_writel(field, value); 182 vmcs_writel(field, value);
@@ -296,7 +305,7 @@ static void guest_write_tsc(u64 guest_tsc)
296 305
297static void reload_tss(void) 306static void reload_tss(void)
298{ 307{
299#ifndef __x86_64__ 308#ifndef CONFIG_X86_64
300 309
301 /* 310 /*
302 * VT restores TR but not its size. Useless. 311 * VT restores TR but not its size. Useless.
@@ -327,7 +336,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
327 } 336 }
328 337
329 switch (msr_index) { 338 switch (msr_index) {
330#ifdef __x86_64__ 339#ifdef CONFIG_X86_64
331 case MSR_FS_BASE: 340 case MSR_FS_BASE:
332 data = vmcs_readl(GUEST_FS_BASE); 341 data = vmcs_readl(GUEST_FS_BASE);
333 break; 342 break;
@@ -390,7 +399,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
390{ 399{
391 struct vmx_msr_entry *msr; 400 struct vmx_msr_entry *msr;
392 switch (msr_index) { 401 switch (msr_index) {
393#ifdef __x86_64__ 402#ifdef CONFIG_X86_64
394 case MSR_FS_BASE: 403 case MSR_FS_BASE:
395 vmcs_writel(GUEST_FS_BASE, data); 404 vmcs_writel(GUEST_FS_BASE, data);
396 break; 405 break;
@@ -525,7 +534,7 @@ static __init void hardware_enable(void *garbage)
525 u64 old; 534 u64 old;
526 535
527 rdmsrl(MSR_IA32_FEATURE_CONTROL, old); 536 rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
528 if ((old & 5) == 0) 537 if ((old & 5) != 5)
529 /* enable and lock */ 538 /* enable and lock */
530 wrmsrl(MSR_IA32_FEATURE_CONTROL, old | 5); 539 wrmsrl(MSR_IA32_FEATURE_CONTROL, old | 5);
531 write_cr4(read_cr4() | CR4_VMXE); /* FIXME: not cpu hotplug safe */ 540 write_cr4(read_cr4() | CR4_VMXE); /* FIXME: not cpu hotplug safe */
@@ -725,7 +734,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
725 fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs); 734 fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
726} 735}
727 736
728#ifdef __x86_64__ 737#ifdef CONFIG_X86_64
729 738
730static void enter_lmode(struct kvm_vcpu *vcpu) 739static void enter_lmode(struct kvm_vcpu *vcpu)
731{ 740{
@@ -767,7 +776,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
767 if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK)) 776 if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
768 enter_rmode(vcpu); 777 enter_rmode(vcpu);
769 778
770#ifdef __x86_64__ 779#ifdef CONFIG_X86_64
771 if (vcpu->shadow_efer & EFER_LME) { 780 if (vcpu->shadow_efer & EFER_LME) {
772 if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) 781 if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK))
773 enter_lmode(vcpu); 782 enter_lmode(vcpu);
@@ -808,7 +817,7 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
808 vcpu->cr4 = cr4; 817 vcpu->cr4 = cr4;
809} 818}
810 819
811#ifdef __x86_64__ 820#ifdef CONFIG_X86_64
812 821
813static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) 822static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
814{ 823{
@@ -883,6 +892,8 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
883 ar |= (var->db & 1) << 14; 892 ar |= (var->db & 1) << 14;
884 ar |= (var->g & 1) << 15; 893 ar |= (var->g & 1) << 15;
885 } 894 }
895 if (ar == 0) /* a 0 value means unusable */
896 ar = AR_UNUSABLE_MASK;
886 vmcs_write32(sf->ar_bytes, ar); 897 vmcs_write32(sf->ar_bytes, ar);
887} 898}
888 899
@@ -1095,7 +1106,7 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1095 vmcs_write16(HOST_FS_SELECTOR, read_fs()); /* 22.2.4 */ 1106 vmcs_write16(HOST_FS_SELECTOR, read_fs()); /* 22.2.4 */
1096 vmcs_write16(HOST_GS_SELECTOR, read_gs()); /* 22.2.4 */ 1107 vmcs_write16(HOST_GS_SELECTOR, read_gs()); /* 22.2.4 */
1097 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 1108 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
1098#ifdef __x86_64__ 1109#ifdef CONFIG_X86_64
1099 rdmsrl(MSR_FS_BASE, a); 1110 rdmsrl(MSR_FS_BASE, a);
1100 vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */ 1111 vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
1101 rdmsrl(MSR_GS_BASE, a); 1112 rdmsrl(MSR_GS_BASE, a);
@@ -1164,8 +1175,10 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1164 VM_ENTRY_CONTROLS, 0); 1175 VM_ENTRY_CONTROLS, 0);
1165 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ 1176 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
1166 1177
1178#ifdef CONFIG_X86_64
1167 vmcs_writel(VIRTUAL_APIC_PAGE_ADDR, 0); 1179 vmcs_writel(VIRTUAL_APIC_PAGE_ADDR, 0);
1168 vmcs_writel(TPR_THRESHOLD, 0); 1180 vmcs_writel(TPR_THRESHOLD, 0);
1181#endif
1169 1182
1170 vmcs_writel(CR0_GUEST_HOST_MASK, KVM_GUEST_CR0_MASK); 1183 vmcs_writel(CR0_GUEST_HOST_MASK, KVM_GUEST_CR0_MASK);
1171 vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK); 1184 vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
@@ -1173,7 +1186,7 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1173 vcpu->cr0 = 0x60000010; 1186 vcpu->cr0 = 0x60000010;
1174 vmx_set_cr0(vcpu, vcpu->cr0); // enter rmode 1187 vmx_set_cr0(vcpu, vcpu->cr0); // enter rmode
1175 vmx_set_cr4(vcpu, 0); 1188 vmx_set_cr4(vcpu, 0);
1176#ifdef __x86_64__ 1189#ifdef CONFIG_X86_64
1177 vmx_set_efer(vcpu, 0); 1190 vmx_set_efer(vcpu, 0);
1178#endif 1191#endif
1179 1192
@@ -1689,7 +1702,7 @@ again:
1689 vmcs_write16(HOST_GS_SELECTOR, 0); 1702 vmcs_write16(HOST_GS_SELECTOR, 0);
1690 } 1703 }
1691 1704
1692#ifdef __x86_64__ 1705#ifdef CONFIG_X86_64
1693 vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE)); 1706 vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
1694 vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE)); 1707 vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
1695#else 1708#else
@@ -1713,7 +1726,7 @@ again:
1713 asm ( 1726 asm (
1714 /* Store host registers */ 1727 /* Store host registers */
1715 "pushf \n\t" 1728 "pushf \n\t"
1716#ifdef __x86_64__ 1729#ifdef CONFIG_X86_64
1717 "push %%rax; push %%rbx; push %%rdx;" 1730 "push %%rax; push %%rbx; push %%rdx;"
1718 "push %%rsi; push %%rdi; push %%rbp;" 1731 "push %%rsi; push %%rdi; push %%rbp;"
1719 "push %%r8; push %%r9; push %%r10; push %%r11;" 1732 "push %%r8; push %%r9; push %%r10; push %%r11;"
@@ -1727,7 +1740,7 @@ again:
1727 /* Check if vmlaunch of vmresume is needed */ 1740 /* Check if vmlaunch of vmresume is needed */
1728 "cmp $0, %1 \n\t" 1741 "cmp $0, %1 \n\t"
1729 /* Load guest registers. Don't clobber flags. */ 1742 /* Load guest registers. Don't clobber flags. */
1730#ifdef __x86_64__ 1743#ifdef CONFIG_X86_64
1731 "mov %c[cr2](%3), %%rax \n\t" 1744 "mov %c[cr2](%3), %%rax \n\t"
1732 "mov %%rax, %%cr2 \n\t" 1745 "mov %%rax, %%cr2 \n\t"
1733 "mov %c[rax](%3), %%rax \n\t" 1746 "mov %c[rax](%3), %%rax \n\t"
@@ -1764,7 +1777,7 @@ again:
1764 ".globl kvm_vmx_return \n\t" 1777 ".globl kvm_vmx_return \n\t"
1765 "kvm_vmx_return: " 1778 "kvm_vmx_return: "
1766 /* Save guest registers, load host registers, keep flags */ 1779 /* Save guest registers, load host registers, keep flags */
1767#ifdef __x86_64__ 1780#ifdef CONFIG_X86_64
1768 "xchg %3, 0(%%rsp) \n\t" 1781 "xchg %3, 0(%%rsp) \n\t"
1769 "mov %%rax, %c[rax](%3) \n\t" 1782 "mov %%rax, %c[rax](%3) \n\t"
1770 "mov %%rbx, %c[rbx](%3) \n\t" 1783 "mov %%rbx, %c[rbx](%3) \n\t"
@@ -1816,7 +1829,7 @@ again:
1816 [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])), 1829 [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
1817 [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])), 1830 [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
1818 [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])), 1831 [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
1819#ifdef __x86_64__ 1832#ifdef CONFIG_X86_64
1820 [r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])), 1833 [r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
1821 [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])), 1834 [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
1822 [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])), 1835 [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
@@ -1837,7 +1850,7 @@ again:
1837 fx_save(vcpu->guest_fx_image); 1850 fx_save(vcpu->guest_fx_image);
1838 fx_restore(vcpu->host_fx_image); 1851 fx_restore(vcpu->host_fx_image);
1839 1852
1840#ifndef __x86_64__ 1853#ifndef CONFIG_X86_64
1841 asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); 1854 asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
1842#endif 1855#endif
1843 1856
@@ -1855,7 +1868,7 @@ again:
1855 */ 1868 */
1856 local_irq_disable(); 1869 local_irq_disable();
1857 load_gs(gs_sel); 1870 load_gs(gs_sel);
1858#ifdef __x86_64__ 1871#ifdef CONFIG_X86_64
1859 wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE)); 1872 wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
1860#endif 1873#endif
1861 local_irq_enable(); 1874 local_irq_enable();
@@ -1965,7 +1978,7 @@ static struct kvm_arch_ops vmx_arch_ops = {
1965 .set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch, 1978 .set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch,
1966 .set_cr3 = vmx_set_cr3, 1979 .set_cr3 = vmx_set_cr3,
1967 .set_cr4 = vmx_set_cr4, 1980 .set_cr4 = vmx_set_cr4,
1968#ifdef __x86_64__ 1981#ifdef CONFIG_X86_64
1969 .set_efer = vmx_set_efer, 1982 .set_efer = vmx_set_efer,
1970#endif 1983#endif
1971 .get_idt = vmx_get_idt, 1984 .get_idt = vmx_get_idt,
@@ -1989,8 +2002,7 @@ static struct kvm_arch_ops vmx_arch_ops = {
1989 2002
1990static int __init vmx_init(void) 2003static int __init vmx_init(void)
1991{ 2004{
1992 kvm_init_arch(&vmx_arch_ops, THIS_MODULE); 2005 return kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
1993 return 0;
1994} 2006}
1995 2007
1996static void __exit vmx_exit(void) 2008static void __exit vmx_exit(void)
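Alongside the rename, the vmx.c hunks pull find_msr_entry() in as a static helper (its extern declaration and the copy in kvm_main.c were removed above), treat an all-zero access-rights value as "unusable" in vmx_set_segment(), propagate kvm_init_arch()'s return code from vmx_init(), and tighten the IA32_FEATURE_CONTROL check in hardware_enable(): VMXON needs both the lock bit and the enable bit set, so the MSR must be written unless both are already present, whereas the old "== 0" test skipped the write whenever either bit was set on its own. A sketch of the corrected test with descriptive bit names; the kernel code uses the literal 5:

    #define FC_LOCKED       (1ULL << 0)     /* IA32_FEATURE_CONTROL lock bit */
    #define FC_VMXON        (1ULL << 2)     /* enable VMXON outside SMX */

    u64 old;

    rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
    if ((old & (FC_LOCKED | FC_VMXON)) != (FC_LOCKED | FC_VMXON))
            wrmsrl(MSR_IA32_FEATURE_CONTROL, old | FC_LOCKED | FC_VMXON);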
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index 7e838bf0592d..1bff3e925fda 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -238,7 +238,7 @@ struct operand {
238 * any modified flags. 238 * any modified flags.
239 */ 239 */
240 240
241#if defined(__x86_64__) 241#if defined(CONFIG_X86_64)
242#define _LO32 "k" /* force 32-bit operand */ 242#define _LO32 "k" /* force 32-bit operand */
243#define _STK "%%rsp" /* stack pointer */ 243#define _STK "%%rsp" /* stack pointer */
244#elif defined(__i386__) 244#elif defined(__i386__)
@@ -385,7 +385,7 @@ struct operand {
385 } while (0) 385 } while (0)
386 386
387/* Emulate an instruction with quadword operands (x86/64 only). */ 387/* Emulate an instruction with quadword operands (x86/64 only). */
388#if defined(__x86_64__) 388#if defined(CONFIG_X86_64)
389#define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy) \ 389#define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy) \
390 do { \ 390 do { \
391 __asm__ __volatile__ ( \ 391 __asm__ __volatile__ ( \
@@ -495,7 +495,7 @@ x86_emulate_memop(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
495 case X86EMUL_MODE_PROT32: 495 case X86EMUL_MODE_PROT32:
496 op_bytes = ad_bytes = 4; 496 op_bytes = ad_bytes = 4;
497 break; 497 break;
498#ifdef __x86_64__ 498#ifdef CONFIG_X86_64
499 case X86EMUL_MODE_PROT64: 499 case X86EMUL_MODE_PROT64:
500 op_bytes = 4; 500 op_bytes = 4;
501 ad_bytes = 8; 501 ad_bytes = 8;
@@ -1341,7 +1341,7 @@ twobyte_special_insn:
1341 } 1341 }
1342 break; 1342 break;
1343 } 1343 }
1344#elif defined(__x86_64__) 1344#elif defined(CONFIG_X86_64)
1345 { 1345 {
1346 unsigned long old, new; 1346 unsigned long old, new;
1347 if ((rc = ops->read_emulated(cr2, &old, 8, ctxt)) != 0) 1347 if ((rc = ops->read_emulated(cr2, &old, 8, ctxt)) != 0)
diff --git a/drivers/kvm/x86_emulate.h b/drivers/kvm/x86_emulate.h
index 658b58de30fc..5d41bd55125e 100644
--- a/drivers/kvm/x86_emulate.h
+++ b/drivers/kvm/x86_emulate.h
@@ -162,7 +162,7 @@ struct x86_emulate_ctxt {
162/* Host execution mode. */ 162/* Host execution mode. */
163#if defined(__i386__) 163#if defined(__i386__)
164#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32 164#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
165#elif defined(__x86_64__) 165#elif defined(CONFIG_X86_64)
166#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64 166#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
167#endif 167#endif
168 168
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 176142c61492..7399ba791116 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -12,7 +12,7 @@ config NEW_LEDS
12 12
13config LEDS_CLASS 13config LEDS_CLASS
14 tristate "LED Class Support" 14 tristate "LED Class Support"
15 depends NEW_LEDS 15 depends on NEW_LEDS
16 help 16 help
17 This option enables the led sysfs class in /sys/class/leds. You'll 17 This option enables the led sysfs class in /sys/class/leds. You'll
18 need this to do anything useful with LEDs. If unsure, say N. 18 need this to do anything useful with LEDs. If unsure, say N.
@@ -21,28 +21,28 @@ comment "LED drivers"
21 21
22config LEDS_CORGI 22config LEDS_CORGI
23 tristate "LED Support for the Sharp SL-C7x0 series" 23 tristate "LED Support for the Sharp SL-C7x0 series"
24 depends LEDS_CLASS && PXA_SHARP_C7xx 24 depends on LEDS_CLASS && PXA_SHARP_C7xx
25 help 25 help
26 This option enables support for the LEDs on Sharp Zaurus 26 This option enables support for the LEDs on Sharp Zaurus
27 SL-C7x0 series (C700, C750, C760, C860). 27 SL-C7x0 series (C700, C750, C760, C860).
28 28
29config LEDS_LOCOMO 29config LEDS_LOCOMO
30 tristate "LED Support for Locomo device" 30 tristate "LED Support for Locomo device"
31 depends LEDS_CLASS && SHARP_LOCOMO 31 depends on LEDS_CLASS && SHARP_LOCOMO
32 help 32 help
33 This option enables support for the LEDs on Sharp Locomo. 33 This option enables support for the LEDs on Sharp Locomo.
34 Zaurus models SL-5500 and SL-5600. 34 Zaurus models SL-5500 and SL-5600.
35 35
36config LEDS_SPITZ 36config LEDS_SPITZ
37 tristate "LED Support for the Sharp SL-Cxx00 series" 37 tristate "LED Support for the Sharp SL-Cxx00 series"
38 depends LEDS_CLASS && PXA_SHARP_Cxx00 38 depends on LEDS_CLASS && PXA_SHARP_Cxx00
39 help 39 help
40 This option enables support for the LEDs on Sharp Zaurus 40 This option enables support for the LEDs on Sharp Zaurus
41 SL-Cxx00 series (C1000, C3000, C3100). 41 SL-Cxx00 series (C1000, C3000, C3100).
42 42
43config LEDS_IXP4XX 43config LEDS_IXP4XX
44 tristate "LED Support for GPIO connected LEDs on IXP4XX processors" 44 tristate "LED Support for GPIO connected LEDs on IXP4XX processors"
45 depends LEDS_CLASS && ARCH_IXP4XX 45 depends on LEDS_CLASS && ARCH_IXP4XX
46 help 46 help
47 This option enables support for the LEDs connected to GPIO 47 This option enables support for the LEDs connected to GPIO
48 outputs of the Intel IXP4XX processors. To be useful the 48 outputs of the Intel IXP4XX processors. To be useful the
@@ -51,7 +51,7 @@ config LEDS_IXP4XX
51 51
52config LEDS_TOSA 52config LEDS_TOSA
53 tristate "LED Support for the Sharp SL-6000 series" 53 tristate "LED Support for the Sharp SL-6000 series"
54 depends LEDS_CLASS && PXA_SHARPSL 54 depends on LEDS_CLASS && PXA_SHARPSL
55 help 55 help
56 This option enables support for the LEDs on Sharp Zaurus 56 This option enables support for the LEDs on Sharp Zaurus
57 SL-6000 series. 57 SL-6000 series.
@@ -65,7 +65,7 @@ config LEDS_S3C24XX
65 65
66config LEDS_AMS_DELTA 66config LEDS_AMS_DELTA
67 tristate "LED Support for the Amstrad Delta (E3)" 67 tristate "LED Support for the Amstrad Delta (E3)"
68 depends LEDS_CLASS && MACH_AMS_DELTA 68 depends on LEDS_CLASS && MACH_AMS_DELTA
69 help 69 help
70 This option enables support for the LEDs on Amstrad Delta (E3). 70 This option enables support for the LEDs on Amstrad Delta (E3).
71 71
@@ -86,7 +86,7 @@ comment "LED Triggers"
86 86
87config LEDS_TRIGGERS 87config LEDS_TRIGGERS
88 bool "LED Trigger support" 88 bool "LED Trigger support"
89 depends NEW_LEDS 89 depends on NEW_LEDS
90 help 90 help
91 This option enables trigger support for the leds class. 91 This option enables trigger support for the leds class.
92 These triggers allow kernel events to drive the LEDs and can 92 These triggers allow kernel events to drive the LEDs and can
@@ -94,21 +94,21 @@ config LEDS_TRIGGERS
94 94
95config LEDS_TRIGGER_TIMER 95config LEDS_TRIGGER_TIMER
96 tristate "LED Timer Trigger" 96 tristate "LED Timer Trigger"
97 depends LEDS_TRIGGERS 97 depends on LEDS_TRIGGERS
98 help 98 help
99 This allows LEDs to be controlled by a programmable timer 99 This allows LEDs to be controlled by a programmable timer
100 via sysfs. If unsure, say Y. 100 via sysfs. If unsure, say Y.
101 101
102config LEDS_TRIGGER_IDE_DISK 102config LEDS_TRIGGER_IDE_DISK
103 bool "LED IDE Disk Trigger" 103 bool "LED IDE Disk Trigger"
104 depends LEDS_TRIGGERS && BLK_DEV_IDEDISK 104 depends on LEDS_TRIGGERS && BLK_DEV_IDEDISK
105 help 105 help
106 This allows LEDs to be controlled by IDE disk activity. 106 This allows LEDs to be controlled by IDE disk activity.
107 If unsure, say Y. 107 If unsure, say Y.
108 108
109config LEDS_TRIGGER_HEARTBEAT 109config LEDS_TRIGGER_HEARTBEAT
110 tristate "LED Heartbeat Trigger" 110 tristate "LED Heartbeat Trigger"
111 depends LEDS_TRIGGERS 111 depends on LEDS_TRIGGERS
112 help 112 help
113 This allows LEDs to be controlled by a CPU load average. 113 This allows LEDs to be controlled by a CPU load average.
114 The flash frequency is a hyperbolic function of the 1-minute 114 The flash frequency is a hyperbolic function of the 1-minute
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index d43ea81d6df9..7cec6de5e2b0 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -828,7 +828,7 @@ static ssize_t adb_write(struct file *file, const char __user *buf,
828 if (!access_ok(VERIFY_READ, buf, count)) 828 if (!access_ok(VERIFY_READ, buf, count))
829 return -EFAULT; 829 return -EFAULT;
830 830
831 req = (struct adb_request *) kmalloc(sizeof(struct adb_request), 831 req = kmalloc(sizeof(struct adb_request),
832 GFP_KERNEL); 832 GFP_KERNEL);
833 if (req == NULL) 833 if (req == NULL)
834 return -ENOMEM; 834 return -ENOMEM;
diff --git a/drivers/macintosh/apm_emu.c b/drivers/macintosh/apm_emu.c
index 8862a83b8d84..4300c628f8af 100644
--- a/drivers/macintosh/apm_emu.c
+++ b/drivers/macintosh/apm_emu.c
@@ -321,7 +321,7 @@ static int do_open(struct inode * inode, struct file * filp)
321{ 321{
322 struct apm_user * as; 322 struct apm_user * as;
323 323
324 as = (struct apm_user *)kmalloc(sizeof(*as), GFP_KERNEL); 324 as = kmalloc(sizeof(*as), GFP_KERNEL);
325 if (as == NULL) { 325 if (as == NULL) {
326 printk(KERN_ERR "apm: cannot allocate struct of size %d bytes\n", 326 printk(KERN_ERR "apm: cannot allocate struct of size %d bytes\n",
327 sizeof(*as)); 327 sizeof(*as));
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 6dde27ab79a8..6f30459b9385 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -945,7 +945,7 @@ static struct smu_sdbp_header *smu_create_sdb_partition(int id)
945 */ 945 */
946 tlen = sizeof(struct property) + len + 18; 946 tlen = sizeof(struct property) + len + 18;
947 947
948 prop = kcalloc(tlen, 1, GFP_KERNEL); 948 prop = kzalloc(tlen, GFP_KERNEL);
949 if (prop == NULL) 949 if (prop == NULL)
950 return NULL; 950 return NULL;
951 hdr = (struct smu_sdbp_header *)(prop + 1); 951 hdr = (struct smu_sdbp_header *)(prop + 1);
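kcalloc(n, size, flags) is meant for zeroed arrays, where it adds an overflow check on n * size; for a single zeroed allocation, kzalloc(size, flags) states the intent directly, which is all this hunk changes. The usual split, sketched with an illustrative type:

    struct foo *one, *many;

    one  = kzalloc(sizeof(*one), GFP_KERNEL);       /* one zeroed object */
    many = kcalloc(n, sizeof(*many), GFP_KERNEL);   /* zeroed array of n, multiplication checked */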
diff --git a/drivers/macintosh/via-pmu68k.c b/drivers/macintosh/via-pmu68k.c
index d9986f3a3fbf..93e6ef9233f9 100644
--- a/drivers/macintosh/via-pmu68k.c
+++ b/drivers/macintosh/via-pmu68k.c
@@ -847,7 +847,7 @@ pbook_pci_save(void)
847 n_pbook_pci_saves = npci; 847 n_pbook_pci_saves = npci;
848 if (npci == 0) 848 if (npci == 0)
849 return; 849 return;
850 ps = (struct pci_save *) kmalloc(npci * sizeof(*ps), GFP_KERNEL); 850 ps = kmalloc(npci * sizeof(*ps), GFP_KERNEL);
851 pbook_pci_saves = ps; 851 pbook_pci_saves = ps;
852 if (ps == NULL) 852 if (ps == NULL)
853 return; 853 return;
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index a7a5ab554338..4ebd0f2a75ec 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -173,7 +173,7 @@ static int make_request(request_queue_t *q, struct bio *bio)
173 conf_t *conf = (conf_t*)mddev->private; 173 conf_t *conf = (conf_t*)mddev->private;
174 int failit = 0; 174 int failit = 0;
175 175
176 if (bio->bi_rw & 1) { 176 if (bio_data_dir(bio) == WRITE) {
177 /* write request */ 177 /* write request */
178 if (atomic_read(&conf->counters[WriteAll])) { 178 if (atomic_read(&conf->counters[WriteAll])) {
179 /* special case - don't decrement, don't generic_make_request, 179 /* special case - don't decrement, don't generic_make_request,
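Here, and in the raid1, raid10 and raid5 hunks that follow, raw 0/1 values for the bio direction are replaced by the READ/WRITE symbols and the bio_data_dir() accessor, which extracts the direction from bi_rw; behaviour is unchanged, the intent just becomes explicit. A minimal sketch of the preferred spelling, with placeholder handlers that are not kernel functions:

    if (bio_data_dir(bio) == WRITE)
            handle_write_request(bio);      /* placeholder */
    else
            handle_read_request(bio);       /* placeholder */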
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index b3c5e12f081d..b30f74be3982 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1736,7 +1736,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1736 /* take from bio_init */ 1736 /* take from bio_init */
1737 bio->bi_next = NULL; 1737 bio->bi_next = NULL;
1738 bio->bi_flags |= 1 << BIO_UPTODATE; 1738 bio->bi_flags |= 1 << BIO_UPTODATE;
1739 bio->bi_rw = 0; 1739 bio->bi_rw = READ;
1740 bio->bi_vcnt = 0; 1740 bio->bi_vcnt = 0;
1741 bio->bi_idx = 0; 1741 bio->bi_idx = 0;
1742 bio->bi_phys_segments = 0; 1742 bio->bi_phys_segments = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7492d6033ac6..f0141910bb8d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1785,7 +1785,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1785 biolist = bio; 1785 biolist = bio;
1786 bio->bi_private = r10_bio; 1786 bio->bi_private = r10_bio;
1787 bio->bi_end_io = end_sync_read; 1787 bio->bi_end_io = end_sync_read;
1788 bio->bi_rw = 0; 1788 bio->bi_rw = READ;
1789 bio->bi_sector = r10_bio->devs[j].addr + 1789 bio->bi_sector = r10_bio->devs[j].addr +
1790 conf->mirrors[d].rdev->data_offset; 1790 conf->mirrors[d].rdev->data_offset;
1791 bio->bi_bdev = conf->mirrors[d].rdev->bdev; 1791 bio->bi_bdev = conf->mirrors[d].rdev->bdev;
@@ -1801,7 +1801,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1801 biolist = bio; 1801 biolist = bio;
1802 bio->bi_private = r10_bio; 1802 bio->bi_private = r10_bio;
1803 bio->bi_end_io = end_sync_write; 1803 bio->bi_end_io = end_sync_write;
1804 bio->bi_rw = 1; 1804 bio->bi_rw = WRITE;
1805 bio->bi_sector = r10_bio->devs[k].addr + 1805 bio->bi_sector = r10_bio->devs[k].addr +
1806 conf->mirrors[i].rdev->data_offset; 1806 conf->mirrors[i].rdev->data_offset;
1807 bio->bi_bdev = conf->mirrors[i].rdev->bdev; 1807 bio->bi_bdev = conf->mirrors[i].rdev->bdev;
@@ -1870,7 +1870,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1870 biolist = bio; 1870 biolist = bio;
1871 bio->bi_private = r10_bio; 1871 bio->bi_private = r10_bio;
1872 bio->bi_end_io = end_sync_read; 1872 bio->bi_end_io = end_sync_read;
1873 bio->bi_rw = 0; 1873 bio->bi_rw = READ;
1874 bio->bi_sector = r10_bio->devs[i].addr + 1874 bio->bi_sector = r10_bio->devs[i].addr +
1875 conf->mirrors[d].rdev->data_offset; 1875 conf->mirrors[d].rdev->data_offset;
1876 bio->bi_bdev = conf->mirrors[d].rdev->bdev; 1876 bio->bi_bdev = conf->mirrors[d].rdev->bdev;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 377f8bc9b78b..be008f034ada 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1827,16 +1827,16 @@ static void handle_stripe5(struct stripe_head *sh)
1827 struct bio *bi; 1827 struct bio *bi;
1828 mdk_rdev_t *rdev; 1828 mdk_rdev_t *rdev;
1829 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 1829 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
1830 rw = 1; 1830 rw = WRITE;
1831 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 1831 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
1832 rw = 0; 1832 rw = READ;
1833 else 1833 else
1834 continue; 1834 continue;
1835 1835
1836 bi = &sh->dev[i].req; 1836 bi = &sh->dev[i].req;
1837 1837
1838 bi->bi_rw = rw; 1838 bi->bi_rw = rw;
1839 if (rw) 1839 if (rw == WRITE)
1840 bi->bi_end_io = raid5_end_write_request; 1840 bi->bi_end_io = raid5_end_write_request;
1841 else 1841 else
1842 bi->bi_end_io = raid5_end_read_request; 1842 bi->bi_end_io = raid5_end_read_request;
@@ -1872,7 +1872,7 @@ static void handle_stripe5(struct stripe_head *sh)
1872 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 1872 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1873 generic_make_request(bi); 1873 generic_make_request(bi);
1874 } else { 1874 } else {
1875 if (rw == 1) 1875 if (rw == WRITE)
1876 set_bit(STRIPE_DEGRADED, &sh->state); 1876 set_bit(STRIPE_DEGRADED, &sh->state);
1877 PRINTK("skip op %ld on disc %d for sector %llu\n", 1877 PRINTK("skip op %ld on disc %d for sector %llu\n",
1878 bi->bi_rw, i, (unsigned long long)sh->sector); 1878 bi->bi_rw, i, (unsigned long long)sh->sector);
@@ -2370,16 +2370,16 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2370 struct bio *bi; 2370 struct bio *bi;
2371 mdk_rdev_t *rdev; 2371 mdk_rdev_t *rdev;
2372 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 2372 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
2373 rw = 1; 2373 rw = WRITE;
2374 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 2374 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
2375 rw = 0; 2375 rw = READ;
2376 else 2376 else
2377 continue; 2377 continue;
2378 2378
2379 bi = &sh->dev[i].req; 2379 bi = &sh->dev[i].req;
2380 2380
2381 bi->bi_rw = rw; 2381 bi->bi_rw = rw;
2382 if (rw) 2382 if (rw == WRITE)
2383 bi->bi_end_io = raid5_end_write_request; 2383 bi->bi_end_io = raid5_end_write_request;
2384 else 2384 else
2385 bi->bi_end_io = raid5_end_read_request; 2385 bi->bi_end_io = raid5_end_read_request;
@@ -2415,7 +2415,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2415 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 2415 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
2416 generic_make_request(bi); 2416 generic_make_request(bi);
2417 } else { 2417 } else {
2418 if (rw == 1) 2418 if (rw == WRITE)
2419 set_bit(STRIPE_DEGRADED, &sh->state); 2419 set_bit(STRIPE_DEGRADED, &sh->state);
2420 PRINTK("skip op %ld on disc %d for sector %llu\n", 2420 PRINTK("skip op %ld on disc %d for sector %llu\n",
2421 bi->bi_rw, i, (unsigned long long)sh->sector); 2421 bi->bi_rw, i, (unsigned long long)sh->sector);
@@ -2567,7 +2567,7 @@ static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_
2567 unsigned int chunk_sectors = mddev->chunk_size >> 9; 2567 unsigned int chunk_sectors = mddev->chunk_size >> 9;
2568 unsigned int bio_sectors = bio->bi_size >> 9; 2568 unsigned int bio_sectors = bio->bi_size >> 9;
2569 2569
2570 if (bio_data_dir(bio)) 2570 if (bio_data_dir(bio) == WRITE)
2571 return biovec->bv_len; /* always allow writes to be mergeable */ 2571 return biovec->bv_len; /* always allow writes to be mergeable */
2572 2572
2573 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 2573 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
@@ -2751,7 +2751,7 @@ static int make_request(request_queue_t *q, struct bio * bi)
2751 disk_stat_inc(mddev->gendisk, ios[rw]); 2751 disk_stat_inc(mddev->gendisk, ios[rw]);
2752 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi)); 2752 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));
2753 2753
2754 if (bio_data_dir(bi) == READ && 2754 if (rw == READ &&
2755 mddev->reshape_position == MaxSector && 2755 mddev->reshape_position == MaxSector &&
2756 chunk_aligned_read(q,bi)) 2756 chunk_aligned_read(q,bi))
2757 return 0; 2757 return 0;
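The raid5 hunks above replace raw 0/1 direction values with the kernel's READ/WRITE constants so the tests read the same way as bio_data_dir(). The following is only a condensed, hedged sketch of that pattern, not part of the patch; the helper name is invented, while the fields and end_io handlers are the ones visible in the hunks:

	/* Illustrative sketch: prefer symbolic direction constants over
	 * magic numbers when selecting the bio completion handler. */
	static void example_setup_bio(struct bio *bi, int rw)
	{
		bi->bi_rw = rw;
		if (rw == WRITE)		/* was: if (rw) */
			bi->bi_end_io = raid5_end_write_request;
		else				/* rw == READ */
			bi->bi_end_io = raid5_end_read_request;
	}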
diff --git a/drivers/media/dvb/bt8xx/dst_ca.c b/drivers/media/dvb/bt8xx/dst_ca.c
index 240ad084fa78..50bc32a8bd55 100644
--- a/drivers/media/dvb/bt8xx/dst_ca.c
+++ b/drivers/media/dvb/bt8xx/dst_ca.c
@@ -480,7 +480,7 @@ static int ca_send_message(struct dst_state *state, struct ca_msg *p_ca_message,
480 struct ca_msg *hw_buffer; 480 struct ca_msg *hw_buffer;
481 int result = 0; 481 int result = 0;
482 482
483 if ((hw_buffer = (struct ca_msg *) kmalloc(sizeof (struct ca_msg), GFP_KERNEL)) == NULL) { 483 if ((hw_buffer = kmalloc(sizeof (struct ca_msg), GFP_KERNEL)) == NULL) {
484 dprintk(verbose, DST_CA_ERROR, 1, " Memory allocation failure"); 484 dprintk(verbose, DST_CA_ERROR, 1, " Memory allocation failure");
485 return -ENOMEM; 485 return -ENOMEM;
486 } 486 }
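Most of the hunks that follow simply drop explicit casts on the return value of kmalloc()/kzalloc(). In C, a void * converts implicitly to any object pointer type, so the cast adds nothing and can hide a missing prototype. A minimal userspace analogue, using malloc() purely for illustration (the struct is a stand-in, not a kernel type):

	#include <stdlib.h>

	struct ca_msg_like { char body[256]; };	/* stand-in type for the example */

	int example(void)
	{
		/* No cast needed: the void * from malloc converts implicitly. */
		struct ca_msg_like *hw_buffer = malloc(sizeof(*hw_buffer));

		if (!hw_buffer)
			return -1;	/* kernel code would return -ENOMEM */
		free(hw_buffer);
		return 0;
	}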
diff --git a/drivers/media/dvb/bt8xx/dvb-bt8xx.c b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
index 80a85cb4975f..3e35931af35d 100644
--- a/drivers/media/dvb/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
@@ -657,7 +657,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
657 657
658 case BTTV_BOARD_TWINHAN_DST: 658 case BTTV_BOARD_TWINHAN_DST:
659 /* DST is not a frontend driver !!! */ 659 /* DST is not a frontend driver !!! */
660 state = (struct dst_state *) kmalloc(sizeof (struct dst_state), GFP_KERNEL); 660 state = kmalloc(sizeof (struct dst_state), GFP_KERNEL);
661 if (!state) { 661 if (!state) {
662 printk("dvb_bt8xx: No memory\n"); 662 printk("dvb_bt8xx: No memory\n");
663 break; 663 break;
diff --git a/drivers/media/dvb/ttusb-dec/ttusbdecfe.c b/drivers/media/dvb/ttusb-dec/ttusbdecfe.c
index 42f39a89bc4d..a6fb1d6a7b5d 100644
--- a/drivers/media/dvb/ttusb-dec/ttusbdecfe.c
+++ b/drivers/media/dvb/ttusb-dec/ttusbdecfe.c
@@ -195,7 +195,7 @@ struct dvb_frontend* ttusbdecfe_dvbt_attach(const struct ttusbdecfe_config* conf
195 struct ttusbdecfe_state* state = NULL; 195 struct ttusbdecfe_state* state = NULL;
196 196
197 /* allocate memory for the internal state */ 197 /* allocate memory for the internal state */
198 state = (struct ttusbdecfe_state*) kmalloc(sizeof(struct ttusbdecfe_state), GFP_KERNEL); 198 state = kmalloc(sizeof(struct ttusbdecfe_state), GFP_KERNEL);
199 if (state == NULL) 199 if (state == NULL)
200 return NULL; 200 return NULL;
201 201
@@ -215,7 +215,7 @@ struct dvb_frontend* ttusbdecfe_dvbs_attach(const struct ttusbdecfe_config* conf
215 struct ttusbdecfe_state* state = NULL; 215 struct ttusbdecfe_state* state = NULL;
216 216
217 /* allocate memory for the internal state */ 217 /* allocate memory for the internal state */
218 state = (struct ttusbdecfe_state*) kmalloc(sizeof(struct ttusbdecfe_state), GFP_KERNEL); 218 state = kmalloc(sizeof(struct ttusbdecfe_state), GFP_KERNEL);
219 if (state == NULL) 219 if (state == NULL)
220 return NULL; 220 return NULL;
221 221
diff --git a/drivers/media/video/dabusb.c b/drivers/media/video/dabusb.c
index b1012e92ee04..917021fc2993 100644
--- a/drivers/media/video/dabusb.c
+++ b/drivers/media/video/dabusb.c
@@ -218,7 +218,7 @@ static int dabusb_alloc_buffers (pdabusb_t s)
218 pipesize, packets, transfer_buffer_length); 218 pipesize, packets, transfer_buffer_length);
219 219
220 while (buffers < (s->total_buffer_size << 10)) { 220 while (buffers < (s->total_buffer_size << 10)) {
221 b = (pbuff_t) kzalloc (sizeof (buff_t), GFP_KERNEL); 221 b = kzalloc(sizeof (buff_t), GFP_KERNEL);
222 if (!b) { 222 if (!b) {
223 err("kzalloc(sizeof(buff_t))==NULL"); 223 err("kzalloc(sizeof(buff_t))==NULL");
224 goto err; 224 goto err;
@@ -659,7 +659,7 @@ static int dabusb_ioctl (struct inode *inode, struct file *file, unsigned int cm
659 switch (cmd) { 659 switch (cmd) {
660 660
661 case IOCTL_DAB_BULK: 661 case IOCTL_DAB_BULK:
662 pbulk = (pbulk_transfer_t) kmalloc (sizeof (bulk_transfer_t), GFP_KERNEL); 662 pbulk = kmalloc(sizeof (bulk_transfer_t), GFP_KERNEL);
663 663
664 if (!pbulk) { 664 if (!pbulk) {
665 ret = -ENOMEM; 665 ret = -ENOMEM;
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index 89dd18c3c5cc..5ed0adc4ca26 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -18,7 +18,7 @@
18#include <linux/i2c.h> 18#include <linux/i2c.h>
19 19
20 20
21MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net."); 21MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
22MODULE_DESCRIPTION("A low-level driver for OmniVision ov7670 sensors"); 22MODULE_DESCRIPTION("A low-level driver for OmniVision ov7670 sensors");
23MODULE_LICENSE("GPL"); 23MODULE_LICENSE("GPL");
24 24
diff --git a/drivers/media/video/planb.c b/drivers/media/video/planb.c
index 368d6e219fa4..86d2884e16c6 100644
--- a/drivers/media/video/planb.c
+++ b/drivers/media/video/planb.c
@@ -138,7 +138,7 @@ static int grabbuf_alloc(struct planb *pb)
138 + MAX_LNUM 138 + MAX_LNUM
139#endif /* PLANB_GSCANLINE */ 139#endif /* PLANB_GSCANLINE */
140 ); 140 );
141 if ((pb->rawbuf = (unsigned char**) kmalloc (npage 141 if ((pb->rawbuf = kmalloc(npage
142 * sizeof(unsigned long), GFP_KERNEL)) == 0) 142 * sizeof(unsigned long), GFP_KERNEL)) == 0)
143 return -ENOMEM; 143 return -ENOMEM;
144 for (i = 0; i < npage; i++) { 144 for (i = 0; i < npage; i++) {
diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
index d8b88024bc2f..b560c9d7c516 100644
--- a/drivers/media/video/usbvideo/usbvideo.c
+++ b/drivers/media/video/usbvideo/usbvideo.c
@@ -690,7 +690,7 @@ int usbvideo_register(
690 } 690 }
691 691
692 base_size = num_cams * sizeof(struct uvd) + sizeof(struct usbvideo); 692 base_size = num_cams * sizeof(struct uvd) + sizeof(struct usbvideo);
693 cams = (struct usbvideo *) kzalloc(base_size, GFP_KERNEL); 693 cams = kzalloc(base_size, GFP_KERNEL);
694 if (cams == NULL) { 694 if (cams == NULL) {
695 err("Failed to allocate %d. bytes for usbvideo struct", base_size); 695 err("Failed to allocate %d. bytes for usbvideo struct", base_size);
696 return -ENOMEM; 696 return -ENOMEM;
diff --git a/drivers/media/video/videocodec.c b/drivers/media/video/videocodec.c
index 2ae3fb250630..290e64135650 100644
--- a/drivers/media/video/videocodec.c
+++ b/drivers/media/video/videocodec.c
@@ -346,7 +346,7 @@ videocodec_build_table (void)
346 size); 346 size);
347 347
348 kfree(videocodec_buf); 348 kfree(videocodec_buf);
349 videocodec_buf = (char *) kmalloc(size, GFP_KERNEL); 349 videocodec_buf = kmalloc(size, GFP_KERNEL);
350 350
351 i = 0; 351 i = 0;
352 i += scnprintf(videocodec_buf + i, size - 1, 352 i += scnprintf(videocodec_buf + i, size - 1,
diff --git a/drivers/message/i2o/core.h b/drivers/message/i2o/core.h
index dc388a3ff5e0..cbe384fb848c 100644
--- a/drivers/message/i2o/core.h
+++ b/drivers/message/i2o/core.h
@@ -18,7 +18,7 @@ extern struct i2o_driver i2o_exec_driver;
18extern int i2o_exec_lct_get(struct i2o_controller *); 18extern int i2o_exec_lct_get(struct i2o_controller *);
19 19
20extern int __init i2o_exec_init(void); 20extern int __init i2o_exec_init(void);
21extern void __exit i2o_exec_exit(void); 21extern void i2o_exec_exit(void);
22 22
23/* driver */ 23/* driver */
24extern struct bus_type i2o_bus_type; 24extern struct bus_type i2o_bus_type;
@@ -26,7 +26,7 @@ extern struct bus_type i2o_bus_type;
26extern int i2o_driver_dispatch(struct i2o_controller *, u32); 26extern int i2o_driver_dispatch(struct i2o_controller *, u32);
27 27
28extern int __init i2o_driver_init(void); 28extern int __init i2o_driver_init(void);
29extern void __exit i2o_driver_exit(void); 29extern void i2o_driver_exit(void);
30 30
31/* PCI */ 31/* PCI */
32extern int __init i2o_pci_init(void); 32extern int __init i2o_pci_init(void);
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index 9104b65ff70f..d3235f213c89 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -362,7 +362,7 @@ int __init i2o_driver_init(void)
362 * 362 *
363 * Unregisters the I2O bus and frees driver array. 363 * Unregisters the I2O bus and frees driver array.
364 */ 364 */
365void __exit i2o_driver_exit(void) 365void i2o_driver_exit(void)
366{ 366{
367 bus_unregister(&i2o_bus_type); 367 bus_unregister(&i2o_bus_type);
368 kfree(i2o_drivers); 368 kfree(i2o_drivers);
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 902753b2c661..a539d3b61e76 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -595,7 +595,7 @@ int __init i2o_exec_init(void)
595 * 595 *
596 * Unregisters the Exec OSM from the I2O core. 596 * Unregisters the Exec OSM from the I2O core.
597 */ 597 */
598void __exit i2o_exec_exit(void) 598void i2o_exec_exit(void)
599{ 599{
600 i2o_driver_unregister(&i2o_exec_driver); 600 i2o_driver_unregister(&i2o_exec_driver);
601}; 601};
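The i2o hunks drop the __exit annotation from i2o_driver_exit() and i2o_exec_exit(). Presumably this is because the functions are also reached from init-time error paths: code placed in the exit section may be discarded when the driver is built in, so calling it from __init code would reference discarded text. A hedged, generic sketch of that pitfall (all names invented, assumes a kernel build environment):

	#include <linux/init.h>
	#include <linux/errno.h>

	static int example_sub_init(void) { return -ENODEV; }	/* pretend this can fail */

	static void example_cleanup(void)	/* deliberately not __exit: used below */
	{
		/* undo whatever example_init set up */
	}

	static int __init example_init(void)
	{
		int err = example_sub_init();

		if (err) {
			example_cleanup();	/* error path inside __init code */
			return err;
		}
		return 0;
	}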
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 1de30d711671..e33d446e7493 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -186,7 +186,7 @@ static int i2o_cfg_parms(unsigned long arg, unsigned int type)
186 if (!dev) 186 if (!dev)
187 return -ENXIO; 187 return -ENXIO;
188 188
189 ops = (u8 *) kmalloc(kcmd.oplen, GFP_KERNEL); 189 ops = kmalloc(kcmd.oplen, GFP_KERNEL);
190 if (!ops) 190 if (!ops)
191 return -ENOMEM; 191 return -ENOMEM;
192 192
@@ -199,7 +199,7 @@ static int i2o_cfg_parms(unsigned long arg, unsigned int type)
199 * It's possible to have a _very_ large table 199 * It's possible to have a _very_ large table
200 * and that the user asks for all of it at once... 200 * and that the user asks for all of it at once...
201 */ 201 */
202 res = (u8 *) kmalloc(65536, GFP_KERNEL); 202 res = kmalloc(65536, GFP_KERNEL);
203 if (!res) { 203 if (!res) {
204 kfree(ops); 204 kfree(ops);
205 return -ENOMEM; 205 return -ENOMEM;
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 5db716045927..0a7e86859bf1 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -459,7 +459,7 @@ add_dataflash(struct spi_device *spi, char *name,
459 struct mtd_info *device; 459 struct mtd_info *device;
460 struct flash_platform_data *pdata = spi->dev.platform_data; 460 struct flash_platform_data *pdata = spi->dev.platform_data;
461 461
462 priv = (struct dataflash *) kzalloc(sizeof *priv, GFP_KERNEL); 462 priv = kzalloc(sizeof *priv, GFP_KERNEL);
463 if (!priv) 463 if (!priv)
464 return -ENOMEM; 464 return -ENOMEM;
465 465
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index fa4362fb4dd8..0f3baa5d9c2a 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -768,7 +768,7 @@ static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
768 if (mtd->type != MTD_NORFLASH) 768 if (mtd->type != MTD_NORFLASH)
769 return; 769 return;
770 770
771 part = kcalloc(1, sizeof(struct partition), GFP_KERNEL); 771 part = kzalloc(sizeof(struct partition), GFP_KERNEL);
772 if (!part) 772 if (!part)
773 return; 773 return;
774 774
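This hunk, and many later ones, convert kcalloc(1, size, flags) into kzalloc(size, flags). Both return zeroed memory; with a count of 1 the multiplication overflow check in kcalloc() buys nothing, so kzalloc() states the intent more directly. A hedged kernel-style sketch of the before/after (the type and helper are invented for the example):

	/* Sketch only -- assumes a kernel build environment. */
	#include <linux/slab.h>

	struct partition_like { int block_size; };	/* invented example type */

	static struct partition_like *example_alloc(void)
	{
		/* before: part = kcalloc(1, sizeof(*part), GFP_KERNEL); */
		struct partition_like *part = kzalloc(sizeof(*part), GFP_KERNEL);

		return part;	/* NULL on failure, zero-filled otherwise */
	}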
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index b98592a8bac8..f22e46dfd770 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -186,7 +186,7 @@ static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
186 */ 186 */
187static int ipddp_create(struct ipddp_route *new_rt) 187static int ipddp_create(struct ipddp_route *new_rt)
188{ 188{
189 struct ipddp_route *rt =(struct ipddp_route*) kmalloc(sizeof(*rt), GFP_KERNEL); 189 struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL);
190 190
191 if (rt == NULL) 191 if (rt == NULL)
192 return -ENOMEM; 192 return -ENOMEM;
diff --git a/drivers/net/bsd_comp.c b/drivers/net/bsd_comp.c
index bae1de1e7802..7845eaf6f29f 100644
--- a/drivers/net/bsd_comp.c
+++ b/drivers/net/bsd_comp.c
@@ -395,7 +395,7 @@ static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
395 * Allocate the main control structure for this instance. 395 * Allocate the main control structure for this instance.
396 */ 396 */
397 maxmaxcode = MAXCODE(bits); 397 maxmaxcode = MAXCODE(bits);
398 db = (struct bsd_db *) kmalloc (sizeof (struct bsd_db), 398 db = kmalloc(sizeof (struct bsd_db),
399 GFP_KERNEL); 399 GFP_KERNEL);
400 if (!db) 400 if (!db)
401 { 401 {
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 03bf164f9e8d..c2ae2a24629b 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1930,9 +1930,8 @@ static int e100_rx_alloc_list(struct nic *nic)
1930 nic->rx_to_use = nic->rx_to_clean = NULL; 1930 nic->rx_to_use = nic->rx_to_clean = NULL;
1931 nic->ru_running = RU_UNINITIALIZED; 1931 nic->ru_running = RU_UNINITIALIZED;
1932 1932
1933 if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC))) 1933 if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
1934 return -ENOMEM; 1934 return -ENOMEM;
1935 memset(nic->rxs, 0, sizeof(struct rx) * count);
1936 1935
1937 for(rx = nic->rxs, i = 0; i < count; rx++, i++) { 1936 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1938 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; 1937 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
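The e100 hunk above folds a kmalloc() followed by memset() into a single kcalloc(count, size, flags), which both zeroes the array and checks count * size for overflow. A runnable userspace analogue with calloc() (the struct name is invented for the example):

	#include <stdlib.h>

	struct rx_like { struct rx_like *next; void *skb; };	/* example type */

	struct rx_like *alloc_rx_ring(size_t count)
	{
		/* before: p = malloc(count * sizeof(*p));
		 *         memset(p, 0, count * sizeof(*p)); */
		struct rx_like *p = calloc(count, sizeof(*p));	/* zeroed, overflow-checked */

		return p;	/* NULL on failure */
	}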
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 16620bd97fbf..11af0ae7510e 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1603,7 +1603,7 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
1603 irda_qos_bits_to_value (&self->qos); 1603 irda_qos_bits_to_value (&self->qos);
1604 1604
1605 /* Allocate twice the size to guarantee alignment */ 1605 /* Allocate twice the size to guarantee alignment */
1606 self->ringbuf = (void *) kmalloc (OBOE_RING_LEN << 1, GFP_KERNEL); 1606 self->ringbuf = kmalloc(OBOE_RING_LEN << 1, GFP_KERNEL);
1607 if (!self->ringbuf) 1607 if (!self->ringbuf)
1608 { 1608 {
1609 printk (KERN_ERR DRIVER_NAME ": can't allocate DMA buffers\n"); 1609 printk (KERN_ERR DRIVER_NAME ": can't allocate DMA buffers\n");
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 6e95645e7245..3ca1082ec776 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1747,7 +1747,7 @@ static int irda_usb_probe(struct usb_interface *intf,
1747 /* Don't change this buffer size and allocation without doing 1747 /* Don't change this buffer size and allocation without doing
1748 * some heavy and complete testing. Don't ask why :-( 1748 * some heavy and complete testing. Don't ask why :-(
1749 * Jean II */ 1749 * Jean II */
1750 self->speed_buff = (char *) kmalloc(IRDA_USB_SPEED_MTU, GFP_KERNEL); 1750 self->speed_buff = kmalloc(IRDA_USB_SPEED_MTU, GFP_KERNEL);
1751 if (self->speed_buff == NULL) 1751 if (self->speed_buff == NULL)
1752 goto err_out_3; 1752 goto err_out_3;
1753 1753
diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
index 654a68b490ae..3098960dc2a1 100644
--- a/drivers/net/irda/irport.c
+++ b/drivers/net/irda/irport.c
@@ -164,7 +164,7 @@ irport_open(int i, unsigned int iobase, unsigned int irq)
164 164
165 /* Allocate memory if needed */ 165 /* Allocate memory if needed */
166 if (self->tx_buff.truesize > 0) { 166 if (self->tx_buff.truesize > 0) {
167 self->tx_buff.head = (__u8 *) kmalloc(self->tx_buff.truesize, 167 self->tx_buff.head = kmalloc(self->tx_buff.truesize,
168 GFP_KERNEL); 168 GFP_KERNEL);
169 if (self->tx_buff.head == NULL) { 169 if (self->tx_buff.head == NULL) {
170 IRDA_ERROR("%s(), can't allocate memory for " 170 IRDA_ERROR("%s(), can't allocate memory for "
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index b833016f1825..177c502f7385 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -884,7 +884,7 @@ static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) {
884 884
885 dev->trans_start = jiffies; 885 dev->trans_start = jiffies;
886 886
887 tx_cmd = (struct tx_cmd *) kmalloc ((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC); 887 tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC);
888 if (tx_cmd == NULL) { 888 if (tx_cmd == NULL) {
889 printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name); 889 printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name);
890 lp->stats.tx_dropped++; 890 lp->stats.tx_dropped++;
@@ -1266,7 +1266,7 @@ static void set_multicast_list(struct net_device *dev) {
1266 if (dev->mc_count > 0) { 1266 if (dev->mc_count > 0) {
1267 struct dev_mc_list *dmi; 1267 struct dev_mc_list *dmi;
1268 char *cp; 1268 char *cp;
1269 cmd = (struct i596_cmd *)kmalloc(sizeof(struct i596_cmd)+2+dev->mc_count*6, GFP_ATOMIC); 1269 cmd = kmalloc(sizeof(struct i596_cmd)+2+dev->mc_count*6, GFP_ATOMIC);
1270 if (cmd == NULL) { 1270 if (cmd == NULL) {
1271 printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name); 1271 printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name);
1272 return; 1272 return;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b01fc70a57db..a4d7529ef415 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -50,7 +50,7 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
50 struct phy_device *dev; 50 struct phy_device *dev;
51 /* We allocate the device, and initialize the 51 /* We allocate the device, and initialize the
52 * default values */ 52 * default values */
53 dev = kcalloc(1, sizeof(*dev), GFP_KERNEL); 53 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
54 54
55 if (NULL == dev) 55 if (NULL == dev)
56 return (struct phy_device*) PTR_ERR((void*)-ENOMEM); 56 return (struct phy_device*) PTR_ERR((void*)-ENOMEM);
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index f54c55242f4a..72c8d6628f58 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -121,7 +121,7 @@ static void *z_comp_alloc(unsigned char *options, int opt_len)
121 if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE) 121 if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
122 return NULL; 122 return NULL;
123 123
124 state = (struct ppp_deflate_state *) kmalloc(sizeof(*state), 124 state = kmalloc(sizeof(*state),
125 GFP_KERNEL); 125 GFP_KERNEL);
126 if (state == NULL) 126 if (state == NULL)
127 return NULL; 127 return NULL;
@@ -341,7 +341,7 @@ static void *z_decomp_alloc(unsigned char *options, int opt_len)
341 if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE) 341 if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
342 return NULL; 342 return NULL;
343 343
344 state = (struct ppp_deflate_state *) kmalloc(sizeof(*state), GFP_KERNEL); 344 state = kmalloc(sizeof(*state), GFP_KERNEL);
345 if (state == NULL) 345 if (state == NULL)
346 return NULL; 346 return NULL;
347 347
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index f3655fd772f5..d5bdd2574659 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -200,7 +200,7 @@ static void *mppe_alloc(unsigned char *options, int optlen)
200 || options[0] != CI_MPPE || options[1] != CILEN_MPPE) 200 || options[0] != CI_MPPE || options[1] != CILEN_MPPE)
201 goto out; 201 goto out;
202 202
203 state = (struct ppp_mppe_state *) kmalloc(sizeof(*state), GFP_KERNEL); 203 state = kmalloc(sizeof(*state), GFP_KERNEL);
204 if (state == NULL) 204 if (state == NULL)
205 goto out; 205 goto out;
206 206
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index b60f0451f6cd..8a39376f87dc 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -749,7 +749,7 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
749 struct skge_element *e; 749 struct skge_element *e;
750 int i; 750 int i;
751 751
752 ring->start = kcalloc(sizeof(*e), ring->count, GFP_KERNEL); 752 ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL);
753 if (!ring->start) 753 if (!ring->start)
754 return -ENOMEM; 754 return -ENOMEM;
755 755
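The skge hunk fixes the argument order of kcalloc(): the signature is kcalloc(n, size, flags), i.e. number of elements first, element size second. Because both arguments are sizes, the swapped call compiles and usually works, so the mistake is easy to miss. Illustrative sketch only (element type and parameter name invented):

	/* Sketch, kernel context assumed. */
	#include <linux/slab.h>

	struct elem_like { int slot; };

	static struct elem_like *example_ring_alloc(unsigned int ring_count)
	{
		/* swapped order (happens to work, but misleading):
		 *   kcalloc(sizeof(struct elem_like), ring_count, GFP_KERNEL);
		 * correct order, element count first: */
		return kcalloc(ring_count, sizeof(struct elem_like), GFP_KERNEL);
	}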
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 39c2152a07f4..a0806d262fc6 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -229,10 +229,10 @@ static int sl_realloc_bufs(struct slip *sl, int mtu)
229 if (len < 576 * 2) 229 if (len < 576 * 2)
230 len = 576 * 2; 230 len = 576 * 2;
231 231
232 xbuff = (unsigned char *) kmalloc (len + 4, GFP_ATOMIC); 232 xbuff = kmalloc(len + 4, GFP_ATOMIC);
233 rbuff = (unsigned char *) kmalloc (len + 4, GFP_ATOMIC); 233 rbuff = kmalloc(len + 4, GFP_ATOMIC);
234#ifdef SL_INCLUDE_CSLIP 234#ifdef SL_INCLUDE_CSLIP
235 cbuff = (unsigned char *) kmalloc (len + 4, GFP_ATOMIC); 235 cbuff = kmalloc(len + 4, GFP_ATOMIC);
236#endif 236#endif
237 237
238 238
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index a4f735723c41..a02c5fb40567 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -231,7 +231,7 @@ static struct sv11_device *sv11_init(int iobase, int irq)
231 return NULL; 231 return NULL;
232 } 232 }
233 233
234 sv=(struct sv11_device *)kmalloc(sizeof(struct sv11_device), GFP_KERNEL); 234 sv = kmalloc(sizeof(struct sv11_device), GFP_KERNEL);
235 if(!sv) 235 if(!sv)
236 goto fail3; 236 goto fail3;
237 237
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 36d1c3ff7078..62184dee377c 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -3455,7 +3455,7 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3455 if ((err = pci_enable_device(pdev)) < 0) 3455 if ((err = pci_enable_device(pdev)) < 0)
3456 return err; 3456 return err;
3457 3457
3458 card = (pc300_t *) kmalloc(sizeof(pc300_t), GFP_KERNEL); 3458 card = kmalloc(sizeof(pc300_t), GFP_KERNEL);
3459 if (card == NULL) { 3459 if (card == NULL) {
3460 printk("PC300 found at RAM 0x%016llx, " 3460 printk("PC300 found at RAM 0x%016llx, "
3461 "but could not allocate card structure.\n", 3461 "but could not allocate card structure.\n",
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index b2a23aed4428..5873c346e7e9 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -784,7 +784,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev)
784 continue; 784 continue;
785 } 785 }
786 786
787 new = (st_cpc_rx_buf *)kmalloc(rx_len + sizeof(st_cpc_rx_buf), GFP_ATOMIC); 787 new = kmalloc(rx_len + sizeof(st_cpc_rx_buf), GFP_ATOMIC);
788 if (new == 0) { 788 if (new == 0) {
789 cpc_tty_rx_disc_frame(pc300chan); 789 cpc_tty_rx_disc_frame(pc300chan);
790 continue; 790 continue;
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 9c3ccc669143..1c9edd97accd 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -123,8 +123,8 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
123 unsigned char *xbuff, *rbuff; 123 unsigned char *xbuff, *rbuff;
124 int len = 2* newmtu; 124 int len = 2* newmtu;
125 125
126 xbuff = (unsigned char *) kmalloc (len + 4, GFP_ATOMIC); 126 xbuff = kmalloc(len + 4, GFP_ATOMIC);
127 rbuff = (unsigned char *) kmalloc (len + 4, GFP_ATOMIC); 127 rbuff = kmalloc(len + 4, GFP_ATOMIC);
128 128
129 if (xbuff == NULL || rbuff == NULL) 129 if (xbuff == NULL || rbuff == NULL)
130 { 130 {
@@ -465,11 +465,11 @@ static int x25_asy_open(struct net_device *dev)
465 465
466 len = dev->mtu * 2; 466 len = dev->mtu * 2;
467 467
468 sl->rbuff = (unsigned char *) kmalloc(len + 4, GFP_KERNEL); 468 sl->rbuff = kmalloc(len + 4, GFP_KERNEL);
469 if (sl->rbuff == NULL) { 469 if (sl->rbuff == NULL) {
470 goto norbuff; 470 goto norbuff;
471 } 471 }
472 sl->xbuff = (unsigned char *) kmalloc(len + 4, GFP_KERNEL); 472 sl->xbuff = kmalloc(len + 4, GFP_KERNEL);
473 if (sl->xbuff == NULL) { 473 if (sl->xbuff == NULL) {
474 goto noxbuff; 474 goto noxbuff;
475 } 475 }
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 974a8e5bec8b..efb8cf3bd8ad 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -1253,7 +1253,7 @@ static char * ap_auth_make_challenge(struct ap_data *ap)
1253 return NULL; 1253 return NULL;
1254 } 1254 }
1255 1255
1256 tmpbuf = (char *) kmalloc(WLAN_AUTH_CHALLENGE_LEN, GFP_ATOMIC); 1256 tmpbuf = kmalloc(WLAN_AUTH_CHALLENGE_LEN, GFP_ATOMIC);
1257 if (tmpbuf == NULL) { 1257 if (tmpbuf == NULL) {
1258 PDEBUG(DEBUG_AP, "AP: kmalloc failed for challenge\n"); 1258 PDEBUG(DEBUG_AP, "AP: kmalloc failed for challenge\n");
1259 return NULL; 1259 return NULL;
diff --git a/drivers/net/wireless/hostap/hostap_download.c b/drivers/net/wireless/hostap/hostap_download.c
index 24fc387bba67..c7678e67697d 100644
--- a/drivers/net/wireless/hostap/hostap_download.c
+++ b/drivers/net/wireless/hostap/hostap_download.c
@@ -201,7 +201,7 @@ static u8 * prism2_read_pda(struct net_device *dev)
201 0x7f0002 /* Intel PRO/Wireless 2011B (PCI) */, 201 0x7f0002 /* Intel PRO/Wireless 2011B (PCI) */,
202 }; 202 };
203 203
204 buf = (u8 *) kmalloc(PRISM2_PDA_SIZE, GFP_KERNEL); 204 buf = kmalloc(PRISM2_PDA_SIZE, GFP_KERNEL);
205 if (buf == NULL) 205 if (buf == NULL)
206 return NULL; 206 return NULL;
207 207
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index a394a23b9a20..3079378fb8cd 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -2252,7 +2252,7 @@ static int hostap_tx_compl_read(local_info_t *local, int error,
2252 if (txdesc->sw_support) { 2252 if (txdesc->sw_support) {
2253 len = le16_to_cpu(txdesc->data_len); 2253 len = le16_to_cpu(txdesc->data_len);
2254 if (len < PRISM2_DATA_MAXLEN) { 2254 if (len < PRISM2_DATA_MAXLEN) {
2255 *payload = (char *) kmalloc(len, GFP_ATOMIC); 2255 *payload = kmalloc(len, GFP_ATOMIC);
2256 if (*payload == NULL || 2256 if (*payload == NULL ||
2257 hfa384x_from_bap(dev, BAP0, *payload, len)) { 2257 hfa384x_from_bap(dev, BAP0, *payload, len)) {
2258 PDEBUG(DEBUG_EXTRA, "%s: could not read TX " 2258 PDEBUG(DEBUG_EXTRA, "%s: could not read TX "
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 3b7b8063ff1c..cb08bc5db2bd 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -3829,7 +3829,7 @@ static int prism2_ioctl_priv_hostapd(local_info_t *local, struct iw_point *p)
3829 p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer) 3829 p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
3830 return -EINVAL; 3830 return -EINVAL;
3831 3831
3832 param = (struct prism2_hostapd_param *) kmalloc(p->length, GFP_KERNEL); 3832 param = kmalloc(p->length, GFP_KERNEL);
3833 if (param == NULL) 3833 if (param == NULL)
3834 return -ENOMEM; 3834 return -ENOMEM;
3835 3835
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 0796be9d9e77..04c19cefa1da 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -250,7 +250,7 @@ u16 hostap_tx_callback_register(local_info_t *local,
250 unsigned long flags; 250 unsigned long flags;
251 struct hostap_tx_callback_info *entry; 251 struct hostap_tx_callback_info *entry;
252 252
253 entry = (struct hostap_tx_callback_info *) kmalloc(sizeof(*entry), 253 entry = kmalloc(sizeof(*entry),
254 GFP_ATOMIC); 254 GFP_ATOMIC);
255 if (entry == NULL) 255 if (entry == NULL)
256 return 0; 256 return 0;
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index dd9ba4aad7bb..0e94fbbf7a94 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -2246,7 +2246,7 @@ static int ipw2100_snapshot_alloc(struct ipw2100_priv *priv)
2246 if (priv->snapshot[0]) 2246 if (priv->snapshot[0])
2247 return 1; 2247 return 1;
2248 for (i = 0; i < 0x30; i++) { 2248 for (i = 0; i < 0x30; i++) {
2249 priv->snapshot[i] = (u8 *) kmalloc(0x1000, GFP_ATOMIC); 2249 priv->snapshot[i] = kmalloc(0x1000, GFP_ATOMIC);
2250 if (!priv->snapshot[i]) { 2250 if (!priv->snapshot[i]) {
2251 IPW_DEBUG_INFO("%s: Error allocating snapshot " 2251 IPW_DEBUG_INFO("%s: Error allocating snapshot "
2252 "buffer %d\n", priv->net_dev->name, i); 2252 "buffer %d\n", priv->net_dev->name, i);
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 96606ed10076..838d510213c6 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -2775,7 +2775,7 @@ prism54_hostapd(struct net_device *ndev, struct iw_point *p)
2775 p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer) 2775 p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
2776 return -EINVAL; 2776 return -EINVAL;
2777 2777
2778 param = (struct prism2_hostapd_param *) kmalloc(p->length, GFP_KERNEL); 2778 param = kmalloc(p->length, GFP_KERNEL);
2779 if (param == NULL) 2779 if (param == NULL)
2780 return -ENOMEM; 2780 return -ENOMEM;
2781 2781
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 233d906c08f0..5eb81638e846 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -603,7 +603,7 @@ static wavepoint_history *wl_new_wavepoint(unsigned short nwid, unsigned char se
603 if(lp->wavepoint_table.num_wavepoints==MAX_WAVEPOINTS) 603 if(lp->wavepoint_table.num_wavepoints==MAX_WAVEPOINTS)
604 return NULL; 604 return NULL;
605 605
606 new_wavepoint=(wavepoint_history *) kmalloc(sizeof(wavepoint_history),GFP_ATOMIC); 606 new_wavepoint = kmalloc(sizeof(wavepoint_history),GFP_ATOMIC);
607 if(new_wavepoint==NULL) 607 if(new_wavepoint==NULL)
608 return NULL; 608 return NULL;
609 609
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 77e11ddad836..78ea72fb8f0c 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -101,7 +101,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
101 101
102 /* Allocate a single memory block for values and addresses. */ 102 /* Allocate a single memory block for values and addresses. */
103 count16 = 2*count; 103 count16 = 2*count;
104 a16 = (zd_addr_t *)kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)), 104 a16 = kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)),
105 GFP_NOFS); 105 GFP_NOFS);
106 if (!a16) { 106 if (!a16) {
107 dev_dbg_f(zd_chip_dev(chip), 107 dev_dbg_f(zd_chip_dev(chip),
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 12bab64a62a1..6fb3f7979f21 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -874,7 +874,7 @@ void *iosapic_register(unsigned long hpa)
874 return NULL; 874 return NULL;
875 } 875 }
876 876
877 isi = (struct iosapic_info *)kzalloc(sizeof(struct iosapic_info), GFP_KERNEL); 877 isi = kzalloc(sizeof(struct iosapic_info), GFP_KERNEL);
878 if (!isi) { 878 if (!isi) {
879 BUG(); 879 BUG();
880 return NULL; 880 return NULL;
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index 298a6cfd8406..ae5e974c45a7 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -520,7 +520,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
520 return 2; 520 return 2;
521 521
522 while (nummem--) { 522 while (nummem--) {
523 mem_node = (struct pci_resource*) kmalloc(sizeof(struct pci_resource), GFP_KERNEL); 523 mem_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
524 524
525 if (!mem_node) 525 if (!mem_node)
526 break; 526 break;
@@ -548,7 +548,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
548 } 548 }
549 549
550 while (numpmem--) { 550 while (numpmem--) {
551 p_mem_node = (struct pci_resource*) kmalloc(sizeof(struct pci_resource), GFP_KERNEL); 551 p_mem_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
552 552
553 if (!p_mem_node) 553 if (!p_mem_node)
554 break; 554 break;
@@ -576,7 +576,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
576 } 576 }
577 577
578 while (numio--) { 578 while (numio--) {
579 io_node = (struct pci_resource*) kmalloc(sizeof(struct pci_resource), GFP_KERNEL); 579 io_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
580 580
581 if (!io_node) 581 if (!io_node)
582 break; 582 break;
@@ -604,7 +604,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
604 } 604 }
605 605
606 while (numbus--) { 606 while (numbus--) {
607 bus_node = (struct pci_resource*) kmalloc(sizeof(struct pci_resource), GFP_KERNEL); 607 bus_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
608 608
609 if (!bus_node) 609 if (!bus_node)
610 break; 610 break;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 6d3f580f2666..25d3aadfddbf 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -1320,7 +1320,7 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1320 DBG_ENTER_ROUTINE 1320 DBG_ENTER_ROUTINE
1321 1321
1322 spin_lock_init(&list_lock); 1322 spin_lock_init(&list_lock);
1323 php_ctlr = (struct php_ctlr_state_s *) kmalloc(sizeof(struct php_ctlr_state_s), GFP_KERNEL); 1323 php_ctlr = kmalloc(sizeof(struct php_ctlr_state_s), GFP_KERNEL);
1324 1324
1325 if (!php_ctlr) { /* allocate controller state data */ 1325 if (!php_ctlr) { /* allocate controller state data */
1326 err("%s: HPC controller memory allocation error!\n", __FUNCTION__); 1326 err("%s: HPC controller memory allocation error!\n", __FUNCTION__);
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 55866b6b26fa..6f5fabbd14e5 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -148,7 +148,7 @@ static struct aer_rpc* aer_alloc_rpc(struct pcie_device *dev)
148{ 148{
149 struct aer_rpc *rpc; 149 struct aer_rpc *rpc;
150 150
151 if (!(rpc = (struct aer_rpc *)kmalloc(sizeof(struct aer_rpc), 151 if (!(rpc = kmalloc(sizeof(struct aer_rpc),
152 GFP_KERNEL))) 152 GFP_KERNEL)))
153 return NULL; 153 return NULL;
154 154
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 52d4a38b3667..3334f22a86c0 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -230,7 +230,7 @@ static int __init at91_cf_probe(struct platform_device *pdev)
230 if (!io) 230 if (!io)
231 return -ENODEV; 231 return -ENODEV;
232 232
233 cf = kcalloc(1, sizeof *cf, GFP_KERNEL); 233 cf = kzalloc(sizeof *cf, GFP_KERNEL);
234 if (!cf) 234 if (!cf)
235 return -ENOMEM; 235 return -ENOMEM;
236 236
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 06bf7f48836e..e65a6b8188f6 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -220,7 +220,7 @@ static int __devinit omap_cf_probe(struct device *dev)
220 if (irq < 0) 220 if (irq < 0)
221 return -EINVAL; 221 return -EINVAL;
222 222
223 cf = kcalloc(1, sizeof *cf, GFP_KERNEL); 223 cf = kzalloc(sizeof *cf, GFP_KERNEL);
224 if (!cf) 224 if (!cf)
225 return -ENOMEM; 225 return -ENOMEM;
226 init_timer(&cf->timer); 226 init_timer(&cf->timer);
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index 3ac5b123215a..a0b158704ca1 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -395,7 +395,7 @@ static void isapnp_parse_id(struct pnp_dev * dev, unsigned short vendor, unsigne
395 struct pnp_id * id; 395 struct pnp_id * id;
396 if (!dev) 396 if (!dev)
397 return; 397 return;
398 id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL); 398 id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
399 if (!id) 399 if (!id)
400 return; 400 return;
401 sprintf(id->id, "%c%c%c%x%x%x%x", 401 sprintf(id->id, "%c%c%c%x%x%x%x",
@@ -419,7 +419,7 @@ static struct pnp_dev * __init isapnp_parse_device(struct pnp_card *card, int si
419 struct pnp_dev *dev; 419 struct pnp_dev *dev;
420 420
421 isapnp_peek(tmp, size); 421 isapnp_peek(tmp, size);
422 dev = kcalloc(1, sizeof(struct pnp_dev), GFP_KERNEL); 422 dev = kzalloc(sizeof(struct pnp_dev), GFP_KERNEL);
423 if (!dev) 423 if (!dev)
424 return NULL; 424 return NULL;
425 dev->number = number; 425 dev->number = number;
@@ -450,7 +450,7 @@ static void __init isapnp_parse_irq_resource(struct pnp_option *option,
450 unsigned long bits; 450 unsigned long bits;
451 451
452 isapnp_peek(tmp, size); 452 isapnp_peek(tmp, size);
453 irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL); 453 irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL);
454 if (!irq) 454 if (!irq)
455 return; 455 return;
456 bits = (tmp[1] << 8) | tmp[0]; 456 bits = (tmp[1] << 8) | tmp[0];
@@ -474,7 +474,7 @@ static void __init isapnp_parse_dma_resource(struct pnp_option *option,
474 struct pnp_dma *dma; 474 struct pnp_dma *dma;
475 475
476 isapnp_peek(tmp, size); 476 isapnp_peek(tmp, size);
477 dma = kcalloc(1, sizeof(struct pnp_dma), GFP_KERNEL); 477 dma = kzalloc(sizeof(struct pnp_dma), GFP_KERNEL);
478 if (!dma) 478 if (!dma)
479 return; 479 return;
480 dma->map = tmp[0]; 480 dma->map = tmp[0];
@@ -494,7 +494,7 @@ static void __init isapnp_parse_port_resource(struct pnp_option *option,
494 struct pnp_port *port; 494 struct pnp_port *port;
495 495
496 isapnp_peek(tmp, size); 496 isapnp_peek(tmp, size);
497 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL); 497 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
498 if (!port) 498 if (!port)
499 return; 499 return;
500 port->min = (tmp[2] << 8) | tmp[1]; 500 port->min = (tmp[2] << 8) | tmp[1];
@@ -517,7 +517,7 @@ static void __init isapnp_parse_fixed_port_resource(struct pnp_option *option,
517 struct pnp_port *port; 517 struct pnp_port *port;
518 518
519 isapnp_peek(tmp, size); 519 isapnp_peek(tmp, size);
520 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL); 520 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
521 if (!port) 521 if (!port)
522 return; 522 return;
523 port->min = port->max = (tmp[1] << 8) | tmp[0]; 523 port->min = port->max = (tmp[1] << 8) | tmp[0];
@@ -539,7 +539,7 @@ static void __init isapnp_parse_mem_resource(struct pnp_option *option,
539 struct pnp_mem *mem; 539 struct pnp_mem *mem;
540 540
541 isapnp_peek(tmp, size); 541 isapnp_peek(tmp, size);
542 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 542 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
543 if (!mem) 543 if (!mem)
544 return; 544 return;
545 mem->min = ((tmp[2] << 8) | tmp[1]) << 8; 545 mem->min = ((tmp[2] << 8) | tmp[1]) << 8;
@@ -562,7 +562,7 @@ static void __init isapnp_parse_mem32_resource(struct pnp_option *option,
562 struct pnp_mem *mem; 562 struct pnp_mem *mem;
563 563
564 isapnp_peek(tmp, size); 564 isapnp_peek(tmp, size);
565 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 565 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
566 if (!mem) 566 if (!mem)
567 return; 567 return;
568 mem->min = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1]; 568 mem->min = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1];
@@ -584,7 +584,7 @@ static void __init isapnp_parse_fixed_mem32_resource(struct pnp_option *option,
584 struct pnp_mem *mem; 584 struct pnp_mem *mem;
585 585
586 isapnp_peek(tmp, size); 586 isapnp_peek(tmp, size);
587 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 587 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
588 if (!mem) 588 if (!mem)
589 return; 589 return;
590 mem->min = mem->max = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1]; 590 mem->min = mem->max = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1];
@@ -829,7 +829,7 @@ static unsigned char __init isapnp_checksum(unsigned char *data)
829 829
830static void isapnp_parse_card_id(struct pnp_card * card, unsigned short vendor, unsigned short device) 830static void isapnp_parse_card_id(struct pnp_card * card, unsigned short vendor, unsigned short device)
831{ 831{
832 struct pnp_id * id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL); 832 struct pnp_id * id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
833 if (!id) 833 if (!id)
834 return; 834 return;
835 sprintf(id->id, "%c%c%c%x%x%x%x", 835 sprintf(id->id, "%c%c%c%x%x%x%x",
@@ -865,7 +865,7 @@ static int __init isapnp_build_device_list(void)
865 header[4], header[5], header[6], header[7], header[8]); 865 header[4], header[5], header[6], header[7], header[8]);
866 printk(KERN_DEBUG "checksum = 0x%x\n", checksum); 866 printk(KERN_DEBUG "checksum = 0x%x\n", checksum);
867#endif 867#endif
868 if ((card = kcalloc(1, sizeof(struct pnp_card), GFP_KERNEL)) == NULL) 868 if ((card = kzalloc(sizeof(struct pnp_card), GFP_KERNEL)) == NULL)
869 continue; 869 continue;
870 870
871 card->number = csn; 871 card->number = csn;
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 6cf34a63c790..62eda5d59024 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -139,7 +139,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
139 return 0; 139 return 0;
140 140
141 pnp_dbg("ACPI device : hid %s", acpi_device_hid(device)); 141 pnp_dbg("ACPI device : hid %s", acpi_device_hid(device));
142 dev = kcalloc(1, sizeof(struct pnp_dev), GFP_KERNEL); 142 dev = kzalloc(sizeof(struct pnp_dev), GFP_KERNEL);
143 if (!dev) { 143 if (!dev) {
144 pnp_err("Out of memory"); 144 pnp_err("Out of memory");
145 return -ENOMEM; 145 return -ENOMEM;
@@ -169,7 +169,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
169 dev->number = num; 169 dev->number = num;
170 170
171 /* set the initial values for the PnP device */ 171 /* set the initial values for the PnP device */
172 dev_id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL); 172 dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
173 if (!dev_id) 173 if (!dev_id)
174 goto err; 174 goto err;
175 pnpidacpi_to_pnpid(acpi_device_hid(device), dev_id->id); 175 pnpidacpi_to_pnpid(acpi_device_hid(device), dev_id->id);
@@ -201,7 +201,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
201 for (i = 0; i < cid_list->count; i++) { 201 for (i = 0; i < cid_list->count; i++) {
202 if (!ispnpidacpi(cid_list->id[i].value)) 202 if (!ispnpidacpi(cid_list->id[i].value))
203 continue; 203 continue;
204 dev_id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL); 204 dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
205 if (!dev_id) 205 if (!dev_id)
206 continue; 206 continue;
207 207
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 379048fdf05d..7a535542fe92 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -298,7 +298,7 @@ static void pnpacpi_parse_dma_option(struct pnp_option *option, struct acpi_reso
298 298
299 if (p->channel_count == 0) 299 if (p->channel_count == 0)
300 return; 300 return;
301 dma = kcalloc(1, sizeof(struct pnp_dma), GFP_KERNEL); 301 dma = kzalloc(sizeof(struct pnp_dma), GFP_KERNEL);
302 if (!dma) 302 if (!dma)
303 return; 303 return;
304 304
@@ -354,7 +354,7 @@ static void pnpacpi_parse_irq_option(struct pnp_option *option,
354 354
355 if (p->interrupt_count == 0) 355 if (p->interrupt_count == 0)
356 return; 356 return;
357 irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL); 357 irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL);
358 if (!irq) 358 if (!irq)
359 return; 359 return;
360 360
@@ -375,7 +375,7 @@ static void pnpacpi_parse_ext_irq_option(struct pnp_option *option,
375 375
376 if (p->interrupt_count == 0) 376 if (p->interrupt_count == 0)
377 return; 377 return;
378 irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL); 378 irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL);
379 if (!irq) 379 if (!irq)
380 return; 380 return;
381 381
@@ -396,7 +396,7 @@ pnpacpi_parse_port_option(struct pnp_option *option,
396 396
397 if (io->address_length == 0) 397 if (io->address_length == 0)
398 return; 398 return;
399 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL); 399 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
400 if (!port) 400 if (!port)
401 return; 401 return;
402 port->min = io->minimum; 402 port->min = io->minimum;
@@ -417,7 +417,7 @@ pnpacpi_parse_fixed_port_option(struct pnp_option *option,
417 417
418 if (io->address_length == 0) 418 if (io->address_length == 0)
419 return; 419 return;
420 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL); 420 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
421 if (!port) 421 if (!port)
422 return; 422 return;
423 port->min = port->max = io->address; 423 port->min = port->max = io->address;
@@ -436,7 +436,7 @@ pnpacpi_parse_mem24_option(struct pnp_option *option,
436 436
437 if (p->address_length == 0) 437 if (p->address_length == 0)
438 return; 438 return;
439 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 439 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
440 if (!mem) 440 if (!mem)
441 return; 441 return;
442 mem->min = p->minimum; 442 mem->min = p->minimum;
@@ -459,7 +459,7 @@ pnpacpi_parse_mem32_option(struct pnp_option *option,
459 459
460 if (p->address_length == 0) 460 if (p->address_length == 0)
461 return; 461 return;
462 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 462 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
463 if (!mem) 463 if (!mem)
464 return; 464 return;
465 mem->min = p->minimum; 465 mem->min = p->minimum;
@@ -482,7 +482,7 @@ pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
482 482
483 if (p->address_length == 0) 483 if (p->address_length == 0)
484 return; 484 return;
485 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 485 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
486 if (!mem) 486 if (!mem)
487 return; 487 return;
488 mem->min = mem->max = p->address; 488 mem->min = mem->max = p->address;
@@ -514,7 +514,7 @@ pnpacpi_parse_address_option(struct pnp_option *option, struct acpi_resource *r)
514 return; 514 return;
515 515
516 if (p->resource_type == ACPI_MEMORY_RANGE) { 516 if (p->resource_type == ACPI_MEMORY_RANGE) {
517 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 517 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
518 if (!mem) 518 if (!mem)
519 return; 519 return;
520 mem->min = mem->max = p->minimum; 520 mem->min = mem->max = p->minimum;
@@ -524,7 +524,7 @@ pnpacpi_parse_address_option(struct pnp_option *option, struct acpi_resource *r)
524 ACPI_READ_WRITE_MEMORY) ? IORESOURCE_MEM_WRITEABLE : 0; 524 ACPI_READ_WRITE_MEMORY) ? IORESOURCE_MEM_WRITEABLE : 0;
525 pnp_register_mem_resource(option, mem); 525 pnp_register_mem_resource(option, mem);
526 } else if (p->resource_type == ACPI_IO_RANGE) { 526 } else if (p->resource_type == ACPI_IO_RANGE) {
527 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL); 527 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
528 if (!port) 528 if (!port)
529 return; 529 return;
530 port->min = port->max = p->minimum; 530 port->min = port->max = p->minimum;
@@ -721,7 +721,7 @@ int pnpacpi_build_resource_template(acpi_handle handle,
721 if (!res_cnt) 721 if (!res_cnt)
722 return -EINVAL; 722 return -EINVAL;
723 buffer->length = sizeof(struct acpi_resource) * (res_cnt + 1) + 1; 723 buffer->length = sizeof(struct acpi_resource) * (res_cnt + 1) + 1;
724 buffer->pointer = kcalloc(1, buffer->length - 1, GFP_KERNEL); 724 buffer->pointer = kzalloc(buffer->length - 1, GFP_KERNEL);
725 if (!buffer->pointer) 725 if (!buffer->pointer)
726 return -ENOMEM; 726 return -ENOMEM;
727 pnp_dbg("Res cnt %d", res_cnt); 727 pnp_dbg("Res cnt %d", res_cnt);
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 33adeba1a31f..95738dbd5d45 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -109,10 +109,10 @@ static int pnp_dock_event(int dock, struct pnp_docking_station_info *info)
109 if (!current->fs->root) { 109 if (!current->fs->root) {
110 return -EAGAIN; 110 return -EAGAIN;
111 } 111 }
112 if (!(envp = (char **) kcalloc (20, sizeof (char *), GFP_KERNEL))) { 112 if (!(envp = kcalloc(20, sizeof (char *), GFP_KERNEL))) {
113 return -ENOMEM; 113 return -ENOMEM;
114 } 114 }
115 if (!(buf = kcalloc (1, 256, GFP_KERNEL))) { 115 if (!(buf = kzalloc(256, GFP_KERNEL))) {
116 kfree (envp); 116 kfree (envp);
117 return -ENOMEM; 117 return -ENOMEM;
118 } 118 }
@@ -220,7 +220,7 @@ static int pnpbios_get_resources(struct pnp_dev * dev, struct pnp_resource_table
220 if(!pnpbios_is_dynamic(dev)) 220 if(!pnpbios_is_dynamic(dev))
221 return -EPERM; 221 return -EPERM;
222 222
223 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL); 223 node = kzalloc(node_info.max_node_size, GFP_KERNEL);
224 if (!node) 224 if (!node)
225 return -1; 225 return -1;
226 if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node)) { 226 if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node)) {
@@ -243,7 +243,7 @@ static int pnpbios_set_resources(struct pnp_dev * dev, struct pnp_resource_table
243 if (!pnpbios_is_dynamic(dev)) 243 if (!pnpbios_is_dynamic(dev))
244 return -EPERM; 244 return -EPERM;
245 245
246 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL); 246 node = kzalloc(node_info.max_node_size, GFP_KERNEL);
247 if (!node) 247 if (!node)
248 return -1; 248 return -1;
249 if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node)) { 249 if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node)) {
@@ -294,7 +294,7 @@ static int pnpbios_disable_resources(struct pnp_dev *dev)
294 if(dev->flags & PNPBIOS_NO_DISABLE || !pnpbios_is_dynamic(dev)) 294 if(dev->flags & PNPBIOS_NO_DISABLE || !pnpbios_is_dynamic(dev))
295 return -EPERM; 295 return -EPERM;
296 296
297 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL); 297 node = kzalloc(node_info.max_node_size, GFP_KERNEL);
298 if (!node) 298 if (!node)
299 return -ENOMEM; 299 return -ENOMEM;
300 300
@@ -336,7 +336,7 @@ static int insert_device(struct pnp_dev *dev, struct pnp_bios_node * node)
336 } 336 }
337 337
338 /* set the initial values for the PnP device */ 338 /* set the initial values for the PnP device */
339 dev_id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL); 339 dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
340 if (!dev_id) 340 if (!dev_id)
341 return -1; 341 return -1;
342 pnpid32_to_pnpid(node->eisa_id,id); 342 pnpid32_to_pnpid(node->eisa_id,id);
@@ -374,7 +374,7 @@ static void __init build_devlist(void)
374 struct pnp_bios_node *node; 374 struct pnp_bios_node *node;
375 struct pnp_dev *dev; 375 struct pnp_dev *dev;
376 376
377 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL); 377 node = kzalloc(node_info.max_node_size, GFP_KERNEL);
378 if (!node) 378 if (!node)
379 return; 379 return;
380 380
@@ -391,7 +391,7 @@ static void __init build_devlist(void)
391 break; 391 break;
392 } 392 }
393 nodes_got++; 393 nodes_got++;
394 dev = kcalloc(1, sizeof (struct pnp_dev), GFP_KERNEL); 394 dev = kzalloc(sizeof (struct pnp_dev), GFP_KERNEL);
395 if (!dev) 395 if (!dev)
396 break; 396 break;
397 if(insert_device(dev,node)<0) 397 if(insert_device(dev,node)<0)
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index 5a3dfc97f5e9..8027073f7919 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -87,7 +87,7 @@ static int proc_read_escd(char *buf, char **start, off_t pos,
87 return -EFBIG; 87 return -EFBIG;
88 } 88 }
89 89
90 tmpbuf = kcalloc(1, escd.escd_size, GFP_KERNEL); 90 tmpbuf = kzalloc(escd.escd_size, GFP_KERNEL);
91 if (!tmpbuf) return -ENOMEM; 91 if (!tmpbuf) return -ENOMEM;
92 92
93 if (pnp_bios_read_escd(tmpbuf, escd.nv_storage_base)) { 93 if (pnp_bios_read_escd(tmpbuf, escd.nv_storage_base)) {
@@ -133,7 +133,7 @@ static int proc_read_devices(char *buf, char **start, off_t pos,
133 if (pos >= 0xff) 133 if (pos >= 0xff)
134 return 0; 134 return 0;
135 135
136 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL); 136 node = kzalloc(node_info.max_node_size, GFP_KERNEL);
137 if (!node) return -ENOMEM; 137 if (!node) return -ENOMEM;
138 138
139 for (nodenum=pos; nodenum<0xff; ) { 139 for (nodenum=pos; nodenum<0xff; ) {
@@ -168,7 +168,7 @@ static int proc_read_node(char *buf, char **start, off_t pos,
168 u8 nodenum = (long)data; 168 u8 nodenum = (long)data;
169 int len; 169 int len;
170 170
171 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL); 171 node = kzalloc(node_info.max_node_size, GFP_KERNEL);
172 if (!node) return -ENOMEM; 172 if (!node) return -ENOMEM;
173 if (pnp_bios_get_dev_node(&nodenum, boot, node)) { 173 if (pnp_bios_get_dev_node(&nodenum, boot, node)) {
174 kfree(node); 174 kfree(node);
@@ -188,7 +188,7 @@ static int proc_write_node(struct file *file, const char __user *buf,
188 u8 nodenum = (long)data; 188 u8 nodenum = (long)data;
189 int ret = count; 189 int ret = count;
190 190
191 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL); 191 node = kzalloc(node_info.max_node_size, GFP_KERNEL);
192 if (!node) 192 if (!node)
193 return -ENOMEM; 193 return -ENOMEM;
194 if (pnp_bios_get_dev_node(&nodenum, boot, node)) { 194 if (pnp_bios_get_dev_node(&nodenum, boot, node)) {
diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c
index ef508a4de557..95b79685a9d1 100644
--- a/drivers/pnp/pnpbios/rsparser.c
+++ b/drivers/pnp/pnpbios/rsparser.c
@@ -248,7 +248,7 @@ static void
248pnpbios_parse_mem_option(unsigned char *p, int size, struct pnp_option *option) 248pnpbios_parse_mem_option(unsigned char *p, int size, struct pnp_option *option)
249{ 249{
250 struct pnp_mem * mem; 250 struct pnp_mem * mem;
251 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 251 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
252 if (!mem) 252 if (!mem)
253 return; 253 return;
254 mem->min = ((p[5] << 8) | p[4]) << 8; 254 mem->min = ((p[5] << 8) | p[4]) << 8;
@@ -264,7 +264,7 @@ static void
264pnpbios_parse_mem32_option(unsigned char *p, int size, struct pnp_option *option) 264pnpbios_parse_mem32_option(unsigned char *p, int size, struct pnp_option *option)
265{ 265{
266 struct pnp_mem * mem; 266 struct pnp_mem * mem;
267 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 267 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
268 if (!mem) 268 if (!mem)
269 return; 269 return;
270 mem->min = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]; 270 mem->min = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4];
@@ -280,7 +280,7 @@ static void
280pnpbios_parse_fixed_mem32_option(unsigned char *p, int size, struct pnp_option *option) 280pnpbios_parse_fixed_mem32_option(unsigned char *p, int size, struct pnp_option *option)
281{ 281{
282 struct pnp_mem * mem; 282 struct pnp_mem * mem;
283 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 283 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
284 if (!mem) 284 if (!mem)
285 return; 285 return;
286 mem->min = mem->max = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]; 286 mem->min = mem->max = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4];
@@ -297,7 +297,7 @@ pnpbios_parse_irq_option(unsigned char *p, int size, struct pnp_option *option)
297 struct pnp_irq * irq; 297 struct pnp_irq * irq;
298 unsigned long bits; 298 unsigned long bits;
299 299
300 irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL); 300 irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL);
301 if (!irq) 301 if (!irq)
302 return; 302 return;
303 bits = (p[2] << 8) | p[1]; 303 bits = (p[2] << 8) | p[1];
@@ -314,7 +314,7 @@ static void
314pnpbios_parse_dma_option(unsigned char *p, int size, struct pnp_option *option) 314pnpbios_parse_dma_option(unsigned char *p, int size, struct pnp_option *option)
315{ 315{
316 struct pnp_dma * dma; 316 struct pnp_dma * dma;
317 dma = kcalloc(1, sizeof(struct pnp_dma), GFP_KERNEL); 317 dma = kzalloc(sizeof(struct pnp_dma), GFP_KERNEL);
318 if (!dma) 318 if (!dma)
319 return; 319 return;
320 dma->map = p[1]; 320 dma->map = p[1];
@@ -327,7 +327,7 @@ static void
327pnpbios_parse_port_option(unsigned char *p, int size, struct pnp_option *option) 327pnpbios_parse_port_option(unsigned char *p, int size, struct pnp_option *option)
328{ 328{
329 struct pnp_port * port; 329 struct pnp_port * port;
330 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL); 330 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
331 if (!port) 331 if (!port)
332 return; 332 return;
333 port->min = (p[3] << 8) | p[2]; 333 port->min = (p[3] << 8) | p[2];
@@ -343,7 +343,7 @@ static void
343pnpbios_parse_fixed_port_option(unsigned char *p, int size, struct pnp_option *option) 343pnpbios_parse_fixed_port_option(unsigned char *p, int size, struct pnp_option *option)
344{ 344{
345 struct pnp_port * port; 345 struct pnp_port * port;
346 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL); 346 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
347 if (!port) 347 if (!port)
348 return; 348 return;
349 port->min = port->max = (p[2] << 8) | p[1]; 349 port->min = port->max = (p[2] << 8) | p[1];
@@ -527,7 +527,7 @@ pnpbios_parse_compatible_ids(unsigned char *p, unsigned char *end, struct pnp_de
527 case SMALL_TAG_COMPATDEVID: /* compatible ID */ 527 case SMALL_TAG_COMPATDEVID: /* compatible ID */
528 if (len != 4) 528 if (len != 4)
529 goto len_err; 529 goto len_err;
530 dev_id = kcalloc(1, sizeof (struct pnp_id), GFP_KERNEL); 530 dev_id = kzalloc(sizeof (struct pnp_id), GFP_KERNEL);
531 if (!dev_id) 531 if (!dev_id)
532 return NULL; 532 return NULL;
533 memset(dev_id, 0, sizeof(struct pnp_id)); 533 memset(dev_id, 0, sizeof(struct pnp_id));
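Note on the pnpbios hunks above: kcalloc(1, size, flags) and kzalloc(size, flags) both return zero-filled memory, so converting the single-element kcalloc() calls to kzalloc() does not change behaviour, it just states the intent (one zeroed object, not an array). Because kzalloc() already zeroes the allocation, a following memset(ptr, 0, size) such as the one still visible in the SMALL_TAG_COMPATDEVID case above is redundant. A minimal sketch of the pattern, using a hypothetical struct foo rather than the real pnp structures:

#include <linux/slab.h>

struct foo {                            /* hypothetical structure, for illustration only */
        int bar;
};

static struct foo *alloc_one_foo(void)
{
        struct foo *p;

        /* was: p = kcalloc(1, sizeof(struct foo), GFP_KERNEL); */
        p = kzalloc(sizeof(struct foo), GFP_KERNEL);
        if (!p)
                return NULL;            /* caller must handle allocation failure */

        /* p is already zero-filled, so no memset(p, 0, sizeof(*p)) is needed */
        return p;
}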
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 5c8addcaf1fb..4f654c901c64 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -137,6 +137,9 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
137 tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); 137 tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
138 tm->tm_year = at91_alarm_year - 1900; 138 tm->tm_year = at91_alarm_year - 1900;
139 139
140 alrm->enabled = (at91_sys_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
141 ? 1 : 0;
142
140 pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __FUNCTION__, 143 pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __FUNCTION__,
141 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, 144 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
142 tm->tm_hour, tm->tm_min, tm->tm_sec); 145 tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -223,8 +226,6 @@ static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
223{ 226{
224 unsigned long imr = at91_sys_read(AT91_RTC_IMR); 227 unsigned long imr = at91_sys_read(AT91_RTC_IMR);
225 228
226 seq_printf(seq, "alarm_IRQ\t: %s\n",
227 (imr & AT91_RTC_ALARM) ? "yes" : "no");
228 seq_printf(seq, "update_IRQ\t: %s\n", 229 seq_printf(seq, "update_IRQ\t: %s\n",
229 (imr & AT91_RTC_ACKUPD) ? "yes" : "no"); 230 (imr & AT91_RTC_ACKUPD) ? "yes" : "no");
230 seq_printf(seq, "periodic_IRQ\t: %s\n", 231 seq_printf(seq, "periodic_IRQ\t: %s\n",
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 828b329e08e0..94d3df62a5fa 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -435,7 +435,7 @@ static int rtc_dev_add_device(struct class_device *class_dev,
435 goto err_cdev_del; 435 goto err_cdev_del;
436 } 436 }
437 437
438 dev_info(class_dev->dev, "rtc intf: dev (%d:%d)\n", 438 dev_dbg(class_dev->dev, "rtc intf: dev (%d:%d)\n",
439 MAJOR(rtc->rtc_dev->devt), 439 MAJOR(rtc->rtc_dev->devt),
440 MINOR(rtc->rtc_dev->devt)); 440 MINOR(rtc->rtc_dev->devt));
441 441
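The dev_info() to dev_dbg() changes in the rtc interface files demote the "rtc intf: ..." registration messages from unconditional KERN_INFO output to debug output, which in kernels of this vintage compiles away entirely unless the driver is built with DEBUG defined. A one-line sketch with a made-up helper (announce_intf is not part of the driver):

#include <linux/device.h>

/* Only emitted when the file is built with DEBUG defined; otherwise a no-op. */
static void announce_intf(struct device *dev)
{
        dev_dbg(dev, "rtc intf: example\n");
}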
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index eac5fb1fc02f..d59880d44fba 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -279,9 +279,8 @@ static int omap_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
279 local_irq_enable(); 279 local_irq_enable();
280 280
281 bcd2tm(&alm->time); 281 bcd2tm(&alm->time);
282 alm->pending = !!(rtc_read(OMAP_RTC_INTERRUPTS_REG) 282 alm->enabled = !!(rtc_read(OMAP_RTC_INTERRUPTS_REG)
283 & OMAP_RTC_INTERRUPTS_IT_ALARM); 283 & OMAP_RTC_INTERRUPTS_IT_ALARM);
284 alm->enabled = alm->pending && device_may_wakeup(dev);
285 284
286 return 0; 285 return 0;
287} 286}
diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c
index d51d8f20e634..c272afd62173 100644
--- a/drivers/rtc/rtc-proc.c
+++ b/drivers/rtc/rtc-proc.c
@@ -65,7 +65,7 @@ static int rtc_proc_show(struct seq_file *seq, void *offset)
65 seq_printf(seq, "%02d\n", alrm.time.tm_mday); 65 seq_printf(seq, "%02d\n", alrm.time.tm_mday);
66 else 66 else
67 seq_printf(seq, "**\n"); 67 seq_printf(seq, "**\n");
68 seq_printf(seq, "alrm_wakeup\t: %s\n", 68 seq_printf(seq, "alarm_IRQ\t: %s\n",
69 alrm.enabled ? "yes" : "no"); 69 alrm.enabled ? "yes" : "no");
70 seq_printf(seq, "alrm_pending\t: %s\n", 70 seq_printf(seq, "alrm_pending\t: %s\n",
71 alrm.pending ? "yes" : "no"); 71 alrm.pending ? "yes" : "no");
@@ -120,7 +120,7 @@ static int rtc_proc_add_device(struct class_device *class_dev,
120 ent->owner = rtc->owner; 120 ent->owner = rtc->owner;
121 ent->data = class_dev; 121 ent->data = class_dev;
122 122
123 dev_info(class_dev->dev, "rtc intf: proc\n"); 123 dev_dbg(class_dev->dev, "rtc intf: proc\n");
124 } 124 }
125 else 125 else
126 rtc_dev = NULL; 126 rtc_dev = NULL;
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index e301dea57bb3..f406a2b55aea 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -191,6 +191,8 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
191 191
192 alm_en = readb(base + S3C2410_RTCALM); 192 alm_en = readb(base + S3C2410_RTCALM);
193 193
194 alrm->enabled = (alm_en & S3C2410_RTCALM_ALMEN) ? 1 : 0;
195
194 pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n", 196 pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n",
195 alm_en, 197 alm_en,
196 alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday, 198 alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
@@ -331,12 +333,8 @@ static int s3c_rtc_ioctl(struct device *dev,
331 333
332static int s3c_rtc_proc(struct device *dev, struct seq_file *seq) 334static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
333{ 335{
334 unsigned int rtcalm = readb(s3c_rtc_base + S3C2410_RTCALM);
335 unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT); 336 unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT);
336 337
337 seq_printf(seq, "alarm_IRQ\t: %s\n",
338 (rtcalm & S3C2410_RTCALM_ALMEN) ? "yes" : "no" );
339
340 seq_printf(seq, "periodic_IRQ\t: %s\n", 338 seq_printf(seq, "periodic_IRQ\t: %s\n",
341 (ticnt & S3C2410_TICNT_ENABLE) ? "yes" : "no" ); 339 (ticnt & S3C2410_TICNT_ENABLE) ? "yes" : "no" );
342 340
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index bd4d7d174ef4..9c8ead43a59c 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -289,9 +289,7 @@ static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
289 289
290static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq) 290static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq)
291{ 291{
292 seq_printf(seq, "trim/divider\t: 0x%08lx\n", RTTR); 292 seq_printf(seq, "trim/divider\t: 0x%08x\n", (u32) RTTR);
293 seq_printf(seq, "alarm_IRQ\t: %s\n",
294 (RTSR & RTSR_ALE) ? "yes" : "no" );
295 seq_printf(seq, "update_IRQ\t: %s\n", 293 seq_printf(seq, "update_IRQ\t: %s\n",
296 (RTSR & RTSR_HZE) ? "yes" : "no"); 294 (RTSR & RTSR_HZE) ? "yes" : "no");
297 seq_printf(seq, "periodic_IRQ\t: %s\n", 295 seq_printf(seq, "periodic_IRQ\t: %s\n",
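The RTC hunks above (at91rm9200, omap, s3c, sa1100, rtc-proc) all implement the same move: the per-driver /proc reporting of the alarm interrupt state is dropped, and each driver instead fills alrm->enabled in its read_alarm callback from its interrupt-mask register, so the generic rtc-proc code can print alarm_IRQ uniformly for every driver. A minimal sketch of the callback side; my_rtc_read(), MY_RTC_IMR and MY_RTC_ALARM are placeholders, not a real driver's interface:

#include <linux/types.h>
#include <linux/device.h>
#include <linux/rtc.h>

/* hypothetical register accessor and bits, for illustration only */
extern u32 my_rtc_read(unsigned int reg);
#define MY_RTC_IMR      0x28
#define MY_RTC_ALARM    (1 << 1)

static int my_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
        /* ... fill alrm->time from the alarm registers ... */

        /* report whether the alarm interrupt is currently unmasked */
        alrm->enabled = (my_rtc_read(MY_RTC_IMR) & MY_RTC_ALARM) ? 1 : 0;

        return 0;
}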
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index 625637b84d33..9418a59fb368 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -83,7 +83,7 @@ static int __devinit rtc_sysfs_add_device(struct class_device *class_dev,
83{ 83{
84 int err; 84 int err;
85 85
86 dev_info(class_dev->dev, "rtc intf: sysfs\n"); 86 dev_dbg(class_dev->dev, "rtc intf: sysfs\n");
87 87
88 err = sysfs_create_group(&class_dev->kobj, &rtc_attr_group); 88 err = sysfs_create_group(&class_dev->kobj, &rtc_attr_group);
89 if (err) 89 if (err)
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index c9321b920e90..25b5d7a66417 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -688,7 +688,7 @@ raw3215_probe (struct ccw_device *cdev)
688 raw->cdev = cdev; 688 raw->cdev = cdev;
689 raw->inbuf = (char *) raw + sizeof(struct raw3215_info); 689 raw->inbuf = (char *) raw + sizeof(struct raw3215_info);
690 memset(raw, 0, sizeof(struct raw3215_info)); 690 memset(raw, 0, sizeof(struct raw3215_info));
691 raw->buffer = (char *) kmalloc(RAW3215_BUFFER_SIZE, 691 raw->buffer = kmalloc(RAW3215_BUFFER_SIZE,
692 GFP_KERNEL|GFP_DMA); 692 GFP_KERNEL|GFP_DMA);
693 if (raw->buffer == NULL) { 693 if (raw->buffer == NULL) {
694 spin_lock(&raw3215_device_lock); 694 spin_lock(&raw3215_device_lock);
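This hunk and most of the following ones drop explicit casts on the return value of kmalloc(). kmalloc() returns void *, which C converts implicitly to any object pointer type, so the casts add nothing and can even hide a missing declaration of kmalloc() in old code. A small before/after sketch; the buffer size is an arbitrary illustrative value:

#include <linux/slab.h>

#define EXAMPLE_BUF_SIZE 512            /* illustrative size, not from any driver */

static char *alloc_dma_buffer(void)
{
        char *buf;

        /* was: buf = (char *) kmalloc(EXAMPLE_BUF_SIZE, GFP_KERNEL | GFP_DMA); */
        buf = kmalloc(EXAMPLE_BUF_SIZE, GFP_KERNEL | GFP_DMA);
        if (!buf)
                return NULL;            /* kmalloc() returns void *, no cast needed */

        return buf;
}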
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index e3491a5f5219..3e86fd1756e5 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -377,7 +377,7 @@ do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
377 if (!(key_map = kbd->key_maps[tmp.kb_table])) { 377 if (!(key_map = kbd->key_maps[tmp.kb_table])) {
378 int j; 378 int j;
379 379
380 key_map = (ushort *) kmalloc(sizeof(plain_map), 380 key_map = kmalloc(sizeof(plain_map),
381 GFP_KERNEL); 381 GFP_KERNEL);
382 if (!key_map) 382 if (!key_map)
383 return -ENOMEM; 383 return -ENOMEM;
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index 732dfbdb85c4..f7c10d954ec6 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -127,7 +127,7 @@ cpi_prepare_req(void)
127 struct cpi_sccb *sccb; 127 struct cpi_sccb *sccb;
128 struct cpi_evbuf *evb; 128 struct cpi_evbuf *evb;
129 129
130 req = (struct sclp_req *) kmalloc(sizeof(struct sclp_req), GFP_KERNEL); 130 req = kmalloc(sizeof(struct sclp_req), GFP_KERNEL);
131 if (req == NULL) 131 if (req == NULL)
132 return ERR_PTR(-ENOMEM); 132 return ERR_PTR(-ENOMEM);
133 sccb = (struct cpi_sccb *) __get_free_page(GFP_KERNEL | GFP_DMA); 133 sccb = (struct cpi_sccb *) __get_free_page(GFP_KERNEL | GFP_DMA);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index a62b00083d0c..5bb13a9d0898 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -295,7 +295,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
295 struct completion work; 295 struct completion work;
296 int rc; 296 int rc;
297 297
298 ap_msg.message = (void *) kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL); 298 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
299 if (!ap_msg.message) 299 if (!ap_msg.message)
300 return -ENOMEM; 300 return -ENOMEM;
301 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 301 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
@@ -337,7 +337,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
337 struct completion work; 337 struct completion work;
338 int rc; 338 int rc;
339 339
340 ap_msg.message = (void *) kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL); 340 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
341 if (!ap_msg.message) 341 if (!ap_msg.message)
342 return -ENOMEM; 342 return -ENOMEM;
343 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 343 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index b6a4ecdc8025..32e37014345c 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -279,7 +279,7 @@ static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev,
279 struct completion work; 279 struct completion work;
280 int rc; 280 int rc;
281 281
282 ap_msg.message = (void *) kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL); 282 ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
283 if (!ap_msg.message) 283 if (!ap_msg.message)
284 return -ENOMEM; 284 return -ENOMEM;
285 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 285 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
@@ -321,7 +321,7 @@ static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev,
321 struct completion work; 321 struct completion work;
322 int rc; 322 int rc;
323 323
324 ap_msg.message = (void *) kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL); 324 ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
325 if (!ap_msg.message) 325 if (!ap_msg.message)
326 return -ENOMEM; 326 return -ENOMEM;
327 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 327 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 2da8b9381407..b7153c1e15cd 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -717,7 +717,7 @@ long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, struct ica_xcRB *xcRB)
717 }; 717 };
718 int rc; 718 int rc;
719 719
720 ap_msg.message = (void *) kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL); 720 ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
721 if (!ap_msg.message) 721 if (!ap_msg.message)
722 return -ENOMEM; 722 return -ENOMEM;
723 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 723 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index 3257c22dd79c..03cc263fe0da 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -1646,7 +1646,7 @@ add_channel(struct ccw_device *cdev, enum channel_types type)
1646 return -1; 1646 return -1;
1647 } 1647 }
1648 memset(ch, 0, sizeof (struct channel)); 1648 memset(ch, 0, sizeof (struct channel));
1649 if ((ch->ccw = (struct ccw1 *) kmalloc(8*sizeof(struct ccw1), 1649 if ((ch->ccw = kmalloc(8*sizeof(struct ccw1),
1650 GFP_KERNEL | GFP_DMA)) == NULL) { 1650 GFP_KERNEL | GFP_DMA)) == NULL) {
1651 kfree(ch); 1651 kfree(ch);
1652 ctc_pr_warn("ctc: Out of memory in add_channel\n"); 1652 ctc_pr_warn("ctc: Out of memory in add_channel\n");
@@ -1693,7 +1693,7 @@ add_channel(struct ccw_device *cdev, enum channel_types type)
1693 return -1; 1693 return -1;
1694 } 1694 }
1695 fsm_newstate(ch->fsm, CH_STATE_IDLE); 1695 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1696 if ((ch->irb = (struct irb *) kmalloc(sizeof (struct irb), 1696 if ((ch->irb = kmalloc(sizeof (struct irb),
1697 GFP_KERNEL)) == NULL) { 1697 GFP_KERNEL)) == NULL) {
1698 ctc_pr_warn("ctc: Out of memory in add_channel\n"); 1698 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1699 kfree_fsm(ch->fsm); 1699 kfree_fsm(ch->fsm);
@@ -2535,7 +2535,7 @@ ctc_print_statistics(struct ctc_priv *priv)
2535 DBF_TEXT(trace, 4, __FUNCTION__); 2535 DBF_TEXT(trace, 4, __FUNCTION__);
2536 if (!priv) 2536 if (!priv)
2537 return; 2537 return;
2538 sbuf = (char *)kmalloc(2048, GFP_KERNEL); 2538 sbuf = kmalloc(2048, GFP_KERNEL);
2539 if (sbuf == NULL) 2539 if (sbuf == NULL)
2540 return; 2540 return;
2541 p = sbuf; 2541 p = sbuf;
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c
index 1476ce2b437c..229aeb5fc399 100644
--- a/drivers/s390/net/iucv.c
+++ b/drivers/s390/net/iucv.c
@@ -772,7 +772,7 @@ iucv_register_program (__u8 pgmname[16],
772 } 772 }
773 773
774 /* Allocate handler entry */ 774 /* Allocate handler entry */
775 new_handler = (handler *)kmalloc(sizeof(handler), GFP_ATOMIC); 775 new_handler = kmalloc(sizeof(handler), GFP_ATOMIC);
776 if (new_handler == NULL) { 776 if (new_handler == NULL) {
777 printk(KERN_WARNING "%s: storage allocation for new handler " 777 printk(KERN_WARNING "%s: storage allocation for new handler "
778 "failed.\n", __FUNCTION__); 778 "failed.\n", __FUNCTION__);
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 5d39b2df0cc4..85093b71f9fa 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -237,7 +237,7 @@ zfcp_device_setup(char *devstr)
237 return 0; 237 return 0;
238 238
239 len = strlen(devstr) + 1; 239 len = strlen(devstr) + 1;
240 str = (char *) kmalloc(len, GFP_KERNEL); 240 str = kmalloc(len, GFP_KERNEL);
241 if (!str) 241 if (!str)
242 goto err_out; 242 goto err_out;
243 memcpy(str, devstr, len); 243 memcpy(str, devstr, len);
diff --git a/drivers/sbus/char/vfc_dev.c b/drivers/sbus/char/vfc_dev.c
index 2722af5d3404..386e7de0b7e3 100644
--- a/drivers/sbus/char/vfc_dev.c
+++ b/drivers/sbus/char/vfc_dev.c
@@ -659,7 +659,7 @@ static int vfc_probe(void)
659 if (!cards) 659 if (!cards)
660 return -ENODEV; 660 return -ENODEV;
661 661
662 vfc_dev_lst = (struct vfc_dev **)kmalloc(sizeof(struct vfc_dev *) * 662 vfc_dev_lst = kmalloc(sizeof(struct vfc_dev *) *
663 (cards+1), 663 (cards+1),
664 GFP_KERNEL); 664 GFP_KERNEL);
665 if (vfc_dev_lst == NULL) 665 if (vfc_dev_lst == NULL)
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index ac108f9e2674..426cd6f49f5d 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -288,7 +288,7 @@ int aac_get_containers(struct aac_dev *dev)
288 288
289 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) 289 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
290 maximum_num_containers = MAXIMUM_NUM_CONTAINERS; 290 maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
291 fsa_dev_ptr = (struct fsa_dev_info *) kmalloc( 291 fsa_dev_ptr = kmalloc(
292 sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL); 292 sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL);
293 if (!fsa_dev_ptr) { 293 if (!fsa_dev_ptr) {
294 aac_fib_free(fibptr); 294 aac_fib_free(fibptr);
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index d5cf8b91a0e7..6d305b2f854e 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -386,7 +386,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
386 * Ok now init the communication subsystem 386 * Ok now init the communication subsystem
387 */ 387 */
388 388
389 dev->queues = (struct aac_queue_block *) kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL); 389 dev->queues = kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
390 if (dev->queues == NULL) { 390 if (dev->queues == NULL) {
391 printk(KERN_ERR "Error could not allocate comm region.\n"); 391 printk(KERN_ERR "Error could not allocate comm region.\n");
392 return NULL; 392 return NULL;
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index d7a61a6bdaae..1d239f6c0103 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -699,7 +699,7 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
699#endif 699#endif
700 int i; 700 int i;
701 ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */ 701 ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */
702 SCpnt->host_scribble = (unsigned char *) kmalloc(512, GFP_KERNEL | GFP_DMA); 702 SCpnt->host_scribble = kmalloc(512, GFP_KERNEL | GFP_DMA);
703 sgpnt = (struct scatterlist *) SCpnt->request_buffer; 703 sgpnt = (struct scatterlist *) SCpnt->request_buffer;
704 cptr = (struct chain *) SCpnt->host_scribble; 704 cptr = (struct chain *) SCpnt->host_scribble;
705 if (cptr == NULL) { 705 if (cptr == NULL) {
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index 46eed10b25d9..7d1fec620948 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -2565,7 +2565,7 @@ aic7xxx_allocate_scb(struct aic7xxx_host *p)
2565 } 2565 }
2566 } 2566 }
2567 scb_count = min( (i-1), p->scb_data->maxscbs - p->scb_data->numscbs); 2567 scb_count = min( (i-1), p->scb_data->maxscbs - p->scb_data->numscbs);
2568 scb_ap = (struct aic7xxx_scb *)kmalloc(sizeof (struct aic7xxx_scb) * scb_count 2568 scb_ap = kmalloc(sizeof (struct aic7xxx_scb) * scb_count
2569 + sizeof(struct aic7xxx_scb_dma), GFP_ATOMIC); 2569 + sizeof(struct aic7xxx_scb_dma), GFP_ATOMIC);
2570 if (scb_ap == NULL) 2570 if (scb_ap == NULL)
2571 return(0); 2571 return(0);
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index e95b367d09ed..a965ed3548d5 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -4319,7 +4319,7 @@ static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
4319 4319
4320 dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages); 4320 dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
4321 while (pages--) { 4321 while (pages--) {
4322 ptr = (struct SGentry *)kmalloc(PAGE_SIZE, GFP_KERNEL); 4322 ptr = kmalloc(PAGE_SIZE, GFP_KERNEL);
4323 if (!ptr) { 4323 if (!ptr) {
4324 adapter_sg_tables_free(acb); 4324 adapter_sg_tables_free(acb);
4325 return 1; 4325 return 1;
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 60b1b434eba7..365db537a28d 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -297,7 +297,7 @@ static void adpt_inquiry(adpt_hba* pHba)
297 s32 rcode; 297 s32 rcode;
298 298
299 memset(msg, 0, sizeof(msg)); 299 memset(msg, 0, sizeof(msg));
300 buf = (u8*)kmalloc(80,GFP_KERNEL|ADDR32); 300 buf = kmalloc(80,GFP_KERNEL|ADDR32);
301 if(!buf){ 301 if(!buf){
302 printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name); 302 printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
303 return; 303 return;
@@ -1311,7 +1311,7 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1311 schedule_timeout_uninterruptible(1); 1311 schedule_timeout_uninterruptible(1);
1312 } while (m == EMPTY_QUEUE); 1312 } while (m == EMPTY_QUEUE);
1313 1313
1314 status = (u8*)kmalloc(4, GFP_KERNEL|ADDR32); 1314 status = kmalloc(4, GFP_KERNEL|ADDR32);
1315 if(status == NULL) { 1315 if(status == NULL) {
1316 adpt_send_nop(pHba, m); 1316 adpt_send_nop(pHba, m);
1317 printk(KERN_ERR"IOP reset failed - no free memory.\n"); 1317 printk(KERN_ERR"IOP reset failed - no free memory.\n");
@@ -1444,7 +1444,7 @@ static int adpt_i2o_parse_lct(adpt_hba* pHba)
1444 } 1444 }
1445 continue; 1445 continue;
1446 } 1446 }
1447 d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL); 1447 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1448 if(d==NULL) 1448 if(d==NULL)
1449 { 1449 {
1450 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name); 1450 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
@@ -2425,7 +2425,7 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2425 pDev = pDev->next_lun; 2425 pDev = pDev->next_lun;
2426 } 2426 }
2427 if(!pDev ) { // Something new add it 2427 if(!pDev ) { // Something new add it
2428 d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL); 2428 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
2429 if(d==NULL) 2429 if(d==NULL)
2430 { 2430 {
2431 printk(KERN_CRIT "Out of memory for I2O device data.\n"); 2431 printk(KERN_CRIT "Out of memory for I2O device data.\n");
@@ -2728,7 +2728,7 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2728 2728
2729 kfree(pHba->reply_pool); 2729 kfree(pHba->reply_pool);
2730 2730
2731 pHba->reply_pool = (u32*)kmalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32); 2731 pHba->reply_pool = kmalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32);
2732 if(!pHba->reply_pool){ 2732 if(!pHba->reply_pool){
2733 printk(KERN_ERR"%s: Could not allocate reply pool\n",pHba->name); 2733 printk(KERN_ERR"%s: Could not allocate reply pool\n",pHba->name);
2734 return -1; 2734 return -1;
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index f160357e37a6..d561663fb4e4 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2828,7 +2828,7 @@ static int i91u_detect(struct scsi_host_template * tpnt)
2828 2828
2829 for (; tul_num_scb >= MAX_TARGETS + 3; tul_num_scb--) { 2829 for (; tul_num_scb >= MAX_TARGETS + 3; tul_num_scb--) {
2830 i = tul_num_ch * tul_num_scb * sizeof(SCB); 2830 i = tul_num_ch * tul_num_scb * sizeof(SCB);
2831 if ((tul_scb = (SCB *) kmalloc(i, GFP_ATOMIC | GFP_DMA)) != NULL) 2831 if ((tul_scb = kmalloc(i, GFP_ATOMIC | GFP_DMA)) != NULL)
2832 break; 2832 break;
2833 } 2833 }
2834 if (tul_scb == NULL) { 2834 if (tul_scb == NULL) {
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 824fe080d1dc..7d2311067903 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -5777,7 +5777,7 @@ static int osst_probe(struct device *dev)
5777 dev_num = i; 5777 dev_num = i;
5778 5778
5779 /* allocate a struct osst_tape for this device */ 5779 /* allocate a struct osst_tape for this device */
5780 tpnt = (struct osst_tape *)kmalloc(sizeof(struct osst_tape), GFP_ATOMIC); 5780 tpnt = kmalloc(sizeof(struct osst_tape), GFP_ATOMIC);
5781 if (tpnt == NULL) { 5781 if (tpnt == NULL) {
5782 write_unlock(&os_scsi_tapes_lock); 5782 write_unlock(&os_scsi_tapes_lock);
5783 printk(KERN_ERR "osst :E: Can't allocate device descriptor, device not attached.\n"); 5783 printk(KERN_ERR "osst :E: Can't allocate device descriptor, device not attached.\n");
diff --git a/drivers/scsi/pluto.c b/drivers/scsi/pluto.c
index aa60a5f1fbc3..3b2e1a53e6e2 100644
--- a/drivers/scsi/pluto.c
+++ b/drivers/scsi/pluto.c
@@ -117,7 +117,7 @@ int __init pluto_detect(struct scsi_host_template *tpnt)
117#endif 117#endif
118 return 0; 118 return 0;
119 } 119 }
120 fcs = (struct ctrl_inquiry *) kmalloc (sizeof (struct ctrl_inquiry) * fcscount, GFP_DMA); 120 fcs = kmalloc(sizeof (struct ctrl_inquiry) * fcscount, GFP_DMA);
121 if (!fcs) { 121 if (!fcs) {
122 printk ("PLUTO: Not enough memory to probe\n"); 122 printk ("PLUTO: Not enough memory to probe\n");
123 return 0; 123 return 0;
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index d1268cb46837..0578ba42718b 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -546,7 +546,7 @@ int sr_is_xa(Scsi_CD *cd)
546 if (!xa_test) 546 if (!xa_test)
547 return 0; 547 return 0;
548 548
549 raw_sector = (unsigned char *) kmalloc(2048, GFP_KERNEL | SR_GFP_DMA(cd)); 549 raw_sector = kmalloc(2048, GFP_KERNEL | SR_GFP_DMA(cd));
550 if (!raw_sector) 550 if (!raw_sector)
551 return -ENOMEM; 551 return -ENOMEM;
552 if (0 == sr_read_sector(cd, cd->ms_offset + 16, 552 if (0 == sr_read_sector(cd, cd->ms_offset + 16,
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index a3e9d0f2eb5b..4eb3da996b36 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -117,7 +117,7 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength)
117 density = (blocklength > 2048) ? 0x81 : 0x83; 117 density = (blocklength > 2048) ? 0x81 : 0x83;
118#endif 118#endif
119 119
120 buffer = (unsigned char *) kmalloc(512, GFP_KERNEL | GFP_DMA); 120 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
121 if (!buffer) 121 if (!buffer)
122 return -ENOMEM; 122 return -ENOMEM;
123 123
@@ -164,7 +164,7 @@ int sr_cd_check(struct cdrom_device_info *cdi)
164 if (cd->cdi.mask & CDC_MULTI_SESSION) 164 if (cd->cdi.mask & CDC_MULTI_SESSION)
165 return 0; 165 return 0;
166 166
167 buffer = (unsigned char *) kmalloc(512, GFP_KERNEL | GFP_DMA); 167 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
168 if (!buffer) 168 if (!buffer)
169 return -ENOMEM; 169 return -ENOMEM;
170 170
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 940fa1e6f994..21cd4c7f5289 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -5545,7 +5545,7 @@ int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram
5545 /* 5545 /*
5546 * Allocate the array of lists of CCBs hashed by DSA. 5546 * Allocate the array of lists of CCBs hashed by DSA.
5547 */ 5547 */
5548 np->ccbh = kcalloc(sizeof(struct sym_ccb **), CCB_HASH_SIZE, GFP_KERNEL); 5548 np->ccbh = kcalloc(CCB_HASH_SIZE, sizeof(struct sym_ccb **), GFP_KERNEL);
5549 if (!np->ccbh) 5549 if (!np->ccbh)
5550 goto attach_failed; 5550 goto attach_failed;
5551 5551
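The sym_hipd.c hunk above swaps the first two arguments of kcalloc(): the prototype is kcalloc(n, size, flags), element count first and element size second. The original order produced the same byte count, since the two are only multiplied together, but it read against the documented meaning of the parameters. A short sketch of the corrected usage for an array of pointers, with a hypothetical element type:

#include <linux/slab.h>

struct item;                            /* hypothetical element type */

static struct item **alloc_hash_table(unsigned int nr_buckets)
{
        struct item **table;

        /* kcalloc(n, size, flags): number of elements first, element size second */
        table = kcalloc(nr_buckets, sizeof(struct item *), GFP_KERNEL);
        if (!table)
                return NULL;

        return table;                   /* all bucket pointers start out NULL */
}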
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 4d0ff8f4a01b..52e2e64c6649 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -2239,6 +2239,30 @@ static struct pci_device_id serial_pci_tbl[] = {
2239 pbn_b0_bt_1_460800 }, 2239 pbn_b0_bt_1_460800 },
2240 2240
2241 /* 2241 /*
2242 * Korenix Jetcard F0/F1 cards (JC1204, JC1208, JC1404, JC1408).
2243 * Cards are identified by their subsystem vendor IDs, which
2244 * (in hex) match the model number.
2245 *
2246 * Note that JC140x are RS422/485 cards which require ox950
2247 * ACR = 0x10, and as such are not currently fully supported.
2248 */
2249 { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF0,
2250 0x1204, 0x0004, 0, 0,
2251 pbn_b0_4_921600 },
2252 { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF0,
2253 0x1208, 0x0004, 0, 0,
2254 pbn_b0_4_921600 },
2255/* { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF0,
2256 0x1402, 0x0002, 0, 0,
2257 pbn_b0_2_921600 }, */
2258/* { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF0,
2259 0x1404, 0x0004, 0, 0,
2260 pbn_b0_4_921600 }, */
2261 { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF1,
2262 0x1208, 0x0004, 0, 0,
2263 pbn_b0_4_921600 },
2264
2265 /*
2242 * Dell Remote Access Card 4 - Tim_T_Murphy@Dell.com 2266 * Dell Remote Access Card 4 - Tim_T_Murphy@Dell.com
2243 */ 2267 */
2244 { PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_RAC4, 2268 { PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_RAC4,
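The Korenix entries added above are positional initialisers of struct pci_device_id, whose field order is vendor, device, subvendor, subdevice, class, class_mask, driver_data; that is why the subsystem IDs (0x1204, 0x1208, ...) sit in the third slot and the board type (pbn_b0_4_921600) in the last. A sketch of one such entry with made-up vendor/device values; MY_VENDOR_ID, MY_DEVICE_ID and my_board_data are placeholders:

#include <linux/pci.h>

#define MY_VENDOR_ID    0x1234          /* hypothetical IDs, for illustration only */
#define MY_DEVICE_ID    0x5678
#define my_board_data   0               /* stands in for a pbn_* enum value */

static const struct pci_device_id example_tbl[] = {
        { MY_VENDOR_ID, MY_DEVICE_ID,   /* vendor, device */
          0x1204, 0x0004,               /* subvendor, subdevice */
          0, 0,                         /* class, class_mask */
          my_board_data },              /* driver_data */
        { 0, }                          /* terminating entry */
};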
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 0b36dd5cdac2..2978c09860ee 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -663,7 +663,7 @@ config V850E_UART
663 663
664config V850E_UARTB 664config V850E_UARTB
665 bool 665 bool
666 depends V850E_UART && V850E_ME2 666 depends on V850E_UART && V850E_ME2
667 default y 667 default y
668 668
669config V850E_UART_CONSOLE 669config V850E_UART_CONSOLE
@@ -909,7 +909,7 @@ config SERIAL_M32R_PLDSIO
909 909
910config SERIAL_TXX9 910config SERIAL_TXX9
911 bool "TMPTX39XX/49XX SIO support" 911 bool "TMPTX39XX/49XX SIO support"
912 depends HAS_TXX9_SERIAL 912 depends on HAS_TXX9_SERIAL
913 select SERIAL_CORE 913 select SERIAL_CORE
914 default y 914 default y
915 915
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 7d623003e65e..71e6a24d8c28 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -1510,7 +1510,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
1510 } 1510 }
1511 1511
1512 if ( (retval = pci_request_regions(dev, "icom"))) { 1512 if ( (retval = pci_request_regions(dev, "icom"))) {
1513 dev_err(&dev->dev, "pci_request_region FAILED\n"); 1513 dev_err(&dev->dev, "pci_request_regions FAILED\n");
1514 pci_disable_device(dev); 1514 pci_disable_device(dev);
1515 return retval; 1515 return retval;
1516 } 1516 }
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 72f3db99ff94..3e0abbb49fe1 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -598,7 +598,7 @@ at91_ep_alloc_request(struct usb_ep *_ep, unsigned int gfp_flags)
598{ 598{
599 struct at91_request *req; 599 struct at91_request *req;
600 600
601 req = kcalloc(1, sizeof (struct at91_request), gfp_flags); 601 req = kzalloc(sizeof (struct at91_request), gfp_flags);
602 if (!req) 602 if (!req)
603 return NULL; 603 return NULL;
604 604
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index 5516c59ed5ec..2d12bf9f19d6 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -2195,7 +2195,7 @@ static struct gs_buf *gs_buf_alloc(unsigned int size, gfp_t kmalloc_flags)
2195 if (size == 0) 2195 if (size == 0)
2196 return NULL; 2196 return NULL;
2197 2197
2198 gb = (struct gs_buf *)kmalloc(sizeof(struct gs_buf), kmalloc_flags); 2198 gb = kmalloc(sizeof(struct gs_buf), kmalloc_flags);
2199 if (gb == NULL) 2199 if (gb == NULL)
2200 return NULL; 2200 return NULL;
2201 2201
diff --git a/drivers/usb/host/hc_crisv10.c b/drivers/usb/host/hc_crisv10.c
index 9325e46a68c0..282d82efc0b0 100644
--- a/drivers/usb/host/hc_crisv10.c
+++ b/drivers/usb/host/hc_crisv10.c
@@ -365,7 +365,7 @@ static inline struct urb *urb_list_first(int epid)
365/* Adds an urb_entry last in the list for this epid. */ 365/* Adds an urb_entry last in the list for this epid. */
366static inline void urb_list_add(struct urb *urb, int epid) 366static inline void urb_list_add(struct urb *urb, int epid)
367{ 367{
368 urb_entry_t *urb_entry = (urb_entry_t *)kmalloc(sizeof(urb_entry_t), KMALLOC_FLAG); 368 urb_entry_t *urb_entry = kmalloc(sizeof(urb_entry_t), KMALLOC_FLAG);
369 assert(urb_entry); 369 assert(urb_entry);
370 370
371 urb_entry->urb = urb; 371 urb_entry->urb = urb;
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c
index c703f73e1655..6c7f3efb1d40 100644
--- a/drivers/usb/misc/auerswald.c
+++ b/drivers/usb/misc/auerswald.c
@@ -766,7 +766,7 @@ static int auerbuf_setup (pauerbufctl_t bcp, unsigned int numElements, unsigned
766 bep->bufp = kmalloc (bufsize, GFP_KERNEL); 766 bep->bufp = kmalloc (bufsize, GFP_KERNEL);
767 if (!bep->bufp) 767 if (!bep->bufp)
768 goto bl_fail; 768 goto bl_fail;
769 bep->dr = (struct usb_ctrlrequest *) kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL); 769 bep->dr = kmalloc(sizeof (struct usb_ctrlrequest), GFP_KERNEL);
770 if (!bep->dr) 770 if (!bep->dr)
771 goto bl_fail; 771 goto bl_fail;
772 bep->urbp = usb_alloc_urb (0, GFP_KERNEL); 772 bep->urbp = usb_alloc_urb (0, GFP_KERNEL);
@@ -1969,7 +1969,7 @@ static int auerswald_probe (struct usb_interface *intf,
1969 info("device is a %s", cp->dev_desc); 1969 info("device is a %s", cp->dev_desc);
1970 1970
1971 /* get the maximum allowed control transfer length */ 1971 /* get the maximum allowed control transfer length */
1972 pbuf = (__le16 *) kmalloc (2, GFP_KERNEL); /* use an allocated buffer because of urb target */ 1972 pbuf = kmalloc(2, GFP_KERNEL); /* use an allocated buffer because of urb target */
1973 if (!pbuf) { 1973 if (!pbuf) {
1974 err( "out of memory"); 1974 err( "out of memory");
1975 goto pfail; 1975 goto pfail;
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 7e8a0acd52ee..70250252ae2a 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -705,7 +705,7 @@ static int uss720_probe(struct usb_interface *intf,
705 /* 705 /*
706 * Allocate parport interface 706 * Allocate parport interface
707 */ 707 */
708 if (!(priv = kcalloc(sizeof(struct parport_uss720_private), 1, GFP_KERNEL))) { 708 if (!(priv = kzalloc(sizeof(struct parport_uss720_private), GFP_KERNEL))) {
709 usb_put_dev(usbdev); 709 usb_put_dev(usbdev);
710 return -ENOMEM; 710 return -ENOMEM;
711 } 711 }
diff --git a/drivers/usb/net/rndis_host.c b/drivers/usb/net/rndis_host.c
index 99f26b3e502f..ea5f44de3de2 100644
--- a/drivers/usb/net/rndis_host.c
+++ b/drivers/usb/net/rndis_host.c
@@ -469,7 +469,7 @@ static void rndis_unbind(struct usbnet *dev, struct usb_interface *intf)
469 struct rndis_halt *halt; 469 struct rndis_halt *halt;
470 470
471 /* try to clear any rndis state/activity (no i/o from stack!) */ 471 /* try to clear any rndis state/activity (no i/o from stack!) */
472 halt = kcalloc(1, sizeof *halt, GFP_KERNEL); 472 halt = kzalloc(sizeof *halt, GFP_KERNEL);
473 if (halt) { 473 if (halt) {
474 halt->msg_type = RNDIS_MSG_HALT; 474 halt->msg_type = RNDIS_MSG_HALT;
475 halt->msg_len = ccpu2(sizeof *halt); 475 halt->msg_len = ccpu2(sizeof *halt);
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index a1fdb85b8c0a..45cdf9bc43b2 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -1493,7 +1493,7 @@ static struct cypress_buf *cypress_buf_alloc(unsigned int size)
1493 if (size == 0) 1493 if (size == 0)
1494 return NULL; 1494 return NULL;
1495 1495
1496 cb = (struct cypress_buf *)kmalloc(sizeof(struct cypress_buf), GFP_KERNEL); 1496 cb = kmalloc(sizeof(struct cypress_buf), GFP_KERNEL);
1497 if (cb == NULL) 1497 if (cb == NULL)
1498 return NULL; 1498 return NULL;
1499 1499
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 9d9ea874639c..efd9ce3f931f 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1681,7 +1681,7 @@ dbg( "digi_startup: TOP" );
1681 for( i=0; i<serial->type->num_ports+1; i++ ) { 1681 for( i=0; i<serial->type->num_ports+1; i++ ) {
1682 1682
1683 /* allocate port private structure */ 1683 /* allocate port private structure */
1684 priv = (struct digi_port *)kmalloc( sizeof(struct digi_port), 1684 priv = kmalloc( sizeof(struct digi_port),
1685 GFP_KERNEL ); 1685 GFP_KERNEL );
1686 if( priv == (struct digi_port *)0 ) { 1686 if( priv == (struct digi_port *)0 ) {
1687 while( --i >= 0 ) 1687 while( --i >= 0 )
@@ -1714,7 +1714,7 @@ dbg( "digi_startup: TOP" );
1714 } 1714 }
1715 1715
1716 /* allocate serial private structure */ 1716 /* allocate serial private structure */
1717 serial_priv = (struct digi_serial *)kmalloc( sizeof(struct digi_serial), 1717 serial_priv = kmalloc( sizeof(struct digi_serial),
1718 GFP_KERNEL ); 1718 GFP_KERNEL );
1719 if( serial_priv == (struct digi_serial *)0 ) { 1719 if( serial_priv == (struct digi_serial *)0 ) {
1720 for( i=0; i<serial->type->num_ports+1; i++ ) 1720 for( i=0; i<serial->type->num_ports+1; i++ )
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 2da2684e0809..980285c0233a 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2811,7 +2811,7 @@ static struct edge_buf *edge_buf_alloc(unsigned int size)
2811 if (size == 0) 2811 if (size == 0)
2812 return NULL; 2812 return NULL;
2813 2813
2814 eb = (struct edge_buf *)kmalloc(sizeof(struct edge_buf), GFP_KERNEL); 2814 eb = kmalloc(sizeof(struct edge_buf), GFP_KERNEL);
2815 if (eb == NULL) 2815 if (eb == NULL)
2816 return NULL; 2816 return NULL;
2817 2817
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index d72cf8bc7f76..42f757a5b876 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -595,7 +595,7 @@ static int ipaq_open(struct usb_serial_port *port, struct file *filp)
595 595
596 bytes_in = 0; 596 bytes_in = 0;
597 bytes_out = 0; 597 bytes_out = 0;
598 priv = (struct ipaq_private *)kmalloc(sizeof(struct ipaq_private), GFP_KERNEL); 598 priv = kmalloc(sizeof(struct ipaq_private), GFP_KERNEL);
599 if (priv == NULL) { 599 if (priv == NULL) {
600 err("%s - Out of memory", __FUNCTION__); 600 err("%s - Out of memory", __FUNCTION__);
601 return -ENOMEM; 601 return -ENOMEM;
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index e284d6c0fd35..62bea0c923bd 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -269,7 +269,7 @@ static int kobil_open (struct usb_serial_port *port, struct file *filp)
269 } 269 }
270 270
271 // allocate memory for write_urb transfer buffer 271 // allocate memory for write_urb transfer buffer
272 port->write_urb->transfer_buffer = (unsigned char *) kmalloc(write_urb_transfer_buffer_length, GFP_KERNEL); 272 port->write_urb->transfer_buffer = kmalloc(write_urb_transfer_buffer_length, GFP_KERNEL);
273 if (! port->write_urb->transfer_buffer) { 273 if (! port->write_urb->transfer_buffer) {
274 kfree(transfer_buffer); 274 kfree(transfer_buffer);
275 usb_free_urb(port->write_urb); 275 usb_free_urb(port->write_urb);
@@ -696,7 +696,7 @@ static int kobil_ioctl(struct usb_serial_port *port, struct file *file,
696 return 0; 696 return 0;
697 697
698 case TCFLSH: // 0x540B 698 case TCFLSH: // 0x540B
699 transfer_buffer = (unsigned char *) kmalloc(transfer_buffer_length, GFP_KERNEL); 699 transfer_buffer = kmalloc(transfer_buffer_length, GFP_KERNEL);
700 if (! transfer_buffer) { 700 if (! transfer_buffer) {
701 return -ENOBUFS; 701 return -ENOBUFS;
702 } 702 }
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index d124d780e42e..5dc2ac9afa90 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -159,7 +159,7 @@ static struct pl2303_buf *pl2303_buf_alloc(unsigned int size)
159 if (size == 0) 159 if (size == 0)
160 return NULL; 160 return NULL;
161 161
162 pb = (struct pl2303_buf *)kmalloc(sizeof(struct pl2303_buf), GFP_KERNEL); 162 pb = kmalloc(sizeof(struct pl2303_buf), GFP_KERNEL);
163 if (pb == NULL) 163 if (pb == NULL)
164 return NULL; 164 return NULL;
165 165
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index f42eb9ea6405..83189005c6fb 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1710,7 +1710,7 @@ static struct circ_buf *ti_buf_alloc(void)
1710{ 1710{
1711 struct circ_buf *cb; 1711 struct circ_buf *cb;
1712 1712
1713 cb = (struct circ_buf *)kmalloc(sizeof(struct circ_buf), GFP_KERNEL); 1713 cb = kmalloc(sizeof(struct circ_buf), GFP_KERNEL);
1714 if (cb == NULL) 1714 if (cb == NULL)
1715 return NULL; 1715 return NULL;
1716 1716
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index dc45e58e2b8c..5483d8564c1b 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -416,7 +416,7 @@ static int whiteheat_attach (struct usb_serial *serial)
416 for (i = 0; i < serial->num_ports; i++) { 416 for (i = 0; i < serial->num_ports; i++) {
417 port = serial->port[i]; 417 port = serial->port[i];
418 418
419 info = (struct whiteheat_private *)kmalloc(sizeof(struct whiteheat_private), GFP_KERNEL); 419 info = kmalloc(sizeof(struct whiteheat_private), GFP_KERNEL);
420 if (info == NULL) { 420 if (info == NULL) {
421 err("%s: Out of memory for port structures\n", serial->type->description); 421 err("%s: Out of memory for port structures\n", serial->type->description);
422 goto no_private; 422 goto no_private;
@@ -487,7 +487,7 @@ static int whiteheat_attach (struct usb_serial *serial)
487 usb_set_serial_port_data(port, info); 487 usb_set_serial_port_data(port, info);
488 } 488 }
489 489
490 command_info = (struct whiteheat_command_private *)kmalloc(sizeof(struct whiteheat_command_private), GFP_KERNEL); 490 command_info = kmalloc(sizeof(struct whiteheat_command_private), GFP_KERNEL);
491 if (command_info == NULL) { 491 if (command_info == NULL) {
492 err("%s: Out of memory for port structures\n", serial->type->description); 492 err("%s: Out of memory for port structures\n", serial->type->description);
493 goto no_command_private; 493 goto no_command_private;
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index fb8bacaae27c..e3528eca29a5 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -646,7 +646,7 @@ sddr09_read_sg_test_only(struct us_data *us) {
646 return result; 646 return result;
647 } 647 }
648 648
649 buf = (unsigned char *) kmalloc(bulklen, GFP_NOIO); 649 buf = kmalloc(bulklen, GFP_NOIO);
650 if (!buf) 650 if (!buf)
651 return -ENOMEM; 651 return -ENOMEM;
652 652
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 6761b68c35e9..6c9dc2e69c82 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -447,7 +447,7 @@ static int clcdfb_probe(struct amba_device *dev, void *id)
447 goto out; 447 goto out;
448 } 448 }
449 449
450 fb = (struct clcd_fb *) kmalloc(sizeof(struct clcd_fb), GFP_KERNEL); 450 fb = kmalloc(sizeof(struct clcd_fb), GFP_KERNEL);
451 if (!fb) { 451 if (!fb) {
452 printk(KERN_INFO "CLCD: could not allocate new clcd_fb struct\n"); 452 printk(KERN_INFO "CLCD: could not allocate new clcd_fb struct\n");
453 ret = -ENOMEM; 453 ret = -ENOMEM;
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index 88a47845c4f7..1a849b870bcc 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -2906,14 +2906,6 @@ static int ami_decode_var(struct fb_var_screeninfo *var,
2906 par->crsr.spot_x = par->crsr.spot_y = 0; 2906 par->crsr.spot_x = par->crsr.spot_y = 0;
2907 par->crsr.height = par->crsr.width = 0; 2907 par->crsr.height = par->crsr.width = 0;
2908 2908
2909#if 0 /* fbmon not done. uncomment for 2.5.x -brad */
2910 if (!fbmon_valid_timings(pixclock[clk_shift], htotal, vtotal,
2911 &fb_info)) {
2912 DPRINTK("mode doesn't fit for monitor\n");
2913 return -EINVAL;
2914 }
2915#endif
2916
2917 return 0; 2909 return 0;
2918} 2910}
2919 2911
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 176f9b85cdbe..09684d7a7ce9 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -1488,10 +1488,6 @@ static int atyfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
1488 else 1488 else
1489 info->var.accel_flags = 0; 1489 info->var.accel_flags = 0;
1490 1490
1491#if 0 /* fbmon is not done. uncomment for 2.5.x -brad */
1492 if (!fbmon_valid_timings(pixclock, htotal, vtotal, info))
1493 return -EINVAL;
1494#endif
1495 aty_crtc_to_var(&crtc, var); 1491 aty_crtc_to_var(&crtc, var);
1496 var->pixclock = par->pll_ops->pll_to_var(info, &pll); 1492 var->pixclock = par->pll_ops->pll_to_var(info, &pll);
1497 return 0; 1493 return 0;
diff --git a/drivers/video/matrox/i2c-matroxfb.c b/drivers/video/matrox/i2c-matroxfb.c
index 797b42305b0f..fe28848e7b52 100644
--- a/drivers/video/matrox/i2c-matroxfb.c
+++ b/drivers/video/matrox/i2c-matroxfb.c
@@ -146,7 +146,7 @@ static void* i2c_matroxfb_probe(struct matrox_fb_info* minfo) {
146 unsigned long flags; 146 unsigned long flags;
147 struct matroxfb_dh_maven_info* m2info; 147 struct matroxfb_dh_maven_info* m2info;
148 148
149 m2info = (struct matroxfb_dh_maven_info*)kmalloc(sizeof(*m2info), GFP_KERNEL); 149 m2info = kmalloc(sizeof(*m2info), GFP_KERNEL);
150 if (!m2info) 150 if (!m2info)
151 return NULL; 151 return NULL;
152 152
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index e9b4115fcad0..cb2aa402ddfd 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -2028,7 +2028,7 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
2028 } 2028 }
2029 2029
2030#ifdef CONFIG_FB_MATROX_MULTIHEAD 2030#ifdef CONFIG_FB_MATROX_MULTIHEAD
2031 minfo = (struct matrox_fb_info*)kmalloc(sizeof(*minfo), GFP_KERNEL); 2031 minfo = kmalloc(sizeof(*minfo), GFP_KERNEL);
2032 if (!minfo) 2032 if (!minfo)
2033 return -1; 2033 return -1;
2034#else 2034#else
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c
index 27eb4bb4f89f..2c9801090fae 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/matrox/matroxfb_crtc2.c
@@ -694,7 +694,7 @@ static void* matroxfb_crtc2_probe(struct matrox_fb_info* minfo) {
694 /* hardware is CRTC2 incapable... */ 694 /* hardware is CRTC2 incapable... */
695 if (!ACCESS_FBINFO(devflags.crtc2)) 695 if (!ACCESS_FBINFO(devflags.crtc2))
696 return NULL; 696 return NULL;
697 m2info = (struct matroxfb_dh_fb_info*)kmalloc(sizeof(*m2info), GFP_KERNEL); 697 m2info = kmalloc(sizeof(*m2info), GFP_KERNEL);
698 if (!m2info) { 698 if (!m2info) {
699 printk(KERN_ERR "matroxfb_crtc2: Not enough memory for CRTC2 control structs\n"); 699 printk(KERN_ERR "matroxfb_crtc2: Not enough memory for CRTC2 control structs\n");
700 return NULL; 700 return NULL;
diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c
index 711cb11d6eb3..59cd1e750f30 100644
--- a/drivers/video/sstfb.c
+++ b/drivers/video/sstfb.c
@@ -21,6 +21,11 @@
21 * Remove never finished and bogus 24/32bit support 21 * Remove never finished and bogus 24/32bit support
22 * Clean up macro abuse 22 * Clean up macro abuse
23 * Minor tidying for format. 23 * Minor tidying for format.
24 * 12/2006 Helge Deller <deller@gmx.de>
25 * add /sys/class/graphics/fbX/vgapass sysfs-interface
26 * add module option "mode_option" to set initial screen mode
27 * use fbdev default videomode database
28 * remove debug functions from ioctl
24 */ 29 */
25 30
26/* 31/*
@@ -65,19 +70,10 @@
65 * 70 *
66 * sstfb specific ioctls: 71 * sstfb specific ioctls:
67 * toggle vga (0x46db) : toggle vga_pass_through 72 * toggle vga (0x46db) : toggle vga_pass_through
68 * fill fb (0x46dc) : fills fb
69 * test disp (0x46de) : draws a test image
70 */ 73 */
71 74
72#undef SST_DEBUG 75#undef SST_DEBUG
73 76
74/*
75 Default video mode .
76 0 800x600@60 took from glide
77 1 640x480@75 took from glide
78 2 1024x768@76 std fb.mode
79 3 640x480@60 glide default */
80#define DEFAULT_MODE 3
81 77
82/* 78/*
83 * Includes 79 * Includes
@@ -92,20 +88,24 @@
92#include <linux/init.h> 88#include <linux/init.h>
93#include <linux/slab.h> 89#include <linux/slab.h>
94#include <asm/io.h> 90#include <asm/io.h>
95#include <asm/ioctl.h>
96#include <asm/uaccess.h> 91#include <asm/uaccess.h>
97#include <video/sstfb.h> 92#include <video/sstfb.h>
98 93
99 94
100/* initialized by setup */ 95/* initialized by setup */
101 96
102static int vgapass; /* enable Vga passthrough cable */ 97static int vgapass; /* enable VGA passthrough cable */
103static int mem; /* mem size in MB, 0 = autodetect */ 98static int mem; /* mem size in MB, 0 = autodetect */
104static int clipping = 1; /* use clipping (slower, safer) */ 99static int clipping = 1; /* use clipping (slower, safer) */
105static int gfxclk; /* force FBI freq in Mhz . Dangerous */ 100static int gfxclk; /* force FBI freq in Mhz . Dangerous */
106static int slowpci; /* slow PCI settings */ 101static int slowpci; /* slow PCI settings */
107 102
108static char *mode_option __devinitdata; 103/*
104 Possible default video modes: 800x600@60, 640x480@75, 1024x768@76, 640x480@60
105*/
106#define DEFAULT_VIDEO_MODE "640x480@60"
107
108static char *mode_option __devinitdata = DEFAULT_VIDEO_MODE;
109 109
110enum { 110enum {
111 ID_VOODOO1 = 0, 111 ID_VOODOO1 = 0,
@@ -119,48 +119,11 @@ static struct sst_spec voodoo_spec[] __devinitdata = {
119 { .name = "Voodoo2", .default_gfx_clock = 75000, .max_gfxclk = 85 }, 119 { .name = "Voodoo2", .default_gfx_clock = 75000, .max_gfxclk = 85 },
120}; 120};
121 121
122static struct fb_var_screeninfo sstfb_default =
123#if ( DEFAULT_MODE == 0 )
124 { /* 800x600@60, 16 bpp .borowed from glide/sst1/include/sst1init.h */
125 800, 600, 800, 600, 0, 0, 16, 0,
126 {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0},
127 0, 0, -1, -1, 0,
128 25000, 86, 41, 23, 1, 127, 4,
129 0, FB_VMODE_NONINTERLACED };
130#elif ( DEFAULT_MODE == 1 )
131 {/* 640x480@75, 16 bpp .borowed from glide/sst1/include/sst1init.h */
132 640, 480, 640, 480, 0, 0, 16, 0,
133 {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0},
134 0, 0, -1, -1, 0,
135 31746, 118, 17, 16, 1, 63, 3,
136 0, FB_VMODE_NONINTERLACED };
137#elif ( DEFAULT_MODE == 2 )
138 { /* 1024x768@76 took from my /etc/fb.modes */
139 1024, 768, 1024, 768,0, 0, 16,0,
140 {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0},
141 0, 0, -1, -1, 0,
142 11764, 208, 8, 36, 16, 120, 3 ,
143 0, FB_VMODE_NONINTERLACED };
144#elif ( DEFAULT_MODE == 3 )
145 { /* 640x480@60 , 16bpp glide default ?*/
146 640, 480, 640, 480, 0, 0, 16, 0,
147 {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0},
148 0, 0, -1, -1, 0,
149 39721 , 38, 26 , 25 ,18 , 96 ,2,
150 0, FB_VMODE_NONINTERLACED };
151#elif
152 #error "Invalid DEFAULT_MODE value !"
153#endif
154
155 122
156/* 123/*
157 * debug functions 124 * debug functions
158 */ 125 */
159 126
160static void sstfb_drawdebugimage(struct fb_info *info);
161static int sstfb_dump_regs(struct fb_info *info);
162
163
164#if (SST_DEBUG_REG > 0) 127#if (SST_DEBUG_REG > 0)
165static void sst_dbg_print_read_reg(u32 reg, u32 val) { 128static void sst_dbg_print_read_reg(u32 reg, u32 val) {
166 const char *regname; 129 const char *regname;
@@ -726,51 +689,77 @@ static int sstfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
726 return 0; 689 return 0;
727} 690}
728 691
729static int sstfb_ioctl(struct fb_info *info, u_int cmd, u_long arg) 692static void sstfb_setvgapass( struct fb_info *info, int enable )
730{ 693{
731 struct sstfb_par *par = info->par; 694 struct sstfb_par *par = info->par;
732 struct pci_dev *sst_dev = par->dev; 695 struct pci_dev *sst_dev = par->dev;
733 u32 fbiinit0, tmp, val; 696 u32 fbiinit0, tmp;
734 u_long p; 697
698 enable = enable ? 1:0;
699 if (par->vgapass == enable)
700 return;
701 par->vgapass = enable;
702
703 pci_read_config_dword(sst_dev, PCI_INIT_ENABLE, &tmp);
704 pci_write_config_dword(sst_dev, PCI_INIT_ENABLE,
705 tmp | PCI_EN_INIT_WR );
706 fbiinit0 = sst_read (FBIINIT0);
707 if (par->vgapass) {
708 sst_write(FBIINIT0, fbiinit0 & ~DIS_VGA_PASSTHROUGH);
709 printk(KERN_INFO "fb%d: Enabling VGA pass-through\n", info->node );
710 } else {
711 sst_write(FBIINIT0, fbiinit0 | DIS_VGA_PASSTHROUGH);
712 printk(KERN_INFO "fb%d: Disabling VGA pass-through\n", info->node );
713 }
714 pci_write_config_dword(sst_dev, PCI_INIT_ENABLE, tmp);
715}
716
717static ssize_t store_vgapass(struct device *device, struct device_attribute *attr,
718 const char *buf, size_t count)
719{
720 struct fb_info *info = dev_get_drvdata(device);
721 char ** last = NULL;
722 int val;
723
724 val = simple_strtoul(buf, last, 0);
725 sstfb_setvgapass(info, val);
726
727 return count;
728}
729
730static ssize_t show_vgapass(struct device *device, struct device_attribute *attr,
731 char *buf)
732{
733 struct fb_info *info = dev_get_drvdata(device);
734 struct sstfb_par *par = info->par;
735 return snprintf(buf, PAGE_SIZE, "%d\n", par->vgapass);
736}
737
738static struct device_attribute device_attrs[] = {
739 __ATTR(vgapass, S_IRUGO|S_IWUSR, show_vgapass, store_vgapass)
740 };
741
742static int sstfb_ioctl(struct fb_info *info, unsigned int cmd,
743 unsigned long arg)
744{
745 struct sstfb_par *par;
746 u32 val;
735 747
736 switch (cmd) { 748 switch (cmd) {
737 749 /* set/get VGA pass_through mode */
738 /* dump current FBIINIT values to system log */ 750 case SSTFB_SET_VGAPASS:
739 case _IO('F', 0xdb): /* 0x46db */
740 return sstfb_dump_regs(info);
741
742 /* fills lfb with #arg pixels */
743 case _IOW('F', 0xdc, u32): /* 0x46dc */
744 if (copy_from_user(&val, (void __user *)arg, sizeof(val))) 751 if (copy_from_user(&val, (void __user *)arg, sizeof(val)))
745 return -EFAULT; 752 return -EFAULT;
746 if (val > info->fix.smem_len) 753 sstfb_setvgapass(info, val);
747 val = info->fix.smem_len;
748 for (p = 0 ; p < val; p += 2)
749 writew(p >> 6, info->screen_base + p);
750 return 0; 754 return 0;
751 755 case SSTFB_GET_VGAPASS:
752 /* change VGA pass_through mode */ 756 par = info->par;
753 case _IOW('F', 0xdd, u32): /* 0x46dd */ 757 val = par->vgapass;
754 if (copy_from_user(&val, (void __user *)arg, sizeof(val))) 758 if (copy_to_user((void __user *)arg, &val, sizeof(val)))
755 return -EFAULT; 759 return -EFAULT;
756 pci_read_config_dword(sst_dev, PCI_INIT_ENABLE, &tmp);
757 pci_write_config_dword(sst_dev, PCI_INIT_ENABLE,
758 tmp | PCI_EN_INIT_WR );
759 fbiinit0 = sst_read (FBIINIT0);
760 if (val)
761 sst_write(FBIINIT0, fbiinit0 & ~EN_VGA_PASSTHROUGH);
762 else
763 sst_write(FBIINIT0, fbiinit0 | EN_VGA_PASSTHROUGH);
764 pci_write_config_dword(sst_dev, PCI_INIT_ENABLE, tmp);
765 return 0;
766
767 /* draw test image */
768 case _IO('F', 0xde): /* 0x46de */
769 f_dprintk("test color display at %d bpp\n",
770 info->var.bits_per_pixel);
771 sstfb_drawdebugimage(info);
772 return 0; 760 return 0;
773 } 761 }
762
774 return -EINVAL; 763 return -EINVAL;
775} 764}
776 765
@@ -804,6 +793,7 @@ static void sstfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
804/* 793/*
805 * FillRect 2D command (solidfill or invert (via ROP_XOR)) - Voodoo2 only 794 * FillRect 2D command (solidfill or invert (via ROP_XOR)) - Voodoo2 only
806 */ 795 */
796#if 0
807static void sstfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 797static void sstfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
808{ 798{
809 struct sstfb_par *par = info->par; 799 struct sstfb_par *par = info->par;
@@ -825,6 +815,7 @@ static void sstfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
825 | (BLT_16BPP_FMT << 3) /* | BIT(14) */ | BIT(15) | BIT(16) ); 815 | (BLT_16BPP_FMT << 3) /* | BIT(14) */ | BIT(15) | BIT(16) );
826 sst_wait_idle(); 816 sst_wait_idle();
827} 817}
818#endif
828 819
829 820
830 821
@@ -1156,6 +1147,7 @@ static int __devinit sst_init(struct fb_info *info, struct sstfb_par *par)
1156 struct pll_timing gfx_timings; 1147 struct pll_timing gfx_timings;
1157 struct sst_spec *spec; 1148 struct sst_spec *spec;
1158 int Fout; 1149 int Fout;
1150 int gfx_clock;
1159 1151
1160 spec = &voodoo_spec[par->type]; 1152 spec = &voodoo_spec[par->type];
1161 f_ddprintk(" fbiinit0 fbiinit1 fbiinit2 fbiinit3 fbiinit4 " 1153 f_ddprintk(" fbiinit0 fbiinit1 fbiinit2 fbiinit3 fbiinit4 "
@@ -1196,15 +1188,15 @@ static int __devinit sst_init(struct fb_info *info, struct sstfb_par *par)
1196 } 1188 }
1197 1189
1198 /* set graphic clock */ 1190 /* set graphic clock */
1199 par->gfx_clock = spec->default_gfx_clock; 1191 gfx_clock = spec->default_gfx_clock;
1200 if ((gfxclk >10 ) && (gfxclk < spec->max_gfxclk)) { 1192 if ((gfxclk >10 ) && (gfxclk < spec->max_gfxclk)) {
1201 printk(KERN_INFO "sstfb: Using supplied graphic freq : %dMHz\n", gfxclk); 1193 printk(KERN_INFO "sstfb: Using supplied graphic freq : %dMHz\n", gfxclk);
1202 par->gfx_clock = gfxclk *1000; 1194 gfx_clock = gfxclk *1000;
1203 } else if (gfxclk) { 1195 } else if (gfxclk) {
1204 printk(KERN_WARNING "sstfb: %dMhz is way out of spec! Using default\n", gfxclk); 1196 printk(KERN_WARNING "sstfb: %dMhz is way out of spec! Using default\n", gfxclk);
1205 } 1197 }
1206 1198
1207 sst_calc_pll(par->gfx_clock, &Fout, &gfx_timings); 1199 sst_calc_pll(gfx_clock, &Fout, &gfx_timings);
1208 par->dac_sw.set_pll(info, &gfx_timings, GFX_CLOCK); 1200 par->dac_sw.set_pll(info, &gfx_timings, GFX_CLOCK);
1209 1201
1210 /* disable fbiinit remap */ 1202 /* disable fbiinit remap */
@@ -1215,10 +1207,11 @@ static int __devinit sst_init(struct fb_info *info, struct sstfb_par *par)
1215 fbiinit0 = FBIINIT0_DEFAULT; 1207 fbiinit0 = FBIINIT0_DEFAULT;
1216 fbiinit1 = FBIINIT1_DEFAULT; 1208 fbiinit1 = FBIINIT1_DEFAULT;
1217 fbiinit4 = FBIINIT4_DEFAULT; 1209 fbiinit4 = FBIINIT4_DEFAULT;
1218 if (vgapass) 1210 par->vgapass = vgapass;
1219 fbiinit0 &= ~EN_VGA_PASSTHROUGH; 1211 if (par->vgapass)
1212 fbiinit0 &= ~DIS_VGA_PASSTHROUGH;
1220 else 1213 else
1221 fbiinit0 |= EN_VGA_PASSTHROUGH; 1214 fbiinit0 |= DIS_VGA_PASSTHROUGH;
1222 if (slowpci) { 1215 if (slowpci) {
1223 fbiinit1 |= SLOW_PCI_WRITES; 1216 fbiinit1 |= SLOW_PCI_WRITES;
1224 fbiinit4 |= SLOW_PCI_READS; 1217 fbiinit4 |= SLOW_PCI_READS;
@@ -1267,7 +1260,7 @@ static void __devexit sst_shutdown(struct fb_info *info)
1267 /* TODO maybe shutdown the dac, vrefresh and so on... */ 1260 /* TODO maybe shutdown the dac, vrefresh and so on... */
1268 pci_write_config_dword(dev, PCI_INIT_ENABLE, 1261 pci_write_config_dword(dev, PCI_INIT_ENABLE,
1269 PCI_EN_INIT_WR); 1262 PCI_EN_INIT_WR);
1270 sst_unset_bits(FBIINIT0, FBI_RESET | FIFO_RESET | EN_VGA_PASSTHROUGH); 1263 sst_unset_bits(FBIINIT0, FBI_RESET | FIFO_RESET | DIS_VGA_PASSTHROUGH);
1271 pci_write_config_dword(dev, PCI_VCLK_DISABLE,0); 1264 pci_write_config_dword(dev, PCI_VCLK_DISABLE,0);
1272 /* maybe keep fbiinit* and PCI_INIT_enable in the fb_info struct 1265 /* maybe keep fbiinit* and PCI_INIT_enable in the fb_info struct
1273 * from start ? */ 1266 * from start ? */
@@ -1278,8 +1271,7 @@ static void __devexit sst_shutdown(struct fb_info *info)
1278/* 1271/*
1279 * Interface to the world 1272 * Interface to the world
1280 */ 1273 */
1281#ifndef MODULE 1274static int __devinit sstfb_setup(char *options)
1282static int __init sstfb_setup(char *options)
1283{ 1275{
1284 char *this_opt; 1276 char *this_opt;
1285 1277
@@ -1312,7 +1304,7 @@ static int __init sstfb_setup(char *options)
1312 } 1304 }
1313 return 0; 1305 return 0;
1314} 1306}
1315#endif 1307
1316 1308
1317static struct fb_ops sstfb_ops = { 1309static struct fb_ops sstfb_ops = {
1318 .owner = THIS_MODULE, 1310 .owner = THIS_MODULE,
@@ -1416,15 +1408,10 @@ static int __devinit sstfb_probe(struct pci_dev *pdev,
1416 */ 1408 */
1417 fix->line_length = 2048; /* default value, for 24 or 32bit: 4096 */ 1409 fix->line_length = 2048; /* default value, for 24 or 32bit: 4096 */
1418 1410
1419 if ( mode_option && 1411 fb_find_mode(&info->var, info, mode_option, NULL, 0, NULL, 16);
1420 fb_find_mode(&info->var, info, mode_option, NULL, 0, NULL, 16)) {
1421 printk(KERN_ERR "sstfb: can't set supplied video mode. Using default\n");
1422 info->var = sstfb_default;
1423 } else
1424 info->var = sstfb_default;
1425 1412
1426 if (sstfb_check_var(&info->var, info)) { 1413 if (sstfb_check_var(&info->var, info)) {
1427 printk(KERN_ERR "sstfb: invalid default video mode.\n"); 1414 printk(KERN_ERR "sstfb: invalid video mode.\n");
1428 goto fail; 1415 goto fail;
1429 } 1416 }
1430 1417
@@ -1442,10 +1429,11 @@ static int __devinit sstfb_probe(struct pci_dev *pdev,
1442 goto fail; 1429 goto fail;
1443 } 1430 }
1444 1431
1445 if (1) /* set to 0 to see an initial bitmap instead */ 1432 sstfb_clear_screen(info);
1446 sstfb_clear_screen(info); 1433
1447 else 1434 if (device_create_file(info->dev, &device_attrs[0]))
1448 sstfb_drawdebugimage(info); 1435 printk(KERN_WARNING "sstfb: can't create sysfs entry.\n");
1436
1449 1437
1450 printk(KERN_INFO "fb%d: %s frame buffer device at 0x%p\n", 1438 printk(KERN_INFO "fb%d: %s frame buffer device at 0x%p\n",
1451 info->node, fix->id, info->screen_base); 1439 info->node, fix->id, info->screen_base);
@@ -1453,6 +1441,7 @@ static int __devinit sstfb_probe(struct pci_dev *pdev,
1453 return 0; 1441 return 0;
1454 1442
1455fail: 1443fail:
1444 fb_dealloc_cmap(&info->cmap);
1456 iounmap(info->screen_base); 1445 iounmap(info->screen_base);
1457fail_fb_remap: 1446fail_fb_remap:
1458 iounmap(par->mmio_vbase); 1447 iounmap(par->mmio_vbase);
@@ -1473,21 +1462,23 @@ static void __devexit sstfb_remove(struct pci_dev *pdev)
1473 info = pci_get_drvdata(pdev); 1462 info = pci_get_drvdata(pdev);
1474 par = info->par; 1463 par = info->par;
1475 1464
1465 device_remove_file(info->dev, &device_attrs[0]);
1476 sst_shutdown(info); 1466 sst_shutdown(info);
1477 unregister_framebuffer(info);
1478 iounmap(info->screen_base); 1467 iounmap(info->screen_base);
1479 iounmap(par->mmio_vbase); 1468 iounmap(par->mmio_vbase);
1480 release_mem_region(info->fix.smem_start, 0x400000); 1469 release_mem_region(info->fix.smem_start, 0x400000);
1481 release_mem_region(info->fix.mmio_start, info->fix.mmio_len); 1470 release_mem_region(info->fix.mmio_start, info->fix.mmio_len);
1471 fb_dealloc_cmap(&info->cmap);
1472 unregister_framebuffer(info);
1482 framebuffer_release(info); 1473 framebuffer_release(info);
1483} 1474}
1484 1475
1485 1476
1486static struct pci_device_id sstfb_id_tbl[] = { 1477static const struct pci_device_id sstfb_id_tbl[] = {
1487 { PCI_VENDOR_ID_3DFX, PCI_DEVICE_ID_3DFX_VOODOO, 1478 { PCI_DEVICE(PCI_VENDOR_ID_3DFX, PCI_DEVICE_ID_3DFX_VOODOO ),
1488 PCI_ANY_ID, PCI_ANY_ID, 0, 0, ID_VOODOO1 }, 1479 .driver_data = ID_VOODOO1, },
1489 { PCI_VENDOR_ID_3DFX, PCI_DEVICE_ID_3DFX_VOODOO2, 1480 { PCI_DEVICE(PCI_VENDOR_ID_3DFX, PCI_DEVICE_ID_3DFX_VOODOO2),
1490 PCI_ANY_ID, PCI_ANY_ID, 0, 0, ID_VOODOO2 }, 1481 .driver_data = ID_VOODOO2, },
1491 { 0 }, 1482 { 0 },
1492}; 1483};
1493 1484
@@ -1501,142 +1492,23 @@ static struct pci_driver sstfb_driver = {
1501 1492
1502static int __devinit sstfb_init(void) 1493static int __devinit sstfb_init(void)
1503{ 1494{
1504#ifndef MODULE
1505 char *option = NULL; 1495 char *option = NULL;
1506 1496
1507 if (fb_get_options("sstfb", &option)) 1497 if (fb_get_options("sstfb", &option))
1508 return -ENODEV; 1498 return -ENODEV;
1509 sstfb_setup(option); 1499 sstfb_setup(option);
1510#endif 1500
1511 return pci_register_driver(&sstfb_driver); 1501 return pci_register_driver(&sstfb_driver);
1512} 1502}
1513 1503
1514#ifdef MODULE
1515static void __devexit sstfb_exit(void) 1504static void __devexit sstfb_exit(void)
1516{ 1505{
1517 pci_unregister_driver(&sstfb_driver); 1506 pci_unregister_driver(&sstfb_driver);
1518} 1507}
1519#endif
1520 1508
1521 1509
1522/*
1523 * testing and debugging functions
1524 */
1525
1526static int sstfb_dump_regs(struct fb_info *info)
1527{
1528#ifdef SST_DEBUG
1529 static struct { u32 reg ; const char *reg_name;} pci_regs[] = {
1530 { PCI_INIT_ENABLE, "initenable"},
1531 { PCI_VCLK_ENABLE, "enable vclk"},
1532 { PCI_VCLK_DISABLE, "disable vclk"},
1533 };
1534
1535 static struct { u32 reg ; const char *reg_name;} sst_regs[] = {
1536 {FBIINIT0,"fbiinit0"},
1537 {FBIINIT1,"fbiinit1"},
1538 {FBIINIT2,"fbiinit2"},
1539 {FBIINIT3,"fbiinit3"},
1540 {FBIINIT4,"fbiinit4"},
1541 {FBIINIT5,"fbiinit5"},
1542 {FBIINIT6,"fbiinit6"},
1543 {FBIINIT7,"fbiinit7"},
1544 {LFBMODE,"lfbmode"},
1545 {FBZMODE,"fbzmode"},
1546 };
1547
1548 const int pci_s = ARRAY_SIZE(pci_regs);
1549 const int sst_s = ARRAY_SIZE(sst_regs);
1550 struct sstfb_par *par = info->par;
1551 struct pci_dev *dev = par->dev;
1552 u32 pci_res[pci_s];
1553 u32 sst_res[sst_s];
1554 int i;
1555
1556 for (i=0; i<pci_s; i++) {
1557 pci_read_config_dword(dev, pci_regs[i].reg, &pci_res[i]);
1558 }
1559 for (i=0; i<sst_s; i++) {
1560 sst_res[i] = sst_read(sst_regs[i].reg);
1561 }
1562
1563 dprintk("hardware register dump:\n");
1564 for (i=0; i<pci_s; i++) {
1565 dprintk("%s %0#10x\n", pci_regs[i].reg_name, pci_res[i]);
1566 }
1567 for (i=0; i<sst_s; i++) {
1568 dprintk("%s %0#10x\n", sst_regs[i].reg_name, sst_res[i]);
1569 }
1570 return 0;
1571#else
1572 return -EINVAL;
1573#endif
1574}
1575
1576static void sstfb_fillrect_softw( struct fb_info *info, const struct fb_fillrect *rect)
1577{
1578 u8 __iomem *fbbase_virt = info->screen_base;
1579 int x, y, w = info->var.bits_per_pixel == 16 ? 2 : 4;
1580 u32 color = rect->color, height = rect->height;
1581 u8 __iomem *p;
1582
1583 if (w==2) color |= color<<16;
1584 for (y=rect->dy; height; y++, height--) {
1585 p = fbbase_virt + y*info->fix.line_length + rect->dx*w;
1586 x = rect->width;
1587 if (w==2) x>>=1;
1588 while (x) {
1589 writel(color, p);
1590 p += 4;
1591 x--;
1592 }
1593 }
1594}
1595
1596static void sstfb_drawrect_XY( struct fb_info *info, int x, int y,
1597 int w, int h, int color, int hwfunc)
1598{
1599 struct fb_fillrect rect;
1600 rect.dx = x;
1601 rect.dy = y;
1602 rect.height = h;
1603 rect.width = w;
1604 rect.color = color;
1605 rect.rop = ROP_COPY;
1606 if (hwfunc)
1607 sstfb_fillrect(info, &rect);
1608 else
1609 sstfb_fillrect_softw(info, &rect);
1610}
1611
1612/* print some squares on the fb */
1613static void sstfb_drawdebugimage(struct fb_info *info)
1614{
1615 static int idx;
1616
1617 /* clear screen */
1618 sstfb_clear_screen(info);
1619
1620 idx = (idx+1) & 1;
1621
1622 /* white rect */
1623 sstfb_drawrect_XY(info, 0, 0, 50, 50, 0xffff, idx);
1624
1625 /* blue rect */
1626 sstfb_drawrect_XY(info, 50, 50, 50, 50, 0x001f, idx);
1627
1628 /* green rect */
1629 sstfb_drawrect_XY(info, 100, 100, 80, 80, 0x07e0, idx);
1630
1631 /* red rect */
1632 sstfb_drawrect_XY(info, 250, 250, 120, 100, 0xf800, idx);
1633}
1634
1635module_init(sstfb_init); 1510module_init(sstfb_init);
1636
1637#ifdef MODULE
1638module_exit(sstfb_exit); 1511module_exit(sstfb_exit);
1639#endif
1640 1512
1641MODULE_AUTHOR("(c) 2000,2002 Ghozlane Toumi <gtoumi@laposte.net>"); 1513MODULE_AUTHOR("(c) 2000,2002 Ghozlane Toumi <gtoumi@laposte.net>");
1642MODULE_DESCRIPTION("FBDev driver for 3dfx Voodoo Graphics and Voodoo2 based video boards"); 1514MODULE_DESCRIPTION("FBDev driver for 3dfx Voodoo Graphics and Voodoo2 based video boards");
@@ -1652,3 +1524,6 @@ module_param(gfxclk, int, 0);
1652MODULE_PARM_DESC(gfxclk, "Force graphic chip frequency in MHz. DANGEROUS. (default=auto)"); 1524MODULE_PARM_DESC(gfxclk, "Force graphic chip frequency in MHz. DANGEROUS. (default=auto)");
1653module_param(slowpci, bool, 0); 1525module_param(slowpci, bool, 0);
1654MODULE_PARM_DESC(slowpci, "Uses slow PCI settings (0 or 1) (default=0)"); 1526MODULE_PARM_DESC(slowpci, "Uses slow PCI settings (0 or 1) (default=0)");
1527module_param(mode_option, charp, 0);
1528MODULE_PARM_DESC(mode_option, "Initial video mode (default=" DEFAULT_VIDEO_MODE ")");
1529
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index d18d6424cd21..904e5aeb696c 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -9,7 +9,7 @@ config W1_SLAVE_THERM
9 tristate "Thermal family implementation" 9 tristate "Thermal family implementation"
10 depends on W1 10 depends on W1
11 help 11 help
12 Say Y here if you want to connect 1-wire thermal sensors to you 12 Say Y here if you want to connect 1-wire thermal sensors to your
13 wire. 13 wire.
14 14
15config W1_SLAVE_SMEM 15config W1_SLAVE_SMEM
@@ -17,7 +17,7 @@ config W1_SLAVE_SMEM
17 depends on W1 17 depends on W1
18 help 18 help
19 Say Y here if you want to connect 1-wire 19 Say Y here if you want to connect 1-wire
20 simple 64bit memory rom(ds2401/ds2411/ds1990*) to you wire. 20 simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire.
21 21
22config W1_SLAVE_DS2433 22config W1_SLAVE_DS2433
23 tristate "4kb EEPROM family support (DS2433)" 23 tristate "4kb EEPROM family support (DS2433)"
diff --git a/fs/Kconfig b/fs/Kconfig
index b3b5aa0edff9..276ff3baaafe 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -12,9 +12,7 @@ config EXT2_FS
12 Ext2 is a standard Linux file system for hard disks. 12 Ext2 is a standard Linux file system for hard disks.
13 13
14 To compile this file system support as a module, choose M here: the 14 To compile this file system support as a module, choose M here: the
15 module will be called ext2. Be aware however that the file system 15 module will be called ext2.
16 of your root partition (the one containing the directory /) cannot
17 be compiled as a module, and so this could be dangerous.
18 16
19 If unsure, say Y. 17 If unsure, say Y.
20 18
@@ -98,9 +96,7 @@ config EXT3_FS
98 (available at <http://sourceforge.net/projects/e2fsprogs/>). 96 (available at <http://sourceforge.net/projects/e2fsprogs/>).
99 97
100 To compile this file system support as a module, choose M here: the 98 To compile this file system support as a module, choose M here: the
101 module will be called ext3. Be aware however that the file system 99 module will be called ext3.
102 of your root partition (the one containing the directory /) cannot
103 be compiled as a module, and so this may be dangerous.
104 100
105config EXT3_FS_XATTR 101config EXT3_FS_XATTR
106 bool "Ext3 extended attributes" 102 bool "Ext3 extended attributes"
@@ -163,9 +159,7 @@ config EXT4DEV_FS
163 features will be added to ext4dev gradually. 159 features will be added to ext4dev gradually.
164 160
165 To compile this file system support as a module, choose M here. The 161 To compile this file system support as a module, choose M here. The
166 module will be called ext4dev. Be aware, however, that the filesystem 162 module will be called ext4dev.
167 of your root partition (the one containing the directory /) cannot
168 be compiled as a module, and so this could be dangerous.
169 163
170 If unsure, say N. 164 If unsure, say N.
171 165
@@ -1008,7 +1002,7 @@ config TMPFS_POSIX_ACL
1008 1002
1009config HUGETLBFS 1003config HUGETLBFS
1010 bool "HugeTLB file system support" 1004 bool "HugeTLB file system support"
1011 depends X86 || IA64 || PPC64 || SPARC64 || SUPERH || BROKEN 1005 depends on X86 || IA64 || PPC64 || SPARC64 || SUPERH || BROKEN
1012 help 1006 help
1013 hugetlbfs is a filesystem backing for HugeTLB pages, based on 1007 hugetlbfs is a filesystem backing for HugeTLB pages, based on
1014 ramfs. For architectures that support it, say Y here and read 1008 ramfs. For architectures that support it, say Y here and read
diff --git a/fs/aio.c b/fs/aio.c
index d3a6ec2c9627..5f577a63bdf0 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -586,7 +586,7 @@ static void use_mm(struct mm_struct *mm)
586 * Note that on UML this *requires* PF_BORROWED_MM to be set, otherwise 586 * Note that on UML this *requires* PF_BORROWED_MM to be set, otherwise
587 * it won't work. Update it accordingly if you change it here 587 * it won't work. Update it accordingly if you change it here
588 */ 588 */
589 activate_mm(active_mm, mm); 589 switch_mm(active_mm, mm, tsk);
590 task_unlock(tsk); 590 task_unlock(tsk);
591 591
592 mmdrop(active_mm); 592 mmdrop(active_mm);
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 9c48250fd726..e8f6c5ad3e90 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -313,7 +313,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
313 struct autofs_sb_info *sbi; 313 struct autofs_sb_info *sbi;
314 struct autofs_info *ino; 314 struct autofs_info *ino;
315 315
316 sbi = (struct autofs_sb_info *) kmalloc(sizeof(*sbi), GFP_KERNEL); 316 sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);
317 if ( !sbi ) 317 if ( !sbi )
318 goto fail_unlock; 318 goto fail_unlock;
319 DPRINTK("starting up, sbi = %p",sbi); 319 DPRINTK("starting up, sbi = %p",sbi);
diff --git a/fs/befs/btree.c b/fs/befs/btree.c
index 81b042ee24e6..af5bb93276f8 100644
--- a/fs/befs/btree.c
+++ b/fs/befs/btree.c
@@ -260,7 +260,7 @@ befs_btree_find(struct super_block *sb, befs_data_stream * ds,
260 goto error; 260 goto error;
261 } 261 }
262 262
263 this_node = (befs_btree_node *) kmalloc(sizeof (befs_btree_node), 263 this_node = kmalloc(sizeof (befs_btree_node),
264 GFP_NOFS); 264 GFP_NOFS);
265 if (!this_node) { 265 if (!this_node) {
266 befs_error(sb, "befs_btree_find() failed to allocate %u " 266 befs_error(sb, "befs_btree_find() failed to allocate %u "
diff --git a/fs/befs/debug.c b/fs/befs/debug.c
index e831a8f30849..b8e304a0661e 100644
--- a/fs/befs/debug.c
+++ b/fs/befs/debug.c
@@ -28,7 +28,7 @@ void
28befs_error(const struct super_block *sb, const char *fmt, ...) 28befs_error(const struct super_block *sb, const char *fmt, ...)
29{ 29{
30 va_list args; 30 va_list args;
31 char *err_buf = (char *) kmalloc(ERRBUFSIZE, GFP_KERNEL); 31 char *err_buf = kmalloc(ERRBUFSIZE, GFP_KERNEL);
32 if (err_buf == NULL) { 32 if (err_buf == NULL) {
33 printk(KERN_ERR "could not allocate %d bytes\n", ERRBUFSIZE); 33 printk(KERN_ERR "could not allocate %d bytes\n", ERRBUFSIZE);
34 return; 34 return;
@@ -46,7 +46,7 @@ void
46befs_warning(const struct super_block *sb, const char *fmt, ...) 46befs_warning(const struct super_block *sb, const char *fmt, ...)
47{ 47{
48 va_list args; 48 va_list args;
49 char *err_buf = (char *) kmalloc(ERRBUFSIZE, GFP_KERNEL); 49 char *err_buf = kmalloc(ERRBUFSIZE, GFP_KERNEL);
50 if (err_buf == NULL) { 50 if (err_buf == NULL) {
51 printk(KERN_ERR "could not allocate %d bytes\n", ERRBUFSIZE); 51 printk(KERN_ERR "could not allocate %d bytes\n", ERRBUFSIZE);
52 return; 52 return;
@@ -70,7 +70,7 @@ befs_debug(const struct super_block *sb, const char *fmt, ...)
70 char *err_buf = NULL; 70 char *err_buf = NULL;
71 71
72 if (BEFS_SB(sb)->mount_opts.debug) { 72 if (BEFS_SB(sb)->mount_opts.debug) {
73 err_buf = (char *) kmalloc(ERRBUFSIZE, GFP_KERNEL); 73 err_buf = kmalloc(ERRBUFSIZE, GFP_KERNEL);
74 if (err_buf == NULL) { 74 if (err_buf == NULL) {
75 printk(KERN_ERR "could not allocate %d bytes\n", 75 printk(KERN_ERR "could not allocate %d bytes\n",
76 ERRBUFSIZE); 76 ERRBUFSIZE);
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index eac175ed9f44..134c99941a63 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * fs/bfs/inode.c 2 * fs/bfs/inode.c
3 * BFS superblock and inode operations. 3 * BFS superblock and inode operations.
4 * Copyright (C) 1999,2000 Tigran Aivazian <tigran@veritas.com> 4 * Copyright (C) 1999-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
5 * From fs/minix, Copyright (C) 1991, 1992 Linus Torvalds. 5 * From fs/minix, Copyright (C) 1991, 1992 Linus Torvalds.
6 * 6 *
7 * Made endianness-clean by Andrew Stribblehill <ads@wompom.org>, 2005. 7 * Made endianness-clean by Andrew Stribblehill <ads@wompom.org>, 2005.
@@ -18,7 +18,7 @@
18#include <asm/uaccess.h> 18#include <asm/uaccess.h>
19#include "bfs.h" 19#include "bfs.h"
20 20
21MODULE_AUTHOR("Tigran A. Aivazian <tigran@veritas.com>"); 21MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
22MODULE_DESCRIPTION("SCO UnixWare BFS filesystem for Linux"); 22MODULE_DESCRIPTION("SCO UnixWare BFS filesystem for Linux");
23MODULE_LICENSE("GPL"); 23MODULE_LICENSE("GPL");
24 24
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 76f06f6bc2f6..6e6d4568d548 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -706,12 +706,11 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params,
706 return -ELIBBAD; 706 return -ELIBBAD;
707 707
708 size = sizeof(*loadmap) + nloads * sizeof(*seg); 708 size = sizeof(*loadmap) + nloads * sizeof(*seg);
709 loadmap = kmalloc(size, GFP_KERNEL); 709 loadmap = kzalloc(size, GFP_KERNEL);
710 if (!loadmap) 710 if (!loadmap)
711 return -ENOMEM; 711 return -ENOMEM;
712 712
713 params->loadmap = loadmap; 713 params->loadmap = loadmap;
714 memset(loadmap, 0, size);
715 714
716 loadmap->version = ELF32_FDPIC_LOADMAP_VERSION; 715 loadmap->version = ELF32_FDPIC_LOADMAP_VERSION;
717 loadmap->nsegs = nloads; 716 loadmap->nsegs = nloads;
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 00687ea62738..c2e08252af35 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -311,7 +311,7 @@ static Node *create_entry(const char __user *buffer, size_t count)
311 311
312 err = -ENOMEM; 312 err = -ENOMEM;
313 memsize = sizeof(Node) + count + 8; 313 memsize = sizeof(Node) + count + 8;
314 e = (Node *) kmalloc(memsize, GFP_USER); 314 e = kmalloc(memsize, GFP_USER);
315 if (!e) 315 if (!e)
316 goto out; 316 goto out;
317 317
diff --git a/fs/bio.c b/fs/bio.c
index 7ec737eda72b..7618bcb18368 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -916,7 +916,7 @@ void bio_set_pages_dirty(struct bio *bio)
916 } 916 }
917} 917}
918 918
919static void bio_release_pages(struct bio *bio) 919void bio_release_pages(struct bio *bio)
920{ 920{
921 struct bio_vec *bvec = bio->bi_io_vec; 921 struct bio_vec *bvec = bio->bi_io_vec;
922 int i; 922 int i;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 197f93921847..1715d6b5f411 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -129,43 +129,191 @@ blkdev_get_block(struct inode *inode, sector_t iblock,
129 return 0; 129 return 0;
130} 130}
131 131
132static int 132static int blk_end_aio(struct bio *bio, unsigned int bytes_done, int error)
133blkdev_get_blocks(struct inode *inode, sector_t iblock,
134 struct buffer_head *bh, int create)
135{ 133{
136 sector_t end_block = max_block(I_BDEV(inode)); 134 struct kiocb *iocb = bio->bi_private;
137 unsigned long max_blocks = bh->b_size >> inode->i_blkbits; 135 atomic_t *bio_count = &iocb->ki_bio_count;
138 136
139 if ((iblock + max_blocks) > end_block) { 137 if (bio_data_dir(bio) == READ)
140 max_blocks = end_block - iblock; 138 bio_check_pages_dirty(bio);
141 if ((long)max_blocks <= 0) { 139 else {
142 if (create) 140 bio_release_pages(bio);
143 return -EIO; /* write fully beyond EOF */ 141 bio_put(bio);
144 /* 142 }
145 * It is a read which is fully beyond EOF. We return 143
146 * a !buffer_mapped buffer 144 /* iocb->ki_nbytes stores error code from LLDD */
147 */ 145 if (error)
148 max_blocks = 0; 146 iocb->ki_nbytes = -EIO;
149 } 147
148 if (atomic_dec_and_test(bio_count)) {
149 if (iocb->ki_nbytes < 0)
150 aio_complete(iocb, iocb->ki_nbytes, 0);
151 else
152 aio_complete(iocb, iocb->ki_left, 0);
150 } 153 }
151 154
152 bh->b_bdev = I_BDEV(inode);
153 bh->b_blocknr = iblock;
154 bh->b_size = max_blocks << inode->i_blkbits;
155 if (max_blocks)
156 set_buffer_mapped(bh);
157 return 0; 155 return 0;
158} 156}
159 157
158#define VEC_SIZE 16
159struct pvec {
160 unsigned short nr;
161 unsigned short idx;
162 struct page *page[VEC_SIZE];
163};
164
165#define PAGES_SPANNED(addr, len) \
166 (DIV_ROUND_UP((addr) + (len), PAGE_SIZE) - (addr) / PAGE_SIZE);
167
168/*
169 * get page pointer for user addr, we internally cache struct page array for
170 * (addr, count) range in pvec to avoid frequent call to get_user_pages. If
171 * internal page list is exhausted, a batch count of up to VEC_SIZE is used
172 * to get next set of page struct.
173 */
174static struct page *blk_get_page(unsigned long addr, size_t count, int rw,
175 struct pvec *pvec)
176{
177 int ret, nr_pages;
178 if (pvec->idx == pvec->nr) {
179 nr_pages = PAGES_SPANNED(addr, count);
180 nr_pages = min(nr_pages, VEC_SIZE);
181 down_read(&current->mm->mmap_sem);
182 ret = get_user_pages(current, current->mm, addr, nr_pages,
183 rw == READ, 0, pvec->page, NULL);
184 up_read(&current->mm->mmap_sem);
185 if (ret < 0)
186 return ERR_PTR(ret);
187 pvec->nr = ret;
188 pvec->idx = 0;
189 }
190 return pvec->page[pvec->idx++];
191}
192
160static ssize_t 193static ssize_t
161blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, 194blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
162 loff_t offset, unsigned long nr_segs) 195 loff_t pos, unsigned long nr_segs)
163{ 196{
164 struct file *file = iocb->ki_filp; 197 struct inode *inode = iocb->ki_filp->f_mapping->host;
165 struct inode *inode = file->f_mapping->host; 198 unsigned blkbits = blksize_bits(bdev_hardsect_size(I_BDEV(inode)));
199 unsigned blocksize_mask = (1 << blkbits) - 1;
200 unsigned long seg = 0; /* iov segment iterator */
201 unsigned long nvec; /* number of bio vec needed */
202 unsigned long cur_off; /* offset into current page */
203 unsigned long cur_len; /* I/O len of current page, up to PAGE_SIZE */
204
205 unsigned long addr; /* user iovec address */
206 size_t count; /* user iovec len */
207 size_t nbytes = iocb->ki_nbytes = iocb->ki_left; /* total xfer size */
208 loff_t size; /* size of block device */
209 struct bio *bio;
210 atomic_t *bio_count = &iocb->ki_bio_count;
211 struct page *page;
212 struct pvec pvec;
213
214 pvec.nr = 0;
215 pvec.idx = 0;
216
217 if (pos & blocksize_mask)
218 return -EINVAL;
219
220 size = i_size_read(inode);
221 if (pos + nbytes > size) {
222 nbytes = size - pos;
223 iocb->ki_left = nbytes;
224 }
225
226 /*
227 * check first non-zero iov alignment, the remaining
228 * iov alignment is checked inside bio loop below.
229 */
230 do {
231 addr = (unsigned long) iov[seg].iov_base;
232 count = min(iov[seg].iov_len, nbytes);
233 if (addr & blocksize_mask || count & blocksize_mask)
234 return -EINVAL;
235 } while (!count && ++seg < nr_segs);
236 atomic_set(bio_count, 1);
237
238 while (nbytes) {
239 /* roughly estimate number of bio vec needed */
240 nvec = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
241 nvec = max(nvec, nr_segs - seg);
242 nvec = min(nvec, (unsigned long) BIO_MAX_PAGES);
243
244 /* bio_alloc should not fail with GFP_KERNEL flag */
245 bio = bio_alloc(GFP_KERNEL, nvec);
246 bio->bi_bdev = I_BDEV(inode);
247 bio->bi_end_io = blk_end_aio;
248 bio->bi_private = iocb;
249 bio->bi_sector = pos >> blkbits;
250same_bio:
251 cur_off = addr & ~PAGE_MASK;
252 cur_len = PAGE_SIZE - cur_off;
253 if (count < cur_len)
254 cur_len = count;
255
256 page = blk_get_page(addr, count, rw, &pvec);
257 if (unlikely(IS_ERR(page)))
258 goto backout;
259
260 if (bio_add_page(bio, page, cur_len, cur_off)) {
261 pos += cur_len;
262 addr += cur_len;
263 count -= cur_len;
264 nbytes -= cur_len;
265
266 if (count)
267 goto same_bio;
268 while (++seg < nr_segs) {
269 addr = (unsigned long) iov[seg].iov_base;
270 count = iov[seg].iov_len;
271 if (!count)
272 continue;
273 if (unlikely(addr & blocksize_mask ||
274 count & blocksize_mask)) {
275 page = ERR_PTR(-EINVAL);
276 goto backout;
277 }
278 count = min(count, nbytes);
279 goto same_bio;
280 }
281 }
166 282
167 return blockdev_direct_IO_no_locking(rw, iocb, inode, I_BDEV(inode), 283 /* bio is ready, submit it */
168 iov, offset, nr_segs, blkdev_get_blocks, NULL); 284 if (rw == READ)
285 bio_set_pages_dirty(bio);
286 atomic_inc(bio_count);
287 submit_bio(rw, bio);
288 }
289
290completion:
291 iocb->ki_left -= nbytes;
292 nbytes = iocb->ki_left;
293 iocb->ki_pos += nbytes;
294
295 blk_run_address_space(inode->i_mapping);
296 if (atomic_dec_and_test(bio_count))
297 aio_complete(iocb, nbytes, 0);
298
299 return -EIOCBQUEUED;
300
301backout:
302 /*
303 * back out nbytes count constructed so far for this bio,
304 * we will throw away current bio.
305 */
306 nbytes += bio->bi_size;
307 bio_release_pages(bio);
308 bio_put(bio);
309
310 /*
 306	 * if no bio was submitted, return the error code.
312 * otherwise, proceed with pending I/O completion.
313 */
314 if (atomic_read(bio_count) == 1)
315 return PTR_ERR(page);
316 goto completion;
169} 317}
170 318
171static int blkdev_writepage(struct page *page, struct writeback_control *wbc) 319static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
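[Editor's sketch, not part of the patch] The rewritten blkdev_direct_IO above submits one or more bios and completes the iocb only when the last of them finishes; atomic_set(bio_count, 1) gives the submitter a bias reference so completions racing with submission can never fire aio_complete() early. A minimal user-space illustration of that counting pattern with C11 atomics (all names here are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

/* One counter per request, pre-biased to 1 so completions that race with
 * submission can never drop it to zero before the last sub-I/O is issued. */
static atomic_int pending;

static void complete_request(void)
{
	printf("whole request complete\n");
}

static void sub_io_done(void)	/* runs once per finished sub-I/O */
{
	if (atomic_fetch_sub(&pending, 1) == 1)
		complete_request();
}

int main(void)
{
	atomic_store(&pending, 1);		/* submitter holds the bias */

	for (int i = 0; i < 3; i++) {
		atomic_fetch_add(&pending, 1);	/* one ref per sub-I/O issued */
		sub_io_done();			/* pretend it already finished */
	}

	sub_io_done();	/* submission done: drop the bias; last ref completes */
	return 0;
}

This mirrors the atomic_set(bio_count, 1) / atomic_inc(bio_count) / atomic_dec_and_test(bio_count) sequence in the hunk above; the final decrement in the completion path stands in for the one blkdev_direct_IO performs after its submit loop.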
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 098790eb2aa1..472e33e0f3cf 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -4876,7 +4876,7 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
4876 } else { 4876 } else {
4877 /* Add file to outstanding requests */ 4877 /* Add file to outstanding requests */
4878 /* BB change to kmem cache alloc */ 4878 /* BB change to kmem cache alloc */
4879 dnotify_req = (struct dir_notify_req *) kmalloc( 4879 dnotify_req = kmalloc(
4880 sizeof(struct dir_notify_req), 4880 sizeof(struct dir_notify_req),
4881 GFP_KERNEL); 4881 GFP_KERNEL);
4882 if(dnotify_req) { 4882 if(dnotify_req) {
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 137d76c3f90a..c692487346ea 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -24,6 +24,7 @@
24#include <linux/kobject.h> 24#include <linux/kobject.h>
25#include <linux/namei.h> 25#include <linux/namei.h>
26#include <linux/debugfs.h> 26#include <linux/debugfs.h>
27#include <linux/fsnotify.h>
27 28
28#define DEBUGFS_MAGIC 0x64626720 29#define DEBUGFS_MAGIC 0x64626720
29 30
@@ -54,7 +55,8 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d
54 inode->i_op = &simple_dir_inode_operations; 55 inode->i_op = &simple_dir_inode_operations;
55 inode->i_fop = &simple_dir_operations; 56 inode->i_fop = &simple_dir_operations;
56 57
57 /* directory inodes start off with i_nlink == 2 (for "." entry) */ 58 /* directory inodes start off with i_nlink == 2
59 * (for "." entry) */
58 inc_nlink(inode); 60 inc_nlink(inode);
59 break; 61 break;
60 } 62 }
@@ -87,15 +89,22 @@ static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
87 89
88 mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR; 90 mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR;
89 res = debugfs_mknod(dir, dentry, mode, 0); 91 res = debugfs_mknod(dir, dentry, mode, 0);
90 if (!res) 92 if (!res) {
91 inc_nlink(dir); 93 inc_nlink(dir);
94 fsnotify_mkdir(dir, dentry);
95 }
92 return res; 96 return res;
93} 97}
94 98
95static int debugfs_create(struct inode *dir, struct dentry *dentry, int mode) 99static int debugfs_create(struct inode *dir, struct dentry *dentry, int mode)
96{ 100{
101 int res;
102
97 mode = (mode & S_IALLUGO) | S_IFREG; 103 mode = (mode & S_IALLUGO) | S_IFREG;
98 return debugfs_mknod(dir, dentry, mode, 0); 104 res = debugfs_mknod(dir, dentry, mode, 0);
105 if (!res)
106 fsnotify_create(dir, dentry);
107 return res;
99} 108}
100 109
101static inline int debugfs_positive(struct dentry *dentry) 110static inline int debugfs_positive(struct dentry *dentry)
@@ -135,7 +144,7 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
135 * block. A pointer to that is in the struct vfsmount that we 144 * block. A pointer to that is in the struct vfsmount that we
136 * have around. 145 * have around.
137 */ 146 */
138 if (!parent ) { 147 if (!parent) {
139 if (debugfs_mount && debugfs_mount->mnt_sb) { 148 if (debugfs_mount && debugfs_mount->mnt_sb) {
140 parent = debugfs_mount->mnt_sb->s_root; 149 parent = debugfs_mount->mnt_sb->s_root;
141 } 150 }
@@ -153,6 +162,7 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
153 error = debugfs_mkdir(parent->d_inode, *dentry, mode); 162 error = debugfs_mkdir(parent->d_inode, *dentry, mode);
154 else 163 else
155 error = debugfs_create(parent->d_inode, *dentry, mode); 164 error = debugfs_create(parent->d_inode, *dentry, mode);
165 dput(*dentry);
156 } else 166 } else
157 error = PTR_ERR(*dentry); 167 error = PTR_ERR(*dentry);
158 mutex_unlock(&parent->d_inode->i_mutex); 168 mutex_unlock(&parent->d_inode->i_mutex);
@@ -197,13 +207,15 @@ struct dentry *debugfs_create_file(const char *name, mode_t mode,
197 207
198 pr_debug("debugfs: creating file '%s'\n",name); 208 pr_debug("debugfs: creating file '%s'\n",name);
199 209
200 error = simple_pin_fs(&debug_fs_type, &debugfs_mount, &debugfs_mount_count); 210 error = simple_pin_fs(&debug_fs_type, &debugfs_mount,
211 &debugfs_mount_count);
201 if (error) 212 if (error)
202 goto exit; 213 goto exit;
203 214
204 error = debugfs_create_by_name(name, mode, parent, &dentry); 215 error = debugfs_create_by_name(name, mode, parent, &dentry);
205 if (error) { 216 if (error) {
206 dentry = NULL; 217 dentry = NULL;
218 simple_release_fs(&debugfs_mount, &debugfs_mount_count);
207 goto exit; 219 goto exit;
208 } 220 }
209 221
@@ -262,6 +274,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_dir);
262void debugfs_remove(struct dentry *dentry) 274void debugfs_remove(struct dentry *dentry)
263{ 275{
264 struct dentry *parent; 276 struct dentry *parent;
277 int ret = 0;
265 278
266 if (!dentry) 279 if (!dentry)
267 return; 280 return;
@@ -273,11 +286,19 @@ void debugfs_remove(struct dentry *dentry)
273 mutex_lock(&parent->d_inode->i_mutex); 286 mutex_lock(&parent->d_inode->i_mutex);
274 if (debugfs_positive(dentry)) { 287 if (debugfs_positive(dentry)) {
275 if (dentry->d_inode) { 288 if (dentry->d_inode) {
276 if (S_ISDIR(dentry->d_inode->i_mode)) 289 dget(dentry);
277 simple_rmdir(parent->d_inode, dentry); 290 if (S_ISDIR(dentry->d_inode->i_mode)) {
278 else 291 ret = simple_rmdir(parent->d_inode, dentry);
292 if (ret)
293 printk(KERN_ERR
294 "DebugFS rmdir on %s failed : "
295 "directory not empty.\n",
296 dentry->d_name.name);
297 } else
279 simple_unlink(parent->d_inode, dentry); 298 simple_unlink(parent->d_inode, dentry);
280 dput(dentry); 299 if (!ret)
300 d_delete(dentry);
301 dput(dentry);
281 } 302 }
282 } 303 }
283 mutex_unlock(&parent->d_inode->i_mutex); 304 mutex_unlock(&parent->d_inode->i_mutex);
diff --git a/fs/inode.c b/fs/inode.c
index d00de182ecb9..bf21dc6d0dbd 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1144,7 +1144,6 @@ sector_t bmap(struct inode * inode, sector_t block)
1144 res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block); 1144 res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
1145 return res; 1145 return res;
1146} 1146}
1147
1148EXPORT_SYMBOL(bmap); 1147EXPORT_SYMBOL(bmap);
1149 1148
1150/** 1149/**
@@ -1163,27 +1162,43 @@ void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
1163 1162
1164 if (IS_RDONLY(inode)) 1163 if (IS_RDONLY(inode))
1165 return; 1164 return;
1166 1165 if (inode->i_flags & S_NOATIME)
1167 if ((inode->i_flags & S_NOATIME) || 1166 return;
1168 (inode->i_sb->s_flags & MS_NOATIME) || 1167 if (inode->i_sb->s_flags & MS_NOATIME)
1169 ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))) 1168 return;
1169 if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
1170 return; 1170 return;
1171 1171
1172 /* 1172 /*
1173 * We may have a NULL vfsmount when coming from NFSD 1173 * We may have a NULL vfsmount when coming from NFSD
1174 */ 1174 */
1175 if (mnt && 1175 if (mnt) {
1176 ((mnt->mnt_flags & MNT_NOATIME) || 1176 if (mnt->mnt_flags & MNT_NOATIME)
1177 ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))) 1177 return;
1178 return; 1178 if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
1179 return;
1179 1180
1180 now = current_fs_time(inode->i_sb); 1181 if (mnt->mnt_flags & MNT_RELATIME) {
1181 if (!timespec_equal(&inode->i_atime, &now)) { 1182 /*
1182 inode->i_atime = now; 1183 * With relative atime, only update atime if the
1183 mark_inode_dirty_sync(inode); 1184 * previous atime is earlier than either the ctime or
1185 * mtime.
1186 */
1187 if (timespec_compare(&inode->i_mtime,
1188 &inode->i_atime) < 0 &&
1189 timespec_compare(&inode->i_ctime,
1190 &inode->i_atime) < 0)
1191 return;
1192 }
1184 } 1193 }
1185}
1186 1194
1195 now = current_fs_time(inode->i_sb);
1196 if (timespec_equal(&inode->i_atime, &now))
1197 return;
1198
1199 inode->i_atime = now;
1200 mark_inode_dirty_sync(inode);
1201}
1187EXPORT_SYMBOL(touch_atime); 1202EXPORT_SYMBOL(touch_atime);
1188 1203
1189/** 1204/**
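[Editor's sketch, not part of the patch] The relatime branch added to touch_atime above skips the atime update unless the stored atime is older than either mtime or ctime. A standalone sketch of just that decision, using plain time_t instead of the kernel's struct timespec and timespec_compare():

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Relative-atime rule from the hunk above: a read only dirties the inode
 * when the recorded atime lags behind a data (mtime) or metadata (ctime)
 * change; repeated reads of an unchanged file stay write-free. */
static bool relatime_should_update(time_t atim, time_t mtim, time_t ctim)
{
	if (mtim >= atim)	/* data changed since the last recorded read */
		return true;
	if (ctim >= atim)	/* metadata changed since then */
		return true;
	return false;
}

int main(void)
{
	time_t now = time(NULL);

	/* atime already newer than both -> no update (prints 0) */
	printf("%d\n", relatime_should_update(now, now - 10, now - 20));
	/* file modified after the last read -> update (prints 1) */
	printf("%d\n", relatime_should_update(now - 10, now, now - 20));
	return 0;
}

The kernel additionally short-circuits when the new atime would equal the current timestamp, as the tail of the touch_atime hunk shows.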
diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c
index 7b40c69f44eb..43baa1afa021 100644
--- a/fs/jffs/inode-v23.c
+++ b/fs/jffs/inode-v23.c
@@ -818,7 +818,7 @@ jffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
818 818
819 D1({ 819 D1({
820 int len = dentry->d_name.len; 820 int len = dentry->d_name.len;
821 char *_name = (char *) kmalloc(len + 1, GFP_KERNEL); 821 char *_name = kmalloc(len + 1, GFP_KERNEL);
822 memcpy(_name, dentry->d_name.name, len); 822 memcpy(_name, dentry->d_name.name, len);
823 _name[len] = '\0'; 823 _name[len] = '\0';
824 printk("***jffs_mkdir(): dir = 0x%p, name = \"%s\", " 824 printk("***jffs_mkdir(): dir = 0x%p, name = \"%s\", "
@@ -964,7 +964,7 @@ jffs_remove(struct inode *dir, struct dentry *dentry, int type)
964 D1({ 964 D1({
965 int len = dentry->d_name.len; 965 int len = dentry->d_name.len;
966 const char *name = dentry->d_name.name; 966 const char *name = dentry->d_name.name;
967 char *_name = (char *) kmalloc(len + 1, GFP_KERNEL); 967 char *_name = kmalloc(len + 1, GFP_KERNEL);
968 memcpy(_name, name, len); 968 memcpy(_name, name, len);
969 _name[len] = '\0'; 969 _name[len] = '\0';
970 printk("***jffs_remove(): file = \"%s\", ino = %ld\n", _name, dentry->d_inode->i_ino); 970 printk("***jffs_remove(): file = \"%s\", ino = %ld\n", _name, dentry->d_inode->i_ino);
diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c
index d0e783f199ea..6dd18911b44c 100644
--- a/fs/jffs/intrep.c
+++ b/fs/jffs/intrep.c
@@ -436,7 +436,7 @@ jffs_checksum_flash(struct mtd_info *mtd, loff_t start, int size, __u32 *result)
436 int i, length; 436 int i, length;
437 437
438 /* Allocate read buffer */ 438 /* Allocate read buffer */
439 read_buf = (__u8 *) kmalloc (sizeof(__u8) * 4096, GFP_KERNEL); 439 read_buf = kmalloc(sizeof(__u8) * 4096, GFP_KERNEL);
440 if (!read_buf) { 440 if (!read_buf) {
441 printk(KERN_NOTICE "kmalloc failed in jffs_checksum_flash()\n"); 441 printk(KERN_NOTICE "kmalloc failed in jffs_checksum_flash()\n");
442 return -ENOMEM; 442 return -ENOMEM;
@@ -744,11 +744,11 @@ static int check_partly_erased_sectors(struct jffs_fmcontrol *fmc){
744 744
745 745
746 /* Allocate read buffers */ 746 /* Allocate read buffers */
747 read_buf1 = (__u8 *) kmalloc (sizeof(__u8) * READ_AHEAD_BYTES, GFP_KERNEL); 747 read_buf1 = kmalloc(sizeof(__u8) * READ_AHEAD_BYTES, GFP_KERNEL);
748 if (!read_buf1) 748 if (!read_buf1)
749 return -ENOMEM; 749 return -ENOMEM;
750 750
751 read_buf2 = (__u8 *) kmalloc (sizeof(__u8) * READ_AHEAD_BYTES, GFP_KERNEL); 751 read_buf2 = kmalloc(sizeof(__u8) * READ_AHEAD_BYTES, GFP_KERNEL);
752 if (!read_buf2) { 752 if (!read_buf2) {
753 kfree(read_buf1); 753 kfree(read_buf1);
754 return -ENOMEM; 754 return -ENOMEM;
@@ -876,7 +876,7 @@ jffs_scan_flash(struct jffs_control *c)
876 } 876 }
877 877
878 /* Allocate read buffer */ 878 /* Allocate read buffer */
879 read_buf = (__u8 *) kmalloc (sizeof(__u8) * 4096, GFP_KERNEL); 879 read_buf = kmalloc(sizeof(__u8) * 4096, GFP_KERNEL);
880 if (!read_buf) { 880 if (!read_buf) {
881 flash_safe_release(fmc->mtd); 881 flash_safe_release(fmc->mtd);
882 return -ENOMEM; 882 return -ENOMEM;
@@ -1463,7 +1463,7 @@ jffs_insert_node(struct jffs_control *c, struct jffs_file *f,
1463 kfree(f->name); 1463 kfree(f->name);
1464 DJM(no_name--); 1464 DJM(no_name--);
1465 } 1465 }
1466 if (!(f->name = (char *) kmalloc(raw_inode->nsize + 1, 1466 if (!(f->name = kmalloc(raw_inode->nsize + 1,
1467 GFP_KERNEL))) { 1467 GFP_KERNEL))) {
1468 return -ENOMEM; 1468 return -ENOMEM;
1469 } 1469 }
@@ -1737,7 +1737,7 @@ jffs_find_child(struct jffs_file *dir, const char *name, int len)
1737 printk("jffs_find_child(): Found \"%s\".\n", f->name); 1737 printk("jffs_find_child(): Found \"%s\".\n", f->name);
1738 } 1738 }
1739 else { 1739 else {
1740 char *copy = (char *) kmalloc(len + 1, GFP_KERNEL); 1740 char *copy = kmalloc(len + 1, GFP_KERNEL);
1741 if (copy) { 1741 if (copy) {
1742 memcpy(copy, name, len); 1742 memcpy(copy, name, len);
1743 copy[len] = '\0'; 1743 copy[len] = '\0';
@@ -2627,7 +2627,7 @@ jffs_print_tree(struct jffs_file *first_file, int indent)
2627 return; 2627 return;
2628 } 2628 }
2629 2629
2630 if (!(space = (char *) kmalloc(indent + 1, GFP_KERNEL))) { 2630 if (!(space = kmalloc(indent + 1, GFP_KERNEL))) {
2631 printk("jffs_print_tree(): Out of memory!\n"); 2631 printk("jffs_print_tree(): Out of memory!\n");
2632 return; 2632 return;
2633 } 2633 }
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 47bc0b5d1324..6d62f3222892 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -3777,12 +3777,12 @@ static int ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
3777 struct component_name lkey; 3777 struct component_name lkey;
3778 struct component_name rkey; 3778 struct component_name rkey;
3779 3779
3780 lkey.name = (wchar_t *) kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t), 3780 lkey.name = kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t),
3781 GFP_KERNEL); 3781 GFP_KERNEL);
3782 if (lkey.name == NULL) 3782 if (lkey.name == NULL)
3783 return -ENOMEM; 3783 return -ENOMEM;
3784 3784
3785 rkey.name = (wchar_t *) kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t), 3785 rkey.name = kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t),
3786 GFP_KERNEL); 3786 GFP_KERNEL);
3787 if (rkey.name == NULL) { 3787 if (rkey.name == NULL) {
3788 kfree(lkey.name); 3788 kfree(lkey.name);
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index ee9b473b7b80..53f63b47a6d3 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -120,7 +120,7 @@ int diMount(struct inode *ipimap)
120 * allocate/initialize the in-memory inode map control structure 120 * allocate/initialize the in-memory inode map control structure
121 */ 121 */
122 /* allocate the in-memory inode map control structure. */ 122 /* allocate the in-memory inode map control structure. */
123 imap = (struct inomap *) kmalloc(sizeof(struct inomap), GFP_KERNEL); 123 imap = kmalloc(sizeof(struct inomap), GFP_KERNEL);
124 if (imap == NULL) { 124 if (imap == NULL) {
125 jfs_err("diMount: kmalloc returned NULL!"); 125 jfs_err("diMount: kmalloc returned NULL!");
126 return -ENOMEM; 126 return -ENOMEM;
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 92681c9e9b20..062707745162 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -36,7 +36,7 @@ struct nlm_wait {
36 struct nlm_host * b_host; 36 struct nlm_host * b_host;
37 struct file_lock * b_lock; /* local file lock */ 37 struct file_lock * b_lock; /* local file lock */
38 unsigned short b_reclaim; /* got to reclaim lock */ 38 unsigned short b_reclaim; /* got to reclaim lock */
39 u32 b_status; /* grant callback status */ 39 __be32 b_status; /* grant callback status */
40}; 40};
41 41
42static LIST_HEAD(nlm_blocked); 42static LIST_HEAD(nlm_blocked);
@@ -53,7 +53,7 @@ struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *
53 block->b_host = host; 53 block->b_host = host;
54 block->b_lock = fl; 54 block->b_lock = fl;
55 init_waitqueue_head(&block->b_wait); 55 init_waitqueue_head(&block->b_wait);
56 block->b_status = NLM_LCK_BLOCKED; 56 block->b_status = nlm_lck_blocked;
57 list_add(&block->b_list, &nlm_blocked); 57 list_add(&block->b_list, &nlm_blocked);
58 } 58 }
59 return block; 59 return block;
@@ -89,7 +89,7 @@ int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
89 * nlmclnt_lock for an explanation. 89 * nlmclnt_lock for an explanation.
90 */ 90 */
91 ret = wait_event_interruptible_timeout(block->b_wait, 91 ret = wait_event_interruptible_timeout(block->b_wait,
92 block->b_status != NLM_LCK_BLOCKED, 92 block->b_status != nlm_lck_blocked,
93 timeout); 93 timeout);
94 if (ret < 0) 94 if (ret < 0)
95 return -ERESTARTSYS; 95 return -ERESTARTSYS;
@@ -131,7 +131,7 @@ __be32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock
131 /* Alright, we found a lock. Set the return status 131 /* Alright, we found a lock. Set the return status
132 * and wake up the caller 132 * and wake up the caller
133 */ 133 */
134 block->b_status = NLM_LCK_GRANTED; 134 block->b_status = nlm_granted;
135 wake_up(&block->b_wait); 135 wake_up(&block->b_wait);
136 res = nlm_granted; 136 res = nlm_granted;
137 } 137 }
@@ -211,7 +211,7 @@ restart:
211 /* Now, wake up all processes that sleep on a blocked lock */ 211 /* Now, wake up all processes that sleep on a blocked lock */
212 list_for_each_entry(block, &nlm_blocked, b_list) { 212 list_for_each_entry(block, &nlm_blocked, b_list) {
213 if (block->b_host == host) { 213 if (block->b_host == host) {
214 block->b_status = NLM_LCK_DENIED_GRACE_PERIOD; 214 block->b_status = nlm_lck_denied_grace_period;
215 wake_up(&block->b_wait); 215 wake_up(&block->b_wait);
216 } 216 }
217 } 217 }
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 80a1a6dccc8f..0b4acc1c5e7d 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -27,7 +27,7 @@
27static int nlmclnt_test(struct nlm_rqst *, struct file_lock *); 27static int nlmclnt_test(struct nlm_rqst *, struct file_lock *);
28static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *); 28static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
29static int nlmclnt_unlock(struct nlm_rqst *, struct file_lock *); 29static int nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
30static int nlm_stat_to_errno(u32 stat); 30static int nlm_stat_to_errno(__be32 stat);
31static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host); 31static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
32static int nlmclnt_cancel(struct nlm_host *, int , struct file_lock *); 32static int nlmclnt_cancel(struct nlm_host *, int , struct file_lock *);
33 33
@@ -325,7 +325,7 @@ nlmclnt_call(struct nlm_rqst *req, u32 proc)
325 } 325 }
326 break; 326 break;
327 } else 327 } else
328 if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) { 328 if (resp->status == nlm_lck_denied_grace_period) {
329 dprintk("lockd: server in grace period\n"); 329 dprintk("lockd: server in grace period\n");
330 if (argp->reclaim) { 330 if (argp->reclaim) {
331 printk(KERN_WARNING 331 printk(KERN_WARNING
@@ -411,10 +411,10 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
411 goto out; 411 goto out;
412 412
413 switch (req->a_res.status) { 413 switch (req->a_res.status) {
414 case NLM_LCK_GRANTED: 414 case nlm_granted:
415 fl->fl_type = F_UNLCK; 415 fl->fl_type = F_UNLCK;
416 break; 416 break;
417 case NLM_LCK_DENIED: 417 case nlm_lck_denied:
418 /* 418 /*
419 * Report the conflicting lock back to the application. 419 * Report the conflicting lock back to the application.
420 */ 420 */
@@ -524,9 +524,9 @@ again:
524 if (!req->a_args.block) 524 if (!req->a_args.block)
525 break; 525 break;
526 /* Did a reclaimer thread notify us of a server reboot? */ 526 /* Did a reclaimer thread notify us of a server reboot? */
527 if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) 527 if (resp->status == nlm_lck_denied_grace_period)
528 continue; 528 continue;
529 if (resp->status != NLM_LCK_BLOCKED) 529 if (resp->status != nlm_lck_blocked)
530 break; 530 break;
531 /* Wait on an NLM blocking lock */ 531 /* Wait on an NLM blocking lock */
532 status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT); 532 status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
@@ -535,11 +535,11 @@ again:
535 */ 535 */
536 if (status < 0) 536 if (status < 0)
537 goto out_unblock; 537 goto out_unblock;
538 if (resp->status != NLM_LCK_BLOCKED) 538 if (resp->status != nlm_lck_blocked)
539 break; 539 break;
540 } 540 }
541 541
542 if (resp->status == NLM_LCK_GRANTED) { 542 if (resp->status == nlm_granted) {
543 down_read(&host->h_rwsem); 543 down_read(&host->h_rwsem);
544 /* Check whether or not the server has rebooted */ 544 /* Check whether or not the server has rebooted */
545 if (fl->fl_u.nfs_fl.state != host->h_state) { 545 if (fl->fl_u.nfs_fl.state != host->h_state) {
@@ -556,7 +556,7 @@ again:
556out_unblock: 556out_unblock:
557 nlmclnt_finish_block(block); 557 nlmclnt_finish_block(block);
558 /* Cancel the blocked request if it is still pending */ 558 /* Cancel the blocked request if it is still pending */
559 if (resp->status == NLM_LCK_BLOCKED) 559 if (resp->status == nlm_lck_blocked)
560 nlmclnt_cancel(host, req->a_args.block, fl); 560 nlmclnt_cancel(host, req->a_args.block, fl);
561out: 561out:
562 nlm_release_call(req); 562 nlm_release_call(req);
@@ -585,12 +585,12 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
585 req->a_args.reclaim = 1; 585 req->a_args.reclaim = 1;
586 586
587 if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0 587 if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0
588 && req->a_res.status == NLM_LCK_GRANTED) 588 && req->a_res.status == nlm_granted)
589 return 0; 589 return 0;
590 590
591 printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d " 591 printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
592 "(errno %d, status %d)\n", fl->fl_pid, 592 "(errno %d, status %d)\n", fl->fl_pid,
593 status, req->a_res.status); 593 status, ntohl(req->a_res.status));
594 594
595 /* 595 /*
596 * FIXME: This is a serious failure. We can 596 * FIXME: This is a serious failure. We can
@@ -637,10 +637,10 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
637 if (status < 0) 637 if (status < 0)
638 goto out; 638 goto out;
639 639
640 if (resp->status == NLM_LCK_GRANTED) 640 if (resp->status == nlm_granted)
641 goto out; 641 goto out;
642 642
643 if (resp->status != NLM_LCK_DENIED_NOLOCKS) 643 if (resp->status != nlm_lck_denied_nolocks)
644 printk("lockd: unexpected unlock status: %d\n", resp->status); 644 printk("lockd: unexpected unlock status: %d\n", resp->status);
645 /* What to do now? I'm out of my depth... */ 645 /* What to do now? I'm out of my depth... */
646 status = -ENOLCK; 646 status = -ENOLCK;
@@ -652,7 +652,7 @@ out:
652static void nlmclnt_unlock_callback(struct rpc_task *task, void *data) 652static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
653{ 653{
654 struct nlm_rqst *req = data; 654 struct nlm_rqst *req = data;
655 int status = req->a_res.status; 655 u32 status = ntohl(req->a_res.status);
656 656
657 if (RPC_ASSASSINATED(task)) 657 if (RPC_ASSASSINATED(task))
658 goto die; 658 goto die;
@@ -720,6 +720,7 @@ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl
720static void nlmclnt_cancel_callback(struct rpc_task *task, void *data) 720static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
721{ 721{
722 struct nlm_rqst *req = data; 722 struct nlm_rqst *req = data;
723 u32 status = ntohl(req->a_res.status);
723 724
724 if (RPC_ASSASSINATED(task)) 725 if (RPC_ASSASSINATED(task))
725 goto die; 726 goto die;
@@ -731,9 +732,9 @@ static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
731 } 732 }
732 733
733 dprintk("lockd: cancel status %u (task %u)\n", 734 dprintk("lockd: cancel status %u (task %u)\n",
734 req->a_res.status, task->tk_pid); 735 status, task->tk_pid);
735 736
736 switch (req->a_res.status) { 737 switch (status) {
737 case NLM_LCK_GRANTED: 738 case NLM_LCK_GRANTED:
738 case NLM_LCK_DENIED_GRACE_PERIOD: 739 case NLM_LCK_DENIED_GRACE_PERIOD:
739 case NLM_LCK_DENIED: 740 case NLM_LCK_DENIED:
@@ -744,7 +745,7 @@ static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
744 goto retry_cancel; 745 goto retry_cancel;
745 default: 746 default:
746 printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n", 747 printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
747 req->a_res.status); 748 status);
748 } 749 }
749 750
750die: 751die:
@@ -768,9 +769,9 @@ static const struct rpc_call_ops nlmclnt_cancel_ops = {
768 * Convert an NLM status code to a generic kernel errno 769 * Convert an NLM status code to a generic kernel errno
769 */ 770 */
770static int 771static int
771nlm_stat_to_errno(u32 status) 772nlm_stat_to_errno(__be32 status)
772{ 773{
773 switch(status) { 774 switch(ntohl(status)) {
774 case NLM_LCK_GRANTED: 775 case NLM_LCK_GRANTED:
775 return 0; 776 return 0;
776 case NLM_LCK_DENIED: 777 case NLM_LCK_DENIED:
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 5c054b20fd5e..c7db0a5bccdc 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -645,7 +645,7 @@ static const struct rpc_call_ops nlmsvc_grant_ops = {
645 * block. 645 * block.
646 */ 646 */
647void 647void
648nlmsvc_grant_reply(struct nlm_cookie *cookie, u32 status) 648nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
649{ 649{
650 struct nlm_block *block; 650 struct nlm_block *block;
651 651
@@ -655,7 +655,7 @@ nlmsvc_grant_reply(struct nlm_cookie *cookie, u32 status)
655 return; 655 return;
656 656
657 if (block) { 657 if (block) {
658 if (status == NLM_LCK_DENIED_GRACE_PERIOD) { 658 if (status == nlm_lck_denied_grace_period) {
659 /* Try again in a couple of seconds */ 659 /* Try again in a couple of seconds */
660 nlmsvc_insert_block(block, 10 * HZ); 660 nlmsvc_insert_block(block, 10 * HZ);
661 } else { 661 } else {
diff --git a/fs/lockd/svcshare.c b/fs/lockd/svcshare.c
index 6220dc2a3f2c..068886de4dda 100644
--- a/fs/lockd/svcshare.c
+++ b/fs/lockd/svcshare.c
@@ -39,7 +39,7 @@ nlmsvc_share_file(struct nlm_host *host, struct nlm_file *file,
39 return nlm_lck_denied; 39 return nlm_lck_denied;
40 } 40 }
41 41
42 share = (struct nlm_share *) kmalloc(sizeof(*share) + oh->len, 42 share = kmalloc(sizeof(*share) + oh->len,
43 GFP_KERNEL); 43 GFP_KERNEL);
44 if (share == NULL) 44 if (share == NULL)
45 return nlm_lck_denied_nolocks; 45 return nlm_lck_denied_nolocks;
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index b7c949256e5a..34dae5d70738 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -361,7 +361,7 @@ nlmsvc_decode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
361{ 361{
362 if (!(p = nlm_decode_cookie(p, &resp->cookie))) 362 if (!(p = nlm_decode_cookie(p, &resp->cookie)))
363 return 0; 363 return 0;
364 resp->status = ntohl(*p++); 364 resp->status = *p++;
365 return xdr_argsize_check(rqstp, p); 365 return xdr_argsize_check(rqstp, p);
366} 366}
367 367
@@ -407,8 +407,8 @@ nlmclt_decode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
407{ 407{
408 if (!(p = nlm_decode_cookie(p, &resp->cookie))) 408 if (!(p = nlm_decode_cookie(p, &resp->cookie)))
409 return -EIO; 409 return -EIO;
410 resp->status = ntohl(*p++); 410 resp->status = *p++;
411 if (resp->status == NLM_LCK_DENIED) { 411 if (resp->status == nlm_lck_denied) {
412 struct file_lock *fl = &resp->lock.fl; 412 struct file_lock *fl = &resp->lock.fl;
413 u32 excl; 413 u32 excl;
414 s32 start, len, end; 414 s32 start, len, end;
@@ -506,7 +506,7 @@ nlmclt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
506{ 506{
507 if (!(p = nlm_decode_cookie(p, &resp->cookie))) 507 if (!(p = nlm_decode_cookie(p, &resp->cookie)))
508 return -EIO; 508 return -EIO;
509 resp->status = ntohl(*p++); 509 resp->status = *p++;
510 return 0; 510 return 0;
511} 511}
512 512
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index f4c0b2b9f75a..a78240551219 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -367,7 +367,7 @@ nlm4svc_decode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
367{ 367{
368 if (!(p = nlm4_decode_cookie(p, &resp->cookie))) 368 if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
369 return 0; 369 return 0;
370 resp->status = ntohl(*p++); 370 resp->status = *p++;
371 return xdr_argsize_check(rqstp, p); 371 return xdr_argsize_check(rqstp, p);
372} 372}
373 373
@@ -413,8 +413,8 @@ nlm4clt_decode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
413{ 413{
414 if (!(p = nlm4_decode_cookie(p, &resp->cookie))) 414 if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
415 return -EIO; 415 return -EIO;
416 resp->status = ntohl(*p++); 416 resp->status = *p++;
417 if (resp->status == NLM_LCK_DENIED) { 417 if (resp->status == nlm_lck_denied) {
418 struct file_lock *fl = &resp->lock.fl; 418 struct file_lock *fl = &resp->lock.fl;
419 u32 excl; 419 u32 excl;
420 s64 start, end, len; 420 s64 start, end, len;
@@ -512,7 +512,7 @@ nlm4clt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
512{ 512{
513 if (!(p = nlm4_decode_cookie(p, &resp->cookie))) 513 if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
514 return -EIO; 514 return -EIO;
515 resp->status = ntohl(*p++); 515 resp->status = *p++;
516 return 0; 516 return 0;
517} 517}
518 518
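[Editor's sketch, not part of the patch] The lockd changes above stop byte-swapping NLM status words at decode time (resp->status = *p++) and keep them as __be32 end to end, comparing against constants that are themselves stored in network byte order; as I read the tree, nlm_granted and friends are htonl() forms of the NLM_LCK_* codes, and ntohl() is applied only where a host-order number is actually needed, such as printk(). A small user-space illustration of why that works on either endianness:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

enum { NLM_LCK_GRANTED = 0, NLM_LCK_DENIED = 1 };	/* host-order codes */

int main(void)
{
	/* Wire-order copies of the codes: comparing raw on-the-wire words
	 * against these needs no ntohl() at every use site. */
	uint32_t nlm_granted    = htonl(NLM_LCK_GRANTED);
	uint32_t nlm_lck_denied = htonl(NLM_LCK_DENIED);

	uint32_t wire_status = htonl(NLM_LCK_DENIED);	/* as read from XDR */

	if (wire_status == nlm_lck_denied)
		printf("denied\n");
	else if (wire_status == nlm_granted)
		printf("granted\n");

	/* Convert only at a host-order boundary, e.g. for printing,
	 * just as the patch adds ntohl() around the printk() arguments. */
	printf("status %u\n", (unsigned)ntohl(wire_status));
	return 0;
}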
diff --git a/fs/namespace.c b/fs/namespace.c
index fde8553faa76..5ef336c1103c 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -368,6 +368,7 @@ static int show_vfsmnt(struct seq_file *m, void *v)
368 { MNT_NOEXEC, ",noexec" }, 368 { MNT_NOEXEC, ",noexec" },
369 { MNT_NOATIME, ",noatime" }, 369 { MNT_NOATIME, ",noatime" },
370 { MNT_NODIRATIME, ",nodiratime" }, 370 { MNT_NODIRATIME, ",nodiratime" },
371 { MNT_RELATIME, ",relatime" },
371 { 0, NULL } 372 { 0, NULL }
372 }; 373 };
373 struct proc_fs_info *fs_infop; 374 struct proc_fs_info *fs_infop;
@@ -1405,9 +1406,11 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
1405 mnt_flags |= MNT_NOATIME; 1406 mnt_flags |= MNT_NOATIME;
1406 if (flags & MS_NODIRATIME) 1407 if (flags & MS_NODIRATIME)
1407 mnt_flags |= MNT_NODIRATIME; 1408 mnt_flags |= MNT_NODIRATIME;
1409 if (flags & MS_RELATIME)
1410 mnt_flags |= MNT_RELATIME;
1408 1411
1409 flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | 1412 flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
1410 MS_NOATIME | MS_NODIRATIME); 1413 MS_NOATIME | MS_NODIRATIME | MS_RELATIME);
1411 1414
1412 /* ... and get the mountpoint */ 1415 /* ... and get the mountpoint */
1413 retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd); 1416 retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
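[Editor's sketch, not part of the patch] With the namespace.c hunk above, user space can request the new behaviour by passing MS_RELATIME to mount(2). A hedged example of a remount that turns it on; the flag value matches the 2.6.20-era headers, the target path is only an example, and this needs root:

#include <stdio.h>
#include <sys/mount.h>

#ifndef MS_RELATIME
#define MS_RELATIME (1 << 21)	/* 0x200000; may be absent from old libc headers */
#endif

int main(void)
{
	/* Remount /home asking for relative atime updates. Note that a
	 * remount replaces the per-mount flags, so any nosuid/nodev/etc.
	 * already in effect would need to be passed again here. */
	if (mount(NULL, "/home", NULL, MS_REMOUNT | MS_RELATIME, NULL) != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}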
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 47462ac94474..67a90bf795d5 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -327,11 +327,12 @@ static int ncp_parse_options(struct ncp_mount_data_kernel *data, char *options)
327 char *optarg; 327 char *optarg;
328 unsigned long optint; 328 unsigned long optint;
329 int version = 0; 329 int version = 0;
330 int ret;
330 331
331 data->flags = 0; 332 data->flags = 0;
332 data->int_flags = 0; 333 data->int_flags = 0;
333 data->mounted_uid = 0; 334 data->mounted_uid = 0;
334 data->wdog_pid = -1; 335 data->wdog_pid = NULL;
335 data->ncp_fd = ~0; 336 data->ncp_fd = ~0;
336 data->time_out = 10; 337 data->time_out = 10;
337 data->retry_count = 20; 338 data->retry_count = 20;
@@ -343,8 +344,9 @@ static int ncp_parse_options(struct ncp_mount_data_kernel *data, char *options)
343 data->mounted_vol[0] = 0; 344 data->mounted_vol[0] = 0;
344 345
345 while ((optval = ncp_getopt("ncpfs", &options, ncp_opts, NULL, &optarg, &optint)) != 0) { 346 while ((optval = ncp_getopt("ncpfs", &options, ncp_opts, NULL, &optarg, &optint)) != 0) {
346 if (optval < 0) 347 ret = optval;
347 return optval; 348 if (ret < 0)
349 goto err;
348 switch (optval) { 350 switch (optval) {
349 case 'u': 351 case 'u':
350 data->uid = optint; 352 data->uid = optint;
@@ -371,7 +373,7 @@ static int ncp_parse_options(struct ncp_mount_data_kernel *data, char *options)
371 data->flags = optint; 373 data->flags = optint;
372 break; 374 break;
373 case 'w': 375 case 'w':
374 data->wdog_pid = optint; 376 data->wdog_pid = find_get_pid(optint);
375 break; 377 break;
376 case 'n': 378 case 'n':
377 data->ncp_fd = optint; 379 data->ncp_fd = optint;
@@ -380,18 +382,21 @@ static int ncp_parse_options(struct ncp_mount_data_kernel *data, char *options)
380 data->info_fd = optint; 382 data->info_fd = optint;
381 break; 383 break;
382 case 'v': 384 case 'v':
383 if (optint < NCP_MOUNT_VERSION_V4) { 385 ret = -ECHRNG;
384 return -ECHRNG; 386 if (optint < NCP_MOUNT_VERSION_V4)
385 } 387 goto err;
386 if (optint > NCP_MOUNT_VERSION_V5) { 388 if (optint > NCP_MOUNT_VERSION_V5)
387 return -ECHRNG; 389 goto err;
388 }
389 version = optint; 390 version = optint;
390 break; 391 break;
391 392
392 } 393 }
393 } 394 }
394 return 0; 395 return 0;
396err:
397 put_pid(data->wdog_pid);
398 data->wdog_pid = NULL;
399 return ret;
395} 400}
396 401
397static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) 402static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
@@ -409,6 +414,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
409#endif 414#endif
410 struct ncp_entry_info finfo; 415 struct ncp_entry_info finfo;
411 416
417 data.wdog_pid = NULL;
412 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL); 418 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
413 if (!server) 419 if (!server)
414 return -ENOMEM; 420 return -ENOMEM;
@@ -425,7 +431,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
425 data.flags = md->flags; 431 data.flags = md->flags;
426 data.int_flags = NCP_IMOUNT_LOGGEDIN_POSSIBLE; 432 data.int_flags = NCP_IMOUNT_LOGGEDIN_POSSIBLE;
427 data.mounted_uid = md->mounted_uid; 433 data.mounted_uid = md->mounted_uid;
428 data.wdog_pid = md->wdog_pid; 434 data.wdog_pid = find_get_pid(md->wdog_pid);
429 data.ncp_fd = md->ncp_fd; 435 data.ncp_fd = md->ncp_fd;
430 data.time_out = md->time_out; 436 data.time_out = md->time_out;
431 data.retry_count = md->retry_count; 437 data.retry_count = md->retry_count;
@@ -445,7 +451,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
445 data.flags = md->flags; 451 data.flags = md->flags;
446 data.int_flags = 0; 452 data.int_flags = 0;
447 data.mounted_uid = md->mounted_uid; 453 data.mounted_uid = md->mounted_uid;
448 data.wdog_pid = md->wdog_pid; 454 data.wdog_pid = find_get_pid(md->wdog_pid);
449 data.ncp_fd = md->ncp_fd; 455 data.ncp_fd = md->ncp_fd;
450 data.time_out = md->time_out; 456 data.time_out = md->time_out;
451 data.retry_count = md->retry_count; 457 data.retry_count = md->retry_count;
@@ -679,6 +685,7 @@ out_fput:
679 */ 685 */
680 fput(ncp_filp); 686 fput(ncp_filp);
681out: 687out:
688 put_pid(data.wdog_pid);
682 sb->s_fs_info = NULL; 689 sb->s_fs_info = NULL;
683 kfree(server); 690 kfree(server);
684 return error; 691 return error;
@@ -711,7 +718,8 @@ static void ncp_put_super(struct super_block *sb)
711 if (server->info_filp) 718 if (server->info_filp)
712 fput(server->info_filp); 719 fput(server->info_filp);
713 fput(server->ncp_filp); 720 fput(server->ncp_filp);
714 kill_proc(server->m.wdog_pid, SIGTERM, 1); 721 kill_pid(server->m.wdog_pid, SIGTERM, 1);
722 put_pid(server->m.wdog_pid);
715 723
716 kfree(server->priv.data); 724 kfree(server->priv.data);
717 kfree(server->auth.object_name); 725 kfree(server->auth.object_name);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ee458aeab24a..b3fd29baadc3 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1877,7 +1877,7 @@ static int nfs4_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir,
1877 struct nfs_server *server = NFS_SERVER(dir->d_inode); 1877 struct nfs_server *server = NFS_SERVER(dir->d_inode);
1878 struct unlink_desc *up; 1878 struct unlink_desc *up;
1879 1879
1880 up = (struct unlink_desc *) kmalloc(sizeof(*up), GFP_KERNEL); 1880 up = kmalloc(sizeof(*up), GFP_KERNEL);
1881 if (!up) 1881 if (!up)
1882 return -ENOMEM; 1882 return -ENOMEM;
1883 1883
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index f37df46d2eaa..248dd92e6a56 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -787,15 +787,20 @@ exp_get_by_name(svc_client *clp, struct vfsmount *mnt, struct dentry *dentry,
787 key.ex_dentry = dentry; 787 key.ex_dentry = dentry;
788 788
789 exp = svc_export_lookup(&key); 789 exp = svc_export_lookup(&key);
790 if (exp != NULL) 790 if (exp != NULL) {
791 switch (cache_check(&svc_export_cache, &exp->h, reqp)) { 791 int err;
792
793 err = cache_check(&svc_export_cache, &exp->h, reqp);
794 switch (err) {
792 case 0: break; 795 case 0: break;
793 case -EAGAIN: 796 case -EAGAIN:
794 exp = ERR_PTR(-EAGAIN); 797 case -ETIMEDOUT:
798 exp = ERR_PTR(err);
795 break; 799 break;
796 default: 800 default:
797 exp = NULL; 801 exp = NULL;
798 } 802 }
803 }
799 804
800 return exp; 805 return exp;
801} 806}
@@ -950,6 +955,8 @@ exp_export(struct nfsctl_export *nxp)
950 955
951 exp = exp_get_by_name(clp, nd.mnt, nd.dentry, NULL); 956 exp = exp_get_by_name(clp, nd.mnt, nd.dentry, NULL);
952 957
958 memset(&new, 0, sizeof(new));
959
953 /* must make sure there won't be an ex_fsid clash */ 960 /* must make sure there won't be an ex_fsid clash */
954 if ((nxp->ex_flags & NFSEXP_FSID) && 961 if ((nxp->ex_flags & NFSEXP_FSID) &&
955 (fsid_key = exp_get_fsid_key(clp, nxp->ex_dev)) && 962 (fsid_key = exp_get_fsid_key(clp, nxp->ex_dev)) &&
@@ -980,6 +987,9 @@ exp_export(struct nfsctl_export *nxp)
980 987
981 new.h.expiry_time = NEVER; 988 new.h.expiry_time = NEVER;
982 new.h.flags = 0; 989 new.h.flags = 0;
990 new.ex_path = kstrdup(nxp->ex_path, GFP_KERNEL);
991 if (!new.ex_path)
992 goto finish;
983 new.ex_client = clp; 993 new.ex_client = clp;
984 new.ex_mnt = nd.mnt; 994 new.ex_mnt = nd.mnt;
985 new.ex_dentry = nd.dentry; 995 new.ex_dentry = nd.dentry;
@@ -1000,10 +1010,11 @@ exp_export(struct nfsctl_export *nxp)
1000 /* failed to create at least one index */ 1010 /* failed to create at least one index */
1001 exp_do_unexport(exp); 1011 exp_do_unexport(exp);
1002 cache_flush(); 1012 cache_flush();
1003 err = -ENOMEM; 1013 } else
1004 } 1014 err = 0;
1005
1006finish: 1015finish:
1016 if (new.ex_path)
1017 kfree(new.ex_path);
1007 if (exp) 1018 if (exp)
1008 exp_put(exp); 1019 exp_put(exp);
1009 if (fsid_key && !IS_ERR(fsid_key)) 1020 if (fsid_key && !IS_ERR(fsid_key))
@@ -1104,6 +1115,10 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
1104 path, nd.dentry, clp->name, 1115 path, nd.dentry, clp->name,
1105 inode->i_sb->s_id, inode->i_ino); 1116 inode->i_sb->s_id, inode->i_ino);
1106 exp = exp_parent(clp, nd.mnt, nd.dentry, NULL); 1117 exp = exp_parent(clp, nd.mnt, nd.dentry, NULL);
1118 if (IS_ERR(exp)) {
1119 err = PTR_ERR(exp);
1120 goto out;
1121 }
1107 if (!exp) { 1122 if (!exp) {
1108 dprintk("nfsd: exp_rootfh export not found.\n"); 1123 dprintk("nfsd: exp_rootfh export not found.\n");
1109 goto out; 1124 goto out;
@@ -1159,12 +1174,10 @@ exp_pseudoroot(struct auth_domain *clp, struct svc_fh *fhp,
1159 mk_fsid_v1(fsidv, 0); 1174 mk_fsid_v1(fsidv, 0);
1160 1175
1161 exp = exp_find(clp, 1, fsidv, creq); 1176 exp = exp_find(clp, 1, fsidv, creq);
1162 if (IS_ERR(exp) && PTR_ERR(exp) == -EAGAIN) 1177 if (IS_ERR(exp))
1163 return nfserr_dropit; 1178 return nfserrno(PTR_ERR(exp));
1164 if (exp == NULL) 1179 if (exp == NULL)
1165 return nfserr_perm; 1180 return nfserr_perm;
1166 else if (IS_ERR(exp))
1167 return nfserrno(PTR_ERR(exp));
1168 rv = fh_compose(fhp, exp, exp->ex_dentry, NULL); 1181 rv = fh_compose(fhp, exp, exp->ex_dentry, NULL);
1169 exp_put(exp); 1182 exp_put(exp);
1170 return rv; 1183 return rv;
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
index 11fdaf7721b4..221acd1f11f6 100644
--- a/fs/nfsd/lockd.c
+++ b/fs/nfsd/lockd.c
@@ -22,7 +22,7 @@
22/* 22/*
23 * Note: we hold the dentry use count while the file is open. 23 * Note: we hold the dentry use count while the file is open.
24 */ 24 */
25static u32 25static __be32
26nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp) 26nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp)
27{ 27{
28 __be32 nfserr; 28 __be32 nfserr;
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 50bc94243ca1..8522729830db 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -33,13 +33,6 @@
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Note: some routines in this file are just trivial wrappers
38 * (e.g. nfsd4_lookup()) defined solely for the sake of consistent
39 * naming. Since all such routines have been declared "inline",
40 * there shouldn't be any associated overhead. At some point in
41 * the future, I might inline these "by hand" to clean up a
42 * little.
43 */ 36 */
44 37
45#include <linux/param.h> 38#include <linux/param.h>
@@ -161,8 +154,9 @@ do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_
161} 154}
162 155
163 156
164static inline __be32 157static __be32
165nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open, struct nfs4_stateowner **replay_owner) 158nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
159 struct nfsd4_open *open)
166{ 160{
167 __be32 status; 161 __be32 status;
168 dprintk("NFSD: nfsd4_open filename %.*s op_stateowner %p\n", 162 dprintk("NFSD: nfsd4_open filename %.*s op_stateowner %p\n",
@@ -179,11 +173,11 @@ nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open
179 status = nfsd4_process_open1(open); 173 status = nfsd4_process_open1(open);
180 if (status == nfserr_replay_me) { 174 if (status == nfserr_replay_me) {
181 struct nfs4_replay *rp = &open->op_stateowner->so_replay; 175 struct nfs4_replay *rp = &open->op_stateowner->so_replay;
182 fh_put(current_fh); 176 fh_put(&cstate->current_fh);
183 current_fh->fh_handle.fh_size = rp->rp_openfh_len; 177 cstate->current_fh.fh_handle.fh_size = rp->rp_openfh_len;
184 memcpy(&current_fh->fh_handle.fh_base, rp->rp_openfh, 178 memcpy(&cstate->current_fh.fh_handle.fh_base, rp->rp_openfh,
185 rp->rp_openfh_len); 179 rp->rp_openfh_len);
186 status = fh_verify(rqstp, current_fh, 0, MAY_NOP); 180 status = fh_verify(rqstp, &cstate->current_fh, 0, MAY_NOP);
187 if (status) 181 if (status)
188 dprintk("nfsd4_open: replay failed" 182 dprintk("nfsd4_open: replay failed"
189 " restoring previous filehandle\n"); 183 " restoring previous filehandle\n");
@@ -215,7 +209,8 @@ nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open
215 * (3) set open->op_truncate if the file is to be 209 * (3) set open->op_truncate if the file is to be
216 * truncated after opening, (4) do permission checking. 210 * truncated after opening, (4) do permission checking.
217 */ 211 */
218 status = do_open_lookup(rqstp, current_fh, open); 212 status = do_open_lookup(rqstp, &cstate->current_fh,
213 open);
219 if (status) 214 if (status)
220 goto out; 215 goto out;
221 break; 216 break;
@@ -227,7 +222,8 @@ nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open
227 * open->op_truncate if the file is to be truncated 222 * open->op_truncate if the file is to be truncated
228 * after opening, (3) do permission checking. 223 * after opening, (3) do permission checking.
229 */ 224 */
230 status = do_open_fhandle(rqstp, current_fh, open); 225 status = do_open_fhandle(rqstp, &cstate->current_fh,
226 open);
231 if (status) 227 if (status)
232 goto out; 228 goto out;
233 break; 229 break;
@@ -248,11 +244,11 @@ nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open
248 * successful, it (1) truncates the file if open->op_truncate was 244 * successful, it (1) truncates the file if open->op_truncate was
249 * set, (2) sets open->op_stateid, (3) sets open->op_delegation. 245 * set, (2) sets open->op_stateid, (3) sets open->op_delegation.
250 */ 246 */
251 status = nfsd4_process_open2(rqstp, current_fh, open); 247 status = nfsd4_process_open2(rqstp, &cstate->current_fh, open);
252out: 248out:
253 if (open->op_stateowner) { 249 if (open->op_stateowner) {
254 nfs4_get_stateowner(open->op_stateowner); 250 nfs4_get_stateowner(open->op_stateowner);
255 *replay_owner = open->op_stateowner; 251 cstate->replay_owner = open->op_stateowner;
256 } 252 }
257 nfs4_unlock_state(); 253 nfs4_unlock_state();
258 return status; 254 return status;
@@ -261,71 +257,80 @@ out:
261/* 257/*
262 * filehandle-manipulating ops. 258 * filehandle-manipulating ops.
263 */ 259 */
264static inline __be32 260static __be32
265nfsd4_getfh(struct svc_fh *current_fh, struct svc_fh **getfh) 261nfsd4_getfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
262 struct svc_fh **getfh)
266{ 263{
267 if (!current_fh->fh_dentry) 264 if (!cstate->current_fh.fh_dentry)
268 return nfserr_nofilehandle; 265 return nfserr_nofilehandle;
269 266
270 *getfh = current_fh; 267 *getfh = &cstate->current_fh;
271 return nfs_ok; 268 return nfs_ok;
272} 269}
273 270
274static inline __be32 271static __be32
275nfsd4_putfh(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_putfh *putfh) 272nfsd4_putfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
273 struct nfsd4_putfh *putfh)
276{ 274{
277 fh_put(current_fh); 275 fh_put(&cstate->current_fh);
278 current_fh->fh_handle.fh_size = putfh->pf_fhlen; 276 cstate->current_fh.fh_handle.fh_size = putfh->pf_fhlen;
279 memcpy(&current_fh->fh_handle.fh_base, putfh->pf_fhval, putfh->pf_fhlen); 277 memcpy(&cstate->current_fh.fh_handle.fh_base, putfh->pf_fhval,
280 return fh_verify(rqstp, current_fh, 0, MAY_NOP); 278 putfh->pf_fhlen);
279 return fh_verify(rqstp, &cstate->current_fh, 0, MAY_NOP);
281} 280}
282 281
283static inline __be32 282static __be32
284nfsd4_putrootfh(struct svc_rqst *rqstp, struct svc_fh *current_fh) 283nfsd4_putrootfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
284 void *arg)
285{ 285{
286 __be32 status; 286 __be32 status;
287 287
288 fh_put(current_fh); 288 fh_put(&cstate->current_fh);
289 status = exp_pseudoroot(rqstp->rq_client, current_fh, 289 status = exp_pseudoroot(rqstp->rq_client, &cstate->current_fh,
290 &rqstp->rq_chandle); 290 &rqstp->rq_chandle);
291 return status; 291 return status;
292} 292}
293 293
294static inline __be32 294static __be32
295nfsd4_restorefh(struct svc_fh *current_fh, struct svc_fh *save_fh) 295nfsd4_restorefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
296 void *arg)
296{ 297{
297 if (!save_fh->fh_dentry) 298 if (!cstate->save_fh.fh_dentry)
298 return nfserr_restorefh; 299 return nfserr_restorefh;
299 300
300 fh_dup2(current_fh, save_fh); 301 fh_dup2(&cstate->current_fh, &cstate->save_fh);
301 return nfs_ok; 302 return nfs_ok;
302} 303}
303 304
304static inline __be32 305static __be32
305nfsd4_savefh(struct svc_fh *current_fh, struct svc_fh *save_fh) 306nfsd4_savefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
307 void *arg)
306{ 308{
307 if (!current_fh->fh_dentry) 309 if (!cstate->current_fh.fh_dentry)
308 return nfserr_nofilehandle; 310 return nfserr_nofilehandle;
309 311
310 fh_dup2(save_fh, current_fh); 312 fh_dup2(&cstate->save_fh, &cstate->current_fh);
311 return nfs_ok; 313 return nfs_ok;
312} 314}
313 315
314/* 316/*
315 * misc nfsv4 ops 317 * misc nfsv4 ops
316 */ 318 */
317static inline __be32 319static __be32
318nfsd4_access(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_access *access) 320nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
321 struct nfsd4_access *access)
319{ 322{
320 if (access->ac_req_access & ~NFS3_ACCESS_FULL) 323 if (access->ac_req_access & ~NFS3_ACCESS_FULL)
321 return nfserr_inval; 324 return nfserr_inval;
322 325
323 access->ac_resp_access = access->ac_req_access; 326 access->ac_resp_access = access->ac_req_access;
324 return nfsd_access(rqstp, current_fh, &access->ac_resp_access, &access->ac_supported); 327 return nfsd_access(rqstp, &cstate->current_fh, &access->ac_resp_access,
328 &access->ac_supported);
325} 329}
326 330
327static inline __be32 331static __be32
328nfsd4_commit(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_commit *commit) 332nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
333 struct nfsd4_commit *commit)
329{ 334{
330 __be32 status; 335 __be32 status;
331 336
@@ -333,14 +338,16 @@ nfsd4_commit(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_com
333 *p++ = nfssvc_boot.tv_sec; 338 *p++ = nfssvc_boot.tv_sec;
334 *p++ = nfssvc_boot.tv_usec; 339 *p++ = nfssvc_boot.tv_usec;
335 340
336 status = nfsd_commit(rqstp, current_fh, commit->co_offset, commit->co_count); 341 status = nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset,
342 commit->co_count);
337 if (status == nfserr_symlink) 343 if (status == nfserr_symlink)
338 status = nfserr_inval; 344 status = nfserr_inval;
339 return status; 345 return status;
340} 346}
341 347
342static __be32 348static __be32
343nfsd4_create(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_create *create) 349nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
350 struct nfsd4_create *create)
344{ 351{
345 struct svc_fh resfh; 352 struct svc_fh resfh;
346 __be32 status; 353 __be32 status;
@@ -348,7 +355,7 @@ nfsd4_create(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_cre
348 355
349 fh_init(&resfh, NFS4_FHSIZE); 356 fh_init(&resfh, NFS4_FHSIZE);
350 357
351 status = fh_verify(rqstp, current_fh, S_IFDIR, MAY_CREATE); 358 status = fh_verify(rqstp, &cstate->current_fh, S_IFDIR, MAY_CREATE);
352 if (status == nfserr_symlink) 359 if (status == nfserr_symlink)
353 status = nfserr_notdir; 360 status = nfserr_notdir;
354 if (status) 361 if (status)
@@ -365,9 +372,10 @@ nfsd4_create(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_cre
365 */ 372 */
366 create->cr_linkname[create->cr_linklen] = 0; 373 create->cr_linkname[create->cr_linklen] = 0;
367 374
368 status = nfsd_symlink(rqstp, current_fh, create->cr_name, 375 status = nfsd_symlink(rqstp, &cstate->current_fh,
369 create->cr_namelen, create->cr_linkname, 376 create->cr_name, create->cr_namelen,
370 create->cr_linklen, &resfh, &create->cr_iattr); 377 create->cr_linkname, create->cr_linklen,
378 &resfh, &create->cr_iattr);
371 break; 379 break;
372 380
373 case NF4BLK: 381 case NF4BLK:
@@ -375,9 +383,9 @@ nfsd4_create(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_cre
375 if (MAJOR(rdev) != create->cr_specdata1 || 383 if (MAJOR(rdev) != create->cr_specdata1 ||
376 MINOR(rdev) != create->cr_specdata2) 384 MINOR(rdev) != create->cr_specdata2)
377 return nfserr_inval; 385 return nfserr_inval;
378 status = nfsd_create(rqstp, current_fh, create->cr_name, 386 status = nfsd_create(rqstp, &cstate->current_fh,
379 create->cr_namelen, &create->cr_iattr, 387 create->cr_name, create->cr_namelen,
380 S_IFBLK, rdev, &resfh); 388 &create->cr_iattr, S_IFBLK, rdev, &resfh);
381 break; 389 break;
382 390
383 case NF4CHR: 391 case NF4CHR:
@@ -385,28 +393,28 @@ nfsd4_create(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_cre
385 if (MAJOR(rdev) != create->cr_specdata1 || 393 if (MAJOR(rdev) != create->cr_specdata1 ||
386 MINOR(rdev) != create->cr_specdata2) 394 MINOR(rdev) != create->cr_specdata2)
387 return nfserr_inval; 395 return nfserr_inval;
388 status = nfsd_create(rqstp, current_fh, create->cr_name, 396 status = nfsd_create(rqstp, &cstate->current_fh,
389 create->cr_namelen, &create->cr_iattr, 397 create->cr_name, create->cr_namelen,
390 S_IFCHR, rdev, &resfh); 398 &create->cr_iattr,S_IFCHR, rdev, &resfh);
391 break; 399 break;
392 400
393 case NF4SOCK: 401 case NF4SOCK:
394 status = nfsd_create(rqstp, current_fh, create->cr_name, 402 status = nfsd_create(rqstp, &cstate->current_fh,
395 create->cr_namelen, &create->cr_iattr, 403 create->cr_name, create->cr_namelen,
396 S_IFSOCK, 0, &resfh); 404 &create->cr_iattr, S_IFSOCK, 0, &resfh);
397 break; 405 break;
398 406
399 case NF4FIFO: 407 case NF4FIFO:
400 status = nfsd_create(rqstp, current_fh, create->cr_name, 408 status = nfsd_create(rqstp, &cstate->current_fh,
401 create->cr_namelen, &create->cr_iattr, 409 create->cr_name, create->cr_namelen,
402 S_IFIFO, 0, &resfh); 410 &create->cr_iattr, S_IFIFO, 0, &resfh);
403 break; 411 break;
404 412
405 case NF4DIR: 413 case NF4DIR:
406 create->cr_iattr.ia_valid &= ~ATTR_SIZE; 414 create->cr_iattr.ia_valid &= ~ATTR_SIZE;
407 status = nfsd_create(rqstp, current_fh, create->cr_name, 415 status = nfsd_create(rqstp, &cstate->current_fh,
408 create->cr_namelen, &create->cr_iattr, 416 create->cr_name, create->cr_namelen,
409 S_IFDIR, 0, &resfh); 417 &create->cr_iattr, S_IFDIR, 0, &resfh);
410 break; 418 break;
411 419
412 default: 420 default:
@@ -414,21 +422,22 @@ nfsd4_create(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_cre
414 } 422 }
415 423
416 if (!status) { 424 if (!status) {
417 fh_unlock(current_fh); 425 fh_unlock(&cstate->current_fh);
418 set_change_info(&create->cr_cinfo, current_fh); 426 set_change_info(&create->cr_cinfo, &cstate->current_fh);
419 fh_dup2(current_fh, &resfh); 427 fh_dup2(&cstate->current_fh, &resfh);
420 } 428 }
421 429
422 fh_put(&resfh); 430 fh_put(&resfh);
423 return status; 431 return status;
424} 432}
425 433
426static inline __be32 434static __be32
427nfsd4_getattr(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_getattr *getattr) 435nfsd4_getattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
436 struct nfsd4_getattr *getattr)
428{ 437{
429 __be32 status; 438 __be32 status;
430 439
431 status = fh_verify(rqstp, current_fh, 0, MAY_NOP); 440 status = fh_verify(rqstp, &cstate->current_fh, 0, MAY_NOP);
432 if (status) 441 if (status)
433 return status; 442 return status;
434 443
@@ -438,26 +447,28 @@ nfsd4_getattr(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_ge
438 getattr->ga_bmval[0] &= NFSD_SUPPORTED_ATTRS_WORD0; 447 getattr->ga_bmval[0] &= NFSD_SUPPORTED_ATTRS_WORD0;
439 getattr->ga_bmval[1] &= NFSD_SUPPORTED_ATTRS_WORD1; 448 getattr->ga_bmval[1] &= NFSD_SUPPORTED_ATTRS_WORD1;
440 449
441 getattr->ga_fhp = current_fh; 450 getattr->ga_fhp = &cstate->current_fh;
442 return nfs_ok; 451 return nfs_ok;
443} 452}
444 453
445static inline __be32 454static __be32
446nfsd4_link(struct svc_rqst *rqstp, struct svc_fh *current_fh, 455nfsd4_link(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
447 struct svc_fh *save_fh, struct nfsd4_link *link) 456 struct nfsd4_link *link)
448{ 457{
449 __be32 status = nfserr_nofilehandle; 458 __be32 status = nfserr_nofilehandle;
450 459
451 if (!save_fh->fh_dentry) 460 if (!cstate->save_fh.fh_dentry)
452 return status; 461 return status;
453 status = nfsd_link(rqstp, current_fh, link->li_name, link->li_namelen, save_fh); 462 status = nfsd_link(rqstp, &cstate->current_fh,
463 link->li_name, link->li_namelen, &cstate->save_fh);
454 if (!status) 464 if (!status)
455 set_change_info(&link->li_cinfo, current_fh); 465 set_change_info(&link->li_cinfo, &cstate->current_fh);
456 return status; 466 return status;
457} 467}
458 468
459static __be32 469static __be32
460nfsd4_lookupp(struct svc_rqst *rqstp, struct svc_fh *current_fh) 470nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
471 void *arg)
461{ 472{
462 struct svc_fh tmp_fh; 473 struct svc_fh tmp_fh;
463 __be32 ret; 474 __be32 ret;
@@ -466,22 +477,27 @@ nfsd4_lookupp(struct svc_rqst *rqstp, struct svc_fh *current_fh)
466 if((ret = exp_pseudoroot(rqstp->rq_client, &tmp_fh, 477 if((ret = exp_pseudoroot(rqstp->rq_client, &tmp_fh,
467 &rqstp->rq_chandle)) != 0) 478 &rqstp->rq_chandle)) != 0)
468 return ret; 479 return ret;
469 if (tmp_fh.fh_dentry == current_fh->fh_dentry) { 480 if (tmp_fh.fh_dentry == cstate->current_fh.fh_dentry) {
470 fh_put(&tmp_fh); 481 fh_put(&tmp_fh);
471 return nfserr_noent; 482 return nfserr_noent;
472 } 483 }
473 fh_put(&tmp_fh); 484 fh_put(&tmp_fh);
474 return nfsd_lookup(rqstp, current_fh, "..", 2, current_fh); 485 return nfsd_lookup(rqstp, &cstate->current_fh,
486 "..", 2, &cstate->current_fh);
475} 487}
476 488
477static inline __be32 489static __be32
478nfsd4_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lookup *lookup) 490nfsd4_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
491 struct nfsd4_lookup *lookup)
479{ 492{
480 return nfsd_lookup(rqstp, current_fh, lookup->lo_name, lookup->lo_len, current_fh); 493 return nfsd_lookup(rqstp, &cstate->current_fh,
494 lookup->lo_name, lookup->lo_len,
495 &cstate->current_fh);
481} 496}
482 497
483static inline __be32 498static __be32
484nfsd4_read(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_read *read) 499nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
500 struct nfsd4_read *read)
485{ 501{
486 __be32 status; 502 __be32 status;
487 503
@@ -493,7 +509,8 @@ nfsd4_read(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_read
493 509
494 nfs4_lock_state(); 510 nfs4_lock_state();
495 /* check stateid */ 511 /* check stateid */
496 if ((status = nfs4_preprocess_stateid_op(current_fh, &read->rd_stateid, 512 if ((status = nfs4_preprocess_stateid_op(&cstate->current_fh,
513 &read->rd_stateid,
497 CHECK_FH | RD_STATE, &read->rd_filp))) { 514 CHECK_FH | RD_STATE, &read->rd_filp))) {
498 dprintk("NFSD: nfsd4_read: couldn't process stateid!\n"); 515 dprintk("NFSD: nfsd4_read: couldn't process stateid!\n");
499 goto out; 516 goto out;
@@ -504,12 +521,13 @@ nfsd4_read(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_read
504out: 521out:
505 nfs4_unlock_state(); 522 nfs4_unlock_state();
506 read->rd_rqstp = rqstp; 523 read->rd_rqstp = rqstp;
507 read->rd_fhp = current_fh; 524 read->rd_fhp = &cstate->current_fh;
508 return status; 525 return status;
509} 526}
510 527
511static inline __be32 528static __be32
512nfsd4_readdir(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_readdir *readdir) 529nfsd4_readdir(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
530 struct nfsd4_readdir *readdir)
513{ 531{
514 u64 cookie = readdir->rd_cookie; 532 u64 cookie = readdir->rd_cookie;
515 static const nfs4_verifier zeroverf; 533 static const nfs4_verifier zeroverf;
@@ -527,48 +545,51 @@ nfsd4_readdir(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_re
527 return nfserr_bad_cookie; 545 return nfserr_bad_cookie;
528 546
529 readdir->rd_rqstp = rqstp; 547 readdir->rd_rqstp = rqstp;
530 readdir->rd_fhp = current_fh; 548 readdir->rd_fhp = &cstate->current_fh;
531 return nfs_ok; 549 return nfs_ok;
532} 550}
533 551
534static inline __be32 552static __be32
535nfsd4_readlink(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_readlink *readlink) 553nfsd4_readlink(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
554 struct nfsd4_readlink *readlink)
536{ 555{
537 readlink->rl_rqstp = rqstp; 556 readlink->rl_rqstp = rqstp;
538 readlink->rl_fhp = current_fh; 557 readlink->rl_fhp = &cstate->current_fh;
539 return nfs_ok; 558 return nfs_ok;
540} 559}
541 560
542static inline __be32 561static __be32
543nfsd4_remove(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_remove *remove) 562nfsd4_remove(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
563 struct nfsd4_remove *remove)
544{ 564{
545 __be32 status; 565 __be32 status;
546 566
547 if (nfs4_in_grace()) 567 if (nfs4_in_grace())
548 return nfserr_grace; 568 return nfserr_grace;
549 status = nfsd_unlink(rqstp, current_fh, 0, remove->rm_name, remove->rm_namelen); 569 status = nfsd_unlink(rqstp, &cstate->current_fh, 0,
570 remove->rm_name, remove->rm_namelen);
550 if (status == nfserr_symlink) 571 if (status == nfserr_symlink)
551 return nfserr_notdir; 572 return nfserr_notdir;
552 if (!status) { 573 if (!status) {
553 fh_unlock(current_fh); 574 fh_unlock(&cstate->current_fh);
554 set_change_info(&remove->rm_cinfo, current_fh); 575 set_change_info(&remove->rm_cinfo, &cstate->current_fh);
555 } 576 }
556 return status; 577 return status;
557} 578}
558 579
559static inline __be32 580static __be32
560nfsd4_rename(struct svc_rqst *rqstp, struct svc_fh *current_fh, 581nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
561 struct svc_fh *save_fh, struct nfsd4_rename *rename) 582 struct nfsd4_rename *rename)
562{ 583{
563 __be32 status = nfserr_nofilehandle; 584 __be32 status = nfserr_nofilehandle;
564 585
565 if (!save_fh->fh_dentry) 586 if (!cstate->save_fh.fh_dentry)
566 return status; 587 return status;
567 if (nfs4_in_grace() && !(save_fh->fh_export->ex_flags 588 if (nfs4_in_grace() && !(cstate->save_fh.fh_export->ex_flags
568 & NFSEXP_NOSUBTREECHECK)) 589 & NFSEXP_NOSUBTREECHECK))
569 return nfserr_grace; 590 return nfserr_grace;
570 status = nfsd_rename(rqstp, save_fh, rename->rn_sname, 591 status = nfsd_rename(rqstp, &cstate->save_fh, rename->rn_sname,
571 rename->rn_snamelen, current_fh, 592 rename->rn_snamelen, &cstate->current_fh,
572 rename->rn_tname, rename->rn_tnamelen); 593 rename->rn_tname, rename->rn_tnamelen);
573 594
574 /* the underlying filesystem returns different errors than required 595 /* the underlying filesystem returns different errors than required
@@ -576,27 +597,28 @@ nfsd4_rename(struct svc_rqst *rqstp, struct svc_fh *current_fh,
576 if (status == nfserr_isdir) 597 if (status == nfserr_isdir)
577 status = nfserr_exist; 598 status = nfserr_exist;
578 else if ((status == nfserr_notdir) && 599 else if ((status == nfserr_notdir) &&
579 (S_ISDIR(save_fh->fh_dentry->d_inode->i_mode) && 600 (S_ISDIR(cstate->save_fh.fh_dentry->d_inode->i_mode) &&
580 S_ISDIR(current_fh->fh_dentry->d_inode->i_mode))) 601 S_ISDIR(cstate->current_fh.fh_dentry->d_inode->i_mode)))
581 status = nfserr_exist; 602 status = nfserr_exist;
582 else if (status == nfserr_symlink) 603 else if (status == nfserr_symlink)
583 status = nfserr_notdir; 604 status = nfserr_notdir;
584 605
585 if (!status) { 606 if (!status) {
586 set_change_info(&rename->rn_sinfo, current_fh); 607 set_change_info(&rename->rn_sinfo, &cstate->current_fh);
587 set_change_info(&rename->rn_tinfo, save_fh); 608 set_change_info(&rename->rn_tinfo, &cstate->save_fh);
588 } 609 }
589 return status; 610 return status;
590} 611}
591 612
592static inline __be32 613static __be32
593nfsd4_setattr(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_setattr *setattr) 614nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
615 struct nfsd4_setattr *setattr)
594{ 616{
595 __be32 status = nfs_ok; 617 __be32 status = nfs_ok;
596 618
597 if (setattr->sa_iattr.ia_valid & ATTR_SIZE) { 619 if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
598 nfs4_lock_state(); 620 nfs4_lock_state();
599 status = nfs4_preprocess_stateid_op(current_fh, 621 status = nfs4_preprocess_stateid_op(&cstate->current_fh,
600 &setattr->sa_stateid, CHECK_FH | WR_STATE, NULL); 622 &setattr->sa_stateid, CHECK_FH | WR_STATE, NULL);
601 nfs4_unlock_state(); 623 nfs4_unlock_state();
602 if (status) { 624 if (status) {
@@ -606,16 +628,18 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_se
606 } 628 }
607 status = nfs_ok; 629 status = nfs_ok;
608 if (setattr->sa_acl != NULL) 630 if (setattr->sa_acl != NULL)
609 status = nfsd4_set_nfs4_acl(rqstp, current_fh, setattr->sa_acl); 631 status = nfsd4_set_nfs4_acl(rqstp, &cstate->current_fh,
632 setattr->sa_acl);
610 if (status) 633 if (status)
611 return status; 634 return status;
612 status = nfsd_setattr(rqstp, current_fh, &setattr->sa_iattr, 635 status = nfsd_setattr(rqstp, &cstate->current_fh, &setattr->sa_iattr,
613 0, (time_t)0); 636 0, (time_t)0);
614 return status; 637 return status;
615} 638}
616 639
617static inline __be32 640static __be32
618nfsd4_write(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_write *write) 641nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
642 struct nfsd4_write *write)
619{ 643{
620 stateid_t *stateid = &write->wr_stateid; 644 stateid_t *stateid = &write->wr_stateid;
621 struct file *filp = NULL; 645 struct file *filp = NULL;
@@ -628,7 +652,7 @@ nfsd4_write(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_writ
628 return nfserr_inval; 652 return nfserr_inval;
629 653
630 nfs4_lock_state(); 654 nfs4_lock_state();
631 status = nfs4_preprocess_stateid_op(current_fh, stateid, 655 status = nfs4_preprocess_stateid_op(&cstate->current_fh, stateid,
632 CHECK_FH | WR_STATE, &filp); 656 CHECK_FH | WR_STATE, &filp);
633 if (filp) 657 if (filp)
634 get_file(filp); 658 get_file(filp);
@@ -645,9 +669,9 @@ nfsd4_write(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_writ
645 *p++ = nfssvc_boot.tv_sec; 669 *p++ = nfssvc_boot.tv_sec;
646 *p++ = nfssvc_boot.tv_usec; 670 *p++ = nfssvc_boot.tv_usec;
647 671
648 status = nfsd_write(rqstp, current_fh, filp, write->wr_offset, 672 status = nfsd_write(rqstp, &cstate->current_fh, filp,
649 rqstp->rq_vec, write->wr_vlen, write->wr_buflen, 673 write->wr_offset, rqstp->rq_vec, write->wr_vlen,
650 &write->wr_how_written); 674 write->wr_buflen, &write->wr_how_written);
651 if (filp) 675 if (filp)
652 fput(filp); 676 fput(filp);
653 677
@@ -662,13 +686,14 @@ nfsd4_write(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_writ
662 * to NFS_OK after the call; NVERIFY by mapping NFSERR_NOT_SAME to NFS_OK. 686 * to NFS_OK after the call; NVERIFY by mapping NFSERR_NOT_SAME to NFS_OK.
663 */ 687 */
664static __be32 688static __be32
665nfsd4_verify(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_verify *verify) 689_nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
690 struct nfsd4_verify *verify)
666{ 691{
667 __be32 *buf, *p; 692 __be32 *buf, *p;
668 int count; 693 int count;
669 __be32 status; 694 __be32 status;
670 695
671 status = fh_verify(rqstp, current_fh, 0, MAY_NOP); 696 status = fh_verify(rqstp, &cstate->current_fh, 0, MAY_NOP);
672 if (status) 697 if (status)
673 return status; 698 return status;
674 699
@@ -689,8 +714,9 @@ nfsd4_verify(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_ver
689 if (!buf) 714 if (!buf)
690 return nfserr_resource; 715 return nfserr_resource;
691 716
692 status = nfsd4_encode_fattr(current_fh, current_fh->fh_export, 717 status = nfsd4_encode_fattr(&cstate->current_fh,
693 current_fh->fh_dentry, buf, 718 cstate->current_fh.fh_export,
719 cstate->current_fh.fh_dentry, buf,
694 &count, verify->ve_bmval, 720 &count, verify->ve_bmval,
695 rqstp); 721 rqstp);
696 722
@@ -712,6 +738,26 @@ out_kfree:
712 return status; 738 return status;
713} 739}
714 740
741static __be32
742nfsd4_nverify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
743 struct nfsd4_verify *verify)
744{
745 __be32 status;
746
747 status = _nfsd4_verify(rqstp, cstate, verify);
748 return status == nfserr_not_same ? nfs_ok : status;
749}
750
751static __be32
752nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
753 struct nfsd4_verify *verify)
754{
755 __be32 status;
756
757 status = _nfsd4_verify(rqstp, cstate, verify);
758 return status == nfserr_same ? nfs_ok : status;
759}
760
715/* 761/*
716 * NULL call. 762 * NULL call.
717 */ 763 */
@@ -727,6 +773,42 @@ static inline void nfsd4_increment_op_stats(u32 opnum)
727 nfsdstats.nfs4_opcount[opnum]++; 773 nfsdstats.nfs4_opcount[opnum]++;
728} 774}
729 775
776static void cstate_free(struct nfsd4_compound_state *cstate)
777{
778 if (cstate == NULL)
779 return;
780 fh_put(&cstate->current_fh);
781 fh_put(&cstate->save_fh);
782 BUG_ON(cstate->replay_owner);
783 kfree(cstate);
784}
785
786static struct nfsd4_compound_state *cstate_alloc(void)
787{
788 struct nfsd4_compound_state *cstate;
789
790 cstate = kmalloc(sizeof(struct nfsd4_compound_state), GFP_KERNEL);
791 if (cstate == NULL)
792 return NULL;
793 fh_init(&cstate->current_fh, NFS4_FHSIZE);
794 fh_init(&cstate->save_fh, NFS4_FHSIZE);
795 cstate->replay_owner = NULL;
796 return cstate;
797}
798
799typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *,
800 void *);
801
802struct nfsd4_operation {
803 nfsd4op_func op_func;
804 u32 op_flags;
805/* Most ops require a valid current filehandle; a few don't: */
806#define ALLOWED_WITHOUT_FH 1
807/* GETATTR and ops not listed as returning NFS4ERR_MOVED: */
808#define ALLOWED_ON_ABSENT_FS 2
809};
810
811static struct nfsd4_operation nfsd4_ops[];
730 812
731/* 813/*
732 * COMPOUND call. 814 * COMPOUND call.
@@ -737,21 +819,15 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
737 struct nfsd4_compoundres *resp) 819 struct nfsd4_compoundres *resp)
738{ 820{
739 struct nfsd4_op *op; 821 struct nfsd4_op *op;
740 struct svc_fh *current_fh = NULL; 822 struct nfsd4_operation *opdesc;
741 struct svc_fh *save_fh = NULL; 823 struct nfsd4_compound_state *cstate = NULL;
742 struct nfs4_stateowner *replay_owner = NULL; 824 int slack_bytes;
743 int slack_space; /* in words, not bytes! */
744 __be32 status; 825 __be32 status;
745 826
746 status = nfserr_resource; 827 status = nfserr_resource;
747 current_fh = kmalloc(sizeof(*current_fh), GFP_KERNEL); 828 cstate = cstate_alloc();
748 if (current_fh == NULL) 829 if (cstate == NULL)
749 goto out;
750 fh_init(current_fh, NFS4_FHSIZE);
751 save_fh = kmalloc(sizeof(*save_fh), GFP_KERNEL);
752 if (save_fh == NULL)
753 goto out; 830 goto out;
754 fh_init(save_fh, NFS4_FHSIZE);
755 831
756 resp->xbuf = &rqstp->rq_res; 832 resp->xbuf = &rqstp->rq_res;
757 resp->p = rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len; 833 resp->p = rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len;
@@ -790,164 +866,44 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
790 * failed response to the next operation. If we don't 866 * failed response to the next operation. If we don't
791 * have enough room, fail with ERR_RESOURCE. 867 * have enough room, fail with ERR_RESOURCE.
792 */ 868 */
793/* FIXME - is slack_space *really* words, or bytes??? - neilb */ 869 slack_bytes = (char *)resp->end - (char *)resp->p;
794 slack_space = (char *)resp->end - (char *)resp->p; 870 if (slack_bytes < COMPOUND_SLACK_SPACE
795 if (slack_space < COMPOUND_SLACK_SPACE + COMPOUND_ERR_SLACK_SPACE) { 871 + COMPOUND_ERR_SLACK_SPACE) {
796 BUG_ON(slack_space < COMPOUND_ERR_SLACK_SPACE); 872 BUG_ON(slack_bytes < COMPOUND_ERR_SLACK_SPACE);
797 op->status = nfserr_resource; 873 op->status = nfserr_resource;
798 goto encode_op; 874 goto encode_op;
799 } 875 }
800 876
801 /* All operations except RENEW, SETCLIENTID, RESTOREFH 877 opdesc = &nfsd4_ops[op->opnum];
802 * SETCLIENTID_CONFIRM, PUTFH and PUTROOTFH 878
803 * require a valid current filehandle 879 if (!cstate->current_fh.fh_dentry) {
804 */ 880 if (!(opdesc->op_flags & ALLOWED_WITHOUT_FH)) {
805 if (!current_fh->fh_dentry) {
806 if (!((op->opnum == OP_PUTFH) ||
807 (op->opnum == OP_PUTROOTFH) ||
808 (op->opnum == OP_SETCLIENTID) ||
809 (op->opnum == OP_SETCLIENTID_CONFIRM) ||
810 (op->opnum == OP_RENEW) ||
811 (op->opnum == OP_RESTOREFH) ||
812 (op->opnum == OP_RELEASE_LOCKOWNER))) {
813 op->status = nfserr_nofilehandle; 881 op->status = nfserr_nofilehandle;
814 goto encode_op; 882 goto encode_op;
815 } 883 }
816 } 884 } else if (cstate->current_fh.fh_export->ex_fslocs.migrated &&
817 /* Check must be done at start of each operation, except 885 !(opdesc->op_flags & ALLOWED_ON_ABSENT_FS)) {
818 * for GETATTR and ops not listed as returning NFS4ERR_MOVED
819 */
820 else if (current_fh->fh_export->ex_fslocs.migrated &&
821 !((op->opnum == OP_GETATTR) ||
822 (op->opnum == OP_PUTROOTFH) ||
823 (op->opnum == OP_PUTPUBFH) ||
824 (op->opnum == OP_RENEW) ||
825 (op->opnum == OP_SETCLIENTID) ||
826 (op->opnum == OP_RELEASE_LOCKOWNER))) {
827 op->status = nfserr_moved; 886 op->status = nfserr_moved;
828 goto encode_op; 887 goto encode_op;
829 } 888 }
830 switch (op->opnum) { 889
831 case OP_ACCESS: 890 if (opdesc->op_func)
832 op->status = nfsd4_access(rqstp, current_fh, &op->u.access); 891 op->status = opdesc->op_func(rqstp, cstate, &op->u);
833 break; 892 else
834 case OP_CLOSE:
835 op->status = nfsd4_close(rqstp, current_fh, &op->u.close, &replay_owner);
836 break;
837 case OP_COMMIT:
838 op->status = nfsd4_commit(rqstp, current_fh, &op->u.commit);
839 break;
840 case OP_CREATE:
841 op->status = nfsd4_create(rqstp, current_fh, &op->u.create);
842 break;
843 case OP_DELEGRETURN:
844 op->status = nfsd4_delegreturn(rqstp, current_fh, &op->u.delegreturn);
845 break;
846 case OP_GETATTR:
847 op->status = nfsd4_getattr(rqstp, current_fh, &op->u.getattr);
848 break;
849 case OP_GETFH:
850 op->status = nfsd4_getfh(current_fh, &op->u.getfh);
851 break;
852 case OP_LINK:
853 op->status = nfsd4_link(rqstp, current_fh, save_fh, &op->u.link);
854 break;
855 case OP_LOCK:
856 op->status = nfsd4_lock(rqstp, current_fh, &op->u.lock, &replay_owner);
857 break;
858 case OP_LOCKT:
859 op->status = nfsd4_lockt(rqstp, current_fh, &op->u.lockt);
860 break;
861 case OP_LOCKU:
862 op->status = nfsd4_locku(rqstp, current_fh, &op->u.locku, &replay_owner);
863 break;
864 case OP_LOOKUP:
865 op->status = nfsd4_lookup(rqstp, current_fh, &op->u.lookup);
866 break;
867 case OP_LOOKUPP:
868 op->status = nfsd4_lookupp(rqstp, current_fh);
869 break;
870 case OP_NVERIFY:
871 op->status = nfsd4_verify(rqstp, current_fh, &op->u.nverify);
872 if (op->status == nfserr_not_same)
873 op->status = nfs_ok;
874 break;
875 case OP_OPEN:
876 op->status = nfsd4_open(rqstp, current_fh, &op->u.open, &replay_owner);
877 break;
878 case OP_OPEN_CONFIRM:
879 op->status = nfsd4_open_confirm(rqstp, current_fh, &op->u.open_confirm, &replay_owner);
880 break;
881 case OP_OPEN_DOWNGRADE:
882 op->status = nfsd4_open_downgrade(rqstp, current_fh, &op->u.open_downgrade, &replay_owner);
883 break;
884 case OP_PUTFH:
885 op->status = nfsd4_putfh(rqstp, current_fh, &op->u.putfh);
886 break;
887 case OP_PUTROOTFH:
888 op->status = nfsd4_putrootfh(rqstp, current_fh);
889 break;
890 case OP_READ:
891 op->status = nfsd4_read(rqstp, current_fh, &op->u.read);
892 break;
893 case OP_READDIR:
894 op->status = nfsd4_readdir(rqstp, current_fh, &op->u.readdir);
895 break;
896 case OP_READLINK:
897 op->status = nfsd4_readlink(rqstp, current_fh, &op->u.readlink);
898 break;
899 case OP_REMOVE:
900 op->status = nfsd4_remove(rqstp, current_fh, &op->u.remove);
901 break;
902 case OP_RENAME:
903 op->status = nfsd4_rename(rqstp, current_fh, save_fh, &op->u.rename);
904 break;
905 case OP_RENEW:
906 op->status = nfsd4_renew(&op->u.renew);
907 break;
908 case OP_RESTOREFH:
909 op->status = nfsd4_restorefh(current_fh, save_fh);
910 break;
911 case OP_SAVEFH:
912 op->status = nfsd4_savefh(current_fh, save_fh);
913 break;
914 case OP_SETATTR:
915 op->status = nfsd4_setattr(rqstp, current_fh, &op->u.setattr);
916 break;
917 case OP_SETCLIENTID:
918 op->status = nfsd4_setclientid(rqstp, &op->u.setclientid);
919 break;
920 case OP_SETCLIENTID_CONFIRM:
921 op->status = nfsd4_setclientid_confirm(rqstp, &op->u.setclientid_confirm);
922 break;
923 case OP_VERIFY:
924 op->status = nfsd4_verify(rqstp, current_fh, &op->u.verify);
925 if (op->status == nfserr_same)
926 op->status = nfs_ok;
927 break;
928 case OP_WRITE:
929 op->status = nfsd4_write(rqstp, current_fh, &op->u.write);
930 break;
931 case OP_RELEASE_LOCKOWNER:
932 op->status = nfsd4_release_lockowner(rqstp, &op->u.release_lockowner);
933 break;
934 default:
935 BUG_ON(op->status == nfs_ok); 893 BUG_ON(op->status == nfs_ok);
936 break;
937 }
938 894
939encode_op: 895encode_op:
940 if (op->status == nfserr_replay_me) { 896 if (op->status == nfserr_replay_me) {
941 op->replay = &replay_owner->so_replay; 897 op->replay = &cstate->replay_owner->so_replay;
942 nfsd4_encode_replay(resp, op); 898 nfsd4_encode_replay(resp, op);
943 status = op->status = op->replay->rp_status; 899 status = op->status = op->replay->rp_status;
944 } else { 900 } else {
945 nfsd4_encode_operation(resp, op); 901 nfsd4_encode_operation(resp, op);
946 status = op->status; 902 status = op->status;
947 } 903 }
948 if (replay_owner && (replay_owner != (void *)(-1))) { 904 if (cstate->replay_owner) {
949 nfs4_put_stateowner(replay_owner); 905 nfs4_put_stateowner(cstate->replay_owner);
950 replay_owner = NULL; 906 cstate->replay_owner = NULL;
951 } 907 }
952 /* XXX Ugh, we need to get rid of this kind of special case: */ 908 /* XXX Ugh, we need to get rid of this kind of special case: */
953 if (op->opnum == OP_READ && op->u.read.rd_filp) 909 if (op->opnum == OP_READ && op->u.read.rd_filp)
@@ -958,15 +914,124 @@ encode_op:
958 914
959out: 915out:
960 nfsd4_release_compoundargs(args); 916 nfsd4_release_compoundargs(args);
961 if (current_fh) 917 cstate_free(cstate);
962 fh_put(current_fh);
963 kfree(current_fh);
964 if (save_fh)
965 fh_put(save_fh);
966 kfree(save_fh);
967 return status; 918 return status;
968} 919}
969 920
921static struct nfsd4_operation nfsd4_ops[OP_RELEASE_LOCKOWNER+1] = {
922 [OP_ACCESS] = {
923 .op_func = (nfsd4op_func)nfsd4_access,
924 },
925 [OP_CLOSE] = {
926 .op_func = (nfsd4op_func)nfsd4_close,
927 },
928 [OP_COMMIT] = {
929 .op_func = (nfsd4op_func)nfsd4_commit,
930 },
931 [OP_CREATE] = {
932 .op_func = (nfsd4op_func)nfsd4_create,
933 },
934 [OP_DELEGRETURN] = {
935 .op_func = (nfsd4op_func)nfsd4_delegreturn,
936 },
937 [OP_GETATTR] = {
938 .op_func = (nfsd4op_func)nfsd4_getattr,
939 .op_flags = ALLOWED_ON_ABSENT_FS,
940 },
941 [OP_GETFH] = {
942 .op_func = (nfsd4op_func)nfsd4_getfh,
943 },
944 [OP_LINK] = {
945 .op_func = (nfsd4op_func)nfsd4_link,
946 },
947 [OP_LOCK] = {
948 .op_func = (nfsd4op_func)nfsd4_lock,
949 },
950 [OP_LOCKT] = {
951 .op_func = (nfsd4op_func)nfsd4_lockt,
952 },
953 [OP_LOCKU] = {
954 .op_func = (nfsd4op_func)nfsd4_locku,
955 },
956 [OP_LOOKUP] = {
957 .op_func = (nfsd4op_func)nfsd4_lookup,
958 },
959 [OP_LOOKUPP] = {
960 .op_func = (nfsd4op_func)nfsd4_lookupp,
961 },
962 [OP_NVERIFY] = {
963 .op_func = (nfsd4op_func)nfsd4_nverify,
964 },
965 [OP_OPEN] = {
966 .op_func = (nfsd4op_func)nfsd4_open,
967 },
968 [OP_OPEN_CONFIRM] = {
969 .op_func = (nfsd4op_func)nfsd4_open_confirm,
970 },
971 [OP_OPEN_DOWNGRADE] = {
972 .op_func = (nfsd4op_func)nfsd4_open_downgrade,
973 },
974 [OP_PUTFH] = {
975 .op_func = (nfsd4op_func)nfsd4_putfh,
976 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
977 },
978 [OP_PUTPUBFH] = {
979 /* unsupported; just for future reference: */
980 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
981 },
982 [OP_PUTROOTFH] = {
983 .op_func = (nfsd4op_func)nfsd4_putrootfh,
984 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
985 },
986 [OP_READ] = {
987 .op_func = (nfsd4op_func)nfsd4_read,
988 },
989 [OP_READDIR] = {
990 .op_func = (nfsd4op_func)nfsd4_readdir,
991 },
992 [OP_READLINK] = {
993 .op_func = (nfsd4op_func)nfsd4_readlink,
994 },
995 [OP_REMOVE] = {
996 .op_func = (nfsd4op_func)nfsd4_remove,
997 },
998 [OP_RENAME] = {
999 .op_func = (nfsd4op_func)nfsd4_rename,
1000 },
1001 [OP_RENEW] = {
1002 .op_func = (nfsd4op_func)nfsd4_renew,
1003 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
1004 },
1005 [OP_RESTOREFH] = {
1006 .op_func = (nfsd4op_func)nfsd4_restorefh,
1007 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
1008 },
1009 [OP_SAVEFH] = {
1010 .op_func = (nfsd4op_func)nfsd4_savefh,
1011 },
1012 [OP_SETATTR] = {
1013 .op_func = (nfsd4op_func)nfsd4_setattr,
1014 },
1015 [OP_SETCLIENTID] = {
1016 .op_func = (nfsd4op_func)nfsd4_setclientid,
1017 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
1018 },
1019 [OP_SETCLIENTID_CONFIRM] = {
1020 .op_func = (nfsd4op_func)nfsd4_setclientid_confirm,
1021 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
1022 },
1023 [OP_VERIFY] = {
1024 .op_func = (nfsd4op_func)nfsd4_verify,
1025 },
1026 [OP_WRITE] = {
1027 .op_func = (nfsd4op_func)nfsd4_write,
1028 },
1029 [OP_RELEASE_LOCKOWNER] = {
1030 .op_func = (nfsd4op_func)nfsd4_release_lockowner,
1031 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
1032 },
1033};
1034
970#define nfs4svc_decode_voidargs NULL 1035#define nfs4svc_decode_voidargs NULL
971#define nfs4svc_release_void NULL 1036#define nfs4svc_release_void NULL
972#define nfsd4_voidres nfsd4_voidargs 1037#define nfsd4_voidres nfsd4_voidargs
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index b7179bd45a1e..9de89df961f4 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -711,7 +711,8 @@ out_err:
711 * 711 *
712 */ 712 */
713__be32 713__be32
714nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_setclientid *setclid) 714nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
715 struct nfsd4_setclientid *setclid)
715{ 716{
716 __be32 ip_addr = rqstp->rq_addr.sin_addr.s_addr; 717 __be32 ip_addr = rqstp->rq_addr.sin_addr.s_addr;
717 struct xdr_netobj clname = { 718 struct xdr_netobj clname = {
@@ -876,7 +877,9 @@ out:
876 * NOTE: callback information will be processed here in a future patch 877 * NOTE: callback information will be processed here in a future patch
877 */ 878 */
878__be32 879__be32
879nfsd4_setclientid_confirm(struct svc_rqst *rqstp, struct nfsd4_setclientid_confirm *setclientid_confirm) 880nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
881 struct nfsd4_compound_state *cstate,
882 struct nfsd4_setclientid_confirm *setclientid_confirm)
880{ 883{
881 __be32 ip_addr = rqstp->rq_addr.sin_addr.s_addr; 884 __be32 ip_addr = rqstp->rq_addr.sin_addr.s_addr;
882 struct nfs4_client *conf, *unconf; 885 struct nfs4_client *conf, *unconf;
@@ -1833,7 +1836,8 @@ static void laundromat_main(struct work_struct *);
1833static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main); 1836static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
1834 1837
1835__be32 1838__be32
1836nfsd4_renew(clientid_t *clid) 1839nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1840 clientid_t *clid)
1837{ 1841{
1838 struct nfs4_client *clp; 1842 struct nfs4_client *clp;
1839 __be32 status; 1843 __be32 status;
@@ -2241,24 +2245,25 @@ check_replay:
2241} 2245}
2242 2246
2243__be32 2247__be32
2244nfsd4_open_confirm(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open_confirm *oc, struct nfs4_stateowner **replay_owner) 2248nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2249 struct nfsd4_open_confirm *oc)
2245{ 2250{
2246 __be32 status; 2251 __be32 status;
2247 struct nfs4_stateowner *sop; 2252 struct nfs4_stateowner *sop;
2248 struct nfs4_stateid *stp; 2253 struct nfs4_stateid *stp;
2249 2254
2250 dprintk("NFSD: nfsd4_open_confirm on file %.*s\n", 2255 dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
2251 (int)current_fh->fh_dentry->d_name.len, 2256 (int)cstate->current_fh.fh_dentry->d_name.len,
2252 current_fh->fh_dentry->d_name.name); 2257 cstate->current_fh.fh_dentry->d_name.name);
2253 2258
2254 status = fh_verify(rqstp, current_fh, S_IFREG, 0); 2259 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
2255 if (status) 2260 if (status)
2256 return status; 2261 return status;
2257 2262
2258 nfs4_lock_state(); 2263 nfs4_lock_state();
2259 2264
2260 if ((status = nfs4_preprocess_seqid_op(current_fh, oc->oc_seqid, 2265 if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh,
2261 &oc->oc_req_stateid, 2266 oc->oc_seqid, &oc->oc_req_stateid,
2262 CHECK_FH | CONFIRM | OPEN_STATE, 2267 CHECK_FH | CONFIRM | OPEN_STATE,
2263 &oc->oc_stateowner, &stp, NULL))) 2268 &oc->oc_stateowner, &stp, NULL)))
2264 goto out; 2269 goto out;
@@ -2278,7 +2283,7 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfs
2278out: 2283out:
2279 if (oc->oc_stateowner) { 2284 if (oc->oc_stateowner) {
2280 nfs4_get_stateowner(oc->oc_stateowner); 2285 nfs4_get_stateowner(oc->oc_stateowner);
2281 *replay_owner = oc->oc_stateowner; 2286 cstate->replay_owner = oc->oc_stateowner;
2282 } 2287 }
2283 nfs4_unlock_state(); 2288 nfs4_unlock_state();
2284 return status; 2289 return status;
@@ -2310,22 +2315,25 @@ reset_union_bmap_deny(unsigned long deny, unsigned long *bmap)
2310} 2315}
2311 2316
2312__be32 2317__be32
2313nfsd4_open_downgrade(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open_downgrade *od, struct nfs4_stateowner **replay_owner) 2318nfsd4_open_downgrade(struct svc_rqst *rqstp,
2319 struct nfsd4_compound_state *cstate,
2320 struct nfsd4_open_downgrade *od)
2314{ 2321{
2315 __be32 status; 2322 __be32 status;
2316 struct nfs4_stateid *stp; 2323 struct nfs4_stateid *stp;
2317 unsigned int share_access; 2324 unsigned int share_access;
2318 2325
2319 dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n", 2326 dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n",
2320 (int)current_fh->fh_dentry->d_name.len, 2327 (int)cstate->current_fh.fh_dentry->d_name.len,
2321 current_fh->fh_dentry->d_name.name); 2328 cstate->current_fh.fh_dentry->d_name.name);
2322 2329
2323 if (!access_valid(od->od_share_access) 2330 if (!access_valid(od->od_share_access)
2324 || !deny_valid(od->od_share_deny)) 2331 || !deny_valid(od->od_share_deny))
2325 return nfserr_inval; 2332 return nfserr_inval;
2326 2333
2327 nfs4_lock_state(); 2334 nfs4_lock_state();
2328 if ((status = nfs4_preprocess_seqid_op(current_fh, od->od_seqid, 2335 if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh,
2336 od->od_seqid,
2329 &od->od_stateid, 2337 &od->od_stateid,
2330 CHECK_FH | OPEN_STATE, 2338 CHECK_FH | OPEN_STATE,
2331 &od->od_stateowner, &stp, NULL))) 2339 &od->od_stateowner, &stp, NULL)))
@@ -2355,7 +2363,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct n
2355out: 2363out:
2356 if (od->od_stateowner) { 2364 if (od->od_stateowner) {
2357 nfs4_get_stateowner(od->od_stateowner); 2365 nfs4_get_stateowner(od->od_stateowner);
2358 *replay_owner = od->od_stateowner; 2366 cstate->replay_owner = od->od_stateowner;
2359 } 2367 }
2360 nfs4_unlock_state(); 2368 nfs4_unlock_state();
2361 return status; 2369 return status;
@@ -2365,18 +2373,20 @@ out:
2365 * nfs4_unlock_state() called after encode 2373 * nfs4_unlock_state() called after encode
2366 */ 2374 */
2367__be32 2375__be32
2368nfsd4_close(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_close *close, struct nfs4_stateowner **replay_owner) 2376nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2377 struct nfsd4_close *close)
2369{ 2378{
2370 __be32 status; 2379 __be32 status;
2371 struct nfs4_stateid *stp; 2380 struct nfs4_stateid *stp;
2372 2381
2373 dprintk("NFSD: nfsd4_close on file %.*s\n", 2382 dprintk("NFSD: nfsd4_close on file %.*s\n",
2374 (int)current_fh->fh_dentry->d_name.len, 2383 (int)cstate->current_fh.fh_dentry->d_name.len,
2375 current_fh->fh_dentry->d_name.name); 2384 cstate->current_fh.fh_dentry->d_name.name);
2376 2385
2377 nfs4_lock_state(); 2386 nfs4_lock_state();
2378 /* check close_lru for replay */ 2387 /* check close_lru for replay */
2379 if ((status = nfs4_preprocess_seqid_op(current_fh, close->cl_seqid, 2388 if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh,
2389 close->cl_seqid,
2380 &close->cl_stateid, 2390 &close->cl_stateid,
2381 CHECK_FH | OPEN_STATE | CLOSE_STATE, 2391 CHECK_FH | OPEN_STATE | CLOSE_STATE,
2382 &close->cl_stateowner, &stp, NULL))) 2392 &close->cl_stateowner, &stp, NULL)))
@@ -2397,22 +2407,24 @@ nfsd4_close(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_clos
2397out: 2407out:
2398 if (close->cl_stateowner) { 2408 if (close->cl_stateowner) {
2399 nfs4_get_stateowner(close->cl_stateowner); 2409 nfs4_get_stateowner(close->cl_stateowner);
2400 *replay_owner = close->cl_stateowner; 2410 cstate->replay_owner = close->cl_stateowner;
2401 } 2411 }
2402 nfs4_unlock_state(); 2412 nfs4_unlock_state();
2403 return status; 2413 return status;
2404} 2414}
2405 2415
2406__be32 2416__be32
2407nfsd4_delegreturn(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_delegreturn *dr) 2417nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2418 struct nfsd4_delegreturn *dr)
2408{ 2419{
2409 __be32 status; 2420 __be32 status;
2410 2421
2411 if ((status = fh_verify(rqstp, current_fh, S_IFREG, 0))) 2422 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
2412 goto out; 2423 goto out;
2413 2424
2414 nfs4_lock_state(); 2425 nfs4_lock_state();
2415 status = nfs4_preprocess_stateid_op(current_fh, &dr->dr_stateid, DELEG_RET, NULL); 2426 status = nfs4_preprocess_stateid_op(&cstate->current_fh,
2427 &dr->dr_stateid, DELEG_RET, NULL);
2416 nfs4_unlock_state(); 2428 nfs4_unlock_state();
2417out: 2429out:
2418 return status; 2430 return status;
@@ -2635,7 +2647,8 @@ check_lock_length(u64 offset, u64 length)
2635 * LOCK operation 2647 * LOCK operation
2636 */ 2648 */
2637__be32 2649__be32
2638nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock *lock, struct nfs4_stateowner **replay_owner) 2650nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2651 struct nfsd4_lock *lock)
2639{ 2652{
2640 struct nfs4_stateowner *open_sop = NULL; 2653 struct nfs4_stateowner *open_sop = NULL;
2641 struct nfs4_stateowner *lock_sop = NULL; 2654 struct nfs4_stateowner *lock_sop = NULL;
@@ -2654,7 +2667,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2654 if (check_lock_length(lock->lk_offset, lock->lk_length)) 2667 if (check_lock_length(lock->lk_offset, lock->lk_length))
2655 return nfserr_inval; 2668 return nfserr_inval;
2656 2669
2657 if ((status = fh_verify(rqstp, current_fh, S_IFREG, MAY_LOCK))) { 2670 if ((status = fh_verify(rqstp, &cstate->current_fh,
2671 S_IFREG, MAY_LOCK))) {
2658 dprintk("NFSD: nfsd4_lock: permission denied!\n"); 2672 dprintk("NFSD: nfsd4_lock: permission denied!\n");
2659 return status; 2673 return status;
2660 } 2674 }
@@ -2675,7 +2689,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2675 goto out; 2689 goto out;
2676 2690
2677 /* validate and update open stateid and open seqid */ 2691 /* validate and update open stateid and open seqid */
2678 status = nfs4_preprocess_seqid_op(current_fh, 2692 status = nfs4_preprocess_seqid_op(&cstate->current_fh,
2679 lock->lk_new_open_seqid, 2693 lock->lk_new_open_seqid,
2680 &lock->lk_new_open_stateid, 2694 &lock->lk_new_open_stateid,
2681 CHECK_FH | OPEN_STATE, 2695 CHECK_FH | OPEN_STATE,
@@ -2702,7 +2716,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2702 goto out; 2716 goto out;
2703 } else { 2717 } else {
2704 /* lock (lock owner + lock stateid) already exists */ 2718 /* lock (lock owner + lock stateid) already exists */
2705 status = nfs4_preprocess_seqid_op(current_fh, 2719 status = nfs4_preprocess_seqid_op(&cstate->current_fh,
2706 lock->lk_old_lock_seqid, 2720 lock->lk_old_lock_seqid,
2707 &lock->lk_old_lock_stateid, 2721 &lock->lk_old_lock_stateid,
2708 CHECK_FH | LOCK_STATE, 2722 CHECK_FH | LOCK_STATE,
@@ -2759,7 +2773,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2759 conflock.fl_ops = NULL; 2773 conflock.fl_ops = NULL;
2760 conflock.fl_lmops = NULL; 2774 conflock.fl_lmops = NULL;
2761 err = posix_lock_file_conf(filp, &file_lock, &conflock); 2775 err = posix_lock_file_conf(filp, &file_lock, &conflock);
2762 dprintk("NFSD: nfsd4_lock: posix_lock_file_conf status %d\n",status);
2763 switch (-err) { 2776 switch (-err) {
2764 case 0: /* success! */ 2777 case 0: /* success! */
2765 update_stateid(&lock_stp->st_stateid); 2778 update_stateid(&lock_stp->st_stateid);
@@ -2785,7 +2798,7 @@ out:
2785 release_stateowner(lock_sop); 2798 release_stateowner(lock_sop);
2786 if (lock->lk_replay_owner) { 2799 if (lock->lk_replay_owner) {
2787 nfs4_get_stateowner(lock->lk_replay_owner); 2800 nfs4_get_stateowner(lock->lk_replay_owner);
2788 *replay_owner = lock->lk_replay_owner; 2801 cstate->replay_owner = lock->lk_replay_owner;
2789 } 2802 }
2790 nfs4_unlock_state(); 2803 nfs4_unlock_state();
2791 return status; 2804 return status;
@@ -2795,7 +2808,8 @@ out:
2795 * LOCKT operation 2808 * LOCKT operation
2796 */ 2809 */
2797__be32 2810__be32
2798nfsd4_lockt(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lockt *lockt) 2811nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2812 struct nfsd4_lockt *lockt)
2799{ 2813{
2800 struct inode *inode; 2814 struct inode *inode;
2801 struct file file; 2815 struct file file;
@@ -2816,14 +2830,14 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2816 if (STALE_CLIENTID(&lockt->lt_clientid)) 2830 if (STALE_CLIENTID(&lockt->lt_clientid))
2817 goto out; 2831 goto out;
2818 2832
2819 if ((status = fh_verify(rqstp, current_fh, S_IFREG, 0))) { 2833 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) {
2820 dprintk("NFSD: nfsd4_lockt: fh_verify() failed!\n"); 2834 dprintk("NFSD: nfsd4_lockt: fh_verify() failed!\n");
2821 if (status == nfserr_symlink) 2835 if (status == nfserr_symlink)
2822 status = nfserr_inval; 2836 status = nfserr_inval;
2823 goto out; 2837 goto out;
2824 } 2838 }
2825 2839
2826 inode = current_fh->fh_dentry->d_inode; 2840 inode = cstate->current_fh.fh_dentry->d_inode;
2827 locks_init_lock(&file_lock); 2841 locks_init_lock(&file_lock);
2828 switch (lockt->lt_type) { 2842 switch (lockt->lt_type) {
2829 case NFS4_READ_LT: 2843 case NFS4_READ_LT:
@@ -2862,7 +2876,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2862 * only the dentry:inode set. 2876 * only the dentry:inode set.
2863 */ 2877 */
2864 memset(&file, 0, sizeof (struct file)); 2878 memset(&file, 0, sizeof (struct file));
2865 file.f_path.dentry = current_fh->fh_dentry; 2879 file.f_path.dentry = cstate->current_fh.fh_dentry;
2866 2880
2867 status = nfs_ok; 2881 status = nfs_ok;
2868 if (posix_test_lock(&file, &file_lock, &conflock)) { 2882 if (posix_test_lock(&file, &file_lock, &conflock)) {
@@ -2875,7 +2889,8 @@ out:
2875} 2889}
2876 2890
2877__be32 2891__be32
2878nfsd4_locku(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_locku *locku, struct nfs4_stateowner **replay_owner) 2892nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2893 struct nfsd4_locku *locku)
2879{ 2894{
2880 struct nfs4_stateid *stp; 2895 struct nfs4_stateid *stp;
2881 struct file *filp = NULL; 2896 struct file *filp = NULL;
@@ -2892,7 +2907,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2892 2907
2893 nfs4_lock_state(); 2908 nfs4_lock_state();
2894 2909
2895 if ((status = nfs4_preprocess_seqid_op(current_fh, 2910 if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh,
2896 locku->lu_seqid, 2911 locku->lu_seqid,
2897 &locku->lu_stateid, 2912 &locku->lu_stateid,
2898 CHECK_FH | LOCK_STATE, 2913 CHECK_FH | LOCK_STATE,
@@ -2933,7 +2948,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2933out: 2948out:
2934 if (locku->lu_stateowner) { 2949 if (locku->lu_stateowner) {
2935 nfs4_get_stateowner(locku->lu_stateowner); 2950 nfs4_get_stateowner(locku->lu_stateowner);
2936 *replay_owner = locku->lu_stateowner; 2951 cstate->replay_owner = locku->lu_stateowner;
2937 } 2952 }
2938 nfs4_unlock_state(); 2953 nfs4_unlock_state();
2939 return status; 2954 return status;
@@ -2968,7 +2983,9 @@ out:
2968} 2983}
2969 2984
2970__be32 2985__be32
2971nfsd4_release_lockowner(struct svc_rqst *rqstp, struct nfsd4_release_lockowner *rlockowner) 2986nfsd4_release_lockowner(struct svc_rqst *rqstp,
2987 struct nfsd4_compound_state *cstate,
2988 struct nfsd4_release_lockowner *rlockowner)
2972{ 2989{
2973 clientid_t *clid = &rlockowner->rl_clientid; 2990 clientid_t *clid = &rlockowner->rl_clientid;
2974 struct nfs4_stateowner *sop; 2991 struct nfs4_stateowner *sop;
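
The nfs4state.c hunks above all follow one conversion: the per-operation handlers lose their separate struct svc_fh *current_fh and struct nfs4_stateowner **replay_owner parameters and take a single struct nfsd4_compound_state *cstate instead, so file-handle accesses become &cstate->current_fh and replay owners are stored in cstate->replay_owner. A minimal sketch of the shape of that change; the struct layout and the nfsd4_op_old/nfsd4_op_new names are illustrative only, the real nfsd4_compound_state carries more than these two fields:

/* Sketch: the two ad hoc parameters are folded into the compound state. */
struct nfsd4_compound_state {
	struct svc_fh current_fh;		/* embedded, hence &cstate->current_fh */
	struct nfs4_stateowner *replay_owner;	/* replaces the **replay_owner out-param */
};

/* old shape */
__be32 nfsd4_op_old(struct svc_rqst *rqstp, struct svc_fh *current_fh,
		    struct nfsd4_close *close, struct nfs4_stateowner **replay_owner);

/* new shape */
__be32 nfsd4_op_new(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		    struct nfsd4_close *close);
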
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index f3f239db04bb..fea46368afb2 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1845,15 +1845,11 @@ nfsd4_encode_dirent_fattr(struct nfsd4_readdir *cd,
1845 1845
1846 exp_get(exp); 1846 exp_get(exp);
1847 if (d_mountpoint(dentry)) { 1847 if (d_mountpoint(dentry)) {
1848 if (nfsd_cross_mnt(cd->rd_rqstp, &dentry, &exp)) { 1848 int err;
1849 /* 1849
1850 * -EAGAIN is the only error returned from 1850 err = nfsd_cross_mnt(cd->rd_rqstp, &dentry, &exp);
1851 * nfsd_cross_mnt() and it indicates that an 1851 if (err) {
1852 * up-call has been initiated to fill in the export 1852 nfserr = nfserrno(err);
1853 * options on exp. When the answer comes back,
1854 * this call will be retried.
1855 */
1856 nfserr = nfserr_dropit;
1857 goto out_put; 1853 goto out_put;
1858 } 1854 }
1859 1855
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 727ab3bd450d..b06bf9f70efc 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -169,9 +169,11 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
169 exp = exp_find(rqstp->rq_client, 0, tfh, &rqstp->rq_chandle); 169 exp = exp_find(rqstp->rq_client, 0, tfh, &rqstp->rq_chandle);
170 } 170 }
171 171
172 error = nfserr_dropit; 172 if (IS_ERR(exp) && (PTR_ERR(exp) == -EAGAIN
173 if (IS_ERR(exp) && PTR_ERR(exp) == -EAGAIN) 173 || PTR_ERR(exp) == -ETIMEDOUT)) {
174 error = nfserrno(PTR_ERR(exp));
174 goto out; 175 goto out;
176 }
175 177
176 error = nfserr_stale; 178 error = nfserr_stale;
177 if (!exp || IS_ERR(exp)) 179 if (!exp || IS_ERR(exp))
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 4883d7586229..7a79c23aa6d4 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -99,7 +99,7 @@ static struct raparm_hbucket raparm_hash[RAPARM_HASH_SIZE];
99/* 99/*
100 * Called from nfsd_lookup and encode_dirent. Check if we have crossed 100 * Called from nfsd_lookup and encode_dirent. Check if we have crossed
101 * a mount point. 101 * a mount point.
102 * Returns -EAGAIN leaving *dpp and *expp unchanged, 102 * Returns -EAGAIN or -ETIMEDOUT leaving *dpp and *expp unchanged,
103 * or nfs_ok having possibly changed *dpp and *expp 103 * or nfs_ok having possibly changed *dpp and *expp
104 */ 104 */
105int 105int
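
The three hunks above (nfs4xdr.c, nfsfh.c and the vfs.c comment) share one change: instead of assuming the export lookup can only fail with -EAGAIN and hard-coding nfserr_dropit, the raw errno is passed through nfserrno(), so an -ETIMEDOUT result from the upcall is handled as well. A sketch of the idiom, assuming nfserrno() maps -EAGAIN and -ETIMEDOUT to the status the caller expects:

	err = nfsd_cross_mnt(cd->rd_rqstp, &dentry, &exp);
	if (err) {
		/* propagate whatever the upcall returned; no longer
		 * collapsed to nfserr_dropit by hand */
		nfserr = nfserrno(err);
		goto out_put;
	}
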
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index edc91ca3792a..f27e5378caf2 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1959,7 +1959,7 @@ int ocfs2_prepare_truncate(struct ocfs2_super *osb,
1959 goto bail; 1959 goto bail;
1960 } 1960 }
1961 1961
1962 *tc = kcalloc(1, sizeof(struct ocfs2_truncate_context), GFP_KERNEL); 1962 *tc = kzalloc(sizeof(struct ocfs2_truncate_context), GFP_KERNEL);
1963 if (!(*tc)) { 1963 if (!(*tc)) {
1964 status = -ENOMEM; 1964 status = -ENOMEM;
1965 mlog_errno(status); 1965 mlog_errno(status);
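
This hunk and the ocfs2 hunks that follow all make the same substitution: kcalloc(1, size, flags) becomes kzalloc(size, flags). Both return zeroed memory; kcalloc only adds an overflow-checked multiply that is pointless when the element count is 1. A sketch with a made-up struct foo:

	struct foo *a = kcalloc(1, sizeof(*a), GFP_KERNEL);	/* old: n * size, n == 1 */
	struct foo *b = kzalloc(sizeof(*b), GFP_KERNEL);	/* new: same zeroed object */

The genuine array allocation, kcalloc(3, sizeof(struct config_group *), ...) in o2nm_cluster_group_make_group(), is left untouched.
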
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 4cd9a9580456..a25ef5a50386 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1553,7 +1553,7 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
1553 struct o2hb_region *reg = NULL; 1553 struct o2hb_region *reg = NULL;
1554 struct config_item *ret = NULL; 1554 struct config_item *ret = NULL;
1555 1555
1556 reg = kcalloc(1, sizeof(struct o2hb_region), GFP_KERNEL); 1556 reg = kzalloc(sizeof(struct o2hb_region), GFP_KERNEL);
1557 if (reg == NULL) 1557 if (reg == NULL)
1558 goto out; /* ENOMEM */ 1558 goto out; /* ENOMEM */
1559 1559
@@ -1679,7 +1679,7 @@ struct config_group *o2hb_alloc_hb_set(void)
1679 struct o2hb_heartbeat_group *hs = NULL; 1679 struct o2hb_heartbeat_group *hs = NULL;
1680 struct config_group *ret = NULL; 1680 struct config_group *ret = NULL;
1681 1681
1682 hs = kcalloc(1, sizeof(struct o2hb_heartbeat_group), GFP_KERNEL); 1682 hs = kzalloc(sizeof(struct o2hb_heartbeat_group), GFP_KERNEL);
1683 if (hs == NULL) 1683 if (hs == NULL)
1684 goto out; 1684 goto out;
1685 1685
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index 357f1d551771..b17333a0606b 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -714,7 +714,7 @@ static struct config_item *o2nm_node_group_make_item(struct config_group *group,
714 if (strlen(name) > O2NM_MAX_NAME_LEN) 714 if (strlen(name) > O2NM_MAX_NAME_LEN)
715 goto out; /* ENAMETOOLONG */ 715 goto out; /* ENAMETOOLONG */
716 716
717 node = kcalloc(1, sizeof(struct o2nm_node), GFP_KERNEL); 717 node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL);
718 if (node == NULL) 718 if (node == NULL)
719 goto out; /* ENOMEM */ 719 goto out; /* ENOMEM */
720 720
@@ -825,8 +825,8 @@ static struct config_group *o2nm_cluster_group_make_group(struct config_group *g
825 if (o2nm_single_cluster) 825 if (o2nm_single_cluster)
826 goto out; /* ENOSPC */ 826 goto out; /* ENOSPC */
827 827
828 cluster = kcalloc(1, sizeof(struct o2nm_cluster), GFP_KERNEL); 828 cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL);
829 ns = kcalloc(1, sizeof(struct o2nm_node_group), GFP_KERNEL); 829 ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL);
830 defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL); 830 defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
831 o2hb_group = o2hb_alloc_hb_set(); 831 o2hb_group = o2hb_alloc_hb_set();
832 if (cluster == NULL || ns == NULL || o2hb_group == NULL || defs == NULL) 832 if (cluster == NULL || ns == NULL || o2hb_group == NULL || defs == NULL)
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 457753df1ae7..ae4ff4a6636b 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -324,7 +324,7 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
324 struct page *page = NULL; 324 struct page *page = NULL;
325 325
326 page = alloc_page(GFP_NOFS); 326 page = alloc_page(GFP_NOFS);
327 sc = kcalloc(1, sizeof(*sc), GFP_NOFS); 327 sc = kzalloc(sizeof(*sc), GFP_NOFS);
328 if (sc == NULL || page == NULL) 328 if (sc == NULL || page == NULL)
329 goto out; 329 goto out;
330 330
@@ -714,7 +714,7 @@ int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
714 goto out; 714 goto out;
715 } 715 }
716 716
717 nmh = kcalloc(1, sizeof(struct o2net_msg_handler), GFP_NOFS); 717 nmh = kzalloc(sizeof(struct o2net_msg_handler), GFP_NOFS);
718 if (nmh == NULL) { 718 if (nmh == NULL) {
719 ret = -ENOMEM; 719 ret = -ENOMEM;
720 goto out; 720 goto out;
@@ -1918,9 +1918,9 @@ int o2net_init(void)
1918 1918
1919 o2quo_init(); 1919 o2quo_init();
1920 1920
1921 o2net_hand = kcalloc(1, sizeof(struct o2net_handshake), GFP_KERNEL); 1921 o2net_hand = kzalloc(sizeof(struct o2net_handshake), GFP_KERNEL);
1922 o2net_keep_req = kcalloc(1, sizeof(struct o2net_msg), GFP_KERNEL); 1922 o2net_keep_req = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL);
1923 o2net_keep_resp = kcalloc(1, sizeof(struct o2net_msg), GFP_KERNEL); 1923 o2net_keep_resp = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL);
1924 if (!o2net_hand || !o2net_keep_req || !o2net_keep_resp) { 1924 if (!o2net_hand || !o2net_keep_req || !o2net_keep_resp) {
1925 kfree(o2net_hand); 1925 kfree(o2net_hand);
1926 kfree(o2net_keep_req); 1926 kfree(o2net_keep_req);
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 420a375a3949..f0b25f2dd205 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -920,7 +920,7 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
920 920
921 mlog_entry("%p", dlm); 921 mlog_entry("%p", dlm);
922 922
923 ctxt = kcalloc(1, sizeof(*ctxt), GFP_KERNEL); 923 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
924 if (!ctxt) { 924 if (!ctxt) {
925 status = -ENOMEM; 925 status = -ENOMEM;
926 mlog_errno(status); 926 mlog_errno(status);
@@ -1223,7 +1223,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
1223 int i; 1223 int i;
1224 struct dlm_ctxt *dlm = NULL; 1224 struct dlm_ctxt *dlm = NULL;
1225 1225
1226 dlm = kcalloc(1, sizeof(*dlm), GFP_KERNEL); 1226 dlm = kzalloc(sizeof(*dlm), GFP_KERNEL);
1227 if (!dlm) { 1227 if (!dlm) {
1228 mlog_errno(-ENOMEM); 1228 mlog_errno(-ENOMEM);
1229 goto leave; 1229 goto leave;
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 42a1b91979b5..e5ca3db197f6 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -408,13 +408,13 @@ struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
408 struct dlm_lock *lock; 408 struct dlm_lock *lock;
409 int kernel_allocated = 0; 409 int kernel_allocated = 0;
410 410
411 lock = kcalloc(1, sizeof(*lock), GFP_NOFS); 411 lock = kzalloc(sizeof(*lock), GFP_NOFS);
412 if (!lock) 412 if (!lock)
413 return NULL; 413 return NULL;
414 414
415 if (!lksb) { 415 if (!lksb) {
416 /* zero memory only if kernel-allocated */ 416 /* zero memory only if kernel-allocated */
417 lksb = kcalloc(1, sizeof(*lksb), GFP_NOFS); 417 lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
418 if (!lksb) { 418 if (!lksb) {
419 kfree(lock); 419 kfree(lock);
420 return NULL; 420 return NULL;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 856012b4fa49..0ad872055cb3 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1939,7 +1939,7 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
1939 int ignore_higher, u8 request_from, u32 flags) 1939 int ignore_higher, u8 request_from, u32 flags)
1940{ 1940{
1941 struct dlm_work_item *item; 1941 struct dlm_work_item *item;
1942 item = kcalloc(1, sizeof(*item), GFP_NOFS); 1942 item = kzalloc(sizeof(*item), GFP_NOFS);
1943 if (!item) 1943 if (!item)
1944 return -ENOMEM; 1944 return -ENOMEM;
1945 1945
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index fb3e2b0817f1..367a11e9e2ed 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -757,7 +757,7 @@ static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
757 } 757 }
758 BUG_ON(num == dead_node); 758 BUG_ON(num == dead_node);
759 759
760 ndata = kcalloc(1, sizeof(*ndata), GFP_NOFS); 760 ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
761 if (!ndata) { 761 if (!ndata) {
762 dlm_destroy_recovery_area(dlm, dead_node); 762 dlm_destroy_recovery_area(dlm, dead_node);
763 return -ENOMEM; 763 return -ENOMEM;
@@ -842,7 +842,7 @@ int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data)
842 } 842 }
843 BUG_ON(lr->dead_node != dlm->reco.dead_node); 843 BUG_ON(lr->dead_node != dlm->reco.dead_node);
844 844
845 item = kcalloc(1, sizeof(*item), GFP_NOFS); 845 item = kzalloc(sizeof(*item), GFP_NOFS);
846 if (!item) { 846 if (!item) {
847 dlm_put(dlm); 847 dlm_put(dlm);
848 return -ENOMEM; 848 return -ENOMEM;
@@ -1323,7 +1323,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
1323 1323
1324 ret = -ENOMEM; 1324 ret = -ENOMEM;
1325 buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS); 1325 buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
1326 item = kcalloc(1, sizeof(*item), GFP_NOFS); 1326 item = kzalloc(sizeof(*item), GFP_NOFS);
1327 if (!buf || !item) 1327 if (!buf || !item)
1328 goto leave; 1328 goto leave;
1329 1329
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index e9a82ad95c1e..9fd590b9bde3 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -153,6 +153,14 @@ int ocfs2_should_update_atime(struct inode *inode,
153 ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))) 153 ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
154 return 0; 154 return 0;
155 155
156 if (vfsmnt->mnt_flags & MNT_RELATIME) {
157 if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
158 (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
159 return 1;
160
161 return 0;
162 }
163
156 now = CURRENT_TIME; 164 now = CURRENT_TIME;
157 if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum)) 165 if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
158 return 0; 166 return 0;
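
The new MNT_RELATIME branch in ocfs2_should_update_atime() asks for an atime update only when the stored atime is not newer than mtime or ctime, i.e. the file has changed since it was last read. A standalone sketch of that rule, with plain comparisons standing in for the kernel's timespec_compare():

#include <time.h>

/* Return 1 when atime should be updated under relatime semantics. */
static int relatime_need_update(const struct timespec *atime,
				const struct timespec *mtime,
				const struct timespec *ctime)
{
	if (atime->tv_sec < mtime->tv_sec ||
	    (atime->tv_sec == mtime->tv_sec && atime->tv_nsec <= mtime->tv_nsec))
		return 1;	/* atime <= mtime */
	if (atime->tv_sec < ctime->tv_sec ||
	    (atime->tv_sec == ctime->tv_sec && atime->tv_nsec <= ctime->tv_nsec))
		return 1;	/* atime <= ctime */
	return 0;
}
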
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 698d79a74ef8..4dedd9789108 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -776,7 +776,7 @@ static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
776{ 776{
777 int status; 777 int status;
778 778
779 *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL); 779 *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
780 if (!(*ac)) { 780 if (!(*ac)) {
781 status = -ENOMEM; 781 status = -ENOMEM;
782 mlog_errno(status); 782 mlog_errno(status);
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c
index aa6f5aadedc4..2d3ac32cb74e 100644
--- a/fs/ocfs2/slot_map.c
+++ b/fs/ocfs2/slot_map.c
@@ -175,7 +175,7 @@ int ocfs2_init_slot_info(struct ocfs2_super *osb)
175 struct buffer_head *bh = NULL; 175 struct buffer_head *bh = NULL;
176 struct ocfs2_slot_info *si; 176 struct ocfs2_slot_info *si;
177 177
178 si = kcalloc(1, sizeof(struct ocfs2_slot_info), GFP_KERNEL); 178 si = kzalloc(sizeof(struct ocfs2_slot_info), GFP_KERNEL);
179 if (!si) { 179 if (!si) {
180 status = -ENOMEM; 180 status = -ENOMEM;
181 mlog_errno(status); 181 mlog_errno(status);
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 000d71cca6c5..6dbb11762759 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -488,7 +488,7 @@ int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
488 int status; 488 int status;
489 u32 slot; 489 u32 slot;
490 490
491 *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL); 491 *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
492 if (!(*ac)) { 492 if (!(*ac)) {
493 status = -ENOMEM; 493 status = -ENOMEM;
494 mlog_errno(status); 494 mlog_errno(status);
@@ -530,7 +530,7 @@ int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
530{ 530{
531 int status; 531 int status;
532 532
533 *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL); 533 *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
534 if (!(*ac)) { 534 if (!(*ac)) {
535 status = -ENOMEM; 535 status = -ENOMEM;
536 mlog_errno(status); 536 mlog_errno(status);
@@ -595,7 +595,7 @@ int ocfs2_reserve_clusters(struct ocfs2_super *osb,
595 595
596 mlog_entry_void(); 596 mlog_entry_void();
597 597
598 *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL); 598 *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
599 if (!(*ac)) { 599 if (!(*ac)) {
600 status = -ENOMEM; 600 status = -ENOMEM;
601 mlog_errno(status); 601 mlog_errno(status);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index a6d2f8cc165b..6e300a88a47e 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1231,7 +1231,7 @@ static int ocfs2_setup_osb_uuid(struct ocfs2_super *osb, const unsigned char *uu
1231 1231
1232 BUG_ON(uuid_bytes != OCFS2_VOL_UUID_LEN); 1232 BUG_ON(uuid_bytes != OCFS2_VOL_UUID_LEN);
1233 1233
1234 osb->uuid_str = kcalloc(1, OCFS2_VOL_UUID_LEN * 2 + 1, GFP_KERNEL); 1234 osb->uuid_str = kzalloc(OCFS2_VOL_UUID_LEN * 2 + 1, GFP_KERNEL);
1235 if (osb->uuid_str == NULL) 1235 if (osb->uuid_str == NULL)
1236 return -ENOMEM; 1236 return -ENOMEM;
1237 1237
@@ -1262,7 +1262,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
1262 1262
1263 mlog_entry_void(); 1263 mlog_entry_void();
1264 1264
1265 osb = kcalloc(1, sizeof(struct ocfs2_super), GFP_KERNEL); 1265 osb = kzalloc(sizeof(struct ocfs2_super), GFP_KERNEL);
1266 if (!osb) { 1266 if (!osb) {
1267 status = -ENOMEM; 1267 status = -ENOMEM;
1268 mlog_errno(status); 1268 mlog_errno(status);
@@ -1387,7 +1387,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
1387 */ 1387 */
1388 /* initialize our journal structure */ 1388 /* initialize our journal structure */
1389 1389
1390 journal = kcalloc(1, sizeof(struct ocfs2_journal), GFP_KERNEL); 1390 journal = kzalloc(sizeof(struct ocfs2_journal), GFP_KERNEL);
1391 if (!journal) { 1391 if (!journal) {
1392 mlog(ML_ERROR, "unable to alloc journal\n"); 1392 mlog(ML_ERROR, "unable to alloc journal\n");
1393 status = -ENOMEM; 1393 status = -ENOMEM;
diff --git a/fs/ocfs2/vote.c b/fs/ocfs2/vote.c
index 0315a8b61ed6..0afd8b9af70f 100644
--- a/fs/ocfs2/vote.c
+++ b/fs/ocfs2/vote.c
@@ -479,7 +479,7 @@ static struct ocfs2_net_wait_ctxt *ocfs2_new_net_wait_ctxt(unsigned int response
479{ 479{
480 struct ocfs2_net_wait_ctxt *w; 480 struct ocfs2_net_wait_ctxt *w;
481 481
482 w = kcalloc(1, sizeof(*w), GFP_NOFS); 482 w = kzalloc(sizeof(*w), GFP_NOFS);
483 if (!w) { 483 if (!w) {
484 mlog_errno(-ENOMEM); 484 mlog_errno(-ENOMEM);
485 goto bail; 485 goto bail;
@@ -642,7 +642,7 @@ static struct ocfs2_vote_msg * ocfs2_new_vote_request(struct ocfs2_super *osb,
642 642
643 BUG_ON(!ocfs2_is_valid_vote_request(type)); 643 BUG_ON(!ocfs2_is_valid_vote_request(type));
644 644
645 request = kcalloc(1, sizeof(*request), GFP_NOFS); 645 request = kzalloc(sizeof(*request), GFP_NOFS);
646 if (!request) { 646 if (!request) {
647 mlog_errno(-ENOMEM); 647 mlog_errno(-ENOMEM);
648 } else { 648 } else {
diff --git a/fs/pipe.c b/fs/pipe.c
index f8b6bdcb879a..9a06e8e48e8d 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -207,7 +207,7 @@ int generic_pipe_buf_pin(struct pipe_inode_info *info, struct pipe_buffer *buf)
207 return 0; 207 return 0;
208} 208}
209 209
210static struct pipe_buf_operations anon_pipe_buf_ops = { 210static const struct pipe_buf_operations anon_pipe_buf_ops = {
211 .can_merge = 1, 211 .can_merge = 1,
212 .map = generic_pipe_buf_map, 212 .map = generic_pipe_buf_map,
213 .unmap = generic_pipe_buf_unmap, 213 .unmap = generic_pipe_buf_unmap,
@@ -243,7 +243,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
243 if (bufs) { 243 if (bufs) {
244 int curbuf = pipe->curbuf; 244 int curbuf = pipe->curbuf;
245 struct pipe_buffer *buf = pipe->bufs + curbuf; 245 struct pipe_buffer *buf = pipe->bufs + curbuf;
246 struct pipe_buf_operations *ops = buf->ops; 246 const struct pipe_buf_operations *ops = buf->ops;
247 void *addr; 247 void *addr;
248 size_t chars = buf->len; 248 size_t chars = buf->len;
249 int error, atomic; 249 int error, atomic;
@@ -365,7 +365,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
365 int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) & 365 int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
366 (PIPE_BUFFERS-1); 366 (PIPE_BUFFERS-1);
367 struct pipe_buffer *buf = pipe->bufs + lastbuf; 367 struct pipe_buffer *buf = pipe->bufs + lastbuf;
368 struct pipe_buf_operations *ops = buf->ops; 368 const struct pipe_buf_operations *ops = buf->ops;
369 int offset = buf->offset + buf->len; 369 int offset = buf->offset + buf->len;
370 370
371 if (ops->can_merge && offset + chars <= PAGE_SIZE) { 371 if (ops->can_merge && offset + chars <= PAGE_SIZE) {
@@ -756,7 +756,7 @@ const struct file_operations rdwr_fifo_fops = {
756 .fasync = pipe_rdwr_fasync, 756 .fasync = pipe_rdwr_fasync,
757}; 757};
758 758
759static struct file_operations read_pipe_fops = { 759static const struct file_operations read_pipe_fops = {
760 .llseek = no_llseek, 760 .llseek = no_llseek,
761 .read = do_sync_read, 761 .read = do_sync_read,
762 .aio_read = pipe_read, 762 .aio_read = pipe_read,
@@ -768,7 +768,7 @@ static struct file_operations read_pipe_fops = {
768 .fasync = pipe_read_fasync, 768 .fasync = pipe_read_fasync,
769}; 769};
770 770
771static struct file_operations write_pipe_fops = { 771static const struct file_operations write_pipe_fops = {
772 .llseek = no_llseek, 772 .llseek = no_llseek,
773 .read = bad_pipe_r, 773 .read = bad_pipe_r,
774 .write = do_sync_write, 774 .write = do_sync_write,
@@ -780,7 +780,7 @@ static struct file_operations write_pipe_fops = {
780 .fasync = pipe_write_fasync, 780 .fasync = pipe_write_fasync,
781}; 781};
782 782
783static struct file_operations rdwr_pipe_fops = { 783static const struct file_operations rdwr_pipe_fops = {
784 .llseek = no_llseek, 784 .llseek = no_llseek,
785 .read = do_sync_read, 785 .read = do_sync_read,
786 .aio_read = pipe_read, 786 .aio_read = pipe_read,
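
The pipe.c hunks above (and the fs/splice.c ones further down) constify the pipe_buf_operations and pipe file_operations tables; every pointer that stores such a table then has to be const-qualified as well, which accounts for most of the churn. A self-contained sketch of the pattern with a made-up demo_ops type:

struct demo_ops {
	int	(*open)(void);
	void	(*close)(void);
};

static int demo_open(void) { return 0; }
static void demo_close(void) { }

/* const lets the table live in read-only data and catches stray writes */
static const struct demo_ops demo_default_ops = {
	.open	= demo_open,
	.close	= demo_close,
};

/* callers must carry the qualifier too */
static const struct demo_ops *active_ops = &demo_default_ops;
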
diff --git a/fs/read_write.c b/fs/read_write.c
index 1d3dda4fa70c..707ac21700d3 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -450,8 +450,6 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to)
450 return seg; 450 return seg;
451} 451}
452 452
453EXPORT_UNUSED_SYMBOL(iov_shorten); /* June 2006 */
454
455ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov, 453ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
456 unsigned long nr_segs, size_t len, loff_t *ppos, iov_fn_t fn) 454 unsigned long nr_segs, size_t len, loff_t *ppos, iov_fn_t fn)
457{ 455{
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 97ae1b92bc47..5296a29cc5eb 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -135,7 +135,7 @@ static void *posix_acl_to_disk(const struct posix_acl *acl, size_t * size)
135 int n; 135 int n;
136 136
137 *size = reiserfs_acl_size(acl->a_count); 137 *size = reiserfs_acl_size(acl->a_count);
138 ext_acl = (reiserfs_acl_header *) kmalloc(sizeof(reiserfs_acl_header) + 138 ext_acl = kmalloc(sizeof(reiserfs_acl_header) +
139 acl->a_count * 139 acl->a_count *
140 sizeof(reiserfs_acl_entry), 140 sizeof(reiserfs_acl_entry),
141 GFP_NOFS); 141 GFP_NOFS);
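
The reiserfs hunk above (like the avr32 pgalloc.h hunk later in this series) only drops the cast on the allocator's return value: kmalloc() returns void *, which converts implicitly in C, so the cast is noise and can hide a missing prototype. A userspace-flavoured sketch, with a made-up acl_disk struct, so it stands alone:

#include <stdlib.h>

struct acl_disk { int count; };

int main(void)
{
	struct acl_disk *a = (struct acl_disk *)malloc(sizeof(*a));	/* old style */
	struct acl_disk *b = malloc(sizeof(*b));			/* preferred */

	free(a);
	free(b);
	return 0;
}
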
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index 4af4cd729a5a..84dfe3f3482e 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -482,12 +482,13 @@ smb_put_super(struct super_block *sb)
482 smb_close_socket(server); 482 smb_close_socket(server);
483 483
484 if (server->conn_pid) 484 if (server->conn_pid)
485 kill_proc(server->conn_pid, SIGTERM, 1); 485 kill_pid(server->conn_pid, SIGTERM, 1);
486 486
487 kfree(server->ops); 487 kfree(server->ops);
488 smb_unload_nls(server); 488 smb_unload_nls(server);
489 sb->s_fs_info = NULL; 489 sb->s_fs_info = NULL;
490 smb_unlock_server(server); 490 smb_unlock_server(server);
491 put_pid(server->conn_pid);
491 kfree(server); 492 kfree(server);
492} 493}
493 494
@@ -530,7 +531,7 @@ static int smb_fill_super(struct super_block *sb, void *raw_data, int silent)
530 INIT_LIST_HEAD(&server->xmitq); 531 INIT_LIST_HEAD(&server->xmitq);
531 INIT_LIST_HEAD(&server->recvq); 532 INIT_LIST_HEAD(&server->recvq);
532 server->conn_error = 0; 533 server->conn_error = 0;
533 server->conn_pid = 0; 534 server->conn_pid = NULL;
534 server->state = CONN_INVALID; /* no connection yet */ 535 server->state = CONN_INVALID; /* no connection yet */
535 server->generation = 0; 536 server->generation = 0;
536 537
diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
index a5ced9e0c6c4..feac46050619 100644
--- a/fs/smbfs/proc.c
+++ b/fs/smbfs/proc.c
@@ -877,7 +877,7 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
877 goto out_putf; 877 goto out_putf;
878 878
879 server->sock_file = filp; 879 server->sock_file = filp;
880 server->conn_pid = current->pid; 880 server->conn_pid = get_pid(task_pid(current));
881 server->opt = *opt; 881 server->opt = *opt;
882 server->generation += 1; 882 server->generation += 1;
883 server->state = CONN_VALID; 883 server->state = CONN_VALID;
@@ -971,8 +971,8 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
971 } 971 }
972 972
973 VERBOSE("protocol=%d, max_xmit=%d, pid=%d capabilities=0x%x\n", 973 VERBOSE("protocol=%d, max_xmit=%d, pid=%d capabilities=0x%x\n",
974 server->opt.protocol, server->opt.max_xmit, server->conn_pid, 974 server->opt.protocol, server->opt.max_xmit,
975 server->opt.capabilities); 975 pid_nr(server->conn_pid), server->opt.capabilities);
976 976
977 /* FIXME: this really should be done by smbmount. */ 977 /* FIXME: this really should be done by smbmount. */
978 if (server->opt.max_xmit > SMB_MAX_PACKET_SIZE) { 978 if (server->opt.max_xmit > SMB_MAX_PACKET_SIZE) {
diff --git a/fs/smbfs/smbiod.c b/fs/smbfs/smbiod.c
index e67540441288..89eaf31f1d46 100644
--- a/fs/smbfs/smbiod.c
+++ b/fs/smbfs/smbiod.c
@@ -152,7 +152,7 @@ int smbiod_retry(struct smb_sb_info *server)
152{ 152{
153 struct list_head *head; 153 struct list_head *head;
154 struct smb_request *req; 154 struct smb_request *req;
155 pid_t pid = server->conn_pid; 155 struct pid *pid = get_pid(server->conn_pid);
156 int result = 0; 156 int result = 0;
157 157
158 VERBOSE("state: %d\n", server->state); 158 VERBOSE("state: %d\n", server->state);
@@ -222,7 +222,7 @@ int smbiod_retry(struct smb_sb_info *server)
222 /* 222 /*
223 * Note: use the "priv" flag, as a user process may need to reconnect. 223 * Note: use the "priv" flag, as a user process may need to reconnect.
224 */ 224 */
225 result = kill_proc(pid, SIGUSR1, 1); 225 result = kill_pid(pid, SIGUSR1, 1);
226 if (result) { 226 if (result) {
227 /* FIXME: this is most likely fatal, umount? */ 227 /* FIXME: this is most likely fatal, umount? */
228 printk(KERN_ERR "smb_retry: signal failed [%d]\n", result); 228 printk(KERN_ERR "smb_retry: signal failed [%d]\n", result);
@@ -233,6 +233,7 @@ int smbiod_retry(struct smb_sb_info *server)
233 /* FIXME: The retried requests should perhaps get a "time boost". */ 233 /* FIXME: The retried requests should perhaps get a "time boost". */
234 234
235out: 235out:
236 put_pid(pid);
236 return result; 237 return result;
237} 238}
238 239
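
The three smbfs hunks above replace the bare pid_t of the connection helper with a reference-counted struct pid: the reference is taken with get_pid(task_pid(current)) when the connection is established, the process is signalled with kill_pid() instead of kill_proc(), the numeric value for logging comes from pid_nr(), and put_pid() drops the reference. A sketch of that lifetime using the calls that appear in the diff; demo_server is a stand-in for smb_sb_info:

struct demo_server {
	struct pid *conn_pid;
};

static void demo_attach(struct demo_server *server)
{
	server->conn_pid = get_pid(task_pid(current));	/* take a reference */
}

static int demo_signal(struct demo_server *server)
{
	return kill_pid(server->conn_pid, SIGUSR1, 1);	/* signal via struct pid */
}

static void demo_detach(struct demo_server *server)
{
	put_pid(server->conn_pid);			/* drop the reference */
	server->conn_pid = NULL;
}
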
diff --git a/fs/splice.c b/fs/splice.c
index bbd0aeb3f68e..2fca6ebf4cc2 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -42,7 +42,7 @@ struct splice_pipe_desc {
42 struct partial_page *partial; /* pages[] may not be contig */ 42 struct partial_page *partial; /* pages[] may not be contig */
43 int nr_pages; /* number of pages in map */ 43 int nr_pages; /* number of pages in map */
44 unsigned int flags; /* splice flags */ 44 unsigned int flags; /* splice flags */
45 struct pipe_buf_operations *ops;/* ops associated with output pipe */ 45 const struct pipe_buf_operations *ops;/* ops associated with output pipe */
46}; 46};
47 47
48/* 48/*
@@ -139,7 +139,7 @@ error:
139 return err; 139 return err;
140} 140}
141 141
142static struct pipe_buf_operations page_cache_pipe_buf_ops = { 142static const struct pipe_buf_operations page_cache_pipe_buf_ops = {
143 .can_merge = 0, 143 .can_merge = 0,
144 .map = generic_pipe_buf_map, 144 .map = generic_pipe_buf_map,
145 .unmap = generic_pipe_buf_unmap, 145 .unmap = generic_pipe_buf_unmap,
@@ -159,7 +159,7 @@ static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
159 return generic_pipe_buf_steal(pipe, buf); 159 return generic_pipe_buf_steal(pipe, buf);
160} 160}
161 161
162static struct pipe_buf_operations user_page_pipe_buf_ops = { 162static const struct pipe_buf_operations user_page_pipe_buf_ops = {
163 .can_merge = 0, 163 .can_merge = 0,
164 .map = generic_pipe_buf_map, 164 .map = generic_pipe_buf_map,
165 .unmap = generic_pipe_buf_unmap, 165 .unmap = generic_pipe_buf_unmap,
@@ -724,7 +724,7 @@ static ssize_t __splice_from_pipe(struct pipe_inode_info *pipe,
724 for (;;) { 724 for (;;) {
725 if (pipe->nrbufs) { 725 if (pipe->nrbufs) {
726 struct pipe_buffer *buf = pipe->bufs + pipe->curbuf; 726 struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
727 struct pipe_buf_operations *ops = buf->ops; 727 const struct pipe_buf_operations *ops = buf->ops;
728 728
729 sd.len = buf->len; 729 sd.len = buf->len;
730 if (sd.len > sd.total_len) 730 if (sd.len > sd.total_len)
diff --git a/include/asm-alpha/cacheflush.h b/include/asm-alpha/cacheflush.h
index 805640b41078..b686cc7fc44e 100644
--- a/include/asm-alpha/cacheflush.h
+++ b/include/asm-alpha/cacheflush.h
@@ -6,6 +6,7 @@
6/* Caches aren't brain-dead on the Alpha. */ 6/* Caches aren't brain-dead on the Alpha. */
7#define flush_cache_all() do { } while (0) 7#define flush_cache_all() do { } while (0)
8#define flush_cache_mm(mm) do { } while (0) 8#define flush_cache_mm(mm) do { } while (0)
9#define flush_cache_dup_mm(mm) do { } while (0)
9#define flush_cache_range(vma, start, end) do { } while (0) 10#define flush_cache_range(vma, start, end) do { } while (0)
10#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 11#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
11#define flush_dcache_page(page) do { } while (0) 12#define flush_dcache_page(page) do { } while (0)
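
This hunk and the other cacheflush.h hunks below all introduce flush_cache_dup_mm(). Architectures without cache aliasing define it as a no-op; ARM and m68k simply map it to flush_cache_mm(). The separate hook presumably exists so the flush done when an address space is duplicated at fork time can be tuned independently of the general mm flush. A hedged sketch of the intended call site; the fork-path change itself is not part of this diff and demo_dup_mmap is only illustrative:

static int demo_dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	flush_cache_dup_mm(oldmm);	/* was: flush_cache_mm(oldmm) */
	/* ... copy the parent's vmas and page tables into mm ... */
	return 0;
}
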
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index f0845646aacb..378a3a2ce8d9 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -319,6 +319,8 @@ extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
319 unsigned long len, int write); 319 unsigned long len, int write);
320#endif 320#endif
321 321
322#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
323
322/* 324/*
323 * flush_cache_user_range is used when we want to ensure that the 325 * flush_cache_user_range is used when we want to ensure that the
324 * Harvard caches are synchronised for the user space address range. 326 * Harvard caches are synchronised for the user space address range.
diff --git a/include/asm-arm/thread_info.h b/include/asm-arm/thread_info.h
index d9b8bddc8732..5014794f9eb3 100644
--- a/include/asm-arm/thread_info.h
+++ b/include/asm-arm/thread_info.h
@@ -147,6 +147,7 @@ extern void iwmmxt_task_switch(struct thread_info *);
147#define TIF_POLLING_NRFLAG 16 147#define TIF_POLLING_NRFLAG 16
148#define TIF_USING_IWMMXT 17 148#define TIF_USING_IWMMXT 17
149#define TIF_MEMDIE 18 149#define TIF_MEMDIE 18
150#define TIF_FREEZE 19
150 151
151#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 152#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
152#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 153#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -154,6 +155,7 @@ extern void iwmmxt_task_switch(struct thread_info *);
154#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 155#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
155#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 156#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
156#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) 157#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
158#define _TIF_FREEZE (1 << TIF_FREEZE)
157 159
158/* 160/*
159 * Change these and you break ASM code in entry-common.S 161 * Change these and you break ASM code in entry-common.S
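
This and the later thread_info.h hunks add a per-architecture TIF_FREEZE bit plus its _TIF_FREEZE mask (the bit number differs per architecture), presumably so the suspend freezer can mark tasks through the usual thread-flag helpers. The convention each hunk follows, sketched with the ARM numbering:

#define TIF_FREEZE	19			/* bit number: freezing for suspend */
#define _TIF_FREEZE	(1 << TIF_FREEZE)	/* mask form for flag tests */

/* a task would then be flagged and tested roughly like:
 *	set_tsk_thread_flag(p, TIF_FREEZE);
 *	if (test_tsk_thread_flag(p, TIF_FREEZE)) ...
 */
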
diff --git a/include/asm-arm26/cacheflush.h b/include/asm-arm26/cacheflush.h
index 9c1b9c7f2ebd..14ae15b6faab 100644
--- a/include/asm-arm26/cacheflush.h
+++ b/include/asm-arm26/cacheflush.h
@@ -22,6 +22,7 @@
22 22
23#define flush_cache_all() do { } while (0) 23#define flush_cache_all() do { } while (0)
24#define flush_cache_mm(mm) do { } while (0) 24#define flush_cache_mm(mm) do { } while (0)
25#define flush_cache_dup_mm(mm) do { } while (0)
25#define flush_cache_range(vma,start,end) do { } while (0) 26#define flush_cache_range(vma,start,end) do { } while (0)
26#define flush_cache_page(vma,vmaddr,pfn) do { } while (0) 27#define flush_cache_page(vma,vmaddr,pfn) do { } while (0)
27#define flush_cache_vmap(start, end) do { } while (0) 28#define flush_cache_vmap(start, end) do { } while (0)
diff --git a/include/asm-avr32/cacheflush.h b/include/asm-avr32/cacheflush.h
index f1bf1708980e..dfaaa88cd412 100644
--- a/include/asm-avr32/cacheflush.h
+++ b/include/asm-avr32/cacheflush.h
@@ -87,6 +87,7 @@ void invalidate_icache_region(void *start, size_t len);
87 */ 87 */
88#define flush_cache_all() do { } while (0) 88#define flush_cache_all() do { } while (0)
89#define flush_cache_mm(mm) do { } while (0) 89#define flush_cache_mm(mm) do { } while (0)
90#define flush_cache_dup_mm(mm) do { } while (0)
90#define flush_cache_range(vma, start, end) do { } while (0) 91#define flush_cache_range(vma, start, end) do { } while (0)
91#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 92#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
92#define flush_cache_vmap(start, end) do { } while (0) 93#define flush_cache_vmap(start, end) do { } while (0)
diff --git a/include/asm-avr32/pgalloc.h b/include/asm-avr32/pgalloc.h
index 7492cfb92ced..bb82e70cde8d 100644
--- a/include/asm-avr32/pgalloc.h
+++ b/include/asm-avr32/pgalloc.h
@@ -28,7 +28,7 @@ static __inline__ void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
28static __inline__ pgd_t *pgd_alloc(struct mm_struct *mm) 28static __inline__ pgd_t *pgd_alloc(struct mm_struct *mm)
29{ 29{
30 unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t)); 30 unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
31 pgd_t *pgd = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL); 31 pgd_t *pgd = kmalloc(pgd_size, GFP_KERNEL);
32 32
33 if (pgd) 33 if (pgd)
34 memset(pgd, 0, pgd_size); 34 memset(pgd, 0, pgd_size);
diff --git a/include/asm-cris/cacheflush.h b/include/asm-cris/cacheflush.h
index 72cc71dffe70..01af2de27c5b 100644
--- a/include/asm-cris/cacheflush.h
+++ b/include/asm-cris/cacheflush.h
@@ -9,6 +9,7 @@
9 */ 9 */
10#define flush_cache_all() do { } while (0) 10#define flush_cache_all() do { } while (0)
11#define flush_cache_mm(mm) do { } while (0) 11#define flush_cache_mm(mm) do { } while (0)
12#define flush_cache_dup_mm(mm) do { } while (0)
12#define flush_cache_range(vma, start, end) do { } while (0) 13#define flush_cache_range(vma, start, end) do { } while (0)
13#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 14#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
14#define flush_dcache_page(page) do { } while (0) 15#define flush_dcache_page(page) do { } while (0)
diff --git a/include/asm-frv/cacheflush.h b/include/asm-frv/cacheflush.h
index eaa5826bc1c8..02500405a6fb 100644
--- a/include/asm-frv/cacheflush.h
+++ b/include/asm-frv/cacheflush.h
@@ -20,6 +20,7 @@
20 */ 20 */
21#define flush_cache_all() do {} while(0) 21#define flush_cache_all() do {} while(0)
22#define flush_cache_mm(mm) do {} while(0) 22#define flush_cache_mm(mm) do {} while(0)
23#define flush_cache_dup_mm(mm) do {} while(0)
23#define flush_cache_range(mm, start, end) do {} while(0) 24#define flush_cache_range(mm, start, end) do {} while(0)
24#define flush_cache_page(vma, vmaddr, pfn) do {} while(0) 25#define flush_cache_page(vma, vmaddr, pfn) do {} while(0)
25#define flush_cache_vmap(start, end) do {} while(0) 26#define flush_cache_vmap(start, end) do {} while(0)
diff --git a/include/asm-frv/thread_info.h b/include/asm-frv/thread_info.h
index d66c48e6ef14..d881f518e6a9 100644
--- a/include/asm-frv/thread_info.h
+++ b/include/asm-frv/thread_info.h
@@ -116,6 +116,7 @@ register struct thread_info *__current_thread_info asm("gr15");
116#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */ 116#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
117#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 117#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
118#define TIF_MEMDIE 17 /* OOM killer killed process */ 118#define TIF_MEMDIE 17 /* OOM killer killed process */
119#define TIF_FREEZE 18 /* freezing for suspend */
119 120
120#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 121#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
121#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 122#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -125,6 +126,7 @@ register struct thread_info *__current_thread_info asm("gr15");
125#define _TIF_IRET (1 << TIF_IRET) 126#define _TIF_IRET (1 << TIF_IRET)
126#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) 127#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
127#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 128#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
129#define _TIF_FREEZE (1 << TIF_FREEZE)
128 130
129#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ 131#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
130#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ 132#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/include/asm-h8300/cacheflush.h b/include/asm-h8300/cacheflush.h
index 1e4d95bb5ec9..71210d141b64 100644
--- a/include/asm-h8300/cacheflush.h
+++ b/include/asm-h8300/cacheflush.h
@@ -12,6 +12,7 @@
12 12
13#define flush_cache_all() 13#define flush_cache_all()
14#define flush_cache_mm(mm) 14#define flush_cache_mm(mm)
15#define flush_cache_dup_mm(mm) do { } while (0)
15#define flush_cache_range(vma,a,b) 16#define flush_cache_range(vma,a,b)
16#define flush_cache_page(vma,p,pfn) 17#define flush_cache_page(vma,p,pfn)
17#define flush_dcache_page(page) 18#define flush_dcache_page(page)
diff --git a/include/asm-i386/cacheflush.h b/include/asm-i386/cacheflush.h
index 7199f7b326f1..74e03c8f2e51 100644
--- a/include/asm-i386/cacheflush.h
+++ b/include/asm-i386/cacheflush.h
@@ -7,6 +7,7 @@
7/* Caches aren't brain-dead on the intel. */ 7/* Caches aren't brain-dead on the intel. */
8#define flush_cache_all() do { } while (0) 8#define flush_cache_all() do { } while (0)
9#define flush_cache_mm(mm) do { } while (0) 9#define flush_cache_mm(mm) do { } while (0)
10#define flush_cache_dup_mm(mm) do { } while (0)
10#define flush_cache_range(vma, start, end) do { } while (0) 11#define flush_cache_range(vma, start, end) do { } while (0)
11#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 12#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
12#define flush_dcache_page(page) do { } while (0) 13#define flush_dcache_page(page) do { } while (0)
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index 5679d4993072..609a3899475c 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -100,6 +100,8 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
100 100
101#define MSR_P6_PERFCTR0 0xc1 101#define MSR_P6_PERFCTR0 0xc1
102#define MSR_P6_PERFCTR1 0xc2 102#define MSR_P6_PERFCTR1 0xc2
103#define MSR_FSB_FREQ 0xcd
104
103 105
104#define MSR_IA32_BBL_CR_CTL 0x119 106#define MSR_IA32_BBL_CR_CTL 0x119
105 107
@@ -130,6 +132,9 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
130#define MSR_IA32_PERF_STATUS 0x198 132#define MSR_IA32_PERF_STATUS 0x198
131#define MSR_IA32_PERF_CTL 0x199 133#define MSR_IA32_PERF_CTL 0x199
132 134
135#define MSR_IA32_MPERF 0xE7
136#define MSR_IA32_APERF 0xE8
137
133#define MSR_IA32_THERM_CONTROL 0x19a 138#define MSR_IA32_THERM_CONTROL 0x19a
134#define MSR_IA32_THERM_INTERRUPT 0x19b 139#define MSR_IA32_THERM_INTERRUPT 0x19b
135#define MSR_IA32_THERM_STATUS 0x19c 140#define MSR_IA32_THERM_STATUS 0x19c
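
Besides MSR_FSB_FREQ, the msr.h hunk adds the IA32_MPERF/IA32_APERF pair; the ratio of their deltas over a sampling window gives the average performance relative to the reference frequency, which is what a cpufreq driver can feed back into frequency selection. A sketch of assumed usage (not code from this patch), built on the rdmsr() macro declared earlier in this header; demo_sample_aperf_mperf is illustrative:

static void demo_sample_aperf_mperf(u64 *aperf_delta, u64 *mperf_delta)
{
	u32 alo, ahi, mlo, mhi;
	u64 a0, m0, a1, m1;

	rdmsr(MSR_IA32_APERF, alo, ahi);
	a0 = ((u64)ahi << 32) | alo;
	rdmsr(MSR_IA32_MPERF, mlo, mhi);
	m0 = ((u64)mhi << 32) | mlo;

	/* ... let the workload run for the sampling interval ... */

	rdmsr(MSR_IA32_APERF, alo, ahi);
	a1 = ((u64)ahi << 32) | alo;
	rdmsr(MSR_IA32_MPERF, mlo, mhi);
	m1 = ((u64)mhi << 32) | mlo;

	*aperf_delta = a1 - a0;	/* counts in proportion to actual performance */
	*mperf_delta = m1 - m0;	/* counts at a fixed reference rate */
}
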
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 46d32ad92082..4b187bb377b4 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -134,6 +134,7 @@ static inline struct thread_info *current_thread_info(void)
134#define TIF_MEMDIE 16 134#define TIF_MEMDIE 16
135#define TIF_DEBUG 17 /* uses debug registers */ 135#define TIF_DEBUG 17 /* uses debug registers */
136#define TIF_IO_BITMAP 18 /* uses I/O bitmap */ 136#define TIF_IO_BITMAP 18 /* uses I/O bitmap */
137#define TIF_FREEZE 19 /* is freezing for suspend */
137 138
138#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 139#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
139#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 140#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -147,6 +148,7 @@ static inline struct thread_info *current_thread_info(void)
147#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 148#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
148#define _TIF_DEBUG (1<<TIF_DEBUG) 149#define _TIF_DEBUG (1<<TIF_DEBUG)
149#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) 150#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP)
151#define _TIF_FREEZE (1<<TIF_FREEZE)
150 152
151/* work to do on interrupt/exception return */ 153/* work to do on interrupt/exception return */
152#define _TIF_WORK_MASK \ 154#define _TIF_WORK_MASK \
diff --git a/include/asm-ia64/break.h b/include/asm-ia64/break.h
index 8167828edc4b..f03402039896 100644
--- a/include/asm-ia64/break.h
+++ b/include/asm-ia64/break.h
@@ -12,8 +12,8 @@
12 * OS-specific debug break numbers: 12 * OS-specific debug break numbers:
13 */ 13 */
14#define __IA64_BREAK_KDB 0x80100 14#define __IA64_BREAK_KDB 0x80100
15#define __IA64_BREAK_KPROBE 0x80200 15#define __IA64_BREAK_KPROBE 0x81000 /* .. 0x81fff */
16#define __IA64_BREAK_JPROBE 0x80300 16#define __IA64_BREAK_JPROBE 0x82000
17 17
18/* 18/*
19 * OS-specific break numbers: 19 * OS-specific break numbers:
diff --git a/include/asm-ia64/cacheflush.h b/include/asm-ia64/cacheflush.h
index f2dacb4245ec..4906916d715b 100644
--- a/include/asm-ia64/cacheflush.h
+++ b/include/asm-ia64/cacheflush.h
@@ -18,6 +18,7 @@
18 18
19#define flush_cache_all() do { } while (0) 19#define flush_cache_all() do { } while (0)
20#define flush_cache_mm(mm) do { } while (0) 20#define flush_cache_mm(mm) do { } while (0)
21#define flush_cache_dup_mm(mm) do { } while (0)
21#define flush_cache_range(vma, start, end) do { } while (0) 22#define flush_cache_range(vma, start, end) do { } while (0)
22#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 23#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
23#define flush_icache_page(vma,page) do { } while (0) 24#define flush_icache_page(vma,page) do { } while (0)
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index 1b45b71c79b9..828ae00e47c1 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -115,6 +115,7 @@ struct arch_specific_insn {
115 #define INST_FLAG_BREAK_INST 4 115 #define INST_FLAG_BREAK_INST 4
116 unsigned long inst_flag; 116 unsigned long inst_flag;
117 unsigned short target_br_reg; 117 unsigned short target_br_reg;
118 unsigned short slot;
118}; 119};
119 120
120extern int kprobe_exceptions_notify(struct notifier_block *self, 121extern int kprobe_exceptions_notify(struct notifier_block *self,
diff --git a/include/asm-ia64/pci.h b/include/asm-ia64/pci.h
index 825eb7d882e6..556f53fa44cb 100644
--- a/include/asm-ia64/pci.h
+++ b/include/asm-ia64/pci.h
@@ -78,9 +78,6 @@ pcibios_penalize_isa_irq (int irq, int active)
78#define pci_dac_dma_sync_single_for_cpu(dev,dma_addr,len,dir) do { } while (0) 78#define pci_dac_dma_sync_single_for_cpu(dev,dma_addr,len,dir) do { } while (0)
79#define pci_dac_dma_sync_single_for_device(dev,dma_addr,len,dir) do { mb(); } while (0) 79#define pci_dac_dma_sync_single_for_device(dev,dma_addr,len,dir) do { mb(); } while (0)
80 80
81#define sg_dma_len(sg) ((sg)->dma_length)
82#define sg_dma_address(sg) ((sg)->dma_address)
83
84#ifdef CONFIG_PCI 81#ifdef CONFIG_PCI
85static inline void pci_dma_burst_advice(struct pci_dev *pdev, 82static inline void pci_dma_burst_advice(struct pci_dev *pdev,
86 enum pci_dma_burst_strategy *strat, 83 enum pci_dma_burst_strategy *strat,
diff --git a/include/asm-ia64/scatterlist.h b/include/asm-ia64/scatterlist.h
index 834a189ef189..9dbea8844d5e 100644
--- a/include/asm-ia64/scatterlist.h
+++ b/include/asm-ia64/scatterlist.h
@@ -25,4 +25,7 @@ struct scatterlist {
25 */ 25 */
26#define ISA_DMA_THRESHOLD 0xffffffff 26#define ISA_DMA_THRESHOLD 0xffffffff
27 27
28#define sg_dma_len(sg) ((sg)->dma_length)
29#define sg_dma_address(sg) ((sg)->dma_address)
30
28#endif /* _ASM_IA64_SCATTERLIST_H */ 31#endif /* _ASM_IA64_SCATTERLIST_H */
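
The two ia64 hunks above only move the sg_dma_address()/sg_dma_len() accessors from pci.h to scatterlist.h, next to the struct scatterlist they operate on; users are unchanged. For context, a hedged sketch of a typical consumer after dma_map_sg(); this is assumed driver code, not from this patch, and demo_program_sg is illustrative (drivers of this era walk the array directly):

static void demo_program_sg(struct scatterlist *sg, int nents)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t addr = sg_dma_address(sg);	/* bus address of the segment */
		unsigned int len = sg_dma_len(sg);	/* mapped length of the segment */

		/* hand addr/len to the device's DMA engine here */
		(void)addr;
		(void)len;
	}
}
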
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
index 1d45e1518fb3..e52b8508083b 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/include/asm-ia64/sn/xpc.h
@@ -673,7 +673,7 @@ extern irqreturn_t xpc_notify_IRQ_handler(int, void *);
673extern void xpc_dropped_IPI_check(struct xpc_partition *); 673extern void xpc_dropped_IPI_check(struct xpc_partition *);
674extern void xpc_activate_partition(struct xpc_partition *); 674extern void xpc_activate_partition(struct xpc_partition *);
675extern void xpc_activate_kthreads(struct xpc_channel *, int); 675extern void xpc_activate_kthreads(struct xpc_channel *, int);
676extern void xpc_create_kthreads(struct xpc_channel *, int); 676extern void xpc_create_kthreads(struct xpc_channel *, int, int);
677extern void xpc_disconnect_wait(int); 677extern void xpc_disconnect_wait(int);
678 678
679 679
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 8adcde0934ca..9b505b25544f 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -88,6 +88,7 @@ struct thread_info {
88#define TIF_MEMDIE 17 88#define TIF_MEMDIE 17
89#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ 89#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */
90#define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */ 90#define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */
91#define TIF_FREEZE 20 /* is freezing for suspend */
91 92
92#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 93#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
93#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 94#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
@@ -98,6 +99,7 @@ struct thread_info {
98#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 99#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
99#define _TIF_MCA_INIT (1 << TIF_MCA_INIT) 100#define _TIF_MCA_INIT (1 << TIF_MCA_INIT)
100#define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED) 101#define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED)
102#define _TIF_FREEZE (1 << TIF_FREEZE)
101 103
102/* "work to do on user-return" bits */ 104/* "work to do on user-return" bits */
103#define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) 105#define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
diff --git a/include/asm-m32r/cacheflush.h b/include/asm-m32r/cacheflush.h
index 8b261b49149e..56961a9511b2 100644
--- a/include/asm-m32r/cacheflush.h
+++ b/include/asm-m32r/cacheflush.h
@@ -9,6 +9,7 @@ extern void _flush_cache_copyback_all(void);
9#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104) 9#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
10#define flush_cache_all() do { } while (0) 10#define flush_cache_all() do { } while (0)
11#define flush_cache_mm(mm) do { } while (0) 11#define flush_cache_mm(mm) do { } while (0)
12#define flush_cache_dup_mm(mm) do { } while (0)
12#define flush_cache_range(vma, start, end) do { } while (0) 13#define flush_cache_range(vma, start, end) do { } while (0)
13#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 14#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
14#define flush_dcache_page(page) do { } while (0) 15#define flush_dcache_page(page) do { } while (0)
@@ -29,6 +30,7 @@ extern void smp_flush_cache_all(void);
29#elif defined(CONFIG_CHIP_M32102) 30#elif defined(CONFIG_CHIP_M32102)
30#define flush_cache_all() do { } while (0) 31#define flush_cache_all() do { } while (0)
31#define flush_cache_mm(mm) do { } while (0) 32#define flush_cache_mm(mm) do { } while (0)
33#define flush_cache_dup_mm(mm) do { } while (0)
32#define flush_cache_range(vma, start, end) do { } while (0) 34#define flush_cache_range(vma, start, end) do { } while (0)
33#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 35#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
34#define flush_dcache_page(page) do { } while (0) 36#define flush_dcache_page(page) do { } while (0)
@@ -41,6 +43,7 @@ extern void smp_flush_cache_all(void);
41#else 43#else
42#define flush_cache_all() do { } while (0) 44#define flush_cache_all() do { } while (0)
43#define flush_cache_mm(mm) do { } while (0) 45#define flush_cache_mm(mm) do { } while (0)
46#define flush_cache_dup_mm(mm) do { } while (0)
44#define flush_cache_range(vma, start, end) do { } while (0) 47#define flush_cache_range(vma, start, end) do { } while (0)
45#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 48#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
46#define flush_dcache_page(page) do { } while (0) 49#define flush_dcache_page(page) do { } while (0)
diff --git a/include/asm-m68k/cacheflush.h b/include/asm-m68k/cacheflush.h
index 24d3ff449135..16bf375fdbe1 100644
--- a/include/asm-m68k/cacheflush.h
+++ b/include/asm-m68k/cacheflush.h
@@ -89,6 +89,8 @@ static inline void flush_cache_mm(struct mm_struct *mm)
89 __flush_cache_030(); 89 __flush_cache_030();
90} 90}
91 91
92#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
93
92/* flush_cache_range/flush_cache_page must be macros to avoid 94/* flush_cache_range/flush_cache_page must be macros to avoid
93 a dependency on linux/mm.h, which includes this file... */ 95 a dependency on linux/mm.h, which includes this file... */
94static inline void flush_cache_range(struct vm_area_struct *vma, 96static inline void flush_cache_range(struct vm_area_struct *vma,
diff --git a/include/asm-m68k/swim_iop.h b/include/asm-m68k/swim_iop.h
deleted file mode 100644
index f29b67876b01..000000000000
--- a/include/asm-m68k/swim_iop.h
+++ /dev/null
@@ -1,221 +0,0 @@
1/*
2 * SWIM access through the IOP
3 * Written by Joshua M. Thompson
4 */
5
6/* IOP number and channel number for the SWIM */
7
8#define SWIM_IOP IOP_NUM_ISM
9#define SWIM_CHAN 1
10
11/* Command code: */
12
13#define CMD_INIT 0x01 /* Initialize */
14#define CMD_SHUTDOWN 0x02 /* Shutdown */
15#define CMD_START_POLL 0x03 /* Start insert/eject polling */
16#define CMD_STOP_POLL 0x04 /* Stop insert/eject polling */
17#define CMD_SETHFSTAG 0x05 /* Set HFS tag buffer address */
18#define CMD_STATUS 0x06 /* Status */
19#define CMD_EJECT 0x07 /* Eject */
20#define CMD_FORMAT 0x08 /* Format */
21#define CMD_FORMAT_VERIFY 0x09 /* Format and Verify */
22#define CMD_WRITE 0x0A /* Write */
23#define CMD_READ 0x0B /* Read */
24#define CMD_READ_VERIFY 0x0C /* Read and Verify */
25#define CMD_CACHE_CTRL 0x0D /* Cache control */
26#define CMD_TAGBUFF_CTRL 0x0E /* Tag buffer control */
27#define CMD_GET_ICON 0x0F /* Get Icon */
28
29/* Drive types: */
30
31/* note: apple sez DRV_FDHD is 4, but I get back a type */
32/* of 5 when I do a drive status check on my FDHD */
33
34#define DRV_NONE 0 /* No drive */
35#define DRV_UNKNOWN 1 /* Unspecified drive */
36#define DRV_400K 2 /* 400K */
37#define DRV_800K 3 /* 400K/800K */
38#define DRV_FDHD 5 /* 400K/800K/720K/1440K */
39#define DRV_HD20 7 /* Apple HD20 */
40
41/* Format types: */
42
43#define FMT_HD20 0x0001 /* Apple HD20 */
44#define FMT_400K 0x0002 /* 400K (GCR) */
45#define FMT_800K 0x0004 /* 800K (GCR) */
46#define FMT_720K 0x0008 /* 720K (MFM) */
47#define FMT_1440K 0x0010 /* 1.44M (MFM) */
48
49#define FMD_KIND_400K 1
50#define FMD_KIND_800K 2
51#define FMD_KIND_720K 3
52#define FMD_KIND_1440K 1
53
54/* Icon Flags: */
55
56#define ICON_MEDIA 0x01 /* Have IOP supply media icon */
57#define ICON_DRIVE 0x01 /* Have IOP supply drive icon */
58
59/* Error codes: */
60
61#define gcrOnMFMErr -400 /* GCR (400/800K) on HD media */
62#define verErr -84 /* verify failed */
63#define fmt2Err -83 /* can't get enough sync during format */
64#define fmt1Err -82 /* can't find sector 0 after track format */
65#define sectNFErr -81 /* can't find sector */
66#define seekErr -80 /* drive error during seek */
67#define spdAdjErr -79 /* can't set drive speed */
68#define twoSideErr -78 /* drive is single-sided */
69#define initIWMErr -77 /* error during initialization */
70#define tk0badErr -76 /* track zero is bad */
71#define cantStepErr -75 /* drive error during step */
72#define wrUnderrun -74 /* write underrun occurred */
73#define badDBtSlp -73 /* bad data bitslip marks */
74#define badDCksum -72 /* bad data checksum */
75#define noDtaMkErr -71 /* can't find data mark */
76#define badBtSlpErr -70 /* bad address bitslip marks */
77#define badCksmErr -69 /* bad address-mark checksum */
78#define dataVerErr -68 /* read-verify failed */
79#define noAdrMkErr -67 /* can't find an address mark */
80#define noNybErr -66 /* no nybbles? disk is probably degaussed */
81#define offLinErr -65 /* no disk in drive */
82#define noDriveErr -64 /* drive isn't connected */
83#define nsDrvErr -56 /* no such drive */
84#define paramErr -50 /* bad positioning information */
85#define wPrErr -44 /* write protected */
86#define openErr -23 /* already initialized */
87
88#ifndef __ASSEMBLY__
89
90struct swim_drvstatus {
91 __u16 curr_track; /* Current track number */
92 __u8 write_prot; /* 0x80 if disk is write protected */
93 __u8 disk_in_drive; /* 0x01 or 0x02 if a disk is in the drive */
94 __u8 installed; /* 0x01 if drive installed, 0xFF if not */
95 __u8 num_sides; /* 0x80 if two-sided format supported */
96 __u8 two_sided; /* 0xff if two-sided format diskette */
97 __u8 new_interface; /* 0x00 if old 400K drive, 0xFF if newer */
98 __u16 errors; /* Disk error count */
99 struct { /* 32 bits */
100 __u16 reserved;
101 __u16 :4;
102 __u16 external:1; /* Drive is external */
103 __u16 scsi:1; /* Drive is a SCSI drive */
104 __u16 fixed:1; /* Drive has fixed media */
105 __u16 secondary:1; /* Drive is secondary drive */
106 __u8 type; /* Drive type */
107 } info;
108 __u8 mfm_drive; /* 0xFF if this is an FDHD drive */
109 __u8 mfm_disk; /* 0xFF if 720K/1440K (MFM) disk */
110 __u8 mfm_format; /* 0x00 if 720K, 0xFF if 1440K */
111 __u8 ctlr_type; /* 0x00 if IWM, 0xFF if SWIM */
112 __u16 curr_format; /* Current format type */
113 __u16 allowed_fmt; /* Allowed format types */
114 __u32 num_blocks; /* Number of blocks on disk */
115 __u8 icon_flags; /* Icon flags */
116 __u8 unusued;
117};
118
119/* Commands issued from the host to the IOP: */
120
121struct swimcmd_init {
122 __u8 code; /* CMD_INIT */
123 __u8 unusued;
124 __u16 error;
125 __u8 drives[28]; /* drive type list */
126};
127
128struct swimcmd_startpoll {
129 __u8 code; /* CMD_START_POLL */
130 __u8 unusued;
131 __u16 error;
132};
133
134struct swimcmd_sethfstag {
135 __u8 code; /* CMD_SETHFSTAG */
136 __u8 unusued;
137 __u16 error;
138 caddr_t tagbuf; /* HFS tag buffer address */
139};
140
141struct swimcmd_status {
142 __u8 code; /* CMD_STATUS */
143 __u8 drive_num;
144 __u16 error;
145 struct swim_drvstatus status;
146};
147
148struct swimcmd_eject {
149 __u8 code; /* CMD_EJECT */
150 __u8 drive_num;
151 __u16 error;
152 struct swim_drvstatus status;
153};
154
155struct swimcmd_format {
156 __u8 code; /* CMD_FORMAT */
157 __u8 drive_num;
158 __u16 error;
159 union {
160 struct {
161 __u16 fmt; /* format kind */
162 __u8 hdrbyte; /* fmt byte for hdr (0=default) */
163 __u8 interleave; /* interleave (0 = default) */
164 caddr_t databuf; /* sector data buff (0=default */
165 caddr_t tagbuf; /* tag data buffer (0=default) */
166 } f;
167 struct swim_drvstatus status;
168 } p;
169};
170
171struct swimcmd_fmtverify {
172 __u8 code; /* CMD_FORMAT_VERIFY */
173 __u8 drive_num;
174 __u16 error;
175};
176
177struct swimcmd_rw {
178 __u8 code; /* CMD_READ, CMD_WRITE or CMD_READ_VERIFY */
179 __u8 drive_num;
180 __u16 error;
181 caddr_t buffer; /* R/W buffer address */
182 __u32 first_block; /* Starting block */
183 __u32 num_blocks; /* Number of blocks */
184 __u8 tag[12]; /* tag data */
185};
186
187struct swimcmd_cachectl {
188 __u8 code; /* CMD_CACHE_CTRL */
189 __u8 unused;
190 __u16 error;
191 __u8 enable; /* Nonzero to enable cache */
192 __u8 install; /* +1 = install, -1 = remove, 0 = neither */
193};
194
195struct swimcmd_tagbufctl {
196 __u8 code; /* CMD_TAGBUFF_CTRL */
197 __u8 unused;
198 __u16 error;
199 caddr_t buf; /* buffer address or 0 to disable */
200};
201
202struct swimcmd_geticon {
203 __u8 code; /* CMD_GET_ICON */
204 __u8 drive_num;
205 __u16 error;
206 caddr_t buffer; /* Nuffer address */
207 __u16 kind; /* 0 = media icon, 1 = drive icon */
208 __u16 unused;
209 __u16 max_bytes; /* maximum byte count */
210};
211
212/* Messages from the SWIM IOP to the host CPU: */
213
214struct swimmsg_status {
215 __u8 code; /* 1 = insert, 2 = eject, 3 = status changed */
216 __u8 drive_num;
217 __u16 error;
218 struct swim_drvstatus status;
219};
220
221#endif /* __ASSEMBLY__ */
diff --git a/include/asm-m68knommu/cacheflush.h b/include/asm-m68knommu/cacheflush.h
index c3aadf3b0d88..163dcb1a9689 100644
--- a/include/asm-m68knommu/cacheflush.h
+++ b/include/asm-m68knommu/cacheflush.h
@@ -8,6 +8,7 @@
8 8
9#define flush_cache_all() __flush_cache_all() 9#define flush_cache_all() __flush_cache_all()
10#define flush_cache_mm(mm) do { } while (0) 10#define flush_cache_mm(mm) do { } while (0)
11#define flush_cache_dup_mm(mm) do { } while (0)
11#define flush_cache_range(vma, start, end) __flush_cache_all() 12#define flush_cache_range(vma, start, end) __flush_cache_all()
12#define flush_cache_page(vma, vmaddr) do { } while (0) 13#define flush_cache_page(vma, vmaddr) do { } while (0)
13#define flush_dcache_range(start,len) __flush_cache_all() 14#define flush_dcache_range(start,len) __flush_cache_all()
diff --git a/include/asm-mips/cacheflush.h b/include/asm-mips/cacheflush.h
index e3c9925876a3..0ddada3bb0b6 100644
--- a/include/asm-mips/cacheflush.h
+++ b/include/asm-mips/cacheflush.h
@@ -17,6 +17,7 @@
17 * 17 *
18 * - flush_cache_all() flushes entire cache 18 * - flush_cache_all() flushes entire cache
19 * - flush_cache_mm(mm) flushes the specified mm context's cache lines 19 * - flush_cache_mm(mm) flushes the specified mm context's cache lines
20 * - flush_cache_dup_mm(mm) handles cache flushing when forking
20 * - flush_cache_page(mm, vmaddr, pfn) flushes a single page 21 * - flush_cache_page(mm, vmaddr, pfn) flushes a single page
21 * - flush_cache_range(vma, start, end) flushes a range of pages 22 * - flush_cache_range(vma, start, end) flushes a range of pages
22 * - flush_icache_range(start, end) flush a range of instructions 23 * - flush_icache_range(start, end) flush a range of instructions
@@ -31,6 +32,7 @@
31extern void (*flush_cache_all)(void); 32extern void (*flush_cache_all)(void);
32extern void (*__flush_cache_all)(void); 33extern void (*__flush_cache_all)(void);
33extern void (*flush_cache_mm)(struct mm_struct *mm); 34extern void (*flush_cache_mm)(struct mm_struct *mm);
35#define flush_cache_dup_mm(mm) do { (void) (mm); } while (0)
34extern void (*flush_cache_range)(struct vm_area_struct *vma, 36extern void (*flush_cache_range)(struct vm_area_struct *vma,
35 unsigned long start, unsigned long end); 37 unsigned long start, unsigned long end);
36extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn); 38extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h
index 0dc1a45c27ed..2f9e1a9ec51f 100644
--- a/include/asm-mips/page.h
+++ b/include/asm-mips/page.h
@@ -35,7 +35,6 @@
35#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
36 36
37#include <linux/pfn.h> 37#include <linux/pfn.h>
38#include <asm/cpu-features.h>
39#include <asm/io.h> 38#include <asm/io.h>
40 39
41extern void clear_page(void * page); 40extern void clear_page(void * page);
@@ -61,16 +60,13 @@ static inline void clear_user_page(void *addr, unsigned long vaddr,
61 flush_data_cache_page((unsigned long)addr); 60 flush_data_cache_page((unsigned long)addr);
62} 61}
63 62
64static inline void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, 63extern void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
65 struct page *to) 64 struct page *to);
66{ 65struct vm_area_struct;
67 extern void (*flush_data_cache_page)(unsigned long addr); 66extern void copy_user_highpage(struct page *to, struct page *from,
67 unsigned long vaddr, struct vm_area_struct *vma);
68 68
69 copy_page(vto, vfrom); 69#define __HAVE_ARCH_COPY_USER_HIGHPAGE
70 if (!cpu_has_ic_fills_f_dc ||
71 pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
72 flush_data_cache_page((unsigned long)vto);
73}
74 70
75/* 71/*
76 * These are used to make use of C type-checking.. 72 * These are used to make use of C type-checking..
diff --git a/include/asm-parisc/cacheflush.h b/include/asm-parisc/cacheflush.h
index 2bc41f2e0271..aedb0512cb04 100644
--- a/include/asm-parisc/cacheflush.h
+++ b/include/asm-parisc/cacheflush.h
@@ -15,6 +15,8 @@
15#define flush_cache_mm(mm) flush_cache_all_local() 15#define flush_cache_mm(mm) flush_cache_all_local()
16#endif 16#endif
17 17
18#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
19
18#define flush_kernel_dcache_range(start,size) \ 20#define flush_kernel_dcache_range(start,size) \
19 flush_kernel_dcache_range_asm((start), (start)+(size)); 21 flush_kernel_dcache_range_asm((start), (start)+(size));
20 22
diff --git a/include/asm-powerpc/cacheflush.h b/include/asm-powerpc/cacheflush.h
index 8a740c88d93d..08e93e789219 100644
--- a/include/asm-powerpc/cacheflush.h
+++ b/include/asm-powerpc/cacheflush.h
@@ -18,6 +18,7 @@
18 */ 18 */
19#define flush_cache_all() do { } while (0) 19#define flush_cache_all() do { } while (0)
20#define flush_cache_mm(mm) do { } while (0) 20#define flush_cache_mm(mm) do { } while (0)
21#define flush_cache_dup_mm(mm) do { } while (0)
21#define flush_cache_range(vma, start, end) do { } while (0) 22#define flush_cache_range(vma, start, end) do { } while (0)
22#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 23#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
23#define flush_icache_page(vma, page) do { } while (0) 24#define flush_icache_page(vma, page) do { } while (0)
diff --git a/include/asm-powerpc/thread_info.h b/include/asm-powerpc/thread_info.h
index d339e2e88b11..3f32ca8bfec9 100644
--- a/include/asm-powerpc/thread_info.h
+++ b/include/asm-powerpc/thread_info.h
@@ -122,6 +122,7 @@ static inline struct thread_info *current_thread_info(void)
122#define TIF_RESTOREALL 12 /* Restore all regs (implies NOERROR) */ 122#define TIF_RESTOREALL 12 /* Restore all regs (implies NOERROR) */
123#define TIF_NOERROR 14 /* Force successful syscall return */ 123#define TIF_NOERROR 14 /* Force successful syscall return */
124#define TIF_RESTORE_SIGMASK 15 /* Restore signal mask in do_signal */ 124#define TIF_RESTORE_SIGMASK 15 /* Restore signal mask in do_signal */
125#define TIF_FREEZE 16 /* Freezing for suspend */
125 126
126/* as above, but as bit values */ 127/* as above, but as bit values */
127#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 128#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -138,6 +139,7 @@ static inline struct thread_info *current_thread_info(void)
138#define _TIF_RESTOREALL (1<<TIF_RESTOREALL) 139#define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
139#define _TIF_NOERROR (1<<TIF_NOERROR) 140#define _TIF_NOERROR (1<<TIF_NOERROR)
140#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 141#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
142#define _TIF_FREEZE (1<<TIF_FREEZE)
141#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP) 143#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
142 144
143#define _TIF_USER_WORK_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \ 145#define _TIF_USER_WORK_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
diff --git a/include/asm-s390/cacheflush.h b/include/asm-s390/cacheflush.h
index e399a8ba2ed7..f7cade8083f3 100644
--- a/include/asm-s390/cacheflush.h
+++ b/include/asm-s390/cacheflush.h
@@ -7,6 +7,7 @@
7/* Caches aren't brain-dead on the s390. */ 7/* Caches aren't brain-dead on the s390. */
8#define flush_cache_all() do { } while (0) 8#define flush_cache_all() do { } while (0)
9#define flush_cache_mm(mm) do { } while (0) 9#define flush_cache_mm(mm) do { } while (0)
10#define flush_cache_dup_mm(mm) do { } while (0)
10#define flush_cache_range(vma, start, end) do { } while (0) 11#define flush_cache_range(vma, start, end) do { } while (0)
11#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 12#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
12#define flush_dcache_page(page) do { } while (0) 13#define flush_dcache_page(page) do { } while (0)
diff --git a/include/asm-sh/cpu-sh2/cacheflush.h b/include/asm-sh/cpu-sh2/cacheflush.h
index f556fa80ea97..2979efb26de3 100644
--- a/include/asm-sh/cpu-sh2/cacheflush.h
+++ b/include/asm-sh/cpu-sh2/cacheflush.h
@@ -15,6 +15,7 @@
15 * 15 *
16 * - flush_cache_all() flushes entire cache 16 * - flush_cache_all() flushes entire cache
17 * - flush_cache_mm(mm) flushes the specified mm context's cache lines 17 * - flush_cache_mm(mm) flushes the specified mm context's cache lines
18 * - flush_cache_dup_mm(mm) handles cache flushing when forking
18 * - flush_cache_page(mm, vmaddr, pfn) flushes a single page 19 * - flush_cache_page(mm, vmaddr, pfn) flushes a single page
19 * - flush_cache_range(vma, start, end) flushes a range of pages 20 * - flush_cache_range(vma, start, end) flushes a range of pages
20 * 21 *
@@ -27,6 +28,7 @@
27 */ 28 */
28#define flush_cache_all() do { } while (0) 29#define flush_cache_all() do { } while (0)
29#define flush_cache_mm(mm) do { } while (0) 30#define flush_cache_mm(mm) do { } while (0)
31#define flush_cache_dup_mm(mm) do { } while (0)
30#define flush_cache_range(vma, start, end) do { } while (0) 32#define flush_cache_range(vma, start, end) do { } while (0)
31#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 33#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
32#define flush_dcache_page(page) do { } while (0) 34#define flush_dcache_page(page) do { } while (0)
diff --git a/include/asm-sh/cpu-sh3/cacheflush.h b/include/asm-sh/cpu-sh3/cacheflush.h
index 03fde97a7fd0..f70d8ef76a15 100644
--- a/include/asm-sh/cpu-sh3/cacheflush.h
+++ b/include/asm-sh/cpu-sh3/cacheflush.h
@@ -15,6 +15,7 @@
15 * 15 *
16 * - flush_cache_all() flushes entire cache 16 * - flush_cache_all() flushes entire cache
17 * - flush_cache_mm(mm) flushes the specified mm context's cache lines 17 * - flush_cache_mm(mm) flushes the specified mm context's cache lines
18 * - flush_cache_dup_mm(mm) handles cache flushing when forking
18 * - flush_cache_page(mm, vmaddr, pfn) flushes a single page 19 * - flush_cache_page(mm, vmaddr, pfn) flushes a single page
19 * - flush_cache_range(vma, start, end) flushes a range of pages 20 * - flush_cache_range(vma, start, end) flushes a range of pages
20 * 21 *
@@ -39,6 +40,7 @@
39 40
40void flush_cache_all(void); 41void flush_cache_all(void);
41void flush_cache_mm(struct mm_struct *mm); 42void flush_cache_mm(struct mm_struct *mm);
43#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
42void flush_cache_range(struct vm_area_struct *vma, unsigned long start, 44void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
43 unsigned long end); 45 unsigned long end);
44void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); 46void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
@@ -48,6 +50,7 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page);
48#else 50#else
49#define flush_cache_all() do { } while (0) 51#define flush_cache_all() do { } while (0)
50#define flush_cache_mm(mm) do { } while (0) 52#define flush_cache_mm(mm) do { } while (0)
53#define flush_cache_dup_mm(mm) do { } while (0)
51#define flush_cache_range(vma, start, end) do { } while (0) 54#define flush_cache_range(vma, start, end) do { } while (0)
52#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 55#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
53#define flush_dcache_page(page) do { } while (0) 56#define flush_dcache_page(page) do { } while (0)
diff --git a/include/asm-sh/cpu-sh4/cacheflush.h b/include/asm-sh/cpu-sh4/cacheflush.h
index 515fd574267c..b01a10f31225 100644
--- a/include/asm-sh/cpu-sh4/cacheflush.h
+++ b/include/asm-sh/cpu-sh4/cacheflush.h
@@ -18,6 +18,7 @@
18 */ 18 */
19void flush_cache_all(void); 19void flush_cache_all(void);
20void flush_cache_mm(struct mm_struct *mm); 20void flush_cache_mm(struct mm_struct *mm);
21#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
21void flush_cache_range(struct vm_area_struct *vma, unsigned long start, 22void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
22 unsigned long end); 23 unsigned long end);
23void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, 24void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h
index 0c01dc550819..879f741105db 100644
--- a/include/asm-sh/thread_info.h
+++ b/include/asm-sh/thread_info.h
@@ -106,6 +106,7 @@ static inline struct thread_info *current_thread_info(void)
106#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ 106#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
107#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 107#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
108#define TIF_MEMDIE 18 108#define TIF_MEMDIE 18
109#define TIF_FREEZE 19
109 110
110#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 111#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
111#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 112#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -114,6 +115,7 @@ static inline struct thread_info *current_thread_info(void)
114#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 115#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
115#define _TIF_USEDFPU (1<<TIF_USEDFPU) 116#define _TIF_USEDFPU (1<<TIF_USEDFPU)
116#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 117#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
118#define _TIF_FREEZE (1<<TIF_FREEZE)
117 119
118#define _TIF_WORK_MASK 0x000000FE /* work to do on interrupt/exception return */ 120#define _TIF_WORK_MASK 0x000000FE /* work to do on interrupt/exception return */
119#define _TIF_ALLWORK_MASK 0x000000FF /* work to do on any return to u-space */ 121#define _TIF_ALLWORK_MASK 0x000000FF /* work to do on any return to u-space */
diff --git a/include/asm-sh64/cacheflush.h b/include/asm-sh64/cacheflush.h
index 55f71aa0aa6b..1e53a47bdc97 100644
--- a/include/asm-sh64/cacheflush.h
+++ b/include/asm-sh64/cacheflush.h
@@ -21,6 +21,8 @@ extern void flush_icache_user_range(struct vm_area_struct *vma,
21 struct page *page, unsigned long addr, 21 struct page *page, unsigned long addr,
22 int len); 22 int len);
23 23
24#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
25
24#define flush_dcache_mmap_lock(mapping) do { } while (0) 26#define flush_dcache_mmap_lock(mapping) do { } while (0)
25#define flush_dcache_mmap_unlock(mapping) do { } while (0) 27#define flush_dcache_mmap_unlock(mapping) do { } while (0)
26 28
diff --git a/include/asm-sh64/pgalloc.h b/include/asm-sh64/pgalloc.h
index b29dd468817e..cb803e56cb64 100644
--- a/include/asm-sh64/pgalloc.h
+++ b/include/asm-sh64/pgalloc.h
@@ -41,7 +41,7 @@ static inline void pgd_init(unsigned long page)
41static inline pgd_t *get_pgd_slow(void) 41static inline pgd_t *get_pgd_slow(void)
42{ 42{
43 unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t)); 43 unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
44 pgd_t *ret = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL); 44 pgd_t *ret = kmalloc(pgd_size, GFP_KERNEL);
45 return ret; 45 return ret;
46} 46}
47 47
diff --git a/include/asm-sparc/cacheflush.h b/include/asm-sparc/cacheflush.h
index fc632f811cd8..68ac10910271 100644
--- a/include/asm-sparc/cacheflush.h
+++ b/include/asm-sparc/cacheflush.h
@@ -48,6 +48,7 @@ BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
48 48
49#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)() 49#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
50#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm) 50#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
51#define flush_cache_dup_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
51#define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end) 52#define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
52#define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr) 53#define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)
53#define flush_icache_range(start, end) do { } while (0) 54#define flush_icache_range(start, end) do { } while (0)
diff --git a/include/asm-sparc64/cacheflush.h b/include/asm-sparc64/cacheflush.h
index 745d1ab60371..122e4058dd9e 100644
--- a/include/asm-sparc64/cacheflush.h
+++ b/include/asm-sparc64/cacheflush.h
@@ -12,6 +12,7 @@
12/* These are the same regardless of whether this is an SMP kernel or not. */ 12/* These are the same regardless of whether this is an SMP kernel or not. */
13#define flush_cache_mm(__mm) \ 13#define flush_cache_mm(__mm) \
14 do { if ((__mm) == current->mm) flushw_user(); } while(0) 14 do { if ((__mm) == current->mm) flushw_user(); } while(0)
15#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
15#define flush_cache_range(vma, start, end) \ 16#define flush_cache_range(vma, start, end) \
16 flush_cache_mm((vma)->vm_mm) 17 flush_cache_mm((vma)->vm_mm)
17#define flush_cache_page(vma, page, pfn) \ 18#define flush_cache_page(vma, page, pfn) \
diff --git a/include/asm-v850/cacheflush.h b/include/asm-v850/cacheflush.h
index e1a87f82f1a4..9ece05a202ef 100644
--- a/include/asm-v850/cacheflush.h
+++ b/include/asm-v850/cacheflush.h
@@ -24,6 +24,7 @@
24 systems with MMUs, so we don't need them. */ 24 systems with MMUs, so we don't need them. */
25#define flush_cache_all() ((void)0) 25#define flush_cache_all() ((void)0)
26#define flush_cache_mm(mm) ((void)0) 26#define flush_cache_mm(mm) ((void)0)
27#define flush_cache_dup_mm(mm) ((void)0)
27#define flush_cache_range(vma, start, end) ((void)0) 28#define flush_cache_range(vma, start, end) ((void)0)
28#define flush_cache_page(vma, vmaddr, pfn) ((void)0) 29#define flush_cache_page(vma, vmaddr, pfn) ((void)0)
29#define flush_dcache_page(page) ((void)0) 30#define flush_dcache_page(page) ((void)0)
diff --git a/include/asm-x86_64/cacheflush.h b/include/asm-x86_64/cacheflush.h
index d32f7f58752a..ab1cb5c7dc92 100644
--- a/include/asm-x86_64/cacheflush.h
+++ b/include/asm-x86_64/cacheflush.h
@@ -7,6 +7,7 @@
7/* Caches aren't brain-dead on the intel. */ 7/* Caches aren't brain-dead on the intel. */
8#define flush_cache_all() do { } while (0) 8#define flush_cache_all() do { } while (0)
9#define flush_cache_mm(mm) do { } while (0) 9#define flush_cache_mm(mm) do { } while (0)
10#define flush_cache_dup_mm(mm) do { } while (0)
10#define flush_cache_range(vma, start, end) do { } while (0) 11#define flush_cache_range(vma, start, end) do { } while (0)
11#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 12#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
12#define flush_dcache_page(page) do { } while (0) 13#define flush_dcache_page(page) do { } while (0)
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index 952783d35c7b..3227bc93d69b 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -189,6 +189,7 @@ static inline unsigned int cpuid_edx(unsigned int op)
189 189
190#define MSR_IA32_PERFCTR0 0xc1 190#define MSR_IA32_PERFCTR0 0xc1
191#define MSR_IA32_PERFCTR1 0xc2 191#define MSR_IA32_PERFCTR1 0xc2
192#define MSR_FSB_FREQ 0xcd
192 193
193#define MSR_MTRRcap 0x0fe 194#define MSR_MTRRcap 0x0fe
194#define MSR_IA32_BBL_CR_CTL 0x119 195#define MSR_IA32_BBL_CR_CTL 0x119
@@ -311,6 +312,9 @@ static inline unsigned int cpuid_edx(unsigned int op)
311#define MSR_IA32_PERF_STATUS 0x198 312#define MSR_IA32_PERF_STATUS 0x198
312#define MSR_IA32_PERF_CTL 0x199 313#define MSR_IA32_PERF_CTL 0x199
313 314
315#define MSR_IA32_MPERF 0xE7
316#define MSR_IA32_APERF 0xE8
317
314#define MSR_IA32_THERM_CONTROL 0x19a 318#define MSR_IA32_THERM_CONTROL 0x19a
315#define MSR_IA32_THERM_INTERRUPT 0x19b 319#define MSR_IA32_THERM_INTERRUPT 0x19b
316#define MSR_IA32_THERM_STATUS 0x19c 320#define MSR_IA32_THERM_STATUS 0x19c
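
MSR_IA32_APERF and MSR_IA32_MPERF are normally read as a pair: MPERF ticks at the maximum (P0) rate and APERF at the core's actual rate, both only while the core is not idle, so the ratio of their deltas over an interval approximates the average delivered frequency as a fraction of maximum -- presumably what the getavg hook added to cpufreq later in this diff consumes. A user-space arithmetic sketch with invented sample values (it does not read real MSRs):

/* Arithmetic sketch only: estimate average frequency from two (aperf, mperf)
 * samples. Sample values are invented; reading the real MSRs would need the
 * msr driver or kernel context. */
#include <stdint.h>
#include <stdio.h>

struct sample { uint64_t aperf, mperf; };

static double avg_mhz(struct sample start, struct sample end, double max_mhz)
{
        uint64_t da = end.aperf - start.aperf;   /* cycles at actual speed  */
        uint64_t dm = end.mperf - start.mperf;   /* cycles at maximum speed */

        if (dm == 0)
                return 0.0;                      /* core was idle throughout */
        return max_mhz * (double)da / (double)dm;
}

int main(void)
{
        struct sample t0 = { .aperf = 1000000, .mperf = 1000000 };
        struct sample t1 = { .aperf = 2500000, .mperf = 3000000 };

        /* ran at 1.5M/2M = 75% of max over the interval */
        printf("estimated average: %.0f MHz\n", avg_mhz(t0, t1, 2400.0));
        return 0;
}
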
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index 787a08114b48..74a6c74397f7 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -122,6 +122,7 @@ static inline struct thread_info *stack_thread_info(void)
122#define TIF_MEMDIE 20 122#define TIF_MEMDIE 20
123#define TIF_DEBUG 21 /* uses debug registers */ 123#define TIF_DEBUG 21 /* uses debug registers */
124#define TIF_IO_BITMAP 22 /* uses I/O bitmap */ 124#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
125#define TIF_FREEZE 23 /* is freezing for suspend */
125 126
126#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 127#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
127#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 128#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -137,6 +138,7 @@ static inline struct thread_info *stack_thread_info(void)
137#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) 138#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
138#define _TIF_DEBUG (1<<TIF_DEBUG) 139#define _TIF_DEBUG (1<<TIF_DEBUG)
139#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) 140#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP)
141#define _TIF_FREEZE (1<<TIF_FREEZE)
140 142
141/* work to do on interrupt/exception return */ 143/* work to do on interrupt/exception return */
142#define _TIF_WORK_MASK \ 144#define _TIF_WORK_MASK \
diff --git a/include/asm-xtensa/cacheflush.h b/include/asm-xtensa/cacheflush.h
index 337765b629de..22ef901b7845 100644
--- a/include/asm-xtensa/cacheflush.h
+++ b/include/asm-xtensa/cacheflush.h
@@ -75,6 +75,7 @@ extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
75 75
76#define flush_cache_all() __flush_invalidate_cache_all(); 76#define flush_cache_all() __flush_invalidate_cache_all();
77#define flush_cache_mm(mm) __flush_invalidate_cache_all(); 77#define flush_cache_mm(mm) __flush_invalidate_cache_all();
78#define flush_cache_dup_mm(mm) __flush_invalidate_cache_all();
78 79
79#define flush_cache_vmap(start,end) __flush_invalidate_cache_all(); 80#define flush_cache_vmap(start,end) __flush_invalidate_cache_all();
80#define flush_cache_vunmap(start,end) __flush_invalidate_cache_all(); 81#define flush_cache_vunmap(start,end) __flush_invalidate_cache_all();
@@ -88,6 +89,7 @@ extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned lon
88 89
89#define flush_cache_all() do { } while (0) 90#define flush_cache_all() do { } while (0)
90#define flush_cache_mm(mm) do { } while (0) 91#define flush_cache_mm(mm) do { } while (0)
92#define flush_cache_dup_mm(mm) do { } while (0)
91 93
92#define flush_cache_vmap(start,end) do { } while (0) 94#define flush_cache_vmap(start,end) do { } while (0)
93#define flush_cache_vunmap(start,end) do { } while (0) 95#define flush_cache_vunmap(start,end) do { } while (0)
diff --git a/include/asm-xtensa/termbits.h b/include/asm-xtensa/termbits.h
index c780593ff5f9..057b9a3d8f83 100644
--- a/include/asm-xtensa/termbits.h
+++ b/include/asm-xtensa/termbits.h
@@ -30,6 +30,17 @@ struct termios {
30 cc_t c_cc[NCCS]; /* control characters */ 30 cc_t c_cc[NCCS]; /* control characters */
31}; 31};
32 32
33struct ktermios {
34 tcflag_t c_iflag; /* input mode flags */
35 tcflag_t c_oflag; /* output mode flags */
36 tcflag_t c_cflag; /* control mode flags */
37 tcflag_t c_lflag; /* local mode flags */
38 cc_t c_line; /* line discipline */
39 cc_t c_cc[NCCS]; /* control characters */
40 speed_t c_ispeed; /* input speed */
41 speed_t c_ospeed; /* output speed */
42};
43
33/* c_cc characters */ 44/* c_cc characters */
34 45
35#define VINTR 0 46#define VINTR 0
diff --git a/include/asm-xtensa/uaccess.h b/include/asm-xtensa/uaccess.h
index 88a64e1144d5..d6352da05b10 100644
--- a/include/asm-xtensa/uaccess.h
+++ b/include/asm-xtensa/uaccess.h
@@ -23,7 +23,6 @@
23 23
24#ifdef __ASSEMBLY__ 24#ifdef __ASSEMBLY__
25 25
26#define _ASMLANGUAGE
27#include <asm/current.h> 26#include <asm/current.h>
28#include <asm/asm-offsets.h> 27#include <asm/asm-offsets.h>
29#include <asm/processor.h> 28#include <asm/processor.h>
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 3372ec6bf53a..a30ef13c9e62 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -105,6 +105,7 @@ struct kiocb {
105 wait_queue_t ki_wait; 105 wait_queue_t ki_wait;
106 loff_t ki_pos; 106 loff_t ki_pos;
107 107
108 atomic_t ki_bio_count; /* num bio used for this iocb */
108 void *private; 109 void *private;
109 /* State that we remember to be able to restart/retry */ 110 /* State that we remember to be able to restart/retry */
110 unsigned short ki_opcode; 111 unsigned short ki_opcode;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 092dbd0e7658..08daf3272c02 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -309,6 +309,7 @@ extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
309 gfp_t); 309 gfp_t);
310extern void bio_set_pages_dirty(struct bio *bio); 310extern void bio_set_pages_dirty(struct bio *bio);
311extern void bio_check_pages_dirty(struct bio *bio); 311extern void bio_check_pages_dirty(struct bio *bio);
312extern void bio_release_pages(struct bio *bio);
312extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int); 313extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
313extern int bio_uncopy_user(struct bio *); 314extern int bio_uncopy_user(struct bio *);
314void zero_fill_bio(struct bio *bio); 315void zero_fill_bio(struct bio *bio);
diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h
index be512cc98791..4c2632a8d31b 100644
--- a/include/linux/coda_linux.h
+++ b/include/linux/coda_linux.h
@@ -64,7 +64,7 @@ void coda_sysctl_clean(void);
64 64
65#define CODA_ALLOC(ptr, cast, size) do { \ 65#define CODA_ALLOC(ptr, cast, size) do { \
66 if (size < PAGE_SIZE) \ 66 if (size < PAGE_SIZE) \
67 ptr = (cast)kmalloc((unsigned long) size, GFP_KERNEL); \ 67 ptr = kmalloc((unsigned long) size, GFP_KERNEL); \
68 else \ 68 else \
69 ptr = (cast)vmalloc((unsigned long) size); \ 69 ptr = (cast)vmalloc((unsigned long) size); \
70 if (!ptr) \ 70 if (!ptr) \
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 538423d4a865..aca66984aafd 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -40,7 +40,7 @@ extern void __chk_io_ptr(void __iomem *);
40#error no compiler-gcc.h file for this gcc version 40#error no compiler-gcc.h file for this gcc version
41#elif __GNUC__ == 4 41#elif __GNUC__ == 4
42# include <linux/compiler-gcc4.h> 42# include <linux/compiler-gcc4.h>
43#elif __GNUC__ == 3 43#elif __GNUC__ == 3 && __GNUC_MINOR__ >= 2
44# include <linux/compiler-gcc3.h> 44# include <linux/compiler-gcc3.h>
45#else 45#else
46# error Sorry, your compiler is too old/not recognized. 46# error Sorry, your compiler is too old/not recognized.
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index a7f015027535..fef6f3d0a4a7 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -160,31 +160,6 @@ struct configfs_group_operations {
160 void (*drop_item)(struct config_group *group, struct config_item *item); 160 void (*drop_item)(struct config_group *group, struct config_item *item);
161}; 161};
162 162
163
164
165/**
166 * Use these macros to make defining attributes easier. See include/linux/device.h
167 * for examples..
168 */
169
170#if 0
171#define __ATTR(_name,_mode,_show,_store) { \
172 .attr = {.ca_name = __stringify(_name), .ca_mode = _mode, .ca_owner = THIS_MODULE }, \
173 .show = _show, \
174 .store = _store, \
175}
176
177#define __ATTR_RO(_name) { \
178 .attr = { .ca_name = __stringify(_name), .ca_mode = 0444, .ca_owner = THIS_MODULE }, \
179 .show = _name##_show, \
180}
181
182#define __ATTR_NULL { .attr = { .name = NULL } }
183
184#define attr_name(_attr) (_attr).attr.name
185#endif
186
187
188struct configfs_subsystem { 163struct configfs_subsystem {
189 struct config_group su_group; 164 struct config_group su_group;
190 struct semaphore su_sem; 165 struct semaphore su_sem;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 4ea39fee99c7..7f008f6bfdc3 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -172,6 +172,8 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
172 unsigned int relation); 172 unsigned int relation);
173 173
174 174
175extern int cpufreq_driver_getavg(struct cpufreq_policy *policy);
176
175int cpufreq_register_governor(struct cpufreq_governor *governor); 177int cpufreq_register_governor(struct cpufreq_governor *governor);
176void cpufreq_unregister_governor(struct cpufreq_governor *governor); 178void cpufreq_unregister_governor(struct cpufreq_governor *governor);
177 179
@@ -204,6 +206,7 @@ struct cpufreq_driver {
204 unsigned int (*get) (unsigned int cpu); 206 unsigned int (*get) (unsigned int cpu);
205 207
206 /* optional */ 208 /* optional */
209 unsigned int (*getavg) (unsigned int cpu);
207 int (*exit) (struct cpufreq_policy *policy); 210 int (*exit) (struct cpufreq_policy *policy);
208 int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg); 211 int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg);
209 int (*resume) (struct cpufreq_policy *policy); 212 int (*resume) (struct cpufreq_policy *policy);
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 8821e1f75b44..826b15e914e2 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -30,10 +30,19 @@ void cpuset_update_task_memory_state(void);
30 nodes_subset((nodes), current->mems_allowed) 30 nodes_subset((nodes), current->mems_allowed)
31int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl); 31int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl);
32 32
33extern int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask); 33extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
34static int inline cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) 34extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
35
36static int inline cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
37{
38 return number_of_cpusets <= 1 ||
39 __cpuset_zone_allowed_softwall(z, gfp_mask);
40}
41
42static int inline cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
35{ 43{
36 return number_of_cpusets <= 1 || __cpuset_zone_allowed(z, gfp_mask); 44 return number_of_cpusets <= 1 ||
45 __cpuset_zone_allowed_hardwall(z, gfp_mask);
37} 46}
38 47
39extern int cpuset_excl_nodes_overlap(const struct task_struct *p); 48extern int cpuset_excl_nodes_overlap(const struct task_struct *p);
@@ -94,7 +103,12 @@ static inline int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
94 return 1; 103 return 1;
95} 104}
96 105
97static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) 106static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
107{
108 return 1;
109}
110
111static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
98{ 112{
99 return 1; 113 return 1;
100} 114}
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 6fe56aaa6685..64177ec9a019 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -929,8 +929,6 @@ extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max)
929#define FB_MODE_IS_FIRST 16 929#define FB_MODE_IS_FIRST 16
930#define FB_MODE_IS_FROM_VAR 32 930#define FB_MODE_IS_FROM_VAR 32
931 931
932extern int fbmon_valid_timings(u_int pixclock, u_int htotal, u_int vtotal,
933 const struct fb_info *fb_info);
934extern int fbmon_dpms(const struct fb_info *fb_info); 932extern int fbmon_dpms(const struct fb_info *fb_info);
935extern int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, 933extern int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var,
936 struct fb_info *info); 934 struct fb_info *info);
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 393063096134..5e75e26d4787 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -16,16 +16,15 @@ static inline int frozen(struct task_struct *p)
16 */ 16 */
17static inline int freezing(struct task_struct *p) 17static inline int freezing(struct task_struct *p)
18{ 18{
19 return p->flags & PF_FREEZE; 19 return test_tsk_thread_flag(p, TIF_FREEZE);
20} 20}
21 21
22/* 22/*
23 * Request that a process be frozen 23 * Request that a process be frozen
24 * FIXME: SMP problem. We may not modify other process' flags!
25 */ 24 */
26static inline void freeze(struct task_struct *p) 25static inline void freeze(struct task_struct *p)
27{ 26{
28 p->flags |= PF_FREEZE; 27 set_tsk_thread_flag(p, TIF_FREEZE);
29} 28}
30 29
31/* 30/*
@@ -33,7 +32,7 @@ static inline void freeze(struct task_struct *p)
33 */ 32 */
34static inline void do_not_freeze(struct task_struct *p) 33static inline void do_not_freeze(struct task_struct *p)
35{ 34{
36 p->flags &= ~PF_FREEZE; 35 clear_tsk_thread_flag(p, TIF_FREEZE);
37} 36}
38 37
39/* 38/*
@@ -54,7 +53,9 @@ static inline int thaw_process(struct task_struct *p)
54 */ 53 */
55static inline void frozen_process(struct task_struct *p) 54static inline void frozen_process(struct task_struct *p)
56{ 55{
57 p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN; 56 p->flags |= PF_FROZEN;
57 wmb();
58 clear_tsk_thread_flag(p, TIF_FREEZE);
58} 59}
59 60
60extern void refrigerator(void); 61extern void refrigerator(void);
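
The freezer.h hunk above replaces non-atomic updates of another task's p->flags (the SMP problem the deleted FIXME pointed at) with per-task thread-flag operations, and orders the PF_FROZEN store before the TIF_FREEZE clear with a wmb(). A user-space sketch of the same handshake using C11 atomics -- hypothetical code, with both flag words collapsed into one atomic variable for brevity (in the kernel the TIF_* bits live in thread_info and the PF_* bits in task_struct):

#include <stdatomic.h>
#include <stdio.h>

#define TIF_FREEZE  (1u << 0)   /* "please freeze" request bit            */
#define PF_FROZEN   (1u << 1)   /* "I have parked myself" acknowledgement */

struct task {
        atomic_uint flags;
};

/* freezer side: safe from any thread, unlike a plain p->flags |= bit */
static void freeze(struct task *p)
{
        atomic_fetch_or(&p->flags, TIF_FREEZE);
}

/* task side: poll for the request */
static int freezing(struct task *p)
{
        return atomic_load(&p->flags) & TIF_FREEZE;
}

/* task side: acknowledge, publishing FROZEN before dropping the request
 * (the seq_cst atomics give the ordering the kernel's wmb() provides) */
static void frozen_process(struct task *p)
{
        atomic_fetch_or(&p->flags, PF_FROZEN);
        atomic_fetch_and(&p->flags, ~TIF_FREEZE);
}

int main(void)
{
        struct task t = { .flags = 0 };

        freeze(&t);
        if (freezing(&t))
                frozen_process(&t);

        printf("flags after handshake: 0x%x (expect PF_FROZEN=0x%x only)\n",
               (unsigned)atomic_load(&t.flags), (unsigned)PF_FROZEN);
        return 0;
}
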
diff --git a/include/linux/fs.h b/include/linux/fs.h
index adce6e1d70c2..186da813541e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -120,6 +120,7 @@ extern int dir_notify_enable;
120#define MS_PRIVATE (1<<18) /* change to private */ 120#define MS_PRIVATE (1<<18) /* change to private */
121#define MS_SLAVE (1<<19) /* change to slave */ 121#define MS_SLAVE (1<<19) /* change to slave */
122#define MS_SHARED (1<<20) /* change to shared */ 122#define MS_SHARED (1<<20) /* change to shared */
123#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */
123#define MS_ACTIVE (1<<30) 124#define MS_ACTIVE (1<<30)
124#define MS_NOUSER (1<<31) 125#define MS_NOUSER (1<<31)
125 126
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index 2cdba0c23957..afad95272841 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -105,7 +105,7 @@ static inline void gameport_set_phys(struct gameport *gameport,
105 105
106static inline struct gameport *gameport_allocate_port(void) 106static inline struct gameport *gameport_allocate_port(void)
107{ 107{
108 struct gameport *gameport = kcalloc(1, sizeof(struct gameport), GFP_KERNEL); 108 struct gameport *gameport = kzalloc(sizeof(struct gameport), GFP_KERNEL);
109 109
110 return gameport; 110 return gameport;
111} 111}
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 3d8768b619e9..ca9a602cffd7 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -96,7 +96,10 @@ static inline void memclear_highpage_flush(struct page *page, unsigned int offse
96 kunmap_atomic(kaddr, KM_USER0); 96 kunmap_atomic(kaddr, KM_USER0);
97} 97}
98 98
99static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr) 99#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
100
101static inline void copy_user_highpage(struct page *to, struct page *from,
102 unsigned long vaddr, struct vm_area_struct *vma)
100{ 103{
101 char *vfrom, *vto; 104 char *vfrom, *vto;
102 105
@@ -109,6 +112,8 @@ static inline void copy_user_highpage(struct page *to, struct page *from, unsign
109 smp_wmb(); 112 smp_wmb();
110} 113}
111 114
115#endif
116
112static inline void copy_highpage(struct page *to, struct page *from) 117static inline void copy_highpage(struct page *to, struct page *from)
113{ 118{
114 char *vfrom, *vto; 119 char *vfrom, *vto;
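
The #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE guard added above is the usual arch-override idiom: the generic header supplies a default implementation unless an architecture header (here asm-mips/page.h, earlier in this diff) has already defined the guard macro and declared its own version. A compressed single-file sketch of the idiom, with hypothetical names:

#include <stdio.h>
#include <string.h>

/* --- "arch" header: claims the hook and provides a tuned version ------- */
#define HAVE_ARCH_DO_COPY
static void do_copy(char *dst, const char *src, size_t n)
{
        memcpy(dst, src, n);
        puts("arch-specific copy (could flush caches, use DMA, ...)");
}

/* --- "generic" header: compiled in only when no arch claimed the hook -- */
#ifndef HAVE_ARCH_DO_COPY
static void do_copy(char *dst, const char *src, size_t n)
{
        memcpy(dst, src, n);
        puts("generic copy");
}
#endif

int main(void)
{
        char from[16] = "hello", to[16];

        do_copy(to, from, sizeof(from));
        printf("copied: %s\n", to);
        return 0;
}
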
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index 7ae3c3326643..d38778f2fbec 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -142,7 +142,6 @@
142#define I2C_DRIVERID_MTP008 1023 142#define I2C_DRIVERID_MTP008 1023
143#define I2C_DRIVERID_DS1621 1024 143#define I2C_DRIVERID_DS1621 1024
144#define I2C_DRIVERID_ADM1024 1025 144#define I2C_DRIVERID_ADM1024 1025
145#define I2C_DRIVERID_IT87 1026
146#define I2C_DRIVERID_CH700X 1027 /* single driver for CH7003-7009 digital pc to tv encoders */ 145#define I2C_DRIVERID_CH700X 1027 /* single driver for CH7003-7009 digital pc to tv encoders */
147#define I2C_DRIVERID_FSCPOS 1028 146#define I2C_DRIVERID_FSCPOS 1028
148#define I2C_DRIVERID_FSCSCY 1029 147#define I2C_DRIVERID_FSCSCY 1029
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index b5315150199e..6383d2d83bb0 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -75,7 +75,6 @@ extern struct nsproxy init_nsproxy;
75 .pid_ns = &init_pid_ns, \ 75 .pid_ns = &init_pid_ns, \
76 .count = ATOMIC_INIT(1), \ 76 .count = ATOMIC_INIT(1), \
77 .nslock = __SPIN_LOCK_UNLOCKED(nsproxy.nslock), \ 77 .nslock = __SPIN_LOCK_UNLOCKED(nsproxy.nslock), \
78 .id = 0, \
79 .uts_ns = &init_uts_ns, \ 78 .uts_ns = &init_uts_ns, \
80 .mnt_ns = NULL, \ 79 .mnt_ns = NULL, \
81 INIT_IPC_NS(ipc_ns) \ 80 INIT_IPC_NS(ipc_ns) \
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index aa50d89eacd7..246de1d84a26 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -23,7 +23,7 @@ struct svc_rqst;
23 * This is the set of functions for lockd->nfsd communication 23 * This is the set of functions for lockd->nfsd communication
24 */ 24 */
25struct nlmsvc_binding { 25struct nlmsvc_binding {
26 u32 (*fopen)(struct svc_rqst *, 26 __be32 (*fopen)(struct svc_rqst *,
27 struct nfs_fh *, 27 struct nfs_fh *,
28 struct file **); 28 struct file **);
29 void (*fclose)(struct file *); 29 void (*fclose)(struct file *);
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 0c962b82a9de..ac25b5649c59 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -191,7 +191,7 @@ __be32 nlmsvc_cancel_blocked(struct nlm_file *, struct nlm_lock *);
191unsigned long nlmsvc_retry_blocked(void); 191unsigned long nlmsvc_retry_blocked(void);
192void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, 192void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
193 nlm_host_match_fn_t match); 193 nlm_host_match_fn_t match);
194void nlmsvc_grant_reply(struct nlm_cookie *, u32); 194void nlmsvc_grant_reply(struct nlm_cookie *, __be32);
195 195
196/* 196/*
197 * File handling for the server personality 197 * File handling for the server personality
diff --git a/include/linux/lockd/sm_inter.h b/include/linux/lockd/sm_inter.h
index fc61d40964da..22a645828f26 100644
--- a/include/linux/lockd/sm_inter.h
+++ b/include/linux/lockd/sm_inter.h
@@ -24,7 +24,7 @@
24 * Arguments for all calls to statd 24 * Arguments for all calls to statd
25 */ 25 */
26struct nsm_args { 26struct nsm_args {
27 u32 addr; /* remote address */ 27 __be32 addr; /* remote address */
28 u32 prog; /* RPC callback info */ 28 u32 prog; /* RPC callback info */
29 u32 vers; 29 u32 vers;
30 u32 proc; 30 u32 proc;
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
index 29e7d9fc9dad..83a1f9f6237b 100644
--- a/include/linux/lockd/xdr.h
+++ b/include/linux/lockd/xdr.h
@@ -69,7 +69,7 @@ typedef struct nlm_args nlm_args;
69 */ 69 */
70struct nlm_res { 70struct nlm_res {
71 struct nlm_cookie cookie; 71 struct nlm_cookie cookie;
72 u32 status; 72 __be32 status;
73 struct nlm_lock lock; 73 struct nlm_lock lock;
74}; 74};
75 75
@@ -80,9 +80,9 @@ struct nlm_reboot {
80 char * mon; 80 char * mon;
81 int len; 81 int len;
82 u32 state; 82 u32 state;
83 u32 addr; 83 __be32 addr;
84 u32 vers; 84 __be32 vers;
85 u32 proto; 85 __be32 proto;
86}; 86};
87 87
88/* 88/*
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 498bfbd3b4e1..ea097dddc44f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -281,15 +281,25 @@ struct lock_class_key { };
281#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS) 281#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
282extern void early_init_irq_lock_class(void); 282extern void early_init_irq_lock_class(void);
283#else 283#else
284# define early_init_irq_lock_class() do { } while (0) 284static inline void early_init_irq_lock_class(void)
285{
286}
285#endif 287#endif
286 288
287#ifdef CONFIG_TRACE_IRQFLAGS 289#ifdef CONFIG_TRACE_IRQFLAGS
288extern void early_boot_irqs_off(void); 290extern void early_boot_irqs_off(void);
289extern void early_boot_irqs_on(void); 291extern void early_boot_irqs_on(void);
292extern void print_irqtrace_events(struct task_struct *curr);
290#else 293#else
291# define early_boot_irqs_off() do { } while (0) 294static inline void early_boot_irqs_off(void)
292# define early_boot_irqs_on() do { } while (0) 295{
296}
297static inline void early_boot_irqs_on(void)
298{
299}
300static inline void print_irqtrace_events(struct task_struct *curr)
301{
302}
293#endif 303#endif
294 304
295/* 305/*
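
Converting the empty do { } while (0) stubs into static inline functions, as the lockdep.h hunk above does, keeps the no-op behaviour but lets the compiler evaluate and type-check the arguments of the disabled calls, so variables passed only to them do not look unused. A small standalone illustration (the *_macro/*_inline names are invented for the sketch):

#include <stdio.h>

struct task_struct { int pid; };

/* old style: the argument disappears unexamined, so even nonsense compiles */
#define print_irqtrace_events_macro(curr) do { } while (0)

/* new style: the argument is really evaluated and type-checked */
static inline void print_irqtrace_events_inline(struct task_struct *curr)
{
        (void)curr;     /* stub: deliberately does nothing */
}

int main(void)
{
        struct task_struct t = { .pid = 1 };

        print_irqtrace_events_macro("not a task");  /* accepted silently     */
        print_irqtrace_events_inline(&t);           /* checked: needs a task */
        /* print_irqtrace_events_inline("not a task");   <- would warn:
         * incompatible pointer type */
        puts("stubs called; only the inline one type-checked its argument");
        return 0;
}
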
diff --git a/include/linux/mount.h b/include/linux/mount.h
index e357dc86a4de..1b7e178b0d84 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -27,6 +27,7 @@ struct mnt_namespace;
27#define MNT_NOEXEC 0x04 27#define MNT_NOEXEC 0x04
28#define MNT_NOATIME 0x08 28#define MNT_NOATIME 0x08
29#define MNT_NODIRATIME 0x10 29#define MNT_NODIRATIME 0x10
30#define MNT_RELATIME 0x20
30 31
31#define MNT_SHRINKABLE 0x100 32#define MNT_SHRINKABLE 0x100
32 33
diff --git a/include/linux/n_r3964.h b/include/linux/n_r3964.h
index db4f3776978a..de24af79ebd3 100644
--- a/include/linux/n_r3964.h
+++ b/include/linux/n_r3964.h
@@ -116,7 +116,7 @@ struct r3964_message;
116 116
117struct r3964_client_info { 117struct r3964_client_info {
118 spinlock_t lock; 118 spinlock_t lock;
119 pid_t pid; 119 struct pid *pid;
120 unsigned int sig_flags; 120 unsigned int sig_flags;
121 121
122 struct r3964_client_info *next; 122 struct r3964_client_info *next;
diff --git a/include/linux/ncp_mount.h b/include/linux/ncp_mount.h
index f46bddcdbd3b..a2b549eb1eca 100644
--- a/include/linux/ncp_mount.h
+++ b/include/linux/ncp_mount.h
@@ -75,7 +75,7 @@ struct ncp_mount_data_kernel {
75 unsigned int int_flags; /* internal flags */ 75 unsigned int int_flags; /* internal flags */
76#define NCP_IMOUNT_LOGGEDIN_POSSIBLE 0x0001 76#define NCP_IMOUNT_LOGGEDIN_POSSIBLE 0x0001
77 __kernel_uid32_t mounted_uid; /* Who may umount() this filesystem? */ 77 __kernel_uid32_t mounted_uid; /* Who may umount() this filesystem? */
78 __kernel_pid_t wdog_pid; /* Who cares for our watchdog packets? */ 78 struct pid *wdog_pid; /* Who cares for our watchdog packets? */
79 unsigned int ncp_fd; /* The socket to the ncp port */ 79 unsigned int ncp_fd; /* The socket to the ncp port */
80 unsigned int time_out; /* How long should I wait after 80 unsigned int time_out; /* How long should I wait after
81 sending a NCP request? */ 81 sending a NCP request? */
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
index edb54c3171b3..0727774772ba 100644
--- a/include/linux/nfsd/nfsd.h
+++ b/include/linux/nfsd/nfsd.h
@@ -275,12 +275,12 @@ static inline int is_fsid(struct svc_fh *fh, struct knfsd_fh *reffh)
275 * we might process an operation with side effects, and be unable to 275 * we might process an operation with side effects, and be unable to
276 * tell the client that the operation succeeded. 276 * tell the client that the operation succeeded.
277 * 277 *
278 * COMPOUND_SLACK_SPACE - this is the minimum amount of buffer space 278 * COMPOUND_SLACK_SPACE - this is the minimum bytes of buffer space
279 * needed to encode an "ordinary" _successful_ operation. (GETATTR, 279 * needed to encode an "ordinary" _successful_ operation. (GETATTR,
280 * READ, READDIR, and READLINK have their own buffer checks.) if we 280 * READ, READDIR, and READLINK have their own buffer checks.) if we
281 * fall below this level, we fail the next operation with NFS4ERR_RESOURCE. 281 * fall below this level, we fail the next operation with NFS4ERR_RESOURCE.
282 * 282 *
283 * COMPOUND_ERR_SLACK_SPACE - this is the minimum amount of buffer space 283 * COMPOUND_ERR_SLACK_SPACE - this is the minimum bytes of buffer space
284 * needed to encode an operation which has failed with NFS4ERR_RESOURCE. 284 * needed to encode an operation which has failed with NFS4ERR_RESOURCE.
285 * care is taken to ensure that we never fall below this level for any 285 * care is taken to ensure that we never fall below this level for any
286 * reason. 286 * reason.
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index c3673f487e84..ab5c236bd9a7 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -273,7 +273,6 @@ struct nfs4_stateid {
273 ((err) != nfserr_stale_stateid) && \ 273 ((err) != nfserr_stale_stateid) && \
274 ((err) != nfserr_bad_stateid)) 274 ((err) != nfserr_bad_stateid))
275 275
276extern __be32 nfsd4_renew(clientid_t *clid);
277extern __be32 nfs4_preprocess_stateid_op(struct svc_fh *current_fh, 276extern __be32 nfs4_preprocess_stateid_op(struct svc_fh *current_fh,
278 stateid_t *stateid, int flags, struct file **filp); 277 stateid_t *stateid, int flags, struct file **filp);
279extern void nfs4_lock_state(void); 278extern void nfs4_lock_state(void);
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h
index 45ca01b5f844..09799bcee0ac 100644
--- a/include/linux/nfsd/xdr4.h
+++ b/include/linux/nfsd/xdr4.h
@@ -44,6 +44,12 @@
44#define NFSD4_MAX_TAGLEN 128 44#define NFSD4_MAX_TAGLEN 128
45#define XDR_LEN(n) (((n) + 3) & ~3) 45#define XDR_LEN(n) (((n) + 3) & ~3)
46 46
47struct nfsd4_compound_state {
48 struct svc_fh current_fh;
49 struct svc_fh save_fh;
50 struct nfs4_stateowner *replay_owner;
51};
52
47struct nfsd4_change_info { 53struct nfsd4_change_info {
48 u32 atomic; 54 u32 atomic;
49 u32 before_ctime_sec; 55 u32 before_ctime_sec;
@@ -430,35 +436,39 @@ __be32 nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
430 struct dentry *dentry, __be32 *buffer, int *countp, 436 struct dentry *dentry, __be32 *buffer, int *countp,
431 u32 *bmval, struct svc_rqst *); 437 u32 *bmval, struct svc_rqst *);
432extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp, 438extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp,
439 struct nfsd4_compound_state *,
433 struct nfsd4_setclientid *setclid); 440 struct nfsd4_setclientid *setclid);
434extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, 441extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
442 struct nfsd4_compound_state *,
435 struct nfsd4_setclientid_confirm *setclientid_confirm); 443 struct nfsd4_setclientid_confirm *setclientid_confirm);
436extern __be32 nfsd4_process_open1(struct nfsd4_open *open); 444extern __be32 nfsd4_process_open1(struct nfsd4_open *open);
437extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp, 445extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp,
438 struct svc_fh *current_fh, struct nfsd4_open *open); 446 struct svc_fh *current_fh, struct nfsd4_open *open);
439extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp, 447extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp,
440 struct svc_fh *current_fh, struct nfsd4_open_confirm *oc, 448 struct nfsd4_compound_state *, struct nfsd4_open_confirm *oc);
441 struct nfs4_stateowner **); 449extern __be32 nfsd4_close(struct svc_rqst *rqstp,
442extern __be32 nfsd4_close(struct svc_rqst *rqstp, struct svc_fh *current_fh, 450 struct nfsd4_compound_state *,
443 struct nfsd4_close *close, 451 struct nfsd4_close *close);
444 struct nfs4_stateowner **replay_owner);
445extern __be32 nfsd4_open_downgrade(struct svc_rqst *rqstp, 452extern __be32 nfsd4_open_downgrade(struct svc_rqst *rqstp,
446 struct svc_fh *current_fh, struct nfsd4_open_downgrade *od, 453 struct nfsd4_compound_state *,
447 struct nfs4_stateowner **replay_owner); 454 struct nfsd4_open_downgrade *od);
448extern __be32 nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, 455extern __be32 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *,
449 struct nfsd4_lock *lock, 456 struct nfsd4_lock *lock);
450 struct nfs4_stateowner **replay_owner); 457extern __be32 nfsd4_lockt(struct svc_rqst *rqstp,
451extern __be32 nfsd4_lockt(struct svc_rqst *rqstp, struct svc_fh *current_fh, 458 struct nfsd4_compound_state *,
452 struct nfsd4_lockt *lockt); 459 struct nfsd4_lockt *lockt);
453extern __be32 nfsd4_locku(struct svc_rqst *rqstp, struct svc_fh *current_fh, 460extern __be32 nfsd4_locku(struct svc_rqst *rqstp,
454 struct nfsd4_locku *locku, 461 struct nfsd4_compound_state *,
455 struct nfs4_stateowner **replay_owner); 462 struct nfsd4_locku *locku);
456extern __be32 463extern __be32
457nfsd4_release_lockowner(struct svc_rqst *rqstp, 464nfsd4_release_lockowner(struct svc_rqst *rqstp,
465 struct nfsd4_compound_state *,
458 struct nfsd4_release_lockowner *rlockowner); 466 struct nfsd4_release_lockowner *rlockowner);
459extern void nfsd4_release_compoundargs(struct nfsd4_compoundargs *); 467extern void nfsd4_release_compoundargs(struct nfsd4_compoundargs *);
460extern __be32 nfsd4_delegreturn(struct svc_rqst *rqstp, 468extern __be32 nfsd4_delegreturn(struct svc_rqst *rqstp,
461 struct svc_fh *current_fh, struct nfsd4_delegreturn *dr); 469 struct nfsd4_compound_state *, struct nfsd4_delegreturn *dr);
470extern __be32 nfsd4_renew(struct svc_rqst *rqstp,
471 struct nfsd4_compound_state *, clientid_t *clid);
462#endif 472#endif
463 473
464/* 474/*
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index fdfb0e44912f..0b9f0dc30d61 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -24,7 +24,6 @@ struct pid_namespace;
24struct nsproxy { 24struct nsproxy {
25 atomic_t count; 25 atomic_t count;
26 spinlock_t nslock; 26 spinlock_t nslock;
27 unsigned long id;
28 struct uts_namespace *uts_ns; 27 struct uts_namespace *uts_ns;
29 struct ipc_namespace *ipc_ns; 28 struct ipc_namespace *ipc_ns;
30 struct mnt_namespace *mnt_ns; 29 struct mnt_namespace *mnt_ns;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 51180dba9a98..95c1e74afebc 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2067,6 +2067,10 @@
2067#define PCI_DEVICE_ID_JMICRON_JMB366 0x2366 2067#define PCI_DEVICE_ID_JMICRON_JMB366 0x2366
2068#define PCI_DEVICE_ID_JMICRON_JMB368 0x2368 2068#define PCI_DEVICE_ID_JMICRON_JMB368 0x2368
2069 2069
2070#define PCI_VENDOR_ID_KORENIX 0x1982
2071#define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600
2072#define PCI_DEVICE_ID_KORENIX_JETCARDF1 0x16ff
2073
2070#define PCI_VENDOR_ID_TEKRAM 0x1de1 2074#define PCI_VENDOR_ID_TEKRAM 0x1de1
2071#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 2075#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
2072 2076
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index ea4f7cd7bfd8..2e19478e9e84 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -12,7 +12,7 @@
12struct pipe_buffer { 12struct pipe_buffer {
13 struct page *page; 13 struct page *page;
14 unsigned int offset, len; 14 unsigned int offset, len;
15 struct pipe_buf_operations *ops; 15 const struct pipe_buf_operations *ops;
16 unsigned int flags; 16 unsigned int flags;
17}; 17};
18 18
@@ -41,9 +41,7 @@ struct pipe_buf_operations {
41struct pipe_inode_info { 41struct pipe_inode_info {
42 wait_queue_head_t wait; 42 wait_queue_head_t wait;
43 unsigned int nrbufs, curbuf; 43 unsigned int nrbufs, curbuf;
44 struct pipe_buffer bufs[PIPE_BUFFERS];
45 struct page *tmp_page; 44 struct page *tmp_page;
46 unsigned int start;
47 unsigned int readers; 45 unsigned int readers;
48 unsigned int writers; 46 unsigned int writers;
49 unsigned int waiting_writers; 47 unsigned int waiting_writers;
@@ -52,6 +50,7 @@ struct pipe_inode_info {
52 struct fasync_struct *fasync_readers; 50 struct fasync_struct *fasync_readers;
53 struct fasync_struct *fasync_writers; 51 struct fasync_struct *fasync_writers;
54 struct inode *inode; 52 struct inode *inode;
53 struct pipe_buffer bufs[PIPE_BUFFERS];
55}; 54};
56 55
57/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual 56/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 20f47b81d3fa..8bbd459eafdc 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -39,7 +39,7 @@ extern struct platform_device *platform_device_register_simple(char *, unsigned
39 39
40extern struct platform_device *platform_device_alloc(const char *name, unsigned int id); 40extern struct platform_device *platform_device_alloc(const char *name, unsigned int id);
41extern int platform_device_add_resources(struct platform_device *pdev, struct resource *res, unsigned int num); 41extern int platform_device_add_resources(struct platform_device *pdev, struct resource *res, unsigned int num);
42extern int platform_device_add_data(struct platform_device *pdev, void *data, size_t size); 42extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size);
43extern int platform_device_add(struct platform_device *pdev); 43extern int platform_device_add(struct platform_device *pdev);
44extern void platform_device_del(struct platform_device *pdev); 44extern void platform_device_del(struct platform_device *pdev);
45extern void platform_device_put(struct platform_device *pdev); 45extern void platform_device_put(struct platform_device *pdev);
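platform_device_add_data() now copies from a const pointer, so board code can register read-only platform data without a cast. A minimal sketch of the usual allocate/populate/register sequence, using only the calls whose prototypes appear in this hunk; the device name and the my_board_info payload are invented for illustration:

#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/init.h>

/* Hypothetical read-only platform data for the example device */
struct my_board_info {
	int irq_mode;
};

static const struct my_board_info my_info = { .irq_mode = 1 };

static struct platform_device *my_pdev;

static int __init my_board_register(void)
{
	int ret;

	my_pdev = platform_device_alloc("my-device", -1);
	if (!my_pdev)
		return -ENOMEM;

	/* const data is accepted directly now; the core copies it */
	ret = platform_device_add_data(my_pdev, &my_info, sizeof(my_info));
	if (ret)
		goto err_put;

	ret = platform_device_add(my_pdev);
	if (ret)
		goto err_put;
	return 0;

err_put:
	platform_device_put(my_pdev);
	return ret;
}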
diff --git a/include/linux/reciprocal_div.h b/include/linux/reciprocal_div.h
new file mode 100644
index 000000000000..f9c90b33285b
--- /dev/null
+++ b/include/linux/reciprocal_div.h
@@ -0,0 +1,32 @@
1#ifndef _LINUX_RECIPROCAL_DIV_H
2#define _LINUX_RECIPROCAL_DIV_H
3
4#include <linux/types.h>
5
6/*
 7 * This file describes reciprocal division.
8 *
9 * This optimizes the (A/B) problem, when A and B are two u32
10 * and B is a known value (but not known at compile time)
11 *
12 * The math principle used is :
13 * Let RECIPROCAL_VALUE(B) be (((1LL << 32) + (B - 1))/ B)
14 * Then A / B = (u32)(((u64)(A) * (R)) >> 32)
15 *
16 * This replaces a divide by a multiply (and a shift), and
17 * is generally less expensive in CPU cycles.
18 */
19
20/*
21 * Computes the reciprocal value (R) for the value B of the divisor.
22 * Compute R once for a given divisor rather than before each
23 * reciprocal_divide(), or the result is slower than a normal divide.
24 */
25extern u32 reciprocal_value(u32 B);
26
27
28static inline u32 reciprocal_divide(u32 A, u32 R)
29{
30 return (u32)(((u64)A * R) >> 32);
31}
32#endif
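A quick user-space check of the math documented above; this re-implements the two helpers from the formula in the comment (it is not the kernel code itself) and compares the result with an ordinary divide:

#include <stdint.h>
#include <stdio.h>

/* R = ((1 << 32) + (B - 1)) / B, computed once per divisor */
static uint32_t my_reciprocal_value(uint32_t B)
{
	return (uint32_t)((((uint64_t)1 << 32) + (B - 1)) / B);
}

/* A / B becomes a multiply and a shift */
static uint32_t my_reciprocal_divide(uint32_t A, uint32_t R)
{
	return (uint32_t)(((uint64_t)A * R) >> 32);
}

int main(void)
{
	uint32_t B = 44;                      /* divisor known up front */
	uint32_t R = my_reciprocal_value(B);
	uint32_t A;

	/* Exact for the modest dividends exercised here; for some divisors
	 * the result can be off by one once A grows very large. */
	for (A = 0; A < 1000000; A++)
		if (my_reciprocal_divide(A, R) != A / B)
			printf("mismatch at A=%u\n", A);
	printf("check done for B=%u\n", B);
	return 0;
}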
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ea92e5c89089..446373535190 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1144,7 +1144,6 @@ static inline void put_task_struct(struct task_struct *t)
1144#define PF_MEMALLOC 0x00000800 /* Allocating memory */ 1144#define PF_MEMALLOC 0x00000800 /* Allocating memory */
1145#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */ 1145#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
1146#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ 1146#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
1147#define PF_FREEZE 0x00004000 /* this task is being frozen for suspend now */
1148#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ 1147#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
1149#define PF_FROZEN 0x00010000 /* frozen for system suspend */ 1148#define PF_FROZEN 0x00010000 /* frozen for system suspend */
1150#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ 1149#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2271886744f8..1ef822e31c77 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -1,7 +1,9 @@
1/* 1/*
2 * linux/include/linux/slab.h 2 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
3 * Written by Mark Hemment, 1996. 3 *
4 * (markhe@nextd.demon.co.uk) 4 * (C) SGI 2006, Christoph Lameter <clameter@sgi.com>
5 * Cleaned up and restructured to ease the addition of alternative
6 * implementations of SLAB allocators.
5 */ 7 */
6 8
7#ifndef _LINUX_SLAB_H 9#ifndef _LINUX_SLAB_H
@@ -10,64 +12,95 @@
10#ifdef __KERNEL__ 12#ifdef __KERNEL__
11 13
12#include <linux/gfp.h> 14#include <linux/gfp.h>
13#include <linux/init.h>
14#include <linux/types.h> 15#include <linux/types.h>
15#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
16#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
17#include <linux/compiler.h>
18 16
19/* kmem_cache_t exists for legacy reasons and is not used by code in mm */
20typedef struct kmem_cache kmem_cache_t __deprecated; 17typedef struct kmem_cache kmem_cache_t __deprecated;
21 18
22/* flags to pass to kmem_cache_create(). 19/*
23 * The first 3 are only valid when the allocator as been build 20 * Flags to pass to kmem_cache_create().
24 * SLAB_DEBUG_SUPPORT. 21 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
25 */ 22 */
26#define SLAB_DEBUG_FREE 0x00000100UL /* Peform (expensive) checks on free */ 23#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
27#define SLAB_DEBUG_INITIAL 0x00000200UL /* Call constructor (as verifier) */ 24#define SLAB_DEBUG_INITIAL 0x00000200UL /* DEBUG: Call constructor (as verifier) */
28#define SLAB_RED_ZONE 0x00000400UL /* Red zone objs in a cache */ 25#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
29#define SLAB_POISON 0x00000800UL /* Poison objects */ 26#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
30#define SLAB_HWCACHE_ALIGN 0x00002000UL /* align objs on a h/w cache lines */ 27#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
31#define SLAB_CACHE_DMA 0x00004000UL /* use GFP_DMA memory */ 28#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
32#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* force alignment */ 29#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* Force alignment even if debugging is active */
33#define SLAB_STORE_USER 0x00010000UL /* store the last owner for bug hunting */ 30#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
34#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* track pages allocated to indicate 31#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
35 what is reclaimable later*/ 32#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
36#define SLAB_PANIC 0x00040000UL /* panic if kmem_cache_create() fails */ 33#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
37#define SLAB_DESTROY_BY_RCU 0x00080000UL /* defer freeing pages to RCU */
38#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ 34#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
39 35
40/* flags passed to a constructor func */ 36/* Flags passed to constructor functions */
41#define SLAB_CTOR_CONSTRUCTOR 0x001UL /* if not set, then deconstructor */ 37#define SLAB_CTOR_CONSTRUCTOR 0x001UL /* If not set, then destructor */
42#define SLAB_CTOR_ATOMIC 0x002UL /* tell constructor it can't sleep */ 38#define SLAB_CTOR_ATOMIC 0x002UL /* Tell constructor it can't sleep */
43#define SLAB_CTOR_VERIFY 0x004UL /* tell constructor it's a verify call */ 39#define SLAB_CTOR_VERIFY 0x004UL /* Tell constructor it's a verify call */
44 40
45#ifndef CONFIG_SLOB 41/*
46 42 * struct kmem_cache related prototypes
47/* prototypes */ 43 */
48extern void __init kmem_cache_init(void); 44void __init kmem_cache_init(void);
45extern int slab_is_available(void);
49 46
50extern struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, 47struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
51 unsigned long, 48 unsigned long,
52 void (*)(void *, struct kmem_cache *, unsigned long), 49 void (*)(void *, struct kmem_cache *, unsigned long),
53 void (*)(void *, struct kmem_cache *, unsigned long)); 50 void (*)(void *, struct kmem_cache *, unsigned long));
54extern void kmem_cache_destroy(struct kmem_cache *); 51void kmem_cache_destroy(struct kmem_cache *);
55extern int kmem_cache_shrink(struct kmem_cache *); 52int kmem_cache_shrink(struct kmem_cache *);
56extern void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 53void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
57extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t); 54void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
58extern void kmem_cache_free(struct kmem_cache *, void *); 55void kmem_cache_free(struct kmem_cache *, void *);
59extern unsigned int kmem_cache_size(struct kmem_cache *); 56unsigned int kmem_cache_size(struct kmem_cache *);
60extern const char *kmem_cache_name(struct kmem_cache *); 57const char *kmem_cache_name(struct kmem_cache *);
58int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
59
60#ifdef CONFIG_NUMA
61extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
62#else
63static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
64 gfp_t flags, int node)
65{
66 return kmem_cache_alloc(cachep, flags);
67}
68#endif
69
70/*
71 * Common kmalloc functions provided by all allocators
72 */
73void *__kmalloc(size_t, gfp_t);
74void *__kzalloc(size_t, gfp_t);
75void kfree(const void *);
76unsigned int ksize(const void *);
77
78/**
79 * kcalloc - allocate memory for an array. The memory is set to zero.
80 * @n: number of elements.
81 * @size: element size.
82 * @flags: the type of memory to allocate.
83 */
84static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
85{
86 if (n != 0 && size > ULONG_MAX / n)
87 return NULL;
88 return __kzalloc(n * size, flags);
89}
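The n != 0 && size > ULONG_MAX / n test above is the standard division-based way to detect that n * size would wrap before any memory is touched. A stand-alone user-space illustration; SIZE_MAX stands in for the kernel's ULONG_MAX, which has the same width there:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Mirrors the kcalloc() guard: refuse the request if n * size wraps */
static int mul_would_overflow(size_t n, size_t size)
{
	return n != 0 && size > SIZE_MAX / n;
}

int main(void)
{
	size_t n = (size_t)1 << (sizeof(size_t) * 8 - 4);
	size_t size = 32;

	printf("would n * size overflow? %s\n",
	       mul_would_overflow(n, size) ? "yes" : "no");
	printf("the wrapped product would be %zu bytes\n", n * size);
	return 0;
}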
61 90
62/* Size description struct for general caches. */ 91/*
63struct cache_sizes { 92 * Allocator specific definitions. These are mainly used to establish optimized
64 size_t cs_size; 93 * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by selecting
65 struct kmem_cache *cs_cachep; 94 * the appropriate general cache at compile time.
66 struct kmem_cache *cs_dmacachep; 95 */
67};
68extern struct cache_sizes malloc_sizes[];
69 96
70extern void *__kmalloc(size_t, gfp_t); 97#ifdef CONFIG_SLAB
98#include <linux/slab_def.h>
99#else
100/*
101 * Fallback definitions for an allocator not wanting to provide
102 * its own optimized kmalloc definitions (like SLOB).
103 */
71 104
72/** 105/**
73 * kmalloc - allocate memory 106 * kmalloc - allocate memory
@@ -116,46 +149,9 @@ extern void *__kmalloc(size_t, gfp_t);
116 */ 149 */
117static inline void *kmalloc(size_t size, gfp_t flags) 150static inline void *kmalloc(size_t size, gfp_t flags)
118{ 151{
119 if (__builtin_constant_p(size)) {
120 int i = 0;
121#define CACHE(x) \
122 if (size <= x) \
123 goto found; \
124 else \
125 i++;
126#include "kmalloc_sizes.h"
127#undef CACHE
128 {
129 extern void __you_cannot_kmalloc_that_much(void);
130 __you_cannot_kmalloc_that_much();
131 }
132found:
133 return kmem_cache_alloc((flags & GFP_DMA) ?
134 malloc_sizes[i].cs_dmacachep :
135 malloc_sizes[i].cs_cachep, flags);
136 }
137 return __kmalloc(size, flags); 152 return __kmalloc(size, flags);
138} 153}
139 154
140/*
141 * kmalloc_track_caller is a special version of kmalloc that records the
142 * calling function of the routine calling it for slab leak tracking instead
143 * of just the calling function (confusing, eh?).
144 * It's useful when the call to kmalloc comes from a widely-used standard
145 * allocator where we care about the real place the memory allocation
146 * request comes from.
147 */
148#ifndef CONFIG_DEBUG_SLAB
149#define kmalloc_track_caller(size, flags) \
150 __kmalloc(size, flags)
151#else
152extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
153#define kmalloc_track_caller(size, flags) \
154 __kmalloc_track_caller(size, flags, __builtin_return_address(0))
155#endif
156
157extern void *__kzalloc(size_t, gfp_t);
158
159/** 155/**
160 * kzalloc - allocate memory. The memory is set to zero. 156 * kzalloc - allocate memory. The memory is set to zero.
161 * @size: how many bytes of memory are required. 157 * @size: how many bytes of memory are required.
@@ -163,72 +159,41 @@ extern void *__kzalloc(size_t, gfp_t);
163 */ 159 */
164static inline void *kzalloc(size_t size, gfp_t flags) 160static inline void *kzalloc(size_t size, gfp_t flags)
165{ 161{
166 if (__builtin_constant_p(size)) {
167 int i = 0;
168#define CACHE(x) \
169 if (size <= x) \
170 goto found; \
171 else \
172 i++;
173#include "kmalloc_sizes.h"
174#undef CACHE
175 {
176 extern void __you_cannot_kzalloc_that_much(void);
177 __you_cannot_kzalloc_that_much();
178 }
179found:
180 return kmem_cache_zalloc((flags & GFP_DMA) ?
181 malloc_sizes[i].cs_dmacachep :
182 malloc_sizes[i].cs_cachep, flags);
183 }
184 return __kzalloc(size, flags); 162 return __kzalloc(size, flags);
185} 163}
164#endif
186 165
187/** 166#ifndef CONFIG_NUMA
188 * kcalloc - allocate memory for an array. The memory is set to zero. 167static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
189 * @n: number of elements.
190 * @size: element size.
191 * @flags: the type of memory to allocate.
192 */
193static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
194{ 168{
195 if (n != 0 && size > ULONG_MAX / n) 169 return kmalloc(size, flags);
196 return NULL;
197 return kzalloc(n * size, flags);
198} 170}
199 171
200extern void kfree(const void *); 172static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
201extern unsigned int ksize(const void *);
202extern int slab_is_available(void);
203
204#ifdef CONFIG_NUMA
205extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
206extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
207
208static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
209{ 173{
210 if (__builtin_constant_p(size)) { 174 return __kmalloc(size, flags);
211 int i = 0;
212#define CACHE(x) \
213 if (size <= x) \
214 goto found; \
215 else \
216 i++;
217#include "kmalloc_sizes.h"
218#undef CACHE
219 {
220 extern void __you_cannot_kmalloc_that_much(void);
221 __you_cannot_kmalloc_that_much();
222 }
223found:
224 return kmem_cache_alloc_node((flags & GFP_DMA) ?
225 malloc_sizes[i].cs_dmacachep :
226 malloc_sizes[i].cs_cachep, flags, node);
227 }
228 return __kmalloc_node(size, flags, node);
229} 175}
176#endif /* !CONFIG_NUMA */
230 177
231/* 178/*
179 * kmalloc_track_caller is a special version of kmalloc that records the
180 * calling function of the routine calling it for slab leak tracking instead
181 * of just the calling function (confusing, eh?).
182 * It's useful when the call to kmalloc comes from a widely-used standard
183 * allocator where we care about the real place the memory allocation
184 * request comes from.
185 */
186#ifdef CONFIG_DEBUG_SLAB
187extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
188#define kmalloc_track_caller(size, flags) \
189 __kmalloc_track_caller(size, flags, __builtin_return_address(0))
190#else
191#define kmalloc_track_caller(size, flags) \
192 __kmalloc(size, flags)
193#endif /* DEBUG_SLAB */
194
195#ifdef CONFIG_NUMA
196/*
232 * kmalloc_node_track_caller is a special version of kmalloc_node that 197 * kmalloc_node_track_caller is a special version of kmalloc_node that
233 * records the calling function of the routine calling it for slab leak 198 * records the calling function of the routine calling it for slab leak
234 * tracking instead of just the calling function (confusing, eh?). 199 * tracking instead of just the calling function (confusing, eh?).
@@ -236,70 +201,23 @@ found:
236 * standard allocator where we care about the real place the memory 201 * standard allocator where we care about the real place the memory
237 * allocation request comes from. 202 * allocation request comes from.
238 */ 203 */
239#ifndef CONFIG_DEBUG_SLAB 204#ifdef CONFIG_DEBUG_SLAB
240#define kmalloc_node_track_caller(size, flags, node) \
241 __kmalloc_node(size, flags, node)
242#else
243extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); 205extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
244#define kmalloc_node_track_caller(size, flags, node) \ 206#define kmalloc_node_track_caller(size, flags, node) \
245 __kmalloc_node_track_caller(size, flags, node, \ 207 __kmalloc_node_track_caller(size, flags, node, \
246 __builtin_return_address(0)) 208 __builtin_return_address(0))
209#else
210#define kmalloc_node_track_caller(size, flags, node) \
211 __kmalloc_node(size, flags, node)
247#endif 212#endif
213
248#else /* CONFIG_NUMA */ 214#else /* CONFIG_NUMA */
249static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
250 gfp_t flags, int node)
251{
252 return kmem_cache_alloc(cachep, flags);
253}
254static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
255{
256 return kmalloc(size, flags);
257}
258 215
259#define kmalloc_node_track_caller(size, flags, node) \ 216#define kmalloc_node_track_caller(size, flags, node) \
260 kmalloc_track_caller(size, flags) 217 kmalloc_track_caller(size, flags)
261#endif
262 218
263extern int FASTCALL(kmem_cache_reap(int)); 219#endif /* DEBUG_SLAB */
264extern int FASTCALL(kmem_ptr_validate(struct kmem_cache *cachep, void *ptr));
265
266#else /* CONFIG_SLOB */
267
268/* SLOB allocator routines */
269
270void kmem_cache_init(void);
271struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t,
272 unsigned long,
273 void (*)(void *, struct kmem_cache *, unsigned long),
274 void (*)(void *, struct kmem_cache *, unsigned long));
275void kmem_cache_destroy(struct kmem_cache *c);
276void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags);
277void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
278void kmem_cache_free(struct kmem_cache *c, void *b);
279const char *kmem_cache_name(struct kmem_cache *);
280void *kmalloc(size_t size, gfp_t flags);
281void *__kzalloc(size_t size, gfp_t flags);
282void kfree(const void *m);
283unsigned int ksize(const void *m);
284unsigned int kmem_cache_size(struct kmem_cache *c);
285
286static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
287{
288 return __kzalloc(n * size, flags);
289}
290
291#define kmem_cache_shrink(d) (0)
292#define kmem_cache_reap(a)
293#define kmem_ptr_validate(a, b) (0)
294#define kmem_cache_alloc_node(c, f, n) kmem_cache_alloc(c, f)
295#define kmalloc_node(s, f, n) kmalloc(s, f)
296#define kzalloc(s, f) __kzalloc(s, f)
297#define kmalloc_track_caller kmalloc
298
299#define kmalloc_node_track_caller kmalloc_node
300
301#endif /* CONFIG_SLOB */
302 220
303#endif /* __KERNEL__ */ 221#endif /* __KERNEL__ */
304
305#endif /* _LINUX_SLAB_H */ 222#endif /* _LINUX_SLAB_H */
223
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
new file mode 100644
index 000000000000..4b463e66ddea
--- /dev/null
+++ b/include/linux/slab_def.h
@@ -0,0 +1,100 @@
1#ifndef _LINUX_SLAB_DEF_H
2#define _LINUX_SLAB_DEF_H
3
4/*
5 * Definitions unique to the original Linux SLAB allocator.
6 *
7 * What we provide here is a way to optimize the frequent kmalloc
8 * calls in the kernel by selecting the appropriate general cache
9 * if kmalloc was called with a size that can be established at
10 * compile time.
11 */
12
13#include <linux/init.h>
14#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
15#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
16#include <linux/compiler.h>
17
18/* Size description struct for general caches. */
19struct cache_sizes {
20 size_t cs_size;
21 struct kmem_cache *cs_cachep;
22 struct kmem_cache *cs_dmacachep;
23};
24extern struct cache_sizes malloc_sizes[];
25
26static inline void *kmalloc(size_t size, gfp_t flags)
27{
28 if (__builtin_constant_p(size)) {
29 int i = 0;
30#define CACHE(x) \
31 if (size <= x) \
32 goto found; \
33 else \
34 i++;
35#include "kmalloc_sizes.h"
36#undef CACHE
37 {
38 extern void __you_cannot_kmalloc_that_much(void);
39 __you_cannot_kmalloc_that_much();
40 }
41found:
42 return kmem_cache_alloc((flags & GFP_DMA) ?
43 malloc_sizes[i].cs_dmacachep :
44 malloc_sizes[i].cs_cachep, flags);
45 }
46 return __kmalloc(size, flags);
47}
48
49static inline void *kzalloc(size_t size, gfp_t flags)
50{
51 if (__builtin_constant_p(size)) {
52 int i = 0;
53#define CACHE(x) \
54 if (size <= x) \
55 goto found; \
56 else \
57 i++;
58#include "kmalloc_sizes.h"
59#undef CACHE
60 {
61 extern void __you_cannot_kzalloc_that_much(void);
62 __you_cannot_kzalloc_that_much();
63 }
64found:
65 return kmem_cache_zalloc((flags & GFP_DMA) ?
66 malloc_sizes[i].cs_dmacachep :
67 malloc_sizes[i].cs_cachep, flags);
68 }
69 return __kzalloc(size, flags);
70}
71
72#ifdef CONFIG_NUMA
73extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
74
75static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
76{
77 if (__builtin_constant_p(size)) {
78 int i = 0;
79#define CACHE(x) \
80 if (size <= x) \
81 goto found; \
82 else \
83 i++;
84#include "kmalloc_sizes.h"
85#undef CACHE
86 {
87 extern void __you_cannot_kmalloc_that_much(void);
88 __you_cannot_kmalloc_that_much();
89 }
90found:
91 return kmem_cache_alloc_node((flags & GFP_DMA) ?
92 malloc_sizes[i].cs_dmacachep :
93 malloc_sizes[i].cs_cachep, flags, node);
94 }
95 return __kmalloc_node(size, flags, node);
96}
97
98#endif /* CONFIG_NUMA */
99
100#endif /* _LINUX_SLAB_DEF_H */
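The three inlines above all rely on __builtin_constant_p() so that a kmalloc() with a literal size collapses into a direct kmem_cache_alloc() on one of the general caches, while everything else takes the out-of-line __kmalloc() path. A toy user-space sketch of that dispatch pattern; the size table and helpers are invented for illustration, and the real selection is unrolled by the CACHE() macro over kmalloc_sizes.h:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the general caches in malloc_sizes[] */
static const size_t cache_sizes[] = { 32, 64, 128, 256, 512, 1024 };

static void *alloc_from_cache(int i, size_t size)
{
	printf("constant size %zu -> cache #%d (%zu bytes)\n",
	       size, i, cache_sizes[i]);
	return malloc(cache_sizes[i]);
}

static void *generic_alloc(size_t size)
{
	printf("non-constant size %zu -> generic path\n", size);
	return malloc(size);
}

/* Constant sizes go down the cache-selection path (which the real header
 * unrolls with CACHE() so the index folds at compile time); variable
 * sizes fall back to the generic allocator, like __kmalloc(). */
#define my_kmalloc(size)						\
({									\
	void *__p;							\
	if (__builtin_constant_p(size)) {				\
		int __i = 0;						\
		while (__i < 5 && cache_sizes[__i] < (size))		\
			__i++;						\
		__p = alloc_from_cache(__i, (size));			\
	} else {							\
		__p = generic_alloc(size);				\
	}								\
	__p;								\
})

int main(void)
{
	size_t runtime = (size_t)(rand() % 200) + 1;

	free(my_kmalloc(96));       /* literal size: cache path */
	free(my_kmalloc(runtime));  /* runtime size: generic path */
	return 0;
}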
diff --git a/include/linux/smb_fs_sb.h b/include/linux/smb_fs_sb.h
index 5b4ae2cc445c..3aa97aa4277f 100644
--- a/include/linux/smb_fs_sb.h
+++ b/include/linux/smb_fs_sb.h
@@ -55,7 +55,7 @@ struct smb_sb_info {
55 * generation is incremented. 55 * generation is incremented.
56 */ 56 */
57 unsigned int generation; 57 unsigned int generation;
58 pid_t conn_pid; 58 struct pid *conn_pid;
59 struct smb_conn_opt opt; 59 struct smb_conn_opt opt;
60 wait_queue_head_t conn_wq; 60 wait_queue_head_t conn_wq;
61 int conn_complete; 61 int conn_complete;
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 6d8846e7be6d..81480e613467 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -11,7 +11,7 @@
11 ** the sysctl() binary interface. Do *NOT* change the 11 ** the sysctl() binary interface. Do *NOT* change the
12 ** numbering of any existing values here, and do not change 12 ** numbering of any existing values here, and do not change
13 ** any numbers within any one set of values. If you have to 13 ** any numbers within any one set of values. If you have to
14 ** have to redefine an existing interface, use a new number for it. 14 ** redefine an existing interface, use a new number for it.
15 ** The kernel will then return -ENOTDIR to any application using 15 ** The kernel will then return -ENOTDIR to any application using
16 ** the old binary interface. 16 ** the old binary interface.
17 ** 17 **
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 9df8833670cb..98a1d8cfb73d 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -37,23 +37,37 @@ struct sysrq_key_op {
37 37
38#ifdef CONFIG_MAGIC_SYSRQ 38#ifdef CONFIG_MAGIC_SYSRQ
39 39
40extern int sysrq_on(void);
41
42/*
43 * Do not use this one directly:
44 */
45extern int __sysrq_enabled;
46
40/* Generic SysRq interface -- you may call it from any device driver, supplying 47/* Generic SysRq interface -- you may call it from any device driver, supplying
41 * ASCII code of the key, pointer to registers and kbd/tty structs (if they 48 * ASCII code of the key, pointer to registers and kbd/tty structs (if they
42 * are available -- else NULL's). 49 * are available -- else NULL's).
43 */ 50 */
44 51
45void handle_sysrq(int, struct tty_struct *); 52void handle_sysrq(int key, struct tty_struct *tty);
46void __handle_sysrq(int, struct tty_struct *, int check_mask); 53void __handle_sysrq(int key, struct tty_struct *tty, int check_mask);
47int register_sysrq_key(int, struct sysrq_key_op *); 54int register_sysrq_key(int key, struct sysrq_key_op *op);
48int unregister_sysrq_key(int, struct sysrq_key_op *); 55int unregister_sysrq_key(int key, struct sysrq_key_op *op);
49struct sysrq_key_op *__sysrq_get_key_op(int key); 56struct sysrq_key_op *__sysrq_get_key_op(int key);
50 57
51#else 58#else
52 59
60static inline int sysrq_on(void)
61{
62 return 0;
63}
53static inline int __reterr(void) 64static inline int __reterr(void)
54{ 65{
55 return -EINVAL; 66 return -EINVAL;
56} 67}
68static inline void handle_sysrq(int key, struct tty_struct *tty)
69{
70}
57 71
58#define register_sysrq_key(ig,nore) __reterr() 72#define register_sysrq_key(ig,nore) __reterr()
59#define unregister_sysrq_key(ig,nore) __reterr() 73#define unregister_sysrq_key(ig,nore) __reterr()
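register_sysrq_key() and the new sysrq_on() helper give drivers a way to hook a key and respect the runtime enable state. A hedged sketch of a module doing so; the sysrq_key_op field names (handler, help_msg, action_msg) are assumed from kernels of this vintage and are not shown in the hunk above:

#include <linux/sysrq.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/init.h>

static void example_sysrq_handler(int key, struct tty_struct *tty)
{
	printk(KERN_INFO "sysrq-%c seen (sysrq currently %s)\n",
	       key, sysrq_on() ? "enabled" : "disabled");
}

/* Field names assumed from the contemporaneous struct sysrq_key_op */
static struct sysrq_key_op example_sysrq_op = {
	.handler    = example_sysrq_handler,
	.help_msg   = "example(x)",
	.action_msg = "Example sysrq action",
};

static int __init example_sysrq_init(void)
{
	return register_sysrq_key('x', &example_sysrq_op);
}

static void __exit example_sysrq_exit(void)
{
	unregister_sysrq_key('x', &example_sysrq_op);
}

module_init(example_sysrq_init);
module_exit(example_sysrq_exit);
MODULE_LICENSE("GPL");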
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 10a3eec191fd..41456c148842 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -739,13 +739,13 @@ struct sockaddr_hci {
739struct hci_filter { 739struct hci_filter {
740 unsigned long type_mask; 740 unsigned long type_mask;
741 unsigned long event_mask[2]; 741 unsigned long event_mask[2];
742 __u16 opcode; 742 __le16 opcode;
743}; 743};
744 744
745struct hci_ufilter { 745struct hci_ufilter {
746 __u32 type_mask; 746 __u32 type_mask;
747 __u32 event_mask[2]; 747 __u32 event_mask[2];
748 __u16 opcode; 748 __le16 opcode;
749}; 749};
750 750
751#define HCI_FLT_TYPE_BITS 31 751#define HCI_FLT_TYPE_BITS 31
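Switching the filter opcode to __le16 documents that the field holds wire (little-endian) byte order, so code touching it is expected to convert explicitly and sparse can flag a bare assignment. A small hedged sketch of such accessors; the helper names are invented, only struct hci_filter and the conversion macros come from the kernel:

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci.h>
#include <asm/byteorder.h>

/* Store a host-order opcode into the little-endian filter field */
static inline void example_filter_set_opcode(struct hci_filter *flt, __u16 opcode)
{
	flt->opcode = cpu_to_le16(opcode);
}

/* Read it back in host order */
static inline __u16 example_filter_get_opcode(const struct hci_filter *flt)
{
	return le16_to_cpu(flt->opcode);
}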
diff --git a/include/rdma/ib_marshall.h b/include/rdma/ib_marshall.h
index 66bf4d7d0dfb..db037205c9e8 100644
--- a/include/rdma/ib_marshall.h
+++ b/include/rdma/ib_marshall.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005 Intel Corporation. All rights reserved. 2 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -41,6 +41,9 @@
41void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, 41void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
42 struct ib_qp_attr *src); 42 struct ib_qp_attr *src);
43 43
44void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
45 struct ib_ah_attr *src);
46
44void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, 47void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
45 struct ib_sa_path_rec *src); 48 struct ib_sa_path_rec *src);
46 49
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 8eacc3510993..fd2353fa7e12 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -43,6 +43,8 @@
43 43
44#include <linux/types.h> 44#include <linux/types.h>
45#include <linux/device.h> 45#include <linux/device.h>
46#include <linux/mm.h>
47#include <linux/dma-mapping.h>
46 48
47#include <asm/atomic.h> 49#include <asm/atomic.h>
48#include <asm/scatterlist.h> 50#include <asm/scatterlist.h>
@@ -848,6 +850,49 @@ struct ib_cache {
848 u8 *lmc_cache; 850 u8 *lmc_cache;
849}; 851};
850 852
853struct ib_dma_mapping_ops {
854 int (*mapping_error)(struct ib_device *dev,
855 u64 dma_addr);
856 u64 (*map_single)(struct ib_device *dev,
857 void *ptr, size_t size,
858 enum dma_data_direction direction);
859 void (*unmap_single)(struct ib_device *dev,
860 u64 addr, size_t size,
861 enum dma_data_direction direction);
862 u64 (*map_page)(struct ib_device *dev,
863 struct page *page, unsigned long offset,
864 size_t size,
865 enum dma_data_direction direction);
866 void (*unmap_page)(struct ib_device *dev,
867 u64 addr, size_t size,
868 enum dma_data_direction direction);
869 int (*map_sg)(struct ib_device *dev,
870 struct scatterlist *sg, int nents,
871 enum dma_data_direction direction);
872 void (*unmap_sg)(struct ib_device *dev,
873 struct scatterlist *sg, int nents,
874 enum dma_data_direction direction);
875 u64 (*dma_address)(struct ib_device *dev,
876 struct scatterlist *sg);
877 unsigned int (*dma_len)(struct ib_device *dev,
878 struct scatterlist *sg);
879 void (*sync_single_for_cpu)(struct ib_device *dev,
880 u64 dma_handle,
881 size_t size,
882 enum dma_data_direction dir);
883 void (*sync_single_for_device)(struct ib_device *dev,
884 u64 dma_handle,
885 size_t size,
886 enum dma_data_direction dir);
887 void *(*alloc_coherent)(struct ib_device *dev,
888 size_t size,
889 u64 *dma_handle,
890 gfp_t flag);
891 void (*free_coherent)(struct ib_device *dev,
892 size_t size, void *cpu_addr,
893 u64 dma_handle);
894};
895
851struct iw_cm_verbs; 896struct iw_cm_verbs;
852 897
853struct ib_device { 898struct ib_device {
@@ -992,6 +1037,8 @@ struct ib_device {
992 struct ib_mad *in_mad, 1037 struct ib_mad *in_mad,
993 struct ib_mad *out_mad); 1038 struct ib_mad *out_mad);
994 1039
1040 struct ib_dma_mapping_ops *dma_ops;
1041
995 struct module *owner; 1042 struct module *owner;
996 struct class_device class_dev; 1043 struct class_device class_dev;
997 struct kobject ports_parent; 1044 struct kobject ports_parent;
@@ -1395,10 +1442,216 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
1395 * usable for DMA. 1442 * usable for DMA.
1396 * @pd: The protection domain associated with the memory region. 1443 * @pd: The protection domain associated with the memory region.
1397 * @mr_access_flags: Specifies the memory access rights. 1444 * @mr_access_flags: Specifies the memory access rights.
1445 *
1446 * Note that the ib_dma_*() functions defined below must be used
1447 * to create/destroy addresses used with the Lkey or Rkey returned
1448 * by ib_get_dma_mr().
1398 */ 1449 */
1399struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags); 1450struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
1400 1451
1401/** 1452/**
1453 * ib_dma_mapping_error - check a DMA addr for error
1454 * @dev: The device for which the dma_addr was created
1455 * @dma_addr: The DMA address to check
1456 */
1457static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
1458{
1459 return dev->dma_ops ?
1460 dev->dma_ops->mapping_error(dev, dma_addr) :
1461 dma_mapping_error(dma_addr);
1462}
1463
1464/**
1465 * ib_dma_map_single - Map a kernel virtual address to DMA address
1466 * @dev: The device for which the dma_addr is to be created
1467 * @cpu_addr: The kernel virtual address
1468 * @size: The size of the region in bytes
1469 * @direction: The direction of the DMA
1470 */
1471static inline u64 ib_dma_map_single(struct ib_device *dev,
1472 void *cpu_addr, size_t size,
1473 enum dma_data_direction direction)
1474{
1475 return dev->dma_ops ?
1476 dev->dma_ops->map_single(dev, cpu_addr, size, direction) :
1477 dma_map_single(dev->dma_device, cpu_addr, size, direction);
1478}
1479
1480/**
1481 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
1482 * @dev: The device for which the DMA address was created
1483 * @addr: The DMA address
1484 * @size: The size of the region in bytes
1485 * @direction: The direction of the DMA
1486 */
1487static inline void ib_dma_unmap_single(struct ib_device *dev,
1488 u64 addr, size_t size,
1489 enum dma_data_direction direction)
1490{
1491 dev->dma_ops ?
1492 dev->dma_ops->unmap_single(dev, addr, size, direction) :
1493 dma_unmap_single(dev->dma_device, addr, size, direction);
1494}
1495
1496/**
1497 * ib_dma_map_page - Map a physical page to DMA address
1498 * @dev: The device for which the dma_addr is to be created
1499 * @page: The page to be mapped
1500 * @offset: The offset within the page
1501 * @size: The size of the region in bytes
1502 * @direction: The direction of the DMA
1503 */
1504static inline u64 ib_dma_map_page(struct ib_device *dev,
1505 struct page *page,
1506 unsigned long offset,
1507 size_t size,
1508 enum dma_data_direction direction)
1509{
1510 return dev->dma_ops ?
1511 dev->dma_ops->map_page(dev, page, offset, size, direction) :
1512 dma_map_page(dev->dma_device, page, offset, size, direction);
1513}
1514
1515/**
1516 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
1517 * @dev: The device for which the DMA address was created
1518 * @addr: The DMA address
1519 * @size: The size of the region in bytes
1520 * @direction: The direction of the DMA
1521 */
1522static inline void ib_dma_unmap_page(struct ib_device *dev,
1523 u64 addr, size_t size,
1524 enum dma_data_direction direction)
1525{
1526 dev->dma_ops ?
1527 dev->dma_ops->unmap_page(dev, addr, size, direction) :
1528 dma_unmap_page(dev->dma_device, addr, size, direction);
1529}
1530
1531/**
1532 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
1533 * @dev: The device for which the DMA addresses are to be created
1534 * @sg: The array of scatter/gather entries
1535 * @nents: The number of scatter/gather entries
1536 * @direction: The direction of the DMA
1537 */
1538static inline int ib_dma_map_sg(struct ib_device *dev,
1539 struct scatterlist *sg, int nents,
1540 enum dma_data_direction direction)
1541{
1542 return dev->dma_ops ?
1543 dev->dma_ops->map_sg(dev, sg, nents, direction) :
1544 dma_map_sg(dev->dma_device, sg, nents, direction);
1545}
1546
1547/**
1548 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
1549 * @dev: The device for which the DMA addresses were created
1550 * @sg: The array of scatter/gather entries
1551 * @nents: The number of scatter/gather entries
1552 * @direction: The direction of the DMA
1553 */
1554static inline void ib_dma_unmap_sg(struct ib_device *dev,
1555 struct scatterlist *sg, int nents,
1556 enum dma_data_direction direction)
1557{
1558 dev->dma_ops ?
1559 dev->dma_ops->unmap_sg(dev, sg, nents, direction) :
1560 dma_unmap_sg(dev->dma_device, sg, nents, direction);
1561}
1562
1563/**
1564 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
1565 * @dev: The device for which the DMA addresses were created
1566 * @sg: The scatter/gather entry
1567 */
1568static inline u64 ib_sg_dma_address(struct ib_device *dev,
1569 struct scatterlist *sg)
1570{
1571 return dev->dma_ops ?
1572 dev->dma_ops->dma_address(dev, sg) : sg_dma_address(sg);
1573}
1574
1575/**
1576 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
1577 * @dev: The device for which the DMA addresses were created
1578 * @sg: The scatter/gather entry
1579 */
1580static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
1581 struct scatterlist *sg)
1582{
1583 return dev->dma_ops ?
1584 dev->dma_ops->dma_len(dev, sg) : sg_dma_len(sg);
1585}
1586
1587/**
1588 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
1589 * @dev: The device for which the DMA address was created
1590 * @addr: The DMA address
1591 * @size: The size of the region in bytes
1592 * @dir: The direction of the DMA
1593 */
1594static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
1595 u64 addr,
1596 size_t size,
1597 enum dma_data_direction dir)
1598{
1599 dev->dma_ops ?
1600 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir) :
1601 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
1602}
1603
1604/**
1605 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
1606 * @dev: The device for which the DMA address was created
1607 * @addr: The DMA address
1608 * @size: The size of the region in bytes
1609 * @dir: The direction of the DMA
1610 */
1611static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
1612 u64 addr,
1613 size_t size,
1614 enum dma_data_direction dir)
1615{
1616 dev->dma_ops ?
1617 dev->dma_ops->sync_single_for_device(dev, addr, size, dir) :
1618 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
1619}
1620
1621/**
1622 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
1623 * @dev: The device for which the DMA address is requested
1624 * @size: The size of the region to allocate in bytes
1625 * @dma_handle: A pointer for returning the DMA address of the region
1626 * @flag: memory allocator flags
1627 */
1628static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
1629 size_t size,
1630 u64 *dma_handle,
1631 gfp_t flag)
1632{
1633 return dev->dma_ops ?
1634 dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag) :
1635 dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
1636}
1637
1638/**
1639 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
1640 * @dev: The device for which the DMA addresses were allocated
1641 * @size: The size of the region
1642 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
1643 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
1644 */
1645static inline void ib_dma_free_coherent(struct ib_device *dev,
1646 size_t size, void *cpu_addr,
1647 u64 dma_handle)
1648{
1649 dev->dma_ops ?
1650 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle) :
1651 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
1652}
1653
1654/**
1402 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use 1655 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
1403 * by an HCA. 1656 * by an HCA.
1404 * @pd: The protection domain associated assigned to the registered region. 1657 * @pd: The protection domain associated assigned to the registered region.
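The ib_dma_*() wrappers above route through a device's dma_ops when the provider installs one and otherwise fall straight through to the generic DMA API. A hedged sketch of a consumer mapping a receive buffer with them; the buffer handling is illustrative, and a real ULP would keep the mapping alive until the completion arrives:

#include <rdma/ib_verbs.h>
#include <linux/slab.h>
#include <linux/errno.h>

static int example_map_recv_buffer(struct ib_device *dev, size_t len)
{
	void *buf;
	u64 dma_addr;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	dma_addr = ib_dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr)) {
		kfree(buf);
		return -EIO;
	}

	/* ... fill an ib_sge with dma_addr/len and post the receive ... */

	/* after the completion: hand the data back to the CPU and unmap */
	ib_dma_sync_single_for_cpu(dev, dma_addr, len, DMA_FROM_DEVICE);
	ib_dma_unmap_single(dev, dma_addr, len, DMA_FROM_DEVICE);
	kfree(buf);
	return 0;
}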
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index deb5a0a4cee5..36cd8a8526a0 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -77,11 +77,34 @@ struct rdma_route {
77 int num_paths; 77 int num_paths;
78}; 78};
79 79
80struct rdma_conn_param {
81 const void *private_data;
82 u8 private_data_len;
83 u8 responder_resources;
84 u8 initiator_depth;
85 u8 flow_control;
86 u8 retry_count; /* ignored when accepting */
87 u8 rnr_retry_count;
88 /* Fields below ignored if a QP is created on the rdma_cm_id. */
89 u8 srq;
90 u32 qp_num;
91};
92
93struct rdma_ud_param {
94 const void *private_data;
95 u8 private_data_len;
96 struct ib_ah_attr ah_attr;
97 u32 qp_num;
98 u32 qkey;
99};
100
80struct rdma_cm_event { 101struct rdma_cm_event {
81 enum rdma_cm_event_type event; 102 enum rdma_cm_event_type event;
82 int status; 103 int status;
83 void *private_data; 104 union {
84 u8 private_data_len; 105 struct rdma_conn_param conn;
106 struct rdma_ud_param ud;
107 } param;
85}; 108};
86 109
87struct rdma_cm_id; 110struct rdma_cm_id;
@@ -204,25 +227,17 @@ void rdma_destroy_qp(struct rdma_cm_id *id);
204int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, 227int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
205 int *qp_attr_mask); 228 int *qp_attr_mask);
206 229
207struct rdma_conn_param {
208 const void *private_data;
209 u8 private_data_len;
210 u8 responder_resources;
211 u8 initiator_depth;
212 u8 flow_control;
213 u8 retry_count; /* ignored when accepting */
214 u8 rnr_retry_count;
215 /* Fields below ignored if a QP is created on the rdma_cm_id. */
216 u8 srq;
217 u32 qp_num;
218 enum ib_qp_type qp_type;
219};
220
221/** 230/**
222 * rdma_connect - Initiate an active connection request. 231 * rdma_connect - Initiate an active connection request.
232 * @id: Connection identifier to connect.
233 * @conn_param: Connection information used for connected QPs.
223 * 234 *
224 * Users must have resolved a route for the rdma_cm_id to connect with 235 * Users must have resolved a route for the rdma_cm_id to connect with
225 * by having called rdma_resolve_route before calling this routine. 236 * by having called rdma_resolve_route before calling this routine.
237 *
238 * This call will either connect to a remote QP or obtain remote QP
239 * information for unconnected rdma_cm_id's. The actual operation is
240 * based on the rdma_cm_id's port space.
226 */ 241 */
227int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); 242int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
228 243
@@ -253,6 +268,21 @@ int rdma_listen(struct rdma_cm_id *id, int backlog);
253int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); 268int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
254 269
255/** 270/**
271 * rdma_notify - Notifies the RDMA CM of an asynchronous event that has
272 * occurred on the connection.
273 * @id: Connection identifier to transition to established.
274 * @event: Asynchronous event.
275 *
276 * This routine should be invoked by users to notify the CM of relevant
277 * communication events. Events that should be reported to the CM and
278 * when to report them are:
279 *
280 * IB_EVENT_COMM_EST - Used when a message is received on a connected
281 * QP before an RTU has been received.
282 */
283int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event);
284
285/**
256 * rdma_reject - Called to reject a connection request or response. 286 * rdma_reject - Called to reject a connection request or response.
257 */ 287 */
258int rdma_reject(struct rdma_cm_id *id, const void *private_data, 288int rdma_reject(struct rdma_cm_id *id, const void *private_data,
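With rdma_conn_param declared ahead of the calls that take it, the active side fills one in and hands it to rdma_connect() once route resolution has finished; the passive side passes the equivalent to rdma_accept(). A hedged sketch of the active side, where the private-data payload and the numeric values are purely illustrative:

#include <rdma/rdma_cm.h>
#include <linux/string.h>

static const char example_greeting[] = "hello";

/* Call after route resolution has completed for @id */
static int example_start_connect(struct rdma_cm_id *id)
{
	struct rdma_conn_param param;

	memset(&param, 0, sizeof(param));
	param.private_data        = example_greeting;
	param.private_data_len    = sizeof(example_greeting);
	param.responder_resources = 1;
	param.initiator_depth     = 1;
	param.retry_count         = 7;
	param.rnr_retry_count     = 7;

	return rdma_connect(id, &param);
}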
diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h
index e8c3af1804d4..9b176df1d667 100644
--- a/include/rdma/rdma_cm_ib.h
+++ b/include/rdma/rdma_cm_ib.h
@@ -44,4 +44,7 @@
44int rdma_set_ib_paths(struct rdma_cm_id *id, 44int rdma_set_ib_paths(struct rdma_cm_id *id,
45 struct ib_sa_path_rec *path_rec, int num_paths); 45 struct ib_sa_path_rec *path_rec, int num_paths);
46 46
47/* Global qkey for UD QPs and multicast groups. */
48#define RDMA_UD_QKEY 0x01234567
49
47#endif /* RDMA_CM_IB_H */ 50#endif /* RDMA_CM_IB_H */
diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h
new file mode 100644
index 000000000000..9572ab8eeac1
--- /dev/null
+++ b/include/rdma/rdma_user_cm.h
@@ -0,0 +1,206 @@
1/*
2 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef RDMA_USER_CM_H
34#define RDMA_USER_CM_H
35
36#include <linux/types.h>
37#include <linux/in6.h>
38#include <rdma/ib_user_verbs.h>
39#include <rdma/ib_user_sa.h>
40
41#define RDMA_USER_CM_ABI_VERSION 3
42
43#define RDMA_MAX_PRIVATE_DATA 256
44
45enum {
46 RDMA_USER_CM_CMD_CREATE_ID,
47 RDMA_USER_CM_CMD_DESTROY_ID,
48 RDMA_USER_CM_CMD_BIND_ADDR,
49 RDMA_USER_CM_CMD_RESOLVE_ADDR,
50 RDMA_USER_CM_CMD_RESOLVE_ROUTE,
51 RDMA_USER_CM_CMD_QUERY_ROUTE,
52 RDMA_USER_CM_CMD_CONNECT,
53 RDMA_USER_CM_CMD_LISTEN,
54 RDMA_USER_CM_CMD_ACCEPT,
55 RDMA_USER_CM_CMD_REJECT,
56 RDMA_USER_CM_CMD_DISCONNECT,
57 RDMA_USER_CM_CMD_INIT_QP_ATTR,
58 RDMA_USER_CM_CMD_GET_EVENT,
59 RDMA_USER_CM_CMD_GET_OPTION,
60 RDMA_USER_CM_CMD_SET_OPTION,
61 RDMA_USER_CM_CMD_NOTIFY
62};
63
64/*
65 * command ABI structures.
66 */
67struct rdma_ucm_cmd_hdr {
68 __u32 cmd;
69 __u16 in;
70 __u16 out;
71};
72
73struct rdma_ucm_create_id {
74 __u64 uid;
75 __u64 response;
76 __u16 ps;
77 __u8 reserved[6];
78};
79
80struct rdma_ucm_create_id_resp {
81 __u32 id;
82};
83
84struct rdma_ucm_destroy_id {
85 __u64 response;
86 __u32 id;
87 __u32 reserved;
88};
89
90struct rdma_ucm_destroy_id_resp {
91 __u32 events_reported;
92};
93
94struct rdma_ucm_bind_addr {
95 __u64 response;
96 struct sockaddr_in6 addr;
97 __u32 id;
98};
99
100struct rdma_ucm_resolve_addr {
101 struct sockaddr_in6 src_addr;
102 struct sockaddr_in6 dst_addr;
103 __u32 id;
104 __u32 timeout_ms;
105};
106
107struct rdma_ucm_resolve_route {
108 __u32 id;
109 __u32 timeout_ms;
110};
111
112struct rdma_ucm_query_route {
113 __u64 response;
114 __u32 id;
115 __u32 reserved;
116};
117
118struct rdma_ucm_query_route_resp {
119 __u64 node_guid;
120 struct ib_user_path_rec ib_route[2];
121 struct sockaddr_in6 src_addr;
122 struct sockaddr_in6 dst_addr;
123 __u32 num_paths;
124 __u8 port_num;
125 __u8 reserved[3];
126};
127
128struct rdma_ucm_conn_param {
129 __u32 qp_num;
130 __u32 reserved;
131 __u8 private_data[RDMA_MAX_PRIVATE_DATA];
132 __u8 private_data_len;
133 __u8 srq;
134 __u8 responder_resources;
135 __u8 initiator_depth;
136 __u8 flow_control;
137 __u8 retry_count;
138 __u8 rnr_retry_count;
139 __u8 valid;
140};
141
142struct rdma_ucm_ud_param {
143 __u32 qp_num;
144 __u32 qkey;
145 struct ib_uverbs_ah_attr ah_attr;
146 __u8 private_data[RDMA_MAX_PRIVATE_DATA];
147 __u8 private_data_len;
148 __u8 reserved[7];
149};
150
151struct rdma_ucm_connect {
152 struct rdma_ucm_conn_param conn_param;
153 __u32 id;
154 __u32 reserved;
155};
156
157struct rdma_ucm_listen {
158 __u32 id;
159 __u32 backlog;
160};
161
162struct rdma_ucm_accept {
163 __u64 uid;
164 struct rdma_ucm_conn_param conn_param;
165 __u32 id;
166 __u32 reserved;
167};
168
169struct rdma_ucm_reject {
170 __u32 id;
171 __u8 private_data_len;
172 __u8 reserved[3];
173 __u8 private_data[RDMA_MAX_PRIVATE_DATA];
174};
175
176struct rdma_ucm_disconnect {
177 __u32 id;
178};
179
180struct rdma_ucm_init_qp_attr {
181 __u64 response;
182 __u32 id;
183 __u32 qp_state;
184};
185
186struct rdma_ucm_notify {
187 __u32 id;
188 __u32 event;
189};
190
191struct rdma_ucm_get_event {
192 __u64 response;
193};
194
195struct rdma_ucm_event_resp {
196 __u64 uid;
197 __u32 id;
198 __u32 event;
199 __u32 status;
200 union {
201 struct rdma_ucm_conn_param conn;
202 struct rdma_ucm_ud_param ud;
203 } param;
204};
205
206#endif /* RDMA_USER_CM_H */
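This ABI is consumed by writing a struct rdma_ucm_cmd_hdr followed by the matching command body to the rdma_cm character device; the response is copied back to the user buffer named in the command. A user-space sketch of issuing RDMA_USER_CM_CMD_CREATE_ID, where the device path, the write layout and the RDMA_PS_TCP value are assumptions drawn from the companion driver and rdma_cm.h rather than from this header:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <rdma/rdma_user_cm.h>

int main(void)
{
	struct {
		struct rdma_ucm_cmd_hdr hdr;
		struct rdma_ucm_create_id cmd;
	} req;
	struct rdma_ucm_create_id_resp resp;
	int fd;

	fd = open("/dev/infiniband/rdma_cm", O_RDWR);  /* assumed path */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.hdr.cmd = RDMA_USER_CM_CMD_CREATE_ID;
	req.hdr.in  = sizeof(req.cmd);        /* payload size, header excluded */
	req.hdr.out = sizeof(resp);
	req.cmd.uid = 1;                      /* caller's cookie for events */
	req.cmd.response = (uintptr_t)&resp;  /* kernel writes the new id here */
	req.cmd.ps = 0x0106;                  /* assumed: RDMA_PS_TCP */

	if (write(fd, &req, sizeof(req)) != (ssize_t)sizeof(req))
		perror("write");
	else
		printf("created rdma_cm id %u\n", resp.id);

	close(fd);
	return 0;
}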
diff --git a/include/video/sstfb.h b/include/video/sstfb.h
index 5dbf5e7e50a8..baa163f770ab 100644
--- a/include/video/sstfb.h
+++ b/include/video/sstfb.h
@@ -119,7 +119,7 @@
119#define BACKPORCH 0x0208 119#define BACKPORCH 0x0208
120#define VIDEODIMENSIONS 0x020c 120#define VIDEODIMENSIONS 0x020c
121#define FBIINIT0 0x0210 /* misc+fifo controls */ 121#define FBIINIT0 0x0210 /* misc+fifo controls */
122# define EN_VGA_PASSTHROUGH BIT(0) 122# define DIS_VGA_PASSTHROUGH BIT(0)
123# define FBI_RESET BIT(1) 123# define FBI_RESET BIT(1)
124# define FIFO_RESET BIT(2) 124# define FIFO_RESET BIT(2)
125#define FBIINIT1 0x0214 /* PCI + video controls */ 125#define FBIINIT1 0x0214 /* PCI + video controls */
@@ -251,7 +251,7 @@
251# define DACREG_ICS_CLK1_A 0 /* bit4 */ 251# define DACREG_ICS_CLK1_A 0 /* bit4 */
252 252
253/* sst default init registers */ 253/* sst default init registers */
254#define FBIINIT0_DEFAULT EN_VGA_PASSTHROUGH 254#define FBIINIT0_DEFAULT DIS_VGA_PASSTHROUGH
255 255
256#define FBIINIT1_DEFAULT \ 256#define FBIINIT1_DEFAULT \
257 ( \ 257 ( \
@@ -296,6 +296,11 @@
296 * 296 *
297 */ 297 */
298 298
299/* ioctl to enable/disable VGA passthrough */
300#define SSTFB_SET_VGAPASS _IOW('F', 0xdd, __u32)
301#define SSTFB_GET_VGAPASS _IOR('F', 0xdd, __u32)
302
303
299/* used to know which clock to set 304/* used to know which clock to set
300enum { 305enum {
301 VID_CLOCK=0, 306 VID_CLOCK=0,
@@ -317,7 +322,7 @@ struct pll_timing {
317}; 322};
318 323
319struct dac_switch { 324struct dac_switch {
320 char * name; 325 const char *name;
321 int (*detect) (struct fb_info *info); 326 int (*detect) (struct fb_info *info);
322 int (*set_pll) (struct fb_info *info, const struct pll_timing *t, const int clock); 327 int (*set_pll) (struct fb_info *info, const struct pll_timing *t, const int clock);
323 void (*set_vidmod) (struct fb_info *info, const int bpp); 328 void (*set_vidmod) (struct fb_info *info, const int bpp);
@@ -345,7 +350,7 @@ struct sstfb_par {
345 struct pci_dev *dev; 350 struct pci_dev *dev;
346 int type; 351 int type;
347 u8 revision; 352 u8 revision;
348 int gfx_clock; /* status */ 353 u8 vgapass; /* VGA pass through: 1=enabled, 0=disabled */
349}; 354};
350 355
351#endif /* _SSTFB_H_ */ 356#endif /* _SSTFB_H_ */
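The two new ioctls let user space flip the vgapass state that the driver now tracks in sstfb_par. A small hedged sketch, assuming the card is /dev/fb0 and that <video/sstfb.h> is visible to user space:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <video/sstfb.h>

int main(void)
{
	__u32 pass = 1;   /* 1 = enable VGA passthrough, 0 = disable */
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/fb0");
		return 1;
	}
	if (ioctl(fd, SSTFB_SET_VGAPASS, &pass) < 0)
		perror("SSTFB_SET_VGAPASS");
	if (ioctl(fd, SSTFB_GET_VGAPASS, &pass) == 0)
		printf("vgapass is now %u\n", (unsigned int)pass);
	close(fd);
	return 0;
}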
diff --git a/init/Kconfig b/init/Kconfig
index 9edf103b3ec3..f000edb3bb7a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -348,7 +348,7 @@ config SYSCTL_SYSCALL
348 If unsure say Y here. 348 If unsure say Y here.
349 349
350config KALLSYMS 350config KALLSYMS
351 bool "Load all symbols for debugging/kksymoops" if EMBEDDED 351 bool "Load all symbols for debugging/ksymoops" if EMBEDDED
352 default y 352 default y
353 help 353 help
354 Say Y here to let the kernel print out symbolic crash information and 354 Say Y here to let the kernel print out symbolic crash information and
diff --git a/ipc/msgutil.c b/ipc/msgutil.c
index 0992616eeed6..c82c215693d7 100644
--- a/ipc/msgutil.c
+++ b/ipc/msgutil.c
@@ -36,7 +36,7 @@ struct msg_msg *load_msg(const void __user *src, int len)
36 if (alen > DATALEN_MSG) 36 if (alen > DATALEN_MSG)
37 alen = DATALEN_MSG; 37 alen = DATALEN_MSG;
38 38
39 msg = (struct msg_msg *)kmalloc(sizeof(*msg) + alen, GFP_KERNEL); 39 msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL);
40 if (msg == NULL) 40 if (msg == NULL)
41 return ERR_PTR(-ENOMEM); 41 return ERR_PTR(-ENOMEM);
42 42
@@ -56,7 +56,7 @@ struct msg_msg *load_msg(const void __user *src, int len)
56 alen = len; 56 alen = len;
57 if (alen > DATALEN_SEG) 57 if (alen > DATALEN_SEG)
58 alen = DATALEN_SEG; 58 alen = DATALEN_SEG;
59 seg = (struct msg_msgseg *)kmalloc(sizeof(*seg) + alen, 59 seg = kmalloc(sizeof(*seg) + alen,
60 GFP_KERNEL); 60 GFP_KERNEL);
61 if (seg == NULL) { 61 if (seg == NULL) {
62 err = -ENOMEM; 62 err = -ENOMEM;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 2c3b4431472b..232aed2b10f9 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2342,32 +2342,48 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
2342} 2342}
2343 2343
2344/** 2344/**
2345 * cpuset_zone_allowed - Can we allocate memory on zone z's memory node? 2345 * cpuset_zone_allowed_softwall - Can we allocate on zone z's memory node?
2346 * @z: is this zone on an allowed node? 2346 * @z: is this zone on an allowed node?
2347 * @gfp_mask: memory allocation flags (we use __GFP_HARDWALL) 2347 * @gfp_mask: memory allocation flags
2348 * 2348 *
2349 * If we're in interrupt, yes, we can always allocate. If zone 2349 * If we're in interrupt, yes, we can always allocate. If
2350 * __GFP_THISNODE is set, yes, we can always allocate. If zone
2350 * z's node is in our tasks mems_allowed, yes. If it's not a 2351 * z's node is in our tasks mems_allowed, yes. If it's not a
2351 * __GFP_HARDWALL request and this zone's nodes is in the nearest 2352 * __GFP_HARDWALL request and this zone's nodes is in the nearest
2352 * mem_exclusive cpuset ancestor to this tasks cpuset, yes. 2353 * mem_exclusive cpuset ancestor to this tasks cpuset, yes.
2353 * Otherwise, no. 2354 * Otherwise, no.
2354 * 2355 *
2356 * If __GFP_HARDWALL is set, cpuset_zone_allowed_softwall()
2357 * reduces to cpuset_zone_allowed_hardwall(). Otherwise,
2358 * cpuset_zone_allowed_softwall() might sleep, and might allow a zone
2359 * from an enclosing cpuset.
2360 *
2361 * cpuset_zone_allowed_hardwall() only handles the simpler case of
2362 * hardwall cpusets, and never sleeps.
2363 *
2364 * The __GFP_THISNODE placement logic is really handled elsewhere,
2365 * by forcibly using a zonelist starting at a specified node, and by
2366 * (in get_page_from_freelist()) refusing to consider the zones for
2367 * any node on the zonelist except the first. By the time any such
2368 * calls get to this routine, we should just shut up and say 'yes'.
2369 *
2355 * GFP_USER allocations are marked with the __GFP_HARDWALL bit, 2370 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
2356 * and do not allow allocations outside the current tasks cpuset. 2371 * and do not allow allocations outside the current tasks cpuset.
2357 * GFP_KERNEL allocations are not so marked, so can escape to the 2372 * GFP_KERNEL allocations are not so marked, so can escape to the
2358 * nearest mem_exclusive ancestor cpuset. 2373 * nearest enclosing mem_exclusive ancestor cpuset.
2359 * 2374 *
2360 * Scanning up parent cpusets requires callback_mutex. The __alloc_pages() 2375 * Scanning up parent cpusets requires callback_mutex. The
2361 * routine only calls here with __GFP_HARDWALL bit _not_ set if 2376 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
2362 * it's a GFP_KERNEL allocation, and all nodes in the current tasks 2377 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
2363 * mems_allowed came up empty on the first pass over the zonelist. 2378 * current tasks mems_allowed came up empty on the first pass over
2364 * So only GFP_KERNEL allocations, if all nodes in the cpuset are 2379 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
2365 * short of memory, might require taking the callback_mutex mutex. 2380 * cpuset are short of memory, might require taking the callback_mutex
2381 * mutex.
2366 * 2382 *
2367 * The first call here from mm/page_alloc:get_page_from_freelist() 2383 * The first call here from mm/page_alloc:get_page_from_freelist()
2368 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, so 2384 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
2369 * no allocation on a node outside the cpuset is allowed (unless in 2385 * so no allocation on a node outside the cpuset is allowed (unless
2370 * interrupt, of course). 2386 * in interrupt, of course).
2371 * 2387 *
2372 * The second pass through get_page_from_freelist() doesn't even call 2388 * The second pass through get_page_from_freelist() doesn't even call
2373 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() 2389 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
@@ -2380,12 +2396,12 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
2380 * GFP_USER - only nodes in current tasks mems allowed ok. 2396 * GFP_USER - only nodes in current tasks mems allowed ok.
2381 * 2397 *
2382 * Rule: 2398 * Rule:
2383 * Don't call cpuset_zone_allowed() if you can't sleep, unless you 2399 * Don't call cpuset_zone_allowed_softwall if you can't sleep, unless you
2384 * pass in the __GFP_HARDWALL flag set in gfp_flag, which disables 2400 * pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
2385 * the code that might scan up ancestor cpusets and sleep. 2401 * the code that might scan up ancestor cpusets and sleep.
2386 **/ 2402 */
2387 2403
2388int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) 2404int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
2389{ 2405{
2390 int node; /* node that zone z is on */ 2406 int node; /* node that zone z is on */
2391 const struct cpuset *cs; /* current cpuset ancestors */ 2407 const struct cpuset *cs; /* current cpuset ancestors */
@@ -2415,6 +2431,40 @@ int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
2415 return allowed; 2431 return allowed;
2416} 2432}
2417 2433
2434/*
2435 * cpuset_zone_allowed_hardwall - Can we allocate on zone z's memory node?
2436 * @z: is this zone on an allowed node?
2437 * @gfp_mask: memory allocation flags
2438 *
2439 * If we're in interrupt, yes, we can always allocate.
2440 * If __GFP_THISNODE is set, yes, we can always allocate. If zone
2441 * z's node is in our tasks mems_allowed, yes. Otherwise, no.
2442 *
2443 * The __GFP_THISNODE placement logic is really handled elsewhere,
2444 * by forcibly using a zonelist starting at a specified node, and by
2445 * (in get_page_from_freelist()) refusing to consider the zones for
2446 * any node on the zonelist except the first. By the time any such
2447 * calls get to this routine, we should just shut up and say 'yes'.
2448 *
2449 * Unlike the cpuset_zone_allowed_softwall() variant, above,
2450 * this variant requires that the zone be in the current tasks
2451 * mems_allowed or that we're in interrupt. It does not scan up the
2452 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
2453 * It never sleeps.
2454 */
2455
2456int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
2457{
2458 int node; /* node that zone z is on */
2459
2460 if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2461 return 1;
2462 node = zone_to_nid(z);
2463 if (node_isset(node, current->mems_allowed))
2464 return 1;
2465 return 0;
2466}
2467
2418/** 2468/**
2419 * cpuset_lock - lock out any changes to cpuset structures 2469 * cpuset_lock - lock out any changes to cpuset structures
2420 * 2470 *
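
The split into two predicates is easiest to read from the caller's side: paths that may sleep and want the escape to the nearest mem_exclusive ancestor keep using the softwall check, while atomic paths (reclaim, the slab fast path) use the hardwall one. A minimal sketch with a hypothetical caller; only the two cpuset helpers come from this patch:

        /* hypothetical helper, for illustration only */
        static struct zone *first_allowed_zone(struct zonelist *zl,
                                               gfp_t gfp_mask, int atomic_context)
        {
                struct zone **z;

                for (z = zl->zones; *z; z++) {
                        if (atomic_context) {
                                /* never sleeps, never takes callback_mutex */
                                if (cpuset_zone_allowed_hardwall(*z, gfp_mask))
                                        return *z;
                        } else {
                                /* may scan up to the nearest mem_exclusive
                                 * ancestor and may sleep on callback_mutex */
                                if (cpuset_zone_allowed_softwall(*z, gfp_mask))
                                        return *z;
                        }
                }
                return NULL;
        }
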
diff --git a/kernel/fork.c b/kernel/fork.c
index d16c566eb645..fc723e595cd5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -203,7 +203,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
203 struct mempolicy *pol; 203 struct mempolicy *pol;
204 204
205 down_write(&oldmm->mmap_sem); 205 down_write(&oldmm->mmap_sem);
206 flush_cache_mm(oldmm); 206 flush_cache_dup_mm(oldmm);
207 /* 207 /*
208 * Not linked in yet - no deadlock potential: 208 * Not linked in yet - no deadlock potential:
209 */ 209 */
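
dup_mmap() now uses a dedicated hook so that architectures with virtually indexed caches can flush only what fork() actually needs. As a hedged note, not part of this hunk: architectures with no special requirement are expected to simply alias the new hook to the old one in their asm/cacheflush.h:

        /* assumed fallback on architectures that need no special handling */
        #define flush_cache_dup_mm(mm)  flush_cache_mm(mm)
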
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b02032476dc2..01e750559034 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -43,13 +43,49 @@
43#include "lockdep_internals.h" 43#include "lockdep_internals.h"
44 44
45/* 45/*
46 * hash_lock: protects the lockdep hashes and class/list/hash allocators. 46 * lockdep_lock: protects the lockdep graph, the hashes and the
47 * class/list/hash allocators.
47 * 48 *
48 * This is one of the rare exceptions where it's justified 49 * This is one of the rare exceptions where it's justified
49 * to use a raw spinlock - we really dont want the spinlock 50 * to use a raw spinlock - we really dont want the spinlock
50 * code to recurse back into the lockdep code. 51 * code to recurse back into the lockdep code...
51 */ 52 */
52static raw_spinlock_t hash_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 53static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
54
55static int graph_lock(void)
56{
57 __raw_spin_lock(&lockdep_lock);
58 /*
59 * Make sure that if another CPU detected a bug while
60 * walking the graph we dont change it (while the other
61 * CPU is busy printing out stuff with the graph lock
62 * dropped already)
63 */
64 if (!debug_locks) {
65 __raw_spin_unlock(&lockdep_lock);
66 return 0;
67 }
68 return 1;
69}
70
71static inline int graph_unlock(void)
72{
73 __raw_spin_unlock(&lockdep_lock);
74 return 0;
75}
76
77/*
78 * Turn lock debugging off and return with 0 if it was off already,
79 * and also release the graph lock:
80 */
81static inline int debug_locks_off_graph_unlock(void)
82{
83 int ret = debug_locks_off();
84
85 __raw_spin_unlock(&lockdep_lock);
86
87 return ret;
88}
53 89
54static int lockdep_initialized; 90static int lockdep_initialized;
55 91
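
The three helpers above establish the calling convention used through the rest of this file: graph-modifying code brackets its work with graph_lock()/graph_unlock() and treats a zero return from graph_lock() as "another CPU already turned the validator off, give up". A sketch of that pattern, not itself part of the patch:

        static int example_graph_update(void)
        {
                if (!graph_lock())
                        return 0;       /* lockdep already disabled elsewhere */

                /* ... inspect or extend the dependency graph here ... */

                graph_unlock();
                return 1;
        }
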
@@ -57,14 +93,15 @@ unsigned long nr_list_entries;
57static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; 93static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
58 94
59/* 95/*
60 * Allocate a lockdep entry. (assumes hash_lock held, returns 96 * Allocate a lockdep entry. (assumes the graph_lock held, returns
61 * with NULL on failure) 97 * with NULL on failure)
62 */ 98 */
63static struct lock_list *alloc_list_entry(void) 99static struct lock_list *alloc_list_entry(void)
64{ 100{
65 if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) { 101 if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
66 __raw_spin_unlock(&hash_lock); 102 if (!debug_locks_off_graph_unlock())
67 debug_locks_off(); 103 return NULL;
104
68 printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n"); 105 printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
69 printk("turning off the locking correctness validator.\n"); 106 printk("turning off the locking correctness validator.\n");
70 return NULL; 107 return NULL;
@@ -145,9 +182,7 @@ EXPORT_SYMBOL(lockdep_on);
145 */ 182 */
146 183
147#define VERBOSE 0 184#define VERBOSE 0
148#ifdef VERBOSE 185#define VERY_VERBOSE 0
149# define VERY_VERBOSE 0
150#endif
151 186
152#if VERBOSE 187#if VERBOSE
153# define HARDIRQ_VERBOSE 1 188# define HARDIRQ_VERBOSE 1
@@ -172,8 +207,8 @@ static int class_filter(struct lock_class *class)
172 !strcmp(class->name, "&struct->lockfield")) 207 !strcmp(class->name, "&struct->lockfield"))
173 return 1; 208 return 1;
174#endif 209#endif
175 /* Allow everything else. 0 would be filter everything else */ 210 /* Filter everything else. 1 would be to allow everything else */
176 return 1; 211 return 0;
177} 212}
178#endif 213#endif
179 214
@@ -207,7 +242,7 @@ static int softirq_verbose(struct lock_class *class)
207 242
208/* 243/*
209 * Stack-trace: tightly packed array of stack backtrace 244 * Stack-trace: tightly packed array of stack backtrace
210 * addresses. Protected by the hash_lock. 245 * addresses. Protected by the graph_lock.
211 */ 246 */
212unsigned long nr_stack_trace_entries; 247unsigned long nr_stack_trace_entries;
213static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES]; 248static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
@@ -226,18 +261,15 @@ static int save_trace(struct stack_trace *trace)
226 trace->max_entries = trace->nr_entries; 261 trace->max_entries = trace->nr_entries;
227 262
228 nr_stack_trace_entries += trace->nr_entries; 263 nr_stack_trace_entries += trace->nr_entries;
229 if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES)) {
230 __raw_spin_unlock(&hash_lock);
231 return 0;
232 }
233 264
234 if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) { 265 if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
235 __raw_spin_unlock(&hash_lock); 266 if (!debug_locks_off_graph_unlock())
236 if (debug_locks_off()) { 267 return 0;
237 printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n"); 268
238 printk("turning off the locking correctness validator.\n"); 269 printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
239 dump_stack(); 270 printk("turning off the locking correctness validator.\n");
240 } 271 dump_stack();
272
241 return 0; 273 return 0;
242 } 274 }
243 275
@@ -526,9 +558,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
526{ 558{
527 struct task_struct *curr = current; 559 struct task_struct *curr = current;
528 560
529 __raw_spin_unlock(&hash_lock); 561 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
530 debug_locks_off();
531 if (debug_locks_silent)
532 return 0; 562 return 0;
533 563
534 printk("\n=======================================================\n"); 564 printk("\n=======================================================\n");
@@ -556,12 +586,10 @@ static noinline int print_circular_bug_tail(void)
556 if (debug_locks_silent) 586 if (debug_locks_silent)
557 return 0; 587 return 0;
558 588
559 /* hash_lock unlocked by the header */
560 __raw_spin_lock(&hash_lock);
561 this.class = check_source->class; 589 this.class = check_source->class;
562 if (!save_trace(&this.trace)) 590 if (!save_trace(&this.trace))
563 return 0; 591 return 0;
564 __raw_spin_unlock(&hash_lock); 592
565 print_circular_bug_entry(&this, 0); 593 print_circular_bug_entry(&this, 0);
566 594
567 printk("\nother info that might help us debug this:\n\n"); 595 printk("\nother info that might help us debug this:\n\n");
@@ -577,8 +605,10 @@ static noinline int print_circular_bug_tail(void)
577 605
578static int noinline print_infinite_recursion_bug(void) 606static int noinline print_infinite_recursion_bug(void)
579{ 607{
580 __raw_spin_unlock(&hash_lock); 608 if (!debug_locks_off_graph_unlock())
581 DEBUG_LOCKS_WARN_ON(1); 609 return 0;
610
611 WARN_ON(1);
582 612
583 return 0; 613 return 0;
584} 614}
@@ -713,9 +743,7 @@ print_bad_irq_dependency(struct task_struct *curr,
713 enum lock_usage_bit bit2, 743 enum lock_usage_bit bit2,
714 const char *irqclass) 744 const char *irqclass)
715{ 745{
716 __raw_spin_unlock(&hash_lock); 746 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
717 debug_locks_off();
718 if (debug_locks_silent)
719 return 0; 747 return 0;
720 748
721 printk("\n======================================================\n"); 749 printk("\n======================================================\n");
@@ -796,9 +824,7 @@ static int
796print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, 824print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
797 struct held_lock *next) 825 struct held_lock *next)
798{ 826{
799 debug_locks_off(); 827 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
800 __raw_spin_unlock(&hash_lock);
801 if (debug_locks_silent)
802 return 0; 828 return 0;
803 829
804 printk("\n=============================================\n"); 830 printk("\n=============================================\n");
@@ -974,14 +1000,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
974 * Debugging printouts: 1000 * Debugging printouts:
975 */ 1001 */
976 if (verbose(prev->class) || verbose(next->class)) { 1002 if (verbose(prev->class) || verbose(next->class)) {
977 __raw_spin_unlock(&hash_lock); 1003 graph_unlock();
978 printk("\n new dependency: "); 1004 printk("\n new dependency: ");
979 print_lock_name(prev->class); 1005 print_lock_name(prev->class);
980 printk(" => "); 1006 printk(" => ");
981 print_lock_name(next->class); 1007 print_lock_name(next->class);
982 printk("\n"); 1008 printk("\n");
983 dump_stack(); 1009 dump_stack();
984 __raw_spin_lock(&hash_lock); 1010 return graph_lock();
985 } 1011 }
986 return 1; 1012 return 1;
987} 1013}
@@ -1046,8 +1072,10 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
1046 } 1072 }
1047 return 1; 1073 return 1;
1048out_bug: 1074out_bug:
1049 __raw_spin_unlock(&hash_lock); 1075 if (!debug_locks_off_graph_unlock())
1050 DEBUG_LOCKS_WARN_ON(1); 1076 return 0;
1077
1078 WARN_ON(1);
1051 1079
1052 return 0; 1080 return 0;
1053} 1081}
@@ -1201,7 +1229,10 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1201 hash_head = classhashentry(key); 1229 hash_head = classhashentry(key);
1202 1230
1203 raw_local_irq_save(flags); 1231 raw_local_irq_save(flags);
1204 __raw_spin_lock(&hash_lock); 1232 if (!graph_lock()) {
1233 raw_local_irq_restore(flags);
1234 return NULL;
1235 }
1205 /* 1236 /*
1206 * We have to do the hash-walk again, to avoid races 1237 * We have to do the hash-walk again, to avoid races
1207 * with another CPU: 1238 * with another CPU:
@@ -1214,9 +1245,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1214 * the hash: 1245 * the hash:
1215 */ 1246 */
1216 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { 1247 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
1217 __raw_spin_unlock(&hash_lock); 1248 if (!debug_locks_off_graph_unlock()) {
1249 raw_local_irq_restore(flags);
1250 return NULL;
1251 }
1218 raw_local_irq_restore(flags); 1252 raw_local_irq_restore(flags);
1219 debug_locks_off(); 1253
1220 printk("BUG: MAX_LOCKDEP_KEYS too low!\n"); 1254 printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
1221 printk("turning off the locking correctness validator.\n"); 1255 printk("turning off the locking correctness validator.\n");
1222 return NULL; 1256 return NULL;
@@ -1237,18 +1271,23 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1237 list_add_tail_rcu(&class->hash_entry, hash_head); 1271 list_add_tail_rcu(&class->hash_entry, hash_head);
1238 1272
1239 if (verbose(class)) { 1273 if (verbose(class)) {
1240 __raw_spin_unlock(&hash_lock); 1274 graph_unlock();
1241 raw_local_irq_restore(flags); 1275 raw_local_irq_restore(flags);
1276
1242 printk("\nnew class %p: %s", class->key, class->name); 1277 printk("\nnew class %p: %s", class->key, class->name);
1243 if (class->name_version > 1) 1278 if (class->name_version > 1)
1244 printk("#%d", class->name_version); 1279 printk("#%d", class->name_version);
1245 printk("\n"); 1280 printk("\n");
1246 dump_stack(); 1281 dump_stack();
1282
1247 raw_local_irq_save(flags); 1283 raw_local_irq_save(flags);
1248 __raw_spin_lock(&hash_lock); 1284 if (!graph_lock()) {
1285 raw_local_irq_restore(flags);
1286 return NULL;
1287 }
1249 } 1288 }
1250out_unlock_set: 1289out_unlock_set:
1251 __raw_spin_unlock(&hash_lock); 1290 graph_unlock();
1252 raw_local_irq_restore(flags); 1291 raw_local_irq_restore(flags);
1253 1292
1254 if (!subclass || force) 1293 if (!subclass || force)
@@ -1264,7 +1303,7 @@ out_unlock_set:
1264 * add it and return 0 - in this case the new dependency chain is 1303 * add it and return 0 - in this case the new dependency chain is
1265 * validated. If the key is already hashed, return 1. 1304 * validated. If the key is already hashed, return 1.
1266 */ 1305 */
1267static inline int lookup_chain_cache(u64 chain_key) 1306static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
1268{ 1307{
1269 struct list_head *hash_head = chainhashentry(chain_key); 1308 struct list_head *hash_head = chainhashentry(chain_key);
1270 struct lock_chain *chain; 1309 struct lock_chain *chain;
@@ -1278,34 +1317,32 @@ static inline int lookup_chain_cache(u64 chain_key)
1278 if (chain->chain_key == chain_key) { 1317 if (chain->chain_key == chain_key) {
1279cache_hit: 1318cache_hit:
1280 debug_atomic_inc(&chain_lookup_hits); 1319 debug_atomic_inc(&chain_lookup_hits);
1281 /* 1320 if (very_verbose(class))
1282 * In the debugging case, force redundant checking 1321 printk("\nhash chain already cached, key: %016Lx tail class: [%p] %s\n", chain_key, class->key, class->name);
1283 * by returning 1:
1284 */
1285#ifdef CONFIG_DEBUG_LOCKDEP
1286 __raw_spin_lock(&hash_lock);
1287 return 1;
1288#endif
1289 return 0; 1322 return 0;
1290 } 1323 }
1291 } 1324 }
1325 if (very_verbose(class))
1326 printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n", chain_key, class->key, class->name);
1292 /* 1327 /*
1293 * Allocate a new chain entry from the static array, and add 1328 * Allocate a new chain entry from the static array, and add
1294 * it to the hash: 1329 * it to the hash:
1295 */ 1330 */
1296 __raw_spin_lock(&hash_lock); 1331 if (!graph_lock())
1332 return 0;
1297 /* 1333 /*
1298 * We have to walk the chain again locked - to avoid duplicates: 1334 * We have to walk the chain again locked - to avoid duplicates:
1299 */ 1335 */
1300 list_for_each_entry(chain, hash_head, entry) { 1336 list_for_each_entry(chain, hash_head, entry) {
1301 if (chain->chain_key == chain_key) { 1337 if (chain->chain_key == chain_key) {
1302 __raw_spin_unlock(&hash_lock); 1338 graph_unlock();
1303 goto cache_hit; 1339 goto cache_hit;
1304 } 1340 }
1305 } 1341 }
1306 if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) { 1342 if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
1307 __raw_spin_unlock(&hash_lock); 1343 if (!debug_locks_off_graph_unlock())
1308 debug_locks_off(); 1344 return 0;
1345
1309 printk("BUG: MAX_LOCKDEP_CHAINS too low!\n"); 1346 printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
1310 printk("turning off the locking correctness validator.\n"); 1347 printk("turning off the locking correctness validator.\n");
1311 return 0; 1348 return 0;
@@ -1381,9 +1418,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1381 struct held_lock *this, int forwards, 1418 struct held_lock *this, int forwards,
1382 const char *irqclass) 1419 const char *irqclass)
1383{ 1420{
1384 __raw_spin_unlock(&hash_lock); 1421 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1385 debug_locks_off();
1386 if (debug_locks_silent)
1387 return 0; 1422 return 0;
1388 1423
1389 printk("\n=========================================================\n"); 1424 printk("\n=========================================================\n");
@@ -1453,7 +1488,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
1453 return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass); 1488 return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
1454} 1489}
1455 1490
1456static inline void print_irqtrace_events(struct task_struct *curr) 1491void print_irqtrace_events(struct task_struct *curr)
1457{ 1492{
1458 printk("irq event stamp: %u\n", curr->irq_events); 1493 printk("irq event stamp: %u\n", curr->irq_events);
1459 printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event); 1494 printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event);
@@ -1466,19 +1501,13 @@ static inline void print_irqtrace_events(struct task_struct *curr)
1466 print_ip_sym(curr->softirq_disable_ip); 1501 print_ip_sym(curr->softirq_disable_ip);
1467} 1502}
1468 1503
1469#else
1470static inline void print_irqtrace_events(struct task_struct *curr)
1471{
1472}
1473#endif 1504#endif
1474 1505
1475static int 1506static int
1476print_usage_bug(struct task_struct *curr, struct held_lock *this, 1507print_usage_bug(struct task_struct *curr, struct held_lock *this,
1477 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) 1508 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
1478{ 1509{
1479 __raw_spin_unlock(&hash_lock); 1510 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1480 debug_locks_off();
1481 if (debug_locks_silent)
1482 return 0; 1511 return 0;
1483 1512
1484 printk("\n=================================\n"); 1513 printk("\n=================================\n");
@@ -1539,12 +1568,13 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
1539 if (likely(this->class->usage_mask & new_mask)) 1568 if (likely(this->class->usage_mask & new_mask))
1540 return 1; 1569 return 1;
1541 1570
1542 __raw_spin_lock(&hash_lock); 1571 if (!graph_lock())
1572 return 0;
1543 /* 1573 /*
1544 * Make sure we didnt race: 1574 * Make sure we didnt race:
1545 */ 1575 */
1546 if (unlikely(this->class->usage_mask & new_mask)) { 1576 if (unlikely(this->class->usage_mask & new_mask)) {
1547 __raw_spin_unlock(&hash_lock); 1577 graph_unlock();
1548 return 1; 1578 return 1;
1549 } 1579 }
1550 1580
@@ -1730,16 +1760,16 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
1730 debug_atomic_dec(&nr_unused_locks); 1760 debug_atomic_dec(&nr_unused_locks);
1731 break; 1761 break;
1732 default: 1762 default:
1733 __raw_spin_unlock(&hash_lock); 1763 if (!debug_locks_off_graph_unlock())
1734 debug_locks_off(); 1764 return 0;
1735 WARN_ON(1); 1765 WARN_ON(1);
1736 return 0; 1766 return 0;
1737 } 1767 }
1738 1768
1739 __raw_spin_unlock(&hash_lock); 1769 graph_unlock();
1740 1770
1741 /* 1771 /*
1742 * We must printk outside of the hash_lock: 1772 * We must printk outside of the graph_lock:
1743 */ 1773 */
1744 if (ret == 2) { 1774 if (ret == 2) {
1745 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]); 1775 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
@@ -2137,9 +2167,9 @@ out_calc_hash:
2137 * We look up the chain_key and do the O(N^2) check and update of 2167 * We look up the chain_key and do the O(N^2) check and update of
2138 * the dependencies only if this is a new dependency chain. 2168 * the dependencies only if this is a new dependency chain.
2139 * (If lookup_chain_cache() returns with 1 it acquires 2169 * (If lookup_chain_cache() returns with 1 it acquires
2140 * hash_lock for us) 2170 * graph_lock for us)
2141 */ 2171 */
2142 if (!trylock && (check == 2) && lookup_chain_cache(chain_key)) { 2172 if (!trylock && (check == 2) && lookup_chain_cache(chain_key, class)) {
2143 /* 2173 /*
2144 * Check whether last held lock: 2174 * Check whether last held lock:
2145 * 2175 *
@@ -2170,7 +2200,7 @@ out_calc_hash:
2170 if (!chain_head && ret != 2) 2200 if (!chain_head && ret != 2)
2171 if (!check_prevs_add(curr, hlock)) 2201 if (!check_prevs_add(curr, hlock))
2172 return 0; 2202 return 0;
2173 __raw_spin_unlock(&hash_lock); 2203 graph_unlock();
2174 } 2204 }
2175 curr->lockdep_depth++; 2205 curr->lockdep_depth++;
2176 check_chain_key(curr); 2206 check_chain_key(curr);
@@ -2433,6 +2463,7 @@ EXPORT_SYMBOL_GPL(lock_release);
2433void lockdep_reset(void) 2463void lockdep_reset(void)
2434{ 2464{
2435 unsigned long flags; 2465 unsigned long flags;
2466 int i;
2436 2467
2437 raw_local_irq_save(flags); 2468 raw_local_irq_save(flags);
2438 current->curr_chain_key = 0; 2469 current->curr_chain_key = 0;
@@ -2443,6 +2474,8 @@ void lockdep_reset(void)
2443 nr_softirq_chains = 0; 2474 nr_softirq_chains = 0;
2444 nr_process_chains = 0; 2475 nr_process_chains = 0;
2445 debug_locks = 1; 2476 debug_locks = 1;
2477 for (i = 0; i < CHAINHASH_SIZE; i++)
2478 INIT_LIST_HEAD(chainhash_table + i);
2446 raw_local_irq_restore(flags); 2479 raw_local_irq_restore(flags);
2447} 2480}
2448 2481
@@ -2479,7 +2512,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
2479 int i; 2512 int i;
2480 2513
2481 raw_local_irq_save(flags); 2514 raw_local_irq_save(flags);
2482 __raw_spin_lock(&hash_lock); 2515 graph_lock();
2483 2516
2484 /* 2517 /*
2485 * Unhash all classes that were created by this module: 2518 * Unhash all classes that were created by this module:
@@ -2493,7 +2526,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
2493 zap_class(class); 2526 zap_class(class);
2494 } 2527 }
2495 2528
2496 __raw_spin_unlock(&hash_lock); 2529 graph_unlock();
2497 raw_local_irq_restore(flags); 2530 raw_local_irq_restore(flags);
2498} 2531}
2499 2532
@@ -2521,20 +2554,20 @@ void lockdep_reset_lock(struct lockdep_map *lock)
2521 * Debug check: in the end all mapped classes should 2554 * Debug check: in the end all mapped classes should
2522 * be gone. 2555 * be gone.
2523 */ 2556 */
2524 __raw_spin_lock(&hash_lock); 2557 graph_lock();
2525 for (i = 0; i < CLASSHASH_SIZE; i++) { 2558 for (i = 0; i < CLASSHASH_SIZE; i++) {
2526 head = classhash_table + i; 2559 head = classhash_table + i;
2527 if (list_empty(head)) 2560 if (list_empty(head))
2528 continue; 2561 continue;
2529 list_for_each_entry_safe(class, next, head, hash_entry) { 2562 list_for_each_entry_safe(class, next, head, hash_entry) {
2530 if (unlikely(class == lock->class_cache)) { 2563 if (unlikely(class == lock->class_cache)) {
2531 __raw_spin_unlock(&hash_lock); 2564 if (debug_locks_off_graph_unlock())
2532 DEBUG_LOCKS_WARN_ON(1); 2565 WARN_ON(1);
2533 goto out_restore; 2566 goto out_restore;
2534 } 2567 }
2535 } 2568 }
2536 } 2569 }
2537 __raw_spin_unlock(&hash_lock); 2570 graph_unlock();
2538 2571
2539out_restore: 2572out_restore:
2540 raw_local_irq_restore(flags); 2573 raw_local_irq_restore(flags);
diff --git a/kernel/module.c b/kernel/module.c
index d9eae45d0145..b565eaeff7e6 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -824,9 +824,34 @@ static inline void module_unload_init(struct module *mod)
824} 824}
825#endif /* CONFIG_MODULE_UNLOAD */ 825#endif /* CONFIG_MODULE_UNLOAD */
826 826
827static ssize_t show_initstate(struct module_attribute *mattr,
828 struct module *mod, char *buffer)
829{
830 const char *state = "unknown";
831
832 switch (mod->state) {
833 case MODULE_STATE_LIVE:
834 state = "live";
835 break;
836 case MODULE_STATE_COMING:
837 state = "coming";
838 break;
839 case MODULE_STATE_GOING:
840 state = "going";
841 break;
842 }
843 return sprintf(buffer, "%s\n", state);
844}
845
846static struct module_attribute initstate = {
847 .attr = { .name = "initstate", .mode = 0444, .owner = THIS_MODULE },
848 .show = show_initstate,
849};
850
827static struct module_attribute *modinfo_attrs[] = { 851static struct module_attribute *modinfo_attrs[] = {
828 &modinfo_version, 852 &modinfo_version,
829 &modinfo_srcversion, 853 &modinfo_srcversion,
854 &initstate,
830#ifdef CONFIG_MODULE_UNLOAD 855#ifdef CONFIG_MODULE_UNLOAD
831 &refcnt, 856 &refcnt,
832#endif 857#endif
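
The new attribute shows up as a regular sysfs file, so module state can be polled from user space. A small standalone reader, assuming the usual /sys/module/<name>/ layout; the path and the example module name are illustrative, not taken from the patch:

        #include <stdio.h>

        int main(void)
        {
                char state[32];
                FILE *f = fopen("/sys/module/loop/initstate", "r");

                if (!f) {
                        perror("fopen");        /* module absent or sysfs not mounted */
                        return 1;
                }
                if (fgets(state, sizeof(state), f))
                        printf("loop: %s", state);      /* "live", "coming" or "going" */
                fclose(f);
                return 0;
        }
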
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index e2ce748e96af..f5b9ee6f6bbb 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -46,10 +46,8 @@ static inline struct nsproxy *clone_namespaces(struct nsproxy *orig)
46 struct nsproxy *ns; 46 struct nsproxy *ns;
47 47
48 ns = kmemdup(orig, sizeof(struct nsproxy), GFP_KERNEL); 48 ns = kmemdup(orig, sizeof(struct nsproxy), GFP_KERNEL);
49 if (ns) { 49 if (ns)
50 atomic_set(&ns->count, 1); 50 atomic_set(&ns->count, 1);
51 ns->id = -1;
52 }
53 return ns; 51 return ns;
54} 52}
55 53
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 710ed084e7c5..ed296225dcd4 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -20,13 +20,14 @@ config PM
20 sending the processor to sleep and saving power. 20 sending the processor to sleep and saving power.
21 21
22config PM_LEGACY 22config PM_LEGACY
23 bool "Legacy Power Management API" 23 bool "Legacy Power Management API (DEPRECATED)"
24 depends on PM 24 depends on PM
25 default y 25 default n
26 ---help--- 26 ---help---
27 Support for pm_register() and friends. 27 Support for pm_register() and friends. This old API is obsoleted
28 by the driver model.
28 29
29 If unsure, say Y. 30 If unsure, say N.
30 31
31config PM_DEBUG 32config PM_DEBUG
32 bool "Power Management Debug Support" 33 bool "Power Management Debug Support"
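
With the default flipped to n, configurations that still carry pm_register() users must now opt back in explicitly; everyone else ends up with something like the following .config fragment (illustrative):

        CONFIG_PM=y
        # CONFIG_PM_LEGACY is not set
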
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 99eeb119b06d..6d566bf7085c 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -28,8 +28,7 @@ static inline int freezeable(struct task_struct * p)
28 if ((p == current) || 28 if ((p == current) ||
29 (p->flags & PF_NOFREEZE) || 29 (p->flags & PF_NOFREEZE) ||
30 (p->exit_state == EXIT_ZOMBIE) || 30 (p->exit_state == EXIT_ZOMBIE) ||
31 (p->exit_state == EXIT_DEAD) || 31 (p->exit_state == EXIT_DEAD))
32 (p->state == TASK_STOPPED))
33 return 0; 32 return 0;
34 return 1; 33 return 1;
35} 34}
@@ -61,10 +60,16 @@ static inline void freeze_process(struct task_struct *p)
61 unsigned long flags; 60 unsigned long flags;
62 61
63 if (!freezing(p)) { 62 if (!freezing(p)) {
64 freeze(p); 63 rmb();
65 spin_lock_irqsave(&p->sighand->siglock, flags); 64 if (!frozen(p)) {
66 signal_wake_up(p, 0); 65 if (p->state == TASK_STOPPED)
67 spin_unlock_irqrestore(&p->sighand->siglock, flags); 66 force_sig_specific(SIGSTOP, p);
67
68 freeze(p);
69 spin_lock_irqsave(&p->sighand->siglock, flags);
70 signal_wake_up(p, p->state == TASK_STOPPED);
71 spin_unlock_irqrestore(&p->sighand->siglock, flags);
72 }
68 } 73 }
69} 74}
70 75
@@ -103,9 +108,7 @@ static unsigned int try_to_freeze_tasks(int freeze_user_space)
103 if (frozen(p)) 108 if (frozen(p))
104 continue; 109 continue;
105 110
106 if (p->state == TASK_TRACED && 111 if (p->state == TASK_TRACED && frozen(p->parent)) {
107 (frozen(p->parent) ||
108 p->parent->state == TASK_STOPPED)) {
109 cancel_freezing(p); 112 cancel_freezing(p);
110 continue; 113 continue;
111 } 114 }
diff --git a/kernel/relay.c b/kernel/relay.c
index 818e514729cf..a4701e7ba7d0 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -138,7 +138,7 @@ depopulate:
138 */ 138 */
139struct rchan_buf *relay_create_buf(struct rchan *chan) 139struct rchan_buf *relay_create_buf(struct rchan *chan)
140{ 140{
141 struct rchan_buf *buf = kcalloc(1, sizeof(struct rchan_buf), GFP_KERNEL); 141 struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
142 if (!buf) 142 if (!buf)
143 return NULL; 143 return NULL;
144 144
@@ -479,7 +479,7 @@ struct rchan *relay_open(const char *base_filename,
479 if (!(subbuf_size && n_subbufs)) 479 if (!(subbuf_size && n_subbufs))
480 return NULL; 480 return NULL;
481 481
482 chan = kcalloc(1, sizeof(struct rchan), GFP_KERNEL); 482 chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
483 if (!chan) 483 if (!chan)
484 return NULL; 484 return NULL;
485 485
diff --git a/kernel/sched.c b/kernel/sched.c
index 8a0afb97af71..5cd833bc2173 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3429,6 +3429,8 @@ asmlinkage void __sched schedule(void)
3429 "%s/0x%08x/%d\n", 3429 "%s/0x%08x/%d\n",
3430 current->comm, preempt_count(), current->pid); 3430 current->comm, preempt_count(), current->pid);
3431 debug_show_held_locks(current); 3431 debug_show_held_locks(current);
3432 if (irqs_disabled())
3433 print_irqtrace_events(current);
3432 dump_stack(); 3434 dump_stack();
3433 } 3435 }
3434 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 3436 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
@@ -6977,6 +6979,8 @@ void __might_sleep(char *file, int line)
6977 printk("in_atomic():%d, irqs_disabled():%d\n", 6979 printk("in_atomic():%d, irqs_disabled():%d\n",
6978 in_atomic(), irqs_disabled()); 6980 in_atomic(), irqs_disabled());
6979 debug_show_held_locks(current); 6981 debug_show_held_locks(current);
6982 if (irqs_disabled())
6983 print_irqtrace_events(current);
6980 dump_stack(); 6984 dump_stack();
6981 } 6985 }
6982#endif 6986#endif
diff --git a/kernel/signal.c b/kernel/signal.c
index 1921ffdc5e77..5630255d2e2a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1705,7 +1705,9 @@ finish_stop(int stop_count)
1705 read_unlock(&tasklist_lock); 1705 read_unlock(&tasklist_lock);
1706 } 1706 }
1707 1707
1708 schedule(); 1708 do {
1709 schedule();
1710 } while (try_to_freeze());
1709 /* 1711 /*
1710 * Now we don't run again until continued. 1712 * Now we don't run again until continued.
1711 */ 1713 */
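
The loop depends on try_to_freeze() returning non-zero when the stopped task was woken only so it could enter the refrigerator, in which case it goes back into schedule() rather than running on. For reference, the helper's contract is assumed to be the usual one of this era (sketch, not from this patch):

        /* assumed definition, as in include/linux/freezer.h */
        static inline int try_to_freeze(void)
        {
                if (freezing(current)) {
                        refrigerator();
                        return 1;
                }
                return 0;
        }
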
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 130c5ec9ee0b..600b33358ded 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -65,7 +65,6 @@ extern int sysctl_overcommit_memory;
65extern int sysctl_overcommit_ratio; 65extern int sysctl_overcommit_ratio;
66extern int sysctl_panic_on_oom; 66extern int sysctl_panic_on_oom;
67extern int max_threads; 67extern int max_threads;
68extern int sysrq_enabled;
69extern int core_uses_pid; 68extern int core_uses_pid;
70extern int suid_dumpable; 69extern int suid_dumpable;
71extern char core_pattern[]; 70extern char core_pattern[];
@@ -543,7 +542,7 @@ static ctl_table kern_table[] = {
543 { 542 {
544 .ctl_name = KERN_SYSRQ, 543 .ctl_name = KERN_SYSRQ,
545 .procname = "sysrq", 544 .procname = "sysrq",
546 .data = &sysrq_enabled, 545 .data = &__sysrq_enabled,
547 .maxlen = sizeof (int), 546 .maxlen = sizeof (int),
548 .mode = 0644, 547 .mode = 0644,
549 .proc_handler = &proc_dointvec, 548 .proc_handler = &proc_dointvec,
diff --git a/kernel/timer.c b/kernel/timer.c
index 0256ab443d8a..feddf817baa5 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1146,11 +1146,15 @@ static inline void calc_load(unsigned long ticks)
1146 unsigned long active_tasks; /* fixed-point */ 1146 unsigned long active_tasks; /* fixed-point */
1147 static int count = LOAD_FREQ; 1147 static int count = LOAD_FREQ;
1148 1148
1149 active_tasks = count_active_tasks(); 1149 count -= ticks;
1150 for (count -= ticks; count < 0; count += LOAD_FREQ) { 1150 if (unlikely(count < 0)) {
1151 CALC_LOAD(avenrun[0], EXP_1, active_tasks); 1151 active_tasks = count_active_tasks();
1152 CALC_LOAD(avenrun[1], EXP_5, active_tasks); 1152 do {
1153 CALC_LOAD(avenrun[2], EXP_15, active_tasks); 1153 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
1154 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
1155 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
1156 count += LOAD_FREQ;
1157 } while (count < 0);
1154 } 1158 }
1155} 1159}
1156 1160
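
The rewrite calls count_active_tasks() only when a full LOAD_FREQ interval has elapsed instead of on every tick; the averaging itself is unchanged. A standalone C program showing how the fixed-point CALC_LOAD step converges; the constants are copied here from the kernel headers of this era purely for illustration:

        #include <stdio.h>

        #define FSHIFT  11
        #define FIXED_1 (1 << FSHIFT)
        #define EXP_1   1884            /* 1/exp(5sec/1min) as fixed point */

        #define CALC_LOAD(load, exp, n) \
                load *= (exp); \
                load += (n) * (FIXED_1 - (exp)); \
                load >>= FSHIFT;

        int main(void)
        {
                unsigned long avenrun = 0;
                unsigned long active = 3 * FIXED_1;     /* pretend 3 runnable tasks */
                int i;

                for (i = 0; i < 24; i++) {              /* two minutes of 5s intervals */
                        CALC_LOAD(avenrun, EXP_1, active);
                        printf("interval %2d: load %lu.%02lu\n", i,
                               avenrun >> FSHIFT,
                               (avenrun & (FIXED_1 - 1)) * 100 / FIXED_1);
                }
                return 0;
        }
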
diff --git a/lib/Kconfig b/lib/Kconfig
index 47b172df3e31..9b03581cdecb 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -101,4 +101,9 @@ config TEXTSEARCH_FSM
101config PLIST 101config PLIST
102 boolean 102 boolean
103 103
104config IOMAP_COPY
105 boolean
106 depends on !UML
107 default y
108
104endmenu 109endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0701ddda1df8..818e4589f718 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -437,7 +437,7 @@ config FAIL_PAGE_ALLOC
437 Provide fault-injection capability for alloc_pages(). 437 Provide fault-injection capability for alloc_pages().
438 438
439config FAIL_MAKE_REQUEST 439config FAIL_MAKE_REQUEST
440 bool "Fault-injection capabilitiy for disk IO" 440 bool "Fault-injection capability for disk IO"
441 depends on FAULT_INJECTION 441 depends on FAULT_INJECTION
442 help 442 help
443 Provide fault-injection capability for disk IO. 443 Provide fault-injection capability for disk IO.
diff --git a/lib/Makefile b/lib/Makefile
index 2d6106af53cd..77b4bad7d441 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -5,20 +5,21 @@
5lib-y := ctype.o string.o vsprintf.o cmdline.o \ 5lib-y := ctype.o string.o vsprintf.o cmdline.o \
6 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \ 6 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
7 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \ 7 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
8 sha1.o irq_regs.o 8 sha1.o irq_regs.o reciprocal_div.o
9 9
10lib-$(CONFIG_MMU) += ioremap.o 10lib-$(CONFIG_MMU) += ioremap.o
11lib-$(CONFIG_SMP) += cpumask.o 11lib-$(CONFIG_SMP) += cpumask.o
12 12
13lib-y += kobject.o kref.o kobject_uevent.o klist.o 13lib-y += kobject.o kref.o kobject_uevent.o klist.o
14 14
15obj-y += sort.o parser.o halfmd4.o iomap_copy.o debug_locks.o random32.o 15obj-y += sort.o parser.o halfmd4.o debug_locks.o random32.o
16 16
17ifeq ($(CONFIG_DEBUG_KOBJECT),y) 17ifeq ($(CONFIG_DEBUG_KOBJECT),y)
18CFLAGS_kobject.o += -DDEBUG 18CFLAGS_kobject.o += -DDEBUG
19CFLAGS_kobject_uevent.o += -DDEBUG 19CFLAGS_kobject_uevent.o += -DDEBUG
20endif 20endif
21 21
22obj-$(CONFIG_IOMAP_COPY) += iomap_copy.o
22obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o 23obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
23obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o 24obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
24lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o 25lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 99fa277f9f7b..a9e4415b02dc 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -5,7 +5,6 @@
5 * 5 *
6 * (C) Copyright 1995 1996 Linus Torvalds 6 * (C) Copyright 1995 1996 Linus Torvalds
7 */ 7 */
8#include <linux/io.h>
9#include <linux/vmalloc.h> 8#include <linux/vmalloc.h>
10#include <linux/mm.h> 9#include <linux/mm.h>
11 10
diff --git a/lib/reciprocal_div.c b/lib/reciprocal_div.c
new file mode 100644
index 000000000000..6a3bd48fa2a0
--- /dev/null
+++ b/lib/reciprocal_div.c
@@ -0,0 +1,9 @@
1#include <asm/div64.h>
2#include <linux/reciprocal_div.h>
3
4u32 reciprocal_value(u32 k)
5{
6 u64 val = (1LL << 32) + (k - 1);
7 do_div(val, k);
8 return (u32)val;
9}
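
The new helper pairs with reciprocal_divide() in include/linux/reciprocal_div.h, which mm/slab.c below uses to replace the division in obj_to_index(). The trick is a 32x32->64 multiply followed by a 32-bit shift and can be checked in ordinary C; reciprocal_divide() below mirrors what that header is assumed to provide, and exactness only holds for dividends well under 2^32/k, which is true for offsets within a slab:

        #include <stdint.h>
        #include <stdio.h>

        static uint32_t reciprocal_value(uint32_t k)
        {
                uint64_t val = (1ULL << 32) + (k - 1);
                return (uint32_t)(val / k);
        }

        static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
        {
                return (uint32_t)(((uint64_t)a * r) >> 32);
        }

        int main(void)
        {
                uint32_t k = 192;       /* e.g. a slab buffer_size */
                uint32_t r = reciprocal_value(k);
                uint32_t a;

                for (a = 0; a < 100000; a += 7) {
                        if (reciprocal_divide(a, r) != a / k) {
                                printf("mismatch at %u\n", a);
                                return 1;
                        }
                }
                printf("reciprocal divide by %u matches for the tested range\n", k);
                return 0;
        }
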
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0ccc7f230252..cb362f761f17 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -44,14 +44,14 @@ static void clear_huge_page(struct page *page, unsigned long addr)
44} 44}
45 45
46static void copy_huge_page(struct page *dst, struct page *src, 46static void copy_huge_page(struct page *dst, struct page *src,
47 unsigned long addr) 47 unsigned long addr, struct vm_area_struct *vma)
48{ 48{
49 int i; 49 int i;
50 50
51 might_sleep(); 51 might_sleep();
52 for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) { 52 for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
53 cond_resched(); 53 cond_resched();
54 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE); 54 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
55 } 55 }
56} 56}
57 57
@@ -73,7 +73,7 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
73 73
74 for (z = zonelist->zones; *z; z++) { 74 for (z = zonelist->zones; *z; z++) {
75 nid = zone_to_nid(*z); 75 nid = zone_to_nid(*z);
76 if (cpuset_zone_allowed(*z, GFP_HIGHUSER) && 76 if (cpuset_zone_allowed_softwall(*z, GFP_HIGHUSER) &&
77 !list_empty(&hugepage_freelists[nid])) 77 !list_empty(&hugepage_freelists[nid]))
78 break; 78 break;
79 } 79 }
@@ -442,7 +442,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
442 } 442 }
443 443
444 spin_unlock(&mm->page_table_lock); 444 spin_unlock(&mm->page_table_lock);
445 copy_huge_page(new_page, old_page, address); 445 copy_huge_page(new_page, old_page, address, vma);
446 spin_lock(&mm->page_table_lock); 446 spin_lock(&mm->page_table_lock);
447 447
448 ptep = huge_pte_offset(mm, address & HPAGE_MASK); 448 ptep = huge_pte_offset(mm, address & HPAGE_MASK);
diff --git a/mm/memory.c b/mm/memory.c
index bf6100236e62..c00bac66ce9f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1441,7 +1441,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
1441 return pte; 1441 return pte;
1442} 1442}
1443 1443
1444static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va) 1444static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
1445{ 1445{
1446 /* 1446 /*
1447 * If the source page was a PFN mapping, we don't have 1447 * If the source page was a PFN mapping, we don't have
@@ -1464,9 +1464,9 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
1464 kunmap_atomic(kaddr, KM_USER0); 1464 kunmap_atomic(kaddr, KM_USER0);
1465 flush_dcache_page(dst); 1465 flush_dcache_page(dst);
1466 return; 1466 return;
1467 1467
1468 } 1468 }
1469 copy_user_highpage(dst, src, va); 1469 copy_user_highpage(dst, src, va, vma);
1470} 1470}
1471 1471
1472/* 1472/*
@@ -1577,7 +1577,7 @@ gotten:
1577 new_page = alloc_page_vma(GFP_HIGHUSER, vma, address); 1577 new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
1578 if (!new_page) 1578 if (!new_page)
1579 goto oom; 1579 goto oom;
1580 cow_user_page(new_page, old_page, address); 1580 cow_user_page(new_page, old_page, address, vma);
1581 } 1581 }
1582 1582
1583 /* 1583 /*
@@ -2200,7 +2200,7 @@ retry:
2200 page = alloc_page_vma(GFP_HIGHUSER, vma, address); 2200 page = alloc_page_vma(GFP_HIGHUSER, vma, address);
2201 if (!page) 2201 if (!page)
2202 goto oom; 2202 goto oom;
2203 copy_user_highpage(page, new_page, address); 2203 copy_user_highpage(page, new_page, address, vma);
2204 page_cache_release(new_page); 2204 page_cache_release(new_page);
2205 new_page = page; 2205 new_page = page;
2206 anon = 1; 2206 anon = 1;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 223d9ccb7d64..64cf3c214634 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -177,7 +177,7 @@ static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask)
177 nodemask_t nodes = node_online_map; 177 nodemask_t nodes = node_online_map;
178 178
179 for (z = zonelist->zones; *z; z++) 179 for (z = zonelist->zones; *z; z++)
180 if (cpuset_zone_allowed(*z, gfp_mask)) 180 if (cpuset_zone_allowed_softwall(*z, gfp_mask))
181 node_clear(zone_to_nid(*z), nodes); 181 node_clear(zone_to_nid(*z), nodes);
182 else 182 else
183 return CONSTRAINT_CPUSET; 183 return CONSTRAINT_CPUSET;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e6b17b2989e0..8c1a116875bc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1162,7 +1162,7 @@ zonelist_scan:
1162 zone->zone_pgdat != zonelist->zones[0]->zone_pgdat)) 1162 zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
1163 break; 1163 break;
1164 if ((alloc_flags & ALLOC_CPUSET) && 1164 if ((alloc_flags & ALLOC_CPUSET) &&
1165 !cpuset_zone_allowed(zone, gfp_mask)) 1165 !cpuset_zone_allowed_softwall(zone, gfp_mask))
1166 goto try_next_zone; 1166 goto try_next_zone;
1167 1167
1168 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { 1168 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
diff --git a/mm/slab.c b/mm/slab.c
index 2c655532f5ef..909975f6e090 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -109,6 +109,7 @@
109#include <linux/mutex.h> 109#include <linux/mutex.h>
110#include <linux/fault-inject.h> 110#include <linux/fault-inject.h>
111#include <linux/rtmutex.h> 111#include <linux/rtmutex.h>
112#include <linux/reciprocal_div.h>
112 113
113#include <asm/cacheflush.h> 114#include <asm/cacheflush.h>
114#include <asm/tlbflush.h> 115#include <asm/tlbflush.h>
@@ -386,6 +387,7 @@ struct kmem_cache {
386 unsigned int shared; 387 unsigned int shared;
387 388
388 unsigned int buffer_size; 389 unsigned int buffer_size;
390 u32 reciprocal_buffer_size;
389/* 3) touched by every alloc & free from the backend */ 391/* 3) touched by every alloc & free from the backend */
390 struct kmem_list3 *nodelists[MAX_NUMNODES]; 392 struct kmem_list3 *nodelists[MAX_NUMNODES];
391 393
@@ -627,10 +629,17 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
627 return slab->s_mem + cache->buffer_size * idx; 629 return slab->s_mem + cache->buffer_size * idx;
628} 630}
629 631
630static inline unsigned int obj_to_index(struct kmem_cache *cache, 632/*
631 struct slab *slab, void *obj) 633 * We want to avoid an expensive divide : (offset / cache->buffer_size)
634 * Using the fact that buffer_size is a constant for a particular cache,
635 * we can replace (offset / cache->buffer_size) by
636 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
637 */
638static inline unsigned int obj_to_index(const struct kmem_cache *cache,
639 const struct slab *slab, void *obj)
632{ 640{
633 return (unsigned)(obj - slab->s_mem) / cache->buffer_size; 641 u32 offset = (obj - slab->s_mem);
642 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
634} 643}
635 644
636/* 645/*
@@ -1427,6 +1436,8 @@ void __init kmem_cache_init(void)
1427 1436
1428 cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, 1437 cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
1429 cache_line_size()); 1438 cache_line_size());
1439 cache_cache.reciprocal_buffer_size =
1440 reciprocal_value(cache_cache.buffer_size);
1430 1441
1431 for (order = 0; order < MAX_ORDER; order++) { 1442 for (order = 0; order < MAX_ORDER; order++) {
1432 cache_estimate(order, cache_cache.buffer_size, 1443 cache_estimate(order, cache_cache.buffer_size,
@@ -2313,6 +2324,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
2313 if (flags & SLAB_CACHE_DMA) 2324 if (flags & SLAB_CACHE_DMA)
2314 cachep->gfpflags |= GFP_DMA; 2325 cachep->gfpflags |= GFP_DMA;
2315 cachep->buffer_size = size; 2326 cachep->buffer_size = size;
2327 cachep->reciprocal_buffer_size = reciprocal_value(size);
2316 2328
2317 if (flags & CFLGS_OFF_SLAB) { 2329 if (flags & CFLGS_OFF_SLAB) {
2318 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u); 2330 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
@@ -3252,6 +3264,7 @@ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3252 struct zone **z; 3264 struct zone **z;
3253 void *obj = NULL; 3265 void *obj = NULL;
3254 int nid; 3266 int nid;
3267 gfp_t local_flags = (flags & GFP_LEVEL_MASK);
3255 3268
3256retry: 3269retry:
3257 /* 3270 /*
@@ -3261,7 +3274,7 @@ retry:
3261 for (z = zonelist->zones; *z && !obj; z++) { 3274 for (z = zonelist->zones; *z && !obj; z++) {
3262 nid = zone_to_nid(*z); 3275 nid = zone_to_nid(*z);
3263 3276
3264 if (cpuset_zone_allowed(*z, flags | __GFP_HARDWALL) && 3277 if (cpuset_zone_allowed_hardwall(*z, flags) &&
3265 cache->nodelists[nid] && 3278 cache->nodelists[nid] &&
3266 cache->nodelists[nid]->free_objects) 3279 cache->nodelists[nid]->free_objects)
3267 obj = ____cache_alloc_node(cache, 3280 obj = ____cache_alloc_node(cache,
@@ -3275,7 +3288,12 @@ retry:
3275 * We may trigger various forms of reclaim on the allowed 3288 * We may trigger various forms of reclaim on the allowed
3276 * set and go into memory reserves if necessary. 3289 * set and go into memory reserves if necessary.
3277 */ 3290 */
3291 if (local_flags & __GFP_WAIT)
3292 local_irq_enable();
3293 kmem_flagcheck(cache, flags);
3278 obj = kmem_getpages(cache, flags, -1); 3294 obj = kmem_getpages(cache, flags, -1);
3295 if (local_flags & __GFP_WAIT)
3296 local_irq_disable();
3279 if (obj) { 3297 if (obj) {
3280 /* 3298 /*
3281 * Insert into the appropriate per node queues 3299 * Insert into the appropriate per node queues
@@ -3535,7 +3553,7 @@ EXPORT_SYMBOL(kmem_cache_zalloc);
3535 * 3553 *
3536 * Currently only used for dentry validation. 3554 * Currently only used for dentry validation.
3537 */ 3555 */
3538int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr) 3556int fastcall kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
3539{ 3557{
3540 unsigned long addr = (unsigned long)ptr; 3558 unsigned long addr = (unsigned long)ptr;
3541 unsigned long min_addr = PAGE_OFFSET; 3559 unsigned long min_addr = PAGE_OFFSET;
diff --git a/mm/slob.c b/mm/slob.c
index 542394184a58..2e9236e10ed1 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -157,7 +157,7 @@ static int fastcall find_order(int size)
157 return order; 157 return order;
158} 158}
159 159
160void *kmalloc(size_t size, gfp_t gfp) 160void *__kmalloc(size_t size, gfp_t gfp)
161{ 161{
162 slob_t *m; 162 slob_t *m;
163 bigblock_t *bb; 163 bigblock_t *bb;
@@ -186,8 +186,7 @@ void *kmalloc(size_t size, gfp_t gfp)
186 slob_free(bb, sizeof(bigblock_t)); 186 slob_free(bb, sizeof(bigblock_t));
187 return 0; 187 return 0;
188} 188}
189 189EXPORT_SYMBOL(__kmalloc);
190EXPORT_SYMBOL(kmalloc);
191 190
192void kfree(const void *block) 191void kfree(const void *block)
193{ 192{
@@ -329,6 +328,17 @@ EXPORT_SYMBOL(kmem_cache_name);
329static struct timer_list slob_timer = TIMER_INITIALIZER( 328static struct timer_list slob_timer = TIMER_INITIALIZER(
330 (void (*)(unsigned long))kmem_cache_init, 0, 0); 329 (void (*)(unsigned long))kmem_cache_init, 0, 0);
331 330
331int kmem_cache_shrink(struct kmem_cache *d)
332{
333 return 0;
334}
335EXPORT_SYMBOL(kmem_cache_shrink);
336
337int kmem_ptr_validate(struct kmem_cache *a, const void *b)
338{
339 return 0;
340}
341
332void kmem_cache_init(void) 342void kmem_cache_init(void)
333{ 343{
334 void *p = slob_alloc(PAGE_SIZE, 0, PAGE_SIZE-1); 344 void *p = slob_alloc(PAGE_SIZE, 0, PAGE_SIZE-1);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 093f5fe6dd77..e9813b06c7a3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -984,7 +984,7 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
984 if (!populated_zone(zone)) 984 if (!populated_zone(zone))
985 continue; 985 continue;
986 986
987 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 987 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
988 continue; 988 continue;
989 989
990 note_zone_scanning_priority(zone, priority); 990 note_zone_scanning_priority(zone, priority);
@@ -1034,7 +1034,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
1034 for (i = 0; zones[i] != NULL; i++) { 1034 for (i = 0; zones[i] != NULL; i++) {
1035 struct zone *zone = zones[i]; 1035 struct zone *zone = zones[i];
1036 1036
1037 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 1037 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1038 continue; 1038 continue;
1039 1039
1040 lru_pages += zone->nr_active + zone->nr_inactive; 1040 lru_pages += zone->nr_active + zone->nr_inactive;
@@ -1089,7 +1089,7 @@ out:
1089 for (i = 0; zones[i] != 0; i++) { 1089 for (i = 0; zones[i] != 0; i++) {
1090 struct zone *zone = zones[i]; 1090 struct zone *zone = zones[i];
1091 1091
1092 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 1092 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1093 continue; 1093 continue;
1094 1094
1095 zone->prev_priority = priority; 1095 zone->prev_priority = priority;
@@ -1354,7 +1354,7 @@ void wakeup_kswapd(struct zone *zone, int order)
1354 return; 1354 return;
1355 if (pgdat->kswapd_max_order < order) 1355 if (pgdat->kswapd_max_order < order)
1356 pgdat->kswapd_max_order = order; 1356 pgdat->kswapd_max_order = order;
1357 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 1357 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1358 return; 1358 return;
1359 if (!waitqueue_active(&pgdat->kswapd_wait)) 1359 if (!waitqueue_active(&pgdat->kswapd_wait))
1360 return; 1360 return;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 711a085eca5b..dbf98c49dbaa 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -123,10 +123,10 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
123 if (flt->opcode && 123 if (flt->opcode &&
124 ((evt == HCI_EV_CMD_COMPLETE && 124 ((evt == HCI_EV_CMD_COMPLETE &&
125 flt->opcode != 125 flt->opcode !=
126 get_unaligned((__u16 *)(skb->data + 3))) || 126 get_unaligned((__le16 *)(skb->data + 3))) ||
127 (evt == HCI_EV_CMD_STATUS && 127 (evt == HCI_EV_CMD_STATUS &&
128 flt->opcode != 128 flt->opcode !=
129 get_unaligned((__u16 *)(skb->data + 4))))) 129 get_unaligned((__le16 *)(skb->data + 4)))))
130 continue; 130 continue;
131 } 131 }
132 132
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 74046efdf875..8ce00d3703da 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -565,7 +565,7 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
565 } else { 565 } else {
566 struct sk_buff *free_it = next; 566 struct sk_buff *free_it = next;
567 567
568 /* Old fragmnet is completely overridden with 568 /* Old fragment is completely overridden with
569 * new one drop it. 569 * new one drop it.
570 */ 570 */
571 next = next->next; 571 next = next->next;
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 700353b330fd..066c64a97fd8 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -804,19 +804,19 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
804 804
805 integ_len = svc_getnl(&buf->head[0]); 805 integ_len = svc_getnl(&buf->head[0]);
806 if (integ_len & 3) 806 if (integ_len & 3)
807 goto out; 807 return stat;
808 if (integ_len > buf->len) 808 if (integ_len > buf->len)
809 goto out; 809 return stat;
810 if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len)) 810 if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len))
811 BUG(); 811 BUG();
812 /* copy out mic... */ 812 /* copy out mic... */
813 if (read_u32_from_xdr_buf(buf, integ_len, &mic.len)) 813 if (read_u32_from_xdr_buf(buf, integ_len, &mic.len))
814 BUG(); 814 BUG();
815 if (mic.len > RPC_MAX_AUTH_SIZE) 815 if (mic.len > RPC_MAX_AUTH_SIZE)
816 goto out; 816 return stat;
817 mic.data = kmalloc(mic.len, GFP_KERNEL); 817 mic.data = kmalloc(mic.len, GFP_KERNEL);
818 if (!mic.data) 818 if (!mic.data)
819 goto out; 819 return stat;
820 if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len)) 820 if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len))
821 goto out; 821 goto out;
822 maj_stat = gss_verify_mic(ctx, &integ_buf, &mic); 822 maj_stat = gss_verify_mic(ctx, &integ_buf, &mic);
@@ -826,6 +826,7 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
826 goto out; 826 goto out;
827 stat = 0; 827 stat = 0;
828out: 828out:
829 kfree(mic.data);
829 return stat; 830 return stat;
830} 831}
831 832
@@ -1065,7 +1066,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
1065 } 1066 }
1066 switch(cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle)) { 1067 switch(cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle)) {
1067 case -EAGAIN: 1068 case -EAGAIN:
1068 goto drop; 1069 case -ETIMEDOUT:
1069 case -ENOENT: 1070 case -ENOENT:
1070 goto drop; 1071 goto drop;
1071 case 0: 1072 case 0:
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 80aff0474572..14274490f92e 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -34,7 +34,7 @@
34 34
35#define RPCDBG_FACILITY RPCDBG_CACHE 35#define RPCDBG_FACILITY RPCDBG_CACHE
36 36
37static void cache_defer_req(struct cache_req *req, struct cache_head *item); 37static int cache_defer_req(struct cache_req *req, struct cache_head *item);
38static void cache_revisit_request(struct cache_head *item); 38static void cache_revisit_request(struct cache_head *item);
39 39
40static void cache_init(struct cache_head *h) 40static void cache_init(struct cache_head *h)
@@ -185,6 +185,7 @@ static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h);
185 * 185 *
186 * Returns 0 if the cache_head can be used, or cache_puts it and returns 186 * Returns 0 if the cache_head can be used, or cache_puts it and returns
187 * -EAGAIN if upcall is pending, 187 * -EAGAIN if upcall is pending,
188 * -ETIMEDOUT if upcall failed and should be retried,
188 * -ENOENT if cache entry was negative 189 * -ENOENT if cache entry was negative
189 */ 190 */
190int cache_check(struct cache_detail *detail, 191int cache_check(struct cache_detail *detail,
@@ -236,7 +237,8 @@ int cache_check(struct cache_detail *detail,
236 } 237 }
237 238
238 if (rv == -EAGAIN) 239 if (rv == -EAGAIN)
239 cache_defer_req(rqstp, h); 240 if (cache_defer_req(rqstp, h) != 0)
241 rv = -ETIMEDOUT;
240 242
241 if (rv) 243 if (rv)
242 cache_put(h, detail); 244 cache_put(h, detail);
@@ -523,14 +525,21 @@ static LIST_HEAD(cache_defer_list);
523static struct list_head cache_defer_hash[DFR_HASHSIZE]; 525static struct list_head cache_defer_hash[DFR_HASHSIZE];
524static int cache_defer_cnt; 526static int cache_defer_cnt;
525 527
526static void cache_defer_req(struct cache_req *req, struct cache_head *item) 528static int cache_defer_req(struct cache_req *req, struct cache_head *item)
527{ 529{
528 struct cache_deferred_req *dreq; 530 struct cache_deferred_req *dreq;
529 int hash = DFR_HASH(item); 531 int hash = DFR_HASH(item);
530 532
533 if (cache_defer_cnt >= DFR_MAX) {
534 /* too much in the cache, randomly drop this one,
535 * or continue and drop the oldest below
536 */
537 if (net_random()&1)
538 return -ETIMEDOUT;
539 }
531 dreq = req->defer(req); 540 dreq = req->defer(req);
532 if (dreq == NULL) 541 if (dreq == NULL)
533 return; 542 return -ETIMEDOUT;
534 543
535 dreq->item = item; 544 dreq->item = item;
536 dreq->recv_time = get_seconds(); 545 dreq->recv_time = get_seconds();
@@ -546,17 +555,8 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item)
546 /* it is in, now maybe clean up */ 555 /* it is in, now maybe clean up */
547 dreq = NULL; 556 dreq = NULL;
548 if (++cache_defer_cnt > DFR_MAX) { 557 if (++cache_defer_cnt > DFR_MAX) {
549 /* too much in the cache, randomly drop 558 dreq = list_entry(cache_defer_list.prev,
550 * first or last 559 struct cache_deferred_req, recent);
551 */
552 if (net_random()&1)
553 dreq = list_entry(cache_defer_list.next,
554 struct cache_deferred_req,
555 recent);
556 else
557 dreq = list_entry(cache_defer_list.prev,
558 struct cache_deferred_req,
559 recent);
560 list_del(&dreq->recent); 560 list_del(&dreq->recent);
561 list_del(&dreq->hash); 561 list_del(&dreq->hash);
562 cache_defer_cnt--; 562 cache_defer_cnt--;
@@ -571,6 +571,7 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item)
571 /* must have just been validated... */ 571 /* must have just been validated... */
572 cache_revisit_request(item); 572 cache_revisit_request(item);
573 } 573 }
574 return 0;
574} 575}
575 576
576static void cache_revisit_request(struct cache_head *item) 577static void cache_revisit_request(struct cache_head *item)
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index eb44ec929ca1..f3001f3626f6 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -308,7 +308,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
308 308
309 serv->sv_nrpools = npools; 309 serv->sv_nrpools = npools;
310 serv->sv_pools = 310 serv->sv_pools =
311 kcalloc(sizeof(struct svc_pool), serv->sv_nrpools, 311 kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
312 GFP_KERNEL); 312 GFP_KERNEL);
313 if (!serv->sv_pools) { 313 if (!serv->sv_pools) {
314 kfree(serv); 314 kfree(serv);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index a0a953a430c2..0d1e8fb83b93 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -53,6 +53,10 @@ struct auth_domain *unix_domain_find(char *name)
53 return NULL; 53 return NULL;
54 kref_init(&new->h.ref); 54 kref_init(&new->h.ref);
55 new->h.name = kstrdup(name, GFP_KERNEL); 55 new->h.name = kstrdup(name, GFP_KERNEL);
56 if (new->h.name == NULL) {
57 kfree(new);
58 return NULL;
59 }
56 new->h.flavour = &svcauth_unix; 60 new->h.flavour = &svcauth_unix;
57 new->addr_changes = 0; 61 new->addr_changes = 0;
58 rv = auth_domain_lookup(name, &new->h); 62 rv = auth_domain_lookup(name, &new->h);
@@ -435,6 +439,7 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
435 default: 439 default:
436 BUG(); 440 BUG();
437 case -EAGAIN: 441 case -EAGAIN:
442 case -ETIMEDOUT:
438 return SVC_DROP; 443 return SVC_DROP;
439 case -ENOENT: 444 case -ENOENT:
440 return SVC_DENIED; 445 return SVC_DENIED;
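Two small robustness fixes in svcauth_unix.c: a kstrdup() failure now frees the half-built auth_domain instead of leaving h.name NULL, and the new -ETIMEDOUT result from the deferral path above is treated like -EAGAIN, i.e. the request is dropped. A minimal sketch of the allocation pattern; the leading kmalloc() line is an assumption about how the domain is allocated earlier in unix_domain_find():

	/* Check every allocation in the constructor and unwind on failure. */
	new = kmalloc(sizeof(*new), GFP_KERNEL);	/* assumed earlier step */
	if (new == NULL)
		return NULL;
	kref_init(&new->h.ref);
	new->h.name = kstrdup(name, GFP_KERNEL);
	if (new->h.name == NULL) {	/* string copy failed: free the shell */
		kfree(new);
		return NULL;
	}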
diff --git a/net/tipc/config.c b/net/tipc/config.c
index 458a2c46cef3..baf55c459c8b 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -208,7 +208,7 @@ static void cfg_cmd_event(struct tipc_cmd_msg *msg,
208 208
209 if (mng.link_subscriptions > 64) 209 if (mng.link_subscriptions > 64)
210 break; 210 break;
211 sub = (struct subscr_data *)kmalloc(sizeof(*sub), 211 sub = kmalloc(sizeof(*sub),
212 GFP_ATOMIC); 212 GFP_ATOMIC);
213 if (sub == NULL) { 213 if (sub == NULL) {
214 warn("Memory squeeze; dropped remote link subscription\n"); 214 warn("Memory squeeze; dropped remote link subscription\n");
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 4dcb8867b5f4..124b341a18c0 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -600,7 +600,7 @@ int main(int ac, char **av)
600 input_mode = ask_silent; 600 input_mode = ask_silent;
601 valid_stdin = 1; 601 valid_stdin = 1;
602 } 602 }
603 } else if (sym_change_count) { 603 } else if (conf_get_changed()) {
604 name = getenv("KCONFIG_NOSILENTUPDATE"); 604 name = getenv("KCONFIG_NOSILENTUPDATE");
605 if (name && *name) { 605 if (name && *name) {
606 fprintf(stderr, _("\n*** Kernel configuration requires explicit update.\n\n")); 606 fprintf(stderr, _("\n*** Kernel configuration requires explicit update.\n\n"));
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 66b15ef02931..664fe29dacef 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -100,7 +100,7 @@ int conf_read_simple(const char *name, int def)
100 in = zconf_fopen(name); 100 in = zconf_fopen(name);
101 if (in) 101 if (in)
102 goto load; 102 goto load;
103 sym_change_count++; 103 sym_add_change_count(1);
104 if (!sym_defconfig_list) 104 if (!sym_defconfig_list)
105 return 1; 105 return 1;
106 106
@@ -312,7 +312,7 @@ int conf_read(const char *name)
312 struct expr *e; 312 struct expr *e;
313 int i, flags; 313 int i, flags;
314 314
315 sym_change_count = 0; 315 sym_set_change_count(0);
316 316
317 if (conf_read_simple(name, S_DEF_USER)) 317 if (conf_read_simple(name, S_DEF_USER))
318 return 1; 318 return 1;
@@ -364,7 +364,7 @@ int conf_read(const char *name)
364 sym->flags &= flags | ~SYMBOL_DEF_USER; 364 sym->flags &= flags | ~SYMBOL_DEF_USER;
365 } 365 }
366 366
367 sym_change_count += conf_warnings || conf_unsaved; 367 sym_add_change_count(conf_warnings || conf_unsaved);
368 368
369 return 0; 369 return 0;
370} 370}
@@ -432,7 +432,7 @@ int conf_write(const char *name)
432 use_timestamp ? "# " : "", 432 use_timestamp ? "# " : "",
433 use_timestamp ? ctime(&now) : ""); 433 use_timestamp ? ctime(&now) : "");
434 434
435 if (!sym_change_count) 435 if (!conf_get_changed())
436 sym_clear_all_valid(); 436 sym_clear_all_valid();
437 437
438 menu = rootmenu.list; 438 menu = rootmenu.list;
@@ -528,7 +528,7 @@ int conf_write(const char *name)
528 "# configuration written to %s\n" 528 "# configuration written to %s\n"
529 "#\n"), newname); 529 "#\n"), newname);
530 530
531 sym_change_count = 0; 531 sym_set_change_count(0);
532 532
533 return 0; 533 return 0;
534} 534}
@@ -765,3 +765,30 @@ int conf_write_autoconf(void)
765 765
766 return 0; 766 return 0;
767} 767}
768
769static int sym_change_count;
770static void (*conf_changed_callback)(void);
771
772void sym_set_change_count(int count)
773{
774 int _sym_change_count = sym_change_count;
775 sym_change_count = count;
776 if (conf_changed_callback &&
777 (bool)_sym_change_count != (bool)count)
778 conf_changed_callback();
779}
780
781void sym_add_change_count(int count)
782{
783 sym_set_change_count(count + sym_change_count);
784}
785
786bool conf_get_changed(void)
787{
788 return sym_change_count;
789}
790
791void conf_set_changed_callback(void (*fn)(void))
792{
793 conf_changed_callback = fn;
794}
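The block appended to confdata.c replaces the exported sym_change_count global with a small accessor API: sym_set_change_count() and sym_add_change_count() maintain the counter, conf_get_changed() reports whether unsaved changes exist, and conf_set_changed_callback() lets a frontend be notified when the dirty state toggles (the callback fires only on a false/true transition, not on every increment). A sketch of how a frontend would use it; the my_frontend_* names are hypothetical:

	/* Hypothetical frontend glue around the API added above. */
	static void my_frontend_dirty_changed(void)
	{
		/* e.g. enable/disable a Save control based on conf_get_changed() */
	}

	static void my_frontend_init(void)
	{
		conf_set_changed_callback(my_frontend_dirty_changed);
	}

	static void my_frontend_edit_symbol(void)
	{
		/* ... change a symbol value ... */
		sym_add_change_count(1);	/* mark the configuration dirty */

		if (conf_get_changed())
			;	/* unsaved changes exist; prompt before exit */
	}

gconf.c and qconf.cc below use exactly this hook to grey out their Save button and menu item, and mconf.c uses conf_get_changed() to skip the save prompt when nothing has changed.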
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index 7b0d3a93d5c0..61d8166166ef 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -38,8 +38,6 @@ static gboolean show_all = FALSE;
38static gboolean show_debug = FALSE; 38static gboolean show_debug = FALSE;
39static gboolean resizeable = FALSE; 39static gboolean resizeable = FALSE;
40 40
41static gboolean config_changed = FALSE;
42
43static char nohelp_text[] = 41static char nohelp_text[] =
44 N_("Sorry, no help available for this option yet.\n"); 42 N_("Sorry, no help available for this option yet.\n");
45 43
@@ -50,6 +48,8 @@ GtkWidget *text_w = NULL;
50GtkWidget *hpaned = NULL; 48GtkWidget *hpaned = NULL;
51GtkWidget *vpaned = NULL; 49GtkWidget *vpaned = NULL;
52GtkWidget *back_btn = NULL; 50GtkWidget *back_btn = NULL;
51GtkWidget *save_btn = NULL;
52GtkWidget *save_menu_item = NULL;
53 53
54GtkTextTag *tag1, *tag2; 54GtkTextTag *tag1, *tag2;
55GdkColor color; 55GdkColor color;
@@ -75,7 +75,7 @@ static void display_tree_part(void);
75static void update_tree(struct menu *src, GtkTreeIter * dst); 75static void update_tree(struct menu *src, GtkTreeIter * dst);
76static void set_node(GtkTreeIter * node, struct menu *menu, gchar ** row); 76static void set_node(GtkTreeIter * node, struct menu *menu, gchar ** row);
77static gchar **fill_row(struct menu *menu); 77static gchar **fill_row(struct menu *menu);
78 78static void conf_changed(void);
79 79
80/* Helping/Debugging Functions */ 80/* Helping/Debugging Functions */
81 81
@@ -224,6 +224,10 @@ void init_main_window(const gchar * glade_file)
224 gtk_check_menu_item_set_active((GtkCheckMenuItem *) widget, 224 gtk_check_menu_item_set_active((GtkCheckMenuItem *) widget,
225 show_value); 225 show_value);
226 226
227 save_btn = glade_xml_get_widget(xml, "button3");
228 save_menu_item = glade_xml_get_widget(xml, "save1");
229 conf_set_changed_callback(conf_changed);
230
227 style = gtk_widget_get_style(main_wnd); 231 style = gtk_widget_get_style(main_wnd);
228 widget = glade_xml_get_widget(xml, "toolbar1"); 232 widget = glade_xml_get_widget(xml, "toolbar1");
229 233
@@ -512,14 +516,14 @@ static void text_insert_msg(const char *title, const char *message)
512 516
513/* Main Windows Callbacks */ 517/* Main Windows Callbacks */
514 518
515void on_save1_activate(GtkMenuItem * menuitem, gpointer user_data); 519void on_save_activate(GtkMenuItem * menuitem, gpointer user_data);
516gboolean on_window1_delete_event(GtkWidget * widget, GdkEvent * event, 520gboolean on_window1_delete_event(GtkWidget * widget, GdkEvent * event,
517 gpointer user_data) 521 gpointer user_data)
518{ 522{
519 GtkWidget *dialog, *label; 523 GtkWidget *dialog, *label;
520 gint result; 524 gint result;
521 525
522 if (config_changed == FALSE) 526 if (!conf_get_changed())
523 return FALSE; 527 return FALSE;
524 528
525 dialog = gtk_dialog_new_with_buttons(_("Warning !"), 529 dialog = gtk_dialog_new_with_buttons(_("Warning !"),
@@ -543,7 +547,7 @@ gboolean on_window1_delete_event(GtkWidget * widget, GdkEvent * event,
543 result = gtk_dialog_run(GTK_DIALOG(dialog)); 547 result = gtk_dialog_run(GTK_DIALOG(dialog));
544 switch (result) { 548 switch (result) {
545 case GTK_RESPONSE_YES: 549 case GTK_RESPONSE_YES:
546 on_save1_activate(NULL, NULL); 550 on_save_activate(NULL, NULL);
547 return FALSE; 551 return FALSE;
548 case GTK_RESPONSE_NO: 552 case GTK_RESPONSE_NO:
549 return FALSE; 553 return FALSE;
@@ -621,12 +625,10 @@ void on_load1_activate(GtkMenuItem * menuitem, gpointer user_data)
621} 625}
622 626
623 627
624void on_save1_activate(GtkMenuItem * menuitem, gpointer user_data) 628void on_save_activate(GtkMenuItem * menuitem, gpointer user_data)
625{ 629{
626 if (conf_write(NULL)) 630 if (conf_write(NULL))
627 text_insert_msg(_("Error"), _("Unable to save configuration !")); 631 text_insert_msg(_("Error"), _("Unable to save configuration !"));
628
629 config_changed = FALSE;
630} 632}
631 633
632 634
@@ -819,12 +821,6 @@ void on_load_clicked(GtkButton * button, gpointer user_data)
819} 821}
820 822
821 823
822void on_save_clicked(GtkButton * button, gpointer user_data)
823{
824 on_save1_activate(NULL, user_data);
825}
826
827
828void on_single_clicked(GtkButton * button, gpointer user_data) 824void on_single_clicked(GtkButton * button, gpointer user_data)
829{ 825{
830 view_mode = SINGLE_VIEW; 826 view_mode = SINGLE_VIEW;
@@ -899,7 +895,6 @@ static void renderer_edited(GtkCellRendererText * cell,
899 895
900 sym_set_string_value(sym, new_def); 896 sym_set_string_value(sym, new_def);
901 897
902 config_changed = TRUE;
903 update_tree(&rootmenu, NULL); 898 update_tree(&rootmenu, NULL);
904 899
905 gtk_tree_path_free(path); 900 gtk_tree_path_free(path);
@@ -930,7 +925,6 @@ static void change_sym_value(struct menu *menu, gint col)
930 if (!sym_tristate_within_range(sym, newval)) 925 if (!sym_tristate_within_range(sym, newval))
931 newval = yes; 926 newval = yes;
932 sym_set_tristate_value(sym, newval); 927 sym_set_tristate_value(sym, newval);
933 config_changed = TRUE;
934 if (view_mode == FULL_VIEW) 928 if (view_mode == FULL_VIEW)
935 update_tree(&rootmenu, NULL); 929 update_tree(&rootmenu, NULL);
936 else if (view_mode == SPLIT_VIEW) { 930 else if (view_mode == SPLIT_VIEW) {
@@ -1633,3 +1627,10 @@ int main(int ac, char *av[])
1633 1627
1634 return 0; 1628 return 0;
1635} 1629}
1630
1631static void conf_changed(void)
1632{
1633 bool changed = conf_get_changed();
1634 gtk_widget_set_sensitive(save_btn, changed);
1635 gtk_widget_set_sensitive(save_menu_item, changed);
1636}
diff --git a/scripts/kconfig/gconf.glade b/scripts/kconfig/gconf.glade
index f8744ed64967..803233fdd6dd 100644
--- a/scripts/kconfig/gconf.glade
+++ b/scripts/kconfig/gconf.glade
@@ -70,7 +70,7 @@
70 <property name="tooltip" translatable="yes">Save the config in .config</property> 70 <property name="tooltip" translatable="yes">Save the config in .config</property>
71 <property name="label" translatable="yes">_Save</property> 71 <property name="label" translatable="yes">_Save</property>
72 <property name="use_underline">True</property> 72 <property name="use_underline">True</property>
73 <signal name="activate" handler="on_save1_activate"/> 73 <signal name="activate" handler="on_save_activate"/>
74 <accelerator key="S" modifiers="GDK_CONTROL_MASK" signal="activate"/> 74 <accelerator key="S" modifiers="GDK_CONTROL_MASK" signal="activate"/>
75 75
76 <child internal-child="image"> 76 <child internal-child="image">
@@ -380,7 +380,7 @@
380 <property name="visible_horizontal">True</property> 380 <property name="visible_horizontal">True</property>
381 <property name="visible_vertical">True</property> 381 <property name="visible_vertical">True</property>
382 <property name="is_important">False</property> 382 <property name="is_important">False</property>
383 <signal name="clicked" handler="on_save_clicked"/> 383 <signal name="clicked" handler="on_save_activate"/>
384 </widget> 384 </widget>
385 <packing> 385 <packing>
386 <property name="expand">False</property> 386 <property name="expand">False</property>
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index 2628023a1fe1..9b2706a41548 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -65,6 +65,8 @@ char *zconf_curname(void);
65 65
66/* confdata.c */ 66/* confdata.c */
67char *conf_get_default_confname(void); 67char *conf_get_default_confname(void);
68void sym_set_change_count(int count);
69void sym_add_change_count(int count);
68 70
69/* kconfig_load.c */ 71/* kconfig_load.c */
70void kconfig_load(void); 72void kconfig_load(void);
diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h
index a263746cfa7d..15030770d1ad 100644
--- a/scripts/kconfig/lkc_proto.h
+++ b/scripts/kconfig/lkc_proto.h
@@ -5,6 +5,8 @@ P(conf_read,int,(const char *name));
5P(conf_read_simple,int,(const char *name, int)); 5P(conf_read_simple,int,(const char *name, int));
6P(conf_write,int,(const char *name)); 6P(conf_write,int,(const char *name));
7P(conf_write_autoconf,int,(void)); 7P(conf_write_autoconf,int,(void));
8P(conf_get_changed,bool,(void));
9P(conf_set_changed_callback, void,(void (*fn)(void)));
8 10
9/* menu.c */ 11/* menu.c */
10P(rootmenu,struct menu,); 12P(rootmenu,struct menu,);
@@ -16,7 +18,6 @@ P(menu_get_parent_menu,struct menu *,(struct menu *menu));
16 18
17/* symbol.c */ 19/* symbol.c */
18P(symbol_hash,struct symbol *,[SYMBOL_HASHSIZE]); 20P(symbol_hash,struct symbol *,[SYMBOL_HASHSIZE]);
19P(sym_change_count,int,);
20 21
21P(sym_lookup,struct symbol *,(const char *name, int isconst)); 22P(sym_lookup,struct symbol *,(const char *name, int isconst));
22P(sym_find,struct symbol *,(const char *name)); 23P(sym_find,struct symbol *,(const char *name));
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 08a4c7af93ea..3f9a1321b3e6 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -890,14 +890,19 @@ int main(int ac, char **av)
890 do { 890 do {
891 conf(&rootmenu); 891 conf(&rootmenu);
892 dialog_clear(); 892 dialog_clear();
893 res = dialog_yesno(NULL, 893 if (conf_get_changed())
894 _("Do you wish to save your " 894 res = dialog_yesno(NULL,
895 "new kernel configuration?\n" 895 _("Do you wish to save your "
896 "<ESC><ESC> to continue."), 896 "new kernel configuration?\n"
897 6, 60); 897 "<ESC><ESC> to continue."),
898 6, 60);
899 else
900 res = -1;
898 } while (res == KEY_ESC); 901 } while (res == KEY_ESC);
899 end_dialog(); 902 end_dialog();
900 if (res == 0) { 903
904 switch (res) {
905 case 0:
901 if (conf_write(NULL)) { 906 if (conf_write(NULL)) {
902 fprintf(stderr, _("\n\n" 907 fprintf(stderr, _("\n\n"
903 "Error during writing of the kernel configuration.\n" 908 "Error during writing of the kernel configuration.\n"
@@ -905,11 +910,13 @@ int main(int ac, char **av)
905 "\n\n")); 910 "\n\n"));
906 return 1; 911 return 1;
907 } 912 }
913 case -1:
908 printf(_("\n\n" 914 printf(_("\n\n"
909 "*** End of Linux kernel configuration.\n" 915 "*** End of Linux kernel configuration.\n"
910 "*** Execute 'make' to build the kernel or try 'make help'." 916 "*** Execute 'make' to build the kernel or try 'make help'."
911 "\n\n")); 917 "\n\n"));
912 } else { 918 break;
919 default:
913 fprintf(stderr, _("\n\n" 920 fprintf(stderr, _("\n\n"
914 "Your kernel configuration changes were NOT saved." 921 "Your kernel configuration changes were NOT saved."
915 "\n\n")); 922 "\n\n"));
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index f5628c57640b..0b2fcc417f59 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -38,6 +38,8 @@
38static QApplication *configApp; 38static QApplication *configApp;
39static ConfigSettings *configSettings; 39static ConfigSettings *configSettings;
40 40
41QAction *ConfigMainWindow::saveAction;
42
41static inline QString qgettext(const char* str) 43static inline QString qgettext(const char* str)
42{ 44{
43 return QString::fromLocal8Bit(gettext(str)); 45 return QString::fromLocal8Bit(gettext(str));
@@ -1306,8 +1308,11 @@ ConfigMainWindow::ConfigMainWindow(void)
1306 connect(quitAction, SIGNAL(activated()), SLOT(close())); 1308 connect(quitAction, SIGNAL(activated()), SLOT(close()));
1307 QAction *loadAction = new QAction("Load", QPixmap(xpm_load), "&Load", CTRL+Key_L, this); 1309 QAction *loadAction = new QAction("Load", QPixmap(xpm_load), "&Load", CTRL+Key_L, this);
1308 connect(loadAction, SIGNAL(activated()), SLOT(loadConfig())); 1310 connect(loadAction, SIGNAL(activated()), SLOT(loadConfig()));
1309 QAction *saveAction = new QAction("Save", QPixmap(xpm_save), "&Save", CTRL+Key_S, this); 1311 saveAction = new QAction("Save", QPixmap(xpm_save), "&Save", CTRL+Key_S, this);
1310 connect(saveAction, SIGNAL(activated()), SLOT(saveConfig())); 1312 connect(saveAction, SIGNAL(activated()), SLOT(saveConfig()));
1313 conf_set_changed_callback(conf_changed);
1314 // Set saveAction's initial state
1315 conf_changed();
1311 QAction *saveAsAction = new QAction("Save As...", "Save &As...", 0, this); 1316 QAction *saveAsAction = new QAction("Save As...", "Save &As...", 0, this);
1312 connect(saveAsAction, SIGNAL(activated()), SLOT(saveConfigAs())); 1317 connect(saveAsAction, SIGNAL(activated()), SLOT(saveConfigAs()));
1313 QAction *searchAction = new QAction("Search", "&Search", CTRL+Key_F, this); 1318 QAction *searchAction = new QAction("Search", "&Search", CTRL+Key_F, this);
@@ -1585,7 +1590,7 @@ void ConfigMainWindow::showFullView(void)
1585 */ 1590 */
1586void ConfigMainWindow::closeEvent(QCloseEvent* e) 1591void ConfigMainWindow::closeEvent(QCloseEvent* e)
1587{ 1592{
1588 if (!sym_change_count) { 1593 if (!conf_get_changed()) {
1589 e->accept(); 1594 e->accept();
1590 return; 1595 return;
1591 } 1596 }
@@ -1658,6 +1663,12 @@ void ConfigMainWindow::saveSettings(void)
1658 configSettings->writeSizes("/split2", split2->sizes()); 1663 configSettings->writeSizes("/split2", split2->sizes());
1659} 1664}
1660 1665
1666void ConfigMainWindow::conf_changed(void)
1667{
1668 if (saveAction)
1669 saveAction->setEnabled(conf_get_changed());
1670}
1671
1661void fixup_rootmenu(struct menu *menu) 1672void fixup_rootmenu(struct menu *menu)
1662{ 1673{
1663 struct menu *child; 1674 struct menu *child;
diff --git a/scripts/kconfig/qconf.h b/scripts/kconfig/qconf.h
index 6a9e3b14c227..6fc1c5f14425 100644
--- a/scripts/kconfig/qconf.h
+++ b/scripts/kconfig/qconf.h
@@ -297,6 +297,9 @@ protected:
297 297
298class ConfigMainWindow : public QMainWindow { 298class ConfigMainWindow : public QMainWindow {
299 Q_OBJECT 299 Q_OBJECT
300
301 static QAction *saveAction;
302 static void conf_changed(void);
300public: 303public:
301 ConfigMainWindow(void); 304 ConfigMainWindow(void);
302public slots: 305public slots:
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index ee225ced2ce4..8f06c474d800 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -30,7 +30,6 @@ struct symbol symbol_yes = {
30 .flags = SYMBOL_VALID, 30 .flags = SYMBOL_VALID,
31}; 31};
32 32
33int sym_change_count;
34struct symbol *sym_defconfig_list; 33struct symbol *sym_defconfig_list;
35struct symbol *modules_sym; 34struct symbol *modules_sym;
36tristate modules_val; 35tristate modules_val;
@@ -379,7 +378,7 @@ void sym_clear_all_valid(void)
379 378
380 for_all_symbols(i, sym) 379 for_all_symbols(i, sym)
381 sym->flags &= ~SYMBOL_VALID; 380 sym->flags &= ~SYMBOL_VALID;
382 sym_change_count++; 381 sym_add_change_count(1);
383 if (modules_sym) 382 if (modules_sym)
384 sym_calc_value(modules_sym); 383 sym_calc_value(modules_sym);
385} 384}
diff --git a/scripts/kconfig/zconf.tab.c_shipped b/scripts/kconfig/zconf.tab.c_shipped
index 2fb0a4fc61d0..d777fe85627f 100644
--- a/scripts/kconfig/zconf.tab.c_shipped
+++ b/scripts/kconfig/zconf.tab.c_shipped
@@ -2135,7 +2135,7 @@ void conf_parse(const char *name)
2135 sym_check_deps(sym); 2135 sym_check_deps(sym);
2136 } 2136 }
2137 2137
2138 sym_change_count = 1; 2138 sym_set_change_count(1);
2139} 2139}
2140 2140
2141const char *zconf_tokenname(int token) 2141const char *zconf_tokenname(int token)
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index ab44feb3c600..04a5864c03b1 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -504,7 +504,7 @@ void conf_parse(const char *name)
504 sym_check_deps(sym); 504 sym_check_deps(sym);
505 } 505 }
506 506
507 sym_change_count = 1; 507 sym_set_change_count(1);
508} 508}
509 509
510const char *zconf_tokenname(int token) 510const char *zconf_tokenname(int token)
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index ac0a58222992..15ab5d02e80a 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -997,6 +997,7 @@ static int exit_section_ref_ok(const char *name)
997 "__bug_table", /* used by powerpc for BUG() */ 997 "__bug_table", /* used by powerpc for BUG() */
998 ".exitcall.exit", 998 ".exitcall.exit",
999 ".eh_frame", 999 ".eh_frame",
1000 ".parainstructions",
1000 ".stab", 1001 ".stab",
1001 "__ex_table", 1002 "__ex_table",
1002 ".fixup", 1003 ".fixup",
diff --git a/sound/aoa/fabrics/Kconfig b/sound/aoa/fabrics/Kconfig
index c3bc7705c86a..50d7021ff677 100644
--- a/sound/aoa/fabrics/Kconfig
+++ b/sound/aoa/fabrics/Kconfig
@@ -1,6 +1,6 @@
1config SND_AOA_FABRIC_LAYOUT 1config SND_AOA_FABRIC_LAYOUT
2 tristate "layout-id fabric" 2 tristate "layout-id fabric"
3 depends SND_AOA 3 depends on SND_AOA
4 select SND_AOA_SOUNDBUS 4 select SND_AOA_SOUNDBUS
5 select SND_AOA_SOUNDBUS_I2S 5 select SND_AOA_SOUNDBUS_I2S
6 ---help--- 6 ---help---
diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c
index f4c67042e3ac..3391f2a9b4d1 100644
--- a/sound/core/oss/mixer_oss.c
+++ b/sound/core/oss/mixer_oss.c
@@ -1023,7 +1023,7 @@ static int snd_mixer_oss_build_input(struct snd_mixer_oss *mixer, struct snd_mix
1023 } 1023 }
1024 up_read(&mixer->card->controls_rwsem); 1024 up_read(&mixer->card->controls_rwsem);
1025 if (slot.present != 0) { 1025 if (slot.present != 0) {
1026 pslot = (struct slot *)kmalloc(sizeof(slot), GFP_KERNEL); 1026 pslot = kmalloc(sizeof(slot), GFP_KERNEL);
1027 if (! pslot) 1027 if (! pslot)
1028 return -ENOMEM; 1028 return -ENOMEM;
1029 *pslot = slot; 1029 *pslot = slot;
diff --git a/sound/oss/ad1848.c b/sound/oss/ad1848.c
index 0ffa9970bf0f..7cf9913a47b2 100644
--- a/sound/oss/ad1848.c
+++ b/sound/oss/ad1848.c
@@ -1992,7 +1992,7 @@ int ad1848_init (char *name, struct resource *ports, int irq, int dma_playback,
1992 devc->audio_flags |= DMA_DUPLEX; 1992 devc->audio_flags |= DMA_DUPLEX;
1993 } 1993 }
1994 1994
1995 portc = (ad1848_port_info *) kmalloc(sizeof(ad1848_port_info), GFP_KERNEL); 1995 portc = kmalloc(sizeof(ad1848_port_info), GFP_KERNEL);
1996 if(portc==NULL) { 1996 if(portc==NULL) {
1997 release_region(devc->base, 4); 1997 release_region(devc->base, 4);
1998 return -1; 1998 return -1;
diff --git a/sound/oss/cs4232.c b/sound/oss/cs4232.c
index b6924c7f1484..de40e21bf279 100644
--- a/sound/oss/cs4232.c
+++ b/sound/oss/cs4232.c
@@ -408,7 +408,7 @@ static int __init cs4232_pnp_probe(struct pnp_dev *dev, const struct pnp_device_
408{ 408{
409 struct address_info *isapnpcfg; 409 struct address_info *isapnpcfg;
410 410
411 isapnpcfg=(struct address_info*)kmalloc(sizeof(*isapnpcfg),GFP_KERNEL); 411 isapnpcfg = kmalloc(sizeof(*isapnpcfg),GFP_KERNEL);
412 if (!isapnpcfg) 412 if (!isapnpcfg)
413 return -ENOMEM; 413 return -ENOMEM;
414 414
diff --git a/sound/oss/emu10k1/audio.c b/sound/oss/emu10k1/audio.c
index 49f902f35c28..efcf589d7083 100644
--- a/sound/oss/emu10k1/audio.c
+++ b/sound/oss/emu10k1/audio.c
@@ -1139,7 +1139,7 @@ static int emu10k1_audio_open(struct inode *inode, struct file *file)
1139 1139
1140match: 1140match:
1141 1141
1142 wave_dev = (struct emu10k1_wavedevice *) kmalloc(sizeof(struct emu10k1_wavedevice), GFP_KERNEL); 1142 wave_dev = kmalloc(sizeof(struct emu10k1_wavedevice), GFP_KERNEL);
1143 1143
1144 if (wave_dev == NULL) { 1144 if (wave_dev == NULL) {
1145 ERROR(); 1145 ERROR();
@@ -1155,7 +1155,7 @@ match:
1155 /* Recording */ 1155 /* Recording */
1156 struct wiinst *wiinst; 1156 struct wiinst *wiinst;
1157 1157
1158 if ((wiinst = (struct wiinst *) kmalloc(sizeof(struct wiinst), GFP_KERNEL)) == NULL) { 1158 if ((wiinst = kmalloc(sizeof(struct wiinst), GFP_KERNEL)) == NULL) {
1159 ERROR(); 1159 ERROR();
1160 kfree(wave_dev); 1160 kfree(wave_dev);
1161 return -ENOMEM; 1161 return -ENOMEM;
@@ -1211,7 +1211,7 @@ match:
1211 struct woinst *woinst; 1211 struct woinst *woinst;
1212 int i; 1212 int i;
1213 1213
1214 if ((woinst = (struct woinst *) kmalloc(sizeof(struct woinst), GFP_KERNEL)) == NULL) { 1214 if ((woinst = kmalloc(sizeof(struct woinst), GFP_KERNEL)) == NULL) {
1215 ERROR(); 1215 ERROR();
1216 kfree(wave_dev); 1216 kfree(wave_dev);
1217 return -ENOMEM; 1217 return -ENOMEM;
diff --git a/sound/oss/emu10k1/cardmi.c b/sound/oss/emu10k1/cardmi.c
index 0545814cc67d..57674f8c8a2e 100644
--- a/sound/oss/emu10k1/cardmi.c
+++ b/sound/oss/emu10k1/cardmi.c
@@ -157,7 +157,7 @@ int emu10k1_mpuin_add_buffer(struct emu10k1_mpuin *card_mpuin, struct midi_hdr *
157 midihdr->flags |= MIDIBUF_INQUEUE; /* set */ 157 midihdr->flags |= MIDIBUF_INQUEUE; /* set */
158 midihdr->flags &= ~MIDIBUF_DONE; /* clear */ 158 midihdr->flags &= ~MIDIBUF_DONE; /* clear */
159 159
160 if ((midiq = (struct midi_queue *) kmalloc(sizeof(struct midi_queue), GFP_ATOMIC)) == NULL) { 160 if ((midiq = kmalloc(sizeof(struct midi_queue), GFP_ATOMIC)) == NULL) {
161 /* Message lost */ 161 /* Message lost */
162 return -1; 162 return -1;
163 } 163 }
diff --git a/sound/oss/emu10k1/cardmo.c b/sound/oss/emu10k1/cardmo.c
index 5938d31f9e21..a8cc75db3e45 100644
--- a/sound/oss/emu10k1/cardmo.c
+++ b/sound/oss/emu10k1/cardmo.c
@@ -117,7 +117,7 @@ int emu10k1_mpuout_add_buffer(struct emu10k1_card *card, struct midi_hdr *midihd
117 midihdr->flags |= MIDIBUF_INQUEUE; 117 midihdr->flags |= MIDIBUF_INQUEUE;
118 midihdr->flags &= ~MIDIBUF_DONE; 118 midihdr->flags &= ~MIDIBUF_DONE;
119 119
120 if ((midiq = (struct midi_queue *) kmalloc(sizeof(struct midi_queue), GFP_KERNEL)) == NULL) { 120 if ((midiq = kmalloc(sizeof(struct midi_queue), GFP_KERNEL)) == NULL) {
121 /* Message lost */ 121 /* Message lost */
122 return -1; 122 return -1;
123 } 123 }
diff --git a/sound/oss/emu10k1/midi.c b/sound/oss/emu10k1/midi.c
index 8ac77df86397..cca3dad2bdf4 100644
--- a/sound/oss/emu10k1/midi.c
+++ b/sound/oss/emu10k1/midi.c
@@ -58,7 +58,7 @@ static int midiin_add_buffer(struct emu10k1_mididevice *midi_dev, struct midi_hd
58{ 58{
59 struct midi_hdr *midihdr; 59 struct midi_hdr *midihdr;
60 60
61 if ((midihdr = (struct midi_hdr *) kmalloc(sizeof(struct midi_hdr), GFP_KERNEL)) == NULL) { 61 if ((midihdr = kmalloc(sizeof(struct midi_hdr), GFP_KERNEL)) == NULL) {
62 ERROR(); 62 ERROR();
63 return -EINVAL; 63 return -EINVAL;
64 } 64 }
@@ -128,7 +128,7 @@ match:
128 mutex_lock(&card->open_sem); 128 mutex_lock(&card->open_sem);
129 } 129 }
130 130
131 if ((midi_dev = (struct emu10k1_mididevice *) kmalloc(sizeof(*midi_dev), GFP_KERNEL)) == NULL) 131 if ((midi_dev = kmalloc(sizeof(*midi_dev), GFP_KERNEL)) == NULL)
132 return -EINVAL; 132 return -EINVAL;
133 133
134 midi_dev->card = card; 134 midi_dev->card = card;
@@ -328,7 +328,7 @@ static ssize_t emu10k1_midi_write(struct file *file, const char __user *buffer,
328 if (!access_ok(VERIFY_READ, buffer, count)) 328 if (!access_ok(VERIFY_READ, buffer, count))
329 return -EFAULT; 329 return -EFAULT;
330 330
331 if ((midihdr = (struct midi_hdr *) kmalloc(sizeof(struct midi_hdr), GFP_KERNEL)) == NULL) 331 if ((midihdr = kmalloc(sizeof(struct midi_hdr), GFP_KERNEL)) == NULL)
332 return -EINVAL; 332 return -EINVAL;
333 333
334 midihdr->bufferlength = count; 334 midihdr->bufferlength = count;
@@ -490,7 +490,7 @@ int emu10k1_seq_midi_open(int dev, int mode,
490 490
491 DPF(2, "emu10k1_seq_midi_open()\n"); 491 DPF(2, "emu10k1_seq_midi_open()\n");
492 492
493 if ((midi_dev = (struct emu10k1_mididevice *) kmalloc(sizeof(*midi_dev), GFP_KERNEL)) == NULL) 493 if ((midi_dev = kmalloc(sizeof(*midi_dev), GFP_KERNEL)) == NULL)
494 return -EINVAL; 494 return -EINVAL;
495 495
496 midi_dev->card = card; 496 midi_dev->card = card;
@@ -540,7 +540,7 @@ int emu10k1_seq_midi_out(int dev, unsigned char midi_byte)
540 540
541 card = midi_devs[dev]->devc; 541 card = midi_devs[dev]->devc;
542 542
543 if ((midihdr = (struct midi_hdr *) kmalloc(sizeof(struct midi_hdr), GFP_KERNEL)) == NULL) 543 if ((midihdr = kmalloc(sizeof(struct midi_hdr), GFP_KERNEL)) == NULL)
544 return -EINVAL; 544 return -EINVAL;
545 545
546 midihdr->bufferlength = 1; 546 midihdr->bufferlength = 1;
diff --git a/sound/oss/emu10k1/mixer.c b/sound/oss/emu10k1/mixer.c
index cbcaaa34189a..6419796c2ed7 100644
--- a/sound/oss/emu10k1/mixer.c
+++ b/sound/oss/emu10k1/mixer.c
@@ -194,7 +194,7 @@ static int emu10k1_private_mixer(struct emu10k1_card *card, unsigned int cmd, un
194 194
195 case SOUND_MIXER_PRIVATE3: 195 case SOUND_MIXER_PRIVATE3:
196 196
197 ctl = (struct mixer_private_ioctl *) kmalloc(sizeof(struct mixer_private_ioctl), GFP_KERNEL); 197 ctl = kmalloc(sizeof(struct mixer_private_ioctl), GFP_KERNEL);
198 if (ctl == NULL) 198 if (ctl == NULL)
199 return -ENOMEM; 199 return -ENOMEM;
200 200
diff --git a/sound/oss/hal2.c b/sound/oss/hal2.c
index 784bdd707055..d18286ccc14d 100644
--- a/sound/oss/hal2.c
+++ b/sound/oss/hal2.c
@@ -1435,7 +1435,7 @@ static int hal2_init_card(struct hal2_card **phal2, struct hpc3_regs *hpc3)
1435 int ret = 0; 1435 int ret = 0;
1436 struct hal2_card *hal2; 1436 struct hal2_card *hal2;
1437 1437
1438 hal2 = (struct hal2_card *) kmalloc(sizeof(struct hal2_card), GFP_KERNEL); 1438 hal2 = kmalloc(sizeof(struct hal2_card), GFP_KERNEL);
1439 if (!hal2) 1439 if (!hal2)
1440 return -ENOMEM; 1440 return -ENOMEM;
1441 memset(hal2, 0, sizeof(struct hal2_card)); 1441 memset(hal2, 0, sizeof(struct hal2_card));
diff --git a/sound/oss/mpu401.c b/sound/oss/mpu401.c
index e96220541971..2796c0ef985f 100644
--- a/sound/oss/mpu401.c
+++ b/sound/oss/mpu401.c
@@ -1023,7 +1023,7 @@ int attach_mpu401(struct address_info *hw_config, struct module *owner)
1023 devc->capabilities |= MPU_CAP_INTLG; /* Supports intelligent mode */ 1023 devc->capabilities |= MPU_CAP_INTLG; /* Supports intelligent mode */
1024 1024
1025 1025
1026 mpu401_synth_operations[m] = (struct synth_operations *)kmalloc(sizeof(struct synth_operations), GFP_KERNEL); 1026 mpu401_synth_operations[m] = kmalloc(sizeof(struct synth_operations), GFP_KERNEL);
1027 1027
1028 if (mpu401_synth_operations[m] == NULL) 1028 if (mpu401_synth_operations[m] == NULL)
1029 { 1029 {
diff --git a/sound/oss/opl3.c b/sound/oss/opl3.c
index 4799bc77f987..2e8cfa5481f2 100644
--- a/sound/oss/opl3.c
+++ b/sound/oss/opl3.c
@@ -166,7 +166,7 @@ int opl3_detect(int ioaddr, int *osp)
166 return 0; 166 return 0;
167 } 167 }
168 168
169 devc = (struct opl_devinfo *)kmalloc(sizeof(*devc), GFP_KERNEL); 169 devc = kmalloc(sizeof(*devc), GFP_KERNEL);
170 170
171 if (devc == NULL) 171 if (devc == NULL)
172 { 172 {
diff --git a/sound/oss/sb_common.c b/sound/oss/sb_common.c
index 440537c72604..07cbacf63824 100644
--- a/sound/oss/sb_common.c
+++ b/sound/oss/sb_common.c
@@ -625,7 +625,7 @@ int sb_dsp_detect(struct address_info *hw_config, int pci, int pciio, struct sb_
625 */ 625 */
626 626
627 627
628 detected_devc = (sb_devc *)kmalloc(sizeof(sb_devc), GFP_KERNEL); 628 detected_devc = kmalloc(sizeof(sb_devc), GFP_KERNEL);
629 if (detected_devc == NULL) 629 if (detected_devc == NULL)
630 { 630 {
631 printk(KERN_ERR "sb: Can't allocate memory for device information\n"); 631 printk(KERN_ERR "sb: Can't allocate memory for device information\n");
diff --git a/sound/oss/sb_midi.c b/sound/oss/sb_midi.c
index 2e3bc045caba..8b796704e112 100644
--- a/sound/oss/sb_midi.c
+++ b/sound/oss/sb_midi.c
@@ -173,7 +173,7 @@ void sb_dsp_midi_init(sb_devc * devc, struct module *owner)
173 return; 173 return;
174 } 174 }
175 std_midi_synth.midi_dev = devc->my_mididev = dev; 175 std_midi_synth.midi_dev = devc->my_mididev = dev;
176 midi_devs[dev] = (struct midi_operations *)kmalloc(sizeof(struct midi_operations), GFP_KERNEL); 176 midi_devs[dev] = kmalloc(sizeof(struct midi_operations), GFP_KERNEL);
177 if (midi_devs[dev] == NULL) 177 if (midi_devs[dev] == NULL)
178 { 178 {
179 printk(KERN_WARNING "Sound Blaster: failed to allocate MIDI memory.\n"); 179 printk(KERN_WARNING "Sound Blaster: failed to allocate MIDI memory.\n");
@@ -189,7 +189,7 @@ void sb_dsp_midi_init(sb_devc * devc, struct module *owner)
189 midi_devs[dev]->devc = devc; 189 midi_devs[dev]->devc = devc;
190 190
191 191
192 midi_devs[dev]->converter = (struct synth_operations *)kmalloc(sizeof(struct synth_operations), GFP_KERNEL); 192 midi_devs[dev]->converter = kmalloc(sizeof(struct synth_operations), GFP_KERNEL);
193 if (midi_devs[dev]->converter == NULL) 193 if (midi_devs[dev]->converter == NULL)
194 { 194 {
195 printk(KERN_WARNING "Sound Blaster: failed to allocate MIDI memory.\n"); 195 printk(KERN_WARNING "Sound Blaster: failed to allocate MIDI memory.\n");
diff --git a/sound/oss/sb_mixer.c b/sound/oss/sb_mixer.c
index 238e2cf44b08..fad1a4f25ad6 100644
--- a/sound/oss/sb_mixer.c
+++ b/sound/oss/sb_mixer.c
@@ -734,7 +734,7 @@ int sb_mixer_init(sb_devc * devc, struct module *owner)
734 if (m == -1) 734 if (m == -1)
735 return 0; 735 return 0;
736 736
737 mixer_devs[m] = (struct mixer_operations *)kmalloc(sizeof(struct mixer_operations), GFP_KERNEL); 737 mixer_devs[m] = kmalloc(sizeof(struct mixer_operations), GFP_KERNEL);
738 if (mixer_devs[m] == NULL) 738 if (mixer_devs[m] == NULL)
739 { 739 {
740 printk(KERN_ERR "sb_mixer: Can't allocate memory\n"); 740 printk(KERN_ERR "sb_mixer: Can't allocate memory\n");
diff --git a/sound/oss/v_midi.c b/sound/oss/v_midi.c
index d952b2264da1..103940fd5b4f 100644
--- a/sound/oss/v_midi.c
+++ b/sound/oss/v_midi.c
@@ -183,7 +183,7 @@ static void __init attach_v_midi (struct address_info *hw_config)
183 return; 183 return;
184 } 184 }
185 185
186 m=(struct vmidi_memory *)kmalloc(sizeof(struct vmidi_memory), GFP_KERNEL); 186 m = kmalloc(sizeof(struct vmidi_memory), GFP_KERNEL);
187 if (m == NULL) 187 if (m == NULL)
188 { 188 {
189 printk(KERN_WARNING "Loopback MIDI: Failed to allocate memory\n"); 189 printk(KERN_WARNING "Loopback MIDI: Failed to allocate memory\n");
diff --git a/sound/oss/waveartist.c b/sound/oss/waveartist.c
index c5bf363d32c2..26a7c6af95bc 100644
--- a/sound/oss/waveartist.c
+++ b/sound/oss/waveartist.c
@@ -1267,7 +1267,7 @@ static int __init waveartist_init(wavnc_info *devc)
1267 conf_printf2(dev_name, devc->hw.io_base, devc->hw.irq, 1267 conf_printf2(dev_name, devc->hw.io_base, devc->hw.irq,
1268 devc->hw.dma, devc->hw.dma2); 1268 devc->hw.dma, devc->hw.dma2);
1269 1269
1270 portc = (wavnc_port_info *)kmalloc(sizeof(wavnc_port_info), GFP_KERNEL); 1270 portc = kmalloc(sizeof(wavnc_port_info), GFP_KERNEL);
1271 if (portc == NULL) 1271 if (portc == NULL)
1272 goto nomem; 1272 goto nomem;
1273 1273